664 files changed, 3605 insertions, 129362 deletions
diff --git a/Jamroot b/Jamroot
deleted file mode 100644
index 8f6629ea..00000000
--- a/Jamroot
+++ /dev/null
@@ -1,45 +0,0 @@
-#cdec compilation with bjam
-#
-#--with-boost=/usr/include
-#--with-google-hash=/usr/include so that $with-google-hash/google/dense_hash_map exists
-#
-#-a forces the build to run from scratch
-#-jN parallelizes just like make
-#
-#Respects CXXFLAGS, CFLAGS, and LDFLAGS environment variables.
-
-path-constant TOP : . ;
-include $(TOP)/jam-files/sanity.jam ;
-boost 104200 ;
-external-lib z ;
-
-with-google-hash = [ option.get "with-google-hash" ] ;
-if [ test_header sparsehash/dense_hash_map ] || $(with-google-hash) {
-  requirements += <define>HAVE_SPARSEHASH <include>$(with-google-hash) ;
-}
-
-if [ test_header cmph.h ] || $(with-cmph) {
-  requirements += <define>HAVE_CMPH <include>$(with-cmph) ;
-}
-
-if [ test_header boost/serialization/map.hpp ] && [ test_library boost_serialization ] {
-  requirements += <define>HAVE_BOOST_ARCHIVE_TEXT_OARCHIVE_HPP ;
-}
-
-project : requirements $(requirements) <toolset>darwin:<link>static <library>boost_system ;
-project : default-build <warnings>on <variant>release ;
-
-install-bin-libs dpmert//programs utils//programs mteval//programs klm/lm//programs training//liblbfgs decoder//cdec phrasinator//programs mira//kbest_mira ;
-
-install perl-scripts : dpmert//dpmert.pl : <location>$(bindir) ;
-
-build-projects mteval decoder dpmert klm/lm training/liblbfgs ;
-
-#Compile everything ending with _test.cc into a test and run it.
-rule all_tests ( targets * : dependencies : properties * ) {
-  targets ?= [ glob *_test.cc ] ;
-  for t in $(targets) {
-    local base = [ MATCH "^(.*).cc$" : $(t) ] ;
-    unit-test $(base) : $(t) $(dependencies) /top//boost_unit_test_framework : $(properties) ;
-  }
-}
diff --git a/Makefile.am b/Makefile.am
index 24aafd63..3e0103a8 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -7,7 +7,6 @@ SUBDIRS = \
 	klm/util \
 	klm/lm \
 	decoder \
-	phrasinator \
 	training \
 	training/liblbfgs \
 	mira \
@@ -16,9 +15,7 @@ SUBDIRS = \
 	pro-train \
 	rampion \
 	minrisk \
-	gi/pf \
-	gi/markov_al \
-	rst_parser
+	example_extff
 
 #gi/pyp-topics/src gi/clda/src gi/posterior-regularisation/prjava
diff --git a/README b/README
deleted file mode 100644
index 47b52355..00000000
--- a/README
+++ /dev/null
@@ -1,57 +0,0 @@
-cdec is a fast decoder.
-
-SPEED COMPARISON
-------------------------------------------------------------------------------
-
-Here is a comparison with a couple of other decoders doing SCFG decoding:
-
-  Decoder   Lang.    BLEU    Run-Time        Memory
-  cdec      c++      31.47   0.37 sec/sent   1.0-1.1GB
-  Joshua    Java     31.55   2.34 sec/sent   4.0-4.8GB
-  Hiero     Python   31.22   27.2 sec/sent   1.7-1.9GB
-
-The maximum number of pops from candidate heap at each node is k=30, no other
-pruning, 3gm LM, Chinese-English translation task.
-
-
-GETTING STARTED
-------------------------------------------------------------------------------
-
-See the BUILDING file for instructions on how to build the software. To
-explore the decoder's features, the best way to get started is to look
-at cdec's command line options or to have a look at the test cases in
-the tests/system_tests/ directory. Each of these can be run with a command
-like ./cdec -c cdec.ini -i input.txt -w weights . The files should be
-self explanatory.
-
-
-EXTRACTING A SYNCHRONOUS GRAMMAR / PHRASE TABLE
-------------------------------------------------------------------------------
-cdec does not include code for generating grammars. To build these, you will
-need to write your own software or use an existing package like Joshua, Hiero,
-or Moses.
-
-
-OPTIMIZING / TRAINING MODELS
-------------------------------------------------------------------------------
-cdec does include code for optimizing models, according to a number of
-training criteria, including training models as CRFs (with latent derivation
-variables), MERT (over hypergraphs) to opimize BLEU, TER, etc.
-
-Eventually, I will provide documentation for this.
-
-
-ALIGNMENT / SYNCHRONOUS PARSING / CONSTRAINED DECODING
-------------------------------------------------------------------------------
-cdec can be used as an aligner. For examples, see the test cases.
-
-
-COPYRIGHT AND LICENSE
-------------------------------------------------------------------------------
-Copyright (c) 2009 by Chris Dyer <redpony@gmail.com>
-
-See the file LICENSE.txt for the licensing terms that this software is
-released under. This software also includes the file m4/boost.m4 which is
-licensed under the LGPL v3, for more information refer to the comments
-in that file.
-
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..d89e9558
--- /dev/null
+++ b/README.md
@@ -0,0 +1,24 @@
+`cdec` is a research platform for machine translation and similar structured prediction problems.
+
+## Installation
+
+Build `cdec`:
+
+    autoreconf -ifv
+    ./configure
+    make
+    ./tests/run-system-tests.pl
+
+You will need the following libraries / tools:
+
+- [Autoconf / Automake / Libtool](http://www.gnu.org/software/autoconf/)
+    - Older versions of GNU autotools may not work properly.
+- [Boost C++ libraries (version 1.44 or later)](http://www.boost.org/)
+    - If you build your own boost, you _must install it_ using `bjam install`.
+    - Older versions of Boost _may_ work, but problems have been reported with command line option parsing on some platforms with older versions.
+- [GNU Flex](http://flex.sourceforge.net/)
+
+## Further information
+
+[For more information, refer to the cdec documentation](http://www.cdec-decoder.org)
+
diff --git a/bjam b/bjam
deleted file mode 100755
--- a/bjam
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-set -e
-if
-    bjam="$(which bjam 2>/dev/null)" && #exists
-    [ ${#bjam} != 0 ] && #paranoia about which printing nothing then returning true
-    ! grep UFIHGUFIHBDJKNCFZXAEVA "${bjam}" </dev/null >/dev/null && #bjam in path isn't this script
-    "${bjam}" --sanity-test 2>/dev/null |grep Sane >/dev/null && #The test in jam-files/sanity.jam passes
-    (cd jam-files/fail && ! "${bjam}") >/dev/null #Returns non-zero on failure
-then
-    #Delegate to system bjam
-    exec "${bjam}" "$@"
-fi
-
-top="$(dirname "$0")"
-if [ ! -x "$top"/jam-files/bjam ]; then
-    pushd "$top/jam-files/engine"
-    ./build.sh
-    cp -f bin.*/bjam ../bjam
-    popd
-fi
-
-export BOOST_BUILD_PATH="$top"/jam-files/boost-build
-exec "$top"/jam-files/bjam "$@"
diff --git a/configure.ac b/configure.ac
index ea9e84fb..03a0ee87 100644
--- a/configure.ac
+++ b/configure.ac
@@ -14,7 +14,8 @@ BOOST_REQUIRE([1.44])
 BOOST_PROGRAM_OPTIONS
 BOOST_TEST
 AM_PATH_PYTHON
-# TODO detect Cython, generate python/Makefile that calls "python setup.py build"
+AC_CHECK_HEADER(dlfcn.h,AC_DEFINE(HAVE_DLFCN_H))
+AC_CHECK_LIB(dl, dlopen)
 
 AC_ARG_ENABLE(mpi,
   [  --enable-mpi            Build MPI binaries, assumes mpi.h is present ],
@@ -114,7 +115,6 @@ AC_CONFIG_FILES([Makefile])
 AC_CONFIG_FILES([utils/Makefile])
 AC_CONFIG_FILES([mteval/Makefile])
 AC_CONFIG_FILES([decoder/Makefile])
-AC_CONFIG_FILES([phrasinator/Makefile])
 AC_CONFIG_FILES([training/Makefile])
 AC_CONFIG_FILES([training/liblbfgs/Makefile])
 AC_CONFIG_FILES([dpmert/Makefile])
@@ -125,11 +125,7 @@ AC_CONFIG_FILES([klm/util/Makefile])
 AC_CONFIG_FILES([klm/lm/Makefile])
 AC_CONFIG_FILES([mira/Makefile])
 AC_CONFIG_FILES([dtrain/Makefile])
-AC_CONFIG_FILES([gi/pyp-topics/src/Makefile])
-AC_CONFIG_FILES([gi/clda/src/Makefile])
-AC_CONFIG_FILES([gi/pf/Makefile])
-AC_CONFIG_FILES([gi/markov_al/Makefile])
-AC_CONFIG_FILES([rst_parser/Makefile])
+AC_CONFIG_FILES([example_extff/Makefile])
 
 AC_CONFIG_FILES([python/setup.py])
diff --git a/decoder/Jamfile b/decoder/Jamfile
deleted file mode 100644
index d778dc7f..00000000
--- a/decoder/Jamfile
+++ /dev/null
@@ -1,83 +0,0 @@
-import testing ;
-import lex ;
-import option ;
-
-if [ option.get "with-glc" ] {
-  glc = ff_glc.cc string_util.cc feature-factory.cc ;
-}
-
-lib decoder :
-    forest_writer.cc
-    maxtrans_blunsom.cc
-    cdec_ff.cc
-    cfg.cc
-    dwarf.cc
-    ff_dwarf.cc
-    rule_lexer.ll
-    fst_translator.cc
-    csplit.cc
-    translator.cc
-    scfg_translator.cc
-    hg.cc
-    hg_io.cc
-    decoder.cc
-    hg_intersect.cc
-    hg_sampler.cc
-    factored_lexicon_helper.cc
-    viterbi.cc
-    lattice.cc
-    aligner.cc
-    apply_models.cc
-    earley_composer.cc
-    phrasetable_fst.cc
-    trule.cc
-    ff.cc
-    ff_rules.cc
-    ff_wordset.cc
-    ff_context.cc
-    ff_charset.cc
-    ff_lm.cc
-    ff_klm.cc
-    ff_ngrams.cc
-    ff_spans.cc
-    ff_ruleshape.cc
-    ff_wordalign.cc
-    ff_csplit.cc
-    ff_tagger.cc
-    ff_source_syntax.cc
-    ff_bleu.cc
-    ff_factory.cc
-    lexalign.cc
-    lextrans.cc
-    tagger.cc
-    bottom_up_parser.cc
-    phrasebased_translator.cc
-    JSON_parser.c
-    json_parse.cc
-    grammar.cc
-    rescore_translator.cc
-    hg_remove_eps.cc
-    hg_union.cc
-    lazy.cc
-    $(glc)
-    ..//utils
-    ..//mteval
-    ../klm/lm//kenlm
-    ../klm/search//search
-    ..//boost_program_options
-    : <include>.
-    : :
-    <library>..//utils
-    <library>..//mteval
-    <library>../klm/lm//kenlm
-    <library>..//boost_program_options
-    <include>.
-  ;
-
-exe cdec : cdec.cc decoder ..//utils ..//mteval ../klm/lm//kenlm ..//boost_program_options ;
-
-all_tests [ glob *_test.cc : cfg_test.cc ] : decoder : <testing.arg>$(TOP)/decoder/test_data ;
-
-install legacy : cdec
-  : <location>$(TOP)/cdec <install-type>EXE <install-dependencies>on <link>shared:<dll-path>$(TOP)/cdec <link>shared:<install-type>LIB ;
diff --git a/decoder/Makefile.am b/decoder/Makefile.am
index 4a98a4f1..5c0a1964 100644
--- a/decoder/Makefile.am
+++ b/decoder/Makefile.am
@@ -33,6 +33,7 @@ libcdec_a_SOURCES = \
   cfg.cc \
   dwarf.cc \
   ff_dwarf.cc \
+  ff_external.cc \
   rule_lexer.cc \
   fst_translator.cc \
   csplit.cc \
@@ -55,6 +56,8 @@ libcdec_a_SOURCES = \
   phrasetable_fst.cc \
   trule.cc \
   ff.cc \
+  ffset.cc \
+  ff_basic.cc \
   ff_rules.cc \
   ff_wordset.cc \
   ff_context.cc \
diff --git a/decoder/apply_models.cc b/decoder/apply_models.cc
index 9ba59d1b..330de9e2 100644
--- a/decoder/apply_models.cc
+++ b/decoder/apply_models.cc
@@ -16,6 +16,7 @@
 #include "verbose.h"
 #include "hg.h"
 #include "ff.h"
+#include "ffset.h"
 
 #define NORMAL_CP 1
 #define FAST_CP 2
diff --git a/decoder/cdec.cc b/decoder/cdec.cc
index 25d3b6af..cc3fcff1 100644
--- a/decoder/cdec.cc
+++ b/decoder/cdec.cc
@@ -4,6 +4,7 @@
 #include "decoder.h"
 #include "ff_register.h"
 #include "verbose.h"
+#include "timing_stats.h"
 #include "util/usage.hh"
 
 using namespace std;
@@ -28,6 +29,7 @@ int main(int argc, char** argv) {
     if (buf.empty()) continue;
     decoder.Decode(buf);
   }
+  Timer::Summarize();
 #ifdef CP_TIME
   cerr << "Time required for Cube Pruning execution: "
        << CpTime::Get()
diff --git a/decoder/cdec_ff.cc b/decoder/cdec_ff.cc
index b516c386..99ab7473 100644
--- a/decoder/cdec_ff.cc
+++ b/decoder/cdec_ff.cc
@@ -1,6 +1,7 @@
 #include <boost/shared_ptr.hpp>
 
 #include "ff.h"
+#include "ff_basic.h"
 #include "ff_context.h"
 #include "ff_spans.h"
 #include "ff_lm.h"
@@ -18,6 +19,7 @@
 #include "ff_charset.h"
 #include "ff_wordset.h"
 #include "ff_dwarf.h"
+#include "ff_external.h"
 
 #ifdef HAVE_GLC
 #include <cdec/ff_glc.h>
@@ -69,6 +71,7 @@ void register_feature_functions() {
   ff_registry.Register("WordPairFeatures", new FFFactory<WordPairFeatures>);
   ff_registry.Register("WordSet", new FFFactory<WordSet>);
   ff_registry.Register("Dwarf", new FFFactory<Dwarf>);
+  ff_registry.Register("External", new FFFactory<ExternalFeature>);
 #ifdef HAVE_GLC
   ff_registry.Register("ContextCRF", new FFFactory<Model1Features>);
 #endif
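The hunk above wires the new External feature into the same factory table as every built-in feature, so a dynamically loaded feature is selected by name in the decoder configuration exactly like a compiled-in one. As an illustration of the registration pattern only (the class name MyFeature is hypothetical, not part of this commit):

    // Hypothetical sketch of the pattern used in register_feature_functions()
    // above; MyFeature stands for any FeatureFunction subclass with a
    // (const std::string& param) constructor.
    #include "ff.h"
    #include "ff_factory.h"

    class MyFeature : public FeatureFunction { /* ... */ };

    void register_my_feature() {
      // maps the config-file name "MyFeature" to a factory that runs
      // new MyFeature(param)
      ff_registry.Register("MyFeature", new FFFactory<MyFeature>);
    }
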
diff --git a/decoder/cfg.h b/decoder/cfg.h
index 8cb29bb9..aeeacb83 100644
--- a/decoder/cfg.h
+++ b/decoder/cfg.h
@@ -130,7 +130,7 @@ struct CFG {
     int lhs; // index into nts
     RHS rhs;
     prob_t p; // h unused for now (there's nothing admissable, and p is already using 1st pass inside as pushed toward top)
-    FeatureVector f; // may be empty, unless copy_features on Init
+    SparseVector<double> f; // may be empty, unless copy_features on Init
     IF_CFG_TRULE(TRulePtr rule;)
     int size() const { // for stats only
       return rhs.size();
diff --git a/decoder/cfg_format.h b/decoder/cfg_format.h
index 2f40d483..d12da261 100644
--- a/decoder/cfg_format.h
+++ b/decoder/cfg_format.h
@@ -100,7 +100,7 @@ struct CFGFormat {
     }
   }
 
-  void print_features(std::ostream &o,prob_t p,FeatureVector const& fv=FeatureVector()) const {
+  void print_features(std::ostream &o,prob_t p,SparseVector<double> const& fv=SparseVector<double>()) const {
     bool logp=(logprob_feat && p!=prob_t::One());
     if (features || logp) {
       o << partsep;
diff --git a/decoder/cfg_test.cc b/decoder/cfg_test.cc
index b8f4cf11..316c6d16 100644
--- a/decoder/cfg_test.cc
+++ b/decoder/cfg_test.cc
@@ -25,9 +25,9 @@ struct CFGTest : public TestWithParam<HgW> {
   Hypergraph hg;
   CFG cfg;
   CFGFormat form;
-  FeatureVector weights;
+  SparseVector<double> weights;
 
-  static void JsonFN(Hypergraph &hg,CFG &cfg,FeatureVector &featw,std::string file
+  static void JsonFN(Hypergraph &hg,CFG &cfg,SparseVector<double> &featw,std::string file
                      ,std::string const& wts="Model_0 1 EgivenF 1 f1 1") {
     istringstream ws(wts);
diff --git a/decoder/decoder.cc b/decoder/decoder.cc
index 29eaa4f6..052823ca 100644
--- a/decoder/decoder.cc
+++ b/decoder/decoder.cc
@@ -29,6 +29,7 @@
 #include "oracle_bleu.h"
 #include "apply_models.h"
 #include "ff.h"
+#include "ffset.h"
 #include "ff_factory.h"
 #include "viterbi.h"
 #include "kbest.h"
@@ -91,11 +92,6 @@ inline void ShowBanner() {
   cerr << "cdec v1.0 (c) 2009-2011 by Chris Dyer\n";
 }
 
-inline void show_models(po::variables_map const& conf,ModelSet &ms,char const* header) {
-  cerr<<header<<": ";
-  ms.show_features(cerr,cerr,conf.count("warn_0_weight"));
-}
-
 inline string str(char const* name,po::variables_map const& conf) {
   return conf[name].as<string>();
 }
@@ -133,7 +129,7 @@ inline boost::shared_ptr<FeatureFunction> make_ff(string const& ffp,bool verbose
   }
   boost::shared_ptr<FeatureFunction> pf = ff_registry.Create(ff, param);
   if (!pf) exit(1);
-  int nbyte=pf->NumBytesContext();
+  int nbyte=pf->StateSize();
   if (verbose_feature_functions && !SILENT)
     cerr<<"State is "<<nbyte<<" bytes for "<<pre<<"feature "<<ffp<<endl;
   return pf;
@@ -644,8 +640,6 @@ DecoderImpl::DecoderImpl(po::variables_map& conf, int argc, char** argv, istream
       prev_weights = rp.weight_vector;
     }
     rp.models.reset(new ModelSet(*rp.weight_vector, rp.ffs));
-    string ps = "Pass1 "; ps[4] += pass;
-    if (!SILENT) show_models(conf,*rp.models,ps.c_str());
   }
 
   // show configuration of rescoring passes
@@ -879,13 +873,13 @@ bool DecoderImpl::Decode(const string& input, DecoderObserver* o) {
       if (rp.fid_summary) {
         if (summary_feature_type == kEDGE_PROB) {
           const prob_t z = forest.PushWeightsToGoal(1.0);
-          if (!isfinite(log(z)) || isnan(log(z))) {
+          if (!std::isfinite(log(z)) || std::isnan(log(z))) {
             cerr << "  " << passtr << " !!! Invalid partition detected, abandoning.\n";
           } else {
             for (int i = 0; i < forest.edges_.size(); ++i) {
               const double log_prob_transition = log(forest.edges_[i].edge_prob_); // locally normalized by the edge
                                                                                    // head node by forest.PushWeightsToGoal
-              if (!isfinite(log_prob_transition) || isnan(log_prob_transition)) {
+              if (!std::isfinite(log_prob_transition) || std::isnan(log_prob_transition)) {
                 cerr << "Edge: i=" << i << " got bad inside prob: " << *forest.edges_[i].rule_ << endl;
                 abort();
               }
@@ -897,7 +891,7 @@ bool DecoderImpl::Decode(const string& input, DecoderObserver* o) {
         } else if (summary_feature_type == kNODE_RISK) {
           Hypergraph::EdgeProbs posts;
           const prob_t z = forest.ComputeEdgePosteriors(1.0, &posts);
-          if (!isfinite(log(z)) || isnan(log(z))) {
+          if (!std::isfinite(log(z)) || std::isnan(log(z))) {
             cerr << "  " << passtr << " !!! Invalid partition detected, abandoning.\n";
           } else {
             for (int i = 0; i < forest.nodes_.size(); ++i) {
@@ -906,7 +900,7 @@ bool DecoderImpl::Decode(const string& input, DecoderObserver* o) {
               for (int j = 0; j < in_edges.size(); ++j)
                 node_post += (posts[in_edges[j]] / z);
               const double log_np = log(node_post);
-              if (!isfinite(log_np) || isnan(log_np)) {
+              if (!std::isfinite(log_np) || std::isnan(log_np)) {
                 cerr << "got bad posterior prob for node " << i << endl;
                 abort();
               }
@@ -921,13 +915,13 @@ bool DecoderImpl::Decode(const string& input, DecoderObserver* o) {
         } else if (summary_feature_type == kEDGE_RISK) {
           Hypergraph::EdgeProbs posts;
           const prob_t z = forest.ComputeEdgePosteriors(1.0, &posts);
-          if (!isfinite(log(z)) || isnan(log(z))) {
+          if (!std::isfinite(log(z)) || std::isnan(log(z))) {
             cerr << "  " << passtr << " !!! Invalid partition detected, abandoning.\n";
           } else {
             assert(posts.size() == forest.edges_.size());
             for (int i = 0; i < posts.size(); ++i) {
               const double log_np = log(posts[i] / z);
-              if (!isfinite(log_np) || isnan(log_np)) {
+              if (!std::isfinite(log_np) || std::isnan(log_np)) {
                 cerr << "got bad posterior prob for node " << i << endl;
                 abort();
               }
@@ -967,7 +961,7 @@ bool DecoderImpl::Decode(const string& input, DecoderObserver* o) {
 
   // Oracle Rescoring
   if(get_oracle_forest) {
-    assert(!"this is broken"); FeatureVector dummy; // = last_weights
+    assert(!"this is broken"); SparseVector<double> dummy; // = last_weights
     Oracle oc=oracle.ComputeOracle(smeta,&forest,dummy,10,conf["forest_output"].as<std::string>());
     if (!SILENT) cerr << "  +Oracle BLEU forest (nodes/edges): " << forest.nodes_.size() << '/' << forest.edges_.size() << endl;
     if (!SILENT) cerr << "  +Oracle BLEU (paths): " << forest.NumberOfPaths() << endl;
@@ -1098,7 +1092,7 @@ bool DecoderImpl::Decode(const string& input, DecoderObserver* o) {
       cerr << "DIFF. ERR! log_z < log_ref_z: " << log_z << " " << log_ref_z << endl;
       exit(1);
     }
-    assert(!isnan(log_ref_z));
+    assert(!std::isnan(log_ref_z));
     ref_exp -= full_exp;
     acc_vec += ref_exp;
     acc_obj += (log_z - log_ref_z);
diff --git a/decoder/decoder.h b/decoder/decoder.h
index bef2ff5e..79c7a602 100644
--- a/decoder/decoder.h
+++ b/decoder/decoder.h
@@ -24,7 +24,7 @@ private:
 #endif
 
 class SentenceMetadata;
-struct Hypergraph;
+class Hypergraph;
 struct DecoderImpl;
 
 struct DecoderObserver {
diff --git a/decoder/exp_semiring.h b/decoder/exp_semiring.h
index 111eaaf1..2a9034bb 100644
--- a/decoder/exp_semiring.h
+++ b/decoder/exp_semiring.h
@@ -59,7 +59,7 @@ struct PRWeightFunction {
   explicit PRWeightFunction(const PWeightFunction& pwf = PWeightFunction(),
                             const RWeightFunction& rwf = RWeightFunction()) :
     pweight(pwf), rweight(rwf) {}
-  PRPair<P,R> operator()(const Hypergraph::Edge& e) const {
+  PRPair<P,R> operator()(const HG::Edge& e) const {
     const P p = pweight(e);
     const R r = rweight(e);
     return PRPair<P,R>(p, r * p);
diff --git a/decoder/ff.cc b/decoder/ff.cc
index 557e0b5f..a6a035b5 100644
--- a/decoder/ff.cc
+++ b/decoder/ff.cc
@@ -1,9 +1,3 @@
-//TODO: non-sparse vector for all feature functions?  modelset applymodels keeps track of who has what features?  it's nice having FF that could generate a handful out of 10000 possible feats, though.
-
-//TODO: actually score rule_feature()==true features once only, hash keyed on rule or modify TRule directly?  need to keep clear in forest which features come from models vs. rules; then rescoring could drop all the old models features at once
-
-#include "fast_lexical_cast.hpp"
-#include <stdexcept>
 #include "ff.h"
 
 #include "tdict.h"
@@ -16,8 +10,7 @@ FeatureFunction::~FeatureFunction() {}
 void FeatureFunction::PrepareForInput(const SentenceMetadata&) {}
 
 void FeatureFunction::FinalTraversalFeatures(const void* /* ant_state */,
-                                             SparseVector<double>* /* features */) const {
-}
+                                             SparseVector<double>* /* features */) const {}
 
 string FeatureFunction::usage_helper(std::string const& name,std::string const& params,std::string const& details,bool sp,bool sd) {
   string r=name;
@@ -32,188 +25,14 @@ string FeatureFunction::usage_helper(std::string const& name,std::string const&
   return r;
 }
 
-Features FeatureFunction::single_feature(WordID feat) {
-  return Features(1,feat);
-}
-
-Features ModelSet::all_features(std::ostream *warn,bool warn0) {
-  //return ::all_features(models_,weights_,warn,warn0);
-}
-
-void show_features(Features const& ffs,DenseWeightVector const& weights_,std::ostream &out,std::ostream &warn,bool warn_zero_wt) {
-  out << "Weight  Feature\n";
-  for (unsigned i=0;i<ffs.size();++i) {
-    WordID fid=ffs[i];
-    string const& fname=FD::Convert(fid);
-    double wt=weights_[fid];
-    if (warn_zero_wt && wt==0)
-      warn<<"WARNING: "<<fname<<" has 0 weight."<<endl;
-    out << wt << "  " << fname<<endl;
-  }
-}
-
-void ModelSet::show_features(std::ostream &out,std::ostream &warn,bool warn_zero_wt)
-{
-//  ::show_features(all_features(),weights_,out,warn,warn_zero_wt);
-  //show_all_features(models_,weights_,out,warn,warn_zero_wt,warn_zero_wt);
-}
-
-// Hiero and Joshua use log_10(e) as the value, so I do to
-WordPenalty::WordPenalty(const string& param) :
-    fid_(FD::Convert("WordPenalty")),
-    value_(-1.0 / log(10)) {
-  if (!param.empty()) {
-    cerr << "Warning WordPenalty ignoring parameter: " << param << endl;
-  }
-}
-
-void FeatureFunction::TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                            const Hypergraph::Edge& edge,
-                                            const std::vector<const void*>& ant_states,
-                                            SparseVector<double>* features,
-                                            SparseVector<double>* estimated_features,
-                                            void* state) const {
-  throw std::runtime_error("TraversalFeaturesImpl not implemented - override it or TraversalFeaturesLog.\n");
+void FeatureFunction::TraversalFeaturesImpl(const SentenceMetadata&,
+                                            const Hypergraph::Edge&,
+                                            const std::vector<const void*>&,
+                                            SparseVector<double>*,
+                                            SparseVector<double>*,
+                                            void*) const {
+  cerr << "TraversalFeaturesImpl not implemented - override it or TraversalFeaturesLog\n";
   abort();
 }
 
-void WordPenalty::TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                        const Hypergraph::Edge& edge,
-                                        const std::vector<const void*>& ant_states,
-                                        SparseVector<double>* features,
-                                        SparseVector<double>* estimated_features,
-                                        void* state) const {
-  (void) smeta;
-  (void) ant_states;
-  (void) state;
-  (void) estimated_features;
-  features->set_value(fid_, edge.rule_->EWords() * value_);
-}
-
-SourceWordPenalty::SourceWordPenalty(const string& param) :
-    fid_(FD::Convert("SourceWordPenalty")),
-    value_(-1.0 / log(10)) {
-  if (!param.empty()) {
-    cerr << "Warning SourceWordPenalty ignoring parameter: " << param << endl;
-  }
-}
-
-Features SourceWordPenalty::features() const {
-  return single_feature(fid_);
-}
-
-Features WordPenalty::features() const {
-  return single_feature(fid_);
-}
-
-void SourceWordPenalty::TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                              const Hypergraph::Edge& edge,
-                                              const std::vector<const void*>& ant_states,
-                                              SparseVector<double>* features,
-                                              SparseVector<double>* estimated_features,
-                                              void* state) const {
-  (void) smeta;
-  (void) ant_states;
-  (void) state;
-  (void) estimated_features;
-  features->set_value(fid_, edge.rule_->FWords() * value_);
-}
-
-ArityPenalty::ArityPenalty(const std::string& param) :
-    value_(-1.0 / log(10)) {
-  string fname = "Arity_";
-  unsigned MAX=DEFAULT_MAX_ARITY;
-  using namespace boost;
-  if (!param.empty())
-    MAX=lexical_cast<unsigned>(param);
-  for (unsigned i = 0; i <= MAX; ++i) {
-    WordID fid=FD::Convert(fname+lexical_cast<string>(i));
-    fids_.push_back(fid);
-  }
-  while (!fids_.empty() && fids_.back()==0) fids_.pop_back(); // pretty up features vector in case FD was frozen.  doesn't change anything
-}
-
-Features ArityPenalty::features() const {
-  return Features(fids_.begin(),fids_.end());
-}
-
-void ArityPenalty::TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                         const Hypergraph::Edge& edge,
-                                         const std::vector<const void*>& ant_states,
-                                         SparseVector<double>* features,
-                                         SparseVector<double>* estimated_features,
-                                         void* state) const {
-  (void) smeta;
-  (void) ant_states;
-  (void) state;
-  (void) estimated_features;
-  unsigned a=edge.Arity();
-  features->set_value(a<fids_.size()?fids_[a]:0, value_);
-}
-
-ModelSet::ModelSet(const vector<double>& w, const vector<const FeatureFunction*>& models) :
-    models_(models),
-    weights_(w),
-    state_size_(0),
-    model_state_pos_(models.size()) {
-  for (int i = 0; i < models_.size(); ++i) {
-    model_state_pos_[i] = state_size_;
-    state_size_ += models_[i]->NumBytesContext();
-  }
-}
-
-void ModelSet::PrepareForInput(const SentenceMetadata& smeta) {
-  for (int i = 0; i < models_.size(); ++i)
-    const_cast<FeatureFunction*>(models_[i])->PrepareForInput(smeta);
-}
-
-void ModelSet::AddFeaturesToEdge(const SentenceMetadata& smeta,
-                                 const Hypergraph& /* hg */,
-                                 const FFStates& node_states,
-                                 Hypergraph::Edge* edge,
-                                 FFState* context,
-                                 prob_t* combination_cost_estimate) const {
-  edge->reset_info();
-  context->resize(state_size_);
-  if (state_size_ > 0) {
-    memset(&(*context)[0], 0, state_size_);
-  }
-  SparseVector<double> est_vals;  // only computed if combination_cost_estimate is non-NULL
-  if (combination_cost_estimate) *combination_cost_estimate = prob_t::One();
-  for (int i = 0; i < models_.size(); ++i) {
-    const FeatureFunction& ff = *models_[i];
-    void* cur_ff_context = NULL;
-    vector<const void*> ants(edge->tail_nodes_.size());
-    bool has_context = ff.NumBytesContext() > 0;
-    if (has_context) {
-      int spos = model_state_pos_[i];
-      cur_ff_context = &(*context)[spos];
-      for (int i = 0; i < ants.size(); ++i) {
-        ants[i] = &node_states[edge->tail_nodes_[i]][spos];
-      }
-    }
-    ff.TraversalFeatures(smeta, *edge, ants, &edge->feature_values_, &est_vals, cur_ff_context);
-  }
-  if (combination_cost_estimate)
-    combination_cost_estimate->logeq(est_vals.dot(weights_));
-  edge->edge_prob_.logeq(edge->feature_values_.dot(weights_));
-}
-
-void ModelSet::AddFinalFeatures(const FFState& state, Hypergraph::Edge* edge,SentenceMetadata const& smeta) const {
-  assert(1 == edge->rule_->Arity());
-  edge->reset_info();
-  for (int i = 0; i < models_.size(); ++i) {
-    const FeatureFunction& ff = *models_[i];
-    const void* ant_state = NULL;
-    bool has_context = ff.NumBytesContext() > 0;
-    if (has_context) {
-      int spos = model_state_pos_[i];
-      ant_state = &state[spos];
-    }
-    ff.FinalTraversalFeatures(smeta, *edge, ant_state, &edge->feature_values_);
-  }
-  edge->edge_prob_.logeq(edge->feature_values_.dot(weights_));
-}
-
diff --git a/decoder/ff.h b/decoder/ff.h
index 6c22d39f..3280592e 100644
--- a/decoder/ff.h
+++ b/decoder/ff.h
@@ -1,79 +1,47 @@
 #ifndef _FF_H_
 #define _FF_H_
 
-#define DEBUG_INIT 0
-#if DEBUG_INIT
-# include <iostream>
-# define DBGINIT(a) do { std::cerr<<a<<"\n"; } while(0)
-#else
-# define DBGINIT(a)
-#endif
-
-#include <stdint.h>
+#include <string>
 #include <vector>
-#include <cstring>
-#include "fdict.h"
-#include "hg.h"
-#include "feature_vector.h"
-#include "value_array.h"
+#include "sparse_vector.h"
 
+namespace HG { struct Edge; struct Node; }
+class Hypergraph;
 class SentenceMetadata;
-class FeatureFunction;  // see definition below
-
-typedef std::vector<WordID> Features; // set of features ids
 
 // if you want to develop a new feature, inherit from this class and
 // override TraversalFeaturesImpl(...). If it's a feature that returns /
 // depends on context, you may also need to implement
 // FinalTraversalFeatures(...)
 class FeatureFunction {
+  friend class ExternalFeature;
  public:
   std::string name_; // set by FF factory using usage()
-  bool debug_; // also set by FF factory checking param for immediate initial "debug"
-  //called after constructor, but before name_ and debug_ have been set
-  virtual void Init() { DBGINIT("default FF::Init name="<<name_); }
-  virtual void init_name_debug(std::string const& n,bool debug) {
-    name_=n;
-    debug_=debug;
-  }
-  bool debug() const { return debug_; }
   FeatureFunction() : state_size_() {}
   explicit FeatureFunction(int state_size) : state_size_(state_size) {}
   virtual ~FeatureFunction();
   bool IsStateful() const { return state_size_ > 0; }
+  int StateSize() const { return state_size_; }
 
   // override this.  not virtual because we want to expose this to factory template for help before creating a FF
   static std::string usage(bool show_params,bool show_details) {
     return usage_helper("FIXME_feature_needs_name","[no parameters]","[no documentation yet]",show_params,show_details);
   }
   static std::string usage_helper(std::string const& name,std::string const& params,std::string const& details,bool show_params,bool show_details);
-  static Features single_feature(int feat);
-public:
-
-  // stateless feature that doesn't depend on source span: override and return true.  then your feature can be precomputed over rules.
-  virtual bool rule_feature() const { return false; }
 
   // called once, per input, before any feature calls to TraversalFeatures, etc.
   // used to initialize sentence-specific data structures
   virtual void PrepareForInput(const SentenceMetadata& smeta);
 
-  //OVERRIDE THIS:
-  virtual Features features() const { return single_feature(FD::Convert(name_)); }
-  // returns the number of bytes of context that this feature function will
-  // (maximally) use.  By default, 0 ("stateless" models in Hiero/Joshua).
-  // NOTE: this value is fixed for the instance of your class, you cannot
-  // use different amounts of memory for different nodes in the forest.  this will be read as soon as you create a ModelSet, then fixed forever on
-  inline int NumBytesContext() const { return state_size_; }
-
   // Compute the feature values and (if this applies) the estimates of the
   // feature values when this edge is used incorporated into a larger context
   inline void TraversalFeatures(const SentenceMetadata& smeta,
-                                Hypergraph::Edge& edge,
+                                const HG::Edge& edge,
                                 const std::vector<const void*>& ant_contexts,
-                                FeatureVector* features,
-                                FeatureVector* estimated_features,
+                                SparseVector<double>* features,
+                                SparseVector<double>* estimated_features,
                                 void* out_state) const {
-    TraversalFeaturesLog(smeta, edge, ant_contexts,
+    TraversalFeaturesImpl(smeta, edge, ant_contexts,
                           features, estimated_features, out_state);
     // TODO it's easy for careless feature function developers to overwrite
     // the end of their state and clobber someone else's memory.  These bugs
@@ -83,21 +51,10 @@ public:
   }
 
   // if there's some state left when you transition to the goal state, score
-  // it here.  For example, the language model computes the cost of adding
+  // it here.  For example, a language model might the cost of adding
   // <s> and </s>.
-
-protected:
   virtual void FinalTraversalFeatures(const void* residual_state,
-                                      FeatureVector* final_features) const;
-public:
-  //override either this or one of above.
-  virtual void FinalTraversalFeatures(const SentenceMetadata& /* smeta */,
-                                      Hypergraph::Edge& /* edge */, // so you can log()
-                                      const void* residual_state,
-                                      FeatureVector* final_features) const {
-    FinalTraversalFeatures(residual_state,final_features);
-  }
-
+                                      SparseVector<double>* final_features) const;
 
 protected:
   // context is a pointer to a buffer of size NumBytesContext() that the
@@ -107,191 +64,19 @@ public:
   // of the particular FeatureFunction class.  There is one exception:
   // equality of the contents (i.e., memcmp) is required to determine whether
   // two states can be combined.
-
-  // by Log, I mean that the edge is non-const only so you can log to it with INFO_EDGE(edge,msg<<"etc."). most features don't use this so implement the below.  it has a different name to allow a default implementation without name hiding when inheriting + overriding just 1.
-  virtual void TraversalFeaturesLog(const SentenceMetadata& smeta,
-                                    Hypergraph::Edge& edge, // this is writable only so you can use log()
-                                    const std::vector<const void*>& ant_contexts,
-                                    FeatureVector* features,
-                                    FeatureVector* estimated_features,
-                                    void* context) const {
-    TraversalFeaturesImpl(smeta,edge,ant_contexts,features,estimated_features,context);
-  }
-
-  // override above or below.
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     Hypergraph::Edge const& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
-                                     FeatureVector* features,
-                                     FeatureVector* estimated_features,
+                                     SparseVector<double>* features,
+                                     SparseVector<double>* estimated_features,
                                      void* context) const;
 
   // !!! ONLY call this from subclass *CONSTRUCTORS* !!!
   void SetStateSize(size_t state_size) {
     state_size_ = state_size;
   }
-  int StateSize() const { return state_size_; }
-
- private:
-  int state_size_;
-};
-
-
-// word penalty feature, for each word on the E side of a rule,
-// add value_
-class WordPenalty : public FeatureFunction {
- public:
-  Features features() const;
-  WordPenalty(const std::string& param);
-  static std::string usage(bool p,bool d) {
-    return usage_helper("WordPenalty","","number of target words (local feature)",p,d);
-  }
-  bool rule_feature() const { return true; }
- protected:
-  virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
-                                     const std::vector<const void*>& ant_contexts,
-                                     FeatureVector* features,
-                                     FeatureVector* estimated_features,
-                                     void* context) const;
- private:
-  const int fid_;
-  const double value_;
-};
-
-class SourceWordPenalty : public FeatureFunction {
- public:
-  bool rule_feature() const { return true; }
-  Features features() const;
-  SourceWordPenalty(const std::string& param);
-  static std::string usage(bool p,bool d) {
-    return usage_helper("SourceWordPenalty","","number of source words (local feature, and meaningless except when input has non-constant number of source words, e.g. segmentation/morphology/speech recognition lattice)",p,d);
-  }
- protected:
-  virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
-                                     const std::vector<const void*>& ant_contexts,
-                                     FeatureVector* features,
-                                     FeatureVector* estimated_features,
-                                     void* context) const;
- private:
-  const int fid_;
-  const double value_;
-};
-
-#define DEFAULT_MAX_ARITY 9
-#define DEFAULT_MAX_ARITY_STRINGIZE(x) #x
-#define DEFAULT_MAX_ARITY_STRINGIZE_EVAL(x) DEFAULT_MAX_ARITY_STRINGIZE(x)
-#define DEFAULT_MAX_ARITY_STR DEFAULT_MAX_ARITY_STRINGIZE_EVAL(DEFAULT_MAX_ARITY)
-
-class ArityPenalty : public FeatureFunction {
- public:
-  bool rule_feature() const { return true; }
-  Features features() const;
-  ArityPenalty(const std::string& param);
-  static std::string usage(bool p,bool d) {
-    return usage_helper("ArityPenalty","[MaxArity(default " DEFAULT_MAX_ARITY_STR ")]","Indicator feature Arity_N=1 for rule of arity N (local feature).  0<=N<=MaxArity(default " DEFAULT_MAX_ARITY_STR ")",p,d);
-  }
-
- protected:
-  virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
-                                     const std::vector<const void*>& ant_contexts,
-                                     FeatureVector* features,
-                                     FeatureVector* estimated_features,
-                                     void* context) const;
- private:
-  std::vector<WordID> fids_;
-  const double value_;
-};
-
-void show_features(Features const& features,DenseWeightVector const& weights,std::ostream &out,std::ostream &warn,bool warn_zero_wt=true); //show features and weights
-
-template <class FFp>
-Features all_features(std::vector<FFp> const& models_,DenseWeightVector &weights_,std::ostream *warn=0,bool warn_fid_0=false) {
-  using namespace std;
-  Features ffs;
-#define WARNFF(x) do { if (warn) { *warn << "WARNING: "<< x << endl; } } while(0)
-  typedef map<WordID,string> FFM;
-  FFM ff_from;
-  for (unsigned i=0;i<models_.size();++i) {
-    string const& ffname=models_[i]->name_;
-    Features si=models_[i]->features();
-    if (si.empty()) {
-      WARNFF(ffname<<" doesn't yet report any feature IDs - either supply feature weight, or use --no_freeze_feature_set, or implement features() method");
-    }
-    unsigned n0=0;
-    for (unsigned j=0;j<si.size();++j) {
-      WordID fid=si[j];
-      if (!fid) ++n0;
-      if (fid >= weights_.size())
-        weights_.resize(fid+1);
-      if (warn_fid_0 || fid) {
-        pair<FFM::iterator,bool> i_new=ff_from.insert(FFM::value_type(fid,ffname));
-        if (i_new.second) {
-          if (fid)
-            ffs.push_back(fid);
-          else
-            WARNFF("Feature id 0 for "<<ffname<<" (models["<<i<<"]) - probably no weight provided.  Don't freeze feature ids to see the name");
-        } else {
-          WARNFF(ffname<<" (models["<<i<<"]) tried to define feature "<<FD::Convert(fid)<<" already defined earlier by "<<i_new.first->second);
-        }
-      }
-    }
-    if (n0)
-      WARNFF(ffname<<" (models["<<i<<"]) had "<<n0<<" unused features (--no_freeze_feature_set to see them)");
-  }
-  return ffs;
-#undef WARNFF
-}
-
-template <class FFp>
-void show_all_features(std::vector<FFp> const& models_,DenseWeightVector &weights_,std::ostream &out,std::ostream &warn,bool warn_fid_0=true,bool warn_zero_wt=true) {
-  return show_features(all_features(models_,weights_,&warn,warn_fid_0),weights_,out,warn,warn_zero_wt);
-}
-
-typedef ValueArray<uint8_t> FFState; // this is about 10% faster than string.
-//typedef std::string FFState;
-
-//FIXME: only context.data() is required to be contiguous, and it becomes invalid after next string operation.  use ValueArray instead? (higher performance perhaps, save a word due to fixed size)
-typedef std::vector<FFState> FFStates;
-
-// this class is a set of FeatureFunctions that can be used to score, rescore,
-// etc. a (translation?) forest
-class ModelSet {
- public:
-  ModelSet(const std::vector<double>& weights,
-           const std::vector<const FeatureFunction*>& models);
-
-  // sets edge->feature_values_ and edge->edge_prob_
-  // NOTE: edge must not necessarily be in hg.edges_ but its TAIL nodes
-  // must be.  edge features are supposed to be overwritten, not added to (possibly because rule features aren't in ModelSet so need to be left alone
-  void AddFeaturesToEdge(const SentenceMetadata& smeta,
-                         const Hypergraph& hg,
-                         const FFStates& node_states,
-                         Hypergraph::Edge* edge,
-                         FFState* residual_context,
-                         prob_t* combination_cost_estimate = NULL) const;
-
-  //this is called INSTEAD of above when result of edge is goal (must be a unary rule - i.e. one variable, but typically it's assumed that there are no target terminals either (e.g. for LM))
-  void AddFinalFeatures(const FFState& residual_context,
-                        Hypergraph::Edge* edge,
-                        SentenceMetadata const& smeta) const;
-
-  // this is called once before any feature functions apply to a hypergraph
-  // it can be used to initialize sentence-specific data structures
-  void PrepareForInput(const SentenceMetadata& smeta);
-
-  bool empty() const { return models_.empty(); }
-
-  bool stateless() const { return !state_size_; }
-  Features all_features(std::ostream *warnings=0,bool warn_fid_zero=false); // this will warn about duplicate features as well (one function overwrites the feature of another).  also resizes weights_ so it is large enough to hold the (0) weight for the largest reported feature id.  since 0 is a NULL feature id, it's never included.  if warn_fid_zero, then even the first 0 id is
-  void show_features(std::ostream &out,std::ostream &warn,bool warn_zero_wt=true);
- private:
-  std::vector<const FeatureFunction*> models_;
-  const std::vector<double>& weights_;
+ private:
   int state_size_;
-  std::vector<int> model_state_pos_;
 };
 
 #endif
diff --git a/decoder/ff_basic.cc b/decoder/ff_basic.cc
new file mode 100644
index 00000000..f9404d24
--- /dev/null
+++ b/decoder/ff_basic.cc
@@ -0,0 +1,80 @@
+#include "ff_basic.h"
+
+#include "fast_lexical_cast.hpp"
+#include "hg.h"
+
+using namespace std;
+
+// Hiero and Joshua use log_10(e) as the value, so I do to
+WordPenalty::WordPenalty(const string& param) :
+    fid_(FD::Convert("WordPenalty")),
+    value_(-1.0 / log(10)) {
+  if (!param.empty()) {
+    cerr << "Warning WordPenalty ignoring parameter: " << param << endl;
+  }
+}
+
+void WordPenalty::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+                                        const Hypergraph::Edge& edge,
+                                        const std::vector<const void*>& ant_states,
+                                        SparseVector<double>* features,
+                                        SparseVector<double>* estimated_features,
+                                        void* state) const {
+  (void) smeta;
+  (void) ant_states;
+  (void) state;
+  (void) estimated_features;
+  features->set_value(fid_, edge.rule_->EWords() * value_);
+}
+
+
+SourceWordPenalty::SourceWordPenalty(const string& param) :
+    fid_(FD::Convert("SourceWordPenalty")),
+    value_(-1.0 / log(10)) {
+  if (!param.empty()) {
+    cerr << "Warning SourceWordPenalty ignoring parameter: " << param << endl;
+  }
+}
+
+void SourceWordPenalty::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+                                              const Hypergraph::Edge& edge,
+                                              const std::vector<const void*>& ant_states,
+                                              SparseVector<double>* features,
+                                              SparseVector<double>* estimated_features,
+                                              void* state) const {
+  (void) smeta;
+  (void) ant_states;
+  (void) state;
+  (void) estimated_features;
+  features->set_value(fid_, edge.rule_->FWords() * value_);
+}
+
+
+ArityPenalty::ArityPenalty(const std::string& param) :
+    value_(-1.0 / log(10)) {
+  string fname = "Arity_";
+  unsigned MAX=DEFAULT_MAX_ARITY;
+  using namespace boost;
+  if (!param.empty())
+    MAX=lexical_cast<unsigned>(param);
+  for (unsigned i = 0; i <= MAX; ++i) {
+    WordID fid=FD::Convert(fname+lexical_cast<string>(i));
+    fids_.push_back(fid);
+  }
+  while (!fids_.empty() && fids_.back()==0) fids_.pop_back(); // pretty up features vector in case FD was frozen.  doesn't change anything
+}
+
+void ArityPenalty::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+                                         const Hypergraph::Edge& edge,
+                                         const std::vector<const void*>& ant_states,
+                                         SparseVector<double>* features,
+                                         SparseVector<double>* estimated_features,
+                                         void* state) const {
+  (void) smeta;
+  (void) ant_states;
+  (void) state;
+  (void) estimated_features;
+  unsigned a=edge.Arity();
+  features->set_value(a<fids_.size()?fids_[a]:0, value_);
+}
+
diff --git a/decoder/ff_basic.h b/decoder/ff_basic.h
new file mode 100644
index 00000000..901c0110
--- /dev/null
+++ b/decoder/ff_basic.h
@@ -0,0 +1,68 @@
+#ifndef _FF_BASIC_H_
+#define _FF_BASIC_H_
+
+#include "ff.h"
+
+// word penalty feature, for each word on the E side of a rule,
+// add value_
+class WordPenalty : public FeatureFunction {
+ public:
+  WordPenalty(const std::string& param);
+  static std::string usage(bool p,bool d) {
+    return usage_helper("WordPenalty","","number of target words (local feature)",p,d);
+  }
+ protected:
+  virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+                                     const HG::Edge& edge,
+                                     const std::vector<const void*>& ant_contexts,
+                                     SparseVector<double>* features,
+                                     SparseVector<double>* estimated_features,
+                                     void* context) const;
+ private:
+  const int fid_;
+  const double value_;
+};
+
+class SourceWordPenalty : public FeatureFunction {
+ public:
+  SourceWordPenalty(const std::string& param);
+  static std::string usage(bool p,bool d) {
+    return usage_helper("SourceWordPenalty","","number of source words (local feature, and meaningless except when input has non-constant number of source words, e.g. segmentation/morphology/speech recognition lattice)",p,d);
+  }
+ protected:
+  virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+                                     const HG::Edge& edge,
+                                     const std::vector<const void*>& ant_contexts,
+                                     SparseVector<double>* features,
+                                     SparseVector<double>* estimated_features,
+                                     void* context) const;
+ private:
+  const int fid_;
+  const double value_;
+};
+
+#define DEFAULT_MAX_ARITY 9
+#define DEFAULT_MAX_ARITY_STRINGIZE(x) #x
+#define DEFAULT_MAX_ARITY_STRINGIZE_EVAL(x) DEFAULT_MAX_ARITY_STRINGIZE(x)
+#define DEFAULT_MAX_ARITY_STR DEFAULT_MAX_ARITY_STRINGIZE_EVAL(DEFAULT_MAX_ARITY)
+
+class ArityPenalty : public FeatureFunction {
+ public:
+  ArityPenalty(const std::string& param);
+  static std::string usage(bool p,bool d) {
+    return usage_helper("ArityPenalty","[MaxArity(default " DEFAULT_MAX_ARITY_STR ")]","Indicator feature Arity_N=1 for rule of arity N (local feature).  0<=N<=MaxArity(default " DEFAULT_MAX_ARITY_STR ")",p,d);
+  }
+
+ protected:
+  virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+                                     const HG::Edge& edge,
+                                     const std::vector<const void*>& ant_contexts,
+                                     SparseVector<double>* features,
+                                     SparseVector<double>* estimated_features,
+                                     void* context) const;
+ private:
+  std::vector<WordID> fids_;
+  const double value_;
+};
+
+#endif
diff --git a/decoder/ff_bleu.h b/decoder/ff_bleu.h
index 5544920e..344dc788 100644
--- a/decoder/ff_bleu.h
+++ b/decoder/ff_bleu.h
@@ -20,7 +20,7 @@ class BLEUModel : public FeatureFunction {
   static std::string usage(bool param,bool verbose);
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_charset.cc b/decoder/ff_charset.cc
index 472de82b..6429088b 100644
--- a/decoder/ff_charset.cc
+++ b/decoder/ff_charset.cc
@@ -1,5 +1,7 @@
 #include "ff_charset.h"
 
+#include "tdict.h"
+#include "hg.h"
 #include "fdict.h"
 #include "stringlib.h"
 
@@ -20,8 +22,8 @@ bool ContainsNonLatin(const string& word) {
 void NonLatinCount::TraversalFeaturesImpl(const SentenceMetadata& smeta,
                                           const Hypergraph::Edge& edge,
                                           const std::vector<const void*>& ant_contexts,
-                                          FeatureVector* features,
-                                          FeatureVector* estimated_features,
+                                          SparseVector<double>* features,
+                                          SparseVector<double>* estimated_features,
                                           void* context) const {
   const vector<WordID>& e = edge.rule_->e();
   int count = 0;
diff --git a/decoder/ff_charset.h b/decoder/ff_charset.h
index b1ad537e..267ef65d 100644
--- a/decoder/ff_charset.h
+++ b/decoder/ff_charset.h
@@ -13,10 +13,10 @@ class NonLatinCount : public FeatureFunction {
   NonLatinCount(const std::string& param);
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
-                                     FeatureVector* features,
-                                     FeatureVector* estimated_features,
+                                     SparseVector<double>* features,
+                                     SparseVector<double>* estimated_features,
                                      void* context) const;
  private:
   mutable std::map<WordID, bool> is_non_latin_;
diff --git a/decoder/ff_context.cc b/decoder/ff_context.cc
index 9de4d737..f2b0e67c 100644
--- a/decoder/ff_context.cc
+++ b/decoder/ff_context.cc
@@ -5,12 +5,14 @@
 #include <cassert>
 #include <cmath>
 
+#include "hg.h"
 #include "filelib.h"
 #include "stringlib.h"
 #include "sentence_metadata.h"
 #include "lattice.h"
 #include "fdict.h"
 #include "verbose.h"
+#include "tdict.h"
 
 RuleContextFeatures::RuleContextFeatures(const string& param) {
   //  cerr << "initializing RuleContextFeatures with parameters: " << param;
diff --git a/decoder/ff_context.h b/decoder/ff_context.h
index 89bcb557..19198ec3 100644
--- a/decoder/ff_context.h
+++ b/decoder/ff_context.h
@@ -14,7 +14,7 @@ class RuleContextFeatures : public FeatureFunction {
   RuleContextFeatures(const string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_csplit.cc b/decoder/ff_csplit.cc
index 252dbf8c..e6f78f84 100644
--- a/decoder/ff_csplit.cc
+++ b/decoder/ff_csplit.cc
@@ -5,6 +5,7 @@
 
 #include "klm/lm/model.hh"
 
+#include "hg.h"
 #include "sentence_metadata.h"
 #include "lattice.h"
 #include "tdict.h"
diff --git a/decoder/ff_csplit.h b/decoder/ff_csplit.h
index 38c0c5b8..64d42526 100644
--- a/decoder/ff_csplit.h
+++ b/decoder/ff_csplit.h
@@ -12,7 +12,7 @@ class BasicCSplitFeatures : public FeatureFunction {
   BasicCSplitFeatures(const std::string& param);
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -27,7 +27,7 @@ class ReverseCharLMCSplitFeature : public FeatureFunction {
   ReverseCharLMCSplitFeature(const std::string& param);
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_dwarf.cc b/decoder/ff_dwarf.cc
index 43528405..fe7a472e 100644
--- a/decoder/ff_dwarf.cc
+++ b/decoder/ff_dwarf.cc
@@ -4,6 +4,7 @@
 #include <string>
 #include <iostream>
 #include <map>
+#include "hg.h"
 #include "ff_dwarf.h"
 #include "dwarf.h"
 #include "wordid.h"
diff --git a/decoder/ff_dwarf.h b/decoder/ff_dwarf.h
index 083fcc7c..3d6a7da6 100644
--- a/decoder/ff_dwarf.h
+++ b/decoder/ff_dwarf.h
@@ -56,7 +56,7 @@ class Dwarf : public FeatureFunction {
      function word alignments set by 3.
   */
   void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                             const Hypergraph::Edge& edge,
+                             const HG::Edge& edge,
                              const std::vector<const void*>& ant_contexts,
                              SparseVector<double>* features,
                              SparseVector<double>* estimated_features,
diff --git a/decoder/ff_external.cc b/decoder/ff_external.cc
new file mode 100644
index 00000000..dea0e20f
--- /dev/null
+++ b/decoder/ff_external.cc
@@ -0,0 +1,60 @@
+#include "ff_external.h"
+
+#include <dlfcn.h>
+
+#include "stringlib.h"
+#include "hg.h"
+
+using namespace std;
+
+ExternalFeature::ExternalFeature(const string& param) {
+  size_t pos = param.find(' ');
+  string nparam;
+  string file = param;
+  if (pos < param.size()) {
+    nparam = Trim(param.substr(pos + 1));
+    file = param.substr(0, pos);
+  }
+  if (file.size() < 1) {
+    cerr << "External requires a path to a dynamic library!\n";
+    abort();
+  }
+  lib_handle = dlopen(file.c_str(), RTLD_LAZY);
+  if (!lib_handle) {
+    cerr << "dlopen reports: " << dlerror() << endl;
+    cerr << "Did you provide a full path to the dynamic library?\n";
+    abort();
+  }
+  FeatureFunction* (*fn)(const string&) =
+    (FeatureFunction* (*)(const string&))(dlsym(lib_handle, "create_ff"));
+  if (!fn) {
+    cerr << "dlsym reports: " << dlerror() << endl;
+    abort();
+  }
+  ff_ext = (*fn)(nparam);
+  SetStateSize(ff_ext->StateSize());
+}
+
+ExternalFeature::~ExternalFeature() {
+  delete ff_ext;
+  dlclose(lib_handle);
+}
+
+void ExternalFeature::PrepareForInput(const SentenceMetadata& smeta) {
+  ff_ext->PrepareForInput(smeta);
+}
+
+void ExternalFeature::FinalTraversalFeatures(const void* context,
+                                             SparseVector<double>* features) const {
+  ff_ext->FinalTraversalFeatures(context, features);
+}
+
+void ExternalFeature::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+                                            const Hypergraph::Edge& edge,
+                                            const std::vector<const void*>& ant_contexts,
+                                            SparseVector<double>* features,
+                                            SparseVector<double>* estimated_features,
+                                            void* context) const {
+  ff_ext->TraversalFeaturesImpl(smeta, edge, ant_contexts, features, estimated_features, context);
+}
+
diff --git a/decoder/ff_external.h b/decoder/ff_external.h
new file mode 100644
index 00000000..3e2bee51
--- /dev/null
+++ b/decoder/ff_external.h
@@ -0,0 +1,26 @@
+#ifndef _FFEXTERNAL_H_
+#define _FFEXTERNAL_H_
+
+#include "ff.h"
+
+// dynamically loaded feature function
+class ExternalFeature : public FeatureFunction {
+ public:
+  ExternalFeature(const std::string& param);
+  ~ExternalFeature();
+  virtual void PrepareForInput(const SentenceMetadata& smeta);
+  virtual void FinalTraversalFeatures(const void* context,
+                                      SparseVector<double>* features) const;
+ protected:
+  virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+                                     const HG::Edge& edge,
+                                     const std::vector<const void*>& ant_contexts,
+                                     SparseVector<double>* features,
+                                     SparseVector<double>* estimated_features,
+                                     void* context) const;
+ private:
+  void* lib_handle;
+  FeatureFunction* ff_ext;
+};
+
+#endif
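ExternalFeature is the decoder half of a small plugin protocol: it dlopen()s the library named by the first token of its parameter string, looks up a create_ff symbol, and delegates every virtual call to the object that factory returns (the friend class ExternalFeature declaration added to FeatureFunction above is what lets it forward to the plugin's protected TraversalFeaturesImpl). The plugin half, for which this commit adds the example_extff/ directory, must export create_ff with C linkage so that dlsym can find the unmangled symbol name. A minimal sketch of such a library (the class name and behavior are illustrative, not the commit's actual example):

    // Hypothetical external feature plugin: compile into a shared library
    // and load it through the "External" feature registered in cdec_ff.cc.
    #include <string>
    #include "ff.h"

    class MyExternalFeature : public FeatureFunction {
     public:
      explicit MyExternalFeature(const std::string& param) { /* parse param */ }
     protected:
      virtual void TraversalFeaturesImpl(const SentenceMetadata&,
                                         const HG::Edge&,
                                         const std::vector<const void*>&,
                                         SparseVector<double>* features,
                                         SparseVector<double>*,
                                         void*) const {
        // score the edge here
      }
    };

    // extern "C" keeps the symbol unmangled for dlsym(lib_handle, "create_ff").
    extern "C" FeatureFunction* create_ff(const std::string& param) {
      return new MyExternalFeature(param);
    }

Presumably the feature is then enabled in the decoder configuration under the registered name External, with the library path as the first parameter and the remainder passed through to create_ff.
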
diff --git a/decoder/ff_factory.h b/decoder/ff_factory.h
index 5eb68c8b..bfdd3257 100644
--- a/decoder/ff_factory.h
+++ b/decoder/ff_factory.h
@@ -43,7 +43,6 @@ template<class FF>
 struct FFFactory : public FactoryBase<FeatureFunction> {
   FP Create(std::string param) const {
     FF *ret=new FF(param);
-    ret->Init();
     return FP(ret);
   }
   virtual std::string usage(bool params,bool verbose) const {
@@ -57,7 +56,6 @@ template<class FF>
 struct FsaFactory : public FactoryBase<FsaFeatureFunction> {
   FP Create(std::string param) const {
     FF *ret=new FF(param);
-    ret->Init();
     return FP(ret);
   }
   virtual std::string usage(bool params,bool verbose) const {
@@ -98,8 +96,6 @@ struct FactoryRegistry : public UntypedFactoryRegistry {
     if (debug)
       cerr<<"debug enabled for "<<ffname<< " - remaining options: '"<<param<<"'\n";
     FP res = dynamic_cast<FB const&>(*it->second).Create(param);
-    res->init_name_debug(ffname,debug);
-    // could add a res->Init() here instead of in Create if we wanted feature id to potentially differ based on the registered name rather than static usage() - of course, specific feature ids can be computed on the basis of feature param as well; this only affects the default single feature id=name
     return res;
   }
 };
diff --git a/decoder/ff_klm.cc b/decoder/ff_klm.cc
index 09ef282c..fefa90bd 100644
--- a/decoder/ff_klm.cc
+++ b/decoder/ff_klm.cc
@@ -327,11 +327,6 @@ KLanguageModel<Model>::KLanguageModel(const string& param) {
 }
 
 template <class Model>
-Features KLanguageModel<Model>::features() const {
-  return single_feature(fid_);
-}
-
-template <class Model>
 KLanguageModel<Model>::~KLanguageModel() {
   delete pimpl_;
 }
@@ -362,7 +357,6 @@ void KLanguageModel<Model>::FinalTraversalFeatures(const void* ant_state,
 template <class Model>
 boost::shared_ptr<FeatureFunction> CreateModel(const std::string &param) {
   KLanguageModel<Model> *ret = new KLanguageModel<Model>(param);
-  ret->Init();
   return boost::shared_ptr<FeatureFunction>(ret);
 }
diff --git a/decoder/ff_klm.h b/decoder/ff_klm.h
index 6efe50f6..b5ceffd0 100644
--- a/decoder/ff_klm.h
+++ b/decoder/ff_klm.h
@@ -20,10 +20,9 @@ class KLanguageModel : public FeatureFunction {
   virtual void FinalTraversalFeatures(const void* context,
                                       SparseVector<double>* features) const;
   static std::string usage(bool param,bool verbose);
-  Features features() const;
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_lm.cc b/decoder/ff_lm.cc
index 5e16d4e3..6ec7b4f3 100644
--- a/decoder/ff_lm.cc
+++ b/decoder/ff_lm.cc
@@ -519,10 +519,6 @@ LanguageModel::LanguageModel(const string& param) {
   SetStateSize(LanguageModelImpl::OrderToStateSize(order));
 }
 
-Features LanguageModel::features() const {
-  return single_feature(fid_);
-}
-
 LanguageModel::~LanguageModel() {
   delete pimpl_;
 }
diff --git a/decoder/ff_lm.h b/decoder/ff_lm.h
index ccee4268..94e18f00 100644
--- a/decoder/ff_lm.h
+++ b/decoder/ff_lm.h
@@ -55,10 +55,9 @@ class LanguageModel : public FeatureFunction {
                                       SparseVector<double>* features) const;
   std::string DebugStateToString(const void* state) const;
   static std::string usage(bool param,bool verbose);
-  Features features() const;
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -81,7 +80,7 @@ class LanguageModelRandLM : public FeatureFunction {
   std::string DebugStateToString(const void* state) const;
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_ngrams.h b/decoder/ff_ngrams.h
index 064dbb49..4965d235 100644
--- a/decoder/ff_ngrams.h
+++ b/decoder/ff_ngrams.h
@@ -17,7 +17,7 @@ class NgramDetector : public FeatureFunction {
                                       SparseVector<double>* features) const;
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_rules.cc b/decoder/ff_rules.cc
index bd4c4cc0..0aafb0ba 100644
--- a/decoder/ff_rules.cc
+++ b/decoder/ff_rules.cc
@@ -10,6 +10,8 @@
 #include "lattice.h"
 #include "fdict.h"
 #include "verbose.h"
+#include "tdict.h"
+#include "hg.h"
 
 using namespace std;
 
diff --git a/decoder/ff_rules.h b/decoder/ff_rules.h
index 48d8bd05..7f5e1dfa 100644
--- a/decoder/ff_rules.h
+++ b/decoder/ff_rules.h
@@ -3,6 +3,7 @@
 
 #include <vector>
 #include <map>
+#include "trule.h"
 #include "ff.h"
 #include "array2d.h"
 #include "wordid.h"
@@ -12,7 +13,7 @@ class RuleIdentityFeatures : public FeatureFunction {
   RuleIdentityFeatures(const std::string& param);
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -27,7 +28,7 @@ class RuleNgramFeatures : public FeatureFunction {
   RuleNgramFeatures(const std::string& param);
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_ruleshape.cc b/decoder/ff_ruleshape.cc
index f56ccfa9..7bb548c4 100644
--- a/decoder/ff_ruleshape.cc
+++ b/decoder/ff_ruleshape.cc
@@ -1,5 +1,7 @@
 #include "ff_ruleshape.h"
 
+#include "trule.h"
+#include "hg.h"
 #include "fdict.h"
 
 #include <sstream>
diff --git a/decoder/ff_ruleshape.h b/decoder/ff_ruleshape.h
index 23c9827e..9f20faf3 100644
--- a/decoder/ff_ruleshape.h
+++ b/decoder/ff_ruleshape.h
@@ -9,7 +9,7 @@ class RuleShapeFeatures : public FeatureFunction {
   RuleShapeFeatures(const std::string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_source_syntax.cc b/decoder/ff_source_syntax.cc
index 035132b4..a1997695 100644
--- a/decoder/ff_source_syntax.cc
+++ b/decoder/ff_source_syntax.cc
@@ -3,6 +3,7 @@
 #include <sstream>
 #include <stack>
 
+#include "hg.h"
 #include "sentence_metadata.h"
 #include "array2d.h"
 #include "filelib.h"
diff --git a/decoder/ff_source_syntax.h b/decoder/ff_source_syntax.h
index 279563e1..a8c7150a 100644
--- a/decoder/ff_source_syntax.h
+++ b/decoder/ff_source_syntax.h
@@ -11,7 +11,7 @@ class SourceSyntaxFeatures : public FeatureFunction {
   ~SourceSyntaxFeatures();
  protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -28,7 +28,7 @@ class SourceSpanSizeFeatures : public FeatureFunction {
   ~SourceSpanSizeFeatures();
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_spans.cc b/decoder/ff_spans.cc
index 0483517b..0ccac69b 100644
--- a/decoder/ff_spans.cc
+++ b/decoder/ff_spans.cc
@@ -4,6 +4,8 @@
 #include <cassert>
 #include <cmath>
 
+#include "hg.h"
+#include "tdict.h"
 #include "filelib.h"
 #include "stringlib.h"
 #include "sentence_metadata.h"
diff --git a/decoder/ff_spans.h b/decoder/ff_spans.h
index 24e0dede..d2f5e84c 100644
--- a/decoder/ff_spans.h
+++ b/decoder/ff_spans.h
@@ -12,7 +12,7 @@ class SpanFeatures : public FeatureFunction {
   SpanFeatures(const std::string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -49,7 +49,7 @@ class CMR2008ReorderingFeatures : public FeatureFunction {
   CMR2008ReorderingFeatures(const std::string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_tagger.cc b/decoder/ff_tagger.cc
index fd9210fa..7f9af9cd 100644
--- a/decoder/ff_tagger.cc
+++ b/decoder/ff_tagger.cc
@@ -2,6 +2,7 @@
 
 #include <sstream>
 
+#include "hg.h"
 #include "tdict.h"
 #include "sentence_metadata.h"
 #include "stringlib.h"
diff --git a/decoder/ff_tagger.h b/decoder/ff_tagger.h
index bd5b62c0..46418b0c 100644
--- a/decoder/ff_tagger.h
+++ b/decoder/ff_tagger.h
@@ -18,7 +18,7 @@ class Tagger_BigramIndicator : public FeatureFunction {
   Tagger_BigramIndicator(const std::string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -39,7 +39,7 @@ class LexicalPairIndicator : public FeatureFunction {
   virtual void PrepareForInput(const SentenceMetadata& smeta);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -59,7 +59,7 @@ class OutputIndicator : public FeatureFunction {
   OutputIndicator(const std::string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
diff --git a/decoder/ff_wordalign.h b/decoder/ff_wordalign.h
index d7a2dda8..ba3d0b9b 100644
--- a/decoder/ff_wordalign.h
+++ b/decoder/ff_wordalign.h
@@ -13,7 +13,7 @@ class RelativeSentencePosition : public FeatureFunction {
   RelativeSentencePosition(const std::string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -36,7 +36,7 @@ class SourceBigram : public FeatureFunction {
   void PrepareForInput(const SentenceMetadata& smeta);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -55,7 +55,7 @@ class LexNullJump : public FeatureFunction {
   LexNullJump(const std::string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -72,7 +72,7 @@ class NewJump : public FeatureFunction {
   NewJump(const std::string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -109,7 +109,7 @@ class LexicalTranslationTrigger : public FeatureFunction {
   LexicalTranslationTrigger(const std::string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
@@ -132,14 +132,14 @@ class BlunsomSynchronousParseHack : public FeatureFunction {
   BlunsomSynchronousParseHack(const std::string& param);
 protected:
   virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
-                                     const Hypergraph::Edge& edge,
+                                     const HG::Edge& edge,
                                      const std::vector<const void*>& ant_contexts,
                                      SparseVector<double>* features,
                                      SparseVector<double>* estimated_features,
                                      void* out_context) const;
 private:
   inline bool DoesNotBelong(const void* state) const {
-    for (int i = 0; i < NumBytesContext(); ++i) {
+    for (int i = 0; i < StateSize(); ++i) {
       if (*(static_cast<const unsigned char*>(state) + i)) return false;
     }
     return true;
@@ -148,9 +148,9 @@ class BlunsomSynchronousParseHack : public FeatureFunction {
   inline void AppendAntecedentString(const void* state, std::vector<WordID>* yield) const {
     int i = 0;
     int ind = 0;
-    while (i < NumBytesContext() && !(*(static_cast<const unsigned char*>(state) + i))) { ++i; ind += 8; }
-    // std::cerr << i << " " << NumBytesContext() << std::endl;
assert(i != NumBytesContext()); + while (i < StateSize() && !(*(static_cast<const unsigned char*>(state) + i))) { ++i; ind += 8; } + // std::cerr << i << " " << StateSize() << std::endl; + assert(i != StateSize()); assert(ind < cur_ref_->size()); int cur = *(static_cast<const unsigned char*>(state) + i); int comp = 1; @@ -171,7 +171,7 @@ class BlunsomSynchronousParseHack : public FeatureFunction { } inline void SetStateMask(int start, int end, void* state) const { - assert((end / 8) < NumBytesContext()); + assert((end / 8) < StateSize()); int i = 0; int comp = 1; for (int j = 0; j < start; ++j) { @@ -209,7 +209,7 @@ class WordPairFeatures : public FeatureFunction { WordPairFeatures(const std::string& param); protected: virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta, - const Hypergraph::Edge& edge, + const HG::Edge& edge, const std::vector<const void*>& ant_contexts, SparseVector<double>* features, SparseVector<double>* estimated_features, @@ -226,7 +226,7 @@ class IdentityCycleDetector : public FeatureFunction { IdentityCycleDetector(const std::string& param); protected: virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta, - const Hypergraph::Edge& edge, + const HG::Edge& edge, const std::vector<const void*>& ant_contexts, SparseVector<double>* features, SparseVector<double>* estimated_features, @@ -242,7 +242,7 @@ class InputIndicator : public FeatureFunction { InputIndicator(const std::string& param); protected: virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta, - const Hypergraph::Edge& edge, + const HG::Edge& edge, const std::vector<const void*>& ant_contexts, SparseVector<double>* features, SparseVector<double>* estimated_features, @@ -258,7 +258,7 @@ class Fertility : public FeatureFunction { Fertility(const std::string& param); protected: virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta, - const Hypergraph::Edge& edge, + const HG::Edge& edge, const std::vector<const void*>& ant_contexts, SparseVector<double>* features, SparseVector<double>* estimated_features, diff --git a/decoder/ff_wordset.cc b/decoder/ff_wordset.cc index 44468899..70cea7de 100644 --- a/decoder/ff_wordset.cc +++ b/decoder/ff_wordset.cc @@ -1,5 +1,6 @@ #include "ff_wordset.h" +#include "hg.h" #include "fdict.h" #include <sstream> #include <iostream> diff --git a/decoder/ff_wordset.h b/decoder/ff_wordset.h index 7c9a3fb7..639e1514 100644 --- a/decoder/ff_wordset.h +++ b/decoder/ff_wordset.h @@ -2,6 +2,7 @@ #define _FF_WORDSET_H_ #include "ff.h" +#include "tdict.h" #include <tr1/unordered_set> #include <boost/algorithm/string.hpp> @@ -32,11 +33,9 @@ class WordSet : public FeatureFunction { ~WordSet() { } - Features features() const { return single_feature(fid_); } - protected: virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta, - const Hypergraph::Edge& edge, + const HG::Edge& edge, const std::vector<const void*>& ant_contexts, SparseVector<double>* features, SparseVector<double>* estimated_features, diff --git a/decoder/ffset.cc b/decoder/ffset.cc new file mode 100644 index 00000000..5820f421 --- /dev/null +++ b/decoder/ffset.cc @@ -0,0 +1,72 @@ +#include "ffset.h" + +#include "ff.h" +#include "tdict.h" +#include "hg.h" + +using namespace std; + +ModelSet::ModelSet(const vector<double>& w, const vector<const FeatureFunction*>& models) : + models_(models), + weights_(w), + state_size_(0), + model_state_pos_(models.size()) { + for (int i = 0; i < models_.size(); ++i) { + model_state_pos_[i] = state_size_; + state_size_ += 
models_[i]->StateSize(); + } +} + +void ModelSet::PrepareForInput(const SentenceMetadata& smeta) { + for (int i = 0; i < models_.size(); ++i) + const_cast<FeatureFunction*>(models_[i])->PrepareForInput(smeta); +} + +void ModelSet::AddFeaturesToEdge(const SentenceMetadata& smeta, + const Hypergraph& /* hg */, + const FFStates& node_states, + HG::Edge* edge, + FFState* context, + prob_t* combination_cost_estimate) const { + //edge->reset_info(); + context->resize(state_size_); + if (state_size_ > 0) { + memset(&(*context)[0], 0, state_size_); + } + SparseVector<double> est_vals; // only computed if combination_cost_estimate is non-NULL + if (combination_cost_estimate) *combination_cost_estimate = prob_t::One(); + for (int i = 0; i < models_.size(); ++i) { + const FeatureFunction& ff = *models_[i]; + void* cur_ff_context = NULL; + vector<const void*> ants(edge->tail_nodes_.size()); + bool has_context = ff.StateSize() > 0; + if (has_context) { + int spos = model_state_pos_[i]; + cur_ff_context = &(*context)[spos]; + for (int j = 0; j < ants.size(); ++j) { // loop variable renamed to j to avoid shadowing the model index i + ants[j] = &node_states[edge->tail_nodes_[j]][spos]; + } + } + ff.TraversalFeatures(smeta, *edge, ants, &edge->feature_values_, &est_vals, cur_ff_context); + } + if (combination_cost_estimate) + combination_cost_estimate->logeq(est_vals.dot(weights_)); + edge->edge_prob_.logeq(edge->feature_values_.dot(weights_)); +} + +void ModelSet::AddFinalFeatures(const FFState& state, HG::Edge* edge, SentenceMetadata const& smeta) const { + assert(1 == edge->rule_->Arity()); + //edge->reset_info(); + for (int i = 0; i < models_.size(); ++i) { + const FeatureFunction& ff = *models_[i]; + const void* ant_state = NULL; + bool has_context = ff.StateSize() > 0; + if (has_context) { + int spos = model_state_pos_[i]; + ant_state = &state[spos]; + } + ff.FinalTraversalFeatures(ant_state, &edge->feature_values_); + } + edge->edge_prob_.logeq(edge->feature_values_.dot(weights_)); +} + diff --git a/decoder/ffset.h b/decoder/ffset.h new file mode 100644 index 00000000..28aef667 --- /dev/null +++ b/decoder/ffset.h @@ -0,0 +1,57 @@ +#ifndef _FFSET_H_ +#define _FFSET_H_ + +#include <vector> +#include "value_array.h" +#include "prob.h" + +namespace HG { struct Edge; struct Node; } +class Hypergraph; +class FeatureFunction; +class SentenceMetadata; + +// TODO let states be dynamically sized +typedef ValueArray<uint8_t> FFState; // this is a fixed array, but about 10% faster than string + +//FIXME: only context.data() is required to be contiguous, and it becomes invalid after the next string operation. use ValueArray instead? (higher performance perhaps; saves a word due to fixed size) +typedef std::vector<FFState> FFStates; + +// this class is a set of FeatureFunctions that can be used to score, rescore, +// etc. a (translation?) forest +class ModelSet { + public: + ModelSet(const std::vector<double>& weights, + const std::vector<const FeatureFunction*>& models); +
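+ // A minimal usage sketch (illustrative only; `hg`, `w`, `ffs`, `smeta`, and the + // edge index `e` are assumed to exist in the caller and are not defined here): + // + // ModelSet models(w, ffs); // w: vector<double> of weights + // models.PrepareForInput(smeta); // once per input sentence + // FFStates node_states(hg.nodes_.size()); + // FFState ctx; prob_t est; + // // tail-node entries of node_states must already be filled in: + // models.AddFeaturesToEdge(smeta, hg, node_states, &hg.edges_[e], &ctx, &est); + // + // Internally, stateful model i owns a fixed byte range of every FFState, + // starting at model_state_pos_[i]; e.g. two models with state sizes 4 and 8 + // get offsets 0 and 4, and state_size_ == 12. +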
+ // sets edge->feature_values_ and edge->edge_prob_ + // NOTE: the edge need not be in hg.edges_, but its TAIL nodes must be. + // Edge features are overwritten, not added to (rule features are not stored + // in ModelSet, so they must be left alone). + void AddFeaturesToEdge(const SentenceMetadata& smeta, + const Hypergraph& hg, + const FFStates& node_states, + HG::Edge* edge, + FFState* residual_context, + prob_t* combination_cost_estimate = NULL) const; + + // this is called INSTEAD of AddFeaturesToEdge when the result of the edge is the goal. + // the rule must be unary (i.e. have exactly one variable), and typically it is also + // assumed to have no target terminals (e.g. for the LM) + void AddFinalFeatures(const FFState& residual_context, + HG::Edge* edge, + SentenceMetadata const& smeta) const; + + // this is called once before any feature functions apply to a hypergraph + // it can be used to initialize sentence-specific data structures + void PrepareForInput(const SentenceMetadata& smeta); + + bool empty() const { return models_.empty(); } + + bool stateless() const { return !state_size_; } + + private: + std::vector<const FeatureFunction*> models_; + const std::vector<double>& weights_; + int state_size_; + std::vector<int> model_state_pos_; +}; + +#endif diff --git a/decoder/grammar_test.cc b/decoder/grammar_test.cc index 4500490a..912f4f12 100644 --- a/decoder/grammar_test.cc +++ b/decoder/grammar_test.cc @@ -10,7 +10,9 @@ #include "tdict.h" #include "grammar.h" #include "bottom_up_parser.h" +#include "hg.h" #include "ff.h" +#include "ffset.h" #include "weights.h" using namespace std; diff --git a/decoder/hg.h b/decoder/hg.h index 591e98ce..3d8cd9bc 100644 --- a/decoder/hg.h +++ b/decoder/hg.h @@ -33,47 +33,20 @@ // slow #undef HG_EDGES_TOPO_SORTED -class Hypergraph; -typedef boost::shared_ptr<Hypergraph> HypergraphP; - -// class representing an acyclic hypergraph -// - edges have 1 head, 0..n tails -class Hypergraph { -public: - Hypergraph() : is_linear_chain_(false) {} +// SmallVector is a fast, small vector<int> implementation for sizes <= 2 +typedef SmallVectorUnsigned TailNodeVector; // indices in nodes_ +typedef std::vector<int> EdgesVector; // indices in edges_ - // SmallVector is a fast, small vector<int> implementation for sizes <= 2 - typedef SmallVectorUnsigned TailNodeVector; // indices in nodes_ - typedef std::vector<int> EdgesVector; // indices in edges_ - - // TODO get rid of cat_? - // TODO keep cat_ and add span and/or state? :) - struct Node { - Node() : id_(), cat_() {} - int id_; // equal to this object's position in the nodes_ vector - WordID cat_; // non-terminal category if <0, 0 if not set - WordID NT() const { return -cat_; } - EdgesVector in_edges_; // an in edge is an edge with this node as its head. (in edges come from the bottom up to us) indices in edges_ - EdgesVector out_edges_; // an out edge is an edge with this node as its tail. (out edges leave us up toward the top/goal). indices in edges_ - void copy_fixed(Node const& o) { // nonstructural fields only - structural ones are managed by sorting/pruning/subsetting - cat_=o.cat_; - } - void copy_reindex(Node const& o,indices_after const& n2,indices_after const& e2) { - copy_fixed(o); - id_=n2[id_]; - e2.reindex_push_back(o.in_edges_,in_edges_); - e2.reindex_push_back(o.out_edges_,out_edges_); - } - }; +enum { + NONE=0,CATEGORY=1,SPAN=2,PROB=4,FEATURES=8,RULE=16,RULE_LHS=32,PREV_SPAN=64,ALL=0xFFFFFFFF +};
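+// usage sketch (illustrative; assumes an HG::Edge `e` in scope): these bits form the display mask consumed by HG::Edge::show() below, so e.show(std::cerr, SPAN|RULE|FEATURES) would print the edge's span, rule, and feature values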
+namespace HG { - // TODO get rid of edge_prob_? (can be computed on the fly as the dot - // product of the weight vector and the feature values) struct Edge { -// int poplimit; //TODO: cube pruning per edge limit? per node didn't work well at all. also, inside cost + outside(node) is the same information i'd use to set a per-edge limit anyway - and nonmonotonicity in cube pruning may mean it's good to favor edge (in same node) w/ relatively worse score Edge() : i_(-1), j_(-1), prev_i_(-1), prev_j_(-1) {} Edge(int id,Edge const& copy_pod_from) : id_(id) { copy_pod(copy_pod_from); } // call copy_features yourself later. - Edge(int id,Edge const& copy_from,TailNodeVector const& tail) // fully inits - probably more expensive when push_back(Edge(...)) than setting after + Edge(int id,Edge const& copy_from,TailNodeVector const& tail) // fully inits - probably more expensive when push_back(Edge(...)) than setting after : tail_nodes_(tail),id_(id) { copy_pod(copy_from);copy_features(copy_from); } inline int Arity() const { return tail_nodes_.size(); } int head_node_; // refers to a position in nodes_ @@ -83,8 +56,6 @@ public: prob_t edge_prob_; // dot product of weights and feat_values int id_; // equal to this object's position in the edges_ vector - //FIXME: these span ids belong in Node, not Edge, right? every node should have the same spans. - // span info. typically, i_ and j_ refer to indices in the source sentence. // In synchronous parsing, i_ and j_ will refer to target sentence/lattice indices // while prev_i_ prev_j_ will refer to positions in the source. @@ -97,54 +68,6 @@ public: short int j_; short int prev_i_; short int prev_j_; - - void copy_info(Edge const& o) { -#if USE_INFO_EDGE - set_info(o.info_.str()); // by convention, each person putting info here starts with a separator (e.g. space). it's empty if nobody put any info there. -#else - (void) o; -#endif - } - void copy_pod(Edge const& o) { - rule_=o.rule_; - i_ = o.i_; j_ = o.j_; prev_i_ = o.prev_i_; prev_j_ = o.prev_j_; - } - void copy_features(Edge const& o) { - feature_values_=o.feature_values_; - copy_info(o); - } - void copy_fixed(Edge const& o) { - copy_pod(o); - copy_features(o); - edge_prob_ = o.edge_prob_; - } - void copy_reindex(Edge const& o,indices_after const& n2,indices_after const& e2) { - copy_fixed(o); - head_node_=n2[o.head_node_]; - id_=e2[o.id_]; - n2.reindex_push_back(o.tail_nodes_,tail_nodes_); - } - -#if USE_INFO_EDGE - std::ostringstream info_; - void set_info(std::string const& s) { - info_.str(s); - info_.seekp(0,std::ios_base::end); - } - Edge(Edge const& o) : head_node_(o.head_node_),tail_nodes_(o.tail_nodes_),rule_(o.rule_),feature_values_(o.feature_values_),edge_prob_(o.edge_prob_),id_(o.id_),i_(o.i_),j_(o.j_),prev_i_(o.prev_i_),prev_j_(o.prev_j_), info_(o.info_.str(),std::ios_base::ate) { -// info_.seekp(0,std::ios_base::end); - } - void operator=(Edge const& o) { - head_node_ = o.head_node_; tail_nodes_ = o.tail_nodes_; rule_ = o.rule_; feature_values_ = o.feature_values_; edge_prob_ = o.edge_prob_; id_ = o.id_; i_ = o.i_; j_ = o.j_; prev_i_ = o.prev_i_; prev_j_ = o.prev_j_; - set_info(o.info_.str()); - } - std::string info() const { return info_.str(); } - void reset_info() { info_.str(""); info_.clear(); } -#else - std::string info() const { return std::string(); } - void reset_info() { } - void set_info(std::string const& ) { } -#endif void show(std::ostream &o,unsigned mask=SPAN|RULE) const { o<<'{'; if (mask&CATEGORY) @@ -159,10 +82,6 @@ public: o<<' '<<feature_values_; if (mask&RULE) o<<' '<<rule_->AsString(mask&RULE_LHS); - if (USE_INFO_EDGE) { - std::string const& i=info(); - if (mask&&!i.empty()) o << " |||"<<i; // remember, the initial space is expected as part of i - } o<<'}'; } std::string show(unsigned mask=SPAN|RULE) const { @@ 
-170,12 +89,28 @@ public: show(o,mask); return o.str(); } - /* generic recursion re: child_handle=re(tail_nodes_[i],i,parent_handle) - - FIXME: make kbest create a simple derivation-tree structure (could be a - hg), and replace the list-of-edges viterbi.h with a tree-structured one. - CreateViterbiHypergraph can do for 1best, though. - */ + void copy_pod(Edge const& o) { + rule_=o.rule_; + i_ = o.i_; j_ = o.j_; prev_i_ = o.prev_i_; prev_j_ = o.prev_j_; + } + void copy_features(Edge const& o) { + feature_values_=o.feature_values_; + } + void copy_fixed(Edge const& o) { + copy_pod(o); + copy_features(o); + edge_prob_ = o.edge_prob_; + } + void copy_reindex(Edge const& o,indices_after const& n2,indices_after const& e2) { + copy_fixed(o); + head_node_=n2[o.head_node_]; + id_=e2[o.id_]; + n2.reindex_push_back(o.tail_nodes_,tail_nodes_); + } + // generic recursion re: child_handle=re(tail_nodes_[i],i,parent_handle) + // FIXME: make kbest create a simple derivation-tree structure (could be a + // hg), and replace the list-of-edges viterbi.h with a tree-structured one. + // CreateViterbiHypergraph can do for 1best, though. template <class EdgeRecurse,class TEdgeHandle> std::string derivation_tree(EdgeRecurse const& re,TEdgeHandle const& eh,bool indent=true,int show_mask=SPAN|RULE,int maxdepth=0x7FFFFFFF,int depth=0) const { std::ostringstream o; @@ -203,7 +138,43 @@ public: } }; - // all this info ought to live in Node, but for some reason it's on Edges. + // TODO get rid of cat_? + // TODO keep cat_ and add span and/or state? :) + struct Node { + Node() : id_(), cat_() {} + int id_; // equal to this object's position in the nodes_ vector + WordID cat_; // non-terminal category if <0, 0 if not set + WordID NT() const { return -cat_; } + EdgesVector in_edges_; // an in edge is an edge with this node as its head. (in edges come from the bottom up to us) indices in edges_ + EdgesVector out_edges_; // an out edge is an edge with this node as its tail. (out edges leave us up toward the top/goal). 
indices in edges_ + void copy_fixed(Node const& o) { // nonstructural fields only - structural ones are managed by sorting/pruning/subsetting + cat_=o.cat_; + } + void copy_reindex(Node const& o,indices_after const& n2,indices_after const& e2) { + copy_fixed(o); + id_=n2[id_]; + e2.reindex_push_back(o.in_edges_,in_edges_); + e2.reindex_push_back(o.out_edges_,out_edges_); + } + }; + +} // namespace HG + +class Hypergraph; +typedef boost::shared_ptr<Hypergraph> HypergraphP; +// class representing an acyclic hypergraph +// - edges have 1 head, 0..n tails +class Hypergraph { +public: + Hypergraph() : is_linear_chain_(false) {} + typedef HG::Node Node; + typedef HG::Edge Edge; + typedef SmallVectorUnsigned TailNodeVector; // indices in nodes_ + typedef std::vector<int> EdgesVector; // indices in edges_ + enum { + NONE=0,CATEGORY=1,SPAN=2,PROB=4,FEATURES=8,RULE=16,RULE_LHS=32,PREV_SPAN=64,ALL=0xFFFFFFFF + }; + // except for stateful models that have split nt,span, this should identify the node void SetNodeOrigin(int nodeid,NTSpan &r) const { Node const &n=nodes_[nodeid]; @@ -230,18 +201,9 @@ public: } return s; } - // 0 if none, -TD index otherwise (just like in rule) WordID NodeLHS(int nodeid) const { Node const &n=nodes_[nodeid]; return n.NT(); - /* - if (!n.in_edges_.empty()) { - Edge const& e=edges_[n.in_edges_.front()]; - if (e.rule_) - return -e.rule_->lhs_; - } - return 0; - */ } typedef std::vector<prob_t> EdgeProbs; @@ -250,14 +212,8 @@ public: typedef std::vector<bool> NodeMask; std::string show_viterbi_tree(bool indent=true,int show_mask=SPAN|RULE,int maxdepth=0x7FFFFFFF,int depth=0) const; -// builds viterbi hg and returns it formatted as a pretty string - - enum { - NONE=0,CATEGORY=1,SPAN=2,PROB=4,FEATURES=8,RULE=16,RULE_LHS=32,PREV_SPAN=64,ALL=0xFFFFFFFF - }; std::string show_first_tree(bool indent=true,int show_mask=SPAN|RULE,int maxdepth=0x7FFFFFFF,int depth=0) const; - // same as above, but takes in_edges_[0] all the way down - to make it viterbi cost (1-best), call ViterbiSortInEdges() first typedef Edge const* EdgeHandle; EdgeHandle operator()(int tailn,int /*taili*/,EdgeHandle /*parent*/) const { @@ -334,7 +290,7 @@ public: Edge* AddEdge(Edge const& in_edge, const TailNodeVector& tail) { edges_.push_back(Edge(edges_.size(),in_edge)); Edge* edge = &edges_.back(); - edge->copy_features(in_edge); + edge->feature_values_ = in_edge.feature_values_; edge->tail_nodes_ = tail; // possibly faster than copying to Edge() constructed above then copying via push_back. perhaps optimized it's the same. 
index_tails(*edge); return edge; @@ -503,9 +459,9 @@ public: template <class V> void visit_edges_topo(V &v) { - for (int i = 0; i < nodes_.size(); ++i) { + for (unsigned i = 0; i < nodes_.size(); ++i) { EdgesVector const& in=nodes_[i].in_edges_; - for (int j=0;j<in.size();++j) { + for (unsigned j=0;j<in.size();++j) { int e=in[j]; v(i,e,edges_[e]); } @@ -534,14 +490,14 @@ private: // for generic Viterbi/Inside algorithms struct EdgeProb { typedef prob_t Weight; - inline const prob_t& operator()(const Hypergraph::Edge& e) const { return e.edge_prob_; } + inline const prob_t& operator()(const HG::Edge& e) const { return e.edge_prob_; } }; struct EdgeSelectEdgeWeightFunction { typedef prob_t Weight; typedef std::vector<bool> EdgeMask; EdgeSelectEdgeWeightFunction(const EdgeMask& v) : v_(v) {} - inline prob_t operator()(const Hypergraph::Edge& e) const { + inline prob_t operator()(const HG::Edge& e) const { if (v_[e.id_]) return prob_t::One(); else return prob_t::Zero(); } @@ -551,7 +507,7 @@ private: struct ScaledEdgeProb { ScaledEdgeProb(const double& alpha) : alpha_(alpha) {} - inline prob_t operator()(const Hypergraph::Edge& e) const { return e.edge_prob_.pow(alpha_); } + inline prob_t operator()(const HG::Edge& e) const { return e.edge_prob_.pow(alpha_); } const double alpha_; typedef prob_t Weight; }; @@ -560,7 +516,7 @@ struct ScaledEdgeProb { struct EdgeFeaturesAndProbWeightFunction { typedef SparseVector<prob_t> Weight; typedef Weight Result; //TODO: change Result->Weight everywhere? - inline const Weight operator()(const Hypergraph::Edge& e) const { + inline const Weight operator()(const HG::Edge& e) const { SparseVector<prob_t> res; for (SparseVector<double>::const_iterator it = e.feature_values_.begin(); it != e.feature_values_.end(); ++it) @@ -571,7 +527,7 @@ struct EdgeFeaturesAndProbWeightFunction { struct TransitionCountWeightFunction { typedef double Weight; - inline double operator()(const Hypergraph::Edge& e) const { (void)e; return 1.0; } + inline double operator()(const HG::Edge& e) const { (void)e; return 1.0; } }; #endif diff --git a/decoder/hg_io.cc b/decoder/hg_io.cc index 3a68a429..64c6663e 100644 --- a/decoder/hg_io.cc +++ b/decoder/hg_io.cc @@ -28,7 +28,7 @@ struct HGReader : public JSONParser { hg.ConnectEdgeToHeadNode(&hg.edges_[in_edges[i]], node); } } - void CreateEdge(const TRulePtr& rule, FeatureVector* feats, const SmallVectorUnsigned& tail) { + void CreateEdge(const TRulePtr& rule, SparseVector<double>* feats, const SmallVectorUnsigned& tail) { Hypergraph::Edge* edge = hg.AddEdge(rule, tail); feats->swap(edge->feature_values_); edge->i_ = spans[0]; @@ -392,8 +392,8 @@ string HypergraphIO::AsPLF(const Hypergraph& hg, bool include_global_parentheses const Hypergraph::Edge& e = hg.edges_[hg.nodes_[i].out_edges_[j]]; const string output = e.rule_->e_.size() ==2 ? 
Escape(TD::Convert(e.rule_->e_[1])) : EPS; double prob = log(e.edge_prob_); - if (isinf(prob)) { prob = -9e20; } - if (isnan(prob)) { prob = 0; } + if (std::isinf(prob)) { prob = -9e20; } + if (std::isnan(prob)) { prob = 0; } os << "('" << output << "'," << prob << "," << e.head_node_ - i << "),"; } os << "),"; diff --git a/decoder/inside_outside.h b/decoder/inside_outside.h index f73a1d3f..c0377fe8 100644 --- a/decoder/inside_outside.h +++ b/decoder/inside_outside.h @@ -42,7 +42,7 @@ WeightType Inside(const Hypergraph& hg, Hypergraph::EdgesVector const& in=hg.nodes_[i].in_edges_; const unsigned num_in_edges = in.size(); for (unsigned j = 0; j < num_in_edges; ++j) { - const Hypergraph::Edge& edge = hg.edges_[in[j]]; + const HG::Edge& edge = hg.edges_[in[j]]; WeightType score = weight(edge); for (unsigned k = 0; k < edge.tail_nodes_.size(); ++k) { const int tail_node_index = edge.tail_nodes_[k]; @@ -74,7 +74,7 @@ void Outside(const Hypergraph& hg, Hypergraph::EdgesVector const& in=hg.nodes_[i].in_edges_; const int num_in_edges = in.size(); for (int j = 0; j < num_in_edges; ++j) { - const Hypergraph::Edge& edge = hg.edges_[in[j]]; + const HG::Edge& edge = hg.edges_[in[j]]; WeightType head_and_edge_weight = weight(edge); head_and_edge_weight *= head_node_outside_score; const int num_tail_nodes = edge.tail_nodes_.size(); @@ -138,7 +138,7 @@ struct InsideOutsides { Hypergraph::EdgesVector const& in=hg.nodes_[i].in_edges_; const int num_in_edges = in.size(); for (int j = 0; j < num_in_edges; ++j) { - const Hypergraph::Edge& edge = hg.edges_[in[j]]; + const HG::Edge& edge = hg.edges_[in[j]]; KType kbar_e = outside[i]; const int num_tail_nodes = edge.tail_nodes_.size(); for (int k = 0; k < num_tail_nodes; ++k) @@ -156,7 +156,7 @@ struct InsideOutsides { const int num_in_edges = in.size(); for (int j = 0; j < num_in_edges; ++j) { int edgei=in[j]; - const Hypergraph::Edge& edge = hg.edges_[edgei]; + const HG::Edge& edge = hg.edges_[edgei]; V x=weight(edge)*outside[i]; const int num_tail_nodes = edge.tail_nodes_.size(); for (int k = 0; k < num_tail_nodes; ++k) diff --git a/decoder/kbest.h b/decoder/kbest.h index 9af3a20e..9a55f653 100644 --- a/decoder/kbest.h +++ b/decoder/kbest.h @@ -48,7 +48,7 @@ namespace KBest { } struct Derivation { - Derivation(const Hypergraph::Edge& e, + Derivation(const HG::Edge& e, const SmallVectorInt& jv, const WeightType& w, const SparseVector<double>& f) : @@ -58,11 +58,11 @@ namespace KBest { feature_values(f) {} // dummy constructor, just for query - Derivation(const Hypergraph::Edge& e, + Derivation(const HG::Edge& e, const SmallVectorInt& jv) : edge(&e), j(jv) {} T yield; - const Hypergraph::Edge* const edge; + const HG::Edge* const edge; const SmallVectorInt j; const WeightType score; const SparseVector<double> feature_values; @@ -82,8 +82,8 @@ namespace KBest { Derivation const* d; explicit EdgeHandle(Derivation const* d) : d(d) { } // operator bool() const { return d->edge; } - operator Hypergraph::Edge const* () const { return d->edge; } -// Hypergraph::Edge const * operator ->() const { return d->edge; } + operator HG::Edge const* () const { return d->edge; } +// HG::Edge const * operator ->() const { return d->edge; } }; EdgeHandle operator()(unsigned t,unsigned taili,EdgeHandle const& parent) const { @@ -158,7 +158,7 @@ namespace KBest { // the yield is computed in LazyKthBest before the derivation is added to D // returns NULL if j refers to derivation numbers larger than the // antecedent structure define - Derivation* CreateDerivation(const 
Hypergraph::Edge& e, const SmallVectorInt& j) { + Derivation* CreateDerivation(const HG::Edge& e, const SmallVectorInt& j) { WeightType score = w(e); SparseVector<double> feats = e.feature_values_; for (int i = 0; i < e.Arity(); ++i) { @@ -177,7 +177,7 @@ namespace KBest { const Hypergraph::Node& node = g.nodes_[v]; for (unsigned i = 0; i < node.in_edges_.size(); ++i) { - const Hypergraph::Edge& edge = g.edges_[node.in_edges_[i]]; + const HG::Edge& edge = g.edges_[node.in_edges_[i]]; SmallVectorInt jv(edge.Arity(), 0); Derivation* d = CreateDerivation(edge, jv); assert(d); diff --git a/decoder/oracle_bleu.h b/decoder/oracle_bleu.h index b603e27a..d2c4715c 100644 --- a/decoder/oracle_bleu.h +++ b/decoder/oracle_bleu.h @@ -12,6 +12,7 @@ #include "scorer.h" #include "hg.h" #include "ff_factory.h" +#include "ffset.h" #include "ff_bleu.h" #include "sparse_vector.h" #include "viterbi.h" @@ -26,7 +27,7 @@ struct Translation { typedef std::vector<WordID> Sentence; Sentence sentence; - FeatureVector features; + SparseVector<double> features; Translation() { } Translation(Hypergraph const& hg,WeightVector *feature_weights=0) { @@ -57,14 +58,14 @@ struct Oracle { } // feature 0 will be the error rate in fear and hope // move toward hope - FeatureVector ModelHopeGradient() const { - FeatureVector r=hope.features-model.features; + SparseVector<double> ModelHopeGradient() const { + SparseVector<double> r=hope.features-model.features; r.set_value(0,0); return r; } // move toward hope from fear - FeatureVector FearHopeGradient() const { - FeatureVector r=hope.features-fear.features; + SparseVector<double> FearHopeGradient() const { + SparseVector<double> r=hope.features-fear.features; r.set_value(0,0); return r; } diff --git a/decoder/program_options.h b/decoder/program_options.h index 87afb320..3cd7649a 100644 --- a/decoder/program_options.h +++ b/decoder/program_options.h @@ -94,7 +94,7 @@ struct any_printer : public boost::function<void (Ostream &,boost::any const&)> {} template <class T> - explicit any_printer(T const* tag) : F(typed_print<T>()) { + explicit any_printer(T const*) : F(typed_print<T>()) { } template <class T> diff --git a/decoder/tromble_loss.h b/decoder/tromble_loss.h index 599a2d54..fde33100 100644 --- a/decoder/tromble_loss.h +++ b/decoder/tromble_loss.h @@ -28,7 +28,7 @@ class TrombleLossComputer : private boost::base_from_member<boost::scoped_ptr<Tr protected: virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta, - const Hypergraph::Edge& edge, + const HG::Edge& edge, const std::vector<const void*>& ant_contexts, SparseVector<double>* features, SparseVector<double>* estimated_features, diff --git a/decoder/viterbi.cc b/decoder/viterbi.cc index 1b9c6665..9e381ac6 100644 --- a/decoder/viterbi.cc +++ b/decoder/viterbi.cc @@ -139,8 +139,8 @@ inline bool close_enough(double a,double b,double epsilon) return diff<=epsilon*fabs(a) || diff<=epsilon*fabs(b); } -FeatureVector ViterbiFeatures(Hypergraph const& hg,WeightVector const* weights,bool fatal_dotprod_disagreement) { - FeatureVector r; +SparseVector<double> ViterbiFeatures(Hypergraph const& hg,WeightVector const* weights,bool fatal_dotprod_disagreement) { + SparseVector<double> r; const prob_t p = Viterbi<FeatureVectorTraversal>(hg, &r); if (weights) { double logp=log(p); diff --git a/decoder/viterbi.h b/decoder/viterbi.h index 03e961a2..a8a0ea7f 100644 --- a/decoder/viterbi.h +++ b/decoder/viterbi.h @@ -14,10 +14,10 @@ std::string viterbi_stats(Hypergraph const& hg, std::string const& name="forest" //TODO: make T a 
typename inside Traversal and WeightType a typename inside WeightFunction? // Traversal must implement: // typedef T Result; -// void operator()(Hypergraph::Edge const& e,const vector<const Result*>& ants, Result* result) const; +// void operator()(HG::Edge const& e,const vector<const Result*>& ants, Result* result) const; // WeightFunction must implement: // typedef prob_t Weight; -// Weight operator()(Hypergraph::Edge const& e) const; +// Weight operator()(HG::Edge const& e) const; template<class Traversal,class WeightFunction> typename WeightFunction::Weight Viterbi(const Hypergraph& hg, typename Traversal::Result* result, @@ -39,9 +39,9 @@ typename WeightFunction::Weight Viterbi(const Hypergraph& hg, *cur_node_best_weight = WeightType(1); continue; } - Hypergraph::Edge const* edge_best=0; + HG::Edge const* edge_best=0; for (unsigned j = 0; j < num_in_edges; ++j) { - const Hypergraph::Edge& edge = hg.edges_[cur_node.in_edges_[j]]; + const HG::Edge& edge = hg.edges_[cur_node.in_edges_[j]]; WeightType score = weight(edge); for (unsigned k = 0; k < edge.tail_nodes_.size(); ++k) score *= vit_weight[edge.tail_nodes_[k]]; @@ -51,7 +51,7 @@ typename WeightFunction::Weight Viterbi(const Hypergraph& hg, } } assert(edge_best); - Hypergraph::Edge const& edgeb=*edge_best; + HG::Edge const& edgeb=*edge_best; std::vector<const T*> antsb(edgeb.tail_nodes_.size()); for (unsigned k = 0; k < edgeb.tail_nodes_.size(); ++k) antsb[k] = &vit_result[edgeb.tail_nodes_[k]]; @@ -98,7 +98,7 @@ prob_t Viterbi(const Hypergraph& hg, struct PathLengthTraversal { typedef int Result; - void operator()(const Hypergraph::Edge& edge, + void operator()(const HG::Edge& edge, const std::vector<const int*>& ants, int* result) const { (void) edge; @@ -109,7 +109,7 @@ struct PathLengthTraversal { struct ESentenceTraversal { typedef std::vector<WordID> Result; - void operator()(const Hypergraph::Edge& edge, + void operator()(const HG::Edge& edge, const std::vector<const Result*>& ants, Result* result) const { edge.rule_->ESubstitute(ants, result); @@ -118,7 +118,7 @@ struct ESentenceTraversal { struct ELengthTraversal { typedef int Result; - void operator()(const Hypergraph::Edge& edge, + void operator()(const HG::Edge& edge, const std::vector<const int*>& ants, int* result) const { *result = edge.rule_->ELength() - edge.rule_->Arity(); @@ -128,7 +128,7 @@ struct ELengthTraversal { struct FSentenceTraversal { typedef std::vector<WordID> Result; - void operator()(const Hypergraph::Edge& edge, + void operator()(const HG::Edge& edge, const std::vector<const Result*>& ants, Result* result) const { edge.rule_->FSubstitute(ants, result); @@ -142,7 +142,7 @@ struct ETreeTraversal { const std::string space; const std::string right; typedef std::vector<WordID> Result; - void operator()(const Hypergraph::Edge& edge, + void operator()(const HG::Edge& edge, const std::vector<const Result*>& ants, Result* result) const { Result tmp; @@ -162,7 +162,7 @@ struct FTreeTraversal { const std::string space; const std::string right; typedef std::vector<WordID> Result; - void operator()(const Hypergraph::Edge& edge, + void operator()(const HG::Edge& edge, const std::vector<const Result*>& ants, Result* result) const { Result tmp; @@ -177,8 +177,8 @@ struct FTreeTraversal { }; struct ViterbiPathTraversal { - typedef std::vector<Hypergraph::Edge const*> Result; - void operator()(const Hypergraph::Edge& edge, + typedef std::vector<HG::Edge const*> Result; + void operator()(const HG::Edge& edge, std::vector<Result const*> const& ants, Result* result) 
const { for (unsigned i = 0; i < ants.size(); ++i) @@ -189,8 +189,8 @@ struct ViterbiPathTraversal { }; struct FeatureVectorTraversal { - typedef FeatureVector Result; - void operator()(Hypergraph::Edge const& edge, + typedef SparseVector<double> Result; + void operator()(HG::Edge const& edge, std::vector<Result const*> const& ants, Result* result) const { for (unsigned i = 0; i < ants.size(); ++i) @@ -210,6 +210,6 @@ int ViterbiELength(const Hypergraph& hg); int ViterbiPathLength(const Hypergraph& hg); /// if weights supplied, assert viterbi prob = features.dot(*weights) (exception if fatal, cerr warn if not). return features (sum over all edges in viterbi derivation) -FeatureVector ViterbiFeatures(Hypergraph const& hg,WeightVector const* weights=0,bool fatal_dotprod_disagreement=false); +SparseVector<double> ViterbiFeatures(Hypergraph const& hg,WeightVector const* weights=0,bool fatal_dotprod_disagreement=false); #endif diff --git a/dpmert/Jamfile b/dpmert/Jamfile deleted file mode 100644 index 6f6a3d40..00000000 --- a/dpmert/Jamfile +++ /dev/null @@ -1,33 +0,0 @@ -import testing ; -import lex ; -import option ; - -lib dpmert : - ces.cc - error_surface.cc - line_optimizer.cc - mert_geometry.cc - ..//utils - ..//mteval - ../decoder//decoder - ../klm/lm//kenlm - ..//boost_program_options - : <include>. - : : - <library>..//utils - <library>..//mteval - <library>../decoder//decoder - <library>../klm/lm//kenlm - <library>..//boost_program_options - <include>. - ; - -all_tests [ glob *_test.cc ] : dpmert : <testing.arg>$(TOP)/dpmert/test_data ; - -exe sentserver : sentserver.c : <threading>multi ; -exe sentclient : sentclient.c ; -exe mr_dpmert_generate_mapper_input : mr_dpmert_generate_mapper_input.cc dpmert ..//boost_program_options ; -exe mr_dpmert_map : mr_dpmert_map.cc dpmert ..//boost_program_options ; -exe mr_dpmert_reduce : mr_dpmert_reduce.cc dpmert ..//boost_program_options ; - -alias programs : sentserver sentclient mr_dpmert_generate_mapper_input mr_dpmert_map mr_dpmert_reduce ; diff --git a/dpmert/lo_test.cc b/dpmert/lo_test.cc index 2daf87bb..95a08d3d 100644 --- a/dpmert/lo_test.cc +++ b/dpmert/lo_test.cc @@ -36,7 +36,7 @@ BOOST_AUTO_TEST_CASE( TestCheckNaN) { double x = 0; double y = 0; double z = x / y; - BOOST_CHECK_EQUAL(true, isnan(z)); + BOOST_CHECK_EQUAL(true, std::isnan(z)); } BOOST_AUTO_TEST_CASE(TestConvexHull) { diff --git a/example_extff/Makefile.am b/example_extff/Makefile.am new file mode 100644 index 00000000..ac2694ca --- /dev/null +++ b/example_extff/Makefile.am @@ -0,0 +1,5 @@ +AM_CPPFLAGS = -DBOOST_TEST_DYN_LINK -W -Wno-sign-compare $(GTEST_CPPFLAGS) -I.. -I../mteval -I../utils -I../klm -I../decoder + +lib_LTLIBRARIES = libff_example.la +libff_example_la_SOURCES = ff_example.cc +libff_example_la_LDFLAGS = -version-info 1:0:0 -module diff --git a/example_extff/README.md b/example_extff/README.md new file mode 100644 index 00000000..f2aba487 --- /dev/null +++ b/example_extff/README.md @@ -0,0 +1,8 @@ +This is an example of an _external_ feature function, loaded as a dynamically linked library at run time to compute features over derivations in a hypergraph. To load an external feature function, specify it in your `cdec.ini` configuration file as follows: + + feature_function=External /path/to/libmy_feature.so + +Any extra options are passed to the external library. +
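+For example, everything after the library path is handed verbatim to the library's `create_ff` entry point as its configuration string (the option below is hypothetical; its meaning is entirely up to your feature function): + + feature_function=External /path/to/libmy_feature.so --my_option 3 +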
+*Note*: the build system uses [GNU Libtool](http://www.gnu.org/software/libtool/) to create the shared library. This may be placed in a hidden directory called `.libs`. + diff --git a/example_extff/ff_example.cc b/example_extff/ff_example.cc new file mode 100644 index 00000000..4e478ecd --- /dev/null +++ b/example_extff/ff_example.cc @@ -0,0 +1,58 @@ +#include "ff.h" +#include <iostream> +#include <sstream> + +#include "hg.h" + +using namespace std; + +// example of a "stateful" feature made available as an external library +// This feature looks at nodes and their daughters and fires an indicator based +// on the arities of the rules involved. +// (X (X a) b (X c)) - this is an arity-2 parent with children of arity 0 and 0, +// so you get MAF_2_0_0=1 +class ParentChildrenArityFeatures : public FeatureFunction { + public: + ParentChildrenArityFeatures(const string& param) : fids(16, vector<int>(256, -1)) { + SetStateSize(1); // number of bytes of extra state required by this feature + } + virtual void FinalTraversalFeatures(const void* context, + SparseVector<double>* features) const { + // The goal is always arity 1, so there is no discriminative value in + // computing a feature here + } + protected: + virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta, + const Hypergraph::Edge& edge, + const std::vector<const void*>& ant_contexts, + FeatureVector* features, + FeatureVector* estimated_features, + void* context) const { + unsigned child_arity_code = 0; + for (unsigned j = 0; j < ant_contexts.size(); ++j) { + child_arity_code <<= 4; + child_arity_code |= *reinterpret_cast<const unsigned char*>(ant_contexts[j]); + } + int& fid = fids[edge.Arity()][child_arity_code]; // reference! + if (fid < 0) { + ostringstream feature_string; + feature_string << "MAF_" << edge.Arity(); + for (unsigned j = 0; j < ant_contexts.size(); ++j) + feature_string << '_' << + static_cast<int>(*reinterpret_cast<const unsigned char*>(ant_contexts[j])); + fid = FD::Convert(feature_string.str()); + } + features->set_value(fid, 1.0); + *reinterpret_cast<unsigned char*>(context) = edge.Arity(); // save state + } + private: + mutable vector<vector<int> > fids; +}; + +// IMPORTANT: this function must be implemented by any external FF library +// if your library has multiple features, you can use str to configure things +extern "C" FeatureFunction* create_ff(const string& str) { + return new ParentChildrenArityFeatures(str); +} + + diff --git a/gi/clda/src/Makefile.am b/gi/clda/src/Makefile.am deleted file mode 100644 index cdca1f97..00000000 --- a/gi/clda/src/Makefile.am +++ /dev/null @@ -1,6 +0,0 @@ -bin_PROGRAMS = clda - -clda_SOURCES = clda.cc - -AM_CPPFLAGS = -W -Wall -Wno-sign-compare -funroll-loops -I$(top_srcdir)/utils $(GTEST_CPPFLAGS) -AM_LDFLAGS = $(top_srcdir)/utils/libutils.a -lz diff --git a/gi/clda/src/ccrp.h b/gi/clda/src/ccrp.h deleted file mode 100644 index a7c2825c..00000000 --- a/gi/clda/src/ccrp.h +++ /dev/null @@ -1,291 +0,0 @@ -#ifndef _CCRP_H_ -#define _CCRP_H_ - -#include <numeric> -#include <cassert> -#include <cmath> -#include <list> -#include <iostream> -#include <vector> -#include <tr1/unordered_map> -#include <boost/functional/hash.hpp> -#include "sampler.h" -#include "slice_sampler.h" - -// Chinese restaurant process (Pitman-Yor parameters) with table tracking. 
- -template <typename Dish, typename DishHash = boost::hash<Dish> > -class CCRP { - public: - CCRP(double disc, double conc) : - num_tables_(), - num_customers_(), - discount_(disc), - concentration_(conc), - discount_prior_alpha_(std::numeric_limits<double>::quiet_NaN()), - discount_prior_beta_(std::numeric_limits<double>::quiet_NaN()), - concentration_prior_shape_(std::numeric_limits<double>::quiet_NaN()), - concentration_prior_rate_(std::numeric_limits<double>::quiet_NaN()) {} - - CCRP(double d_alpha, double d_beta, double c_shape, double c_rate, double d = 0.1, double c = 10.0) : - num_tables_(), - num_customers_(), - discount_(d), - concentration_(c), - discount_prior_alpha_(d_alpha), - discount_prior_beta_(d_beta), - concentration_prior_shape_(c_shape), - concentration_prior_rate_(c_rate) {} - - double discount() const { return discount_; } - double concentration() const { return concentration_; } - - bool has_discount_prior() const { - return !std::isnan(discount_prior_alpha_); - } - - bool has_concentration_prior() const { - return !std::isnan(concentration_prior_shape_); - } - - void clear() { - num_tables_ = 0; - num_customers_ = 0; - dish_locs_.clear(); - } - - unsigned num_tables(const Dish& dish) const { - const typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.find(dish); - if (it == dish_locs_.end()) return 0; - return it->second.table_counts_.size(); - } - - unsigned num_customers() const { - return num_customers_; - } - - unsigned num_customers(const Dish& dish) const { - const typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.find(dish); - if (it == dish_locs_.end()) return 0; - return it->total_dish_count_; - } - - // returns +1 or 0 indicating whether a new table was opened - int increment(const Dish& dish, const double& p0, MT19937* rng) { - DishLocations& loc = dish_locs_[dish]; - bool share_table = false; - if (loc.total_dish_count_) { - const double p_empty = (concentration_ + num_tables_ * discount_) * p0; - const double p_share = (loc.total_dish_count_ - loc.table_counts_.size() * discount_); - share_table = rng->SelectSample(p_empty, p_share); - } - if (share_table) { - double r = rng->next() * (loc.total_dish_count_ - loc.table_counts_.size() * discount_); - for (typename std::list<unsigned>::iterator ti = loc.table_counts_.begin(); - ti != loc.table_counts_.end(); ++ti) { - r -= (*ti - discount_); - if (r <= 0.0) { - ++(*ti); - break; - } - } - if (r > 0.0) { - std::cerr << "Serious error: r=" << r << std::endl; - Print(&std::cerr); - assert(r <= 0.0); - } - } else { - loc.table_counts_.push_back(1u); - ++num_tables_; - } - ++loc.total_dish_count_; - ++num_customers_; - return (share_table ? 0 : 1); - } - - // returns -1 or 0, indicating whether a table was closed - int decrement(const Dish& dish, MT19937* rng) { - DishLocations& loc = dish_locs_[dish]; - assert(loc.total_dish_count_); - if (loc.total_dish_count_ == 1) { - dish_locs_.erase(dish); - --num_tables_; - --num_customers_; - return -1; - } else { - int delta = 0; - // sample customer to remove UNIFORMLY. that is, do NOT use the discount - // here. if you do, it will introduce (unwanted) bias! 
- double r = rng->next() * loc.total_dish_count_; - --loc.total_dish_count_; - for (typename std::list<unsigned>::iterator ti = loc.table_counts_.begin(); - ti != loc.table_counts_.end(); ++ti) { - r -= *ti; - if (r <= 0.0) { - if ((--(*ti)) == 0) { - --num_tables_; - delta = -1; - loc.table_counts_.erase(ti); - } - break; - } - } - if (r > 0.0) { - std::cerr << "Serious error: r=" << r << std::endl; - Print(&std::cerr); - assert(r <= 0.0); - } - --num_customers_; - return delta; - } - } - - double prob(const Dish& dish, const double& p0) const { - const typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.find(dish); - const double r = num_tables_ * discount_ + concentration_; - if (it == dish_locs_.end()) { - return r * p0 / (num_customers_ + concentration_); - } else { - return (it->second.total_dish_count_ - discount_ * it->second.table_counts_.size() + r * p0) / - (num_customers_ + concentration_); - } - } - - double log_crp_prob() const { - return log_crp_prob(discount_, concentration_); - } - - static double log_beta_density(const double& x, const double& alpha, const double& beta) { - assert(x > 0.0); - assert(x < 1.0); - assert(alpha > 0.0); - assert(beta > 0.0); - const double lp = (alpha-1)*log(x)+(beta-1)*log(1-x)+lgamma(alpha+beta)-lgamma(alpha)-lgamma(beta); - return lp; - } - - static double log_gamma_density(const double& x, const double& shape, const double& rate) { - assert(x >= 0.0); - assert(shape > 0.0); - assert(rate > 0.0); - const double lp = (shape-1)*log(x) - shape*log(rate) - x/rate - lgamma(shape); - return lp; - } - - // taken from http://en.wikipedia.org/wiki/Chinese_restaurant_process - // does not include P_0's - double log_crp_prob(const double& discount, const double& concentration) const { - double lp = 0.0; - if (has_discount_prior()) - lp = log_beta_density(discount, discount_prior_alpha_, discount_prior_beta_); - if (has_concentration_prior()) - lp += log_gamma_density(concentration, concentration_prior_shape_, concentration_prior_rate_); - assert(lp <= 0.0); - if (num_customers_) { - if (discount > 0.0) { - const double r = lgamma(1.0 - discount); - lp += lgamma(concentration) - lgamma(concentration + num_customers_) - + num_tables_ * log(discount) + lgamma(concentration / discount + num_tables_) - - lgamma(concentration / discount); - assert(std::isfinite(lp)); - for (typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.begin(); - it != dish_locs_.end(); ++it) { - const DishLocations& cur = it->second; - for (std::list<unsigned>::const_iterator ti = cur.table_counts_.begin(); ti != cur.table_counts_.end(); ++ti) { - lp += lgamma(*ti - discount) - r; - } - } - } else { - assert(!"not implemented yet"); - } - } - assert(std::isfinite(lp)); - return lp; - } - - void resample_hyperparameters(MT19937* rng) { - assert(has_discount_prior() || has_concentration_prior()); - DiscountResampler dr(*this); - ConcentrationResampler cr(*this); - const int niterations = 10; - double gamma_upper = std::numeric_limits<double>::infinity(); - for (int iter = 0; iter < 5; ++iter) { - if (has_concentration_prior()) { - concentration_ = slice_sampler1d(cr, concentration_, *rng, 0.0, - gamma_upper, 0.0, niterations, 100*niterations); - } - if (has_discount_prior()) { - discount_ = slice_sampler1d(dr, discount_, *rng, std::numeric_limits<double>::min(), - 1.0, 0.0, niterations, 100*niterations); - } - } - concentration_ = slice_sampler1d(cr, concentration_, *rng, 0.0, - gamma_upper, 0.0, 
niterations, 100*niterations); - } - - struct DiscountResampler { - DiscountResampler(const CCRP& crp) : crp_(crp) {} - const CCRP& crp_; - double operator()(const double& proposed_discount) const { - return crp_.log_crp_prob(proposed_discount, crp_.concentration_); - } - }; - - struct ConcentrationResampler { - ConcentrationResampler(const CCRP& crp) : crp_(crp) {} - const CCRP& crp_; - double operator()(const double& proposed_concentration) const { - return crp_.log_crp_prob(crp_.discount_, proposed_concentration); - } - }; - - struct DishLocations { - DishLocations() : total_dish_count_() {} - unsigned total_dish_count_; // customers at all tables with this dish - std::list<unsigned> table_counts_; // list<> gives O(1) deletion and insertion, which we want - // .size() is the number of tables for this dish - }; - - void Print(std::ostream* out) const { - for (typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.begin(); - it != dish_locs_.end(); ++it) { - (*out) << it->first << " (" << it->second.total_dish_count_ << " on " << it->second.table_counts_.size() << " tables): "; - for (typename std::list<unsigned>::const_iterator i = it->second.table_counts_.begin(); - i != it->second.table_counts_.end(); ++i) { - (*out) << " " << *i; - } - (*out) << std::endl; - } - } - - typedef typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator const_iterator; - const_iterator begin() const { - return dish_locs_.begin(); - } - const_iterator end() const { - return dish_locs_.end(); - } - - unsigned num_tables_; - unsigned num_customers_; - std::tr1::unordered_map<Dish, DishLocations, DishHash> dish_locs_; - - double discount_; - double concentration_; - - // optional beta prior on discount_ (NaN if no prior) - double discount_prior_alpha_; - double discount_prior_beta_; - - // optional gamma prior on concentration_ (NaN if no prior) - double concentration_prior_shape_; - double concentration_prior_rate_; -}; - -template <typename T,typename H> -std::ostream& operator<<(std::ostream& o, const CCRP<T,H>& c) { - c.Print(&o); - return o; -} - -#endif diff --git a/gi/clda/src/clda.cc b/gi/clda/src/clda.cc deleted file mode 100644 index f548997f..00000000 --- a/gi/clda/src/clda.cc +++ /dev/null @@ -1,148 +0,0 @@ -#include <iostream> -#include <vector> -#include <map> -#include <string> - -#include "timer.h" -#include "crp.h" -#include "ccrp.h" -#include "sampler.h" -#include "tdict.h" -const size_t MAX_DOC_LEN_CHARS = 10000000; - -using namespace std; - -void ShowTopWordsForTopic(const map<WordID, int>& counts) { - multimap<int, WordID> ms; - for (map<WordID,int>::const_iterator it = counts.begin(); it != counts.end(); ++it) - ms.insert(make_pair(it->second, it->first)); - int cc = 0; - for (multimap<int, WordID>::reverse_iterator it = ms.rbegin(); it != ms.rend(); ++it) { - cerr << it->first << ':' << TD::Convert(it->second) << " "; - ++cc; - if (cc==20) break; - } - cerr << endl; -} - -int main(int argc, char** argv) { - if (argc != 3) { - cerr << "Usage: " << argv[0] << " num-classes num-samples\n"; - return 1; - } - const int num_classes = atoi(argv[1]); - const int num_iterations = atoi(argv[2]); - const int burnin_size = num_iterations * 0.9; - if (num_classes < 2) { - cerr << "Must request more than 1 class\n"; - return 1; - } - if (num_iterations < 5) { - cerr << "Must request more than 5 iterations\n"; - return 1; - } - cerr << "CLASSES: " << num_classes << endl; - char* buf = new char[MAX_DOC_LEN_CHARS]; - vector<vector<int> > 
wji; // w[j][i] - observed word i of doc j - vector<vector<int> > zji; // z[j][i] - topic assignment for word i of doc j - cerr << "READING DOCUMENTS\n"; - while(cin) { - cin.getline(buf, MAX_DOC_LEN_CHARS); - if (buf[0] == 0) continue; - wji.push_back(vector<WordID>()); - TD::ConvertSentence(buf, &wji.back()); - } - cerr << "READ " << wji.size() << " DOCUMENTS\n"; - MT19937 rng; - cerr << "INITIALIZING RANDOM TOPIC ASSIGNMENTS\n"; - zji.resize(wji.size()); - double disc = 0.1; - double beta = 10.0; - double alpha = 50.0; - const double uniform_topic = 1.0 / num_classes; - const double uniform_word = 1.0 / TD::NumWords(); - vector<CCRP<int> > dr(zji.size(), CCRP<int>(1,1,1,1,disc, beta)); // dr[i] describes the probability of using a topic in document i - vector<CCRP<int> > wr(num_classes, CCRP<int>(1,1,1,1,disc, alpha)); // wr[k] describes the probability of generating a word in topic k - for (int j = 0; j < zji.size(); ++j) { - const size_t num_words = wji[j].size(); - vector<int>& zj = zji[j]; - const vector<int>& wj = wji[j]; - zj.resize(num_words); - for (int i = 0; i < num_words; ++i) { - int random_topic = rng.next() * num_classes; - if (random_topic == num_classes) { --random_topic; } - zj[i] = random_topic; - const int word = wj[i]; - dr[j].increment(random_topic, uniform_topic, &rng); - wr[random_topic].increment(word, uniform_word, &rng); - } - } - cerr << "SAMPLING\n"; - vector<map<WordID, int> > t2w(num_classes); - Timer timer; - SampleSet<double> ss; - ss.resize(num_classes); - double total_time = 0; - for (int iter = 0; iter < num_iterations; ++iter) { - cerr << '.'; - if (iter && iter % 10 == 0) { - total_time += timer.Elapsed(); - timer.Reset(); - double llh = 0; -#if 1 - for (int j = 0; j < dr.size(); ++j) - dr[j].resample_hyperparameters(&rng); - for (int j = 0; j < wr.size(); ++j) - wr[j].resample_hyperparameters(&rng); -#endif - - for (int j = 0; j < dr.size(); ++j) - llh += dr[j].log_crp_prob(); - for (int j = 0; j < wr.size(); ++j) - llh += wr[j].log_crp_prob(); - cerr << " [LLH=" << llh << " I=" << iter << "]\n"; - } - for (int j = 0; j < zji.size(); ++j) { - const size_t num_words = wji[j].size(); - vector<int>& zj = zji[j]; - const vector<int>& wj = wji[j]; - for (int i = 0; i < num_words; ++i) { - const int word = wj[i]; - const int cur_topic = zj[i]; - dr[j].decrement(cur_topic, &rng); - wr[cur_topic].decrement(word, &rng); - - for (int k = 0; k < num_classes; ++k) { - ss[k]= dr[j].prob(k, uniform_topic) * wr[k].prob(word, uniform_word); - } - const int new_topic = rng.SelectSample(ss); - dr[j].increment(new_topic, uniform_topic, &rng); - wr[new_topic].increment(word, uniform_word, &rng); - zj[i] = new_topic; - if (iter > burnin_size) { - ++t2w[cur_topic][word]; - } - } - } - } - for (int i = 0; i < num_classes; ++i) { - cerr << "---------------------------------\n"; - cerr << " final PYP(" << wr[i].discount() << "," << wr[i].concentration() << ")\n"; - ShowTopWordsForTopic(t2w[i]); - } - cerr << "-------------\n"; -#if 0 - for (int j = 0; j < zji.size(); ++j) { - const size_t num_words = wji[j].size(); - vector<int>& zj = zji[j]; - const vector<int>& wj = wji[j]; - zj.resize(num_words); - for (int i = 0; i < num_words; ++i) { - cerr << TD::Convert(wji[j][i]) << '(' << zj[i] << ") "; - } - cerr << endl; - } -#endif - return 0; -} - diff --git a/gi/clda/src/crp.h b/gi/clda/src/crp.h deleted file mode 100644 index 9d35857e..00000000 --- a/gi/clda/src/crp.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef _CRP_H_ -#define _CRP_H_ - -// shamelessly adapted from code by Phil 
Blunsom and Trevor Cohn - -#include <boost/functional/hash.hpp> -#include <tr1/unordered_map> - -#include "prob.h" - -template <typename DishType, typename Hash = boost::hash<DishType> > -class CRP { - public: - CRP(double alpha) : alpha_(alpha), palpha_(alpha), total_customers_() {} - void increment(const DishType& dish); - void decrement(const DishType& dish); - void erase(const DishType& dish) { - counts_.erase(dish); - } - inline int count(const DishType& dish) const { - const typename MapType::const_iterator i = counts_.find(dish); - if (i == counts_.end()) return 0; else return i->second; - } - inline prob_t prob(const DishType& dish, const prob_t& p0) const { - return (prob_t(count(dish)) + palpha_ * p0) / prob_t(total_customers_ + alpha_); - } - private: - typedef std::tr1::unordered_map<DishType, int, Hash> MapType; - MapType counts_; - const double alpha_; - const prob_t palpha_; - int total_customers_; -}; - -template <typename Dish, typename Hash> -void CRP<Dish,Hash>::increment(const Dish& dish) { - ++counts_[dish]; - ++total_customers_; -} - -template <typename Dish, typename Hash> -void CRP<Dish,Hash>::decrement(const Dish& dish) { - typename MapType::iterator i = counts_.find(dish); - assert(i != counts_.end()); - if (--i->second == 0) - counts_.erase(i); - --total_customers_; -} - -#endif diff --git a/gi/clda/src/slice_sampler.h b/gi/clda/src/slice_sampler.h deleted file mode 100644 index aa48a169..00000000 --- a/gi/clda/src/slice_sampler.h +++ /dev/null @@ -1,191 +0,0 @@ -//! slice-sampler.h is an MCMC slice sampler -//! -//! Mark Johnson, 1st August 2008 - -#ifndef SLICE_SAMPLER_H -#define SLICE_SAMPLER_H - -#include <algorithm> -#include <cassert> -#include <cmath> -#include <iostream> -#include <limits> - -//! slice_sampler_rfc_type{} returns the value of a user-specified -//! function if the argument is within range, or - infinity otherwise -// -template <typename F, typename Fn, typename U> -struct slice_sampler_rfc_type { - F min_x, max_x; - const Fn& f; - U max_nfeval, nfeval; - slice_sampler_rfc_type(F min_x, F max_x, const Fn& f, U max_nfeval) - : min_x(min_x), max_x(max_x), f(f), max_nfeval(max_nfeval), nfeval(0) { } - - F operator() (F x) { - if (min_x < x && x < max_x) { - assert(++nfeval <= max_nfeval); - F fx = f(x); - assert(std::isfinite(fx)); - return fx; - } - return -std::numeric_limits<F>::infinity(); - } -}; // slice_sampler_rfc_type{} - -//! slice_sampler1d() implements the univariate "range doubling" slice sampler -//! described in Neal (2003) "Slice Sampling", The Annals of Statistics 31(3), 705-767. -// -template <typename F, typename LogF, typename Uniform01> -F slice_sampler1d(const LogF& logF0, //!< log of function to sample - F x, //!< starting point - Uniform01& u01, //!< uniform [0,1) random number generator - F min_x = -std::numeric_limits<F>::infinity(), //!< minimum value of support - F max_x = std::numeric_limits<F>::infinity(), //!< maximum value of support - F w = 0.0, //!< guess at initial width - unsigned nsamples=1, //!< number of samples to draw - unsigned max_nfeval=200) //!< max number of function evaluations -{ - typedef unsigned U; - slice_sampler_rfc_type<F,LogF,U> logF(min_x, max_x, logF0, max_nfeval); - - assert(std::isfinite(x)); - - if (w <= 0.0) { // set w to a default width - if (min_x > -std::numeric_limits<F>::infinity() && max_x < std::numeric_limits<F>::infinity()) - w = (max_x - min_x)/4; - else - w = std::max(((x < 0.0) ? 
-x : x)/4, (F) 0.1); - } - assert(std::isfinite(w)); - - F logFx = logF(x); - for (U sample = 0; sample < nsamples; ++sample) { - F logY = logFx + log(u01()+1e-100); //! slice logFx at this value - assert(std::isfinite(logY)); - - F xl = x - w*u01(); //! lower bound on slice interval - F logFxl = logF(xl); - F xr = xl + w; //! upper bound on slice interval - F logFxr = logF(xr); - - while (logY < logFxl || logY < logFxr) // doubling procedure - if (u01() < 0.5) - logFxl = logF(xl -= xr - xl); - else - logFxr = logF(xr += xr - xl); - - F xl1 = xl; - F xr1 = xr; - while (true) { // shrinking procedure - F x1 = xl1 + u01()*(xr1 - xl1); - if (logY < logF(x1)) { - F xl2 = xl; // acceptance procedure - F xr2 = xr; - bool d = false; - while (xr2 - xl2 > 1.1*w) { - F xm = (xl2 + xr2)/2; - if ((x < xm && x1 >= xm) || (x >= xm && x1 < xm)) - d = true; - if (x1 < xm) - xr2 = xm; - else - xl2 = xm; - if (d && logY >= logF(xl2) && logY >= logF(xr2)) - goto unacceptable; - } - x = x1; - goto acceptable; - } - goto acceptable; - unacceptable: - if (x1 < x) // rest of shrinking procedure - xl1 = x1; - else - xr1 = x1; - } - acceptable: - w = (4*w + (xr1 - xl1))/5; // update width estimate - } - return x; -} - -/* -//! slice_sampler1d() implements a 1-d MCMC slice sampler. -//! It should be correct for unimodal distributions, but -//! not for multimodal ones. -// -template <typename F, typename LogP, typename Uniform01> -F slice_sampler1d(const LogP& logP, //!< log of distribution to sample - F x, //!< initial sample - Uniform01& u01, //!< uniform random number generator - F min_x = -std::numeric_limits<F>::infinity(), //!< minimum value of support - F max_x = std::numeric_limits<F>::infinity(), //!< maximum value of support - F w = 0.0, //!< guess at initial width - unsigned nsamples=1, //!< number of samples to draw - unsigned max_nfeval=200) //!< max number of function evaluations -{ - typedef unsigned U; - assert(std::isfinite(x)); - if (w <= 0.0) { - if (min_x > -std::numeric_limits<F>::infinity() && max_x < std::numeric_limits<F>::infinity()) - w = (max_x - min_x)/4; - else - w = std::max(((x < 0.0) ? 
-x : x)/4, 0.1); - } - // TRACE4(x, min_x, max_x, w); - F logPx = logP(x); - assert(std::isfinite(logPx)); - U nfeval = 1; - for (U sample = 0; sample < nsamples; ++sample) { - F x0 = x; - F logU = logPx + log(u01()+1e-100); - assert(std::isfinite(logU)); - F r = u01(); - F xl = std::max(min_x, x - r*w); - F xr = std::min(max_x, x + (1-r)*w); - // TRACE3(x, logPx, logU); - while (xl > min_x && logP(xl) > logU) { - xl -= w; - w *= 2; - ++nfeval; - if (nfeval >= max_nfeval) - std::cerr << "## Error: nfeval = " << nfeval << ", max_nfeval = " << max_nfeval << ", sample = " << sample << ", nsamples = " << nsamples << ", r = " << r << ", w = " << w << ", xl = " << xl << std::endl; - assert(nfeval < max_nfeval); - } - xl = std::max(xl, min_x); - while (xr < max_x && logP(xr) > logU) { - xr += w; - w *= 2; - ++nfeval; - if (nfeval >= max_nfeval) - std::cerr << "## Error: nfeval = " << nfeval << ", max_nfeval = " << max_nfeval << ", sample = " << sample << ", nsamples = " << nsamples << ", r = " << r << ", w = " << w << ", xr = " << xr << std::endl; - assert(nfeval < max_nfeval); - } - xr = std::min(xr, max_x); - while (true) { - r = u01(); - x = r*xl + (1-r)*xr; - assert(std::isfinite(x)); - logPx = logP(x); - // TRACE4(logPx, x, xl, xr); - assert(std::isfinite(logPx)); - ++nfeval; - if (nfeval >= max_nfeval) - std::cerr << "## Error: nfeval = " << nfeval << ", max_nfeval = " << max_nfeval << ", sample = " << sample << ", nsamples = " << nsamples << ", r = " << r << ", w = " << w << ", xl = " << xl << ", xr = " << xr << ", x = " << x << std::endl; - assert(nfeval < max_nfeval); - if (logPx > logU) - break; - else if (x > x0) - xr = x; - else - xl = x; - } - // w = (4*w + (xr-xl))/5; // gradually adjust w - } - // TRACE2(logPx, x); - return x; -} // slice_sampler1d() -*/ - -#endif // SLICE_SAMPLER_H diff --git a/gi/clda/src/timer.h b/gi/clda/src/timer.h deleted file mode 100644 index 123d9a94..00000000 --- a/gi/clda/src/timer.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _TIMER_STATS_H_ -#define _TIMER_STATS_H_ - -#include <ctime> - -struct Timer { - Timer() { Reset(); } - void Reset() { - start_t = clock(); - } - double Elapsed() const { - const clock_t end_t = clock(); - const double elapsed = (end_t - start_t) / 1000000.0; - return elapsed; - } - private: - std::clock_t start_t; -}; - -#endif diff --git a/gi/evaluation/conditional_entropy.py b/gi/evaluation/conditional_entropy.py deleted file mode 100644 index 356d3b1d..00000000 --- a/gi/evaluation/conditional_entropy.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python - -import sys, math, itertools, getopt - -def usage(): - print >>sys.stderr, 'Usage:', sys.argv[0], '[-s slash_threshold] input-1 input-2' - sys.exit(0) - -optlist, args = getopt.getopt(sys.argv[1:], 'hs:') -slash_threshold = None -for opt, arg in optlist: - if opt == '-s': - slash_threshold = int(arg) - else: - usage() -if len(args) != 2: - usage() - -ginfile = open(args[0]) -pinfile = open(args[1]) - -# evaluating: H(G | P) = sum_{g,p} p(g,p) log { p(p) / p(g,p) } -# = sum_{g,p} c(g,p)/N { log c(p) - log N - log c(g,p) + log N } -# = 1/N sum_{g,p} c(g,p) { log c(p) - log c(g,p) } -# where G = gold, P = predicted, N = number of events - -N = 0 -gold_frequencies = {} -predict_frequencies = {} -joint_frequencies = {} - -for gline, pline in itertools.izip(ginfile, pinfile): - gparts = gline.split('||| ')[1].split() - pparts = pline.split('||| ')[1].split() - assert len(gparts) == len(pparts) - - for gpart, ppart in zip(gparts, pparts): - gtag = gpart.split(':',1)[1] - ptag = 
ppart.split(':',1)[1] - - if slash_threshold == None or gtag.count('/') + gtag.count('\\') <= slash_threshold: - joint_frequencies.setdefault((gtag, ptag), 0) - joint_frequencies[gtag,ptag] += 1 - - predict_frequencies.setdefault(ptag, 0) - predict_frequencies[ptag] += 1 - - gold_frequencies.setdefault(gtag, 0) - gold_frequencies[gtag] += 1 - - N += 1 - -hg2p = 0 -hp2g = 0 -for (gtag, ptag), cgp in joint_frequencies.items(): - hp2g += cgp * (math.log(predict_frequencies[ptag], 2) - math.log(cgp, 2)) - hg2p += cgp * (math.log(gold_frequencies[gtag], 2) - math.log(cgp, 2)) -hg2p /= N -hp2g /= N - -print 'H(P|G)', hg2p, 'H(G|P)', hp2g, 'VI', hg2p + hp2g diff --git a/gi/evaluation/confusion_matrix.py b/gi/evaluation/confusion_matrix.py deleted file mode 100644 index 2dd7aa47..00000000 --- a/gi/evaluation/confusion_matrix.py +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env python - -import sys, math, itertools, getopt - -def usage(): - print >>sys.stderr, 'Usage:', sys.argv[0], '[-s slash_threshold] [-p output] [-m] input-1 input-2' - sys.exit(0) - -optlist, args = getopt.getopt(sys.argv[1:], 'hs:mp:') -slash_threshold = None -output_fname = None -show_matrix = False -for opt, arg in optlist: - if opt == '-s': - slash_threshold = int(arg) - elif opt == '-p': - output_fname = arg - elif opt == '-m': - show_matrix = True - else: - usage() -if len(args) != 2 or (not show_matrix and not output_fname): - usage() - -ginfile = open(args[0]) -pinfile = open(args[1]) - -if output_fname: - try: - import Image, ImageDraw - except ImportError: - print >>sys.stderr, "Error: Python Image Library not available. Did you forget to set your PYTHONPATH environment variable?" - sys.exit(1) - -N = 0 -gold_frequencies = {} -predict_frequencies = {} -joint_frequencies = {} - -for gline, pline in itertools.izip(ginfile, pinfile): - gparts = gline.split('||| ')[1].split() - pparts = pline.split('||| ')[1].split() - assert len(gparts) == len(pparts) - - for gpart, ppart in zip(gparts, pparts): - gtag = gpart.split(':',1)[1] - ptag = ppart.split(':',1)[1] - - if slash_threshold == None or gtag.count('/') + gtag.count('\\') <= slash_threshold: - joint_frequencies.setdefault((gtag, ptag), 0) - joint_frequencies[gtag,ptag] += 1 - - predict_frequencies.setdefault(ptag, 0) - predict_frequencies[ptag] += 1 - - gold_frequencies.setdefault(gtag, 0) - gold_frequencies[gtag] += 1 - - N += 1 - -# find top tags -gtags = gold_frequencies.items() -gtags.sort(lambda x,y: x[1]-y[1]) -gtags.reverse() -#gtags = gtags[:50] - -preds = predict_frequencies.items() -preds.sort(lambda x,y: x[1]-y[1]) -preds.reverse() - -if show_matrix: - print '%7s %7s' % ('pred', 'cnt'), - for gtag, gcount in gtags: print '%7s' % gtag, - print - print '=' * 80 - - for ptag, pcount in preds: - print '%7s %7d' % (ptag, pcount), - for gtag, gcount in gtags: - print '%7d' % joint_frequencies.get((gtag, ptag), 0), - print - - print '%7s %7d' % ('total', N), - for gtag, gcount in gtags: print '%7d' % gcount, - print - -if output_fname: - offset=10 - - image = Image.new("RGB", (len(preds), len(gtags)), (255, 255, 255)) - #hsl(hue, saturation%, lightness%) - - # re-sort preds to get a better diagonal - ptags=[] - if True: - ptags = map(lambda (p,c): p, preds) - else: - remaining = set(predict_frequencies.keys()) - for y, (gtag, gcount) in enumerate(gtags): - best = (None, 0) - for ptag in remaining: - #pcount = predict_frequencies[ptag] - p = joint_frequencies.get((gtag, ptag), 0)# / float(pcount) - if p > best[1]: best = (ptag, p) - ptags.append(ptag) - 
remaining.remove(ptag) - if not remaining: break - - print 'Predicted tag ordering:', ' '.join(ptags) - print 'Gold tag ordering:', ' '.join(map(lambda (t,c): t, gtags)) - - draw = ImageDraw.Draw(image) - for x, ptag in enumerate(ptags): - pcount = predict_frequencies[ptag] - minval = math.log(offset) - maxval = math.log(pcount + offset) - for y, (gtag, gcount) in enumerate(gtags): - f = math.log(offset + joint_frequencies.get((gtag, ptag), 0)) - z = int(240. * (maxval - f) / float(maxval - minval)) - #print x, y, z, f, maxval - draw.point([(x,y)], fill='hsl(%d, 100%%, 50%%)' % z) - del draw - image.save(output_fname) diff --git a/gi/evaluation/entropy.py b/gi/evaluation/entropy.py deleted file mode 100644 index ec1ef502..00000000 --- a/gi/evaluation/entropy.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python - -import sys, math, itertools, getopt - -def usage(): - print >>sys.stderr, 'Usage:', sys.argv[0], '[-s slash_threshold] input file' - sys.exit(0) - -optlist, args = getopt.getopt(sys.argv[1:], 'hs:') -slash_threshold = None -for opt, arg in optlist: - if opt == '-s': - slash_threshold = int(arg) - else: - usage() -if len(args) != 1: - usage() - -infile = open(args[0]) -N = 0 -frequencies = {} - -for line in infile: - - for part in line.split('||| ')[1].split(): - tag = part.split(':',1)[1] - - if slash_threshold == None or tag.count('/') + tag.count('\\') <= slash_threshold: - frequencies.setdefault(tag, 0) - frequencies[tag] += 1 - N += 1 - -h = 0 -for tag, c in frequencies.items(): - h -= c * (math.log(c, 2) - math.log(N, 2)) -h /= N - -print 'entropy', h diff --git a/gi/evaluation/extract_ccg_labels.py b/gi/evaluation/extract_ccg_labels.py deleted file mode 100644 index e0034648..00000000 --- a/gi/evaluation/extract_ccg_labels.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python - -# -# Takes spans input along with treebank and spits out CG style categories for each span. 
-# spans = output from CDEC's extools/extractor with --base_phrase_spans option -# treebank = PTB format, one tree per line -# -# Output is in CDEC labelled-span format -# - -import sys, itertools, tree - -tinfile = open(sys.argv[1]) -einfile = open(sys.argv[2]) - -def number_leaves(node, next=0): - left, right = None, None - for child in node.children: - l, r = number_leaves(child, next) - next = max(next, r+1) - if left == None or l < left: - left = l - if right == None or r > right: - right = r - - #print node, left, right, next - if left == None or right == None: - assert not node.children - left = right = next - - node.left = left - node.right = right - - return left, right - -def ancestor(node, indices): - #print node, node.left, node.right, indices - # returns the deepest node covering all the indices - if min(indices) >= node.left and max(indices) <= node.right: - # try the children - for child in node.children: - x = ancestor(child, indices) - if x: return x - return node - else: - return None - -def frontier(node, indices): - #print 'frontier for node', node, 'indices', indices - if node.left > max(indices) or node.right < min(indices): - #print '\toutside' - return [node] - elif node.children: - #print '\tcovering at least part' - ns = [] - for child in node.children: - n = frontier(child, indices) - ns.extend(n) - return ns - else: - return [node] - -def project_heads(node): - #print 'project_heads', node - is_head = node.data.tag.endswith('-HEAD') - if node.children: - found = 0 - for child in node.children: - x = project_heads(child) - if x: - node.data.tag = x - found += 1 - assert found == 1 - elif is_head: - node.data.tag = node.data.tag[:-len('-HEAD')] - - if is_head: - return node.data.tag - else: - return None - -for tline, eline in itertools.izip(tinfile, einfile): - if tline.strip() != '(())': - if tline.startswith('( '): - tline = tline[2:-1].strip() - tr = tree.parse_PST(tline) - if tr != None: - number_leaves(tr) - #project_heads(tr) # assumes Bikel-style head annotation for the input trees - else: - tr = None - - parts = eline.strip().split(" ||| ") - zh, en = parts[:2] - spans = parts[-1] - print '|||', - for span in spans.split(): - sps = span.split(":") - i, j, x, y = map(int, sps[0].split("-")) - - if tr: - a = ancestor(tr, range(x,y)) - try: - fs = frontier(a, range(x,y)) - except: - print >>sys.stderr, "problem with line", tline.strip(), "--", eline.strip() - raise - - #print x, y - #print 'ancestor', a - #print 'frontier', fs - - cat = a.data.tag - for f in fs: - if f.right < x: - cat += '\\' + f.data.tag - else: - break - fs.reverse() - for f in fs: - if f.left >= y: - cat += '/' + f.data.tag - else: - break - else: - cat = 'FAIL' - - print '%d-%d:%s' % (x, y, cat), - print diff --git a/gi/evaluation/tree.py b/gi/evaluation/tree.py deleted file mode 100644 index 702d80b6..00000000 --- a/gi/evaluation/tree.py +++ /dev/null @@ -1,485 +0,0 @@ -import re, sys - -class Symbol: - def __init__(self, nonterm, term=None, var=None): - assert not (term != None and var != None) - self.tag = nonterm - self.token = term - self.variable = var - - def is_variable(self): - return self.variable != None - - def __eq__(self, other): - return self.tag == other.tag and self.token == other.token and self.variable == other.variable - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return hash((self.tag, self.token, self.variable)) - - def __repr__(self): - return str(self) - - def __cmp__(self, other): - return cmp((self.tag, self.token, 
self.variable), - (other.tag, other.token, other.variable)) - - def __str__(self): - parts = [] - if False: # DEPENDENCY - if self.token: - parts.append(str(self.token)) - elif self.variable != None: - parts.append('#%d' % self.variable) - if self.tag: - parts.append(str(self.tag)) - return '/'.join(parts) - else: - if self.tag: - parts.append(str(self.tag)) - if self.token: - parts.append(str(self.token)) - elif self.variable != None: - parts.append('#%d' % self.variable) - return ' '.join(parts) - -class TreeNode: - def __init__(self, data, children=None, order=-1): - self.data = data - self.children = [] - self.order = order - self.parent = None - if children: self.children = children - - def insert(self, child): - self.children.append(child) - child.parent = self - - def leaves(self): - ls = [] - for node in self.xtraversal(): - if not node.children: - ls.append(node.data) - return ls - - def leaf_nodes(self): - ls = [] - for node in self.xtraversal(): - if not node.children: - ls.append(node) - return ls - - def max_depth(self): - d = 1 - for child in self.children: - d = max(d, 1 + child.max_depth()) - if not self.children and self.data.token: - d = 2 - return d - - def max_width(self): - w = 0 - for child in self.children: - w += child.max_width() - return max(1, w) - - def num_internal_nodes(self): - if self.children: - n = 1 - for child in self.children: - n += child.num_internal_nodes() - return n - elif self.data.token: - return 1 - else: - return 0 - - def postorder_traversal(self, visit): - """ - Postorder traversal; no guarantee that terminals will be read in the - correct order for dep. trees. - """ - for child in self.children: - child.traversal(visit) - visit(self) - - def traversal(self, visit): - """ - Preorder for phrase structure trees, and inorder for dependency trees. - In both cases the terminals will be read off in the correct order. 
- """ - visited_self = False - if self.order <= 0: - visited_self = True - visit(self) - - for i, child in enumerate(self.children): - child.traversal(visit) - if i + 1 == self.order: - visited_self = True - visit(self) - - assert visited_self - - def xpostorder_traversal(self): - for child in self.children: - for node in child.xpostorder_traversal(): - yield node - yield self - - def xtraversal(self): - visited_self = False - if self.order <= 0: - visited_self = True - yield self - - for i, child in enumerate(self.children): - for d in child.xtraversal(): - yield d - - if i + 1 == self.order: - visited_self = True - yield self - - assert visited_self - - def xpostorder_traversal(self): - for i, child in enumerate(self.children): - for d in child.xpostorder_traversal(): - yield d - yield self - - def edges(self): - es = [] - self.traverse_edges(lambda h,c: es.append((h,c))) - return es - - def traverse_edges(self, visit): - for child in self.children: - visit(self.data, child.data) - child.traverse_edges(visit) - - def subtrees(self, include_self=False): - st = [] - if include_self: - stack = [self] - else: - stack = self.children[:] - - while stack: - node = stack.pop() - st.append(node) - stack.extend(node.children) - return st - - def find_parent(self, node): - try: - index = self.children.index(node) - return self, index - except ValueError: - for child in self.children: - if isinstance(child, TreeNode): - r = child.find_parent(node) - if r: return r - return None - - def is_ancestor_of(self, node): - if self == node: - return True - for child in self.children: - if child.is_ancestor_of(child): - return True - return False - - def find(self, node): - if self == node: - return self - for child in self.children: - if isinstance(child, TreeNode): - r = child.find(node) - if r: return r - else: - if child == node: - return r - return None - - def equals_ignorecase(self, other): - if not isinstance(other, TreeNode): - return False - if self.data != other.data: - return False - if len(self.children) != len(other.children): - return False - for mc, oc in zip(self.children, other.children): - if isinstance(mc, TreeNode): - if not mc.equals_ignorecase(oc): - return False - else: - if mc.lower() != oc.lower(): - return False - return True - - def node_number(self, numbering, next=0): - if self.order <= 0: - numbering[id(self)] = next - next += 1 - - for i, child in enumerate(self.children): - next = child.node_number(numbering, next) - if i + 1 == self.order: - numbering[id(self)] = next - next += 1 - - return next - - def display_conll(self, out): - numbering = {} - self.node_number(numbering) - next = 0 - self.children[0].traversal(lambda x: \ - out.write('%d\t%s\t%s\t%s\t%s\t_\t%d\tLAB\n' \ - % (numbering[id(x)], x.data.token, x.data.token, - x.data.tag, x.data.tag, numbering[id(x.parent)]))) - out.write('\n') - - def size(self): - sz = 1 - for child in self.children: - sz += child.size() - return sz - - def __eq__(self, other): - if isinstance(other, TreeNode) and self.data == other.data \ - and self.children == other.children: - return True - return False - - def __cmp__(self, other): - if not isinstance(other, TreeNode): return 1 - n = cmp(self.data, other.data) - if n != 0: return n - n = len(self.children) - len(other.children) - if n != 0: return n - for sc, oc in zip(self.children, other.children): - n = cmp(sc, oc) - if n != 0: return n - return 0 - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return hash((self.data, tuple(self.children))) - - 
def __repr__(self): - return str(self) - - def __str__(self): - s = '(' - space = False - if self.order <= 0: - s += str(self.data) - space = True - for i, child in enumerate(self.children): - if space: s += ' ' - s += str(child) - space = True - if i+1 == self.order: - s += ' ' + str(self.data) - return s + ')' - -def read_PSTs(fname): - infile = open(fname) - trees = [] - for line in infile: - trees.append(parse_PST(line.strip())) - infile.close() - return trees - -def parse_PST_multiline(infile, hash_is_var=True): - buf = '' - num_open = 0 - while True: - line = infile.readline() - if not line: - return None - buf += ' ' + line.rstrip() - num_open += line.count('(') - line.count(')') - if num_open == 0: - break - - return parse_PST(buf, hash_is_var) - -def parse_PST(line, hash_is_var=True): - line = line.rstrip() - if not line or line.lower() == 'null': - return None - - # allow either (a/DT) or (DT a) - #parts_re = re.compile(r'(\(*)([^/)]*)(?:/([^)]*))?(\)*)$') - - # only allow (DT a) - parts_re = re.compile(r'(\(*)([^)]*)(\)*)$') - - root = TreeNode(Symbol('TOP')) - stack = [root] - for part in line.rstrip().split(): - m = parts_re.match(part) - #opening, tok_or_tag, tag, closing = m.groups() - opening, tok_or_tag, closing = m.groups() - tag = None - #print 'token', part, 'bits', m.groups() - for i in opening: - node = TreeNode(Symbol(None)) - stack[-1].insert(node) - stack.append(node) - - if tag: - stack[-1].data.tag = tag - if hash_is_var and tok_or_tag.startswith('#'): - stack[-1].data.variable = int(tok_or_tag[1:]) - else: - stack[-1].data.token = tok_or_tag - else: - if stack[-1].data.tag == None: - stack[-1].data.tag = tok_or_tag - else: - if hash_is_var and tok_or_tag.startswith('#'): - try: - stack[-1].data.variable = int(tok_or_tag[1:]) - except ValueError: # it's really a token! 
- #print >>sys.stderr, 'Warning: # used for token:', tok_or_tag - stack[-1].data.token = tok_or_tag - else: - stack[-1].data.token = tok_or_tag - - for i in closing: - stack.pop() - - #assert str(root.children[0]) == line - return root.children[0] - -def read_DTs(fname): - infile = open(fname) - trees = [] - while True: - t = parse_DT(infile) - if t: trees.append(t) - else: break - infile.close() - return trees - -def read_bracketed_DTs(fname): - infile = open(fname) - trees = [] - for line in infile: - trees.append(parse_bracketed_DT(line)) - infile.close() - return trees - -def parse_DT(infile): - tokens = [Symbol('ROOT')] - children = {} - - for line in infile: - parts = line.rstrip().split() - #print parts - if not parts: break - index = len(tokens) - token = parts[1] - tag = parts[3] - parent = int(parts[6]) - if token.startswith('#'): - tokens.append(Symbol(tag, var=int(token[1:]))) - else: - tokens.append(Symbol(tag, token)) - children.setdefault(parent, set()).add(index) - - if len(tokens) == 1: return None - - root = TreeNode(Symbol('ROOT'), [], 0) - schedule = [] - for child in sorted(children[0]): - schedule.append((root, child)) - - while schedule: - parent, index = schedule[0] - del schedule[0] - - node = TreeNode(tokens[index]) - node.order = 0 - parent.insert(node) - - for child in sorted(children.get(index, [])): - schedule.append((node, child)) - if child < index: - node.order += 1 - - return root - -_bracket_split_re = re.compile(r'([(]*)([^)/]*)(?:/([^)]*))?([)]*)') - -def parse_bracketed_DT(line, insert_root=True): - line = line.rstrip() - if not line or line == 'NULL': return None - #print line - - root = TreeNode(Symbol('ROOT')) - stack = [root] - for part in line.rstrip().split(): - m = _bracket_split_re.match(part) - - for c in m.group(1): - node = TreeNode(Symbol(None)) - stack[-1].insert(node) - stack.append(node) - - if m.group(3) != None: - if m.group(2).startswith('#'): - stack[-1].data.variable = int(m.group(2)[1:]) - else: - stack[-1].data.token = m.group(2) - stack[-1].data.tag = m.group(3) - else: - stack[-1].data.tag = m.group(2) - stack[-1].order = len(stack[-1].children) - # FIXME: also check for vars - - for c in m.group(4): - stack.pop() - - assert len(stack) == 1 - if not insert_root or root.children[0].data.tag == 'ROOT': - return root.children[0] - else: - return root - -_bracket_split_notag_re = re.compile(r'([(]*)([^)/]*)([)]*)') - -def parse_bracketed_untagged_DT(line): - line = line.rstrip() - if not line or line == 'NULL': return None - - root = TreeNode(Symbol('TOP')) - stack = [root] - for part in line.rstrip().split(): - m = _bracket_split_notag_re.match(part) - - for c in m.group(1): - node = TreeNode(Symbol(None)) - stack[-1].insert(node) - stack.append(node) - - if stack[-1].data.token == None: - stack[-1].data.token = m.group(2) - stack[-1].order = len(stack[-1].children) - else: - child = TreeNode(Symbol(nonterm=None, term=m.group(2))) - stack[-1].insert(child) - - for c in m.group(3): - stack.pop() - - return root.children[0] diff --git a/gi/markov_al/Makefile.am b/gi/markov_al/Makefile.am deleted file mode 100644 index fe3e3349..00000000 --- a/gi/markov_al/Makefile.am +++ /dev/null @@ -1,6 +0,0 @@ -bin_PROGRAMS = ml - -ml_SOURCES = ml.cc - -AM_CPPFLAGS = -W -Wall -Wno-sign-compare -funroll-loops -I$(top_srcdir)/utils $(GTEST_CPPFLAGS) -I$(top_srcdir)/decoder -AM_LDFLAGS = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz diff --git a/gi/markov_al/README b/gi/markov_al/README deleted file mode 100644 index 
9c10f7cd..00000000 --- a/gi/markov_al/README +++ /dev/null @@ -1,2 +0,0 @@ -Experimental translation models with Markovian dependencies. - diff --git a/gi/markov_al/ml.cc b/gi/markov_al/ml.cc deleted file mode 100644 index 1e71edd6..00000000 --- a/gi/markov_al/ml.cc +++ /dev/null @@ -1,470 +0,0 @@ -#include <iostream> -#include <tr1/unordered_map> - -#include <boost/shared_ptr.hpp> -#include <boost/functional.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "tdict.h" -#include "filelib.h" -#include "sampler.h" -#include "ccrp_onetable.h" -#include "array2d.h" - -using namespace std; -using namespace std::tr1; -namespace po = boost::program_options; - -void PrintTopCustomers(const CCRP_OneTable<WordID>& crp) { - for (CCRP_OneTable<WordID>::const_iterator it = crp.begin(); it != crp.end(); ++it) { - cerr << " " << TD::Convert(it->first) << " = " << it->second << endl; - } -} - -void PrintAlignment(const vector<WordID>& src, const vector<WordID>& trg, const vector<unsigned char>& a) { - cerr << TD::GetString(src) << endl << TD::GetString(trg) << endl; - Array2D<bool> al(src.size(), trg.size()); - for (int i = 0; i < a.size(); ++i) - if (a[i] != 255) al(a[i], i) = true; - cerr << al << endl; -} - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("input,i",po::value<string>(),"Read parallel data from") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -struct Unigram; -struct Bigram { - Bigram() : trg(), cond() {} - Bigram(WordID prev, WordID cur, WordID t) : trg(t) { cond.first = prev; cond.second = cur; } - const pair<WordID,WordID>& ConditioningPair() const { - return cond; - } - WordID& prev_src() { return cond.first; } - WordID& cur_src() { return cond.second; } - const WordID& prev_src() const { return cond.first; } - const WordID& cur_src() const { return cond.second; } - WordID trg; - private: - pair<WordID, WordID> cond; -}; - -struct Unigram { - Unigram() : cur_src(), trg() {} - Unigram(WordID s, WordID t) : cur_src(s), trg(t) {} - WordID cur_src; - WordID trg; -}; - -ostream& operator<<(ostream& os, const Bigram& b) { - os << "( " << TD::Convert(b.trg) << " | " << TD::Convert(b.prev_src()) << " , " << TD::Convert(b.cur_src()) << " )"; - return os; -} - -ostream& operator<<(ostream& os, const Unigram& u) { - os << "( " << TD::Convert(u.trg) << " | " << TD::Convert(u.cur_src) << " )"; - return os; -} - -bool operator==(const Bigram& a, const Bigram& b) { - return a.trg == b.trg && a.cur_src() == b.cur_src() && a.prev_src() == b.prev_src(); -} - -bool operator==(const Unigram& a, const Unigram& b) { - return a.trg == b.trg && a.cur_src == b.cur_src; -} - -size_t 
hash_value(const Bigram& b) { - size_t h = boost::hash_value(b.prev_src()); - boost::hash_combine(h, boost::hash_value(b.cur_src())); - boost::hash_combine(h, boost::hash_value(b.trg)); - return h; -} - -size_t hash_value(const Unigram& u) { - size_t h = boost::hash_value(u.cur_src); - boost::hash_combine(h, boost::hash_value(u.trg)); - return h; -} - -void ReadParallelCorpus(const string& filename, - vector<vector<WordID> >* f, - vector<vector<WordID> >* e, - set<WordID>* vocab_f, - set<WordID>* vocab_e) { - f->clear(); - e->clear(); - vocab_f->clear(); - vocab_e->clear(); - istream* in; - if (filename == "-") - in = &cin; - else - in = new ifstream(filename.c_str()); - assert(*in); - string line; - const WordID kDIV = TD::Convert("|||"); - vector<WordID> tmp; - while(*in) { - getline(*in, line); - if (line.empty() && !*in) break; - e->push_back(vector<int>()); - f->push_back(vector<int>()); - vector<int>& le = e->back(); - vector<int>& lf = f->back(); - tmp.clear(); - TD::ConvertSentence(line, &tmp); - bool isf = true; - for (unsigned i = 0; i < tmp.size(); ++i) { - const int cur = tmp[i]; - if (isf) { - if (kDIV == cur) { isf = false; } else { - lf.push_back(cur); - vocab_f->insert(cur); - } - } else { - assert(cur != kDIV); - le.push_back(cur); - vocab_e->insert(cur); - } - } - assert(isf == false); - } - if (in != &cin) delete in; -} - -struct UnigramModel { - UnigramModel(size_t src_voc_size, size_t trg_voc_size) : - unigrams(TD::NumWords() + 1, CCRP_OneTable<WordID>(1,1,1,1)), - p0(1.0 / trg_voc_size) {} - - void increment(const Bigram& b) { - unigrams[b.cur_src()].increment(b.trg); - } - - void decrement(const Bigram& b) { - unigrams[b.cur_src()].decrement(b.trg); - } - - double prob(const Bigram& b) const { - const double q0 = unigrams[b.cur_src()].prob(b.trg, p0); - return q0; - } - - double LogLikelihood() const { - double llh = 0; - for (unsigned i = 0; i < unigrams.size(); ++i) { - const CCRP_OneTable<WordID>& crp = unigrams[i]; - if (crp.num_customers() > 0) { - llh += crp.log_crp_prob(); - llh += crp.num_tables() * log(p0); - } - } - return llh; - } - - void ResampleHyperparameters(MT19937* rng) { - for (unsigned i = 0; i < unigrams.size(); ++i) - unigrams[i].resample_hyperparameters(rng); - } - - vector<CCRP_OneTable<WordID> > unigrams; // unigrams[src].prob(trg, p0) = p(trg|src) - - const double p0; -}; - -struct BigramModel { - BigramModel(size_t src_voc_size, size_t trg_voc_size) : - unigrams(TD::NumWords() + 1, CCRP_OneTable<WordID>(1,1,1,1)), - p0(1.0 / trg_voc_size) {} - - void increment(const Bigram& b) { - BigramMap::iterator it = bigrams.find(b.ConditioningPair()); - if (it == bigrams.end()) { - it = bigrams.insert(make_pair(b.ConditioningPair(), CCRP_OneTable<WordID>(1,1,1,1))).first; - } - if (it->second.increment(b.trg)) - unigrams[b.cur_src()].increment(b.trg); - } - - void decrement(const Bigram& b) { - BigramMap::iterator it = bigrams.find(b.ConditioningPair()); - assert(it != bigrams.end()); - if (it->second.decrement(b.trg)) { - unigrams[b.cur_src()].decrement(b.trg); - if (it->second.num_customers() == 0) - bigrams.erase(it); - } - } - - double prob(const Bigram& b) const { - const double q0 = unigrams[b.cur_src()].prob(b.trg, p0); - const BigramMap::const_iterator it = bigrams.find(b.ConditioningPair()); - if (it == bigrams.end()) return q0; - return it->second.prob(b.trg, q0); - } - - double LogLikelihood() const { - double llh = 0; - for (unsigned i = 0; i < unigrams.size(); ++i) { - const CCRP_OneTable<WordID>& crp = unigrams[i]; - if 
(crp.num_customers() > 0) { - llh += crp.log_crp_prob(); - llh += crp.num_tables() * log(p0); - } - } - for (BigramMap::const_iterator it = bigrams.begin(); it != bigrams.end(); ++it) { - const CCRP_OneTable<WordID>& crp = it->second; - const WordID cur_src = it->first.second; - llh += crp.log_crp_prob(); - for (CCRP_OneTable<WordID>::const_iterator bit = crp.begin(); bit != crp.end(); ++bit) { - llh += log(unigrams[cur_src].prob(bit->second, p0)); - } - } - return llh; - } - - void ResampleHyperparameters(MT19937* rng) { - for (unsigned i = 0; i < unigrams.size(); ++i) - unigrams[i].resample_hyperparameters(rng); - for (BigramMap::iterator it = bigrams.begin(); it != bigrams.end(); ++it) - it->second.resample_hyperparameters(rng); - } - - typedef unordered_map<pair<WordID,WordID>, CCRP_OneTable<WordID>, boost::hash<pair<WordID,WordID> > > BigramMap; - BigramMap bigrams; // bigrams[(src-1,src)].prob(trg, q0) = p(trg|src,src-1) - vector<CCRP_OneTable<WordID> > unigrams; // unigrams[src].prob(trg, p0) = p(trg|src) - - const double p0; -}; - -struct BigramAlignmentModel { - BigramAlignmentModel(size_t src_voc_size, size_t trg_voc_size) : bigrams(TD::NumWords() + 1, CCRP_OneTable<WordID>(1,1,1,1)), p0(1.0 / src_voc_size) {} - void increment(WordID prev, WordID next) { - bigrams[prev].increment(next); // hierarchy? - } - void decrement(WordID prev, WordID next) { - bigrams[prev].decrement(next); // hierarchy? - } - double prob(WordID prev, WordID next) { - return bigrams[prev].prob(next, p0); - } - double LogLikelihood() const { - double llh = 0; - for (unsigned i = 0; i < bigrams.size(); ++i) { - const CCRP_OneTable<WordID>& crp = bigrams[i]; - if (crp.num_customers() > 0) { - llh += crp.log_crp_prob(); - llh += crp.num_tables() * log(p0); - } - } - return llh; - } - - vector<CCRP_OneTable<WordID> > bigrams; // bigrams[prev].prob(next, p0) = p(next|prev) - const double p0; -}; - -struct Alignment { - vector<unsigned char> a; -}; - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - const unsigned samples = conf["samples"].as<unsigned>(); - - boost::shared_ptr<MT19937> prng; - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - - vector<vector<WordID> > corpuse, corpusf; - set<WordID> vocabe, vocabf; - cerr << "Reading corpus...\n"; - ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe); - cerr << "F-corpus size: " << corpusf.size() << " sentences\t (" << vocabf.size() << " word types)\n"; - cerr << "E-corpus size: " << corpuse.size() << " sentences\t (" << vocabe.size() << " word types)\n"; - assert(corpusf.size() == corpuse.size()); - const size_t corpus_len = corpusf.size(); - const WordID kNULL = TD::Convert("<eps>"); - const WordID kBOS = TD::Convert("<s>"); - const WordID kEOS = TD::Convert("</s>"); - Bigram TT(kBOS, TD::Convert("我"), TD::Convert("i")); - Bigram TT2(kBOS, TD::Convert("要"), TD::Convert("i")); - - UnigramModel model(vocabf.size(), vocabe.size()); - vector<Alignment> alignments(corpus_len); - for (unsigned ci = 0; ci < corpus_len; ++ci) { - const vector<WordID>& src = corpusf[ci]; - const vector<WordID>& trg = corpuse[ci]; - vector<unsigned char>& alg = alignments[ci].a; - alg.resize(trg.size()); - int lenp1 = src.size() + 1; - WordID prev_src = kBOS; - for (int j = 0; j < trg.size(); ++j) { - int samp = lenp1 * rng.next(); - --samp; - if (samp < 0) samp = 255; - alg[j] = samp; - WordID cur_src 
= (samp == 255 ? kNULL : src[alg[j]]); - Bigram b(prev_src, cur_src, trg[j]); - model.increment(b); - prev_src = cur_src; - } - Bigram b(prev_src, kEOS, kEOS); - model.increment(b); - } - cerr << "Initial LLH: " << model.LogLikelihood() << endl; - - SampleSet<double> ss; - for (unsigned si = 0; si < 50; ++si) { - for (unsigned ci = 0; ci < corpus_len; ++ci) { - const vector<WordID>& src = corpusf[ci]; - const vector<WordID>& trg = corpuse[ci]; - vector<unsigned char>& alg = alignments[ci].a; - WordID prev_src = kBOS; - for (unsigned j = 0; j < trg.size(); ++j) { - unsigned char& a_j = alg[j]; - WordID cur_e_a_j = (a_j == 255 ? kNULL : src[a_j]); - Bigram b(prev_src, cur_e_a_j, trg[j]); - //cerr << "DEC: " << b << "\t" << nextb << endl; - model.decrement(b); - ss.clear(); - for (unsigned i = 0; i <= src.size(); ++i) { - const WordID cur_src = (i ? src[i-1] : kNULL); - b.cur_src() = cur_src; - ss.add(model.prob(b)); - } - int sampled_a_j = rng.SelectSample(ss); - a_j = (sampled_a_j ? sampled_a_j - 1 : 255); - cur_e_a_j = (a_j == 255 ? kNULL : src[a_j]); - b.cur_src() = cur_e_a_j; - //cerr << "INC: " << b << "\t" << nextb << endl; - model.increment(b); - prev_src = cur_e_a_j; - } - } - cerr << '.' << flush; - if (si % 10 == 9) { - cerr << "[LLH prev=" << model.LogLikelihood(); - //model.ResampleHyperparameters(&rng); - cerr << " new=" << model.LogLikelihood() << "]\n"; - //pair<WordID,WordID> xx = make_pair(kBOS, TD::Convert("我")); - //PrintTopCustomers(model.bigrams.find(xx)->second); - cerr << "p(" << TT << ") = " << model.prob(TT) << endl; - cerr << "p(" << TT2 << ") = " << model.prob(TT2) << endl; - PrintAlignment(corpusf[0], corpuse[0], alignments[0].a); - } - } - { - // MODEL 2 - BigramModel model(vocabf.size(), vocabe.size()); - BigramAlignmentModel amodel(vocabf.size(), vocabe.size()); - for (unsigned ci = 0; ci < corpus_len; ++ci) { - const vector<WordID>& src = corpusf[ci]; - const vector<WordID>& trg = corpuse[ci]; - vector<unsigned char>& alg = alignments[ci].a; - WordID prev_src = kBOS; - for (int j = 0; j < trg.size(); ++j) { - WordID cur_src = (alg[j] == 255 ? kNULL : src[alg[j]]); - Bigram b(prev_src, cur_src, trg[j]); - model.increment(b); - amodel.increment(prev_src, cur_src); - prev_src = cur_src; - } - amodel.increment(prev_src, kEOS); - Bigram b(prev_src, kEOS, kEOS); - model.increment(b); - } - cerr << "Initial LLH: " << model.LogLikelihood() << " " << amodel.LogLikelihood() << endl; - - SampleSet<double> ss; - for (unsigned si = 0; si < samples; ++si) { - for (unsigned ci = 0; ci < corpus_len; ++ci) { - const vector<WordID>& src = corpusf[ci]; - const vector<WordID>& trg = corpuse[ci]; - vector<unsigned char>& alg = alignments[ci].a; - WordID prev_src = kBOS; - for (unsigned j = 0; j < trg.size(); ++j) { - unsigned char& a_j = alg[j]; - WordID cur_e_a_j = (a_j == 255 ? kNULL : src[a_j]); - Bigram b(prev_src, cur_e_a_j, trg[j]); - WordID next_src = kEOS; - WordID next_trg = kEOS; - if (j < (trg.size() - 1)) { - next_src = (alg[j+1] == 255 ? kNULL : src[alg[j + 1]]); - next_trg = trg[j + 1]; - } - Bigram nextb(cur_e_a_j, next_src, next_trg); - //cerr << "DEC: " << b << "\t" << nextb << endl; - model.decrement(b); - model.decrement(nextb); - amodel.decrement(prev_src, cur_e_a_j); - amodel.decrement(cur_e_a_j, next_src); - ss.clear(); - for (unsigned i = 0; i <= src.size(); ++i) { - const WordID cur_src = (i ? 
src[i-1] : kNULL); - b.cur_src() = cur_src; - ss.add(model.prob(b) * model.prob(nextb) * amodel.prob(prev_src, cur_src) * amodel.prob(cur_src, next_src)); - //cerr << log(ss[ss.size() - 1]) << "\t" << b << endl; - } - int sampled_a_j = rng.SelectSample(ss); - a_j = (sampled_a_j ? sampled_a_j - 1 : 255); - cur_e_a_j = (a_j == 255 ? kNULL : src[a_j]); - b.cur_src() = cur_e_a_j; - nextb.prev_src() = cur_e_a_j; - //cerr << "INC: " << b << "\t" << nextb << endl; - //exit(1); - model.increment(b); - model.increment(nextb); - amodel.increment(prev_src, cur_e_a_j); - amodel.increment(cur_e_a_j, next_src); - prev_src = cur_e_a_j; - } - } - cerr << '.' << flush; - if (si % 10 == 9) { - cerr << "[LLH prev=" << (model.LogLikelihood() + amodel.LogLikelihood()); - //model.ResampleHyperparameters(&rng); - cerr << " new=" << model.LogLikelihood() << "]\n"; - pair<WordID,WordID> xx = make_pair(kBOS, TD::Convert("我")); - cerr << "p(" << TT << ") = " << model.prob(TT) << endl; - cerr << "p(" << TT2 << ") = " << model.prob(TT2) << endl; - pair<WordID,WordID> xx2 = make_pair(kBOS, TD::Convert("要")); - PrintTopCustomers(model.bigrams.find(xx)->second); - //PrintTopCustomers(amodel.bigrams[TD::Convert("<s>")]); - //PrintTopCustomers(model.unigrams[TD::Convert("<eps>")]); - PrintAlignment(corpusf[0], corpuse[0], alignments[0].a); - } - } - } - return 0; -} - diff --git a/gi/morf-segmentation/filter_docs.pl b/gi/morf-segmentation/filter_docs.pl deleted file mode 100755 index a78575da..00000000 --- a/gi/morf-segmentation/filter_docs.pl +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/perl - -#Filters the phrase&cluster document set to retain only documents that correspond to words or morphs, i.e. not crossing word boundaries. - -#Usage: filter_docs.pl [mark] -# STDIN: data in the doc.txt format (i.e. phrase\t blahblah ), most likely from cdec extractor -# STDOUT: the matching subset, same format - -use utf8; -my $letter=qr/\p{L}\p{M}*/; # see http://www.regular-expressions.info/unicode.html - -my $morph=qr/$letter+/; - -my $m = "##"; # marker used to indicate morphemes -if ((scalar @ARGV) >= 1) { - $m = $ARGV[0]; - shift; -} -print STDERR "Using $m to filter for morphemes\n"; - -my $expr = qr/^($morph\Q$m\E)? 
?(\Q$m\E$morph\Q$m\E)* ?(\Q$m\E$morph)?\t/; #\Q and \E bounded sections are escaped -while(<>) { - /$expr/ && print; -} diff --git a/gi/morf-segmentation/invalid_vocab.patterns b/gi/morf-segmentation/invalid_vocab.patterns deleted file mode 100644 index 473ce1b1..00000000 --- a/gi/morf-segmentation/invalid_vocab.patterns +++ /dev/null @@ -1,6 +0,0 @@ -[[:digit:]] -[] !"#$%&()*+,./:;<=>?@[\^_`{|}~] -^'$ --$ -^- -^$ diff --git a/gi/morf-segmentation/linestripper.py b/gi/morf-segmentation/linestripper.py deleted file mode 100755 index 04e9044a..00000000 --- a/gi/morf-segmentation/linestripper.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/python - -import sys - -#linestripper file file maxlen [numlines] - -if len(sys.argv) < 3: - print "linestripper file1 file2 maxlen [numlines]" - print " outputs subset of file1 to stdout, ..of file2 to stderr" - sys.exit(1) - - -f1 = open(sys.argv[1],'r') -f2 = open(sys.argv[2],'r') - -maxlen=int(sys.argv[3]) -numlines = 0 - -if len(sys.argv) > 4: - numlines = int(sys.argv[4]) - -count=0 -for line1 in f1: - line2 = f2.readline() - - w1 = len(line1.strip().split()) - w2 = len(line2.strip().split()) - - if w1 <= maxlen and w2 <= maxlen: - count = count + 1 - sys.stdout.write(line1) - sys.stderr.write(line2) - - if numlines > 0 and count >= numlines: - break - -f1.close() -f2.close() - - diff --git a/gi/morf-segmentation/morf-pipeline.pl b/gi/morf-segmentation/morf-pipeline.pl deleted file mode 100755 index 46eb5b46..00000000 --- a/gi/morf-segmentation/morf-pipeline.pl +++ /dev/null @@ -1,486 +0,0 @@ -#!/usr/bin/perl -w -use strict; -use File::Copy; - - -# Preprocessing pipeline to take care of word segmentation -# Learns a segmentation model for each/either side of the parallel corpus using all train/dev/test data -# Applies the segmentation where necessary. -# Learns word alignments on the preprocessed training data. -# Outputs script files used later to score output. 
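#
# Rough order of operations in the main body below (a sketch of this
# script's own subroutines, in the order they are invoked):
#
#   filter_corpus();                  # length-filter / optionally subsample the training corpus
#   learn_segmentation('src'/'trg');  # train a Morfessor model for each side requested
#   apply_segmentation_side(...);     # segment train/dev/test data with that model
#   write_eval_sh(...);               # emit eval-devtest.sh for later scoring
#   place_missing_data_side(...);     # copy through any files splitting did not produce
#   do_align();                       # run the Berkeley aligner on the processed corpus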
- - -my $SCRIPT_DIR; BEGIN { use Cwd qw/ abs_path cwd /; use File::Basename; $SCRIPT_DIR = dirname(abs_path($0)); push @INC, $SCRIPT_DIR; } - -use Getopt::Long "GetOptions"; - -my $GZIP = 'gzip'; -my $ZCAT = 'gunzip -c'; -my $SED = 'sed -e'; - -my $MORF_TRAIN = "$SCRIPT_DIR/morftrain.sh"; -my $MORF_SEGMENT = "$SCRIPT_DIR/morfsegment.py"; - -my $LINESTRIPPER = "$SCRIPT_DIR/linestripper.py"; -my $ALIGNER = "/export/ws10smt/software/berkeleyaligner/berkeleyaligner.jar"; -#java -d64 -Xmx10g -jar $ALIGNER ++word-align.conf >> aligner.log -assert_exec($MORF_TRAIN, $LINESTRIPPER, $MORF_SEGMENT, $ALIGNER); - -my $OUTPUT = './morfwork'; -my $PPL_SRC = 50; -my $PPL_TRG = 50; -my $MARKER = "#"; -my $MAX_WORDS = 40; -my $SENTENCES;# = 100000; -my $SPLIT_TYPE = ""; #possible values: s, t, st, or (empty string) -my $NAME_SHORTCUT; - -usage() unless &GetOptions('max_words=i' => \$MAX_WORDS, - 'output=s' => \$OUTPUT, - 'ppl_src=i' => \$PPL_SRC, - 'ppl_trg=i' => \$PPL_TRG, - 'sentences=i' => \$SENTENCES, - 'marker=s' => \$MARKER, - 'split=s' => \$SPLIT_TYPE, - 'get_name_only' => \$NAME_SHORTCUT, - ); - -usage() unless scalar @ARGV >= 2; - -my %CORPUS; # for (src,trg) it has (orig, name, filtered, final) - -$CORPUS{'src'}{'orig'} = $ARGV[0]; -open F, "<$CORPUS{'src'}{'orig'}" or die "Can't read $CORPUS{'src'}{'orig'}: $!"; close F; -$CORPUS{'src'}{'name'} = get_basename($CORPUS{'src'}{'orig'}); - -$CORPUS{'trg'}{'orig'} = $ARGV[1]; -open F, "<$CORPUS{'trg'}{'orig'}" or die "Can't read $CORPUS{'trg'}{'orig'}: $!"; close F; -$CORPUS{'trg'}{'name'} = get_basename($CORPUS{'trg'}{'orig'}); - -my %DEV; # for (src,trg) has (orig, final.split final.unsplit -if (@ARGV >= 4) { - $DEV{'src'}{'orig'} = $ARGV[2]; - open F, "<$DEV{'src'}{'orig'}" or die "Can't read $DEV{'src'}{'orig'}: $!"; close F; - $DEV{'src'}{'name'} = get_basename($DEV{'src'}{'orig'}); - $DEV{'trg'}{'orig'} = $ARGV[3]; - open F, "<$DEV{'trg'}{'orig'}" or die "Can't read $DEV{'trg'}{'orig'}: $!"; close F; - $DEV{'trg'}{'name'} = get_basename($DEV{'trg'}{'orig'}); -} - -my %TEST; # for (src,trg) has (orig, name) -if (@ARGV >= 6) { - $TEST{'src'}{'orig'} = $ARGV[4]; - open F, "<$TEST{'src'}{'orig'}" or die "Can't read $TEST{'src'}{'orig'}: $!"; close F; - $TEST{'src'}{'name'} = get_basename($TEST{'src'}{'orig'}); - $TEST{'trg'}{'orig'} = $ARGV[5]; - open F, "<$TEST{'trg'}{'orig'}" or die "Can't read $TEST{'trg'}{'orig'}: $!"; close F; - $TEST{'trg'}{'name'} = get_basename($TEST{'trg'}{'orig'}); -} - -my $SPLIT_SRC; #use these to check whether that part is being split -my $SPLIT_TRG; - -#OUTPUT WILL GO IN THESE -my $CORPUS_DIR = $OUTPUT . '/' . corpus_dir(); #subsampled corpus -my $MODEL_SRC_DIR = $OUTPUT . '/' . model_dir("src"); #splitting.. -my $MODEL_TRG_DIR = $OUTPUT . '/' . model_dir("trg"); # .. models -my $PROCESSED_DIR = $OUTPUT . '/' . processed_dir(); #segmented copora+alignments -my $ALIGNMENT_DIR = $PROCESSED_DIR . '/alignments'; - -$CORPUS{'src'}{'filtered'} = $CORPUS_DIR . "/$CORPUS{'src'}{'name'}"; -$CORPUS{'trg'}{'filtered'} = $CORPUS_DIR . 
"/$CORPUS{'trg'}{'name'}"; - -print STDERR "Output: $OUTPUT\n"; -print STDERR "Corpus: $CORPUS_DIR\n"; -print STDERR "Model-src: $MODEL_SRC_DIR\n"; -print STDERR "Model-trg: $MODEL_TRG_DIR\n"; -print STDERR "Finaldir: $PROCESSED_DIR\n"; - -safemkdir($OUTPUT) or die "Couldn't create output directory $OUTPUT: $!"; -safemkdir($CORPUS_DIR) or die "Couldn't create output directory $CORPUS_DIR: $!"; -filter_corpus(); - -safemkdir($PROCESSED_DIR); -safemkdir($ALIGNMENT_DIR); - -if ($SPLIT_SRC) { - safemkdir($MODEL_SRC_DIR) or die "Couldn't create output directory $MODEL_SRC_DIR: $!"; - learn_segmentation("src"); - apply_segmentation_side("src", $MODEL_SRC_DIR); -} - -#assume that unsplit hypotheses will be scored against an aritificially split target test set; thus obtain a target splitting model -#TODO: add a flag to override this behaviour -safemkdir($MODEL_TRG_DIR) or die "Couldn't create output directory $MODEL_TRG_DIR: $!"; -learn_segmentation("trg"); -$TEST{'trg'}{'finalunsplit'} = "$PROCESSED_DIR/$TEST{'trg'}{'name'}"; -copy($TEST{'trg'}{'orig'}, $TEST{'trg'}{'finalunsplit'}) or die "Could not copy unsegmented test set"; - -if ($SPLIT_TRG) { - apply_segmentation_side("trg", $MODEL_TRG_DIR); - } else { - $TEST{'trg'}{'finalsplit'} = "$PROCESSED_DIR/$TEST{'trg'}{'name'}.split"; - apply_segmentation_any($MODEL_TRG_DIR, $TEST{'trg'}{'finalunsplit'}, $TEST{'trg'}{'finalsplit'}); -} - -write_eval_sh("$PROCESSED_DIR/eval-devtest.sh"); - -#copy corpora if they haven't been put in place by splitting operations -place_missing_data_side('src'); -place_missing_data_side('trg'); - -do_align(); - -if ($CORPUS{'src'}{'orig'} && $DEV{'src'}{'orig'} && $TEST{'src'}{'orig'}) { - print STDERR "Putting the config file entry in $PROCESSED_DIR/exp.config\n"; -#format is: - # nlfr100k_unsplit /export/ws10smt/jan/nlfr/morfwork/s100k.w40.sp_0 corpus.nl-fr.al fr-3.lm.gz dev.nl dev.fr test2008.nl eval-devtest.sh - my $line = split_name() . " $PROCESSED_DIR corpus.src-trg.al LMFILE.lm.gz"; - $line = $line . " $DEV{'src'}{'name'} $DEV{'trg'}{'name'}"; - $line = $line . " " . get_basename($TEST{'src'}{$SPLIT_SRC ? "finalsplit" : "finalunsplit"}) . " eval-devtest.sh"; - safesystem("echo '$line' > $PROCESSED_DIR/exp.config"); -} - -system("date"); -print STDERR "All done. You now need to train a language model (if target split), put it in the right dir and update the config file.\n\n"; - -############################## BILINGUAL ################################### - -sub filter_corpus { - print STDERR "\n!!!FILTERING TRAINING COPRUS!!!\n"; - if ( -f $CORPUS{'src'}{'filtered'} && -f $CORPUS{'trg'}{'filtered'}) { - print STDERR "$CORPUS{'src'}{'filtered'} and $CORPUS{'trg'}{'filtered'} exist, reusing...\n"; - return; - } - my $args = "$CORPUS{'src'}{'orig'} $CORPUS{'trg'}{'orig'} $MAX_WORDS"; - if ($SENTENCES) { $args = $args . 
" $SENTENCES"; } - safesystem("$LINESTRIPPER $args 1> $CORPUS{'src'}{'filtered'} 2> $CORPUS{'trg'}{'filtered'}") or die "Failed to filter training corpus for length."; -} - -sub learn_segmentation -{ - my $WHICH = shift; - my $corpus; my $dev; my $test; my $moddir; my $ppl; - - $corpus = $CORPUS{$WHICH}{'filtered'}; - $dev = $DEV{$WHICH}{'orig'}; - $test = $TEST{$WHICH}{'orig'}; - - if ($WHICH eq "src") { - $moddir = $MODEL_SRC_DIR; - $ppl = $PPL_SRC; - } else { - $moddir = $MODEL_TRG_DIR; - $ppl = $PPL_TRG; - } - my $cmd = "cat $corpus"; - if ($dev) { $cmd = "$cmd $dev"; } - if ($test) { $cmd = "$cmd $test"; } - my $tmpfile = "$CORPUS_DIR/all.tmp.gz"; - safesystem("$cmd | $GZIP > $tmpfile") or die "Failed to concatenate data for model learning.."; - assert_marker($tmpfile); - - learn_segmentation_side($tmpfile, $moddir, $ppl, $WHICH); - safesystem("rm $tmpfile"); -} - -sub do_align { - print STDERR "\n!!!WORD ALIGNMENT!!!\n"; - system("date"); - - my $ALIGNMENTS = "$ALIGNMENT_DIR/training.align"; - if ( -f $ALIGNMENTS ) { - print STDERR "$ALIGNMENTS exists, reusing...\n"; - return; - } - my $conf_file = "$ALIGNMENT_DIR/word-align.conf"; - - #decorate training files with identifiers to stop the aligner from training on dev and test when rerun in future. - safesystem("cd $PROCESSED_DIR && ln -s $CORPUS{'src'}{'name'} corpus.src") or die "Failed to symlink: $!"; - safesystem("cd $PROCESSED_DIR && ln -s $CORPUS{'trg'}{'name'} corpus.trg") or die "Failed to symlink: $!"; - - write_wconf($conf_file, $PROCESSED_DIR); - system("java -d64 -Xmx24g -jar $ALIGNER ++$conf_file > $ALIGNMENT_DIR/aligner.log"); - - if (! -f $ALIGNMENTS) { die "Failed to run word alignment.";} - - my $cmd = "paste $PROCESSED_DIR/corpus.src $PROCESSED_DIR/corpus.trg $ALIGNMENTS"; - $cmd = $cmd . " | sed 's/\\t/ \|\|\| /g' > $PROCESSED_DIR/corpus.src-trg.al"; - safesystem($cmd) or die "Failed to paste into aligned corpus file."; - -} - -############################# MONOLINGUAL ################################# - -#copy the necessary data files that weren't place by segmentation -sub place_missing_data_side { - my $side = shift; - - ifne_copy($CORPUS{$side}{'filtered'}, "$PROCESSED_DIR/$CORPUS{$side}{'name'}") ; - - if ($DEV{$side}{'orig'} && ! -f "$PROCESSED_DIR/$DEV{$side}{'name'}") { - $DEV{$side}{'final'} = "$PROCESSED_DIR/$DEV{$side}{'name'}"; - copy($DEV{$side}{'orig'}, $DEV{$side}{'final'}) or die "Copy failed: $!"; - } - - if ($TEST{$side}{'orig'} && ! -f "$PROCESSED_DIR/$TEST{$side}{'name'}" && ! $TEST{$side}{'finalunsplit'}) { - $TEST{$side}{'finalunsplit'} = "$PROCESSED_DIR/$TEST{$side}{'name'}"; - copy($TEST{$side}{'orig'}, $TEST{$side}{'finalunsplit'}) or die "Copy failed: $!"; - } - -} - -sub apply_segmentation_side { - my ($side, $moddir) = @_; - - print STDERR "\n!!!APPLYING SEGMENTATION MODEL ($side)!!!\n"; - apply_segmentation_any($moddir, $CORPUS{$side}{'filtered'}, "$PROCESSED_DIR/$CORPUS{$side}{'name'}"); - if ($DEV{$side}{'orig'}) { - $DEV{$side}{'final'} = "$PROCESSED_DIR/$DEV{$side}{'name'}"; - apply_segmentation_any($moddir, $DEV{$side}{'orig'}, "$DEV{$side}{'final'}"); - } - if ($TEST{$side}{'orig'}) { - $TEST{$side}{'finalsplit'} = "$PROCESSED_DIR/$TEST{$side}{'name'}.split"; - apply_segmentation_any($moddir, $TEST{$side}{'orig'}, $TEST{$side}{'finalsplit'} ); - } - -} - -sub learn_segmentation_side { - my($INPUT_FILE, $SEGOUT_DIR, $PPL, $LANG) = @_; - - print STDERR "\n!!!LEARNING SEGMENTATION MODEL ($LANG)!!!\n"; - system("date"); - my $SEG_FILE = $SEGOUT_DIR . 
"/segmentation.ready"; - if ( -f $SEG_FILE) { - print STDERR "$SEG_FILE exists, reusing...\n"; - return; - } - my $cmd = "$MORF_TRAIN $INPUT_FILE $SEGOUT_DIR $PPL \"$MARKER\""; - safesystem($cmd) or die "Failed to learn segmentation model"; -} - -sub apply_segmentation_any { - my($moddir, $datfile, $outfile) = @_; - if ( -f $outfile) { - print STDERR "$outfile exists, reusing...\n"; - return; - } - - my $args = "$moddir/inputvocab.gz $moddir/segmentation.ready \"$MARKER\""; - safesystem("cat $datfile | $MORF_SEGMENT $args &> $outfile") or die "Could not segment $datfile"; -} - -##################### PATH FUNCTIONS ########################## - -sub beautify_numlines { - return ($SENTENCES ? $SENTENCES : "_all"); -} - -sub corpus_dir { - return "s" . beautify_numlines() . ".w" . $MAX_WORDS; -} - -sub model_dir { - my $lang = shift; - if ($lang eq "src") { - return corpus_dir() . ".PPL" . $PPL_SRC . ".src"; - } elsif ($lang eq "trg") { - return corpus_dir() . ".PPL" . $PPL_TRG . ".trg"; - } else { - return "PPLundef"; - } -} - -sub processed_dir { - return corpus_dir() . "." . split_name(); -} - -########################## HELPER FUNCTIONS ############################ - -sub ifne_copy { - my ($src, $dest) = @_; - if (! -f $dest) { - copy($src, $dest) or die "Copy failed: $!"; - } -} - -sub split_name { - #parses SPLIT_TYPE, which can have the following values - # t|s|ts|st (last 2 are equiv) - # or is undefined when no splitting is done - my $name = ""; - - if ($SPLIT_TYPE) { - $SPLIT_SRC = lc($SPLIT_TYPE) =~ /s/; - $SPLIT_TRG = lc($SPLIT_TYPE) =~ /t/; - $name = $name . ($SPLIT_SRC ? $PPL_SRC : "0"); - $name = $name . "_" . ($SPLIT_TRG ? $PPL_TRG : "0"); - } else { - #no splitting - $name = "0"; - } - - return "sp_" . $name; - -} - -sub usage { - print <<EOT; - -Usage: $0 [OPTIONS] corpus.src corpus.trg [dev.src dev.trg [test.src test.trg]] - -Learns a segmentation model and splits up corpora as necessary. Word alignments are trained on a specified subset of the training corpus. - -EOT - exit 1; -}; - -sub safemkdir { - my $dir = shift; - if (-d $dir) { return 1; } - return mkdir($dir); -} - -sub assert_exec { - my @files = @_; - for my $file (@files) { - die "Can't find $file - did you run make?\n" unless -e $file; - die "Can't execute $file" unless -e $file; - } -}; -sub safesystem { - print STDERR "Executing: @_\n"; - system(@_); - if ($? == -1) { - print STDERR "ERROR: Failed to execute: @_\n $!\n"; - exit(1); - } - elsif ($? & 127) { - printf STDERR "ERROR: Execution of: @_\n died with signal %d, %s coredump\n", - ($? & 127), ($? & 128) ? 'with' : 'without'; - exit(1); - } - else { - my $exitcode = $? >> 8; - print STDERR "Exit code: $exitcode\n" if $exitcode; - return ! $exitcode; - } -} - -sub get_basename -{ - my $x = shift; - $x = `basename $x`; - $x =~ s/\n//; - return $x; -} - -sub assert_marker { - my $file = shift; - my $result = `zcat $file| grep '$MARKER' | wc -l` or die "Cannot read $file: $!"; - print $result; - if (scalar($result) != 0) { die "Data contains marker '$MARKER'; use something else.";} -} -########################### Dynamic config files ############################## - -sub write_wconf { - my ($filename, $train_dir) = @_; - open WCONF, ">$filename" or die "Can't write $filename: $!"; - - print WCONF <<EOT; -## ---------------------- -## This is an example training script for the Berkeley -## word aligner. In this configuration it uses two HMM -## alignment models trained jointly and then decoded -## using the competitive thresholding heuristic. 
- -########################################## -# Training: Defines the training regimen -########################################## -forwardModels MODEL1 HMM -reverseModels MODEL1 HMM -mode JOINT JOINT -iters 5 5 - -############################################### -# Execution: Controls output and program flow -############################################### -execDir $ALIGNMENT_DIR -create -overwriteExecDir -saveParams true -numThreads 1 -msPerLine 10000 -alignTraining - -################# -# Language/Data -################# -foreignSuffix src -englishSuffix trg - -# Choose the training sources, which can either be directories or files that list files/directories -trainSources $train_dir/ -#trainSources $train_dir/sources -testSources -sentences MAX - -################# -# 1-best output -################# -competitiveThresholding - -EOT - close WCONF; -} - -sub write_eval_sh -{ - my ($filename) = @_; - open EVALFILE, ">$filename" or die "Can't write $filename: $!"; - - print EVALFILE <<EOT; -#!/bin/bash - -EVAL_MAIN=/export/ws10smt/data/eval.sh -marker="$MARKER" -EOT - - if ($SPLIT_TRG) { - print EVALFILE <<EOT; -echo "OUTPUT EVALUATION" -echo "-----------------" -\$EVAL_MAIN "\$1" $TEST{'trg'}{'finalsplit'} - -echo "RECOMBINED OUTPUT EVALUATION" -echo "----------------------------" -cat "\$1" | sed -e "s/\$marker \$marker//g" -e "s/\$marker//g" > "\$1.recombined" - -\$EVAL_MAIN "\$1.recombined" $TEST{'trg'}{'finalunsplit'} -EOT - - } else { - print EVALFILE <<EOT; -echo "ARTIFICIAL SPLIT EVALUATION" -echo "--------------------------" - -#split the output translation -cat "\$1" | $MORF_SEGMENT $MODEL_TRG_DIR/inputvocab.gz $MODEL_TRG_DIR/segmentation.ready "\$MARKER" > "\$1.split" - -\$EVAL_MAIN "\$1.split" $TEST{'trg'}{'finalsplit'} - -echo "DIRECT EVALUATION" -echo "--------------------------" -\$EVAL_MAIN "\$1" $TEST{'trg'}{'finalunsplit'} - -EOT - - } - close EVALFILE; - -} - - - - diff --git a/gi/morf-segmentation/morfsegment.py b/gi/morf-segmentation/morfsegment.py deleted file mode 100755 index 85b9d4fb..00000000 --- a/gi/morf-segmentation/morfsegment.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/python - -import sys -import gzip - -#usage: morfsegment.py inputvocab.gz segmentation.ready -# stdin: the data to segment -# stdout: the segmented data - -if len(sys.argv) < 3: - print "usage: morfsegment.py inputvocab.gz segmentation.ready [marker]" - print " stdin: the data to segment" - print " stdout: the segmented data" - sys.exit() - -#read index: -split_index={} - -marker="##" - -if len(sys.argv) > 3: - marker=sys.argv[3] - -word_vocab=gzip.open(sys.argv[1], 'rb') #inputvocab.gz -seg_vocab=open(sys.argv[2], 'r') #segm.ready.. 
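-# inputvocab.gz and segmentation.ready are parallel files: line i of the
-# segmentation file holds the segmentation of the word on line i of the
-# vocabulary file ("<count> <word>"). Only words whose segmentation
-# differs from the surface form are kept in the index built below.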
-
-for seg in seg_vocab:
-  #seg = ver# #wonder\n
-  #wordline = 1 verwonder\n
-  word = word_vocab.readline().strip().split(' ')
-  assert(len(word) == 2)
-  word = word[1]
-  seg = seg.strip()
-
-  if seg != word:
-    split_index[word] = seg
-
-word_vocab.close()
-seg_vocab.close()
-
-for line in sys.stdin:
-  words = line.strip().split()
-
-  newsent = []
-  for word in words:
-    splitword = split_index.get(word, word)
-    newsent.append(splitword)
-
-  print ' '.join(newsent)
-
diff --git a/gi/morf-segmentation/morftrain.sh b/gi/morf-segmentation/morftrain.sh
deleted file mode 100755
index 9004922f..00000000
--- a/gi/morf-segmentation/morftrain.sh
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/bin/bash
-
-if [[ $# -lt 2 ]]; then
-  echo "Trains a morfessor model and places the result in writedir"
-  echo
-  echo "Usage: `basename $0` corpus_input_file writedir [PPL] [marker] [lines]"
-  echo -e "\tcorpus_input_file contains a sentence per line."
-  exit 1
-fi
-
-MORFESSOR_DIR="/export/ws10smt/software/morfessor_catmap0.9.2"
-SCRIPT_DIR=$(dirname `readlink -f $0`)
-
-MORFBINDIR="$MORFESSOR_DIR/bin"
-MORFMAKEFILE_TRAIN="$MORFESSOR_DIR/train/Makefile"
-VOCABEXT="$SCRIPT_DIR/vocabextractor.sh"
-
-MARKER="#"
-
-if [[ ! -f $VOCABEXT ]]; then
-  echo "$VOCABEXT doesn't exist!"
-  exit 1
-fi
-if [[ ! -f $MORFMAKEFILE_TRAIN ]]; then
-  echo "$MORFMAKEFILE_TRAIN doesn't exist!"
-  exit 1
-fi
-
-
-CORPUS="$1"
-WRITETODIR=$2
-
-if [[ ! -f $CORPUS ]]; then
-  echo "$CORPUS doesn't exist!"
-  exit 1
-fi
-
-PPL=10
-LINES=0
-if [[ $# -gt 2 ]]; then
-  PPL=$3
-fi
-if [[ $# -gt 3 ]]; then
-  MARKER="$4"
-fi
-if [[ $# -gt 4 ]]; then
-  LINES=$5
-fi
-
-mkdir -p $WRITETODIR
-
-#extract vocabulary to train on
-echo "Extracting vocabulary..."
-if [[ -f $WRITETODIR/inputvocab.gz ]]; then
-  echo " ....$WRITETODIR/inputvocab.gz exists, reusing."
-else
-  if [[ $LINES -gt 0 ]]; then
-    $VOCABEXT $CORPUS $LINES | gzip > $WRITETODIR/inputvocab.gz
-  else
-    $VOCABEXT $CORPUS | gzip > $WRITETODIR/inputvocab.gz
-  fi
-fi
-
-
-#train it
-echo "Training morf model..."
-if [[ -f $WRITETODIR/segmentation.final.gz ]]; then
-  echo " ....$WRITETODIR/segmentation.final.gz exists, reusing."
-else
-  OLDPWD=`pwd`
-  cd $WRITETODIR
-
-  #put the training Makefile in place, with appropriate modifications
-  sed -e "s/^GZIPPEDINPUTDATA = .*$/GZIPPEDINPUTDATA = inputvocab.gz/" \
-      -e "s/^PPLTHRESH = .*$/PPLTHRESH = $PPL/" \
-      -e "s;^BINDIR = .*$;BINDIR = $MORFBINDIR;" \
-      $MORFMAKEFILE_TRAIN > ./Makefile
-
-  date
-  make > ./trainmorf.log 2>&1
-  cd $OLDPWD
-
-
-  echo "Post processing..."
-  #remove comments, counts and morph types
-  #mark morphs
-
-  if [[ ! -f $WRITETODIR/segmentation.final.gz ]]; then
-    echo "Failed to learn segmentation model: $WRITETODIR/segmentation.final.gz not written"
-    exit 1
-  fi
-
-  zcat $WRITETODIR/segmentation.final.gz | \
-    awk '$1 !~ /^#/ {print}' | \
-    cut -d ' ' --complement -f 1 | \
-    sed -e "s/\/...//g" -e "s/ + /$MARKER $MARKER/g" \
-    > $WRITETODIR/segmentation.ready
-
-  if [[ ! -f $WRITETODIR/segmentation.ready ]]; then
-    echo "Failed to learn segmentation model: $WRITETODIR/segmentation.ready not written"
-    exit 1
-  fi
-
-
-
-  echo "Done training."
-  date
-fi
-echo "Segmentation model is $WRITETODIR/segmentation.ready."
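-# Example invocation (hypothetical paths; trains on the whole corpus with
-# perplexity threshold 50 and the default "#" marker):
-#   ./morftrain.sh corpus.src models/morf-src 50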
- diff --git a/gi/morf-segmentation/vocabextractor.sh b/gi/morf-segmentation/vocabextractor.sh deleted file mode 100755 index 00ae7109..00000000 --- a/gi/morf-segmentation/vocabextractor.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -d=$(dirname `readlink -f $0`) -if [ $# -lt 1 ]; then - echo "Extracts unique words and their frequencies from a subset of a corpus." - echo - echo "Usage: `basename $0` input_file [number_of_lines] > output_file" - echo -e "\tinput_file contains a sentence per line." - echo - echo "Script also removes words from the vocabulary if they contain a digit or a special character. Output is printed to stdout in a format suitable for use with Morfessor." - echo - exit -fi - -srcname=$1 -reallen=0 - -if [[ $# -gt 1 ]]; then - reallen=$2 -fi - -pattern_file=$d/invalid_vocab.patterns - -if [[ ! -f $pattern_file ]]; then - echo "Pattern file missing" - exit 1 -fi - -#this awk strips entries from the vocabulary if they contain invalid characters -#invalid characters are digits and punctuation marks, and words beginning or ending with a dash -#uniq -c extracts the unique words and counts the occurrences - -if [[ $reallen -eq 0 ]]; then - #when a zero is passed, use the whole file - zcat -f $srcname | sed 's/ /\n/g' | egrep -v -f $pattern_file | sort | uniq -c | sed 's/^ *//' - -else - zcat -f $srcname | head -n $reallen | sed 's/ /\n/g' | egrep -v -f $pattern_file | sort | uniq -c | sed 's/^ *//' -fi - diff --git a/gi/pf/Makefile.am b/gi/pf/Makefile.am deleted file mode 100644 index 86f8e07b..00000000 --- a/gi/pf/Makefile.am +++ /dev/null @@ -1,44 +0,0 @@ -bin_PROGRAMS = cbgi brat dpnaive pfbrat pfdist itg pfnaive condnaive align-lexonly-pyp learn_cfg pyp_lm nuisance_test align-tl pf_test bayes_lattice_score - -noinst_LIBRARIES = libpf.a - -libpf_a_SOURCES = base_distributions.cc reachability.cc cfg_wfst_composer.cc corpus.cc unigrams.cc ngram_base.cc transliterations.cc backward.cc hpyp_tm.cc pyp_tm.cc - -bayes_lattice_score_SOURCES = bayes_lattice_score.cc -bayes_lattice_score_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz - -pf_test_SOURCES = pf_test.cc -pf_test_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz - -nuisance_test_SOURCES = nuisance_test.cc -nuisance_test_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz - -align_lexonly_pyp_SOURCES = align-lexonly-pyp.cc -align_lexonly_pyp_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz - -align_tl_SOURCES = align-tl.cc -align_tl_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz - -itg_SOURCES = itg.cc - -pyp_lm_SOURCES = pyp_lm.cc - -learn_cfg_SOURCES = learn_cfg.cc - -condnaive_SOURCES = condnaive.cc - -dpnaive_SOURCES = dpnaive.cc - -pfdist_SOURCES = pfdist.cc - -pfnaive_SOURCES = pfnaive.cc - -cbgi_SOURCES = cbgi.cc - -brat_SOURCES = brat.cc - -pfbrat_SOURCES = pfbrat.cc - -AM_CPPFLAGS = -W -Wall -Wno-sign-compare -funroll-loops -I$(top_srcdir)/utils 
$(GTEST_CPPFLAGS) -I$(top_srcdir)/decoder -I$(top_srcdir)/klm - -AM_LDFLAGS = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a $(top_srcdir)/utils/libutils.a -lz diff --git a/gi/pf/README b/gi/pf/README deleted file mode 100644 index 62e47541..00000000 --- a/gi/pf/README +++ /dev/null @@ -1,2 +0,0 @@ -Experimental Bayesian alignment tools. Nothing to see here. - diff --git a/gi/pf/align-lexonly-pyp.cc b/gi/pf/align-lexonly-pyp.cc deleted file mode 100644 index e7509f57..00000000 --- a/gi/pf/align-lexonly-pyp.cc +++ /dev/null @@ -1,243 +0,0 @@ -#include <iostream> -#include <queue> - -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "tdict.h" -#include "stringlib.h" -#include "filelib.h" -#include "array2d.h" -#include "sampler.h" -#include "corpus.h" -#include "pyp_tm.h" -#include "hpyp_tm.h" -#include "quasi_model2.h" - -using namespace std; -namespace po = boost::program_options; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("infer_alignment_hyperparameters,I", "Infer alpha and p_null, otherwise fixed values will be assumed") - ("p_null,0", po::value<double>()->default_value(0.08), "probability of aligning to null") - ("align_alpha,a", po::value<double>()->default_value(4.0), "how 'tight' is the bias toward be along the diagonal?") - ("input,i",po::value<string>(),"Read parallel data from") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -MT19937* prng; - -struct LexicalAlignment { - unsigned char src_index; - bool is_transliteration; - vector<pair<short, short> > derivation; -}; - -struct AlignedSentencePair { - vector<WordID> src; - vector<WordID> trg; - vector<LexicalAlignment> a; - Array2D<short> posterior; -}; - -template <class LexicalTranslationModel> -struct Aligner { - Aligner(const vector<vector<WordID> >& lets, - int vocab_size, - int num_letters, - const po::variables_map& conf, - vector<AlignedSentencePair>* c) : - corpus(*c), - paj_model(conf["align_alpha"].as<double>(), conf["p_null"].as<double>()), - infer_paj(conf.count("infer_alignment_hyperparameters") > 0), - model(lets, vocab_size, num_letters), - kNULL(TD::Convert("NULL")) { - assert(lets[kNULL].size() == 0); - } - - vector<AlignedSentencePair>& corpus; - QuasiModel2 paj_model; - const bool infer_paj; - LexicalTranslationModel model; - const WordID kNULL; - - void ResampleHyperparameters() { - model.ResampleHyperparameters(prng); - if (infer_paj) paj_model.ResampleHyperparameters(prng); - } - - void InitializeRandom() { - cerr << "Initializing with random alignments ...\n"; - for (unsigned i = 0; i < corpus.size(); ++i) { - AlignedSentencePair& 
asp = corpus[i]; - asp.a.resize(asp.trg.size()); - for (unsigned j = 0; j < asp.trg.size(); ++j) { - unsigned char& a_j = asp.a[j].src_index; - a_j = prng->next() * (1 + asp.src.size()); - const WordID f_a_j = (a_j ? asp.src[a_j - 1] : kNULL); - model.Increment(f_a_j, asp.trg[j], &*prng); - paj_model.Increment(a_j, j, asp.src.size(), asp.trg.size()); - } - } - cerr << "Corpus intialized randomly." << endl; - cerr << "LLH = " << Likelihood() << " \t(Amodel=" << paj_model.Likelihood() - << " TModel=" << model.Likelihood() << ") contexts=" << model.UniqueConditioningContexts() << endl; - } - - void ResampleCorpus() { - for (unsigned i = 0; i < corpus.size(); ++i) { - AlignedSentencePair& asp = corpus[i]; - SampleSet<prob_t> ss; ss.resize(asp.src.size() + 1); - for (unsigned j = 0; j < asp.trg.size(); ++j) { - unsigned char& a_j = asp.a[j].src_index; - const WordID e_j = asp.trg[j]; - WordID f_a_j = (a_j ? asp.src[a_j - 1] : kNULL); - model.Decrement(f_a_j, e_j, prng); - paj_model.Decrement(a_j, j, asp.src.size(), asp.trg.size()); - - for (unsigned prop_a_j = 0; prop_a_j <= asp.src.size(); ++prop_a_j) { - const WordID prop_f = (prop_a_j ? asp.src[prop_a_j - 1] : kNULL); - ss[prop_a_j] = model.Prob(prop_f, e_j); - ss[prop_a_j] *= paj_model.Prob(prop_a_j, j, asp.src.size(), asp.trg.size()); - } - a_j = prng->SelectSample(ss); - f_a_j = (a_j ? asp.src[a_j - 1] : kNULL); - model.Increment(f_a_j, e_j, prng); - paj_model.Increment(a_j, j, asp.src.size(), asp.trg.size()); - } - } - } - - prob_t Likelihood() const { - return model.Likelihood() * paj_model.Likelihood(); - } -}; - -void ExtractLetters(const set<WordID>& v, vector<vector<WordID> >* l, set<WordID>* letset = NULL) { - for (set<WordID>::const_iterator it = v.begin(); it != v.end(); ++it) { - vector<WordID>& letters = (*l)[*it]; - if (letters.size()) continue; // if e and f have the same word - - const string& w = TD::Convert(*it); - - size_t cur = 0; - while (cur < w.size()) { - const size_t len = UTF8Len(w[cur]); - letters.push_back(TD::Convert(w.substr(cur, len))); - if (letset) letset->insert(letters.back()); - cur += len; - } - } -} - -void Debug(const AlignedSentencePair& asp) { - cerr << TD::GetString(asp.src) << endl << TD::GetString(asp.trg) << endl; - Array2D<bool> a(asp.src.size(), asp.trg.size()); - for (unsigned j = 0; j < asp.trg.size(); ++j) { - assert(asp.a[j].src_index <= asp.src.size()); - if (asp.a[j].src_index) a(asp.a[j].src_index - 1, j) = true; - } - cerr << a << endl; -} - -void AddSample(AlignedSentencePair* asp) { - for (unsigned j = 0; j < asp->trg.size(); ++j) - asp->posterior(asp->a[j].src_index, j)++; -} - -void WriteAlignments(const AlignedSentencePair& asp) { - bool first = true; - for (unsigned j = 0; j < asp.trg.size(); ++j) { - int src_index = -1; - int mc = -1; - for (unsigned i = 0; i <= asp.src.size(); ++i) { - if (asp.posterior(i, j) > mc) { - mc = asp.posterior(i, j); - src_index = i; - } - } - - if (src_index) { - if (first) first = false; else cout << ' '; - cout << (src_index - 1) << '-' << j; - } - } - cout << endl; -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - - if (conf.count("random_seed")) - prng = new MT19937(conf["random_seed"].as<uint32_t>()); - else - prng = new MT19937; - - vector<vector<int> > corpuse, corpusf; - set<int> vocabe, vocabf; - corpus::ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe); - cerr << "f-Corpus size: " << corpusf.size() << " sentences\n"; - cerr << "f-Vocabulary size: " << 
vocabf.size() << " types\n";
-  cerr << "e-Corpus size: " << corpuse.size() << " sentences\n";
-  cerr << "e-Vocabulary size: " << vocabe.size() << " types\n";
-  assert(corpusf.size() == corpuse.size());
-
-  vector<AlignedSentencePair> corpus(corpuse.size());
-  for (unsigned i = 0; i < corpuse.size(); ++i) {
-    corpus[i].src.swap(corpusf[i]);
-    corpus[i].trg.swap(corpuse[i]);
-    corpus[i].posterior.resize(corpus[i].src.size() + 1, corpus[i].trg.size());
-  }
-  corpusf.clear(); corpuse.clear();
-
-  vocabf.insert(TD::Convert("NULL"));
-  vector<vector<WordID> > letters(TD::NumWords());
-  set<WordID> letset;
-  ExtractLetters(vocabe, &letters, &letset);
-  ExtractLetters(vocabf, &letters, NULL);
-  letters[TD::Convert("NULL")].clear();
-
-  //Aligner<PYPLexicalTranslation> aligner(letters, vocabe.size(), letset.size(), conf, &corpus);
-  Aligner<HPYPLexicalTranslation> aligner(letters, vocabe.size(), letset.size(), conf, &corpus);
-  aligner.InitializeRandom();
-
-  const unsigned samples = conf["samples"].as<unsigned>();
-  for (int i = 0; i < samples; ++i) {
-    for (int j = 65; j < 67; ++j) Debug(corpus[j]);
-    if (i % 10 == 9) {
-      aligner.ResampleHyperparameters();
-      cerr << "LLH = " << aligner.Likelihood() << " \t(Amodel=" << aligner.paj_model.Likelihood()
-           << " TModel=" << aligner.model.Likelihood() << ") contexts=" << aligner.model.UniqueConditioningContexts() << endl;
-    }
-    aligner.ResampleCorpus();
-    if (i > (samples / 5) && (i % 6 == 5)) for (int j = 0; j < corpus.size(); ++j) AddSample(&corpus[j]);
-  }
-  for (unsigned i = 0; i < corpus.size(); ++i)
-    WriteAlignments(corpus[i]);
-  aligner.model.Summary();
-
-  return 0;
-}
diff --git a/gi/pf/align-tl.cc b/gi/pf/align-tl.cc
deleted file mode 100644
index f6608f1d..00000000
--- a/gi/pf/align-tl.cc
+++ /dev/null
@@ -1,339 +0,0 @@
-#include <iostream>
-#include <tr1/memory>
-#include <queue>
-
-#include <boost/multi_array.hpp>
-#include <boost/program_options.hpp>
-#include <boost/program_options/variables_map.hpp>
-
-#include "backward.h"
-#include "array2d.h"
-#include "base_distributions.h"
-#include "monotonic_pseg.h"
-#include "conditional_pseg.h"
-#include "trule.h"
-#include "tdict.h"
-#include "stringlib.h"
-#include "filelib.h"
-#include "dict.h"
-#include "sampler.h"
-#include "mfcr.h"
-#include "corpus.h"
-#include "ngram_base.h"
-#include "transliterations.h"
-
-using namespace std;
-using namespace tr1;
-namespace po = boost::program_options;
-
-void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
-  po::options_description opts("Configuration options");
-  opts.add_options()
-        ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples")
-        ("input,i",po::value<string>(),"Read parallel data from")
-        ("s2t", po::value<string>(), "character level source-to-target prior transliteration probabilities")
-        ("t2s", po::value<string>(), "character level target-to-source prior transliteration probabilities")
-        ("max_src_chunk", po::value<unsigned>()->default_value(4), "Maximum size of transliterated chunk in source")
-        ("max_trg_chunk", po::value<unsigned>()->default_value(4), "Maximum size of transliterated chunk in target")
-        ("expected_src_to_trg_ratio", po::value<double>()->default_value(1.0), "If a word is transliterated, what is the expected length ratio from source to target?")
-        ("random_seed,S",po::value<uint32_t>(), "Random seed");
-  po::options_description clo("Command line options");
-  clo.add_options()
-        ("config", po::value<string>(), "Configuration file")
-        ("help,h", "Print this help message and exit");
-
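-  // Values parsed from the command line are stored into *conf before any
-  // --config file is read; boost::program_options keeps the first value
-  // stored for an option, so command-line settings take precedence.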
po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -boost::shared_ptr<MT19937> prng; - -struct LexicalAlignment { - unsigned char src_index; - bool is_transliteration; - vector<pair<short, short> > derivation; -}; - -struct AlignedSentencePair { - vector<WordID> src; - vector<WordID> trg; - vector<LexicalAlignment> a; - Array2D<short> posterior; -}; - -struct HierarchicalWordBase { - explicit HierarchicalWordBase(const unsigned vocab_e_size) : - base(prob_t::One()), r(1,1,1,1,0.66,50.0), u0(-log(vocab_e_size)), l(1,prob_t::One()), v(1, prob_t::Zero()) {} - - void ResampleHyperparameters(MT19937* rng) { - r.resample_hyperparameters(rng); - } - - inline double logp0(const vector<WordID>& s) const { - return Md::log_poisson(s.size(), 7.5) + s.size() * u0; - } - - // return p0 of rule.e_ - prob_t operator()(const TRule& rule) const { - v[0].logeq(logp0(rule.e_)); - return r.prob(rule.e_, v.begin(), l.begin()); - } - - void Increment(const TRule& rule) { - v[0].logeq(logp0(rule.e_)); - if (r.increment(rule.e_, v.begin(), l.begin(), &*prng).count) { - base *= v[0] * l[0]; - } - } - - void Decrement(const TRule& rule) { - if (r.decrement(rule.e_, &*prng).count) { - base /= prob_t(exp(logp0(rule.e_))); - } - } - - prob_t Likelihood() const { - prob_t p; p.logeq(r.log_crp_prob()); - p *= base; - return p; - } - - void Summary() const { - cerr << "NUMBER OF CUSTOMERS: " << r.num_customers() << " (d=" << r.discount() << ",s=" << r.strength() << ')' << endl; - for (MFCR<1,vector<WordID> >::const_iterator it = r.begin(); it != r.end(); ++it) - cerr << " " << it->second.total_dish_count_ << " (on " << it->second.table_counts_.size() << " tables) " << TD::GetString(it->first) << endl; - } - - prob_t base; - MFCR<1,vector<WordID> > r; - const double u0; - const vector<prob_t> l; - mutable vector<prob_t> v; -}; - -struct BasicLexicalAlignment { - explicit BasicLexicalAlignment(const vector<vector<WordID> >& lets, - const unsigned words_e, - const unsigned letters_e, - vector<AlignedSentencePair>* corp) : - letters(lets), - corpus(*corp), - //up0(words_e), - //up0("en.chars.1gram", letters_e), - //up0("en.words.1gram"), - up0(letters_e), - //up0("en.chars.2gram"), - tmodel(up0) { - } - - void InstantiateRule(const WordID src, - const WordID trg, - TRule* rule) const { - static const WordID kX = TD::Convert("X") * -1; - rule->lhs_ = kX; - rule->e_ = letters[trg]; - rule->f_ = letters[src]; - } - - void InitializeRandom() { - const WordID kNULL = TD::Convert("NULL"); - cerr << "Initializing with random alignments ...\n"; - for (unsigned i = 0; i < corpus.size(); ++i) { - AlignedSentencePair& asp = corpus[i]; - asp.a.resize(asp.trg.size()); - for (unsigned j = 0; j < asp.trg.size(); ++j) { - const unsigned char a_j = prng->next() * (1 + asp.src.size()); - const WordID f_a_j = (a_j ? 
asp.src[a_j - 1] : kNULL); - TRule r; - InstantiateRule(f_a_j, asp.trg[j], &r); - asp.a[j].is_transliteration = false; - asp.a[j].src_index = a_j; - if (tmodel.IncrementRule(r, &*prng)) - up0.Increment(r); - } - } - cerr << " LLH = " << Likelihood() << endl; - } - - prob_t Likelihood() const { - prob_t p = tmodel.Likelihood(); - p *= up0.Likelihood(); - return p; - } - - void ResampleHyperparemeters() { - tmodel.ResampleHyperparameters(&*prng); - up0.ResampleHyperparameters(&*prng); - cerr << " (base d=" << up0.r.discount() << ",s=" << up0.r.strength() << ")\n"; - } - - void ResampleCorpus(); - - const vector<vector<WordID> >& letters; // spelling dictionary - vector<AlignedSentencePair>& corpus; - //PhraseConditionalUninformativeBase up0; - //PhraseConditionalUninformativeUnigramBase up0; - //UnigramWordBase up0; - //HierarchicalUnigramBase up0; - HierarchicalWordBase up0; - //CompletelyUniformBase up0; - //FixedNgramBase up0; - //ConditionalTranslationModel<PhraseConditionalUninformativeBase> tmodel; - //ConditionalTranslationModel<PhraseConditionalUninformativeUnigramBase> tmodel; - //ConditionalTranslationModel<UnigramWordBase> tmodel; - //ConditionalTranslationModel<HierarchicalUnigramBase> tmodel; - MConditionalTranslationModel<HierarchicalWordBase> tmodel; - //ConditionalTranslationModel<FixedNgramBase> tmodel; - //ConditionalTranslationModel<CompletelyUniformBase> tmodel; -}; - -void BasicLexicalAlignment::ResampleCorpus() { - static const WordID kNULL = TD::Convert("NULL"); - for (unsigned i = 0; i < corpus.size(); ++i) { - AlignedSentencePair& asp = corpus[i]; - SampleSet<prob_t> ss; ss.resize(asp.src.size() + 1); - for (unsigned j = 0; j < asp.trg.size(); ++j) { - TRule r; - unsigned char& a_j = asp.a[j].src_index; - WordID f_a_j = (a_j ? asp.src[a_j - 1] : kNULL); - InstantiateRule(f_a_j, asp.trg[j], &r); - if (tmodel.DecrementRule(r, &*prng)) - up0.Decrement(r); - - for (unsigned prop_a_j = 0; prop_a_j <= asp.src.size(); ++prop_a_j) { - const WordID prop_f = (prop_a_j ? asp.src[prop_a_j - 1] : kNULL); - InstantiateRule(prop_f, asp.trg[j], &r); - ss[prop_a_j] = tmodel.RuleProbability(r); - } - a_j = prng->SelectSample(ss); - f_a_j = (a_j ? 
asp.src[a_j - 1] : kNULL);
-      InstantiateRule(f_a_j, asp.trg[j], &r);
-      if (tmodel.IncrementRule(r, &*prng))
-        up0.Increment(r);
-    }
-  }
-  cerr << " LLH = " << Likelihood() << endl;
-}
-
-void ExtractLetters(const set<WordID>& v, vector<vector<WordID> >* l, set<WordID>* letset = NULL) {
-  for (set<WordID>::const_iterator it = v.begin(); it != v.end(); ++it) {
-    vector<WordID>& letters = (*l)[*it];
-    if (letters.size()) continue;   // if e and f have the same word
-
-    const string& w = TD::Convert(*it);
-
-    size_t cur = 0;
-    while (cur < w.size()) {
-      const size_t len = UTF8Len(w[cur]);
-      letters.push_back(TD::Convert(w.substr(cur, len)));
-      if (letset) letset->insert(letters.back());
-      cur += len;
-    }
-  }
-}
-
-void Debug(const AlignedSentencePair& asp) {
-  cerr << TD::GetString(asp.src) << endl << TD::GetString(asp.trg) << endl;
-  Array2D<bool> a(asp.src.size(), asp.trg.size());
-  for (unsigned j = 0; j < asp.trg.size(); ++j)
-    if (asp.a[j].src_index) a(asp.a[j].src_index - 1, j) = true;
-  cerr << a << endl;
-}
-
-void AddSample(AlignedSentencePair* asp) {
-  for (unsigned j = 0; j < asp->trg.size(); ++j)
-    asp->posterior(asp->a[j].src_index, j)++;
-}
-
-void WriteAlignments(const AlignedSentencePair& asp) {
-  bool first = true;
-  for (unsigned j = 0; j < asp.trg.size(); ++j) {
-    int src_index = -1;
-    int mc = -1;
-    for (unsigned i = 0; i <= asp.src.size(); ++i) {
-      if (asp.posterior(i, j) > mc) {
-        mc = asp.posterior(i, j);
-        src_index = i;
-      }
-    }
-
-    if (src_index) {
-      if (first) first = false; else cout << ' ';
-      cout << (src_index - 1) << '-' << j;
-    }
-  }
-  cout << endl;
-}
-
-int main(int argc, char** argv) {
-  po::variables_map conf;
-  InitCommandLine(argc, argv, &conf);
-
-  if (conf.count("random_seed"))
-    prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
-  else
-    prng.reset(new MT19937);
-//  MT19937& rng = *prng;
-
-  vector<vector<int> > corpuse, corpusf;
-  set<int> vocabe, vocabf;
-  corpus::ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe);
-  cerr << "f-Corpus size: " << corpusf.size() << " sentences\n";
-  cerr << "f-Vocabulary size: " << vocabf.size() << " types\n";
-  cerr << "e-Corpus size: " << corpuse.size() << " sentences\n";
-  cerr << "e-Vocabulary size: " << vocabe.size() << " types\n";
-  assert(corpusf.size() == corpuse.size());
-
-  vector<AlignedSentencePair> corpus(corpuse.size());
-  for (unsigned i = 0; i < corpuse.size(); ++i) {
-    corpus[i].src.swap(corpusf[i]);
-    corpus[i].trg.swap(corpuse[i]);
-    corpus[i].posterior.resize(corpus[i].src.size() + 1, corpus[i].trg.size());
-  }
-  corpusf.clear(); corpuse.clear();
-
-  vocabf.insert(TD::Convert("NULL"));
-  vector<vector<WordID> > letters(TD::NumWords() + 1);
-  set<WordID> letset;
-  ExtractLetters(vocabe, &letters, &letset);
-  ExtractLetters(vocabf, &letters, NULL);
-  letters[TD::Convert("NULL")].clear();
-
-  // TODO configure this
-  const int max_src_chunk = conf["max_src_chunk"].as<unsigned>();
-  const int max_trg_chunk = conf["max_trg_chunk"].as<unsigned>();
-  const double s2t_rat = conf["expected_src_to_trg_ratio"].as<double>();
-  const BackwardEstimator be(conf["s2t"].as<string>(), conf["t2s"].as<string>());
-  Transliterations tl(max_src_chunk, max_trg_chunk, s2t_rat, be);
-
-  cerr << "Initializing transliteration graph structures ...\n";
-  for (int i = 0; i < corpus.size(); ++i) {
-    const vector<int>& src = corpus[i].src;
-    const vector<int>& trg = corpus[i].trg;
-    for (int j = 0; j < src.size(); ++j) {
-      const vector<int>& src_let = letters[src[j]];
-      for (int k = 0; k <
trg.size(); ++k) { - const vector<int>& trg_let = letters[trg[k]]; - tl.Initialize(src[j], src_let, trg[k], trg_let); - //if (src_let.size() < min_trans_src) - // tl.Forbid(src[j], src_let, trg[k], trg_let); - } - } - } - cerr << endl; - tl.GraphSummary(); - - return 0; -} diff --git a/gi/pf/backward.cc b/gi/pf/backward.cc deleted file mode 100644 index b92629fd..00000000 --- a/gi/pf/backward.cc +++ /dev/null @@ -1,89 +0,0 @@ -#include "backward.h" - -#include <queue> -#include <utility> - -#include "array2d.h" -#include "reachability.h" -#include "base_distributions.h" - -using namespace std; - -BackwardEstimator::BackwardEstimator(const string& s2t, - const string& t2s) : m1(new Model1(s2t)), m1inv(new Model1(t2s)) {} - -BackwardEstimator::~BackwardEstimator() { - delete m1; m1 = NULL; - delete m1inv; m1inv = NULL; -} - -float BackwardEstimator::ComputeBackwardProb(const std::vector<WordID>& src, - const std::vector<WordID>& trg, - unsigned src_covered, - unsigned trg_covered, - double s2t_ratio) const { - if (src_covered == src.size() || trg_covered == trg.size()) { - assert(src_covered == src.size()); - assert(trg_covered == trg.size()); - return 0; - } - static const WordID kNULL = TD::Convert("<eps>"); - const prob_t uniform_alignment(1.0 / (src.size() - src_covered + 1)); - // TODO factor in expected length ratio - prob_t e; e.logeq(Md::log_poisson(trg.size() - trg_covered, (src.size() - src_covered) * s2t_ratio)); // p(trg len remaining | src len remaining) - for (unsigned j = trg_covered; j < trg.size(); ++j) { - prob_t p = (*m1)(kNULL, trg[j]) + prob_t(1e-12); - for (unsigned i = src_covered; i < src.size(); ++i) - p += (*m1)(src[i], trg[j]); - if (p.is_0()) { - cerr << "ERROR: p(" << TD::Convert(trg[j]) << " | " << TD::GetString(src) << ") = 0!\n"; - assert(!"failed"); - } - p *= uniform_alignment; - e *= p; - } - // TODO factor in expected length ratio - const prob_t inv_uniform(1.0 / (trg.size() - trg_covered + 1.0)); - prob_t inv; - inv.logeq(Md::log_poisson(src.size() - src_covered, (trg.size() - trg_covered) / s2t_ratio)); - for (unsigned i = src_covered; i < src.size(); ++i) { - prob_t p = (*m1inv)(kNULL, src[i]) + prob_t(1e-12); - for (unsigned j = trg_covered; j < trg.size(); ++j) - p += (*m1inv)(trg[j], src[i]); - if (p.is_0()) { - cerr << "ERROR: p_inv(" << TD::Convert(src[i]) << " | " << TD::GetString(trg) << ") = 0!\n"; - assert(!"failed"); - } - p *= inv_uniform; - inv *= p; - } - return (log(e) + log(inv)) / 2; -} - -void BackwardEstimator::InitializeGrid(const vector<WordID>& src, - const vector<WordID>& trg, - const Reachability& r, - double s2t_ratio, - float* grid) const { - queue<pair<int,int> > q; - q.push(make_pair(0,0)); - Array2D<bool> done(src.size()+1, trg.size()+1, false); - //cerr << TD::GetString(src) << " ||| " << TD::GetString(trg) << endl; - while(!q.empty()) { - const pair<int,int> n = q.front(); - q.pop(); - if (done(n.first,n.second)) continue; - done(n.first,n.second) = true; - - float lp = ComputeBackwardProb(src, trg, n.first, n.second, s2t_ratio); - if (n.first == 0 && n.second == 0) grid[0] = lp; - //cerr << " " << n.first << "," << n.second << "\t" << lp << endl; - - if (n.first == src.size() || n.second == trg.size()) continue; - const vector<pair<short,short> >& edges = r.valid_deltas[n.first][n.second]; - for (int i = 0; i < edges.size(); ++i) - q.push(make_pair(n.first + edges[i].first, n.second + edges[i].second)); - } - //static int cc = 0; ++cc; if (cc == 80) exit(1); -} - diff --git a/gi/pf/backward.h b/gi/pf/backward.h deleted 
file mode 100644 index e67eff0c..00000000 --- a/gi/pf/backward.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _BACKWARD_H_ -#define _BACKWARD_H_ - -#include <vector> -#include <string> -#include "wordid.h" - -struct Reachability; -struct Model1; - -struct BackwardEstimator { - BackwardEstimator(const std::string& s2t, - const std::string& t2s); - ~BackwardEstimator(); - - void InitializeGrid(const std::vector<WordID>& src, - const std::vector<WordID>& trg, - const Reachability& r, - double src2trg_ratio, - float* grid) const; - - private: - float ComputeBackwardProb(const std::vector<WordID>& src, - const std::vector<WordID>& trg, - unsigned src_covered, - unsigned trg_covered, - double src2trg_ratio) const; - - Model1* m1; - Model1* m1inv; -}; - -#endif diff --git a/gi/pf/base_distributions.cc b/gi/pf/base_distributions.cc deleted file mode 100644 index 57e0bbe1..00000000 --- a/gi/pf/base_distributions.cc +++ /dev/null @@ -1,241 +0,0 @@ -#include "base_distributions.h" - -#include <iostream> - -#include "filelib.h" - -using namespace std; - -TableLookupBase::TableLookupBase(const string& fname) { - cerr << "TableLookupBase reading from " << fname << " ..." << endl; - ReadFile rf(fname); - istream& in = *rf.stream(); - string line; - unsigned lc = 0; - const WordID kDIV = TD::Convert("|||"); - vector<WordID> tmp; - vector<int> le, lf; - TRule x; - x.lhs_ = -TD::Convert("X"); - bool flag = false; - while(getline(in, line)) { - ++lc; - if (lc % 1000000 == 0) { cerr << " [" << lc << ']' << endl; flag = false; } - else if (lc % 25000 == 0) { cerr << '.' << flush; flag = true; } - tmp.clear(); - TD::ConvertSentence(line, &tmp); - x.f_.clear(); - x.e_.clear(); - size_t pos = 0; - int cc = 0; - while(pos < tmp.size()) { - const WordID cur = tmp[pos++]; - if (cur == kDIV) { - ++cc; - } else if (cc == 0) { - x.f_.push_back(cur); - } else if (cc == 1) { - x.e_.push_back(cur); - } else if (cc == 2) { - table[x].logeq(atof(TD::Convert(cur).c_str())); - ++cc; - } else { - if (flag) cerr << endl; - cerr << "Bad format in " << lc << ": " << line << endl; abort(); - } - } - if (cc != 3) { - if (flag) cerr << endl; - cerr << "Bad format in " << lc << ": " << line << endl; abort(); - } - } - if (flag) cerr << endl; - cerr << " read " << lc << " entries\n"; -} - -prob_t PhraseConditionalUninformativeUnigramBase::p0(const vector<WordID>& vsrc, - const vector<WordID>& vtrg, - int start_src, int start_trg) const { - const int flen = vsrc.size() - start_src; - const int elen = vtrg.size() - start_trg; - prob_t p; - p.logeq(Md::log_poisson(elen, flen + 0.01)); // elen | flen ~Pois(flen + 0.01) - //p.logeq(log_poisson(elen, 1)); // elen | flen ~Pois(flen + 0.01) - for (int i = 0; i < elen; ++i) - p *= u(vtrg[i + start_trg]); // draw e_i ~Uniform - return p; -} - -prob_t PhraseConditionalUninformativeBase::p0(const vector<WordID>& vsrc, - const vector<WordID>& vtrg, - int start_src, int start_trg) const { - const int flen = vsrc.size() - start_src; - const int elen = vtrg.size() - start_trg; - prob_t p; - //p.logeq(log_poisson(elen, flen + 0.01)); // elen | flen ~Pois(flen + 0.01) - p.logeq(Md::log_poisson(elen, 1)); // elen | flen ~Pois(flen + 0.01) - for (int i = 0; i < elen; ++i) - p *= kUNIFORM_TARGET; // draw e_i ~Uniform - return p; -} - -void Model1::LoadModel1(const string& fname) { - cerr << "Loading Model 1 parameters from " << fname << " ..." 
<< endl; - ReadFile rf(fname); - istream& in = *rf.stream(); - string line; - unsigned lc = 0; - while(getline(in, line)) { - ++lc; - int cur = 0; - int start = 0; - while(cur < line.size() && line[cur] != ' ') { ++cur; } - assert(cur != line.size()); - line[cur] = 0; - const WordID src = TD::Convert(&line[0]); - ++cur; - start = cur; - while(cur < line.size() && line[cur] != ' ') { ++cur; } - assert(cur != line.size()); - line[cur] = 0; - WordID trg = TD::Convert(&line[start]); - const double logprob = strtod(&line[cur + 1], NULL); - if (src >= ttable.size()) ttable.resize(src + 1); - ttable[src][trg].logeq(logprob); - } - cerr << " read " << lc << " parameters.\n"; -} - -prob_t PhraseConditionalBase::p0(const vector<WordID>& vsrc, - const vector<WordID>& vtrg, - int start_src, int start_trg) const { - const int flen = vsrc.size() - start_src; - const int elen = vtrg.size() - start_trg; - prob_t uniform_src_alignment; uniform_src_alignment.logeq(-log(flen + 1)); - prob_t p; - p.logeq(Md::log_poisson(elen, flen + 0.01)); // elen | flen ~Pois(flen + 0.01) - for (int i = 0; i < elen; ++i) { // for each position i in e-RHS - const WordID trg = vtrg[i + start_trg]; - prob_t tp = prob_t::Zero(); - for (int j = -1; j < flen; ++j) { - const WordID src = j < 0 ? 0 : vsrc[j + start_src]; - tp += kM1MIXTURE * model1(src, trg); - tp += kUNIFORM_MIXTURE * kUNIFORM_TARGET; - } - tp *= uniform_src_alignment; // draw a_i ~uniform - p *= tp; // draw e_i ~Model1(f_a_i) / uniform - } - if (p.is_0()) { - cerr << "Zero! " << vsrc << "\nTRG=" << vtrg << endl; - abort(); - } - return p; -} - -prob_t PhraseJointBase::p0(const vector<WordID>& vsrc, - const vector<WordID>& vtrg, - int start_src, int start_trg) const { - const int flen = vsrc.size() - start_src; - const int elen = vtrg.size() - start_trg; - prob_t uniform_src_alignment; uniform_src_alignment.logeq(-log(flen + 1)); - prob_t p; - p.logeq(Md::log_poisson(flen, 1.0)); // flen ~Pois(1) - // elen | flen ~Pois(flen + 0.01) - prob_t ptrglen; ptrglen.logeq(Md::log_poisson(elen, flen + 0.01)); - p *= ptrglen; - p *= kUNIFORM_SOURCE.pow(flen); // each f in F ~Uniform - for (int i = 0; i < elen; ++i) { // for each position i in E - const WordID trg = vtrg[i + start_trg]; - prob_t tp = prob_t::Zero(); - for (int j = -1; j < flen; ++j) { - const WordID src = j < 0 ? 0 : vsrc[j + start_src]; - tp += kM1MIXTURE * model1(src, trg); - tp += kUNIFORM_MIXTURE * kUNIFORM_TARGET; - } - tp *= uniform_src_alignment; // draw a_i ~uniform - p *= tp; // draw e_i ~Model1(f_a_i) / uniform - } - if (p.is_0()) { - cerr << "Zero! " << vsrc << "\nTRG=" << vtrg << endl; - abort(); - } - return p; -} - -prob_t PhraseJointBase_BiDir::p0(const vector<WordID>& vsrc, - const vector<WordID>& vtrg, - int start_src, int start_trg) const { - const int flen = vsrc.size() - start_src; - const int elen = vtrg.size() - start_trg; - prob_t uniform_src_alignment; uniform_src_alignment.logeq(-log(flen + 1)); - prob_t uniform_trg_alignment; uniform_trg_alignment.logeq(-log(elen + 1)); - - prob_t p1; - p1.logeq(Md::log_poisson(flen, 1.0)); // flen ~Pois(1) - // elen | flen ~Pois(flen + 0.01) - prob_t ptrglen; ptrglen.logeq(Md::log_poisson(elen, flen + 0.01)); - p1 *= ptrglen; - p1 *= kUNIFORM_SOURCE.pow(flen); // each f in F ~Uniform - for (int i = 0; i < elen; ++i) { // for each position i in E - const WordID trg = vtrg[i + start_trg]; - prob_t tp = prob_t::Zero(); - for (int j = -1; j < flen; ++j) { - const WordID src = j < 0 ? 
0 : vsrc[j + start_src]; - tp += kM1MIXTURE * model1(src, trg); - tp += kUNIFORM_MIXTURE * kUNIFORM_TARGET; - } - tp *= uniform_src_alignment; // draw a_i ~uniform - p1 *= tp; // draw e_i ~Model1(f_a_i) / uniform - } - if (p1.is_0()) { - cerr << "Zero! " << vsrc << "\nTRG=" << vtrg << endl; - abort(); - } - - prob_t p2; - p2.logeq(Md::log_poisson(elen, 1.0)); // elen ~Pois(1) - // flen | elen ~Pois(flen + 0.01) - prob_t psrclen; psrclen.logeq(Md::log_poisson(flen, elen + 0.01)); - p2 *= psrclen; - p2 *= kUNIFORM_TARGET.pow(elen); // each f in F ~Uniform - for (int i = 0; i < flen; ++i) { // for each position i in E - const WordID src = vsrc[i + start_src]; - prob_t tp = prob_t::Zero(); - for (int j = -1; j < elen; ++j) { - const WordID trg = j < 0 ? 0 : vtrg[j + start_trg]; - tp += kM1MIXTURE * invmodel1(trg, src); - tp += kUNIFORM_MIXTURE * kUNIFORM_SOURCE; - } - tp *= uniform_trg_alignment; // draw a_i ~uniform - p2 *= tp; // draw e_i ~Model1(f_a_i) / uniform - } - if (p2.is_0()) { - cerr << "Zero! " << vsrc << "\nTRG=" << vtrg << endl; - abort(); - } - - static const prob_t kHALF(0.5); - return (p1 + p2) * kHALF; -} - -JumpBase::JumpBase() : p(200) { - for (unsigned src_len = 1; src_len < 200; ++src_len) { - map<int, prob_t>& cpd = p[src_len]; - int min_jump = 1 - src_len; - int max_jump = src_len; - prob_t z; - for (int j = min_jump; j <= max_jump; ++j) { - prob_t& cp = cpd[j]; - if (j < 0) - cp.logeq(Md::log_poisson(1.5-j, 1)); - else if (j > 0) - cp.logeq(Md::log_poisson(j, 1)); - cp.poweq(0.2); - z += cp; - } - for (int j = min_jump; j <= max_jump; ++j) { - cpd[j] /= z; - } - } -} - diff --git a/gi/pf/base_distributions.h b/gi/pf/base_distributions.h deleted file mode 100644 index 41b513f8..00000000 --- a/gi/pf/base_distributions.h +++ /dev/null @@ -1,238 +0,0 @@ -#ifndef _BASE_MEASURES_H_ -#define _BASE_MEASURES_H_ - -#include <vector> -#include <map> -#include <string> -#include <cmath> -#include <iostream> -#include <cassert> - -#include "unigrams.h" -#include "trule.h" -#include "prob.h" -#include "tdict.h" -#include "sampler.h" -#include "m.h" -#include "os_phrase.h" - -struct Model1 { - explicit Model1(const std::string& fname) : - kNULL(TD::Convert("<eps>")), - kZERO() { - LoadModel1(fname); - } - - void LoadModel1(const std::string& fname); - - // returns prob 0 if src or trg is not found - const prob_t& operator()(WordID src, WordID trg) const { - if (src == 0) src = kNULL; - if (src < ttable.size()) { - const std::map<WordID, prob_t>& cpd = ttable[src]; - const std::map<WordID, prob_t>::const_iterator it = cpd.find(trg); - if (it != cpd.end()) - return it->second; - } - return kZERO; - } - - const WordID kNULL; - const prob_t kZERO; - std::vector<std::map<WordID, prob_t> > ttable; -}; - -struct PoissonUniformUninformativeBase { - explicit PoissonUniformUninformativeBase(const unsigned ves) : kUNIFORM(1.0 / ves) {} - prob_t operator()(const TRule& r) const { - prob_t p; p.logeq(Md::log_poisson(r.e_.size(), 1.0)); - prob_t q = kUNIFORM; q.poweq(r.e_.size()); - p *= q; - return p; - } - void Summary() const {} - void ResampleHyperparameters(MT19937*) {} - void Increment(const TRule&) {} - void Decrement(const TRule&) {} - prob_t Likelihood() const { return prob_t::One(); } - const prob_t kUNIFORM; -}; - -struct CompletelyUniformBase { - explicit CompletelyUniformBase(const unsigned ves) : kUNIFORM(1.0 / ves) {} - prob_t operator()(const TRule&) const { - return kUNIFORM; - } - void Summary() const {} - void ResampleHyperparameters(MT19937*) {} - void Increment(const TRule&) 
{} - void Decrement(const TRule&) {} - prob_t Likelihood() const { return prob_t::One(); } - const prob_t kUNIFORM; -}; - -struct UnigramWordBase { - explicit UnigramWordBase(const std::string& fname) : un(fname) {} - prob_t operator()(const TRule& r) const { - return un(r.e_); - } - const UnigramWordModel un; -}; - -struct RuleHasher { - size_t operator()(const TRule& r) const { - return hash_value(r); - } -}; - -struct TableLookupBase { - TableLookupBase(const std::string& fname); - - prob_t operator()(const TRule& rule) const { - const std::tr1::unordered_map<TRule,prob_t,RuleHasher>::const_iterator it = table.find(rule); - if (it == table.end()) { - std::cerr << rule << " not found\n"; - abort(); - } - return it->second; - } - - void ResampleHyperparameters(MT19937*) {} - void Increment(const TRule&) {} - void Decrement(const TRule&) {} - prob_t Likelihood() const { return prob_t::One(); } - void Summary() const {} - - std::tr1::unordered_map<TRule,prob_t,RuleHasher> table; -}; - -struct PhraseConditionalUninformativeBase { - explicit PhraseConditionalUninformativeBase(const unsigned vocab_e_size) : - kUNIFORM_TARGET(1.0 / vocab_e_size) { - assert(vocab_e_size > 0); - } - - // return p0 of rule.e_ | rule.f_ - prob_t operator()(const TRule& rule) const { - return p0(rule.f_, rule.e_, 0, 0); - } - - prob_t p0(const std::vector<WordID>& vsrc, const std::vector<WordID>& vtrg, int start_src, int start_trg) const; - - void Summary() const {} - void ResampleHyperparameters(MT19937*) {} - void Increment(const TRule&) {} - void Decrement(const TRule&) {} - prob_t Likelihood() const { return prob_t::One(); } - const prob_t kUNIFORM_TARGET; -}; - -struct PhraseConditionalUninformativeUnigramBase { - explicit PhraseConditionalUninformativeUnigramBase(const std::string& file, const unsigned vocab_e_size) : u(file, vocab_e_size) {} - - // return p0 of rule.e_ | rule.f_ - prob_t operator()(const TRule& rule) const { - return p0(rule.f_, rule.e_, 0, 0); - } - - prob_t p0(const std::vector<WordID>& vsrc, const std::vector<WordID>& vtrg, int start_src, int start_trg) const; - - const UnigramModel u; -}; - -struct PhraseConditionalBase { - explicit PhraseConditionalBase(const Model1& m1, const double m1mixture, const unsigned vocab_e_size) : - model1(m1), - kM1MIXTURE(m1mixture), - kUNIFORM_MIXTURE(1.0 - m1mixture), - kUNIFORM_TARGET(1.0 / vocab_e_size) { - assert(m1mixture >= 0.0 && m1mixture <= 1.0); - assert(vocab_e_size > 0); - } - - // return p0 of rule.e_ | rule.f_ - prob_t operator()(const TRule& rule) const { - return p0(rule.f_, rule.e_, 0, 0); - } - - prob_t p0(const std::vector<WordID>& vsrc, const std::vector<WordID>& vtrg, int start_src, int start_trg) const; - - const Model1& model1; - const prob_t kM1MIXTURE; // Model 1 mixture component - const prob_t kUNIFORM_MIXTURE; // uniform mixture component - const prob_t kUNIFORM_TARGET; -}; - -struct PhraseJointBase { - explicit PhraseJointBase(const Model1& m1, const double m1mixture, const unsigned vocab_e_size, const unsigned vocab_f_size) : - model1(m1), - kM1MIXTURE(m1mixture), - kUNIFORM_MIXTURE(1.0 - m1mixture), - kUNIFORM_SOURCE(1.0 / vocab_f_size), - kUNIFORM_TARGET(1.0 / vocab_e_size) { - assert(m1mixture >= 0.0 && m1mixture <= 1.0); - assert(vocab_e_size > 0); - } - - // return p0 of rule.e_ , rule.f_ - prob_t operator()(const TRule& rule) const { - return p0(rule.f_, rule.e_, 0, 0); - } - - prob_t p0(const std::vector<WordID>& vsrc, const std::vector<WordID>& vtrg, int start_src, int start_trg) const; - - const Model1& model1; - const 
prob_t kM1MIXTURE; // Model 1 mixture component - const prob_t kUNIFORM_MIXTURE; // uniform mixture component - const prob_t kUNIFORM_SOURCE; - const prob_t kUNIFORM_TARGET; -}; - -struct PhraseJointBase_BiDir { - explicit PhraseJointBase_BiDir(const Model1& m1, - const Model1& im1, - const double m1mixture, - const unsigned vocab_e_size, - const unsigned vocab_f_size) : - model1(m1), - invmodel1(im1), - kM1MIXTURE(m1mixture), - kUNIFORM_MIXTURE(1.0 - m1mixture), - kUNIFORM_SOURCE(1.0 / vocab_f_size), - kUNIFORM_TARGET(1.0 / vocab_e_size) { - assert(m1mixture >= 0.0 && m1mixture <= 1.0); - assert(vocab_e_size > 0); - } - - // return p0 of rule.e_ , rule.f_ - prob_t operator()(const TRule& rule) const { - return p0(rule.f_, rule.e_, 0, 0); - } - - prob_t p0(const std::vector<WordID>& vsrc, const std::vector<WordID>& vtrg, int start_src, int start_trg) const; - - const Model1& model1; - const Model1& invmodel1; - const prob_t kM1MIXTURE; // Model 1 mixture component - const prob_t kUNIFORM_MIXTURE; // uniform mixture component - const prob_t kUNIFORM_SOURCE; - const prob_t kUNIFORM_TARGET; -}; - -// base distribution for jump size multinomials -// basically p(0) = 0 and then, p(1) is max, and then -// you drop as you move to the max jump distance -struct JumpBase { - JumpBase(); - - const prob_t& operator()(int jump, unsigned src_len) const { - assert(jump != 0); - const std::map<int, prob_t>::const_iterator it = p[src_len].find(jump); - assert(it != p[src_len].end()); - return it->second; - } - std::vector<std::map<int, prob_t> > p; -}; - - -#endif diff --git a/gi/pf/bayes_lattice_score.cc b/gi/pf/bayes_lattice_score.cc deleted file mode 100644 index 70cb8dc2..00000000 --- a/gi/pf/bayes_lattice_score.cc +++ /dev/null @@ -1,309 +0,0 @@ -#include <iostream> -#include <queue> - -#include <boost/functional.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "inside_outside.h" -#include "hg.h" -#include "hg_io.h" -#include "bottom_up_parser.h" -#include "fdict.h" -#include "grammar.h" -#include "m.h" -#include "trule.h" -#include "tdict.h" -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp.h" -#include "ccrp_onetable.h" - -using namespace std; -using namespace tr1; -namespace po = boost::program_options; - -boost::shared_ptr<MT19937> prng; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("input,i",po::value<string>(),"Read parallel data from") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -unsigned ReadCorpus(const string& filename, - vector<Lattice>* e, - set<WordID>* vocab_e) { - e->clear(); - vocab_e->clear(); - ReadFile rf(filename); - istream* in = 
rf.stream(); - assert(*in); - string line; - unsigned toks = 0; - while(*in) { - getline(*in, line); - if (line.empty() && !*in) break; - e->push_back(Lattice()); - Lattice& le = e->back(); - LatticeTools::ConvertTextOrPLF(line, & le); - for (unsigned i = 0; i < le.size(); ++i) - for (unsigned j = 0; j < le[i].size(); ++j) - vocab_e->insert(le[i][j].label); - toks += le.size(); - } - return toks; -} - -struct BaseModel { - explicit BaseModel(unsigned tc) : - unif(1.0 / tc), p(prob_t::One()) {} - prob_t prob(const TRule& r) const { - return unif; - } - void increment(const TRule& r, MT19937* rng) { - p *= prob(r); - } - void decrement(const TRule& r, MT19937* rng) { - p /= prob(r); - } - prob_t Likelihood() const { - return p; - } - const prob_t unif; - prob_t p; -}; - -struct UnigramModel { - explicit UnigramModel(unsigned tc) : base(tc), crp(1,1,1,1), glue(1,1,1,1) {} - BaseModel base; - CCRP<TRule> crp; - CCRP<TRule> glue; - - prob_t Prob(const TRule& r) const { - if (r.Arity() != 0) { - return glue.prob(r, prob_t(0.5)); - } - return crp.prob(r, base.prob(r)); - } - - int Increment(const TRule& r, MT19937* rng) { - if (r.Arity() != 0) { - glue.increment(r, 0.5, rng); - return 0; - } else { - if (crp.increment(r, base.prob(r), rng)) { - base.increment(r, rng); - return 1; - } - return 0; - } - } - - int Decrement(const TRule& r, MT19937* rng) { - if (r.Arity() != 0) { - glue.decrement(r, rng); - return 0; - } else { - if (crp.decrement(r, rng)) { - base.decrement(r, rng); - return -1; - } - return 0; - } - } - - prob_t Likelihood() const { - prob_t p; - p.logeq(crp.log_crp_prob() + glue.log_crp_prob()); - p *= base.Likelihood(); - return p; - } - - void ResampleHyperparameters(MT19937* rng) { - crp.resample_hyperparameters(rng); - glue.resample_hyperparameters(rng); - cerr << " d=" << crp.discount() << ", s=" << crp.strength() << "\t STOP d=" << glue.discount() << ", s=" << glue.strength() << endl; - } -}; - -UnigramModel* plm; - -void SampleDerivation(const Hypergraph& hg, MT19937* rng, vector<unsigned>* sampled_deriv) { - vector<prob_t> node_probs; - Inside<prob_t, EdgeProb>(hg, &node_probs); - queue<unsigned> q; - q.push(hg.nodes_.size() - 2); - while(!q.empty()) { - unsigned cur_node_id = q.front(); -// cerr << "NODE=" << cur_node_id << endl; - q.pop(); - const Hypergraph::Node& node = hg.nodes_[cur_node_id]; - const unsigned num_in_edges = node.in_edges_.size(); - unsigned sampled_edge = 0; - if (num_in_edges == 1) { - sampled_edge = node.in_edges_[0]; - } else { - //prob_t z; - assert(num_in_edges > 1); - SampleSet<prob_t> ss; - for (unsigned j = 0; j < num_in_edges; ++j) { - const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]]; - prob_t p = edge.edge_prob_; - for (unsigned k = 0; k < edge.tail_nodes_.size(); ++k) - p *= node_probs[edge.tail_nodes_[k]]; - ss.add(p); -// cerr << log(ss[j]) << " ||| " << edge.rule_->AsString() << endl; - //z += p; - } -// for (unsigned j = 0; j < num_in_edges; ++j) { -// const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]]; -// cerr << exp(log(ss[j] / z)) << " ||| " << edge.rule_->AsString() << endl; -// } -// cerr << " --- \n"; - sampled_edge = node.in_edges_[rng->SelectSample(ss)]; - } - sampled_deriv->push_back(sampled_edge); - const Hypergraph::Edge& edge = hg.edges_[sampled_edge]; - for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j) { - q.push(edge.tail_nodes_[j]); - } - } -// for (unsigned i = 0; i < sampled_deriv->size(); ++i) { -// cerr << *hg.edges_[(*sampled_deriv)[i]].rule_ << endl; -// } -} - -void 
IncrementDerivation(const Hypergraph& hg, const vector<unsigned>& d, UnigramModel* plm, MT19937* rng) { - for (unsigned i = 0; i < d.size(); ++i) - plm->Increment(*hg.edges_[d[i]].rule_, rng); -} - -void DecrementDerivation(const Hypergraph& hg, const vector<unsigned>& d, UnigramModel* plm, MT19937* rng) { - for (unsigned i = 0; i < d.size(); ++i) - plm->Decrement(*hg.edges_[d[i]].rule_, rng); -} - -prob_t TotalProb(const Hypergraph& hg) { - return Inside<prob_t, EdgeProb>(hg); -} - -void IncrementLatticePath(const Hypergraph& hg, const vector<unsigned>& d, Lattice* pl) { - Lattice& lat = *pl; - for (int i = 0; i < d.size(); ++i) { - const Hypergraph::Edge& edge = hg.edges_[d[i]]; - if (edge.rule_->Arity() != 0) continue; - WordID sym = edge.rule_->e_[0]; - vector<LatticeArc>& las = lat[edge.i_]; - int dist = edge.j_ - edge.i_; - assert(dist > 0); - for (int j = 0; j < las.size(); ++j) { - if (las[j].dist2next == dist && - las[j].label == sym) { - las[j].cost += 1; - } - } - } -} - -int main(int argc, char** argv) { - po::variables_map conf; - - InitCommandLine(argc, argv, &conf); - vector<GrammarPtr> grammars(2); - grammars[0].reset(new GlueGrammar("S","X")); - const unsigned samples = conf["samples"].as<unsigned>(); - - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - vector<Lattice> corpuse; - set<WordID> vocabe; - cerr << "Reading corpus...\n"; - const unsigned toks = ReadCorpus(conf["input"].as<string>(), &corpuse, &vocabe); - cerr << "E-corpus size: " << corpuse.size() << " lattices\t (" << vocabe.size() << " word types)\n"; - UnigramModel lm(vocabe.size()); - vector<Hypergraph> hgs(corpuse.size()); - vector<vector<unsigned> > derivs(corpuse.size()); - for (int i = 0; i < corpuse.size(); ++i) { - grammars[1].reset(new PassThroughGrammar(corpuse[i], "X")); - ExhaustiveBottomUpParser parser("S", grammars); - bool res = parser.Parse(corpuse[i], &hgs[i]); // exhaustive parse - assert(res); - } - - double csamples = 0; - for (int SS=0; SS < samples; ++SS) { - const bool is_last = ((samples - 1) == SS); - prob_t dlh = prob_t::One(); - bool record_sample = (SS > (samples * 1 / 3) && (SS % 5 == 3)); - if (record_sample) csamples++; - for (int ci = 0; ci < corpuse.size(); ++ci) { - Lattice& lat = corpuse[ci]; - Hypergraph& hg = hgs[ci]; - vector<unsigned>& d = derivs[ci]; - if (!is_last) DecrementDerivation(hg, d, &lm, &rng); - for (unsigned i = 0; i < hg.edges_.size(); ++i) { - TRule& r = *hg.edges_[i].rule_; - if (r.Arity() != 0) - hg.edges_[i].edge_prob_ = prob_t::One(); - else - hg.edges_[i].edge_prob_ = lm.Prob(r); - } - if (!is_last) { - d.clear(); - SampleDerivation(hg, &rng, &d); - IncrementDerivation(hg, derivs[ci], &lm, &rng); - } else { - prob_t p = TotalProb(hg); - dlh *= p; - cerr << " p(sentence) = " << log(p) << "\t" << log(dlh) << endl; - } - if (record_sample) IncrementLatticePath(hg, derivs[ci], &lat); - } - double llh = log(lm.Likelihood()); - cerr << "LLH=" << llh << "\tENTROPY=" << (-llh / log(2) / toks) << "\tPPL=" << pow(2, -llh / log(2) / toks) << endl; - if (SS % 10 == 9) lm.ResampleHyperparameters(&rng); - if (is_last) { - double z = log(dlh); - cerr << "TOTAL_PROB=" << z << "\tENTROPY=" << (-z / log(2) / toks) << "\tPPL=" << pow(2, -z / log(2) / toks) << endl; - } - } - cerr << lm.crp << endl; - cerr << lm.glue << endl; - for (int i = 0; i < corpuse.size(); ++i) { - for (int j = 0; j < corpuse[i].size(); ++j) - for (int k = 0; k < corpuse[i][j].size(); ++k) { - 
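-        // cost holds the number of recorded samples whose derivation used
-        // this lattice arc; normalize by the sample count, add 1e-3 to
-        // avoid log(0), and store as a log value for the PLF output below.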
corpuse[i][j][k].cost /= csamples; - corpuse[i][j][k].cost += 1e-3; - corpuse[i][j][k].cost = log(corpuse[i][j][k].cost); - } - cout << HypergraphIO::AsPLF(corpuse[i]) << endl; - } - return 0; -} - diff --git a/gi/pf/brat.cc b/gi/pf/brat.cc deleted file mode 100644 index 832f22cf..00000000 --- a/gi/pf/brat.cc +++ /dev/null @@ -1,543 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/functional.hpp> -#include <boost/multi_array.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "viterbi.h" -#include "hg.h" -#include "trule.h" -#include "tdict.h" -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp_nt.h" -#include "cfg_wfst_composer.h" - -using namespace std; -using namespace tr1; -namespace po = boost::program_options; - -static unsigned kMAX_SRC_PHRASE; -static unsigned kMAX_TRG_PHRASE; -struct FSTState; - -double log_poisson(unsigned x, const double& lambda) { - assert(lambda > 0.0); - return log(lambda) * x - lgamma(x + 1) - lambda; -} - -struct ConditionalBase { - explicit ConditionalBase(const double m1mixture, const unsigned vocab_e_size, const string& model1fname) : - kM1MIXTURE(m1mixture), - kUNIFORM_MIXTURE(1.0 - m1mixture), - kUNIFORM_TARGET(1.0 / vocab_e_size), - kNULL(TD::Convert("<eps>")) { - assert(m1mixture >= 0.0 && m1mixture <= 1.0); - assert(vocab_e_size > 0); - LoadModel1(model1fname); - } - - void LoadModel1(const string& fname) { - cerr << "Loading Model 1 parameters from " << fname << " ..." << endl; - ReadFile rf(fname); - istream& in = *rf.stream(); - string line; - unsigned lc = 0; - while(getline(in, line)) { - ++lc; - int cur = 0; - int start = 0; - while(cur < line.size() && line[cur] != ' ') { ++cur; } - assert(cur != line.size()); - line[cur] = 0; - const WordID src = TD::Convert(&line[0]); - ++cur; - start = cur; - while(cur < line.size() && line[cur] != ' ') { ++cur; } - assert(cur != line.size()); - line[cur] = 0; - WordID trg = TD::Convert(&line[start]); - const double logprob = strtod(&line[cur + 1], NULL); - if (src >= ttable.size()) ttable.resize(src + 1); - ttable[src][trg].logeq(logprob); - } - cerr << " read " << lc << " parameters.\n"; - } - - // return logp0 of rule.e_ | rule.f_ - prob_t operator()(const TRule& rule) const { - const int flen = rule.f_.size(); - const int elen = rule.e_.size(); - prob_t uniform_src_alignment; uniform_src_alignment.logeq(-log(flen + 1)); - prob_t p; - p.logeq(log_poisson(elen, flen + 0.01)); // elen | flen ~Pois(flen + 0.01) - for (int i = 0; i < elen; ++i) { // for each position i in e-RHS - const WordID trg = rule.e_[i]; - prob_t tp = prob_t::Zero(); - for (int j = -1; j < flen; ++j) { - const WordID src = j < 0 ? 
kNULL : rule.f_[j]; - const map<WordID, prob_t>::const_iterator it = ttable[src].find(trg); - if (it != ttable[src].end()) { - tp += kM1MIXTURE * it->second; - } - tp += kUNIFORM_MIXTURE * kUNIFORM_TARGET; - } - tp *= uniform_src_alignment; // draw a_i ~uniform - p *= tp; // draw e_i ~Model1(f_a_i) / uniform - } - return p; - } - - const prob_t kM1MIXTURE; // Model 1 mixture component - const prob_t kUNIFORM_MIXTURE; // uniform mixture component - const prob_t kUNIFORM_TARGET; - const WordID kNULL; - vector<map<WordID, prob_t> > ttable; -}; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("input,i",po::value<string>(),"Read parallel data from") - ("max_src_phrase",po::value<unsigned>()->default_value(3),"Maximum length of source language phrases") - ("max_trg_phrase",po::value<unsigned>()->default_value(3),"Maximum length of target language phrases") - ("model1,m",po::value<string>(),"Model 1 parameters (used in base distribution)") - ("model1_interpolation_weight",po::value<double>()->default_value(0.95),"Mixing proportion of model 1 with uniform target distribution") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -void ReadParallelCorpus(const string& filename, - vector<vector<WordID> >* f, - vector<vector<int> >* e, - set<int>* vocab_f, - set<int>* vocab_e) { - f->clear(); - e->clear(); - vocab_f->clear(); - vocab_e->clear(); - istream* in; - if (filename == "-") - in = &cin; - else - in = new ifstream(filename.c_str()); - assert(*in); - string line; - const WordID kDIV = TD::Convert("|||"); - vector<WordID> tmp; - while(*in) { - getline(*in, line); - if (line.empty() && !*in) break; - e->push_back(vector<int>()); - f->push_back(vector<int>()); - vector<int>& le = e->back(); - vector<int>& lf = f->back(); - tmp.clear(); - TD::ConvertSentence(line, &tmp); - bool isf = true; - for (unsigned i = 0; i < tmp.size(); ++i) { - const int cur = tmp[i]; - if (isf) { - if (kDIV == cur) { isf = false; } else { - lf.push_back(cur); - vocab_f->insert(cur); - } - } else { - assert(cur != kDIV); - le.push_back(cur); - vocab_e->insert(cur); - } - } - assert(isf == false); - } - if (in != &cin) delete in; -} - -struct UniphraseLM { - UniphraseLM(const vector<vector<int> >& corpus, - const set<int>& vocab, - const po::variables_map& conf) : - phrases_(1,1), - gen_(1,1), - corpus_(corpus), - uniform_word_(1.0 / vocab.size()), - gen_p0_(0.5), - p_end_(0.5), - use_poisson_(conf.count("poisson_length") > 0) {} - - void ResampleHyperparameters(MT19937* rng) { - phrases_.resample_hyperparameters(rng); - gen_.resample_hyperparameters(rng); - cerr << " " << phrases_.alpha(); - } - - CCRP_NoTable<vector<int> > phrases_; - CCRP_NoTable<bool> gen_; - 
vector<vector<bool> > z_; // z_[i][j]: is there a phrase boundary after the jth word of sentence i - const vector<vector<int> >& corpus_; - const double uniform_word_; - const double gen_p0_; - const double p_end_; // in base length distribution, p of the end of a phrase - const bool use_poisson_; -}; - -struct Reachability { - boost::multi_array<bool, 4> edges; // edges[src_covered][trg_covered][src_delta][trg_delta] is this edge worth exploring? - boost::multi_array<short, 2> max_src_delta; // msd[src_covered][trg_covered] -- the largest src delta that's valid - - Reachability(int srclen, int trglen, int src_max_phrase_len, int trg_max_phrase_len) : - edges(boost::extents[srclen][trglen][src_max_phrase_len+1][trg_max_phrase_len+1]), - max_src_delta(boost::extents[srclen][trglen]) { - ComputeReachability(srclen, trglen, src_max_phrase_len, trg_max_phrase_len); - } - - private: - struct SState { - SState() : prev_src_covered(), prev_trg_covered() {} - SState(int i, int j) : prev_src_covered(i), prev_trg_covered(j) {} - int prev_src_covered; - int prev_trg_covered; - }; - - struct NState { - NState() : next_src_covered(), next_trg_covered() {} - NState(int i, int j) : next_src_covered(i), next_trg_covered(j) {} - int next_src_covered; - int next_trg_covered; - }; - - void ComputeReachability(int srclen, int trglen, int src_max_phrase_len, int trg_max_phrase_len) { - typedef boost::multi_array<vector<SState>, 2> array_type; - array_type a(boost::extents[srclen + 1][trglen + 1]); - a[0][0].push_back(SState()); - for (int i = 0; i < srclen; ++i) { - for (int j = 0; j < trglen; ++j) { - if (a[i][j].size() == 0) continue; - const SState prev(i,j); - for (int k = 1; k <= src_max_phrase_len; ++k) { - if ((i + k) > srclen) continue; - for (int l = 1; l <= trg_max_phrase_len; ++l) { - if ((j + l) > trglen) continue; - a[i + k][j + l].push_back(prev); - } - } - } - } - a[0][0].clear(); - cerr << "Final cell contains " << a[srclen][trglen].size() << " back pointers\n"; - assert(a[srclen][trglen].size() > 0); - - typedef boost::multi_array<bool, 2> rarray_type; - rarray_type r(boost::extents[srclen + 1][trglen + 1]); -// typedef boost::multi_array<vector<NState>, 2> narray_type; -// narray_type b(boost::extents[srclen + 1][trglen + 1]); - r[srclen][trglen] = true; - for (int i = srclen; i >= 0; --i) { - for (int j = trglen; j >= 0; --j) { - vector<SState>& prevs = a[i][j]; - if (!r[i][j]) { prevs.clear(); } -// const NState nstate(i,j); - for (int k = 0; k < prevs.size(); ++k) { - r[prevs[k].prev_src_covered][prevs[k].prev_trg_covered] = true; - int src_delta = i - prevs[k].prev_src_covered; - edges[prevs[k].prev_src_covered][prevs[k].prev_trg_covered][src_delta][j - prevs[k].prev_trg_covered] = true; - short &msd = max_src_delta[prevs[k].prev_src_covered][prevs[k].prev_trg_covered]; - if (src_delta > msd) msd = src_delta; -// b[prevs[k].prev_src_covered][prevs[k].prev_trg_covered].push_back(nstate); - } - } - } - assert(!edges[0][0][1][0]); - assert(!edges[0][0][0][1]); - assert(!edges[0][0][0][0]); - cerr << " MAX SRC DELTA[0][0] = " << max_src_delta[0][0] << endl; - assert(max_src_delta[0][0] > 0); - //cerr << "First cell contains " << b[0][0].size() << " forward pointers\n"; - //for (int i = 0; i < b[0][0].size(); ++i) { - // cerr << " -> (" << b[0][0][i].next_src_covered << "," << b[0][0][i].next_trg_covered << ")\n"; - //} - } -}; - -ostream& operator<<(ostream& os, const FSTState& q); -struct FSTState { - explicit FSTState(int src_size) : - trg_covered_(), - src_covered_(), - src_coverage_(src_size) {} - - FSTState(short 
trg_covered, short src_covered, const vector<bool>& src_coverage, const vector<short>& src_prefix) : - trg_covered_(trg_covered), - src_covered_(src_covered), - src_coverage_(src_coverage), - src_prefix_(src_prefix) { - if (src_coverage_.size() == src_covered) { - assert(src_prefix.size() == 0); - } - } - - // if we extend by the word at src_position, what are - // the next states that are reachable and lie on a valid - // path to the final state? - vector<FSTState> Extensions(int src_position, int src_len, int trg_len, const Reachability& r) const { - assert(src_position < src_coverage_.size()); - if (src_coverage_[src_position]) { - cerr << "Trying to extend " << *this << " with position " << src_position << endl; - abort(); - } - vector<bool> ncvg = src_coverage_; - ncvg[src_position] = true; - - vector<FSTState> res; - const int trg_remaining = trg_len - trg_covered_; - if (trg_remaining <= 0) { - cerr << "Target appears to have been covered: " << *this << " (trg_len=" << trg_len << ",trg_covered=" << trg_covered_ << ")" << endl; - abort(); - } - const int src_remaining = src_len - src_covered_; - if (src_remaining <= 0) { - cerr << "Source appears to have been covered: " << *this << endl; - abort(); - } - - for (int tc = 1; tc <= kMAX_TRG_PHRASE; ++tc) { - if (r.edges[src_covered_][trg_covered_][src_prefix_.size() + 1][tc]) { - int nc = src_prefix_.size() + 1 + src_covered_; - res.push_back(FSTState(trg_covered_ + tc, nc, ncvg, vector<short>())); - } - } - - if ((src_prefix_.size() + 1) < r.max_src_delta[src_covered_][trg_covered_]) { - vector<short> nsp = src_prefix_; - nsp.push_back(src_position); - res.push_back(FSTState(trg_covered_, src_covered_, ncvg, nsp)); - } - - if (res.size() == 0) { - cerr << *this << " can't be extended!\n"; - abort(); - } - return res; - } - - short trg_covered_, src_covered_; - vector<bool> src_coverage_; - vector<short> src_prefix_; -}; -bool operator<(const FSTState& q, const FSTState& r) { - if (q.trg_covered_ != r.trg_covered_) return q.trg_covered_ < r.trg_covered_; - if (q.src_covered_!= r.src_covered_) return q.src_covered_ < r.src_covered_; - if (q.src_coverage_ != r.src_coverage_) return q.src_coverage_ < r.src_coverage_; - return q.src_prefix_ < r.src_prefix_; -} - -ostream& operator<<(ostream& os, const FSTState& q) { - os << "[" << q.trg_covered_ << " : "; - for (int i = 0; i < q.src_coverage_.size(); ++i) - os << q.src_coverage_[i]; - os << " : <"; - for (int i = 0; i < q.src_prefix_.size(); ++i) { - if (i != 0) os << ' '; - os << q.src_prefix_[i]; - } - return os << ">]"; -} - -struct MyModel { - MyModel(ConditionalBase& rcp0) : rp0(rcp0) {} - typedef unordered_map<vector<WordID>, CCRP_NoTable<TRule>, boost::hash<vector<WordID> > > SrcToRuleCRPMap; - - void DecrementRule(const TRule& rule) { - SrcToRuleCRPMap::iterator it = rules.find(rule.f_); - assert(it != rules.end()); - it->second.decrement(rule); - if (it->second.num_customers() == 0) rules.erase(it); - } - - void IncrementRule(const TRule& rule) { - SrcToRuleCRPMap::iterator it = rules.find(rule.f_); - if (it == rules.end()) { - CCRP_NoTable<TRule> crp(1,1); - it = rules.insert(make_pair(rule.f_, crp)).first; - } - it->second.increment(rule); - } - - // conditioned on rule.f_ - prob_t RuleConditionalProbability(const TRule& rule) const { - const prob_t base = rp0(rule); - SrcToRuleCRPMap::const_iterator it = rules.find(rule.f_); - if (it == rules.end()) { - return base; - } else { - const double lp = it->second.logprob(rule, log(base)); - prob_t q; q.logeq(lp); - return q; - } - } - 
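- // rp0 is the fixed base distribution p0(e|f); rules maps each source phrase f_ - // to its own CRP, so frequently used rules are effectively cached while novel - // rules back off to the base distribution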
- const ConditionalBase& rp0; - SrcToRuleCRPMap rules; -}; - -struct MyFST : public WFST { - MyFST(const vector<WordID>& ssrc, const vector<WordID>& strg, MyModel* m) : - src(ssrc), trg(strg), - r(src.size(),trg.size(),kMAX_SRC_PHRASE, kMAX_TRG_PHRASE), - model(m) { - FSTState in(src.size()); - cerr << " INIT: " << in << endl; - init = GetNode(in); - for (int i = 0; i < in.src_coverage_.size(); ++i) in.src_coverage_[i] = true; - in.src_covered_ = src.size(); - in.trg_covered_ = trg.size(); - cerr << "FINAL: " << in << endl; - final = GetNode(in); - } - virtual const WFSTNode* Final() const; - virtual const WFSTNode* Initial() const; - - const WFSTNode* GetNode(const FSTState& q); - map<FSTState, boost::shared_ptr<WFSTNode> > m; - const vector<WordID>& src; - const vector<WordID>& trg; - Reachability r; - const WFSTNode* init; - const WFSTNode* final; - MyModel* model; -}; - -struct MyNode : public WFSTNode { - MyNode(const FSTState& q, MyFST* fst) : state(q), container(fst) {} - virtual vector<pair<const WFSTNode*, TRulePtr> > ExtendInput(unsigned srcindex) const; - const FSTState state; - mutable MyFST* container; -}; - -vector<pair<const WFSTNode*, TRulePtr> > MyNode::ExtendInput(unsigned srcindex) const { - cerr << "EXTEND " << state << " with " << srcindex << endl; - vector<FSTState> ext = state.Extensions(srcindex, container->src.size(), container->trg.size(), container->r); - vector<pair<const WFSTNode*,TRulePtr> > res(ext.size()); - for (unsigned i = 0; i < ext.size(); ++i) { - res[i].first = container->GetNode(ext[i]); - if (ext[i].src_prefix_.size() == 0) { - const unsigned trg_from = state.trg_covered_; - const unsigned trg_to = ext[i].trg_covered_; - const unsigned prev_prfx_size = state.src_prefix_.size(); - res[i].second.reset(new TRule); - res[i].second->lhs_ = -TD::Convert("X"); - vector<WordID>& src = res[i].second->f_; - vector<WordID>& trg = res[i].second->e_; - src.resize(prev_prfx_size + 1); - for (unsigned j = 0; j < prev_prfx_size; ++j) - src[j] = container->src[state.src_prefix_[j]]; - src[prev_prfx_size] = container->src[srcindex]; - for (unsigned j = trg_from; j < trg_to; ++j) - trg.push_back(container->trg[j]); - res[i].second->scores_.set_value(FD::Convert("Proposal"), log(container->model->RuleConditionalProbability(*res[i].second))); - } - } - return res; -} - -const WFSTNode* MyFST::GetNode(const FSTState& q) { - boost::shared_ptr<WFSTNode>& res = m[q]; - if (!res) { - res.reset(new MyNode(q, this)); - } - return &*res; -} - -const WFSTNode* MyFST::Final() const { - return final; -} - -const WFSTNode* MyFST::Initial() const { - return init; -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - kMAX_TRG_PHRASE = conf["max_trg_phrase"].as<unsigned>(); - kMAX_SRC_PHRASE = conf["max_src_phrase"].as<unsigned>(); - - if (!conf.count("model1")) { - cerr << argv[0] << ": Please use --model1 to specify model 1 parameters\n"; - return 1; - } - boost::shared_ptr<MT19937> prng; - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - - vector<vector<int> > corpuse, corpusf; - set<int> vocabe, vocabf; - ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe); - cerr << "f-Corpus size: " << corpusf.size() << " sentences\n"; - cerr << "f-Vocabulary size: " << vocabf.size() << " types\n"; - cerr << "e-Corpus size: " << corpuse.size() << " sentences\n"; - cerr << "e-Vocabulary size: " << vocabe.size() 
<< " types\n"; - assert(corpusf.size() == corpuse.size()); - - ConditionalBase lp0(conf["model1_interpolation_weight"].as<double>(), - vocabe.size(), - conf["model1"].as<string>()); - MyModel m(lp0); - - TRule x("[X] ||| kAnwntR myN ||| at the convent ||| 0"); - m.IncrementRule(x); - TRule y("[X] ||| nY dyN ||| gave ||| 0"); - m.IncrementRule(y); - - - MyFST fst(corpusf[0], corpuse[0], &m); - ifstream in("./kimura.g"); - assert(in); - CFG_WFSTComposer comp(fst); - Hypergraph hg; - bool succeed = comp.Compose(&in, &hg); - hg.PrintGraphviz(); - if (succeed) { cerr << "SUCCESS.\n"; } else { cerr << "FAILURE REPORTED.\n"; } - -#if 0 - ifstream in2("./amnabooks.g"); - assert(in2); - MyFST fst2(corpusf[1], corpuse[1], &m); - CFG_WFSTComposer comp2(fst2); - Hypergraph hg2; - bool succeed2 = comp2.Compose(&in2, &hg2); - if (succeed2) { cerr << "SUCCESS.\n"; } else { cerr << "FAILURE REPORTED.\n"; } -#endif - - SparseVector<double> w; w.set_value(FD::Convert("Proposal"), 1.0); - hg.Reweight(w); - cerr << ViterbiFTree(hg) << endl; - return 0; -} - diff --git a/gi/pf/cbgi.cc b/gi/pf/cbgi.cc deleted file mode 100644 index 97f1ba34..00000000 --- a/gi/pf/cbgi.cc +++ /dev/null @@ -1,330 +0,0 @@ -#include <queue> -#include <sstream> -#include <iostream> - -#include <boost/unordered_map.hpp> -#include <boost/functional/hash.hpp> - -#include "sampler.h" -#include "filelib.h" -#include "hg_io.h" -#include "hg.h" -#include "ccrp_nt.h" -#include "trule.h" -#include "inside_outside.h" - -using namespace std; -using namespace std::tr1; - -double log_poisson(unsigned x, const double& lambda) { - assert(lambda > 0.0); - return log(lambda) * x - lgamma(x + 1) - lambda; -} - -double log_decay(unsigned x, const double& b) { - assert(b > 1.0); - assert(x > 0); - return log(b - 1) - x * log(b); -} - -struct SimpleBase { - SimpleBase(unsigned esize, unsigned fsize, unsigned ntsize = 144) : - uniform_e(-log(esize)), - uniform_f(-log(fsize)), - uniform_nt(-log(ntsize)) { - } - - // binomial coefficient - static double choose(unsigned n, unsigned k) { - return exp(lgamma(n + 1) - lgamma(k + 1) - lgamma(n - k + 1)); - } - - // count the number of patterns of terminals and NTs in the rule, given elen and flen - static double log_number_of_patterns(const unsigned flen, const unsigned elen) { - static vector<vector<double> > counts; - if (elen >= counts.size()) counts.resize(elen + 1); - if (flen >= counts[elen].size()) counts[elen].resize(flen + 1); - double& count = counts[elen][flen]; - if (count) return log(count); - const unsigned max_arity = min(elen, flen); - for (unsigned a = 0; a <= max_arity; ++a) - count += choose(elen, a) * choose(flen, a); - return log(count); - } - - // return logp0 of rule | LHS - double operator()(const TRule& rule) const { - const unsigned flen = rule.f_.size(); - const unsigned elen = rule.e_.size(); -#if 0 - double p = 0; - p += log_poisson(flen, 0.5); // flen ~Pois(0.5) - p += log_poisson(elen, flen); // elen | flen ~Pois(flen) - p -= log_number_of_patterns(flen, elen); // pattern | flen,elen ~Uniform - for (unsigned i = 0; i < flen; ++i) { // for each position in f-RHS - if (rule.f_[i] <= 0) // according to pattern - p += uniform_nt; // draw NT ~Uniform - else - p += uniform_f; // draw f terminal ~Uniform - } - p -= lgamma(rule.Arity() + 1); // draw permutation ~Uniform - for (unsigned i = 0; i < elen; ++i) { // for each position in e-RHS - if (rule.e_[i] > 0) // according to pattern - p += uniform_e; // draw e|f term ~Uniform - // TODO this should prob be model 1 - } -#else - double p = 
0; - bool is_abstract = rule.f_[0] <= 0; - p += log(0.5); - if (is_abstract) { - if (flen == 2) p += log(0.99); else p += log(0.01); - } else { - p += log_decay(flen, 3); - } - - for (unsigned i = 0; i < flen; ++i) { // for each position in f-RHS - if (rule.f_[i] <= 0) // according to pattern - p += uniform_nt; // draw NT ~Uniform - else - p += uniform_f; // draw f terminal ~Uniform - } -#endif - return p; - } - const double uniform_e; - const double uniform_f; - const double uniform_nt; - vector<double> arities; -}; - -MT19937* rng = NULL; - -template <typename Base> -struct MHSamplerEdgeProb { - MHSamplerEdgeProb(const Hypergraph& hg, - const map<int, CCRP_NoTable<TRule> >& rdp, - const Base& logp0, - const bool exclude_multiword_terminals) : edge_probs(hg.edges_.size()) { - for (int i = 0; i < edge_probs.size(); ++i) { - const TRule& rule = *hg.edges_[i].rule_; - const map<int, CCRP_NoTable<TRule> >::const_iterator it = rdp.find(rule.lhs_); - assert(it != rdp.end()); - const CCRP_NoTable<TRule>& crp = it->second; - edge_probs[i].logeq(crp.logprob(rule, logp0(rule))); - if (exclude_multiword_terminals && rule.f_[0] > 0 && rule.f_.size() > 1) - edge_probs[i] = prob_t::Zero(); - } - } - inline prob_t operator()(const Hypergraph::Edge& e) const { - return edge_probs[e.id_]; - } - prob_t DerivationProb(const vector<int>& d) const { - prob_t p = prob_t::One(); - for (unsigned i = 0; i < d.size(); ++i) - p *= edge_probs[d[i]]; - return p; - } - vector<prob_t> edge_probs; -}; - -template <typename Base> -struct ModelAndData { - ModelAndData() : - base_lh(prob_t::One()), - logp0(10000, 10000), - mh_samples(), - mh_rejects() {} - - void SampleCorpus(const string& hgpath, int i); - void ResampleHyperparameters() { - for (map<int, CCRP_NoTable<TRule> >::iterator it = rules.begin(); it != rules.end(); ++it) - it->second.resample_hyperparameters(rng); - } - - CCRP_NoTable<TRule>& RuleCRP(int lhs) { - map<int, CCRP_NoTable<TRule> >::iterator it = rules.find(lhs); - if (it == rules.end()) { - rules.insert(make_pair(lhs, CCRP_NoTable<TRule>(1,1))); - it = rules.find(lhs); - } - return it->second; - } - - void IncrementRule(const TRule& rule) { - CCRP_NoTable<TRule>& crp = RuleCRP(rule.lhs_); - if (crp.increment(rule)) { - prob_t p; p.logeq(logp0(rule)); - base_lh *= p; - } - } - - void DecrementRule(const TRule& rule) { - CCRP_NoTable<TRule>& crp = RuleCRP(rule.lhs_); - if (crp.decrement(rule)) { - prob_t p; p.logeq(logp0(rule)); - base_lh /= p; - } - } - - void DecrementDerivation(const Hypergraph& hg, const vector<int>& d) { - for (unsigned i = 0; i < d.size(); ++i) { - const TRule& rule = *hg.edges_[d[i]].rule_; - DecrementRule(rule); - } - } - - void IncrementDerivation(const Hypergraph& hg, const vector<int>& d) { - for (unsigned i = 0; i < d.size(); ++i) { - const TRule& rule = *hg.edges_[d[i]].rule_; - IncrementRule(rule); - } - } - - prob_t Likelihood() const { - prob_t p = prob_t::One(); - for (map<int, CCRP_NoTable<TRule> >::const_iterator it = rules.begin(); it != rules.end(); ++it) { - prob_t q; q.logeq(it->second.log_crp_prob()); - p *= q; - } - p *= base_lh; - return p; - } - - void ResampleDerivation(const Hypergraph& hg, vector<int>* sampled_derivation); - - map<int, CCRP_NoTable<TRule> > rules; // [lhs] -> distribution over RHSs - prob_t base_lh; - SimpleBase logp0; - vector<vector<int> > samples; // sampled derivations - unsigned int mh_samples; - unsigned int mh_rejects; -}; - -template <typename Base> -void ModelAndData<Base>::SampleCorpus(const string& hgpath, int n) { - 
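- // read up to n alignment hypergraphs from hgpath, then Gibbs-sample a derivation - // for each; after burn-in, every 20th sample's rule counts are accumulated in - // acc/tot and printed as log relative frequencies at the end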
vector<Hypergraph> hgs(n); hgs.clear(); - boost::unordered_map<TRule, unsigned> acc; - map<int, unsigned> tot; - for (int i = 0; i < n; ++i) { - ostringstream os; - os << hgpath << '/' << i << ".json.gz"; - if (!FileExists(os.str())) continue; - hgs.push_back(Hypergraph()); - ReadFile rf(os.str()); - HypergraphIO::ReadFromJSON(rf.stream(), &hgs.back()); - } - cerr << "Read " << hgs.size() << " alignment hypergraphs.\n"; - samples.resize(hgs.size()); - const unsigned SAMPLES = 2000; - const unsigned burnin = 3 * SAMPLES / 4; - const unsigned every = 20; - for (unsigned s = 0; s < SAMPLES; ++s) { - if (s % 10 == 0) { - if (s > 0) { cerr << endl; ResampleHyperparameters(); } - cerr << "[" << s << " LLH=" << log(Likelihood()) << " REJECTS=" << ((double)mh_rejects / mh_samples) << " LHS's=" << rules.size() << " base=" << log(base_lh) << "] "; - } - cerr << '.'; - for (unsigned i = 0; i < hgs.size(); ++i) { - ResampleDerivation(hgs[i], &samples[i]); - if (s > burnin && s % every == 0) { - for (unsigned j = 0; j < samples[i].size(); ++j) { - const TRule& rule = *hgs[i].edges_[samples[i][j]].rule_; - ++acc[rule]; - ++tot[rule.lhs_]; - } - } - } - } - cerr << endl; - for (boost::unordered_map<TRule,unsigned>::iterator it = acc.begin(); it != acc.end(); ++it) { - cout << it->first << " MyProb=" << log(it->second)-log(tot[it->first.lhs_]) << endl; - } -} - -template <typename Base> -void ModelAndData<Base>::ResampleDerivation(const Hypergraph& hg, vector<int>* sampled_deriv) { - vector<int> cur; - cur.swap(*sampled_deriv); - - const prob_t p_cur = Likelihood(); - DecrementDerivation(hg, cur); - if (cur.empty()) { - // first iteration, create restaurants - for (int i = 0; i < hg.edges_.size(); ++i) - RuleCRP(hg.edges_[i].rule_->lhs_); - } - MHSamplerEdgeProb<SimpleBase> wf(hg, rules, logp0, cur.empty()); -// MHSamplerEdgeProb<SimpleBase> wf(hg, rules, logp0, false); - const prob_t q_cur = wf.DerivationProb(cur); - vector<prob_t> node_probs; - Inside<prob_t, MHSamplerEdgeProb<SimpleBase> >(hg, &node_probs, wf); - queue<unsigned> q; - q.push(hg.nodes_.size() - 3); - while(!q.empty()) { - unsigned cur_node_id = q.front(); -// cerr << "NODE=" << cur_node_id << endl; - q.pop(); - const Hypergraph::Node& node = hg.nodes_[cur_node_id]; - const unsigned num_in_edges = node.in_edges_.size(); - unsigned sampled_edge = 0; - if (num_in_edges == 1) { - sampled_edge = node.in_edges_[0]; - } else { - prob_t z; - assert(num_in_edges > 1); - SampleSet<prob_t> ss; - for (unsigned j = 0; j < num_in_edges; ++j) { - const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]]; - prob_t p = wf.edge_probs[edge.id_]; // edge proposal prob - for (unsigned k = 0; k < edge.tail_nodes_.size(); ++k) - p *= node_probs[edge.tail_nodes_[k]]; - ss.add(p); -// cerr << log(ss[j]) << " ||| " << edge.rule_->AsString() << endl; - z += p; - } -// for (unsigned j = 0; j < num_in_edges; ++j) { -// const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]]; -// cerr << exp(log(ss[j] / z)) << " ||| " << edge.rule_->AsString() << endl; -// } -// cerr << " --- \n"; - sampled_edge = node.in_edges_[rng->SelectSample(ss)]; - } - sampled_deriv->push_back(sampled_edge); - const Hypergraph::Edge& edge = hg.edges_[sampled_edge]; - for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j) { - q.push(edge.tail_nodes_[j]); - } - } - IncrementDerivation(hg, *sampled_deriv); - -// cerr << "sampled derivation contains " << sampled_deriv->size() << " edges\n"; -// cerr << "DERIV:\n"; -// for (int i = 0; i < sampled_deriv->size(); ++i) { -// cerr << " " << 
hg.edges_[(*sampled_deriv)[i]].rule_->AsString() << endl; -// } - - if (cur.empty()) return; // accept first sample - - ++mh_samples; - // only need to do MH if proposal is different to current state - if (cur != *sampled_deriv) { - const prob_t q_prop = wf.DerivationProb(*sampled_deriv); - const prob_t p_prop = Likelihood(); - if (!rng->AcceptMetropolisHastings(p_prop, p_cur, q_prop, q_cur)) { - ++mh_rejects; - DecrementDerivation(hg, *sampled_deriv); - IncrementDerivation(hg, cur); - swap(cur, *sampled_deriv); - } - } -} - -int main(int argc, char** argv) { - rng = new MT19937; - ModelAndData<SimpleBase> m; - m.SampleCorpus("./hgs", 50); - // m.SampleCorpus("./btec/hgs", 5000); - return 0; -} - diff --git a/gi/pf/cfg_wfst_composer.cc b/gi/pf/cfg_wfst_composer.cc deleted file mode 100644 index 21d5ec5b..00000000 --- a/gi/pf/cfg_wfst_composer.cc +++ /dev/null @@ -1,731 +0,0 @@ -#include "cfg_wfst_composer.h" - -#include <iostream> -#include <fstream> -#include <map> -#include <queue> -#include <tr1/unordered_map> -#include <tr1/unordered_set> - -#include <boost/shared_ptr.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> -#include "fast_lexical_cast.hpp" - -#include "phrasetable_fst.h" -#include "sparse_vector.h" -#include "tdict.h" -#include "hg.h" -#include "hg_remove_eps.h" - -namespace po = boost::program_options; -using namespace std; -using namespace std::tr1; - -WFSTNode::~WFSTNode() {} -WFST::~WFST() {} - -// Define the following macro if you want to see lots of debugging output -// when you run the chart parser -#undef DEBUG_CHART_PARSER - -// A few constants used by the chart parser /////////////// -static const int kMAX_NODES = 2000000; -static const string kPHRASE_STRING = "X"; -static bool constants_need_init = true; -static WordID kUNIQUE_START; -static WordID kPHRASE; -static TRulePtr kX1X2; -static TRulePtr kX1; -static WordID kEPS; -static TRulePtr kEPSRule; - -static void InitializeConstants() { - if (constants_need_init) { - kPHRASE = TD::Convert(kPHRASE_STRING) * -1; - kUNIQUE_START = TD::Convert("S") * -1; - kX1X2.reset(new TRule("[X] ||| [X,1] [X,2] ||| [X,1] [X,2]")); - kX1.reset(new TRule("[X] ||| [X,1] ||| [X,1]")); - kEPSRule.reset(new TRule("[X] ||| <eps> ||| <eps>")); - kEPS = TD::Convert("<eps>"); - constants_need_init = false; - } -} -//////////////////////////////////////////////////////////// - -class EGrammarNode { - friend bool CFG_WFSTComposer::Compose(const Hypergraph& src_forest, Hypergraph* trg_forest); - friend void AddGrammarRule(const string& r, map<WordID, EGrammarNode>* g); - public: -#ifdef DEBUG_CHART_PARSER - string hint; -#endif - EGrammarNode() : is_some_rule_complete(false), is_root(false) {} - const map<WordID, EGrammarNode>& GetTerminals() const { return tptr; } - const map<WordID, EGrammarNode>& GetNonTerminals() const { return ntptr; } - bool HasNonTerminals() const { return (!ntptr.empty()); } - bool HasTerminals() const { return (!tptr.empty()); } - bool RuleCompletes() const { - return (is_some_rule_complete || (ntptr.empty() && tptr.empty())); - } - bool GrammarContinues() const { - return !(ntptr.empty() && tptr.empty()); - } - bool IsRoot() const { - return is_root; - } - // these are the features associated with the rule from the start - // node up to this point. If you use these features, you must - // not Extend() this rule. 
- const SparseVector<double>& GetCFGProductionFeatures() const { - return input_features; - } - - const EGrammarNode* Extend(const WordID& t) const { - if (t < 0) { - map<WordID, EGrammarNode>::const_iterator it = ntptr.find(t); - if (it == ntptr.end()) return NULL; - return &it->second; - } else { - map<WordID, EGrammarNode>::const_iterator it = tptr.find(t); - if (it == tptr.end()) return NULL; - return &it->second; - } - } - - private: - map<WordID, EGrammarNode> tptr; - map<WordID, EGrammarNode> ntptr; - SparseVector<double> input_features; - bool is_some_rule_complete; - bool is_root; -}; -typedef map<WordID, EGrammarNode> EGrammar; // indexed by the rule LHS - -// edges are immutable once created -struct Edge { -#ifdef DEBUG_CHART_PARSER - static int id_count; - const int id; -#endif - const WordID cat; // lhs side of rule proved/being proved - const EGrammarNode* const dot; // dot position - const WFSTNode* const q; // start of span - const WFSTNode* const r; // end of span - const Edge* const active_parent; // back pointer, NULL for PREDICT items - const Edge* const passive_parent; // back pointer, NULL for SCAN and PREDICT items - TRulePtr tps; // translations - boost::shared_ptr<SparseVector<double> > features; // features from CFG rule - - bool IsPassive() const { - // when a rule is completed, this value will be set - return static_cast<bool>(features); - } - bool IsActive() const { return !IsPassive(); } - bool IsInitial() const { - return !(active_parent || passive_parent); - } - bool IsCreatedByScan() const { - return active_parent && !passive_parent && !dot->IsRoot(); - } - bool IsCreatedByPredict() const { - return dot->IsRoot(); - } - bool IsCreatedByComplete() const { - return active_parent && passive_parent; - } - - // constructor for PREDICT - Edge(WordID c, const EGrammarNode* d, const WFSTNode* q_and_r) : -#ifdef DEBUG_CHART_PARSER - id(++id_count), -#endif - cat(c), dot(d), q(q_and_r), r(q_and_r), active_parent(NULL), passive_parent(NULL), tps() {} - Edge(WordID c, const EGrammarNode* d, const WFSTNode* q_and_r, const Edge* act_parent) : -#ifdef DEBUG_CHART_PARSER - id(++id_count), -#endif - cat(c), dot(d), q(q_and_r), r(q_and_r), active_parent(act_parent), passive_parent(NULL), tps() {} - - // constructors for SCAN - Edge(WordID c, const EGrammarNode* d, const WFSTNode* i, const WFSTNode* j, - const Edge* act_par, const TRulePtr& translations) : -#ifdef DEBUG_CHART_PARSER - id(++id_count), -#endif - cat(c), dot(d), q(i), r(j), active_parent(act_par), passive_parent(NULL), tps(translations) {} - - Edge(WordID c, const EGrammarNode* d, const WFSTNode* i, const WFSTNode* j, - const Edge* act_par, const TRulePtr& translations, - const SparseVector<double>& feats) : -#ifdef DEBUG_CHART_PARSER - id(++id_count), -#endif - cat(c), dot(d), q(i), r(j), active_parent(act_par), passive_parent(NULL), tps(translations), - features(new SparseVector<double>(feats)) {} - - // constructors for COMPLETE - Edge(WordID c, const EGrammarNode* d, const WFSTNode* i, const WFSTNode* j, - const Edge* act_par, const Edge *pas_par) : -#ifdef DEBUG_CHART_PARSER - id(++id_count), -#endif - cat(c), dot(d), q(i), r(j), active_parent(act_par), passive_parent(pas_par), tps() { - assert(pas_par->IsPassive()); - assert(act_par->IsActive()); - } - - Edge(WordID c, const EGrammarNode* d, const WFSTNode* i, const WFSTNode* j, - const Edge* act_par, const Edge *pas_par, const SparseVector<double>& feats) : -#ifdef DEBUG_CHART_PARSER - id(++id_count), -#endif - cat(c), dot(d), q(i), r(j), 
active_parent(act_par), passive_parent(pas_par), tps(), - features(new SparseVector<double>(feats)) { - assert(pas_par->IsPassive()); - assert(act_par->IsActive()); - } - - // constructor for COMPLETE query - Edge(const WFSTNode* _r) : -#ifdef DEBUG_CHART_PARSER - id(0), -#endif - cat(0), dot(NULL), q(NULL), - r(_r), active_parent(NULL), passive_parent(NULL), tps() {} - // constructor for MERGE query - Edge(const WFSTNode* _q, int) : -#ifdef DEBUG_CHART_PARSER - id(0), -#endif - cat(0), dot(NULL), q(_q), - r(NULL), active_parent(NULL), passive_parent(NULL), tps() {} -}; -#ifdef DEBUG_CHART_PARSER -int Edge::id_count = 0; -#endif - -ostream& operator<<(ostream& os, const Edge& e) { - string type = "PREDICT"; - if (e.IsCreatedByScan()) - type = "SCAN"; - else if (e.IsCreatedByComplete()) - type = "COMPLETE"; - os << "[" -#ifdef DEBUG_CHART_PARSER - << '(' << e.id << ") " -#else - << '(' << &e << ") " -#endif - << "q=" << e.q << ", r=" << e.r - << ", cat="<< TD::Convert(e.cat*-1) << ", dot=" - << e.dot -#ifdef DEBUG_CHART_PARSER - << e.dot->hint -#endif - << (e.IsActive() ? ", Active" : ", Passive") - << ", " << type; -#ifdef DEBUG_CHART_PARSER - if (e.active_parent) { os << ", act.parent=(" << e.active_parent->id << ')'; } - if (e.passive_parent) { os << ", psv.parent=(" << e.passive_parent->id << ')'; } -#endif - if (e.tps) { os << ", tps=" << e.tps->AsString(); } - return os << ']'; -} - -struct Traversal { - const Edge* const edge; // result from the active / passive combination - const Edge* const active; - const Edge* const passive; - Traversal(const Edge* me, const Edge* a, const Edge* p) : edge(me), active(a), passive(p) {} -}; - -struct UniqueTraversalHash { - size_t operator()(const Traversal* t) const { - size_t x = 5381; - x = ((x << 5) + x) ^ reinterpret_cast<size_t>(t->active); - x = ((x << 5) + x) ^ reinterpret_cast<size_t>(t->passive); - x = ((x << 5) + x) ^ t->edge->IsActive(); - return x; - } -}; - -struct UniqueTraversalEquals { - bool operator()(const Traversal* a, const Traversal* b) const { - return (a->passive == b->passive && a->active == b->active && a->edge->IsActive() == b->edge->IsActive()); - } -}; - -struct UniqueEdgeHash { - size_t operator()(const Edge* e) const { - size_t x = 5381; - if (e->IsActive()) { - x = ((x << 5) + x) ^ reinterpret_cast<size_t>(e->dot); - x = ((x << 5) + x) ^ reinterpret_cast<size_t>(e->q); - x = ((x << 5) + x) ^ reinterpret_cast<size_t>(e->r); - x = ((x << 5) + x) ^ static_cast<size_t>(e->cat); - x += 13; - } else { // with passive edges, we don't care about the dot - x = ((x << 5) + x) ^ reinterpret_cast<size_t>(e->q); - x = ((x << 5) + x) ^ reinterpret_cast<size_t>(e->r); - x = ((x << 5) + x) ^ static_cast<size_t>(e->cat); - } - return x; - } -}; - -struct UniqueEdgeEquals { - bool operator()(const Edge* a, const Edge* b) const { - if (a->IsActive() != b->IsActive()) return false; - if (a->IsActive()) { - return (a->cat == b->cat) && (a->dot == b->dot) && (a->q == b->q) && (a->r == b->r); - } else { - return (a->cat == b->cat) && (a->q == b->q) && (a->r == b->r); - } - } -}; - -struct REdgeHash { - size_t operator()(const Edge* e) const { - size_t x = 5381; - x = ((x << 5) + x) ^ reinterpret_cast<size_t>(e->r); - return x; - } -}; - -struct REdgeEquals { - bool operator()(const Edge* a, const Edge* b) const { - return (a->r == b->r); - } -}; - -struct QEdgeHash { - size_t operator()(const Edge* e) const { - size_t x = 5381; - x = ((x << 5) + x) ^ reinterpret_cast<size_t>(e->q); - return x; - } -}; - -struct QEdgeEquals { - bool 
operator()(const Edge* a, const Edge* b) const { - return (a->q == b->q); - } -}; - -struct EdgeQueue { - queue<const Edge*> q; - EdgeQueue() {} - void clear() { while(!q.empty()) q.pop(); } - bool HasWork() const { return !q.empty(); } - const Edge* Next() { const Edge* res = q.front(); q.pop(); return res; } - void AddEdge(const Edge* s) { q.push(s); } -}; - -class CFG_WFSTComposerImpl { - public: - CFG_WFSTComposerImpl(WordID start_cat, - const WFSTNode* q_0, - const WFSTNode* q_final) : start_cat_(start_cat), q_0_(q_0), q_final_(q_final) {} - - // returns false if the intersection is empty - bool Compose(const EGrammar& g, Hypergraph* forest) { - goal_node = NULL; - EGrammar::const_iterator sit = g.find(start_cat_); - forest->ReserveNodes(kMAX_NODES); - assert(sit != g.end()); - Edge* init = new Edge(start_cat_, &sit->second, q_0_); - assert(IncorporateNewEdge(init)); - while (exp_agenda.HasWork() || agenda.HasWork()) { - while(exp_agenda.HasWork()) { - const Edge* edge = exp_agenda.Next(); - FinishEdge(edge, forest); - } - if (agenda.HasWork()) { - const Edge* edge = agenda.Next(); -#ifdef DEBUG_CHART_PARSER - cerr << "processing (" << edge->id << ')' << endl; -#endif - if (edge->IsActive()) { - if (edge->dot->HasTerminals()) - DoScan(edge); - if (edge->dot->HasNonTerminals()) { - DoMergeWithPassives(edge); - DoPredict(edge, g); - } - } else { - DoComplete(edge); - } - } - } - if (goal_node) { - forest->PruneUnreachable(goal_node->id_); - RemoveEpsilons(forest, kEPS); - } - FreeAll(); - return goal_node; - } - - void FreeAll() { - for (int i = 0; i < free_list_.size(); ++i) - delete free_list_[i]; - free_list_.clear(); - for (int i = 0; i < traversal_free_list_.size(); ++i) - delete traversal_free_list_[i]; - traversal_free_list_.clear(); - all_traversals.clear(); - exp_agenda.clear(); - agenda.clear(); - tps2node.clear(); - edge2node.clear(); - all_edges.clear(); - passive_edges.clear(); - active_edges.clear(); - } - - ~CFG_WFSTComposerImpl() { - FreeAll(); - } - - // returns the total number of edges created during composition - int EdgesCreated() const { - return free_list_.size(); - } - - private: - void DoScan(const Edge* edge) { - // here, we assume that the FST will potentially have many more outgoing - // edges than the grammar, which will be just a couple. If you want to - // efficiently handle the case where both are relatively large, this code - // will need to change how the intersection is done. The best general - // solution would probably be the Baeza-Yates double binary search. - - const EGrammarNode* dot = edge->dot; - const WFSTNode* r = edge->r; - const map<WordID, EGrammarNode>& terms = dot->GetTerminals(); - for (map<WordID, EGrammarNode>::const_iterator git = terms.begin(); - git != terms.end(); ++git) { - - if (!(TD::Convert(git->first)[0] >= '0' && TD::Convert(git->first)[0] <= '9')) { - std::cerr << "TERMINAL SYMBOL: " << TD::Convert(git->first) << endl; - abort(); - } - std::vector<std::pair<const WFSTNode*, TRulePtr> > extensions = r->ExtendInput(atoi(TD::Convert(git->first).c_str())); - for (unsigned nsi = 0; nsi < extensions.size(); ++nsi) { - const WFSTNode* next_r = extensions[nsi].first; - const EGrammarNode* next_dot = &git->second; - const bool grammar_continues = next_dot->GrammarContinues(); - const bool rule_completes = next_dot->RuleCompletes(); - if (extensions[nsi].second) - cerr << "!!! 
" << extensions[nsi].second->AsString() << endl; - // cerr << " rule completes: " << rule_completes << " after consuming " << TD::Convert(git->first) << endl; - assert(grammar_continues || rule_completes); - const SparseVector<double>& input_features = next_dot->GetCFGProductionFeatures(); - if (rule_completes) - IncorporateNewEdge(new Edge(edge->cat, next_dot, edge->q, next_r, edge, extensions[nsi].second, input_features)); - if (grammar_continues) - IncorporateNewEdge(new Edge(edge->cat, next_dot, edge->q, next_r, edge, extensions[nsi].second)); - } - } - } - - void DoPredict(const Edge* edge, const EGrammar& g) { - const EGrammarNode* dot = edge->dot; - const map<WordID, EGrammarNode>& non_terms = dot->GetNonTerminals(); - for (map<WordID, EGrammarNode>::const_iterator git = non_terms.begin(); - git != non_terms.end(); ++git) { - const WordID nt_to_predict = git->first; - //cerr << edge->id << " -- " << TD::Convert(nt_to_predict*-1) << endl; - EGrammar::const_iterator egi = g.find(nt_to_predict); - if (egi == g.end()) { - cerr << "[ERROR] Can't find any grammar rules with a LHS of type " - << TD::Convert(-1*nt_to_predict) << '!' << endl; - continue; - } - assert(edge->IsActive()); - const EGrammarNode* new_dot = &egi->second; - Edge* new_edge = new Edge(nt_to_predict, new_dot, edge->r, edge); - IncorporateNewEdge(new_edge); - } - } - - void DoComplete(const Edge* passive) { -#ifdef DEBUG_CHART_PARSER - cerr << " complete: " << *passive << endl; -#endif - const WordID completed_nt = passive->cat; - const WFSTNode* q = passive->q; - const WFSTNode* next_r = passive->r; - const Edge query(q); - const pair<unordered_multiset<const Edge*, REdgeHash, REdgeEquals>::iterator, - unordered_multiset<const Edge*, REdgeHash, REdgeEquals>::iterator > p = - active_edges.equal_range(&query); - for (unordered_multiset<const Edge*, REdgeHash, REdgeEquals>::iterator it = p.first; - it != p.second; ++it) { - const Edge* active = *it; -#ifdef DEBUG_CHART_PARSER - cerr << " pos: " << *active << endl; -#endif - const EGrammarNode* next_dot = active->dot->Extend(completed_nt); - if (!next_dot) continue; - const SparseVector<double>& input_features = next_dot->GetCFGProductionFeatures(); - // add up to 2 rules - if (next_dot->RuleCompletes()) - IncorporateNewEdge(new Edge(active->cat, next_dot, active->q, next_r, active, passive, input_features)); - if (next_dot->GrammarContinues()) - IncorporateNewEdge(new Edge(active->cat, next_dot, active->q, next_r, active, passive)); - } - } - - void DoMergeWithPassives(const Edge* active) { - // edge is active, has non-terminals, we need to find the passives that can extend it - assert(active->IsActive()); - assert(active->dot->HasNonTerminals()); -#ifdef DEBUG_CHART_PARSER - cerr << " merge active with passives: ACT=" << *active << endl; -#endif - const Edge query(active->r, 1); - const pair<unordered_multiset<const Edge*, QEdgeHash, QEdgeEquals>::iterator, - unordered_multiset<const Edge*, QEdgeHash, QEdgeEquals>::iterator > p = - passive_edges.equal_range(&query); - for (unordered_multiset<const Edge*, QEdgeHash, QEdgeEquals>::iterator it = p.first; - it != p.second; ++it) { - const Edge* passive = *it; - const EGrammarNode* next_dot = active->dot->Extend(passive->cat); - if (!next_dot) continue; - const WFSTNode* next_r = passive->r; - const SparseVector<double>& input_features = next_dot->GetCFGProductionFeatures(); - if (next_dot->RuleCompletes()) - IncorporateNewEdge(new Edge(active->cat, next_dot, active->q, next_r, active, passive, input_features)); - if 
(next_dot->GrammarContinues()) - IncorporateNewEdge(new Edge(active->cat, next_dot, active->q, next_r, active, passive)); - } - } - - // take ownership of edge memory, add to various indexes, etc - // returns true if this edge is new - bool IncorporateNewEdge(Edge* edge) { - free_list_.push_back(edge); - if (edge->passive_parent && edge->active_parent) { - Traversal* t = new Traversal(edge, edge->active_parent, edge->passive_parent); - traversal_free_list_.push_back(t); - if (all_traversals.find(t) != all_traversals.end()) { - return false; - } else { - all_traversals.insert(t); - } - } - exp_agenda.AddEdge(edge); - return true; - } - - bool FinishEdge(const Edge* edge, Hypergraph* hg) { - bool is_new = false; - if (all_edges.find(edge) == all_edges.end()) { -#ifdef DEBUG_CHART_PARSER - cerr << *edge << " is NEW\n"; -#endif - all_edges.insert(edge); - is_new = true; - if (edge->IsPassive()) passive_edges.insert(edge); - if (edge->IsActive()) active_edges.insert(edge); - agenda.AddEdge(edge); - } else { -#ifdef DEBUG_CHART_PARSER - cerr << *edge << " is NOT NEW.\n"; -#endif - } - AddEdgeToTranslationForest(edge, hg); - return is_new; - } - - // build the translation forest - void AddEdgeToTranslationForest(const Edge* edge, Hypergraph* hg) { - assert(hg->nodes_.size() < kMAX_NODES); - Hypergraph::Node* tps = NULL; - // first add any target language rules - if (edge->tps) { - Hypergraph::Node*& node = tps2node[(size_t)edge->tps.get()]; - if (!node) { - // cerr << "Creating phrases for " << edge->tps << endl; - const TRulePtr& rule = edge->tps; - node = hg->AddNode(kPHRASE); - Hypergraph::Edge* hg_edge = hg->AddEdge(rule, Hypergraph::TailNodeVector()); - hg_edge->feature_values_ += rule->GetFeatureValues(); - hg->ConnectEdgeToHeadNode(hg_edge, node); - } - tps = node; - } - Hypergraph::Node*& head_node = edge2node[edge]; - if (!head_node) - head_node = hg->AddNode(kPHRASE); - if (edge->cat == start_cat_ && edge->q == q_0_ && edge->r == q_final_ && edge->IsPassive()) { - assert(goal_node == NULL || goal_node == head_node); - goal_node = head_node; - } - Hypergraph::TailNodeVector tail; - SparseVector<double> extra; - if (edge->IsCreatedByPredict()) { - // extra.set_value(FD::Convert("predict"), 1); - } else if (edge->IsCreatedByScan()) { - tail.push_back(edge2node[edge->active_parent]->id_); - if (tps) { - tail.push_back(tps->id_); - } - //extra.set_value(FD::Convert("scan"), 1); - } else if (edge->IsCreatedByComplete()) { - tail.push_back(edge2node[edge->active_parent]->id_); - tail.push_back(edge2node[edge->passive_parent]->id_); - //extra.set_value(FD::Convert("complete"), 1); - } else { - assert(!"unexpected edge type!"); - } - //cerr << head_node->id_ << "<--" << *edge << endl; - -#ifdef DEBUG_CHART_PARSER - for (int i = 0; i < tail.size(); ++i) - if (tail[i] == head_node->id_) { - cerr << "ERROR: " << *edge << "\n i=" << i << endl; - if (i == 1) { cerr << "\tP: " << *edge->passive_parent << endl; } - if (i == 0) { cerr << "\tA: " << *edge->active_parent << endl; } - assert(!"self-loop found!"); - } -#endif - Hypergraph::Edge* hg_edge = NULL; - if (tail.size() == 0) { - hg_edge = hg->AddEdge(kEPSRule, tail); - } else if (tail.size() == 1) { - hg_edge = hg->AddEdge(kX1, tail); - } else if (tail.size() == 2) { - hg_edge = hg->AddEdge(kX1X2, tail); - } - if (edge->features) - hg_edge->feature_values_ += *edge->features; - hg_edge->feature_values_ += extra; - hg->ConnectEdgeToHeadNode(hg_edge, head_node); - } - - Hypergraph::Node* goal_node; - EdgeQueue exp_agenda; - EdgeQueue agenda; - 
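-  // chart indexes: tps2node and edge2node map translation rules and chart edges -  // to forest nodes; all_traversals and all_edges deduplicate; the passive and -  // active multisets back the COMPLETE and MERGE lookups keyed on q and r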
unordered_map<size_t, Hypergraph::Node*> tps2node; - unordered_map<const Edge*, Hypergraph::Node*, UniqueEdgeHash, UniqueEdgeEquals> edge2node; - unordered_set<const Traversal*, UniqueTraversalHash, UniqueTraversalEquals> all_traversals; - unordered_set<const Edge*, UniqueEdgeHash, UniqueEdgeEquals> all_edges; - unordered_multiset<const Edge*, QEdgeHash, QEdgeEquals> passive_edges; - unordered_multiset<const Edge*, REdgeHash, REdgeEquals> active_edges; - vector<Edge*> free_list_; - vector<Traversal*> traversal_free_list_; - const WordID start_cat_; - const WFSTNode* const q_0_; - const WFSTNode* const q_final_; -}; - -#ifdef DEBUG_CHART_PARSER -static string TrimRule(const string& r) { - size_t start = r.find(" |||") + 5; - size_t end = r.rfind(" |||"); - return r.substr(start, end - start); -} -#endif - -void AddGrammarRule(const string& r, EGrammar* g) { - const size_t pos = r.find(" ||| "); - if (pos == string::npos || r[0] != '[') { - cerr << "Bad rule: " << r << endl; - return; - } - const size_t rpos = r.rfind(" ||| "); - string feats; - string rs = r; - if (rpos != pos) { - feats = r.substr(rpos + 5); - rs = r.substr(0, rpos); - } - string rhs = rs.substr(pos + 5); - string trule = rs + " ||| " + rhs + " ||| " + feats; - TRule tr(trule); - cerr << "X: " << tr.e_[0] << endl; -#ifdef DEBUG_CHART_PARSER - string hint_last_rule; -#endif - EGrammarNode* cur = &(*g)[tr.GetLHS()]; - cur->is_root = true; - for (int i = 0; i < tr.FLength(); ++i) { - WordID sym = tr.f()[i]; -#ifdef DEBUG_CHART_PARSER - hint_last_rule = TD::Convert(sym < 0 ? -sym : sym); - cur->hint += " <@@> (*" + hint_last_rule + ") " + TrimRule(tr.AsString()); -#endif - if (sym < 0) - cur = &cur->ntptr[sym]; - else - cur = &cur->tptr[sym]; - } -#ifdef DEBUG_CHART_PARSER - cur->hint += " <@@> (" + hint_last_rule + "*) " + TrimRule(tr.AsString()); -#endif - cur->is_some_rule_complete = true; - cur->input_features = tr.GetFeatureValues(); -} - -CFG_WFSTComposer::~CFG_WFSTComposer() { - delete pimpl_; -} - -CFG_WFSTComposer::CFG_WFSTComposer(const WFST& wfst) { - InitializeConstants(); - pimpl_ = new CFG_WFSTComposerImpl(kUNIQUE_START, wfst.Initial(), wfst.Final()); -} - -bool CFG_WFSTComposer::Compose(const Hypergraph& src_forest, Hypergraph* trg_forest) { - // first, convert the src forest into an EGrammar - EGrammar g; - const int nedges = src_forest.edges_.size(); - const int nnodes = src_forest.nodes_.size(); - vector<int> cats(nnodes); - bool assign_cats = false; - for (int i = 0; i < nnodes; ++i) - if (assign_cats) { - cats[i] = TD::Convert("CAT_" + boost::lexical_cast<string>(i)) * -1; - } else { - cats[i] = src_forest.nodes_[i].cat_; - } - // construct the grammar - for (int i = 0; i < nedges; ++i) { - const Hypergraph::Edge& edge = src_forest.edges_[i]; - const vector<WordID>& src = edge.rule_->f(); - EGrammarNode* cur = &g[cats[edge.head_node_]]; - cur->is_root = true; - int ntc = 0; - for (int j = 0; j < src.size(); ++j) { - WordID sym = src[j]; - if (sym <= 0) { - sym = cats[edge.tail_nodes_[ntc]]; - ++ntc; - cur = &cur->ntptr[sym]; - } else { - cur = &cur->tptr[sym]; - } - } - cur->is_some_rule_complete = true; - cur->input_features = edge.feature_values_; - } - EGrammarNode& goal_rule = g[kUNIQUE_START]; - assert((goal_rule.ntptr.size() == 1 && goal_rule.tptr.size() == 0) || - (goal_rule.ntptr.size() == 0 && goal_rule.tptr.size() == 1)); - - return pimpl_->Compose(g, trg_forest); -} - -bool CFG_WFSTComposer::Compose(istream* in, Hypergraph* trg_forest) { - EGrammar g; - while(*in) { - string line; - getline(*in, 
line); - if (line.empty()) continue; - AddGrammarRule(line, &g); - } - - return pimpl_->Compose(g, trg_forest); -} diff --git a/gi/pf/cfg_wfst_composer.h b/gi/pf/cfg_wfst_composer.h deleted file mode 100644 index cf47f459..00000000 --- a/gi/pf/cfg_wfst_composer.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef _CFG_WFST_COMPOSER_H_ -#define _CFG_WFST_COMPOSER_H_ - -#include <iostream> -#include <vector> -#include <utility> - -#include "trule.h" -#include "wordid.h" - -class CFG_WFSTComposerImpl; -class Hypergraph; - -struct WFSTNode { - virtual ~WFSTNode(); - // returns the next states reachable by consuming srcindex (which identifies a word) - // paired with the output string generated by taking that transition. - virtual std::vector<std::pair<const WFSTNode*,TRulePtr> > ExtendInput(unsigned srcindex) const = 0; -}; - -struct WFST { - virtual ~WFST(); - virtual const WFSTNode* Final() const = 0; - virtual const WFSTNode* Initial() const = 0; -}; - -class CFG_WFSTComposer { - public: - ~CFG_WFSTComposer(); - explicit CFG_WFSTComposer(const WFST& wfst); - bool Compose(const Hypergraph& in_forest, Hypergraph* trg_forest); - - // reads the grammar from a file. There must be a single top-level - // S -> X rule. Anything else is possible. Format is: - // [S] ||| [SS,1] - // [SS] ||| [NP,1] [VP,2] ||| Feature1=0.2 Feature2=-2.3 - // [SS] ||| [VP,1] [NP,2] ||| Feature1=0.8 - // [NP] ||| [DET,1] [N,2] ||| Feature3=2 - // ... - bool Compose(std::istream* grammar_file, Hypergraph* trg_forest); - - private: - CFG_WFSTComposerImpl* pimpl_; -}; - -#endif diff --git a/gi/pf/conditional_pseg.h b/gi/pf/conditional_pseg.h deleted file mode 100644 index 81ddb206..00000000 --- a/gi/pf/conditional_pseg.h +++ /dev/null @@ -1,275 +0,0 @@ -#ifndef _CONDITIONAL_PSEG_H_ -#define _CONDITIONAL_PSEG_H_ - -#include <vector> -#include <tr1/unordered_map> -#include <boost/functional/hash.hpp> -#include <iostream> - -#include "m.h" -#include "prob.h" -#include "ccrp_nt.h" -#include "mfcr.h" -#include "trule.h" -#include "base_distributions.h" -#include "tdict.h" - -template <typename ConditionalBaseMeasure> -struct MConditionalTranslationModel { - explicit MConditionalTranslationModel(ConditionalBaseMeasure& rcp0) : - rp0(rcp0), d(0.5), strength(1.0), lambdas(1, prob_t::One()), p0s(1) {} - - void Summary() const { - std::cerr << "Number of conditioning contexts: " << r.size() << std::endl; - for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) { - std::cerr << TD::GetString(it->first) << " \t(d=" << it->second.discount() << ",s=" << it->second.strength() << ") --------------------------" << std::endl; - for (MFCR<1,TRule>::const_iterator i2 = it->second.begin(); i2 != it->second.end(); ++i2) - std::cerr << " " << i2->second.total_dish_count_ << '\t' << i2->first << std::endl; - } - } - - double log_likelihood(const double& dd, const double& aa) const { - if (aa <= -dd) return -std::numeric_limits<double>::infinity(); - //double llh = Md::log_beta_density(dd, 10, 3) + Md::log_gamma_density(aa, 1, 1); - double llh = Md::log_beta_density(dd, 1, 1) + - Md::log_gamma_density(dd + aa, 1, 1); - typename std::tr1::unordered_map<std::vector<WordID>, MFCR<1,TRule>, boost::hash<std::vector<WordID> > >::const_iterator it; - for (it = r.begin(); it != r.end(); ++it) - llh += it->second.log_crp_prob(dd, aa); - return llh; - } - - struct DiscountResampler { - DiscountResampler(const MConditionalTranslationModel& m) : m_(m) {} - const MConditionalTranslationModel& m_; - double operator()(const double& proposed_discount) const 
{ - return m_.log_likelihood(proposed_discount, m_.strength); - } - }; - - struct AlphaResampler { - AlphaResampler(const MConditionalTranslationModel& m) : m_(m) {} - const MConditionalTranslationModel& m_; - double operator()(const double& proposed_strength) const { - return m_.log_likelihood(m_.d, proposed_strength); - } - }; - - void ResampleHyperparameters(MT19937* rng) { - typename std::tr1::unordered_map<std::vector<WordID>, MFCR<1,TRule>, boost::hash<std::vector<WordID> > >::iterator it; -#if 1 - for (it = r.begin(); it != r.end(); ++it) { - it->second.resample_hyperparameters(rng); - } -#else - const unsigned nloop = 5; - const unsigned niterations = 10; - DiscountResampler dr(*this); - AlphaResampler ar(*this); - for (int iter = 0; iter < nloop; ++iter) { - strength = slice_sampler1d(ar, strength, *rng, -d + std::numeric_limits<double>::min(), - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - double min_discount = std::numeric_limits<double>::min(); - if (strength < 0.0) min_discount -= strength; - d = slice_sampler1d(dr, d, *rng, min_discount, - 1.0, 0.0, niterations, 100*niterations); - } - strength = slice_sampler1d(ar, strength, *rng, -d, - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - std::cerr << "MConditionalTranslationModel(d=" << d << ",s=" << strength << ") = " << log_likelihood(d, strength) << std::endl; - for (it = r.begin(); it != r.end(); ++it) { - it->second.set_discount(d); - it->second.set_strength(strength); - } -#endif - } - - int DecrementRule(const TRule& rule, MT19937* rng) { - RuleModelHash::iterator it = r.find(rule.f_); - assert(it != r.end()); - const TableCount delta = it->second.decrement(rule, rng); - if (delta.count) { - if (it->second.num_customers() == 0) r.erase(it); - } - return delta.count; - } - - int IncrementRule(const TRule& rule, MT19937* rng) { - RuleModelHash::iterator it = r.find(rule.f_); - if (it == r.end()) { - //it = r.insert(make_pair(rule.f_, MFCR<1,TRule>(d, strength))).first; - it = r.insert(make_pair(rule.f_, MFCR<1,TRule>(1,1,1,1,0.6, -0.12))).first; - } - p0s[0] = rp0(rule); - TableCount delta = it->second.increment(rule, p0s.begin(), lambdas.begin(), rng); - return delta.count; - } - - prob_t RuleProbability(const TRule& rule) const { - prob_t p; - RuleModelHash::const_iterator it = r.find(rule.f_); - if (it == r.end()) { - p = rp0(rule); - } else { - p0s[0] = rp0(rule); - p = it->second.prob(rule, p0s.begin(), lambdas.begin()); - } - return p; - } - - prob_t Likelihood() const { - prob_t p; p.logeq(log_likelihood(d, strength)); - return p; - } - - const ConditionalBaseMeasure& rp0; - typedef std::tr1::unordered_map<std::vector<WordID>, - MFCR<1, TRule>, - boost::hash<std::vector<WordID> > > RuleModelHash; - RuleModelHash r; - double d, strength; - std::vector<prob_t> lambdas; - mutable std::vector<prob_t> p0s; -}; - -template <typename ConditionalBaseMeasure> -struct ConditionalTranslationModel { - explicit ConditionalTranslationModel(ConditionalBaseMeasure& rcp0) : - rp0(rcp0) {} - - void Summary() const { - std::cerr << "Number of conditioning contexts: " << r.size() << std::endl; - for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) { - std::cerr << TD::GetString(it->first) << " \t(\\alpha = " << it->second.alpha() << ") --------------------------" << std::endl; - for (CCRP_NoTable<TRule>::const_iterator i2 = it->second.begin(); i2 != it->second.end(); ++i2) - std::cerr << " " << i2->second << '\t' << i2->first << std::endl; - } - } - - 
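-  // resample each conditioning context's CRP concentration parameter (alpha)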
void ResampleHyperparameters(MT19937* rng) { - for (RuleModelHash::iterator it = r.begin(); it != r.end(); ++it) - it->second.resample_hyperparameters(rng); - } - - int DecrementRule(const TRule& rule) { - RuleModelHash::iterator it = r.find(rule.f_); - assert(it != r.end()); - int count = it->second.decrement(rule); - if (count) { - if (it->second.num_customers() == 0) r.erase(it); - } - return count; - } - - int IncrementRule(const TRule& rule) { - RuleModelHash::iterator it = r.find(rule.f_); - if (it == r.end()) { - it = r.insert(make_pair(rule.f_, CCRP_NoTable<TRule>(1.0, 1.0, 8.0))).first; - } - int count = it->second.increment(rule); - return count; - } - - void IncrementRules(const std::vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - IncrementRule(*rules[i]); - } - - void DecrementRules(const std::vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - DecrementRule(*rules[i]); - } - - prob_t RuleProbability(const TRule& rule) const { - prob_t p; - RuleModelHash::const_iterator it = r.find(rule.f_); - if (it == r.end()) { - p.logeq(log(rp0(rule))); - } else { - p.logeq(it->second.logprob(rule, log(rp0(rule)))); - } - return p; - } - - prob_t Likelihood() const { - prob_t p = prob_t::One(); - for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) { - prob_t q; q.logeq(it->second.log_crp_prob()); - p *= q; - for (CCRP_NoTable<TRule>::const_iterator i2 = it->second.begin(); i2 != it->second.end(); ++i2) - p *= rp0(i2->first); - } - return p; - } - - const ConditionalBaseMeasure& rp0; - typedef std::tr1::unordered_map<std::vector<WordID>, - CCRP_NoTable<TRule>, - boost::hash<std::vector<WordID> > > RuleModelHash; - RuleModelHash r; -}; - -template <typename ConditionalBaseMeasure> -struct ConditionalParallelSegementationModel { - explicit ConditionalParallelSegementationModel(ConditionalBaseMeasure& rcp0) : - tmodel(rcp0), base(prob_t::One()), aligns(1,1) {} - - ConditionalTranslationModel<ConditionalBaseMeasure> tmodel; - - void DecrementRule(const TRule& rule) { - tmodel.DecrementRule(rule); - } - - void IncrementRule(const TRule& rule) { - tmodel.IncrementRule(rule); - } - - void IncrementRulesAndAlignments(const std::vector<TRulePtr>& rules) { - tmodel.IncrementRules(rules); - for (int i = 0; i < rules.size(); ++i) { - IncrementAlign(rules[i]->f_.size()); - } - } - - void DecrementRulesAndAlignments(const std::vector<TRulePtr>& rules) { - tmodel.DecrementRules(rules); - for (int i = 0; i < rules.size(); ++i) { - DecrementAlign(rules[i]->f_.size()); - } - } - - prob_t RuleProbability(const TRule& rule) const { - return tmodel.RuleProbability(rule); - } - - void IncrementAlign(unsigned span) { - if (aligns.increment(span)) { - // TODO - } - } - - void DecrementAlign(unsigned span) { - if (aligns.decrement(span)) { - // TODO - } - } - - prob_t AlignProbability(unsigned span) const { - prob_t p; - p.logeq(aligns.logprob(span, Md::log_poisson(span, 1.0))); - return p; - } - - prob_t Likelihood() const { - prob_t p; p.logeq(aligns.log_crp_prob()); - p *= base; - p *= tmodel.Likelihood(); - return p; - } - - prob_t base; - CCRP_NoTable<unsigned> aligns; -}; - -#endif - diff --git a/gi/pf/condnaive.cc b/gi/pf/condnaive.cc deleted file mode 100644 index 419731ac..00000000 --- a/gi/pf/condnaive.cc +++ /dev/null @@ -1,298 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/multi_array.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include 
"base_distributions.h" -#include "monotonic_pseg.h" -#include "conditional_pseg.h" -#include "trule.h" -#include "tdict.h" -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp_nt.h" -#include "corpus.h" - -using namespace std; -using namespace std::tr1; -namespace po = boost::program_options; - -static unsigned kMAX_SRC_PHRASE; -static unsigned kMAX_TRG_PHRASE; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("input,i",po::value<string>(),"Read parallel data from") - ("max_src_phrase",po::value<unsigned>()->default_value(4),"Maximum length of source language phrases") - ("max_trg_phrase",po::value<unsigned>()->default_value(4),"Maximum length of target language phrases") - ("model1,m",po::value<string>(),"Model 1 parameters (used in base distribution)") - ("model1_interpolation_weight",po::value<double>()->default_value(0.95),"Mixing proportion of model 1 with uniform target distribution") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -boost::shared_ptr<MT19937> prng; - -struct ModelAndData { - explicit ModelAndData(ConditionalParallelSegementationModel<PhraseConditionalBase>& m, const vector<vector<int> >& ce, const vector<vector<int> >& cf, const set<int>& ve, const set<int>& vf) : - model(m), - rng(&*prng), - corpuse(ce), - corpusf(cf), - vocabe(ve), - vocabf(vf), - mh_samples(), - mh_rejects(), - kX(-TD::Convert("X")), - derivations(corpuse.size()) {} - - void ResampleHyperparameters() { - } - - void InstantiateRule(const pair<short,short>& from, - const pair<short,short>& to, - const vector<int>& sentf, - const vector<int>& sente, - TRule* rule) const { - rule->f_.clear(); - rule->e_.clear(); - rule->lhs_ = kX; - for (short i = from.first; i < to.first; ++i) - rule->f_.push_back(sentf[i]); - for (short i = from.second; i < to.second; ++i) - rule->e_.push_back(sente[i]); - } - - void DecrementDerivation(const vector<pair<short,short> >& d, const vector<int>& sentf, const vector<int>& sente) { - if (d.size() < 2) return; - TRule x; - for (int i = 1; i < d.size(); ++i) { - InstantiateRule(d[i], d[i-1], sentf, sente, &x); - model.DecrementRule(x); - model.DecrementAlign(x.f_.size()); - } - } - - void PrintDerivation(const vector<pair<short,short> >& d, const vector<int>& sentf, const vector<int>& sente) { - if (d.size() < 2) return; - TRule x; - for (int i = 1; i < d.size(); ++i) { - InstantiateRule(d[i], d[i-1], sentf, sente, &x); - cerr << i << '/' << (d.size() - 1) << ": " << x << endl; - } - } - - void IncrementDerivation(const vector<pair<short,short> >& d, const vector<int>& sentf, const vector<int>& sente) { - if (d.size() < 2) return; - TRule x; - for (int i = 1; i < d.size(); ++i) { - 
InstantiateRule(d[i], d[i-1], sentf, sente, &x); - model.IncrementRule(x); - model.IncrementAlign(x.f_.size()); - } - } - - prob_t Likelihood() const { - return model.Likelihood(); - } - - prob_t DerivationProposalProbability(const vector<pair<short,short> >& d, const vector<int>& sentf, const vector<int>& sente) const { - prob_t p = prob_t::One(); - TRule x; - for (int i = 1; i < d.size(); ++i) { - InstantiateRule(d[i], d[i-1], sentf, sente, &x); - p *= model.RuleProbability(x); - p *= model.AlignProbability(x.f_.size()); - } - return p; - } - - void Sample(); - - ConditionalParallelSegementationModel<PhraseConditionalBase>& model; - MT19937* rng; - const vector<vector<int> >& corpuse, corpusf; - const set<int>& vocabe, vocabf; - unsigned mh_samples, mh_rejects; - const int kX; - vector<vector<pair<short, short> > > derivations; -}; - -void ModelAndData::Sample() { - unsigned MAXK = kMAX_SRC_PHRASE; - unsigned MAXL = kMAX_TRG_PHRASE; - TRule x; - x.lhs_ = -TD::Convert("X"); - - for (int samples = 0; samples < 1000; ++samples) { - if (samples % 1 == 0 && samples > 0) { - //ResampleHyperparameters(); - cerr << " [" << samples << " LLH=" << log(Likelihood()) << " MH=" << ((double)mh_rejects / mh_samples) << "]\n"; - for (int i = 0; i < 10; ++i) { - cerr << "SENTENCE: " << TD::GetString(corpusf[i]) << " ||| " << TD::GetString(corpuse[i]) << endl; - PrintDerivation(derivations[i], corpusf[i], corpuse[i]); - } - static TRule xx("[X] ||| w n ||| s h ||| X=0"); - const CCRP_NoTable<TRule>& dcrp = model.tmodel.r.find(xx.f_)->second; - for (CCRP_NoTable<TRule>::const_iterator it = dcrp.begin(); it != dcrp.end(); ++it) { - cerr << "\t" << it->second << "\t" << it->first << endl; - } - } - cerr << '.' << flush; - for (int s = 0; s < corpuse.size(); ++s) { - const vector<int>& sentf = corpusf[s]; - const vector<int>& sente = corpuse[s]; -// cerr << " CUSTOMERS: " << rules.num_customers() << endl; -// cerr << "SENTENCE: " << TD::GetString(sentf) << " ||| " << TD::GetString(sente) << endl; - - vector<pair<short, short> >& deriv = derivations[s]; - const prob_t p_cur = Likelihood(); - DecrementDerivation(deriv, sentf, sente); - - boost::multi_array<prob_t, 2> a(boost::extents[sentf.size() + 1][sente.size() + 1]); - boost::multi_array<prob_t, 4> trans(boost::extents[sentf.size() + 1][sente.size() + 1][MAXK][MAXL]); - a[0][0] = prob_t::One(); - for (int i = 0; i < sentf.size(); ++i) { - for (int j = 0; j < sente.size(); ++j) { - const prob_t src_a = a[i][j]; - x.f_.clear(); - for (int k = 1; k <= MAXK; ++k) { - if (i + k > sentf.size()) break; - x.f_.push_back(sentf[i + k - 1]); - x.e_.clear(); - const prob_t p_span = model.AlignProbability(k); // prob of consuming this much source - for (int l = 1; l <= MAXL; ++l) { - if (j + l > sente.size()) break; - x.e_.push_back(sente[j + l - 1]); - trans[i][j][k - 1][l - 1] = model.RuleProbability(x) * p_span; - a[i + k][j + l] += src_a * trans[i][j][k - 1][l - 1]; - } - } - } - } -// cerr << "Inside: " << log(a[sentf.size()][sente.size()]) << endl; - const prob_t q_cur = DerivationProposalProbability(deriv, sentf, sente); - - vector<pair<short,short> > newderiv; - int cur_i = sentf.size(); - int cur_j = sente.size(); - while(cur_i > 0 && cur_j > 0) { - newderiv.push_back(pair<short,short>(cur_i, cur_j)); -// cerr << "NODE: (" << cur_i << "," << cur_j << ")\n"; - SampleSet<prob_t> ss; - vector<pair<short,short> > nexts; - for (int k = 1; k <= MAXK; ++k) { - const int hyp_i = cur_i - k; - if (hyp_i < 0) break; - for (int l = 1; l <= MAXL; ++l) { - const int hyp_j = 
cur_j - l; - if (hyp_j < 0) break; - const prob_t& inside = a[hyp_i][hyp_j]; - if (inside == prob_t::Zero()) continue; - const prob_t& transp = trans[hyp_i][hyp_j][k - 1][l - 1]; - if (transp == prob_t::Zero()) continue; - const prob_t p = inside * transp; - ss.add(p); - nexts.push_back(pair<short,short>(hyp_i, hyp_j)); -// cerr << " (" << hyp_i << "," << hyp_j << ") <--- " << log(p) << endl; - } - } -// cerr << " sample set has " << nexts.size() << " elements.\n"; - const int selected = rng->SelectSample(ss); - cur_i = nexts[selected].first; - cur_j = nexts[selected].second; - } - newderiv.push_back(pair<short,short>(0,0)); - const prob_t q_new = DerivationProposalProbability(newderiv, sentf, sente); - IncrementDerivation(newderiv, sentf, sente); -// cerr << "SANITY: " << q_new << " " <<log(DerivationProposalProbability(newderiv, sentf, sente)) << endl; - if (deriv.empty()) { deriv = newderiv; continue; } - ++mh_samples; - - if (deriv != newderiv) { - const prob_t p_new = Likelihood(); -// cerr << "p_cur=" << log(p_cur) << "\t p_new=" << log(p_new) << endl; -// cerr << "q_cur=" << log(q_cur) << "\t q_new=" << log(q_new) << endl; - if (!rng->AcceptMetropolisHastings(p_new, p_cur, q_new, q_cur)) { - ++mh_rejects; - DecrementDerivation(newderiv, sentf, sente); - IncrementDerivation(deriv, sentf, sente); - } else { -// cerr << " ACCEPT\n"; - deriv = newderiv; - } - } - } - } -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - kMAX_TRG_PHRASE = conf["max_trg_phrase"].as<unsigned>(); - kMAX_SRC_PHRASE = conf["max_src_phrase"].as<unsigned>(); - - if (!conf.count("model1")) { - cerr << argv[0] << ": Please use --model1 to specify model 1 parameters\n"; - return 1; - } - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); -// MT19937& rng = *prng; - - vector<vector<int> > corpuse, corpusf; - set<int> vocabe, vocabf; - corpus::ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe); - cerr << "f-Corpus size: " << corpusf.size() << " sentences\n"; - cerr << "f-Vocabulary size: " << vocabf.size() << " types\n"; - cerr << "e-Corpus size: " << corpuse.size() << " sentences\n"; - cerr << "e-Vocabulary size: " << vocabe.size() << " types\n"; - assert(corpusf.size() == corpuse.size()); - - Model1 m1(conf["model1"].as<string>()); - - PhraseConditionalBase pcb0(m1, conf["model1_interpolation_weight"].as<double>(), vocabe.size()); - ConditionalParallelSegementationModel<PhraseConditionalBase> x(pcb0); - - ModelAndData posterior(x, corpuse, corpusf, vocabe, vocabf); - posterior.Sample(); - - TRule r1("[X] ||| x ||| l e ||| X=0"); - TRule r2("[X] ||| A ||| a d ||| X=0"); - TRule r3("[X] ||| n ||| e r ||| X=0"); - TRule r4("[X] ||| x A n ||| b l a g ||| X=0"); - - PhraseConditionalUninformativeBase u0(vocabe.size()); - - cerr << (pcb0(r1)*pcb0(r2)*pcb0(r3)) << endl; - cerr << (u0(r4)) << endl; - - return 0; -} - diff --git a/gi/pf/corpus.cc b/gi/pf/corpus.cc deleted file mode 100644 index cb6e4ed7..00000000 --- a/gi/pf/corpus.cc +++ /dev/null @@ -1,62 +0,0 @@ -#include "corpus.h" - -#include <set> -#include <vector> -#include <string> - -#include "tdict.h" -#include "filelib.h" - -using namespace std; - -namespace corpus { - -void ReadParallelCorpus(const string& filename, - vector<vector<WordID> >* f, - vector<vector<WordID> >* e, - set<WordID>* vocab_f, - set<WordID>* vocab_e) { - f->clear(); - e->clear(); - vocab_f->clear(); - vocab_e->clear(); -
ReadFile rf(filename); - istream* in = rf.stream(); - assert(*in); - string line; - unsigned lc = 0; - const WordID kDIV = TD::Convert("|||"); - vector<WordID> tmp; - while(getline(*in, line)) { - ++lc; - e->push_back(vector<int>()); - f->push_back(vector<int>()); - vector<int>& le = e->back(); - vector<int>& lf = f->back(); - tmp.clear(); - TD::ConvertSentence(line, &tmp); - bool isf = true; - for (unsigned i = 0; i < tmp.size(); ++i) { - const int cur = tmp[i]; - if (isf) { - if (kDIV == cur) { - isf = false; - } else { - lf.push_back(cur); - vocab_f->insert(cur); - } - } else { - if (cur == kDIV) { - cerr << "ERROR in " << lc << ": " << line << endl << endl; - abort(); - } - le.push_back(cur); - vocab_e->insert(cur); - } - } - assert(isf == false); - } -} - -} - diff --git a/gi/pf/corpus.h b/gi/pf/corpus.h deleted file mode 100644 index e7febdb7..00000000 --- a/gi/pf/corpus.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _CORPUS_H_ -#define _CORPUS_H_ - -#include <string> -#include <vector> -#include <set> -#include "wordid.h" - -namespace corpus { - -void ReadParallelCorpus(const std::string& filename, - std::vector<std::vector<WordID> >* f, - std::vector<std::vector<WordID> >* e, - std::set<WordID>* vocab_f, - std::set<WordID>* vocab_e); - -} - -#endif diff --git a/gi/pf/dpnaive.cc b/gi/pf/dpnaive.cc deleted file mode 100644 index 75ccad72..00000000 --- a/gi/pf/dpnaive.cc +++ /dev/null @@ -1,301 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/multi_array.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "base_distributions.h" -#include "monotonic_pseg.h" -#include "trule.h" -#include "tdict.h" -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp_nt.h" -#include "corpus.h" - -using namespace std; -using namespace std::tr1; -namespace po = boost::program_options; - -static unsigned kMAX_SRC_PHRASE; -static unsigned kMAX_TRG_PHRASE; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("input,i",po::value<string>(),"Read parallel data from") - ("max_src_phrase",po::value<unsigned>()->default_value(4),"Maximum length of source language phrases") - ("max_trg_phrase",po::value<unsigned>()->default_value(4),"Maximum length of target language phrases") - ("model1,m",po::value<string>(),"Model 1 parameters (used in base distribution)") - ("inverse_model1,M",po::value<string>(),"Inverse Model 1 parameters (used in base distribution)") - ("model1_interpolation_weight",po::value<double>()->default_value(0.95),"Mixing proportion of model 1 with uniform target distribution") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - 
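For reference, corpus::ReadParallelCorpus (declared in the corpus.h removed above) was the common entry point the gi/pf samplers used to load parallel data. A minimal driver sketching the call pattern is given below, assuming a standard cdec build; the input file name corpus.fr-en is a hypothetical placeholder, not part of this patch:

    // Sketch only: load "source ||| target" pairs with the reader removed above.
    #include <iostream>
    #include <set>
    #include <vector>

    #include "corpus.h"   // declares corpus::ReadParallelCorpus
    #include "wordid.h"   // defines WordID

    int main() {
      std::vector<std::vector<WordID> > f, e;  // source / target sentences as word ids
      std::set<WordID> vocab_f, vocab_e;       // observed source / target vocabularies
      // Expects one pair per line, "f words ||| e words"; a second "|||"
      // on a line is treated as a fatal format error by the reader.
      corpus::ReadParallelCorpus("corpus.fr-en", &f, &e, &vocab_f, &vocab_e);
      std::cerr << f.size() << " sentence pairs, |Vf| = " << vocab_f.size()
                << ", |Ve| = " << vocab_e.size() << std::endl;
      return 0;
    }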
-boost::shared_ptr<MT19937> prng; - -template <typename Base> -struct ModelAndData { - explicit ModelAndData(MonotonicParallelSegementationModel<PhraseJointBase_BiDir>& m, const Base& b, const vector<vector<int> >& ce, const vector<vector<int> >& cf, const set<int>& ve, const set<int>& vf) : - model(m), - rng(&*prng), - p0(b), - baseprob(prob_t::One()), - corpuse(ce), - corpusf(cf), - vocabe(ve), - vocabf(vf), - mh_samples(), - mh_rejects(), - kX(-TD::Convert("X")), - derivations(corpuse.size()) {} - - void ResampleHyperparameters() { - } - - void InstantiateRule(const pair<short,short>& from, - const pair<short,short>& to, - const vector<int>& sentf, - const vector<int>& sente, - TRule* rule) const { - rule->f_.clear(); - rule->e_.clear(); - rule->lhs_ = kX; - for (short i = from.first; i < to.first; ++i) - rule->f_.push_back(sentf[i]); - for (short i = from.second; i < to.second; ++i) - rule->e_.push_back(sente[i]); - } - - void DecrementDerivation(const vector<pair<short,short> >& d, const vector<int>& sentf, const vector<int>& sente) { - if (d.size() < 2) return; - TRule x; - for (int i = 1; i < d.size(); ++i) { - InstantiateRule(d[i], d[i-1], sentf, sente, &x); - model.DecrementRule(x); - model.DecrementContinue(); - } - model.DecrementStop(); - } - - void PrintDerivation(const vector<pair<short,short> >& d, const vector<int>& sentf, const vector<int>& sente) { - if (d.size() < 2) return; - TRule x; - for (int i = 1; i < d.size(); ++i) { - InstantiateRule(d[i], d[i-1], sentf, sente, &x); - cerr << i << '/' << (d.size() - 1) << ": " << x << endl; - } - } - - void IncrementDerivation(const vector<pair<short,short> >& d, const vector<int>& sentf, const vector<int>& sente) { - if (d.size() < 2) return; - TRule x; - for (int i = 1; i < d.size(); ++i) { - InstantiateRule(d[i], d[i-1], sentf, sente, &x); - model.IncrementRule(x); - model.IncrementContinue(); - } - model.IncrementStop(); - } - - prob_t Likelihood() const { - return model.Likelihood(); - } - - prob_t DerivationProposalProbability(const vector<pair<short,short> >& d, const vector<int>& sentf, const vector<int>& sente) const { - prob_t p = model.StopProbability(); - if (d.size() < 2) return p; - TRule x; - const prob_t p_cont = model.ContinueProbability(); - for (int i = 1; i < d.size(); ++i) { - InstantiateRule(d[i], d[i-1], sentf, sente, &x); - p *= p_cont; - p *= model.RuleProbability(x); - } - return p; - } - - void Sample(); - - MonotonicParallelSegementationModel<PhraseJointBase_BiDir>& model; - MT19937* rng; - const Base& p0; - prob_t baseprob; // cached value of generating the table table labels from p0 - // this can't be used if we go to a hierarchical prior! - const vector<vector<int> >& corpuse, corpusf; - const set<int>& vocabe, vocabf; - unsigned mh_samples, mh_rejects; - const int kX; - vector<vector<pair<short, short> > > derivations; -}; - -template <typename Base> -void ModelAndData<Base>::Sample() { - unsigned MAXK = kMAX_SRC_PHRASE; - unsigned MAXL = kMAX_TRG_PHRASE; - TRule x; - x.lhs_ = -TD::Convert("X"); - for (int samples = 0; samples < 1000; ++samples) { - if (samples % 1 == 0 && samples > 0) { - //ResampleHyperparameters(); - cerr << " [" << samples << " LLH=" << log(Likelihood()) << " MH=" << ((double)mh_rejects / mh_samples) << "]\n"; - for (int i = 0; i < 10; ++i) { - cerr << "SENTENCE: " << TD::GetString(corpusf[i]) << " ||| " << TD::GetString(corpuse[i]) << endl; - PrintDerivation(derivations[i], corpusf[i], corpuse[i]); - } - } - cerr << '.' 
<< flush; - for (int s = 0; s < corpuse.size(); ++s) { - const vector<int>& sentf = corpusf[s]; - const vector<int>& sente = corpuse[s]; -// cerr << " CUSTOMERS: " << rules.num_customers() << endl; -// cerr << "SENTENCE: " << TD::GetString(sentf) << " ||| " << TD::GetString(sente) << endl; - - vector<pair<short, short> >& deriv = derivations[s]; - const prob_t p_cur = Likelihood(); - DecrementDerivation(deriv, sentf, sente); - - boost::multi_array<prob_t, 2> a(boost::extents[sentf.size() + 1][sente.size() + 1]); - boost::multi_array<prob_t, 4> trans(boost::extents[sentf.size() + 1][sente.size() + 1][MAXK][MAXL]); - a[0][0] = prob_t::One(); - const prob_t q_stop = model.StopProbability(); - const prob_t q_cont = model.ContinueProbability(); - for (int i = 0; i < sentf.size(); ++i) { - for (int j = 0; j < sente.size(); ++j) { - const prob_t src_a = a[i][j]; - x.f_.clear(); - for (int k = 1; k <= MAXK; ++k) { - if (i + k > sentf.size()) break; - x.f_.push_back(sentf[i + k - 1]); - x.e_.clear(); - for (int l = 1; l <= MAXL; ++l) { - if (j + l > sente.size()) break; - x.e_.push_back(sente[j + l - 1]); - const bool stop_now = ((j + l) == sente.size()) && ((i + k) == sentf.size()); - const prob_t& cp = stop_now ? q_stop : q_cont; - trans[i][j][k - 1][l - 1] = model.RuleProbability(x) * cp; - a[i + k][j + l] += src_a * trans[i][j][k - 1][l - 1]; - } - } - } - } -// cerr << "Inside: " << log(a[sentf.size()][sente.size()]) << endl; - const prob_t q_cur = DerivationProposalProbability(deriv, sentf, sente); - - vector<pair<short,short> > newderiv; - int cur_i = sentf.size(); - int cur_j = sente.size(); - while(cur_i > 0 && cur_j > 0) { - newderiv.push_back(pair<short,short>(cur_i, cur_j)); -// cerr << "NODE: (" << cur_i << "," << cur_j << ")\n"; - SampleSet<prob_t> ss; - vector<pair<short,short> > nexts; - for (int k = 1; k <= MAXK; ++k) { - const int hyp_i = cur_i - k; - if (hyp_i < 0) break; - for (int l = 1; l <= MAXL; ++l) { - const int hyp_j = cur_j - l; - if (hyp_j < 0) break; - const prob_t& inside = a[hyp_i][hyp_j]; - if (inside == prob_t::Zero()) continue; - const prob_t& transp = trans[hyp_i][hyp_j][k - 1][l - 1]; - if (transp == prob_t::Zero()) continue; - const prob_t p = inside * transp; - ss.add(p); - nexts.push_back(pair<short,short>(hyp_i, hyp_j)); -// cerr << " (" << hyp_i << "," << hyp_j << ") <--- " << log(p) << endl; - } - } -// cerr << " sample set has " << nexts.size() << " elements.\n"; - const int selected = rng->SelectSample(ss); - cur_i = nexts[selected].first; - cur_j = nexts[selected].second; - } - newderiv.push_back(pair<short,short>(0,0)); - const prob_t q_new = DerivationProposalProbability(newderiv, sentf, sente); - IncrementDerivation(newderiv, sentf, sente); -// cerr << "SANITY: " << q_new << " " <<log(DerivationProposalProbability(newderiv, sentf, sente)) << endl; - if (deriv.empty()) { deriv = newderiv; continue; } - ++mh_samples; - - if (deriv != newderiv) { - const prob_t p_new = Likelihood(); -// cerr << "p_cur=" << log(p_cur) << "\t p_new=" << log(p_new) << endl; -// cerr << "q_cur=" << log(q_cur) << "\t q_new=" << log(q_new) << endl; - if (!rng->AcceptMetropolisHastings(p_new, p_cur, q_new, q_cur)) { - ++mh_rejects; - DecrementDerivation(newderiv, sentf, sente); - IncrementDerivation(deriv, sentf, sente); - } else { -// cerr << " ACCEPT\n"; - deriv = newderiv; - } - } - } - } -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - kMAX_TRG_PHRASE = conf["max_trg_phrase"].as<unsigned>(); - kMAX_SRC_PHRASE = 
conf["max_src_phrase"].as<unsigned>(); - - if (!conf.count("model1")) { - cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n"; - return 1; - } - if (!conf.count("inverse_model1")) { - cerr << argv[0] << "Please use --inverse_model1 to specify inverse model 1 parameters\n"; - return 1; - } - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); -// MT19937& rng = *prng; - - vector<vector<int> > corpuse, corpusf; - set<int> vocabe, vocabf; - corpus::ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe); - cerr << "f-Corpus size: " << corpusf.size() << " sentences\n"; - cerr << "f-Vocabulary size: " << vocabf.size() << " types\n"; - cerr << "f-Corpus size: " << corpuse.size() << " sentences\n"; - cerr << "f-Vocabulary size: " << vocabe.size() << " types\n"; - assert(corpusf.size() == corpuse.size()); - - Model1 m1(conf["model1"].as<string>()); - Model1 invm1(conf["inverse_model1"].as<string>()); -// PhraseJointBase lp0(m1, conf["model1_interpolation_weight"].as<double>(), vocabe.size(), vocabf.size()); - PhraseJointBase_BiDir alp0(m1, invm1, conf["model1_interpolation_weight"].as<double>(), vocabe.size(), vocabf.size()); - MonotonicParallelSegementationModel<PhraseJointBase_BiDir> m(alp0); - - ModelAndData<PhraseJointBase_BiDir> posterior(m, alp0, corpuse, corpusf, vocabe, vocabf); - posterior.Sample(); - - return 0; -} - diff --git a/gi/pf/guess-translits.pl b/gi/pf/guess-translits.pl deleted file mode 100755 index d00c2168..00000000 --- a/gi/pf/guess-translits.pl +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/perl -w -use strict; -use utf8; - -my $MIN_PMI = -3; - -my %fs; -my %es; -my %ef; - -die "Usage: $0 < input.utf8.txt\n" if scalar @ARGV > 0; - -binmode(STDIN,":utf8"); -binmode(STDOUT,":utf8"); -binmode(STDERR,":utf8"); - -my $tot = 0; -print STDERR "Reading alignments from STDIN ...\n"; -while(<STDIN>) { - chomp; - my ($fsent, $esent, $alsent) = split / \|\|\| /; - die "Format should be 'foreign sentence ||| english sentence ||| 0-0 1-1 ...'\n" unless defined $fsent && defined $esent && defined $alsent; - - my @fws = split /\s+/, $fsent; - my @ews = split /\s+/, $esent; - my @as = split /\s+/, $alsent; - my %a2b; - my %b2a; - for my $ap (@as) { - my ($a,$b) = split /-/, $ap; - die "BAD INPUT: $_\n" unless defined $a && defined $b; - $a2b{$a}->{$b} = 1; - $b2a{$b}->{$a} = 1; - } - for my $a (keys %a2b) { - my $bref = $a2b{$a}; - next unless scalar keys %$bref < 2; - my $b = (keys %$bref)[0]; - next unless scalar keys %{$b2a{$b}} < 2; - my $f = $fws[$a]; - next unless defined $f; - next unless length($f) > 3; - my $e = $ews[$b]; - next unless defined $e; - next unless length($e) > 3; - - $ef{$f}->{$e}++; - $es{$e}++; - $fs{$f}++; - $tot++; - } -} -my $ltot = log($tot); -my $num = 0; -print STDERR "Extracting pairs for PMI > $MIN_PMI ...\n"; -for my $f (keys %fs) { - my $logf = log($fs{$f}); - my $esref = $ef{$f}; - for my $e (keys %$esref) { - my $loge = log($es{$e}); - my $ef = $esref->{$e}; - my $logef = log($ef); - my $pmi = $logef - ($loge + $logf); - next if $pmi < $MIN_PMI; - my @flets = split //, $f; - my @elets = split //, $e; - print "@flets ||| @elets\n"; - $num++; - } -} -print STDERR "Extracted $num pairs.\n"; -print STDERR "Recommend running:\n ../../training/model1 -v -d -t -99999 output.txt\n"; diff --git a/gi/pf/hpyp_tm.cc b/gi/pf/hpyp_tm.cc deleted file mode 100644 index f362d3f8..00000000 --- a/gi/pf/hpyp_tm.cc +++ /dev/null @@ -1,133 +0,0 @@ 
-#include "hpyp_tm.h" - -#include <tr1/unordered_map> -#include <iostream> -#include <queue> - -#include "tdict.h" -#include "ccrp.h" -#include "pyp_word_model.h" -#include "tied_resampler.h" - -using namespace std; -using namespace std::tr1; - -struct FreqBinner { - FreqBinner(const std::string& fname) { fd_.Load(fname); } - unsigned NumberOfBins() const { return fd_.Max() + 1; } - unsigned Bin(const WordID& w) const { return fd_.LookUp(w); } - FreqDict<unsigned> fd_; -}; - -template <typename Base, class Binner = FreqBinner> -struct ConditionalPYPWordModel { - ConditionalPYPWordModel(Base* b, const Binner* bnr = NULL) : - base(*b), - binner(bnr), - btr(binner ? binner->NumberOfBins() + 1u : 2u) {} - - void Summary() const { - cerr << "Number of conditioning contexts: " << r.size() << endl; - for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) { - cerr << TD::Convert(it->first) << " \tPYP(d=" << it->second.discount() << ",s=" << it->second.strength() << ") --------------------------" << endl; - for (CCRP<vector<WordID> >::const_iterator i2 = it->second.begin(); i2 != it->second.end(); ++i2) - cerr << " " << i2->second << endl; - } - } - - void ResampleHyperparameters(MT19937* rng) { - btr.ResampleHyperparameters(rng); - } - - prob_t Prob(const WordID src, const vector<WordID>& trglets) const { - RuleModelHash::const_iterator it = r.find(src); - if (it == r.end()) { - return base(trglets); - } else { - return it->second.prob(trglets, base(trglets)); - } - } - - void Increment(const WordID src, const vector<WordID>& trglets, MT19937* rng) { - RuleModelHash::iterator it = r.find(src); - if (it == r.end()) { - it = r.insert(make_pair(src, CCRP<vector<WordID> >(0.5,1.0))).first; - static const WordID kNULL = TD::Convert("NULL"); - unsigned bin = (src == kNULL ? 
0 : 1); - if (binner && bin) { bin = binner->Bin(src) + 1; } - btr.Add(bin, &it->second); - } - if (it->second.increment(trglets, base(trglets), rng)) - base.Increment(trglets, rng); - } - - void Decrement(const WordID src, const vector<WordID>& trglets, MT19937* rng) { - RuleModelHash::iterator it = r.find(src); - assert(it != r.end()); - if (it->second.decrement(trglets, rng)) { - base.Decrement(trglets, rng); - } - } - - prob_t Likelihood() const { - prob_t p = prob_t::One(); - for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) { - prob_t q; q.logeq(it->second.log_crp_prob()); - p *= q; - } - return p; - } - - unsigned UniqueConditioningContexts() const { - return r.size(); - } - - // TODO tie PYP hyperparameters based on source word frequency bins - Base& base; - const Binner* binner; - BinTiedResampler<CCRP<vector<WordID> > > btr; - typedef unordered_map<WordID, CCRP<vector<WordID> > > RuleModelHash; - RuleModelHash r; -}; - -HPYPLexicalTranslation::HPYPLexicalTranslation(const vector<vector<WordID> >& lets, - const unsigned vocab_size, - const unsigned num_letters) : - letters(lets), - base(vocab_size, num_letters, 5), - up0(new PYPWordModel<PoissonUniformWordModel>(&base)), - tmodel(new ConditionalPYPWordModel<PYPWordModel<PoissonUniformWordModel> >(up0, new FreqBinner("10k.freq"))), - kX(-TD::Convert("X")) {} - -void HPYPLexicalTranslation::Summary() const { - tmodel->Summary(); - up0->Summary(); -} - -prob_t HPYPLexicalTranslation::Likelihood() const { - prob_t p = up0->Likelihood(); - p *= tmodel->Likelihood(); - return p; -} - -void HPYPLexicalTranslation::ResampleHyperparameters(MT19937* rng) { - tmodel->ResampleHyperparameters(rng); - up0->ResampleHyperparameters(rng); -} - -unsigned HPYPLexicalTranslation::UniqueConditioningContexts() const { - return tmodel->UniqueConditioningContexts(); -} - -prob_t HPYPLexicalTranslation::Prob(WordID src, WordID trg) const { - return tmodel->Prob(src, letters[trg]); -} - -void HPYPLexicalTranslation::Increment(WordID src, WordID trg, MT19937* rng) { - tmodel->Increment(src, letters[trg], rng); -} - -void HPYPLexicalTranslation::Decrement(WordID src, WordID trg, MT19937* rng) { - tmodel->Decrement(src, letters[trg], rng); -} - diff --git a/gi/pf/hpyp_tm.h b/gi/pf/hpyp_tm.h deleted file mode 100644 index af3215ba..00000000 --- a/gi/pf/hpyp_tm.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef HPYP_LEX_TRANS -#define HPYP_LEX_TRANS - -#include <vector> -#include "wordid.h" -#include "prob.h" -#include "sampler.h" -#include "freqdict.h" -#include "poisson_uniform_word_model.h" - -struct FreqBinner; -template <class B> struct PYPWordModel; -template <typename T, class B> struct ConditionalPYPWordModel; - -struct HPYPLexicalTranslation { - explicit HPYPLexicalTranslation(const std::vector<std::vector<WordID> >& lets, - const unsigned vocab_size, - const unsigned num_letters); - - prob_t Likelihood() const; - - void ResampleHyperparameters(MT19937* rng); - prob_t Prob(WordID src, WordID trg) const; // return p(trg | src) - void Summary() const; - void Increment(WordID src, WordID trg, MT19937* rng); - void Decrement(WordID src, WordID trg, MT19937* rng); - unsigned UniqueConditioningContexts() const; - - private: - const std::vector<std::vector<WordID> >& letters; // spelling dictionary - PoissonUniformWordModel base; // "generator" of English types - PYPWordModel<PoissonUniformWordModel>* up0; // model English lexicon - ConditionalPYPWordModel<PYPWordModel<PoissonUniformWordModel>, FreqBinner>* tmodel; // translation distributions - 
// (model English word | French word) - const WordID kX; -}; - -#endif diff --git a/gi/pf/itg.cc b/gi/pf/itg.cc deleted file mode 100644 index 29ec3860..00000000 --- a/gi/pf/itg.cc +++ /dev/null @@ -1,275 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/functional.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "viterbi.h" -#include "hg.h" -#include "trule.h" -#include "tdict.h" -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp_nt.h" -#include "ccrp_onetable.h" - -using namespace std; -using namespace tr1; -namespace po = boost::program_options; - -ostream& operator<<(ostream& os, const vector<WordID>& p) { - os << '['; - for (int i = 0; i < p.size(); ++i) - os << (i==0 ? "" : " ") << TD::Convert(p[i]); - return os << ']'; -} - -struct UnigramModel { - explicit UnigramModel(const string& fname, unsigned vocab_size, double p0null = 0.05) : - use_uniform_(fname.size() == 0), - p0null_(p0null), - uniform_((1.0 - p0null) / vocab_size), - probs_(TD::NumWords() + 1) { - if (fname.size() > 0) LoadUnigrams(fname); - probs_[0] = p0null_; - } - -// -// \data\ -// ngram 1=9295 -// -// \1-grams: -// -3.191193 " - - void LoadUnigrams(const string& fname) { - cerr << "Loading unigram probabilities from " << fname << " ..." << endl; - ReadFile rf(fname); - string line; - istream& in = *rf.stream(); - assert(in); - getline(in, line); - assert(line.empty()); - getline(in, line); - assert(line == "\\data\\"); - getline(in, line); - size_t pos = line.find("ngram 1="); - assert(pos == 0); - assert(line.size() > 8); - const size_t num_unigrams = atoi(&line[8]); - getline(in, line); - assert(line.empty()); - getline(in, line); - assert(line == "\\1-grams:"); - for (size_t i = 0; i < num_unigrams; ++i) { - getline(in, line); - assert(line.size() > 0); - pos = line.find('\t'); - assert(pos > 0); - assert(pos + 1 < line.size()); - const WordID w = TD::Convert(line.substr(pos + 1)); - line[pos] = 0; - float p = atof(&line[0]); - const prob_t pnon_null(1.0 - p0null_.as_float()); - if (w < probs_.size()) probs_[w].logeq(p * log(10) + log(pnon_null)); else abort(); - } - } - - const prob_t& operator()(const WordID& w) const { - if (!w) return p0null_; - if (use_uniform_) return uniform_; - return probs_[w]; - } - - const bool use_uniform_; - const prob_t p0null_; - const prob_t uniform_; - vector<prob_t> probs_; -}; - -struct Model1 { - explicit Model1(const string& fname) : - kNULL(TD::Convert("<eps>")), - kZERO() { - LoadModel1(fname); - } - - void LoadModel1(const string& fname) { - cerr << "Loading Model 1 parameters from " << fname << " ..." << endl; - ReadFile rf(fname); - istream& in = *rf.stream(); - string line; - unsigned lc = 0; - while(getline(in, line)) { - ++lc; - int cur = 0; - int start = 0; - while(cur < line.size() && line[cur] != ' ') { ++cur; } - assert(cur != line.size()); - line[cur] = 0; - const WordID src = TD::Convert(&line[0]); - ++cur; - start = cur; - while(cur < line.size() && line[cur] != ' ') { ++cur; } - assert(cur != line.size()); - line[cur] = 0; - WordID trg = TD::Convert(&line[start]); - const double logprob = strtod(&line[cur + 1], NULL); - if (src >= ttable.size()) ttable.resize(src + 1); - ttable[src][trg].logeq(logprob); - } - cerr << " read " << lc << " parameters.\n"; - } - - // returns prob 0 if src or trg is not found! 
- const prob_t& operator()(WordID src, WordID trg) const { - if (src == 0) src = kNULL; - if (src < ttable.size()) { - const map<WordID, prob_t>& cpd = ttable[src]; - const map<WordID, prob_t>::const_iterator it = cpd.find(trg); - if (it != cpd.end()) - return it->second; - } - return kZERO; - } - - const WordID kNULL; - const prob_t kZERO; - vector<map<WordID, prob_t> > ttable; -}; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("particles,p",po::value<unsigned>()->default_value(25),"Number of particles") - ("input,i",po::value<string>(),"Read parallel data from") - ("model1,m",po::value<string>(),"Model 1 parameters (used in base distribution)") - ("inverse_model1,M",po::value<string>(),"Inverse Model 1 parameters (used in backward estimate)") - ("model1_interpolation_weight",po::value<double>()->default_value(0.95),"Mixing proportion of model 1 with uniform target distribution") - ("src_unigram,u",po::value<string>()->default_value(""),"Source unigram distribution; empty for uniform") - ("trg_unigram,U",po::value<string>()->default_value(""),"Target unigram distribution; empty for uniform") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -void ReadParallelCorpus(const string& filename, - vector<vector<WordID> >* f, - vector<vector<WordID> >* e, - set<WordID>* vocab_f, - set<WordID>* vocab_e) { - f->clear(); - e->clear(); - vocab_f->clear(); - vocab_e->clear(); - istream* in; - if (filename == "-") - in = &cin; - else - in = new ifstream(filename.c_str()); - assert(*in); - string line; - const WordID kDIV = TD::Convert("|||"); - vector<WordID> tmp; - while(*in) { - getline(*in, line); - if (line.empty() && !*in) break; - e->push_back(vector<int>()); - f->push_back(vector<int>()); - vector<int>& le = e->back(); - vector<int>& lf = f->back(); - tmp.clear(); - TD::ConvertSentence(line, &tmp); - bool isf = true; - for (unsigned i = 0; i < tmp.size(); ++i) { - const int cur = tmp[i]; - if (isf) { - if (kDIV == cur) { isf = false; } else { - lf.push_back(cur); - vocab_f->insert(cur); - } - } else { - assert(cur != kDIV); - le.push_back(cur); - vocab_e->insert(cur); - } - } - assert(isf == false); - } - if (in != &cin) delete in; -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - const unsigned particles = conf["particles"].as<unsigned>(); - const unsigned samples = conf["samples"].as<unsigned>(); - TD::Convert("<s>"); - TD::Convert("</s>"); - TD::Convert("<unk>"); - if (!conf.count("model1")) { - cerr << argv[0] << ": Please use --model1 to specify model 1 parameters\n"; - return 1; - } - boost::shared_ptr<MT19937> prng; - if (conf.count("random_seed")) - prng.reset(new
MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - - vector<vector<WordID> > corpuse, corpusf; - set<WordID> vocabe, vocabf; - cerr << "Reading corpus...\n"; - ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe); - cerr << "F-corpus size: " << corpusf.size() << " sentences\t (" << vocabf.size() << " word types)\n"; - cerr << "E-corpus size: " << corpuse.size() << " sentences\t (" << vocabe.size() << " word types)\n"; - assert(corpusf.size() == corpuse.size()); - UnigramModel src_unigram(conf["src_unigram"].as<string>(), vocabf.size()); - UnigramModel trg_unigram(conf["trg_unigram"].as<string>(), vocabe.size()); - const prob_t kHALF(0.5); - - const string kEMPTY = "NULL"; - const int kLHS = -TD::Convert("X"); - Model1 m1(conf["model1"].as<string>()); - Model1 invm1(conf["inverse_model1"].as<string>()); - for (int si = 0; si < conf["samples"].as<unsigned>(); ++si) { - cerr << '.' << flush; - for (int ci = 0; ci < corpusf.size(); ++ci) { - const vector<WordID>& trg = corpuse[ci]; - const vector<WordID>& src = corpusf[ci]; - for (int i = 0; i <= trg.size(); ++i) { - const WordID e_i = i > 0 ? trg[i-1] : 0; - for (int j = 0; j <= src.size(); ++j) { - const WordID f_j = j > 0 ? src[j-1] : 0; - if (e_i == 0 && f_j == 0) continue; - prob_t je = kHALF * src_unigram(f_j) * m1(f_j,e_i) + kHALF * trg_unigram(e_i) * invm1(e_i,f_j); - cerr << "p( " << (e_i ? TD::Convert(e_i) : kEMPTY) << " , " << (f_j ? TD::Convert(f_j) : kEMPTY) << " ) = " << log(je) << endl; - if (e_i && f_j) - cout << "[X] ||| " << TD::Convert(f_j) << " ||| " << TD::Convert(e_i) << " ||| LogProb=" << log(je) << endl; - } - } - } - } -} - diff --git a/gi/pf/learn_cfg.cc b/gi/pf/learn_cfg.cc deleted file mode 100644 index 1d5126e4..00000000 --- a/gi/pf/learn_cfg.cc +++ /dev/null @@ -1,428 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/functional.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "inside_outside.h" -#include "hg.h" -#include "bottom_up_parser.h" -#include "fdict.h" -#include "grammar.h" -#include "m.h" -#include "trule.h" -#include "tdict.h" -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp.h" -#include "ccrp_onetable.h" - -using namespace std; -using namespace tr1; -namespace po = boost::program_options; - -boost::shared_ptr<MT19937> prng; -vector<int> nt_vocab; -vector<int> nt_id_to_index; -static unsigned kMAX_RULE_SIZE = 0; -static unsigned kMAX_ARITY = 0; -static bool kALLOW_MIXED = true; // allow rules with mixed terminals and NTs -static bool kHIERARCHICAL_PRIOR = false; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("input,i",po::value<string>(),"Read parallel data from") - ("max_rule_size,m", po::value<unsigned>()->default_value(0), "Maximum rule size (0 for unlimited)") - ("max_arity,a", po::value<unsigned>()->default_value(0), "Maximum number of nonterminals in a rule (0 for unlimited)") - ("no_mixed_rules,M", "Do not mix terminals and nonterminals in a rule RHS") - ("nonterminals,n", po::value<unsigned>()->default_value(1), "Size of nonterminal vocabulary") - ("hierarchical_prior,h", "Use hierarchical prior") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line 
options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -unsigned ReadCorpus(const string& filename, - vector<vector<WordID> >* e, - set<WordID>* vocab_e) { - e->clear(); - vocab_e->clear(); - istream* in; - if (filename == "-") - in = &cin; - else - in = new ifstream(filename.c_str()); - assert(*in); - string line; - unsigned toks = 0; - while(*in) { - getline(*in, line); - if (line.empty() && !*in) break; - e->push_back(vector<int>()); - vector<int>& le = e->back(); - TD::ConvertSentence(line, &le); - for (unsigned i = 0; i < le.size(); ++i) - vocab_e->insert(le[i]); - toks += le.size(); - } - if (in != &cin) delete in; - return toks; -} - -struct Grid { - // a b c d e - // 0 - 0 - - - vector<int> grid; -}; - -struct BaseRuleModel { - explicit BaseRuleModel(unsigned term_size, - unsigned nonterm_size = 1) : - unif_term(1.0 / term_size), - unif_nonterm(1.0 / nonterm_size) {} - prob_t operator()(const TRule& r) const { - prob_t p; p.logeq(Md::log_poisson(1.0, r.f_.size())); - const prob_t term_prob((2.0 + 0.01*r.f_.size()) / (r.f_.size() + 2)); - const prob_t nonterm_prob(1.0 - term_prob.as_float()); - for (unsigned i = 0; i < r.f_.size(); ++i) { - if (r.f_[i] <= 0) { // nonterminal - if (kALLOW_MIXED) p *= nonterm_prob; - p *= unif_nonterm; - } else { // terminal - if (kALLOW_MIXED) p *= term_prob; - p *= unif_term; - } - } - return p; - } - const prob_t unif_term, unif_nonterm; -}; - -struct HieroLMModel { - explicit HieroLMModel(unsigned vocab_size, unsigned num_nts = 1) : - base(vocab_size, num_nts), - q0(1,1,1,1), - nts(num_nts, CCRP<TRule>(1,1,1,1)) {} - - prob_t Prob(const TRule& r) const { - return nts[nt_id_to_index[-r.lhs_]].prob(r, p0(r)); - } - - inline prob_t p0(const TRule& r) const { - if (kHIERARCHICAL_PRIOR) - return q0.prob(r, base(r)); - else - return base(r); - } - - int Increment(const TRule& r, MT19937* rng) { - const int delta = nts[nt_id_to_index[-r.lhs_]].increment(r, p0(r), rng); - if (kHIERARCHICAL_PRIOR && delta) - q0.increment(r, base(r), rng); - return delta; - // return x.increment(r); - } - - int Decrement(const TRule& r, MT19937* rng) { - const int delta = nts[nt_id_to_index[-r.lhs_]].decrement(r, rng); - if (kHIERARCHICAL_PRIOR && delta) - q0.decrement(r, rng); - return delta; - //return x.decrement(r); - } - - prob_t Likelihood() const { - prob_t p = prob_t::One(); - for (unsigned i = 0; i < nts.size(); ++i) { - prob_t q; q.logeq(nts[i].log_crp_prob()); - p *= q; - for (CCRP<TRule>::const_iterator it = nts[i].begin(); it != nts[i].end(); ++it) { - prob_t tp = p0(it->first); - tp.poweq(it->second.num_tables()); - p *= tp; - } - } - if (kHIERARCHICAL_PRIOR) { - prob_t q; q.logeq(q0.log_crp_prob()); - p *= q; - for (CCRP<TRule>::const_iterator it = q0.begin(); it != q0.end(); ++it) { - prob_t tp = base(it->first); - tp.poweq(it->second.num_tables()); - p *= tp; - } - } - //for (CCRP_OneTable<TRule>::const_iterator it = x.begin(); it != x.end(); ++it) - // p *= base(it->first); - return p; 
- } - - void ResampleHyperparameters(MT19937* rng) { - for (unsigned i = 0; i < nts.size(); ++i) - nts[i].resample_hyperparameters(rng); - if (kHIERARCHICAL_PRIOR) { - q0.resample_hyperparameters(rng); - cerr << "[base d=" << q0.discount() << ", s=" << q0.strength() << "]"; - } - cerr << " d=" << nts[0].discount() << ", s=" << nts[0].strength() << endl; - } - - const BaseRuleModel base; - CCRP<TRule> q0; - vector<CCRP<TRule> > nts; - //CCRP_OneTable<TRule> x; -}; - -vector<GrammarIter* > tofreelist; - -HieroLMModel* plm; - -struct NPGrammarIter : public GrammarIter, public RuleBin { - NPGrammarIter() : arity() { tofreelist.push_back(this); } - NPGrammarIter(const TRulePtr& inr, const int a, int symbol) : arity(a) { - if (inr) { - r.reset(new TRule(*inr)); - } else { - r.reset(new TRule); - } - TRule& rr = *r; - rr.lhs_ = nt_vocab[0]; - rr.f_.push_back(symbol); - rr.e_.push_back(symbol < 0 ? (1-int(arity)) : symbol); - tofreelist.push_back(this); - } - inline static unsigned NextArity(int cur_a, int symbol) { - return cur_a + (symbol <= 0 ? 1 : 0); - } - virtual int GetNumRules() const { - if (r) return nt_vocab.size(); else return 0; - } - virtual TRulePtr GetIthRule(int i) const { - if (i == 0) return r; - TRulePtr nr(new TRule(*r)); - nr->lhs_ = nt_vocab[i]; - return nr; - } - virtual int Arity() const { - return arity; - } - virtual const RuleBin* GetRules() const { - if (!r) return NULL; else return this; - } - virtual const GrammarIter* Extend(int symbol) const { - const int next_arity = NextArity(arity, symbol); - if (kMAX_ARITY && next_arity > kMAX_ARITY) - return NULL; - if (!kALLOW_MIXED && r) { - bool t1 = r->f_.front() <= 0; - bool t2 = symbol <= 0; - if (t1 != t2) return NULL; - } - if (!kMAX_RULE_SIZE || !r || (r->f_.size() < kMAX_RULE_SIZE)) - return new NPGrammarIter(r, next_arity, symbol); - else - return NULL; - } - const unsigned char arity; - TRulePtr r; -}; - -struct NPGrammar : public Grammar { - virtual const GrammarIter* GetRoot() const { - return new NPGrammarIter; - } -}; - -prob_t TotalProb(const Hypergraph& hg) { - return Inside<prob_t, EdgeProb>(hg); -} - -void SampleDerivation(const Hypergraph& hg, MT19937* rng, vector<unsigned>* sampled_deriv) { - vector<prob_t> node_probs; - Inside<prob_t, EdgeProb>(hg, &node_probs); - queue<unsigned> q; - q.push(hg.nodes_.size() - 2); - while(!q.empty()) { - unsigned cur_node_id = q.front(); -// cerr << "NODE=" << cur_node_id << endl; - q.pop(); - const Hypergraph::Node& node = hg.nodes_[cur_node_id]; - const unsigned num_in_edges = node.in_edges_.size(); - unsigned sampled_edge = 0; - if (num_in_edges == 1) { - sampled_edge = node.in_edges_[0]; - } else { - //prob_t z; - assert(num_in_edges > 1); - SampleSet<prob_t> ss; - for (unsigned j = 0; j < num_in_edges; ++j) { - const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]]; - prob_t p = edge.edge_prob_; - for (unsigned k = 0; k < edge.tail_nodes_.size(); ++k) - p *= node_probs[edge.tail_nodes_[k]]; - ss.add(p); -// cerr << log(ss[j]) << " ||| " << edge.rule_->AsString() << endl; - //z += p; - } -// for (unsigned j = 0; j < num_in_edges; ++j) { -// const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]]; -// cerr << exp(log(ss[j] / z)) << " ||| " << edge.rule_->AsString() << endl; -// } -// cerr << " --- \n"; - sampled_edge = node.in_edges_[rng->SelectSample(ss)]; - } - sampled_deriv->push_back(sampled_edge); - const Hypergraph::Edge& edge = hg.edges_[sampled_edge]; - for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j) { - q.push(edge.tail_nodes_[j]); - } - } - 
for (unsigned i = 0; i < sampled_deriv->size(); ++i) { - cerr << *hg.edges_[(*sampled_deriv)[i]].rule_ << endl; - } -} - -void IncrementDerivation(const Hypergraph& hg, const vector<unsigned>& d, HieroLMModel* plm, MT19937* rng) { - for (unsigned i = 0; i < d.size(); ++i) - plm->Increment(*hg.edges_[d[i]].rule_, rng); -} - -void DecrementDerivation(const Hypergraph& hg, const vector<unsigned>& d, HieroLMModel* plm, MT19937* rng) { - for (unsigned i = 0; i < d.size(); ++i) - plm->Decrement(*hg.edges_[d[i]].rule_, rng); -} - -int main(int argc, char** argv) { - po::variables_map conf; - - InitCommandLine(argc, argv, &conf); - nt_vocab.resize(conf["nonterminals"].as<unsigned>()); - assert(nt_vocab.size() > 0); - assert(nt_vocab.size() < 26); - { - string nt = "X"; - for (unsigned i = 0; i < nt_vocab.size(); ++i) { - if (nt_vocab.size() > 1) nt[0] = ('A' + i); - int pid = TD::Convert(nt); - nt_vocab[i] = -pid; - if (pid >= nt_id_to_index.size()) { - nt_id_to_index.resize(pid + 1, -1); - } - nt_id_to_index[pid] = i; - } - } - vector<GrammarPtr> grammars; - grammars.push_back(GrammarPtr(new NPGrammar)); - - const unsigned samples = conf["samples"].as<unsigned>(); - kMAX_RULE_SIZE = conf["max_rule_size"].as<unsigned>(); - if (kMAX_RULE_SIZE == 1) { - cerr << "Invalid maximum rule size: must be 0 or >1\n"; - return 1; - } - kMAX_ARITY = conf["max_arity"].as<unsigned>(); - if (kMAX_ARITY == 1) { - cerr << "Invalid maximum arity: must be 0 or >1\n"; - return 1; - } - kALLOW_MIXED = !conf.count("no_mixed_rules"); - - kHIERARCHICAL_PRIOR = conf.count("hierarchical_prior"); - - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - vector<vector<WordID> > corpuse; - set<WordID> vocabe; - cerr << "Reading corpus...\n"; - const unsigned toks = ReadCorpus(conf["input"].as<string>(), &corpuse, &vocabe); - cerr << "E-corpus size: " << corpuse.size() << " sentences\t (" << vocabe.size() << " word types)\n"; - HieroLMModel lm(vocabe.size(), nt_vocab.size()); - - plm = &lm; - ExhaustiveBottomUpParser parser(TD::Convert(-nt_vocab[0]), grammars); - - Hypergraph hg; - const int kGoal = -TD::Convert("Goal"); - const int kLP = FD::Convert("LogProb"); - SparseVector<double> v; v.set_value(kLP, 1.0); - vector<vector<unsigned> > derivs(corpuse.size()); - vector<Lattice> cl(corpuse.size()); - for (int ci = 0; ci < corpuse.size(); ++ci) { - vector<int>& src = corpuse[ci]; - Lattice& lat = cl[ci]; - lat.resize(src.size()); - for (unsigned i = 0; i < src.size(); ++i) - lat[i].push_back(LatticeArc(src[i], 0.0, 1)); - } - for (int SS=0; SS < samples; ++SS) { - const bool is_last = ((samples - 1) == SS); - prob_t dlh = prob_t::One(); - for (int ci = 0; ci < corpuse.size(); ++ci) { - const vector<int>& src = corpuse[ci]; - const Lattice& lat = cl[ci]; - cerr << TD::GetString(src) << endl; - hg.clear(); - parser.Parse(lat, &hg); // exhaustive parse - vector<unsigned>& d = derivs[ci]; - if (!is_last) DecrementDerivation(hg, d, &lm, &rng); - for (unsigned i = 0; i < hg.edges_.size(); ++i) { - TRule& r = *hg.edges_[i].rule_; - if (r.lhs_ == kGoal) - hg.edges_[i].edge_prob_ = prob_t::One(); - else - hg.edges_[i].edge_prob_ = lm.Prob(r); - } - if (!is_last) { - d.clear(); - SampleDerivation(hg, &rng, &d); - IncrementDerivation(hg, derivs[ci], &lm, &rng); - } else { - prob_t p = TotalProb(hg); - dlh *= p; - cerr << " p(sentence) = " << log(p) << "\t" << log(dlh) << endl; - } - if (tofreelist.size() > 200000) { - cerr << "Freeing ... 
"; - for (unsigned i = 0; i < tofreelist.size(); ++i) - delete tofreelist[i]; - tofreelist.clear(); - cerr << "Freed.\n"; - } - } - double llh = log(lm.Likelihood()); - cerr << "LLH=" << llh << "\tENTROPY=" << (-llh / log(2) / toks) << "\tPPL=" << pow(2, -llh / log(2) / toks) << endl; - if (SS % 10 == 9) lm.ResampleHyperparameters(&rng); - if (is_last) { - double z = log(dlh); - cerr << "TOTAL_PROB=" << z << "\tENTROPY=" << (-z / log(2) / toks) << "\tPPL=" << pow(2, -z / log(2) / toks) << endl; - } - } - for (unsigned i = 0; i < nt_vocab.size(); ++i) - cerr << lm.nts[i] << endl; - return 0; -} - diff --git a/gi/pf/make-freq-bins.pl b/gi/pf/make-freq-bins.pl deleted file mode 100755 index fdcd3555..00000000 --- a/gi/pf/make-freq-bins.pl +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/perl -w -use strict; - -my $BASE = 6; -my $CUTOFF = 3; - -my %d; -my $num = 0; -while(<>){ - chomp; - my @words = split /\s+/; - for my $w (@words) {$d{$w}++; $num++;} -} - -my @vocab = sort {$d{$b} <=> $d{$a}} keys %d; - -for (my $i=0; $i<scalar @vocab; $i++) { - my $most = $d{$vocab[$i]}; - my $least = 1; - - my $nl = -int(log($most / $num) / log($BASE) + $CUTOFF); - if ($nl < 0) { $nl = 0; } - print "$vocab[$i] $nl\n" -} - - diff --git a/gi/pf/mh_test.cc b/gi/pf/mh_test.cc deleted file mode 100644 index 296e7285..00000000 --- a/gi/pf/mh_test.cc +++ /dev/null @@ -1,148 +0,0 @@ -#include "ccrp.h" - -#include <vector> -#include <iostream> - -#include "tdict.h" -#include "transliterations.h" - -using namespace std; - -MT19937 rng; - -static bool verbose = false; - -struct Model { - - Model() : bp(), base(0.2, 0.6) , ccrps(5, CCRP<int>(0.8, 0.5)) {} - - double p0(int x) const { - assert(x > 0); - assert(x < 5); - return 1.0/4.0; - } - - double llh() const { - double lh = bp + base.log_crp_prob(); - for (int ctx = 1; ctx < 5; ++ctx) - lh += ccrps[ctx].log_crp_prob(); - return lh; - } - - double prob(int ctx, int x) const { - assert(ctx > 0 && ctx < 5); - return ccrps[ctx].prob(x, base.prob(x, p0(x))); - } - - void increment(int ctx, int x) { - assert(ctx > 0 && ctx < 5); - if (ccrps[ctx].increment(x, base.prob(x, p0(x)), &rng)) { - if (base.increment(x, p0(x), &rng)) { - bp += log(1.0 / 4.0); - } - } - } - - // this is just a biased estimate - double est_base_prob(int x) { - return (x + 1) * x / 40.0; - } - - void increment_is(int ctx, int x) { - assert(ctx > 0 && ctx < 5); - SampleSet<double> ss; - const int PARTICLES = 25; - vector<CCRP<int> > s1s(PARTICLES, CCRP<int>(0.5,0.5)); - vector<CCRP<int> > sbs(PARTICLES, CCRP<int>(0.5,0.5)); - vector<double> sp0s(PARTICLES); - - CCRP<int> s1 = ccrps[ctx]; - CCRP<int> sb = base; - double sp0 = bp; - for (int pp = 0; pp < PARTICLES; ++pp) { - if (pp > 0) { - ccrps[ctx] = s1; - base = sb; - bp = sp0; - } - - double q = 1; - double gamma = 1; - double est_p = est_base_prob(x); - //base.prob(x, p0(x)) + rng.next() * 0.1; - if (ccrps[ctx].increment(x, est_p, &rng, &q)) { - gamma = q * base.prob(x, p0(x)); - q *= est_p; - if (verbose) cerr << "(DP-base draw) "; - double qq = -1; - if (base.increment(x, p0(x), &rng, &qq)) { - if (verbose) cerr << "(G0 draw) "; - bp += log(p0(x)); - qq *= p0(x); - } - } else { gamma = q; } - double w = gamma / q; - if (verbose) - cerr << "gamma=" << gamma << " q=" << q << "\tw=" << w << endl; - ss.add(w); - s1s[pp] = ccrps[ctx]; - sbs[pp] = base; - sp0s[pp] = bp; - } - int ps = rng.SelectSample(ss); - ccrps[ctx] = s1s[ps]; - base = sbs[ps]; - bp = sp0s[ps]; - if (verbose) { - cerr << "SELECTED: " << ps << endl; - static int cc = 0; cc++; if (cc 
==10) exit(1); - } - } - - void decrement(int ctx, int x) { - assert(ctx > 0 && ctx < 5); - if (ccrps[ctx].decrement(x, &rng)) { - if (base.decrement(x, &rng)) { - bp -= log(p0(x)); - } - } - } - - double bp; - CCRP<int> base; - vector<CCRP<int> > ccrps; - -}; - -int main(int argc, char** argv) { - if (argc > 1) { verbose = true; } - vector<int> counts(15, 0); - vector<int> tcounts(15, 0); - int points[] = {1,2, 2,2, 3,2, 4,1, 3, 4, 3, 3, 2, 3, 4, 1, 4, 1, 3, 2, 1, 3, 1, 4, 0, 0}; - double tlh = 0; - double tt = 0; - for (int n = 0; n < 1000; ++n) { - if (n % 10 == 0) cerr << '.'; - if ((n+1) % 400 == 0) cerr << " [" << (n+1) << "]\n"; - Model m; - for (int *x = points; *x; x += 2) - m.increment(x[0], x[1]); - - for (int j = 0; j < 24; ++j) { - for (int *x = points; *x; x += 2) { - if (rng.next() < 0.8) { - m.decrement(x[0], x[1]); - m.increment_is(x[0], x[1]); - } - } - } - counts[m.base.num_customers()]++; - tcounts[m.base.num_tables()]++; - tlh += m.llh(); - tt += 1.0; - } - cerr << "mean LLH = " << (tlh / tt) << endl; - for (int i = 0; i < 15; ++i) - cerr << i << ": " << (counts[i] / tt) << "\t" << (tcounts[i] / tt) << endl; -} - diff --git a/gi/pf/monotonic_pseg.h b/gi/pf/monotonic_pseg.h deleted file mode 100644 index 10d171fe..00000000 --- a/gi/pf/monotonic_pseg.h +++ /dev/null @@ -1,89 +0,0 @@ -#ifndef _MONOTONIC_PSEG_H_ -#define _MONOTONIC_PSEG_H_ - -#include <vector> - -#include "prob.h" -#include "ccrp_nt.h" -#include "trule.h" -#include "base_distributions.h" - -template <typename BaseMeasure> -struct MonotonicParallelSegementationModel { - explicit MonotonicParallelSegementationModel(BaseMeasure& rcp0) : - rp0(rcp0), base(prob_t::One()), rules(1,1), stop(1.0) {} - - void DecrementRule(const TRule& rule) { - if (rules.decrement(rule)) - base /= rp0(rule); - } - - void IncrementRule(const TRule& rule) { - if (rules.increment(rule)) - base *= rp0(rule); - } - - void IncrementRulesAndStops(const std::vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - IncrementRule(*rules[i]); - if (rules.size()) IncrementContinue(rules.size() - 1); - IncrementStop(); - } - - void DecrementRulesAndStops(const std::vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - DecrementRule(*rules[i]); - if (rules.size()) { - DecrementContinue(rules.size() - 1); - DecrementStop(); - } - } - - prob_t RuleProbability(const TRule& rule) const { - prob_t p; p.logeq(rules.logprob(rule, log(rp0(rule)))); - return p; - } - - prob_t Likelihood() const { - prob_t p = base; - prob_t q; q.logeq(rules.log_crp_prob()); - p *= q; - q.logeq(stop.log_crp_prob()); - p *= q; - return p; - } - - void IncrementStop() { - stop.increment(true); - } - - void IncrementContinue(int n = 1) { - for (int i = 0; i < n; ++i) - stop.increment(false); - } - - void DecrementStop() { - stop.decrement(true); - } - - void DecrementContinue(int n = 1) { - for (int i = 0; i < n; ++i) - stop.decrement(false); - } - - prob_t StopProbability() const { - return prob_t(stop.prob(true, 0.5)); - } - - prob_t ContinueProbability() const { - return prob_t(stop.prob(false, 0.5)); - } - - const BaseMeasure& rp0; - prob_t base; - CCRP_NoTable<TRule> rules; - CCRP_NoTable<bool> stop; -}; - -#endif - diff --git a/gi/pf/ngram_base.cc b/gi/pf/ngram_base.cc deleted file mode 100644 index 1299f06f..00000000 --- a/gi/pf/ngram_base.cc +++ /dev/null @@ -1,69 +0,0 @@ -#include "ngram_base.h" - -#include "lm/model.hh" -#include "tdict.h" - -using namespace std; - -namespace { -struct GICSVMapper : public lm::EnumerateVocab { - 
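// KenLM vocabulary-enumeration callback: while the character LM loads, it
- // records the mapping from each cdec WordID to its KenLM WordIndex; ids the
- // LM has never seen keep kLM_UNKNOWN_TOKEN (index 0, i.e. <unk>). -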
GICSVMapper(vector<lm::WordIndex>* out) : out_(out), kLM_UNKNOWN_TOKEN(0) { out_->clear(); } - void Add(lm::WordIndex index, const StringPiece &str) { - const WordID cdec_id = TD::Convert(str.as_string()); - if (cdec_id >= out_->size()) - out_->resize(cdec_id + 1, kLM_UNKNOWN_TOKEN); - (*out_)[cdec_id] = index; - } - vector<lm::WordIndex>* out_; - const lm::WordIndex kLM_UNKNOWN_TOKEN; -}; -} - -struct FixedNgramBaseImpl { - FixedNgramBaseImpl(const string& param) { - GICSVMapper vm(&cdec2klm_map_); - lm::ngram::Config conf; - conf.enumerate_vocab = &vm; - cerr << "Reading character LM from " << param << endl; - model = new lm::ngram::ProbingModel(param.c_str(), conf); - order = model->Order(); - kEOS = MapWord(TD::Convert("</s>")); - assert(kEOS > 0); - } - - lm::WordIndex MapWord(const WordID w) const { - if (w < cdec2klm_map_.size()) return cdec2klm_map_[w]; - return 0; - } - - ~FixedNgramBaseImpl() { delete model; } - - prob_t StringProbability(const vector<WordID>& s) const { - lm::ngram::State state = model->BeginSentenceState(); - double prob = 0; - for (unsigned i = 0; i < s.size(); ++i) { - const lm::ngram::State scopy(state); - prob += model->Score(scopy, MapWord(s[i]), state); - } - const lm::ngram::State scopy(state); - prob += model->Score(scopy, kEOS, state); - prob_t p; p.logeq(prob * log(10)); - return p; - } - - lm::ngram::ProbingModel* model; - unsigned order; - vector<lm::WordIndex> cdec2klm_map_; - lm::WordIndex kEOS; -}; - -FixedNgramBase::~FixedNgramBase() { delete impl; } - -FixedNgramBase::FixedNgramBase(const string& lmfname) { - impl = new FixedNgramBaseImpl(lmfname); -} - -prob_t FixedNgramBase::StringProbability(const vector<WordID>& s) const { - return impl->StringProbability(s); -} - diff --git a/gi/pf/ngram_base.h b/gi/pf/ngram_base.h deleted file mode 100644 index 4ea999f3..00000000 --- a/gi/pf/ngram_base.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _NGRAM_BASE_H_ -#define _NGRAM_BASE_H_ - -#include <string> -#include <vector> -#include "trule.h" -#include "wordid.h" -#include "prob.h" - -struct FixedNgramBaseImpl; -struct FixedNgramBase { - FixedNgramBase(const std::string& lmfname); - ~FixedNgramBase(); - prob_t StringProbability(const std::vector<WordID>& s) const; - - prob_t operator()(const TRule& rule) const { - return StringProbability(rule.e_); - } - - private: - FixedNgramBaseImpl* impl; - -}; - -#endif diff --git a/gi/pf/nuisance_test.cc b/gi/pf/nuisance_test.cc deleted file mode 100644 index fc0af9cb..00000000 --- a/gi/pf/nuisance_test.cc +++ /dev/null @@ -1,161 +0,0 @@ -#include "ccrp.h" - -#include <vector> -#include <iostream> - -#include "tdict.h" -#include "transliterations.h" - -using namespace std; - -MT19937 rng; - -ostream& operator<<(ostream&os, const vector<int>& v) { - os << '[' << v[0]; - if (v.size() == 2) os << ' ' << v[1]; - return os << ']'; -} - -struct Base { - Base() : llh(), v(2), v1(1), v2(1), crp(0.25, 0.5) {} - inline double p0(const vector<int>& x) const { - double p = 0.75; - if (x.size() == 2) p = 0.25; - p *= 1.0 / 3.0; - if (x.size() == 2) p *= 1.0 / 3.0; - return p; - } - double est_deriv_prob(int a, int b, int seg) const { - assert(a > 0 && a < 4); // a \in {1,2,3} - assert(b > 0 && b < 4); // b \in {1,2,3} - assert(seg == 0 || seg == 1); // seg \in {0,1} - if (seg == 0) { - v[0] = a; - v[1] = b; - return crp.prob(v, p0(v)); - } else { - v1[0] = a; - v2[0] = b; - return crp.prob(v1, p0(v1)) * crp.prob(v2, p0(v2)); - } - } - double est_marginal_prob(int a, int b) const { - return est_deriv_prob(a,b,0) + 
est_deriv_prob(a,b,1); - } - int increment(int a, int b, double* pw = NULL) { - double p1 = est_deriv_prob(a, b, 0); - double p2 = est_deriv_prob(a, b, 1); - //p1 = 0.5; p2 = 0.5; - int seg = rng.SelectSample(p1,p2); - double tmp = 0; - if (!pw) pw = &tmp; - double& w = *pw; - if (seg == 0) { - v[0] = a; - v[1] = b; - w = crp.prob(v, p0(v)) / p1; - if (crp.increment(v, p0(v), &rng)) { - llh += log(p0(v)); - } - } else { - v1[0] = a; - w = crp.prob(v1, p0(v1)) / p2; - if (crp.increment(v1, p0(v1), &rng)) { - llh += log(p0(v1)); - } - v2[0] = b; - w *= crp.prob(v2, p0(v2)); - if (crp.increment(v2, p0(v2), &rng)) { - llh += log(p0(v2)); - } - } - return seg; - } - void increment(int a, int b, int seg) { - if (seg == 0) { - v[0] = a; - v[1] = b; - if (crp.increment(v, p0(v), &rng)) { - llh += log(p0(v)); - } - } else { - v1[0] = a; - if (crp.increment(v1, p0(v1), &rng)) { - llh += log(p0(v1)); - } - v2[0] = b; - if (crp.increment(v2, p0(v2), &rng)) { - llh += log(p0(v2)); - } - } - } - void decrement(int a, int b, int seg) { - if (seg == 0) { - v[0] = a; - v[1] = b; - if (crp.decrement(v, &rng)) { - llh -= log(p0(v)); - } - } else { - v1[0] = a; - if (crp.decrement(v1, &rng)) { - llh -= log(p0(v1)); - } - v2[0] = b; - if (crp.decrement(v2, &rng)) { - llh -= log(p0(v2)); - } - } - } - double log_likelihood() const { - return llh + crp.log_crp_prob(); - } - double llh; - mutable vector<int> v, v1, v2; - CCRP<vector<int> > crp; -}; - -int main(int argc, char** argv) { - double tl = 0; - const int ITERS = 1000; - const int PARTICLES = 20; - const int DATAPOINTS = 50; - WordID x = TD::Convert("souvenons"); - WordID y = TD::Convert("remember"); - vector<WordID> src; TD::ConvertSentence("s o u v e n o n s", &src); - vector<WordID> trg; TD::ConvertSentence("r e m e m b e r", &trg); -// Transliterations xx; -// xx.Initialize(x, src, y, trg); -// return 1; - - for (int j = 0; j < ITERS; ++j) { - Base b; - vector<int> segs(DATAPOINTS); - SampleSet<double> ss; - vector<int> sss; - for (int i = 0; i < DATAPOINTS; i++) { - ss.clear(); - sss.clear(); - int x = ((i / 10) % 3) + 1; - int y = (i % 3) + 1; - //double ep = b.est_marginal_prob(x,y); - //cerr << "est p(" << x << "," << y << ") = " << ep << endl; - for (int n = 0; n < PARTICLES; ++n) { - double w; - int seg = b.increment(x,y,&w); - //cerr << seg << " w=" << w << endl; - ss.add(w); - sss.push_back(seg); - b.decrement(x,y,seg); - } - int seg = sss[rng.SelectSample(ss)]; - b.increment(x, y, seg); - //cerr << "Selected: " << seg << endl; - //return 1; - segs[i] = seg; - } - tl += b.log_likelihood(); - } - cerr << "LLH=" << tl / ITERS << endl; -} - diff --git a/gi/pf/os_phrase.h b/gi/pf/os_phrase.h deleted file mode 100644 index dfe40cb1..00000000 --- a/gi/pf/os_phrase.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _OS_PHRASE_H_ -#define _OS_PHRASE_H_ - -#include <iostream> -#include <vector> -#include "tdict.h" - -inline std::ostream& operator<<(std::ostream& os, const std::vector<WordID>& p) { - os << '['; - for (int i = 0; i < p.size(); ++i) - os << (i==0 ? 
"" : " ") << TD::Convert(p[i]); - return os << ']'; -} - -#endif diff --git a/gi/pf/pf.h b/gi/pf/pf.h deleted file mode 100644 index ede7cda8..00000000 --- a/gi/pf/pf.h +++ /dev/null @@ -1,84 +0,0 @@ -#ifndef _PF_H_ -#define _PF_H_ - -#include <cassert> -#include <vector> -#include "sampler.h" -#include "prob.h" - -template <typename ParticleType> -struct ParticleRenormalizer { - void operator()(std::vector<ParticleType>* pv) const { - if (pv->empty()) return; - prob_t z = prob_t::Zero(); - for (unsigned i = 0; i < pv->size(); ++i) - z += (*pv)[i].weight; - assert(z > prob_t::Zero()); - for (unsigned i = 0; i < pv->size(); ++i) - (*pv)[i].weight /= z; - } -}; - -template <typename ParticleType> -struct MultinomialResampleFilter { - explicit MultinomialResampleFilter(MT19937* rng) : rng_(rng) {} - - void operator()(std::vector<ParticleType>* pv) { - if (pv->empty()) return; - std::vector<ParticleType>& ps = *pv; - SampleSet<prob_t> ss; - for (int i = 0; i < ps.size(); ++i) - ss.add(ps[i].weight); - std::vector<ParticleType> nps; nps.reserve(ps.size()); - const prob_t uniform_weight(1.0 / ps.size()); - for (int i = 0; i < ps.size(); ++i) { - nps.push_back(ps[rng_->SelectSample(ss)]); - nps[i].weight = uniform_weight; - } - nps.swap(ps); - } - - private: - MT19937* rng_; -}; - -template <typename ParticleType> -struct SystematicResampleFilter { - explicit SystematicResampleFilter(MT19937* rng) : rng_(rng), renorm_() {} - - void operator()(std::vector<ParticleType>* pv) { - if (pv->empty()) return; - renorm_(pv); - std::vector<ParticleType>& ps = *pv; - std::vector<ParticleType> nps; nps.reserve(ps.size()); - double lower = 0, upper = 0; - const double skip = 1.0 / ps.size(); - double u_j = rng_->next() * skip; - //std::cerr << "u_0: " << u_j << std::endl; - int j = 0; - for (unsigned i = 0; i < ps.size(); ++i) { - upper += ps[i].weight.as_float(); - //std::cerr << "lower: " << lower << " upper: " << upper << std::endl; - // how many children does ps[i] have? 
- while (u_j < lower) { u_j += skip; ++j; } - while (u_j >= lower && u_j <= upper) { - assert(j < ps.size()); - nps.push_back(ps[i]); - u_j += skip; - //std::cerr << " add u_j=" << u_j << std::endl; - ++j; - } - lower = upper; - } - //std::cerr << ps.size() << " " << nps.size() << "\n"; - assert(ps.size() == nps.size()); - //exit(1); - ps.swap(nps); - } - - private: - MT19937* rng_; - ParticleRenormalizer<ParticleType> renorm_; -}; - -#endif diff --git a/gi/pf/pf_test.cc b/gi/pf/pf_test.cc deleted file mode 100644 index 296e7285..00000000 --- a/gi/pf/pf_test.cc +++ /dev/null @@ -1,148 +0,0 @@ -#include "ccrp.h" - -#include <vector> -#include <iostream> - -#include "tdict.h" -#include "transliterations.h" - -using namespace std; - -MT19937 rng; - -static bool verbose = false; - -struct Model { - - Model() : bp(), base(0.2, 0.6) , ccrps(5, CCRP<int>(0.8, 0.5)) {} - - double p0(int x) const { - assert(x > 0); - assert(x < 5); - return 1.0/4.0; - } - - double llh() const { - double lh = bp + base.log_crp_prob(); - for (int ctx = 1; ctx < 5; ++ctx) - lh += ccrps[ctx].log_crp_prob(); - return lh; - } - - double prob(int ctx, int x) const { - assert(ctx > 0 && ctx < 5); - return ccrps[ctx].prob(x, base.prob(x, p0(x))); - } - - void increment(int ctx, int x) { - assert(ctx > 0 && ctx < 5); - if (ccrps[ctx].increment(x, base.prob(x, p0(x)), &rng)) { - if (base.increment(x, p0(x), &rng)) { - bp += log(1.0 / 4.0); - } - } - } - - // this is just a biased estimate - double est_base_prob(int x) { - return (x + 1) * x / 40.0; - } - - void increment_is(int ctx, int x) { - assert(ctx > 0 && ctx < 5); - SampleSet<double> ss; - const int PARTICLES = 25; - vector<CCRP<int> > s1s(PARTICLES, CCRP<int>(0.5,0.5)); - vector<CCRP<int> > sbs(PARTICLES, CCRP<int>(0.5,0.5)); - vector<double> sp0s(PARTICLES); - - CCRP<int> s1 = ccrps[ctx]; - CCRP<int> sb = base; - double sp0 = bp; - for (int pp = 0; pp < PARTICLES; ++pp) { - if (pp > 0) { - ccrps[ctx] = s1; - base = sb; - bp = sp0; - } - - double q = 1; - double gamma = 1; - double est_p = est_base_prob(x); - //base.prob(x, p0(x)) + rng.next() * 0.1; - if (ccrps[ctx].increment(x, est_p, &rng, &q)) { - gamma = q * base.prob(x, p0(x)); - q *= est_p; - if (verbose) cerr << "(DP-base draw) "; - double qq = -1; - if (base.increment(x, p0(x), &rng, &qq)) { - if (verbose) cerr << "(G0 draw) "; - bp += log(p0(x)); - qq *= p0(x); - } - } else { gamma = q; } - double w = gamma / q; - if (verbose) - cerr << "gamma=" << gamma << " q=" << q << "\tw=" << w << endl; - ss.add(w); - s1s[pp] = ccrps[ctx]; - sbs[pp] = base; - sp0s[pp] = bp; - } - int ps = rng.SelectSample(ss); - ccrps[ctx] = s1s[ps]; - base = sbs[ps]; - bp = sp0s[ps]; - if (verbose) { - cerr << "SELECTED: " << ps << endl; - static int cc = 0; cc++; if (cc ==10) exit(1); - } - } - - void decrement(int ctx, int x) { - assert(ctx > 0 && ctx < 5); - if (ccrps[ctx].decrement(x, &rng)) { - if (base.decrement(x, &rng)) { - bp -= log(p0(x)); - } - } - } - - double bp; - CCRP<int> base; - vector<CCRP<int> > ccrps; - -}; - -int main(int argc, char** argv) { - if (argc > 1) { verbose = true; } - vector<int> counts(15, 0); - vector<int> tcounts(15, 0); - int points[] = {1,2, 2,2, 3,2, 4,1, 3, 4, 3, 3, 2, 3, 4, 1, 4, 1, 3, 2, 1, 3, 1, 4, 0, 0}; - double tlh = 0; - double tt = 0; - for (int n = 0; n < 1000; ++n) { - if (n % 10 == 0) cerr << '.'; - if ((n+1) % 400 == 0) cerr << " [" << (n+1) << "]\n"; - Model m; - for (int *x = points; *x; x += 2) - m.increment(x[0], x[1]); - - for (int j = 0; j < 24; ++j) { - for (int *x = points; 
*x; x += 2) { - if (rng.next() < 0.8) { - m.decrement(x[0], x[1]); - m.increment_is(x[0], x[1]); - } - } - } - counts[m.base.num_customers()]++; - tcounts[m.base.num_tables()]++; - tlh += m.llh(); - tt += 1.0; - } - cerr << "mean LLH = " << (tlh / tt) << endl; - for (int i = 0; i < 15; ++i) - cerr << i << ": " << (counts[i] / tt) << "\t" << (tcounts[i] / tt) << endl; -} - diff --git a/gi/pf/pfbrat.cc b/gi/pf/pfbrat.cc deleted file mode 100644 index 832f22cf..00000000 --- a/gi/pf/pfbrat.cc +++ /dev/null @@ -1,543 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/functional.hpp> -#include <boost/multi_array.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "viterbi.h" -#include "hg.h" -#include "trule.h" -#include "tdict.h" -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp_nt.h" -#include "cfg_wfst_composer.h" - -using namespace std; -using namespace tr1; -namespace po = boost::program_options; - -static unsigned kMAX_SRC_PHRASE; -static unsigned kMAX_TRG_PHRASE; -struct FSTState; - -double log_poisson(unsigned x, const double& lambda) { - assert(lambda > 0.0); - return log(lambda) * x - lgamma(x + 1) - lambda; -} - -struct ConditionalBase { - explicit ConditionalBase(const double m1mixture, const unsigned vocab_e_size, const string& model1fname) : - kM1MIXTURE(m1mixture), - kUNIFORM_MIXTURE(1.0 - m1mixture), - kUNIFORM_TARGET(1.0 / vocab_e_size), - kNULL(TD::Convert("<eps>")) { - assert(m1mixture >= 0.0 && m1mixture <= 1.0); - assert(vocab_e_size > 0); - LoadModel1(model1fname); - } - - void LoadModel1(const string& fname) { - cerr << "Loading Model 1 parameters from " << fname << " ..." << endl; - ReadFile rf(fname); - istream& in = *rf.stream(); - string line; - unsigned lc = 0; - while(getline(in, line)) { - ++lc; - int cur = 0; - int start = 0; - while(cur < line.size() && line[cur] != ' ') { ++cur; } - assert(cur != line.size()); - line[cur] = 0; - const WordID src = TD::Convert(&line[0]); - ++cur; - start = cur; - while(cur < line.size() && line[cur] != ' ') { ++cur; } - assert(cur != line.size()); - line[cur] = 0; - WordID trg = TD::Convert(&line[start]); - const double logprob = strtod(&line[cur + 1], NULL); - if (src >= ttable.size()) ttable.resize(src + 1); - ttable[src][trg].logeq(logprob); - } - cerr << " read " << lc << " parameters.\n"; - } - - // return logp0 of rule.e_ | rule.f_ - prob_t operator()(const TRule& rule) const { - const int flen = rule.f_.size(); - const int elen = rule.e_.size(); - prob_t uniform_src_alignment; uniform_src_alignment.logeq(-log(flen + 1)); - prob_t p; - p.logeq(log_poisson(elen, flen + 0.01)); // elen | flen ~Pois(flen + 0.01) - for (int i = 0; i < elen; ++i) { // for each position i in e-RHS - const WordID trg = rule.e_[i]; - prob_t tp = prob_t::Zero(); - for (int j = -1; j < flen; ++j) { - const WordID src = j < 0 ? 
kNULL : rule.f_[j]; - const map<WordID, prob_t>::const_iterator it = ttable[src].find(trg); - if (it != ttable[src].end()) { - tp += kM1MIXTURE * it->second; - } - tp += kUNIFORM_MIXTURE * kUNIFORM_TARGET; - } - tp *= uniform_src_alignment; // draw a_i ~uniform - p *= tp; // draw e_i ~Model1(f_a_i) / uniform - } - return p; - } - - const prob_t kM1MIXTURE; // Model 1 mixture component - const prob_t kUNIFORM_MIXTURE; // uniform mixture component - const prob_t kUNIFORM_TARGET; - const WordID kNULL; - vector<map<WordID, prob_t> > ttable; -}; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("input,i",po::value<string>(),"Read parallel data from") - ("max_src_phrase",po::value<unsigned>()->default_value(3),"Maximum length of source language phrases") - ("max_trg_phrase",po::value<unsigned>()->default_value(3),"Maximum length of target language phrases") - ("model1,m",po::value<string>(),"Model 1 parameters (used in base distribution)") - ("model1_interpolation_weight",po::value<double>()->default_value(0.95),"Mixing proportion of model 1 with uniform target distribution") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -void ReadParallelCorpus(const string& filename, - vector<vector<WordID> >* f, - vector<vector<int> >* e, - set<int>* vocab_f, - set<int>* vocab_e) { - f->clear(); - e->clear(); - vocab_f->clear(); - vocab_e->clear(); - istream* in; - if (filename == "-") - in = &cin; - else - in = new ifstream(filename.c_str()); - assert(*in); - string line; - const WordID kDIV = TD::Convert("|||"); - vector<WordID> tmp; - while(*in) { - getline(*in, line); - if (line.empty() && !*in) break; - e->push_back(vector<int>()); - f->push_back(vector<int>()); - vector<int>& le = e->back(); - vector<int>& lf = f->back(); - tmp.clear(); - TD::ConvertSentence(line, &tmp); - bool isf = true; - for (unsigned i = 0; i < tmp.size(); ++i) { - const int cur = tmp[i]; - if (isf) { - if (kDIV == cur) { isf = false; } else { - lf.push_back(cur); - vocab_f->insert(cur); - } - } else { - assert(cur != kDIV); - le.push_back(cur); - vocab_e->insert(cur); - } - } - assert(isf == false); - } - if (in != &cin) delete in; -} - -struct UniphraseLM { - UniphraseLM(const vector<vector<int> >& corpus, - const set<int>& vocab, - const po::variables_map& conf) : - phrases_(1,1), - gen_(1,1), - corpus_(corpus), - uniform_word_(1.0 / vocab.size()), - gen_p0_(0.5), - p_end_(0.5), - use_poisson_(conf.count("poisson_length") > 0) {} - - void ResampleHyperparameters(MT19937* rng) { - phrases_.resample_hyperparameters(rng); - gen_.resample_hyperparameters(rng); - cerr << " " << phrases_.alpha(); - } - - CCRP_NoTable<vector<int> > phrases_; - CCRP_NoTable<bool> gen_; - 
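// phrases_: CRP over whole phrases; gen_: CRP over a binary event (the
- // phrase-boundary decision recorded in z_ below; cf. gen_p0_ and p_end_). -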
vector<vector<bool> > z_; // z_[i] is there a phrase boundary after the ith word - const vector<vector<int> >& corpus_; - const double uniform_word_; - const double gen_p0_; - const double p_end_; // in base length distribution, p of the end of a phrase - const bool use_poisson_; -}; - -struct Reachability { - boost::multi_array<bool, 4> edges; // edges[src_covered][trg_covered][x][trg_delta] is this edge worth exploring? - boost::multi_array<short, 2> max_src_delta; // msd[src_covered][trg_covered] -- the largest src delta that's valid - - Reachability(int srclen, int trglen, int src_max_phrase_len, int trg_max_phrase_len) : - edges(boost::extents[srclen][trglen][src_max_phrase_len+1][trg_max_phrase_len+1]), - max_src_delta(boost::extents[srclen][trglen]) { - ComputeReachability(srclen, trglen, src_max_phrase_len, trg_max_phrase_len); - } - - private: - struct SState { - SState() : prev_src_covered(), prev_trg_covered() {} - SState(int i, int j) : prev_src_covered(i), prev_trg_covered(j) {} - int prev_src_covered; - int prev_trg_covered; - }; - - struct NState { - NState() : next_src_covered(), next_trg_covered() {} - NState(int i, int j) : next_src_covered(i), next_trg_covered(j) {} - int next_src_covered; - int next_trg_covered; - }; - - void ComputeReachability(int srclen, int trglen, int src_max_phrase_len, int trg_max_phrase_len) { - typedef boost::multi_array<vector<SState>, 2> array_type; - array_type a(boost::extents[srclen + 1][trglen + 1]); - a[0][0].push_back(SState()); - for (int i = 0; i < srclen; ++i) { - for (int j = 0; j < trglen; ++j) { - if (a[i][j].size() == 0) continue; - const SState prev(i,j); - for (int k = 1; k <= src_max_phrase_len; ++k) { - if ((i + k) > srclen) continue; - for (int l = 1; l <= trg_max_phrase_len; ++l) { - if ((j + l) > trglen) continue; - a[i + k][j + l].push_back(prev); - } - } - } - } - a[0][0].clear(); - cerr << "Final cell contains " << a[srclen][trglen].size() << " back pointers\n"; - assert(a[srclen][trglen].size() > 0); - - typedef boost::multi_array<bool, 2> rarray_type; - rarray_type r(boost::extents[srclen + 1][trglen + 1]); -// typedef boost::multi_array<vector<NState>, 2> narray_type; -// narray_type b(boost::extents[srclen + 1][trglen + 1]); - r[srclen][trglen] = true; - for (int i = srclen; i >= 0; --i) { - for (int j = trglen; j >= 0; --j) { - vector<SState>& prevs = a[i][j]; - if (!r[i][j]) { prevs.clear(); } -// const NState nstate(i,j); - for (int k = 0; k < prevs.size(); ++k) { - r[prevs[k].prev_src_covered][prevs[k].prev_trg_covered] = true; - int src_delta = i - prevs[k].prev_src_covered; - edges[prevs[k].prev_src_covered][prevs[k].prev_trg_covered][src_delta][j - prevs[k].prev_trg_covered] = true; - short &msd = max_src_delta[prevs[k].prev_src_covered][prevs[k].prev_trg_covered]; - if (src_delta > msd) msd = src_delta; -// b[prevs[k].prev_src_covered][prevs[k].prev_trg_covered].push_back(nstate); - } - } - } - assert(!edges[0][0][1][0]); - assert(!edges[0][0][0][1]); - assert(!edges[0][0][0][0]); - cerr << " MAX SRC DELTA[0][0] = " << max_src_delta[0][0] << endl; - assert(max_src_delta[0][0] > 0); - //cerr << "First cell contains " << b[0][0].size() << " forward pointers\n"; - //for (int i = 0; i < b[0][0].size(); ++i) { - // cerr << " -> (" << b[0][0][i].next_src_covered << "," << b[0][0][i].next_trg_covered << ")\n"; - //} - } -}; - -ostream& operator<<(ostream& os, const FSTState& q); -struct FSTState { - explicit FSTState(int src_size) : - trg_covered_(), - src_covered_(), - src_coverage_(src_size) {} - - FSTState(short 
trg_covered, short src_covered, const vector<bool>& src_coverage, const vector<short>& src_prefix) : - trg_covered_(trg_covered), - src_covered_(src_covered), - src_coverage_(src_coverage), - src_prefix_(src_prefix) { - if (src_coverage_.size() == src_covered) { - assert(src_prefix.size() == 0); - } - } - - // if we extend by the word at src_position, what are - // the next states that are reachable and lie on a valid - // path to the final state? - vector<FSTState> Extensions(int src_position, int src_len, int trg_len, const Reachability& r) const { - assert(src_position < src_coverage_.size()); - if (src_coverage_[src_position]) { - cerr << "Trying to extend " << *this << " with position " << src_position << endl; - abort(); - } - vector<bool> ncvg = src_coverage_; - ncvg[src_position] = true; - - vector<FSTState> res; - const int trg_remaining = trg_len - trg_covered_; - if (trg_remaining <= 0) { - cerr << "Target appears to have been covered: " << *this << " (trg_len=" << trg_len << ",trg_covered=" << trg_covered_ << ")" << endl; - abort(); - } - const int src_remaining = src_len - src_covered_; - if (src_remaining <= 0) { - cerr << "Source appears to have been covered: " << *this << endl; - abort(); - } - - for (int tc = 1; tc <= kMAX_TRG_PHRASE; ++tc) { - if (r.edges[src_covered_][trg_covered_][src_prefix_.size() + 1][tc]) { - int nc = src_prefix_.size() + 1 + src_covered_; - res.push_back(FSTState(trg_covered_ + tc, nc, ncvg, vector<short>())); - } - } - - if ((src_prefix_.size() + 1) < r.max_src_delta[src_covered_][trg_covered_]) { - vector<short> nsp = src_prefix_; - nsp.push_back(src_position); - res.push_back(FSTState(trg_covered_, src_covered_, ncvg, nsp)); - } - - if (res.size() == 0) { - cerr << *this << " can't be extended!\n"; - abort(); - } - return res; - } - - short trg_covered_, src_covered_; - vector<bool> src_coverage_; - vector<short> src_prefix_; -}; -bool operator<(const FSTState& q, const FSTState& r) { - if (q.trg_covered_ != r.trg_covered_) return q.trg_covered_ < r.trg_covered_; - if (q.src_covered_!= r.src_covered_) return q.src_covered_ < r.src_covered_; - if (q.src_coverage_ != r.src_coverage_) return q.src_coverage_ < r.src_coverage_; - return q.src_prefix_ < r.src_prefix_; -} - -ostream& operator<<(ostream& os, const FSTState& q) { - os << "[" << q.trg_covered_ << " : "; - for (int i = 0; i < q.src_coverage_.size(); ++i) - os << q.src_coverage_[i]; - os << " : <"; - for (int i = 0; i < q.src_prefix_.size(); ++i) { - if (i != 0) os << ' '; - os << q.src_prefix_[i]; - } - return os << ">]"; -} - -struct MyModel { - MyModel(ConditionalBase& rcp0) : rp0(rcp0) {} - typedef unordered_map<vector<WordID>, CCRP_NoTable<TRule>, boost::hash<vector<WordID> > > SrcToRuleCRPMap; - - void DecrementRule(const TRule& rule) { - SrcToRuleCRPMap::iterator it = rules.find(rule.f_); - assert(it != rules.end()); - it->second.decrement(rule); - if (it->second.num_customers() == 0) rules.erase(it); - } - - void IncrementRule(const TRule& rule) { - SrcToRuleCRPMap::iterator it = rules.find(rule.f_); - if (it == rules.end()) { - CCRP_NoTable<TRule> crp(1,1); - it = rules.insert(make_pair(rule.f_, crp)).first; - } - it->second.increment(rule); - } - - // conditioned on rule.f_ - prob_t RuleConditionalProbability(const TRule& rule) const { - const prob_t base = rp0(rule); - SrcToRuleCRPMap::const_iterator it = rules.find(rule.f_); - if (it == rules.end()) { - return base; - } else { - const double lp = it->second.logprob(rule, log(base)); - prob_t q; q.logeq(lp); - return q; - } - } - 
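- // rp0: the fixed base distribution over rules; rules: one CRP per source
- // phrase f_, holding the target sides that have been generated for that f_.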
- const ConditionalBase& rp0; - SrcToRuleCRPMap rules; -}; - -struct MyFST : public WFST { - MyFST(const vector<WordID>& ssrc, const vector<WordID>& strg, MyModel* m) : - src(ssrc), trg(strg), - r(src.size(),trg.size(),kMAX_SRC_PHRASE, kMAX_TRG_PHRASE), - model(m) { - FSTState in(src.size()); - cerr << " INIT: " << in << endl; - init = GetNode(in); - for (int i = 0; i < in.src_coverage_.size(); ++i) in.src_coverage_[i] = true; - in.src_covered_ = src.size(); - in.trg_covered_ = trg.size(); - cerr << "FINAL: " << in << endl; - final = GetNode(in); - } - virtual const WFSTNode* Final() const; - virtual const WFSTNode* Initial() const; - - const WFSTNode* GetNode(const FSTState& q); - map<FSTState, boost::shared_ptr<WFSTNode> > m; - const vector<WordID>& src; - const vector<WordID>& trg; - Reachability r; - const WFSTNode* init; - const WFSTNode* final; - MyModel* model; -}; - -struct MyNode : public WFSTNode { - MyNode(const FSTState& q, MyFST* fst) : state(q), container(fst) {} - virtual vector<pair<const WFSTNode*, TRulePtr> > ExtendInput(unsigned srcindex) const; - const FSTState state; - mutable MyFST* container; -}; - -vector<pair<const WFSTNode*, TRulePtr> > MyNode::ExtendInput(unsigned srcindex) const { - cerr << "EXTEND " << state << " with " << srcindex << endl; - vector<FSTState> ext = state.Extensions(srcindex, container->src.size(), container->trg.size(), container->r); - vector<pair<const WFSTNode*,TRulePtr> > res(ext.size()); - for (unsigned i = 0; i < ext.size(); ++i) { - res[i].first = container->GetNode(ext[i]); - if (ext[i].src_prefix_.size() == 0) { - const unsigned trg_from = state.trg_covered_; - const unsigned trg_to = ext[i].trg_covered_; - const unsigned prev_prfx_size = state.src_prefix_.size(); - res[i].second.reset(new TRule); - res[i].second->lhs_ = -TD::Convert("X"); - vector<WordID>& src = res[i].second->f_; - vector<WordID>& trg = res[i].second->e_; - src.resize(prev_prfx_size + 1); - for (unsigned j = 0; j < prev_prfx_size; ++j) - src[j] = container->src[state.src_prefix_[j]]; - src[prev_prfx_size] = container->src[srcindex]; - for (unsigned j = trg_from; j < trg_to; ++j) - trg.push_back(container->trg[j]); - res[i].second->scores_.set_value(FD::Convert("Proposal"), log(container->model->RuleConditionalProbability(*res[i].second))); - } - } - return res; -} - -const WFSTNode* MyFST::GetNode(const FSTState& q) { - boost::shared_ptr<WFSTNode>& res = m[q]; - if (!res) { - res.reset(new MyNode(q, this)); - } - return &*res; -} - -const WFSTNode* MyFST::Final() const { - return final; -} - -const WFSTNode* MyFST::Initial() const { - return init; -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - kMAX_TRG_PHRASE = conf["max_trg_phrase"].as<unsigned>(); - kMAX_SRC_PHRASE = conf["max_src_phrase"].as<unsigned>(); - - if (!conf.count("model1")) { - cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n"; - return 1; - } - boost::shared_ptr<MT19937> prng; - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - - vector<vector<int> > corpuse, corpusf; - set<int> vocabe, vocabf; - ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe); - cerr << "f-Corpus size: " << corpusf.size() << " sentences\n"; - cerr << "f-Vocabulary size: " << vocabf.size() << " types\n"; - cerr << "f-Corpus size: " << corpuse.size() << " sentences\n"; - cerr << "f-Vocabulary size: " << vocabe.size() 
<< " types\n"; - assert(corpusf.size() == corpuse.size()); - - ConditionalBase lp0(conf["model1_interpolation_weight"].as<double>(), - vocabe.size(), - conf["model1"].as<string>()); - MyModel m(lp0); - - TRule x("[X] ||| kAnwntR myN ||| at the convent ||| 0"); - m.IncrementRule(x); - TRule y("[X] ||| nY dyN ||| gave ||| 0"); - m.IncrementRule(y); - - - MyFST fst(corpusf[0], corpuse[0], &m); - ifstream in("./kimura.g"); - assert(in); - CFG_WFSTComposer comp(fst); - Hypergraph hg; - bool succeed = comp.Compose(&in, &hg); - hg.PrintGraphviz(); - if (succeed) { cerr << "SUCCESS.\n"; } else { cerr << "FAILURE REPORTED.\n"; } - -#if 0 - ifstream in2("./amnabooks.g"); - assert(in2); - MyFST fst2(corpusf[1], corpuse[1], &m); - CFG_WFSTComposer comp2(fst2); - Hypergraph hg2; - bool succeed2 = comp2.Compose(&in2, &hg2); - if (succeed2) { cerr << "SUCCESS.\n"; } else { cerr << "FAILURE REPORTED.\n"; } -#endif - - SparseVector<double> w; w.set_value(FD::Convert("Proposal"), 1.0); - hg.Reweight(w); - cerr << ViterbiFTree(hg) << endl; - return 0; -} - diff --git a/gi/pf/pfdist.cc b/gi/pf/pfdist.cc deleted file mode 100644 index a3e46064..00000000 --- a/gi/pf/pfdist.cc +++ /dev/null @@ -1,598 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/functional.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "pf.h" -#include "base_distributions.h" -#include "reachability.h" -#include "viterbi.h" -#include "hg.h" -#include "trule.h" -#include "tdict.h" -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp_nt.h" -#include "ccrp_onetable.h" - -using namespace std; -using namespace tr1; -namespace po = boost::program_options; - -boost::shared_ptr<MT19937> prng; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("particles,p",po::value<unsigned>()->default_value(30),"Number of particles") - ("filter_frequency,f",po::value<unsigned>()->default_value(5),"Number of time steps between filterings") - ("input,i",po::value<string>(),"Read parallel data from") - ("max_src_phrase",po::value<unsigned>()->default_value(5),"Maximum length of source language phrases") - ("max_trg_phrase",po::value<unsigned>()->default_value(5),"Maximum length of target language phrases") - ("model1,m",po::value<string>(),"Model 1 parameters (used in base distribution)") - ("inverse_model1,M",po::value<string>(),"Inverse Model 1 parameters (used in backward estimate)") - ("model1_interpolation_weight",po::value<double>()->default_value(0.95),"Mixing proportion of model 1 with uniform target distribution") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -void 
ReadParallelCorpus(const string& filename, - vector<vector<WordID> >* f, - vector<vector<WordID> >* e, - set<WordID>* vocab_f, - set<WordID>* vocab_e) { - f->clear(); - e->clear(); - vocab_f->clear(); - vocab_e->clear(); - istream* in; - if (filename == "-") - in = &cin; - else - in = new ifstream(filename.c_str()); - assert(*in); - string line; - const WordID kDIV = TD::Convert("|||"); - vector<WordID> tmp; - while(*in) { - getline(*in, line); - if (line.empty() && !*in) break; - e->push_back(vector<int>()); - f->push_back(vector<int>()); - vector<int>& le = e->back(); - vector<int>& lf = f->back(); - tmp.clear(); - TD::ConvertSentence(line, &tmp); - bool isf = true; - for (unsigned i = 0; i < tmp.size(); ++i) { - const int cur = tmp[i]; - if (isf) { - if (kDIV == cur) { isf = false; } else { - lf.push_back(cur); - vocab_f->insert(cur); - } - } else { - assert(cur != kDIV); - le.push_back(cur); - vocab_e->insert(cur); - } - } - assert(isf == false); - } - if (in != &cin) delete in; -} - -#if 0 -struct MyConditionalModel { - MyConditionalModel(PhraseConditionalBase& rcp0) : rp0(&rcp0), base(prob_t::One()), src_phrases(1,1), src_jumps(200, CCRP_NoTable<int>(1,1)) {} - - prob_t srcp0(const vector<WordID>& src) const { - prob_t p(1.0 / 3000.0); - p.poweq(src.size()); - prob_t lenp; lenp.logeq(log_poisson(src.size(), 1.0)); - p *= lenp; - return p; - } - - void DecrementRule(const TRule& rule) { - const RuleCRPMap::iterator it = rules.find(rule.f_); - assert(it != rules.end()); - if (it->second.decrement(rule)) { - base /= (*rp0)(rule); - if (it->second.num_customers() == 0) - rules.erase(it); - } - if (src_phrases.decrement(rule.f_)) - base /= srcp0(rule.f_); - } - - void IncrementRule(const TRule& rule) { - RuleCRPMap::iterator it = rules.find(rule.f_); - if (it == rules.end()) - it = rules.insert(make_pair(rule.f_, CCRP_NoTable<TRule>(1,1))).first; - if (it->second.increment(rule)) { - base *= (*rp0)(rule); - } - if (src_phrases.increment(rule.f_)) - base *= srcp0(rule.f_); - } - - void IncrementRules(const vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - IncrementRule(*rules[i]); - } - - void DecrementRules(const vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - DecrementRule(*rules[i]); - } - - void IncrementJump(int dist, unsigned src_len) { - assert(src_len > 0); - if (src_jumps[src_len].increment(dist)) - base *= jp0(dist, src_len); - } - - void DecrementJump(int dist, unsigned src_len) { - assert(src_len > 0); - if (src_jumps[src_len].decrement(dist)) - base /= jp0(dist, src_len); - } - - void IncrementJumps(const vector<int>& js, unsigned src_len) { - for (unsigned i = 0; i < js.size(); ++i) - IncrementJump(js[i], src_len); - } - - void DecrementJumps(const vector<int>& js, unsigned src_len) { - for (unsigned i = 0; i < js.size(); ++i) - DecrementJump(js[i], src_len); - } - - // p(jump = dist | src_len , z) - prob_t JumpProbability(int dist, unsigned src_len) { - const prob_t p0 = jp0(dist, src_len); - const double lp = src_jumps[src_len].logprob(dist, log(p0)); - prob_t q; q.logeq(lp); - return q; - } - - // p(rule.f_ | z) * p(rule.e_ | rule.f_ , z) - prob_t RuleProbability(const TRule& rule) const { - const prob_t p0 = (*rp0)(rule); - prob_t srcp; srcp.logeq(src_phrases.logprob(rule.f_, log(srcp0(rule.f_)))); - const RuleCRPMap::const_iterator it = rules.find(rule.f_); - if (it == rules.end()) return srcp * p0; - const double lp = it->second.logprob(rule, log(p0)); - prob_t q; q.logeq(lp); - return q * srcp; - } - - prob_t Likelihood() const 
{ - prob_t p = base; - for (RuleCRPMap::const_iterator it = rules.begin(); - it != rules.end(); ++it) { - prob_t cl; cl.logeq(it->second.log_crp_prob()); - p *= cl; - } - for (unsigned l = 1; l < src_jumps.size(); ++l) { - if (src_jumps[l].num_customers() > 0) { - prob_t q; - q.logeq(src_jumps[l].log_crp_prob()); - p *= q; - } - } - return p; - } - - JumpBase jp0; - const PhraseConditionalBase* rp0; - prob_t base; - typedef unordered_map<vector<WordID>, CCRP_NoTable<TRule>, boost::hash<vector<WordID> > > RuleCRPMap; - RuleCRPMap rules; - CCRP_NoTable<vector<WordID> > src_phrases; - vector<CCRP_NoTable<int> > src_jumps; -}; - -#endif - -struct MyJointModel { - MyJointModel(PhraseJointBase& rcp0) : - rp0(rcp0), base(prob_t::One()), rules(1,1), src_jumps(200, CCRP_NoTable<int>(1,1)) {} - - void DecrementRule(const TRule& rule) { - if (rules.decrement(rule)) - base /= rp0(rule); - } - - void IncrementRule(const TRule& rule) { - if (rules.increment(rule)) - base *= rp0(rule); - } - - void IncrementRules(const vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - IncrementRule(*rules[i]); - } - - void DecrementRules(const vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - DecrementRule(*rules[i]); - } - - void IncrementJump(int dist, unsigned src_len) { - assert(src_len > 0); - if (src_jumps[src_len].increment(dist)) - base *= jp0(dist, src_len); - } - - void DecrementJump(int dist, unsigned src_len) { - assert(src_len > 0); - if (src_jumps[src_len].decrement(dist)) - base /= jp0(dist, src_len); - } - - void IncrementJumps(const vector<int>& js, unsigned src_len) { - for (unsigned i = 0; i < js.size(); ++i) - IncrementJump(js[i], src_len); - } - - void DecrementJumps(const vector<int>& js, unsigned src_len) { - for (unsigned i = 0; i < js.size(); ++i) - DecrementJump(js[i], src_len); - } - - // p(jump = dist | src_len , z) - prob_t JumpProbability(int dist, unsigned src_len) { - const prob_t p0 = jp0(dist, src_len); - const double lp = src_jumps[src_len].logprob(dist, log(p0)); - prob_t q; q.logeq(lp); - return q; - } - - // p(rule.f_ | z) * p(rule.e_ | rule.f_ , z) - prob_t RuleProbability(const TRule& rule) const { - prob_t p; p.logeq(rules.logprob(rule, log(rp0(rule)))); - return p; - } - - prob_t Likelihood() const { - prob_t p = base; - prob_t q; q.logeq(rules.log_crp_prob()); - p *= q; - for (unsigned l = 1; l < src_jumps.size(); ++l) { - if (src_jumps[l].num_customers() > 0) { - prob_t q; - q.logeq(src_jumps[l].log_crp_prob()); - p *= q; - } - } - return p; - } - - JumpBase jp0; - const PhraseJointBase& rp0; - prob_t base; - CCRP_NoTable<TRule> rules; - vector<CCRP_NoTable<int> > src_jumps; -}; - -struct BackwardEstimate { - BackwardEstimate(const Model1& m1, const vector<WordID>& src, const vector<WordID>& trg) : - model1_(m1), src_(src), trg_(trg) { - } - const prob_t& operator()(const vector<bool>& src_cov, unsigned trg_cov) const { - assert(src_.size() == src_cov.size()); - assert(trg_cov <= trg_.size()); - prob_t& e = cache_[src_cov][trg_cov]; - if (e.is_0()) { - if (trg_cov == trg_.size()) { e = prob_t::One(); return e; } - vector<WordID> r(src_.size() + 1); r.clear(); - r.push_back(0); // NULL word - for (int i = 0; i < src_cov.size(); ++i) - if (!src_cov[i]) r.push_back(src_[i]); - const prob_t uniform_alignment(1.0 / r.size()); - e.logeq(Md::log_poisson(trg_.size() - trg_cov, r.size() - 1)); // p(trg len remaining | src len remaining) - for (unsigned j = trg_cov; j < trg_.size(); ++j) { - prob_t p; - for (unsigned i = 0; i < r.size(); ++i) - p 
+= model1_(r[i], trg_[j]); - if (p.is_0()) { - cerr << "ERROR: p(" << TD::Convert(trg_[j]) << " | " << TD::GetString(r) << ") = 0!\n"; - abort(); - } - p *= uniform_alignment; - e *= p; - } - } - return e; - } - const Model1& model1_; - const vector<WordID>& src_; - const vector<WordID>& trg_; - mutable unordered_map<vector<bool>, map<unsigned, prob_t>, boost::hash<vector<bool> > > cache_; -}; - -struct BackwardEstimateSym { - BackwardEstimateSym(const Model1& m1, - const Model1& invm1, const vector<WordID>& src, const vector<WordID>& trg) : - model1_(m1), invmodel1_(invm1), src_(src), trg_(trg) { - } - const prob_t& operator()(const vector<bool>& src_cov, unsigned trg_cov) const { - assert(src_.size() == src_cov.size()); - assert(trg_cov <= trg_.size()); - prob_t& e = cache_[src_cov][trg_cov]; - if (e.is_0()) { - if (trg_cov == trg_.size()) { e = prob_t::One(); return e; } - vector<WordID> r(src_.size() + 1); r.clear(); - for (int i = 0; i < src_cov.size(); ++i) - if (!src_cov[i]) r.push_back(src_[i]); - r.push_back(0); // NULL word - const prob_t uniform_alignment(1.0 / r.size()); - e.logeq(Md::log_poisson(trg_.size() - trg_cov, r.size() - 1)); // p(trg len remaining | src len remaining) - for (unsigned j = trg_cov; j < trg_.size(); ++j) { - prob_t p; - for (unsigned i = 0; i < r.size(); ++i) - p += model1_(r[i], trg_[j]); - if (p.is_0()) { - cerr << "ERROR: p(" << TD::Convert(trg_[j]) << " | " << TD::GetString(r) << ") = 0!\n"; - abort(); - } - p *= uniform_alignment; - e *= p; - } - r.pop_back(); - const prob_t inv_uniform(1.0 / (trg_.size() - trg_cov + 1.0)); - prob_t inv; - inv.logeq(Md::log_poisson(r.size(), trg_.size() - trg_cov)); - for (unsigned i = 0; i < r.size(); ++i) { - prob_t p; - for (unsigned j = trg_cov - 1; j < trg_.size(); ++j) - p += invmodel1_(j < trg_cov ? 0 : trg_[j], r[i]); - if (p.is_0()) { - cerr << "ERROR: p_inv(" << TD::Convert(r[i]) << " | " << TD::GetString(trg_) << ") = 0!\n"; - abort(); - } - p *= inv_uniform; - inv *= p; - } - prob_t x = pow(e * inv, 0.5); - e = x; - //cerr << "Forward: " << log(e) << "\tBackward: " << log(inv) << "\t prop: " << log(x) << endl; - } - return e; - } - const Model1& model1_; - const Model1& invmodel1_; - const vector<WordID>& src_; - const vector<WordID>& trg_; - mutable unordered_map<vector<bool>, map<unsigned, prob_t>, boost::hash<vector<bool> > > cache_; -}; - -struct Particle { - Particle() : weight(prob_t::One()), src_cov(), trg_cov(), prev_pos(-1) {} - prob_t weight; - prob_t gamma_last; - vector<int> src_jumps; - vector<TRulePtr> rules; - vector<bool> src_cv; - int src_cov; - int trg_cov; - int prev_pos; -}; - -ostream& operator<<(ostream& o, const vector<bool>& v) { - for (int i = 0; i < v.size(); ++i) - o << (v[i] ? 
'1' : '0'); - return o; -} -ostream& operator<<(ostream& o, const Particle& p) { - o << "[cv=" << p.src_cv << " src_cov=" << p.src_cov << " trg_cov=" << p.trg_cov << " last_pos=" << p.prev_pos << " num_rules=" << p.rules.size() << " w=" << log(p.weight) << ']'; - return o; -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - const unsigned kMAX_TRG_PHRASE = conf["max_trg_phrase"].as<unsigned>(); - const unsigned kMAX_SRC_PHRASE = conf["max_src_phrase"].as<unsigned>(); - const unsigned particles = conf["particles"].as<unsigned>(); - const unsigned samples = conf["samples"].as<unsigned>(); - const unsigned rejuv_freq = conf["filter_frequency"].as<unsigned>(); - - if (!conf.count("model1")) { - cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n"; - return 1; - } - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - - vector<vector<WordID> > corpuse, corpusf; - set<WordID> vocabe, vocabf; - cerr << "Reading corpus...\n"; - ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe); - cerr << "F-corpus size: " << corpusf.size() << " sentences\t (" << vocabf.size() << " word types)\n"; - cerr << "E-corpus size: " << corpuse.size() << " sentences\t (" << vocabe.size() << " word types)\n"; - assert(corpusf.size() == corpuse.size()); - - const int kLHS = -TD::Convert("X"); - Model1 m1(conf["model1"].as<string>()); - Model1 invm1(conf["inverse_model1"].as<string>()); - -#if 0 - PhraseConditionalBase lp0(m1, conf["model1_interpolation_weight"].as<double>(), vocabe.size()); - MyConditionalModel m(lp0); -#else - PhraseJointBase lp0(m1, conf["model1_interpolation_weight"].as<double>(), vocabe.size(), vocabf.size()); - MyJointModel m(lp0); -#endif - - MultinomialResampleFilter<Particle> filter(&rng); - cerr << "Initializing reachability limits...\n"; - vector<Particle> ps(corpusf.size()); - vector<Reachability> reaches; reaches.reserve(corpusf.size()); - for (int ci = 0; ci < corpusf.size(); ++ci) - reaches.push_back(Reachability(corpusf[ci].size(), - corpuse[ci].size(), - kMAX_SRC_PHRASE, - kMAX_TRG_PHRASE)); - cerr << "Sampling...\n"; - vector<Particle> tmp_p(10000); // work space - SampleSet<prob_t> pfss; - for (int SS=0; SS < samples; ++SS) { - for (int ci = 0; ci < corpusf.size(); ++ci) { - vector<int>& src = corpusf[ci]; - vector<int>& trg = corpuse[ci]; - m.DecrementRules(ps[ci].rules); - m.DecrementJumps(ps[ci].src_jumps, src.size()); - - //BackwardEstimate be(m1, src, trg); - BackwardEstimateSym be(m1, invm1, src, trg); - const Reachability& r = reaches[ci]; - vector<Particle> lps(particles); - - for (int pi = 0; pi < particles; ++pi) { - Particle& p = lps[pi]; - p.src_cv.resize(src.size(), false); - } - - bool all_complete = false; - while(!all_complete) { - SampleSet<prob_t> ss; - - // all particles have now been extended a bit, we will reweight them now - if (lps[0].trg_cov > 0) - filter(&lps); - - // loop over all particles and extend them - bool done_nothing = true; - for (int pi = 0; pi < particles; ++pi) { - Particle& p = lps[pi]; - int tic = 0; - while(p.trg_cov < trg.size() && tic < rejuv_freq) { - ++tic; - done_nothing = false; - ss.clear(); - TRule x; x.lhs_ = kLHS; - prob_t z; - int first_uncovered = src.size(); - int last_uncovered = -1; - for (int i = 0; i < src.size(); ++i) { - const bool is_uncovered = !p.src_cv[i]; - if (i < first_uncovered && is_uncovered) first_uncovered = i; - 
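// last_uncovered is the rightmost free source position; together with
- // first_uncovered it bounds where a new source phrase may start. -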
if (is_uncovered && i > last_uncovered) last_uncovered = i; - } - assert(last_uncovered > -1); - assert(first_uncovered < src.size()); - - for (int trg_len = 1; trg_len <= kMAX_TRG_PHRASE; ++trg_len) { - x.e_.push_back(trg[trg_len - 1 + p.trg_cov]); - for (int src_len = 1; src_len <= kMAX_SRC_PHRASE; ++src_len) { - if (!r.edges[p.src_cov][p.trg_cov][src_len][trg_len]) continue; - - const int last_possible_start = last_uncovered - src_len + 1; - assert(last_possible_start >= 0); - //cerr << src_len << "," << trg_len << " is allowed. E=" << TD::GetString(x.e_) << endl; - //cerr << " first_uncovered=" << first_uncovered << " last_possible_start=" << last_possible_start << endl; - for (int i = first_uncovered; i <= last_possible_start; ++i) { - if (p.src_cv[i]) continue; - assert(ss.size() < tmp_p.size()); // if fails increase tmp_p size - Particle& np = tmp_p[ss.size()]; - np = p; - x.f_.clear(); - int gap_add = 0; - bool bad = false; - prob_t jp = prob_t::One(); - int prev_pos = p.prev_pos; - for (int j = 0; j < src_len; ++j) { - if ((j + i + gap_add) == src.size()) { bad = true; break; } - while ((i+j+gap_add) < src.size() && p.src_cv[i + j + gap_add]) { ++gap_add; } - if ((j + i + gap_add) == src.size()) { bad = true; break; } - np.src_cv[i + j + gap_add] = true; - x.f_.push_back(src[i + j + gap_add]); - jp *= m.JumpProbability(i + j + gap_add - prev_pos, src.size()); - int jump = i + j + gap_add - prev_pos; - assert(jump != 0); - np.src_jumps.push_back(jump); - prev_pos = i + j + gap_add; - } - if (bad) continue; - np.prev_pos = prev_pos; - np.src_cov += x.f_.size(); - np.trg_cov += x.e_.size(); - if (x.f_.size() != src_len) continue; - prob_t rp = m.RuleProbability(x); - np.gamma_last = rp * jp; - const prob_t u = pow(np.gamma_last * be(np.src_cv, np.trg_cov), 0.2); - //cerr << "**rule=" << x << endl; - //cerr << " u=" << log(u) << " rule=" << rp << " jump=" << jp << endl; - ss.add(u); - np.rules.push_back(TRulePtr(new TRule(x))); - z += u; - - const bool completed = (p.trg_cov == trg.size()); - if (completed) { - int last_jump = src.size() - p.prev_pos; - assert(last_jump > 0); - p.src_jumps.push_back(last_jump); - p.weight *= m.JumpProbability(last_jump, src.size()); - } - } - } - } - cerr << "number of edges to consider: " << ss.size() << endl; - const int sampled = rng.SelectSample(ss); - prob_t q_n = ss[sampled] / z; - p = tmp_p[sampled]; - //m.IncrementRule(*p.rules.back()); - p.weight *= p.gamma_last / q_n; - cerr << "[w=" << log(p.weight) << "]\tsampled rule: " << p.rules.back()->AsString() << endl; - cerr << p << endl; - } - } // loop over particles (pi = 0 .. 
particles) - if (done_nothing) all_complete = true; - } - pfss.clear(); - for (int i = 0; i < lps.size(); ++i) - pfss.add(lps[i].weight); - const int sampled = rng.SelectSample(pfss); - ps[ci] = lps[sampled]; - m.IncrementRules(lps[sampled].rules); - m.IncrementJumps(lps[sampled].src_jumps, src.size()); - for (int i = 0; i < lps[sampled].rules.size(); ++i) { cerr << "S:\t" << lps[sampled].rules[i]->AsString() << "\n"; } - cerr << "tmp-LLH: " << log(m.Likelihood()) << endl; - } - cerr << "LLH: " << log(m.Likelihood()) << endl; - for (int sni = 0; sni < 5; ++sni) { - for (int i = 0; i < ps[sni].rules.size(); ++i) { cerr << "\t" << ps[sni].rules[i]->AsString() << endl; } - } - } - return 0; -} - diff --git a/gi/pf/pfdist.new.cc b/gi/pf/pfdist.new.cc deleted file mode 100644 index 3169eb75..00000000 --- a/gi/pf/pfdist.new.cc +++ /dev/null @@ -1,620 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/functional.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "base_measures.h" -#include "reachability.h" -#include "viterbi.h" -#include "hg.h" -#include "trule.h" -#include "tdict.h" -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp_nt.h" -#include "ccrp_onetable.h" - -using namespace std; -using namespace tr1; -namespace po = boost::program_options; - -shared_ptr<MT19937> prng; - -size_t hash_value(const TRule& r) { - size_t h = boost::hash_value(r.e_); - boost::hash_combine(h, -r.lhs_); - boost::hash_combine(h, boost::hash_value(r.f_)); - return h; -} - -bool operator==(const TRule& a, const TRule& b) { - return (a.lhs_ == b.lhs_ && a.e_ == b.e_ && a.f_ == b.f_); -} - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("particles,p",po::value<unsigned>()->default_value(25),"Number of particles") - ("input,i",po::value<string>(),"Read parallel data from") - ("max_src_phrase",po::value<unsigned>()->default_value(5),"Maximum length of source language phrases") - ("max_trg_phrase",po::value<unsigned>()->default_value(5),"Maximum length of target language phrases") - ("model1,m",po::value<string>(),"Model 1 parameters (used in base distribution)") - ("inverse_model1,M",po::value<string>(),"Inverse Model 1 parameters (used in backward estimate)") - ("model1_interpolation_weight",po::value<double>()->default_value(0.95),"Mixing proportion of model 1 with uniform target distribution") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -void ReadParallelCorpus(const string& filename, - vector<vector<WordID> >* f, - vector<vector<WordID> >* e, - set<WordID>* vocab_f, - set<WordID>* vocab_e) { - f->clear(); - e->clear(); - 
vocab_f->clear(); - vocab_e->clear(); - istream* in; - if (filename == "-") - in = &cin; - else - in = new ifstream(filename.c_str()); - assert(*in); - string line; - const WordID kDIV = TD::Convert("|||"); - vector<WordID> tmp; - while(*in) { - getline(*in, line); - if (line.empty() && !*in) break; - e->push_back(vector<int>()); - f->push_back(vector<int>()); - vector<int>& le = e->back(); - vector<int>& lf = f->back(); - tmp.clear(); - TD::ConvertSentence(line, &tmp); - bool isf = true; - for (unsigned i = 0; i < tmp.size(); ++i) { - const int cur = tmp[i]; - if (isf) { - if (kDIV == cur) { isf = false; } else { - lf.push_back(cur); - vocab_f->insert(cur); - } - } else { - assert(cur != kDIV); - le.push_back(cur); - vocab_e->insert(cur); - } - } - assert(isf == false); - } - if (in != &cin) delete in; -} - -#if 0 -struct MyConditionalModel { - MyConditionalModel(PhraseConditionalBase& rcp0) : rp0(&rcp0), base(prob_t::One()), src_phrases(1,1), src_jumps(200, CCRP_NoTable<int>(1,1)) {} - - prob_t srcp0(const vector<WordID>& src) const { - prob_t p(1.0 / 3000.0); - p.poweq(src.size()); - prob_t lenp; lenp.logeq(log_poisson(src.size(), 1.0)); - p *= lenp; - return p; - } - - void DecrementRule(const TRule& rule) { - const RuleCRPMap::iterator it = rules.find(rule.f_); - assert(it != rules.end()); - if (it->second.decrement(rule)) { - base /= (*rp0)(rule); - if (it->second.num_customers() == 0) - rules.erase(it); - } - if (src_phrases.decrement(rule.f_)) - base /= srcp0(rule.f_); - } - - void IncrementRule(const TRule& rule) { - RuleCRPMap::iterator it = rules.find(rule.f_); - if (it == rules.end()) - it = rules.insert(make_pair(rule.f_, CCRP_NoTable<TRule>(1,1))).first; - if (it->second.increment(rule)) { - base *= (*rp0)(rule); - } - if (src_phrases.increment(rule.f_)) - base *= srcp0(rule.f_); - } - - void IncrementRules(const vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - IncrementRule(*rules[i]); - } - - void DecrementRules(const vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - DecrementRule(*rules[i]); - } - - void IncrementJump(int dist, unsigned src_len) { - assert(src_len > 0); - if (src_jumps[src_len].increment(dist)) - base *= jp0(dist, src_len); - } - - void DecrementJump(int dist, unsigned src_len) { - assert(src_len > 0); - if (src_jumps[src_len].decrement(dist)) - base /= jp0(dist, src_len); - } - - void IncrementJumps(const vector<int>& js, unsigned src_len) { - for (unsigned i = 0; i < js.size(); ++i) - IncrementJump(js[i], src_len); - } - - void DecrementJumps(const vector<int>& js, unsigned src_len) { - for (unsigned i = 0; i < js.size(); ++i) - DecrementJump(js[i], src_len); - } - - // p(jump = dist | src_len , z) - prob_t JumpProbability(int dist, unsigned src_len) { - const prob_t p0 = jp0(dist, src_len); - const double lp = src_jumps[src_len].logprob(dist, log(p0)); - prob_t q; q.logeq(lp); - return q; - } - - // p(rule.f_ | z) * p(rule.e_ | rule.f_ , z) - prob_t RuleProbability(const TRule& rule) const { - const prob_t p0 = (*rp0)(rule); - prob_t srcp; srcp.logeq(src_phrases.logprob(rule.f_, log(srcp0(rule.f_)))); - const RuleCRPMap::const_iterator it = rules.find(rule.f_); - if (it == rules.end()) return srcp * p0; - const double lp = it->second.logprob(rule, log(p0)); - prob_t q; q.logeq(lp); - return q * srcp; - } - - prob_t Likelihood() const { - prob_t p = base; - for (RuleCRPMap::const_iterator it = rules.begin(); - it != rules.end(); ++it) { - prob_t cl; cl.logeq(it->second.log_crp_prob()); - p *= cl; - } - for 
(unsigned l = 1; l < src_jumps.size(); ++l) { - if (src_jumps[l].num_customers() > 0) { - prob_t q; - q.logeq(src_jumps[l].log_crp_prob()); - p *= q; - } - } - return p; - } - - JumpBase jp0; - const PhraseConditionalBase* rp0; - prob_t base; - typedef unordered_map<vector<WordID>, CCRP_NoTable<TRule>, boost::hash<vector<WordID> > > RuleCRPMap; - RuleCRPMap rules; - CCRP_NoTable<vector<WordID> > src_phrases; - vector<CCRP_NoTable<int> > src_jumps; -}; - -#endif - -struct MyJointModel { - MyJointModel(PhraseJointBase& rcp0) : - rp0(rcp0), base(prob_t::One()), rules(1,1), src_jumps(200, CCRP_NoTable<int>(1,1)) {} - - void DecrementRule(const TRule& rule) { - if (rules.decrement(rule)) - base /= rp0(rule); - } - - void IncrementRule(const TRule& rule) { - if (rules.increment(rule)) - base *= rp0(rule); - } - - void IncrementRules(const vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - IncrementRule(*rules[i]); - } - - void DecrementRules(const vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - DecrementRule(*rules[i]); - } - - void IncrementJump(int dist, unsigned src_len) { - assert(src_len > 0); - if (src_jumps[src_len].increment(dist)) - base *= jp0(dist, src_len); - } - - void DecrementJump(int dist, unsigned src_len) { - assert(src_len > 0); - if (src_jumps[src_len].decrement(dist)) - base /= jp0(dist, src_len); - } - - void IncrementJumps(const vector<int>& js, unsigned src_len) { - for (unsigned i = 0; i < js.size(); ++i) - IncrementJump(js[i], src_len); - } - - void DecrementJumps(const vector<int>& js, unsigned src_len) { - for (unsigned i = 0; i < js.size(); ++i) - DecrementJump(js[i], src_len); - } - - // p(jump = dist | src_len , z) - prob_t JumpProbability(int dist, unsigned src_len) { - const prob_t p0 = jp0(dist, src_len); - const double lp = src_jumps[src_len].logprob(dist, log(p0)); - prob_t q; q.logeq(lp); - return q; - } - - // p(rule.f_ | z) * p(rule.e_ | rule.f_ , z) - prob_t RuleProbability(const TRule& rule) const { - prob_t p; p.logeq(rules.logprob(rule, log(rp0(rule)))); - return p; - } - - prob_t Likelihood() const { - prob_t p = base; - prob_t q; q.logeq(rules.log_crp_prob()); - p *= q; - for (unsigned l = 1; l < src_jumps.size(); ++l) { - if (src_jumps[l].num_customers() > 0) { - prob_t q; - q.logeq(src_jumps[l].log_crp_prob()); - p *= q; - } - } - return p; - } - - JumpBase jp0; - const PhraseJointBase& rp0; - prob_t base; - CCRP_NoTable<TRule> rules; - vector<CCRP_NoTable<int> > src_jumps; -}; - -struct BackwardEstimate { - BackwardEstimate(const Model1& m1, const vector<WordID>& src, const vector<WordID>& trg) : - model1_(m1), src_(src), trg_(trg) { - } - const prob_t& operator()(const vector<bool>& src_cov, unsigned trg_cov) const { - assert(src_.size() == src_cov.size()); - assert(trg_cov <= trg_.size()); - prob_t& e = cache_[src_cov][trg_cov]; - if (e.is_0()) { - if (trg_cov == trg_.size()) { e = prob_t::One(); return e; } - vector<WordID> r(src_.size() + 1); r.clear(); - r.push_back(0); // NULL word - for (int i = 0; i < src_cov.size(); ++i) - if (!src_cov[i]) r.push_back(src_[i]); - const prob_t uniform_alignment(1.0 / r.size()); - e.logeq(log_poisson(trg_.size() - trg_cov, r.size() - 1)); // p(trg len remaining | src len remaining) - for (unsigned j = trg_cov; j < trg_.size(); ++j) { - prob_t p; - for (unsigned i = 0; i < r.size(); ++i) - p += model1_(r[i], trg_[j]); - if (p.is_0()) { - cerr << "ERROR: p(" << TD::Convert(trg_[j]) << " | " << TD::GetString(r) << ") = 0!\n"; - abort(); - } - p *= uniform_alignment; - e 
*= p; - } - } - return e; - } - const Model1& model1_; - const vector<WordID>& src_; - const vector<WordID>& trg_; - mutable unordered_map<vector<bool>, map<unsigned, prob_t>, boost::hash<vector<bool> > > cache_; -}; - -struct BackwardEstimateSym { - BackwardEstimateSym(const Model1& m1, - const Model1& invm1, const vector<WordID>& src, const vector<WordID>& trg) : - model1_(m1), invmodel1_(invm1), src_(src), trg_(trg) { - } - const prob_t& operator()(const vector<bool>& src_cov, unsigned trg_cov) const { - assert(src_.size() == src_cov.size()); - assert(trg_cov <= trg_.size()); - prob_t& e = cache_[src_cov][trg_cov]; - if (e.is_0()) { - if (trg_cov == trg_.size()) { e = prob_t::One(); return e; } - vector<WordID> r(src_.size() + 1); r.clear(); - for (int i = 0; i < src_cov.size(); ++i) - if (!src_cov[i]) r.push_back(src_[i]); - r.push_back(0); // NULL word - const prob_t uniform_alignment(1.0 / r.size()); - e.logeq(log_poisson(trg_.size() - trg_cov, r.size() - 1)); // p(trg len remaining | src len remaining) - for (unsigned j = trg_cov; j < trg_.size(); ++j) { - prob_t p; - for (unsigned i = 0; i < r.size(); ++i) - p += model1_(r[i], trg_[j]); - if (p.is_0()) { - cerr << "ERROR: p(" << TD::Convert(trg_[j]) << " | " << TD::GetString(r) << ") = 0!\n"; - abort(); - } - p *= uniform_alignment; - e *= p; - } - r.pop_back(); - const prob_t inv_uniform(1.0 / (trg_.size() - trg_cov + 1.0)); - prob_t inv; - inv.logeq(log_poisson(r.size(), trg_.size() - trg_cov)); - for (unsigned i = 0; i < r.size(); ++i) { - prob_t p; - for (unsigned j = trg_cov - 1; j < trg_.size(); ++j) - p += invmodel1_(j < trg_cov ? 0 : trg_[j], r[i]); - if (p.is_0()) { - cerr << "ERROR: p_inv(" << TD::Convert(r[i]) << " | " << TD::GetString(trg_) << ") = 0!\n"; - abort(); - } - p *= inv_uniform; - inv *= p; - } - prob_t x = pow(e * inv, 0.5); - e = x; - //cerr << "Forward: " << log(e) << "\tBackward: " << log(inv) << "\t prop: " << log(x) << endl; - } - return e; - } - const Model1& model1_; - const Model1& invmodel1_; - const vector<WordID>& src_; - const vector<WordID>& trg_; - mutable unordered_map<vector<bool>, map<unsigned, prob_t>, boost::hash<vector<bool> > > cache_; -}; - -struct Particle { - Particle() : weight(prob_t::One()), src_cov(), trg_cov(), prev_pos(-1) {} - prob_t weight; - prob_t gamma_last; - vector<int> src_jumps; - vector<TRulePtr> rules; - vector<bool> src_cv; - int src_cov; - int trg_cov; - int prev_pos; -}; - -ostream& operator<<(ostream& o, const vector<bool>& v) { - for (int i = 0; i < v.size(); ++i) - o << (v[i] ? 
'1' : '0'); - return o; -} -ostream& operator<<(ostream& o, const Particle& p) { - o << "[cv=" << p.src_cv << " src_cov=" << p.src_cov << " trg_cov=" << p.trg_cov << " last_pos=" << p.prev_pos << " num_rules=" << p.rules.size() << " w=" << log(p.weight) << ']'; - return o; -} - -void FilterCrapParticlesAndReweight(vector<Particle>* pps) { - vector<Particle>& ps = *pps; - SampleSet<prob_t> ss; - for (int i = 0; i < ps.size(); ++i) - ss.add(ps[i].weight); - vector<Particle> nps; nps.reserve(ps.size()); - const prob_t uniform_weight(1.0 / ps.size()); - for (int i = 0; i < ps.size(); ++i) { - nps.push_back(ps[prng->SelectSample(ss)]); - nps[i].weight = uniform_weight; - } - nps.swap(ps); -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - const unsigned kMAX_TRG_PHRASE = conf["max_trg_phrase"].as<unsigned>(); - const unsigned kMAX_SRC_PHRASE = conf["max_src_phrase"].as<unsigned>(); - const unsigned particles = conf["particles"].as<unsigned>(); - const unsigned samples = conf["samples"].as<unsigned>(); - - if (!conf.count("model1")) { - cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n"; - return 1; - } - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - - vector<vector<WordID> > corpuse, corpusf; - set<WordID> vocabe, vocabf; - cerr << "Reading corpus...\n"; - ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe); - cerr << "F-corpus size: " << corpusf.size() << " sentences\t (" << vocabf.size() << " word types)\n"; - cerr << "E-corpus size: " << corpuse.size() << " sentences\t (" << vocabe.size() << " word types)\n"; - assert(corpusf.size() == corpuse.size()); - - const int kLHS = -TD::Convert("X"); - Model1 m1(conf["model1"].as<string>()); - Model1 invm1(conf["inverse_model1"].as<string>()); - -#if 0 - PhraseConditionalBase lp0(m1, conf["model1_interpolation_weight"].as<double>(), vocabe.size()); - MyConditionalModel m(lp0); -#else - PhraseJointBase lp0(m1, conf["model1_interpolation_weight"].as<double>(), vocabe.size(), vocabf.size()); - MyJointModel m(lp0); -#endif - - cerr << "Initializing reachability limits...\n"; - vector<Particle> ps(corpusf.size()); - vector<Reachability> reaches; reaches.reserve(corpusf.size()); - for (int ci = 0; ci < corpusf.size(); ++ci) - reaches.push_back(Reachability(corpusf[ci].size(), - corpuse[ci].size(), - kMAX_SRC_PHRASE, - kMAX_TRG_PHRASE)); - cerr << "Sampling...\n"; - vector<Particle> tmp_p(10000); // work space - SampleSet<prob_t> pfss; - for (int SS=0; SS < samples; ++SS) { - for (int ci = 0; ci < corpusf.size(); ++ci) { - vector<int>& src = corpusf[ci]; - vector<int>& trg = corpuse[ci]; - m.DecrementRules(ps[ci].rules); - m.DecrementJumps(ps[ci].src_jumps, src.size()); - - //BackwardEstimate be(m1, src, trg); - BackwardEstimateSym be(m1, invm1, src, trg); - const Reachability& r = reaches[ci]; - vector<Particle> lps(particles); - - for (int pi = 0; pi < particles; ++pi) { - Particle& p = lps[pi]; - p.src_cv.resize(src.size(), false); - } - - bool all_complete = false; - while(!all_complete) { - SampleSet<prob_t> ss; - - // all particles have now been extended a bit, we will reweight them now - if (lps[0].trg_cov > 0) - FilterCrapParticlesAndReweight(&lps); - - // loop over all particles and extend them - bool done_nothing = true; - for (int pi = 0; pi < particles; ++pi) { - Particle& p = lps[pi]; - int tic = 0; - const int rejuv_freq = 1; - 
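// The extension step that follows is a single sequential-importance-sampling
// move. Every legal edge x receives proposal mass u(x) = (gamma(x) * BE(x))^0.2,
// where gamma(x) = RuleProbability(x) * JumpProbability(x) is the true
// incremental score and BE is the backward (outside) estimate; one edge is then
// drawn with probability q(x) = u(x) / Z, and the particle weight picks up the
// usual importance correction
//   w  <-  w * gamma(x) / q(x)
// (the "p.weight *= p.gamma_last / q_n" update below). Raising to the 0.2 power
// flattens the proposal, so low-probability edges are still occasionally explored.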
while(p.trg_cov < trg.size() && tic < rejuv_freq) { - ++tic; - done_nothing = false; - ss.clear(); - TRule x; x.lhs_ = kLHS; - prob_t z; - int first_uncovered = src.size(); - int last_uncovered = -1; - for (int i = 0; i < src.size(); ++i) { - const bool is_uncovered = !p.src_cv[i]; - if (i < first_uncovered && is_uncovered) first_uncovered = i; - if (is_uncovered && i > last_uncovered) last_uncovered = i; - } - assert(last_uncovered > -1); - assert(first_uncovered < src.size()); - - for (int trg_len = 1; trg_len <= kMAX_TRG_PHRASE; ++trg_len) { - x.e_.push_back(trg[trg_len - 1 + p.trg_cov]); - for (int src_len = 1; src_len <= kMAX_SRC_PHRASE; ++src_len) { - if (!r.edges[p.src_cov][p.trg_cov][src_len][trg_len]) continue; - - const int last_possible_start = last_uncovered - src_len + 1; - assert(last_possible_start >= 0); - //cerr << src_len << "," << trg_len << " is allowed. E=" << TD::GetString(x.e_) << endl; - //cerr << " first_uncovered=" << first_uncovered << " last_possible_start=" << last_possible_start << endl; - for (int i = first_uncovered; i <= last_possible_start; ++i) { - if (p.src_cv[i]) continue; - assert(ss.size() < tmp_p.size()); // if fails increase tmp_p size - Particle& np = tmp_p[ss.size()]; - np = p; - x.f_.clear(); - int gap_add = 0; - bool bad = false; - prob_t jp = prob_t::One(); - int prev_pos = p.prev_pos; - for (int j = 0; j < src_len; ++j) { - if ((j + i + gap_add) == src.size()) { bad = true; break; } - while ((i+j+gap_add) < src.size() && p.src_cv[i + j + gap_add]) { ++gap_add; } - if ((j + i + gap_add) == src.size()) { bad = true; break; } - np.src_cv[i + j + gap_add] = true; - x.f_.push_back(src[i + j + gap_add]); - jp *= m.JumpProbability(i + j + gap_add - prev_pos, src.size()); - int jump = i + j + gap_add - prev_pos; - assert(jump != 0); - np.src_jumps.push_back(jump); - prev_pos = i + j + gap_add; - } - if (bad) continue; - np.prev_pos = prev_pos; - np.src_cov += x.f_.size(); - np.trg_cov += x.e_.size(); - if (x.f_.size() != src_len) continue; - prob_t rp = m.RuleProbability(x); - np.gamma_last = rp * jp; - const prob_t u = pow(np.gamma_last * be(np.src_cv, np.trg_cov), 0.2); - //cerr << "**rule=" << x << endl; - //cerr << " u=" << log(u) << " rule=" << rp << " jump=" << jp << endl; - ss.add(u); - np.rules.push_back(TRulePtr(new TRule(x))); - z += u; - - const bool completed = (p.trg_cov == trg.size()); - if (completed) { - int last_jump = src.size() - p.prev_pos; - assert(last_jump > 0); - p.src_jumps.push_back(last_jump); - p.weight *= m.JumpProbability(last_jump, src.size()); - } - } - } - } - cerr << "number of edges to consider: " << ss.size() << endl; - const int sampled = rng.SelectSample(ss); - prob_t q_n = ss[sampled] / z; - p = tmp_p[sampled]; - //m.IncrementRule(*p.rules.back()); - p.weight *= p.gamma_last / q_n; - cerr << "[w=" << log(p.weight) << "]\tsampled rule: " << p.rules.back()->AsString() << endl; - cerr << p << endl; - } - } // loop over particles (pi = 0 .. 
particles) - if (done_nothing) all_complete = true; - } - pfss.clear(); - for (int i = 0; i < lps.size(); ++i) - pfss.add(lps[i].weight); - const int sampled = rng.SelectSample(pfss); - ps[ci] = lps[sampled]; - m.IncrementRules(lps[sampled].rules); - m.IncrementJumps(lps[sampled].src_jumps, src.size()); - for (int i = 0; i < lps[sampled].rules.size(); ++i) { cerr << "S:\t" << lps[sampled].rules[i]->AsString() << "\n"; } - cerr << "tmp-LLH: " << log(m.Likelihood()) << endl; - } - cerr << "LLH: " << log(m.Likelihood()) << endl; - for (int sni = 0; sni < 5; ++sni) { - for (int i = 0; i < ps[sni].rules.size(); ++i) { cerr << "\t" << ps[sni].rules[i]->AsString() << endl; } - } - } - return 0; -} - diff --git a/gi/pf/pfnaive.cc b/gi/pf/pfnaive.cc deleted file mode 100644 index 958ec4e2..00000000 --- a/gi/pf/pfnaive.cc +++ /dev/null @@ -1,284 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/functional.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "pf.h" -#include "base_distributions.h" -#include "monotonic_pseg.h" -#include "reachability.h" -#include "viterbi.h" -#include "hg.h" -#include "trule.h" -#include "tdict.h" -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp_nt.h" -#include "ccrp_onetable.h" -#include "corpus.h" - -using namespace std; -using namespace tr1; -namespace po = boost::program_options; - -boost::shared_ptr<MT19937> prng; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("particles,p",po::value<unsigned>()->default_value(30),"Number of particles") - ("filter_frequency,f",po::value<unsigned>()->default_value(5),"Number of time steps between filterings") - ("input,i",po::value<string>(),"Read parallel data from") - ("max_src_phrase",po::value<unsigned>()->default_value(5),"Maximum length of source language phrases") - ("max_trg_phrase",po::value<unsigned>()->default_value(5),"Maximum length of target language phrases") - ("model1,m",po::value<string>(),"Model 1 parameters (used in base distribution)") - ("inverse_model1,M",po::value<string>(),"Inverse Model 1 parameters (used in backward estimate)") - ("model1_interpolation_weight",po::value<double>()->default_value(0.95),"Mixing proportion of model 1 with uniform target distribution") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -struct BackwardEstimateSym { - BackwardEstimateSym(const Model1& m1, - const Model1& invm1, const vector<WordID>& src, const vector<WordID>& trg) : - model1_(m1), invmodel1_(invm1), src_(src), trg_(trg) { - } - const prob_t& operator()(unsigned src_cov, unsigned trg_cov) const { - assert(src_cov <= src_.size()); 
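// The cached estimate e computed in this operator() is the geometric mean of two
// IBM Model 1 outside scores, forward and inverse:
//   e_fwd = Poisson(|t| - t_cov ; |r| - 1) * prod_{j >= t_cov} (1/|r|) sum_i p_m1(t_j | r_i)
// with r = the still-uncovered source words plus NULL; e_inv scores the
// uncovered source given the uncovered target the same way under the inverse
// Model 1, and the value returned is e = sqrt(e_fwd * e_inv), computed below as
// pow(e * inv, 0.5).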
- assert(trg_cov <= trg_.size()); - prob_t& e = cache_[src_cov][trg_cov]; - if (e.is_0()) { - if (trg_cov == trg_.size()) { e = prob_t::One(); return e; } - vector<WordID> r(src_.size() + 1); r.clear(); - for (int i = src_cov; i < src_.size(); ++i) - r.push_back(src_[i]); - r.push_back(0); // NULL word - const prob_t uniform_alignment(1.0 / r.size()); - e.logeq(Md::log_poisson(trg_.size() - trg_cov, r.size() - 1)); // p(trg len remaining | src len remaining) - for (unsigned j = trg_cov; j < trg_.size(); ++j) { - prob_t p; - for (unsigned i = 0; i < r.size(); ++i) - p += model1_(r[i], trg_[j]); - if (p.is_0()) { - cerr << "ERROR: p(" << TD::Convert(trg_[j]) << " | " << TD::GetString(r) << ") = 0!\n"; - abort(); - } - p *= uniform_alignment; - e *= p; - } - r.pop_back(); - const prob_t inv_uniform(1.0 / (trg_.size() - trg_cov + 1.0)); - prob_t inv; - inv.logeq(Md::log_poisson(r.size(), trg_.size() - trg_cov)); - for (unsigned i = 0; i < r.size(); ++i) { - prob_t p; - for (unsigned j = trg_cov - 1; j < trg_.size(); ++j) - p += invmodel1_(j < trg_cov ? 0 : trg_[j], r[i]); - if (p.is_0()) { - cerr << "ERROR: p_inv(" << TD::Convert(r[i]) << " | " << TD::GetString(trg_) << ") = 0!\n"; - abort(); - } - p *= inv_uniform; - inv *= p; - } - prob_t x = pow(e * inv, 0.5); - e = x; - //cerr << "Forward: " << log(e) << "\tBackward: " << log(inv) << "\t prop: " << log(x) << endl; - } - return e; - } - const Model1& model1_; - const Model1& invmodel1_; - const vector<WordID>& src_; - const vector<WordID>& trg_; - mutable unordered_map<unsigned, map<unsigned, prob_t> > cache_; -}; - -struct Particle { - Particle() : weight(prob_t::One()), src_cov(), trg_cov() {} - prob_t weight; - prob_t gamma_last; - vector<TRulePtr> rules; - int src_cov; - int trg_cov; -}; - -ostream& operator<<(ostream& o, const vector<bool>& v) { - for (int i = 0; i < v.size(); ++i) - o << (v[i] ? 
'1' : '0'); - return o; -} -ostream& operator<<(ostream& o, const Particle& p) { - o << "[src_cov=" << p.src_cov << " trg_cov=" << p.trg_cov << " num_rules=" << p.rules.size() << " w=" << log(p.weight) << ']'; - return o; -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - const unsigned kMAX_TRG_PHRASE = conf["max_trg_phrase"].as<unsigned>(); - const unsigned kMAX_SRC_PHRASE = conf["max_src_phrase"].as<unsigned>(); - const unsigned particles = conf["particles"].as<unsigned>(); - const unsigned samples = conf["samples"].as<unsigned>(); - const unsigned rejuv_freq = conf["filter_frequency"].as<unsigned>(); - - if (!conf.count("model1")) { - cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n"; - return 1; - } - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - - vector<vector<WordID> > corpuse, corpusf; - set<WordID> vocabe, vocabf; - cerr << "Reading corpus...\n"; - corpus::ReadParallelCorpus(conf["input"].as<string>(), &corpusf, &corpuse, &vocabf, &vocabe); - cerr << "F-corpus size: " << corpusf.size() << " sentences\t (" << vocabf.size() << " word types)\n"; - cerr << "E-corpus size: " << corpuse.size() << " sentences\t (" << vocabe.size() << " word types)\n"; - assert(corpusf.size() == corpuse.size()); - - const int kLHS = -TD::Convert("X"); - Model1 m1(conf["model1"].as<string>()); - Model1 invm1(conf["inverse_model1"].as<string>()); - - PhraseJointBase lp0(m1, conf["model1_interpolation_weight"].as<double>(), vocabe.size(), vocabf.size()); - PhraseJointBase_BiDir alp0(m1, invm1, conf["model1_interpolation_weight"].as<double>(), vocabe.size(), vocabf.size()); - MonotonicParallelSegementationModel<PhraseJointBase_BiDir> m(alp0); - TRule xx("[X] ||| ms. kimura ||| MS. KIMURA ||| X=0"); - cerr << xx << endl << lp0(xx) << " " << alp0(xx) << endl; - TRule xx12("[X] ||| . ||| PHARMACY . ||| X=0"); - TRule xx21("[X] ||| pharmacy . ||| . ||| X=0"); -// TRule xx22("[X] ||| . ||| . ||| X=0"); - TRule xx22("[X] ||| . ||| THE . 
||| X=0"); - cerr << xx12 << "\t" << lp0(xx12) << " " << alp0(xx12) << endl; - cerr << xx21 << "\t" << lp0(xx21) << " " << alp0(xx21) << endl; - cerr << xx22 << "\t" << lp0(xx22) << " " << alp0(xx22) << endl; - - cerr << "Initializing reachability limits...\n"; - vector<Particle> ps(corpusf.size()); - vector<Reachability> reaches; reaches.reserve(corpusf.size()); - for (int ci = 0; ci < corpusf.size(); ++ci) - reaches.push_back(Reachability(corpusf[ci].size(), - corpuse[ci].size(), - kMAX_SRC_PHRASE, - kMAX_TRG_PHRASE)); - cerr << "Sampling...\n"; - vector<Particle> tmp_p(10000); // work space - SampleSet<prob_t> pfss; - SystematicResampleFilter<Particle> filter(&rng); - // MultinomialResampleFilter<Particle> filter(&rng); - for (int SS=0; SS < samples; ++SS) { - for (int ci = 0; ci < corpusf.size(); ++ci) { - vector<int>& src = corpusf[ci]; - vector<int>& trg = corpuse[ci]; - m.DecrementRulesAndStops(ps[ci].rules); - const prob_t q_stop = m.StopProbability(); - const prob_t q_cont = m.ContinueProbability(); - cerr << "P(stop)=" << q_stop << "\tP(continue)=" <<q_cont << endl; - - BackwardEstimateSym be(m1, invm1, src, trg); - const Reachability& r = reaches[ci]; - vector<Particle> lps(particles); - - bool all_complete = false; - while(!all_complete) { - SampleSet<prob_t> ss; - - // all particles have now been extended a bit, we will reweight them now - if (lps[0].trg_cov > 0) - filter(&lps); - - // loop over all particles and extend them - bool done_nothing = true; - for (int pi = 0; pi < particles; ++pi) { - Particle& p = lps[pi]; - int tic = 0; - while(p.trg_cov < trg.size() && tic < rejuv_freq) { - ++tic; - done_nothing = false; - ss.clear(); - TRule x; x.lhs_ = kLHS; - prob_t z; - - for (int trg_len = 1; trg_len <= kMAX_TRG_PHRASE; ++trg_len) { - x.e_.push_back(trg[trg_len - 1 + p.trg_cov]); - for (int src_len = 1; src_len <= kMAX_SRC_PHRASE; ++src_len) { - if (!r.edges[p.src_cov][p.trg_cov][src_len][trg_len]) continue; - - int i = p.src_cov; - assert(ss.size() < tmp_p.size()); // if fails increase tmp_p size - Particle& np = tmp_p[ss.size()]; - np = p; - x.f_.clear(); - for (int j = 0; j < src_len; ++j) - x.f_.push_back(src[i + j]); - np.src_cov += x.f_.size(); - np.trg_cov += x.e_.size(); - const bool stop_now = (np.src_cov == src_len && np.trg_cov == trg_len); - prob_t rp = m.RuleProbability(x) * (stop_now ? q_stop : q_cont); - np.gamma_last = rp; - const prob_t u = pow(np.gamma_last * pow(be(np.src_cov, np.trg_cov), 1.2), 0.1); - //cerr << "**rule=" << x << endl; - //cerr << " u=" << log(u) << " rule=" << rp << endl; - ss.add(u); - np.rules.push_back(TRulePtr(new TRule(x))); - z += u; - } - } - //cerr << "number of edges to consider: " << ss.size() << endl; - const int sampled = rng.SelectSample(ss); - prob_t q_n = ss[sampled] / z; - p = tmp_p[sampled]; - //m.IncrementRule(*p.rules.back()); - p.weight *= p.gamma_last / q_n; - //cerr << "[w=" << log(p.weight) << "]\tsampled rule: " << p.rules.back()->AsString() << endl; - //cerr << p << endl; - } - } // loop over particles (pi = 0 .. 
particles) - if (done_nothing) all_complete = true; - prob_t wv = prob_t::Zero(); - for (int pp = 0; pp < lps.size(); ++pp) - wv += lps[pp].weight; - for (int pp = 0; pp < lps.size(); ++pp) - lps[pp].weight /= wv; - } - pfss.clear(); - for (int i = 0; i < lps.size(); ++i) - pfss.add(lps[i].weight); - const int sampled = rng.SelectSample(pfss); - ps[ci] = lps[sampled]; - m.IncrementRulesAndStops(lps[sampled].rules); - for (int i = 0; i < lps[sampled].rules.size(); ++i) { cerr << "S:\t" << lps[sampled].rules[i]->AsString() << "\n"; } - cerr << "tmp-LLH: " << log(m.Likelihood()) << endl; - } - cerr << "LLH: " << log(m.Likelihood()) << endl; - } - return 0; -} - diff --git a/gi/pf/poisson_uniform_word_model.h b/gi/pf/poisson_uniform_word_model.h deleted file mode 100644 index 76204a0e..00000000 --- a/gi/pf/poisson_uniform_word_model.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef _POISSON_UNIFORM_WORD_MODEL_H_ -#define _POISSON_UNIFORM_WORD_MODEL_H_ - -#include <cmath> -#include <vector> -#include "prob.h" -#include "m.h" - -// len ~ Poisson(lambda) -// for (1..len) -// e_i ~ Uniform({Vocabulary}) -struct PoissonUniformWordModel { - explicit PoissonUniformWordModel(const unsigned vocab_size, - const unsigned alphabet_size, - const double mean_len = 5) : - lh(prob_t::One()), - v0(-std::log(vocab_size)), - u0(-std::log(alphabet_size)), - mean_length(mean_len) {} - - void ResampleHyperparameters(MT19937*) {} - - inline prob_t operator()(const std::vector<WordID>& s) const { - prob_t p; - p.logeq(Md::log_poisson(s.size(), mean_length) + s.size() * u0); - //p.logeq(v0); - return p; - } - - inline void Increment(const std::vector<WordID>& w, MT19937*) { - lh *= (*this)(w); - } - - inline void Decrement(const std::vector<WordID>& w, MT19937 *) { - lh /= (*this)(w); - } - - inline prob_t Likelihood() const { return lh; } - - void Summary() const {} - - private: - - prob_t lh; // keeps track of the draws from the base distribution - const double v0; // uniform log prob of generating a word - const double u0; // uniform log prob of generating a letter - const double mean_length; // mean length of a word in the base distribution -}; - -#endif diff --git a/gi/pf/pyp_lm.cc b/gi/pf/pyp_lm.cc deleted file mode 100644 index 605d8206..00000000 --- a/gi/pf/pyp_lm.cc +++ /dev/null @@ -1,273 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/functional.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "gamma_poisson.h" -#include "corpus_tools.h" -#include "m.h" -#include "tdict.h" -#include "sampler.h" -#include "ccrp.h" -#include "tied_resampler.h" - -// A not very memory-efficient implementation of an N-gram LM based on PYPs -// as described in Y.-W. Teh. (2006) A Hierarchical Bayesian Language Model -// based on Pitman-Yor Processes. In Proc. ACL. 
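// For reference, each CCRP node in the hierarchy below computes the standard
// Pitman-Yor predictive probability
//   p(w | u) = (c_uw - d * t_uw) / (theta + c_u)
//            + (theta + d * t_u) / (theta + c_u) * p(w | pi(u))
// where c_uw and t_uw are the customer and table counts for word w in context u,
// d and theta are the discount and strength, and pi(u) drops the most distant
// context word; the recursion bottoms out in the UniformVocabulary base,
// p(w) = 1 / V.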
- -// I use templates to handle the recursive formulation of the prior, so -// the order of the model has to be specified here, at compile time: -#define kORDER 3 - -using namespace std; -using namespace tr1; -namespace po = boost::program_options; - -boost::shared_ptr<MT19937> prng; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,n",po::value<unsigned>()->default_value(300),"Number of samples") - ("train,i",po::value<string>(),"Training data file") - ("test,T",po::value<string>(),"Test data file") - ("discount_prior_a,a",po::value<double>()->default_value(1.0), "discount ~ Beta(a,b): a=this") - ("discount_prior_b,b",po::value<double>()->default_value(1.0), "discount ~ Beta(a,b): b=this") - ("strength_prior_s,s",po::value<double>()->default_value(1.0), "strength ~ Gamma(s,r): s=this") - ("strength_prior_r,r",po::value<double>()->default_value(1.0), "strength ~ Gamma(s,r): r=this") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("train") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -// uniform distribution over a fixed vocabulary -struct UniformVocabulary { - UniformVocabulary(unsigned vs, double, double, double, double) : p0(1.0 / vs), draws() {} - void increment(WordID, const vector<WordID>&, MT19937*) { ++draws; } - void decrement(WordID, const vector<WordID>&, MT19937*) { --draws; assert(draws >= 0); } - double prob(WordID, const vector<WordID>&) const { return p0; } - void resample_hyperparameters(MT19937*) {} - double log_likelihood() const { return draws * log(p0); } - const double p0; - int draws; -}; - -// Lord Rothschild. 1986. THE DISTRIBUTION OF ENGLISH DICTIONARY WORD LENGTHS. 
-// Journal of Statistical Planning and Inference 14 (1986) 311-322 -struct PoissonLengthUniformCharWordModel { - explicit PoissonLengthUniformCharWordModel(unsigned vocab_size, double, double, double, double) : plen(5,5), uc(-log(95)), llh() {} - void increment(WordID w, const vector<WordID>& v, MT19937*) { - llh += log(prob(w, v)); // this isn't quite right - plen.increment(TD::Convert(w).size() - 1); - } - void decrement(WordID w, const vector<WordID>& v, MT19937*) { - plen.decrement(TD::Convert(w).size() - 1); - llh -= log(prob(w, v)); // this isn't quite right - } - double prob(WordID w, const vector<WordID>&) const { - const unsigned len = TD::Convert(w).size(); - return plen.prob(len - 1) * exp(uc * len); - } - double log_likelihood() const { return llh; } - void resample_hyperparameters(MT19937*) {} - GammaPoisson plen; - const double uc; - double llh; -}; - -struct PYPAdaptedPoissonLengthUniformCharWordModel { - explicit PYPAdaptedPoissonLengthUniformCharWordModel(unsigned vocab_size, double, double, double, double) : - base(vocab_size,1,1,1,1), - crp(1,1,1,1) {} - void increment(WordID w, const vector<WordID>& v, MT19937* rng) { - double p0 = base.prob(w, v); - if (crp.increment(w, p0, rng)) - base.increment(w, v, rng); - } - void decrement(WordID w, const vector<WordID>& v, MT19937* rng) { - if (crp.decrement(w, rng)) - base.decrement(w, v, rng); - } - double prob(WordID w, const vector<WordID>& v) const { - double p0 = base.prob(w, v); - return crp.prob(w, p0); - } - double log_likelihood() const { return crp.log_crp_prob() + base.log_likelihood(); } - void resample_hyperparameters(MT19937* rng) { crp.resample_hyperparameters(rng); } - PoissonLengthUniformCharWordModel base; - CCRP<WordID> crp; -}; - -template <unsigned N> struct PYPLM; - -#if 1 -template<> struct PYPLM<0> : public UniformVocabulary { - PYPLM(unsigned vs, double a, double b, double c, double d) : - UniformVocabulary(vs, a, b, c, d) {} -}; -#else -#if 0 -template<> struct PYPLM<0> : public PoissonLengthUniformCharWordModel { - PYPLM(unsigned vs, double a, double b, double c, double d) : - PoissonLengthUniformCharWordModel(vs, a, b, c, d) {} -}; -#else -template<> struct PYPLM<0> : public PYPAdaptedPoissonLengthUniformCharWordModel { - PYPLM(unsigned vs, double a, double b, double c, double d) : - PYPAdaptedPoissonLengthUniformCharWordModel(vs, a, b, c, d) {} -}; -#endif -#endif - -// represents an N-gram LM -template <unsigned N> struct PYPLM { - PYPLM(unsigned vs, double da, double db, double ss, double sr) : - backoff(vs, da, db, ss, sr), - tr(da, db, ss, sr, 0.8, 1.0), - lookup(N-1) {} - void increment(WordID w, const vector<WordID>& context, MT19937* rng) { - const double bo = backoff.prob(w, context); - for (unsigned i = 0; i < N-1; ++i) - lookup[i] = context[context.size() - 1 - i]; - typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::iterator it = p.find(lookup); - if (it == p.end()) { - it = p.insert(make_pair(lookup, CCRP<WordID>(0.5,1))).first; - tr.Add(&it->second); // add to resampler - } - if (it->second.increment(w, bo, rng)) - backoff.increment(w, context, rng); - } - void decrement(WordID w, const vector<WordID>& context, MT19937* rng) { - for (unsigned i = 0; i < N-1; ++i) - lookup[i] = context[context.size() - 1 - i]; - typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::iterator it = p.find(lookup); - assert(it != p.end()); - if (it->second.decrement(w, rng)) - backoff.decrement(w, context, rng); - } - double prob(WordID w, 
const vector<WordID>& context) const { - const double bo = backoff.prob(w, context); - for (unsigned i = 0; i < N-1; ++i) - lookup[i] = context[context.size() - 1 - i]; - typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::const_iterator it = p.find(lookup); - if (it == p.end()) return bo; - return it->second.prob(w, bo); - } - - double log_likelihood() const { - double llh = backoff.log_likelihood(); - typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::const_iterator it; - for (it = p.begin(); it != p.end(); ++it) - llh += it->second.log_crp_prob(); - llh += tr.LogLikelihood(); - return llh; - } - - void resample_hyperparameters(MT19937* rng) { - tr.ResampleHyperparameters(rng); - backoff.resample_hyperparameters(rng); - } - - PYPLM<N-1> backoff; - TiedResampler<CCRP<WordID> > tr; - double discount_a, discount_b, strength_s, strength_r; - double d, strength; - mutable vector<WordID> lookup; // thread-local - unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > > p; -}; - -int main(int argc, char** argv) { - po::variables_map conf; - - InitCommandLine(argc, argv, &conf); - const unsigned samples = conf["samples"].as<unsigned>(); - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - vector<vector<WordID> > corpuse; - set<WordID> vocabe; - const WordID kEOS = TD::Convert("</s>"); - cerr << "Reading corpus...\n"; - CorpusTools::ReadFromFile(conf["train"].as<string>(), &corpuse, &vocabe); - cerr << "E-corpus size: " << corpuse.size() << " sentences\t (" << vocabe.size() << " word types)\n"; - vector<vector<WordID> > test; - if (conf.count("test")) - CorpusTools::ReadFromFile(conf["test"].as<string>(), &test); - else - test = corpuse; - PYPLM<kORDER> lm(vocabe.size(), - conf["discount_prior_a"].as<double>(), - conf["discount_prior_b"].as<double>(), - conf["strength_prior_s"].as<double>(), - conf["strength_prior_r"].as<double>()); - vector<WordID> ctx(kORDER - 1, TD::Convert("<s>")); - for (int SS=0; SS < samples; ++SS) { - for (int ci = 0; ci < corpuse.size(); ++ci) { - ctx.resize(kORDER - 1); - const vector<WordID>& s = corpuse[ci]; - for (int i = 0; i <= s.size(); ++i) { - WordID w = (i < s.size() ? s[i] : kEOS); - if (SS > 0) lm.decrement(w, ctx, &rng); - lm.increment(w, ctx, &rng); - ctx.push_back(w); - } - } - if (SS % 10 == 9) { - cerr << " [LLH=" << lm.log_likelihood() << "]" << endl; - if (SS % 30 == 29) lm.resample_hyperparameters(&rng); - } else { cerr << '.' << flush; } - } - double llh = 0; - unsigned cnt = 0; - unsigned oovs = 0; - for (int ci = 0; ci < test.size(); ++ci) { - ctx.resize(kORDER - 1); - const vector<WordID>& s = test[ci]; - for (int i = 0; i <= s.size(); ++i) { - WordID w = (i < s.size() ? 
s[i] : kEOS); - double lp = log(lm.prob(w, ctx)) / log(2); - if (i < s.size() && vocabe.count(w) == 0) { - cerr << "**OOV "; - ++oovs; - lp = 0; - } - cerr << "p(" << TD::Convert(w) << " |"; - for (int j = ctx.size() + 1 - kORDER; j < ctx.size(); ++j) - cerr << ' ' << TD::Convert(ctx[j]); - cerr << ") = " << lp << endl; - ctx.push_back(w); - llh -= lp; - cnt++; - } - } - cerr << " Log_10 prob: " << (-llh * log(2) / log(10)) << endl; - cerr << " Count: " << cnt << endl; - cerr << " OOVs: " << oovs << endl; - cerr << "Cross-entropy: " << (llh / cnt) << endl; - cerr << " Perplexity: " << pow(2, llh / cnt) << endl; - return 0; -} - - diff --git a/gi/pf/pyp_tm.cc b/gi/pf/pyp_tm.cc deleted file mode 100644 index 37b9a604..00000000 --- a/gi/pf/pyp_tm.cc +++ /dev/null @@ -1,128 +0,0 @@ -#include "pyp_tm.h" - -#include <tr1/unordered_map> -#include <iostream> -#include <queue> - -#include "tdict.h" -#include "ccrp.h" -#include "pyp_word_model.h" -#include "tied_resampler.h" - -using namespace std; -using namespace std::tr1; - -struct FreqBinner { - FreqBinner(const std::string& fname) { fd_.Load(fname); } - unsigned NumberOfBins() const { return fd_.Max() + 1; } - unsigned Bin(const WordID& w) const { return fd_.LookUp(w); } - FreqDict<unsigned> fd_; -}; - -template <typename Base, class Binner = FreqBinner> -struct ConditionalPYPWordModel { - ConditionalPYPWordModel(Base* b, const Binner* bnr = NULL) : - base(*b), - binner(bnr), - btr(binner ? binner->NumberOfBins() + 1u : 2u) {} - - void Summary() const { - cerr << "Number of conditioning contexts: " << r.size() << endl; - for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) { - cerr << TD::Convert(it->first) << " \tPYP(d=" << it->second.discount() << ",s=" << it->second.strength() << ") --------------------------" << endl; - for (CCRP<vector<WordID> >::const_iterator i2 = it->second.begin(); i2 != it->second.end(); ++i2) - cerr << " " << i2->second << '\t' << TD::GetString(i2->first) << endl; - } - } - - void ResampleHyperparameters(MT19937* rng) { - btr.ResampleHyperparameters(rng); - } - - prob_t Prob(const WordID src, const vector<WordID>& trglets) const { - RuleModelHash::const_iterator it = r.find(src); - if (it == r.end()) { - return base(trglets); - } else { - return it->second.prob(trglets, base(trglets)); - } - } - - void Increment(const WordID src, const vector<WordID>& trglets, MT19937* rng) { - RuleModelHash::iterator it = r.find(src); - if (it == r.end()) { - it = r.insert(make_pair(src, CCRP<vector<WordID> >(0.5,1.0))).first; - static const WordID kNULL = TD::Convert("NULL"); - unsigned bin = (src == kNULL ? 
0 : 1); - if (binner && bin) { bin = binner->Bin(src) + 1; } - btr.Add(bin, &it->second); - } - if (it->second.increment(trglets, base(trglets), rng)) - base.Increment(trglets, rng); - } - - void Decrement(const WordID src, const vector<WordID>& trglets, MT19937* rng) { - RuleModelHash::iterator it = r.find(src); - assert(it != r.end()); - if (it->second.decrement(trglets, rng)) { - base.Decrement(trglets, rng); - } - } - - prob_t Likelihood() const { - prob_t p = prob_t::One(); - for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) { - prob_t q; q.logeq(it->second.log_crp_prob()); - p *= q; - } - return p; - } - - unsigned UniqueConditioningContexts() const { - return r.size(); - } - - // TODO tie PYP hyperparameters based on source word frequency bins - Base& base; - const Binner* binner; - BinTiedResampler<CCRP<vector<WordID> > > btr; - typedef unordered_map<WordID, CCRP<vector<WordID> > > RuleModelHash; - RuleModelHash r; -}; - -PYPLexicalTranslation::PYPLexicalTranslation(const vector<vector<WordID> >& lets, - const unsigned vocab_size, - const unsigned num_letters) : - letters(lets), - base(vocab_size, num_letters, 5), - tmodel(new ConditionalPYPWordModel<PoissonUniformWordModel>(&base, new FreqBinner("10k.freq"))), - kX(-TD::Convert("X")) {} - -void PYPLexicalTranslation::Summary() const { - tmodel->Summary(); -} - -prob_t PYPLexicalTranslation::Likelihood() const { - return tmodel->Likelihood() * base.Likelihood(); -} - -void PYPLexicalTranslation::ResampleHyperparameters(MT19937* rng) { - tmodel->ResampleHyperparameters(rng); -} - -unsigned PYPLexicalTranslation::UniqueConditioningContexts() const { - return tmodel->UniqueConditioningContexts(); -} - -prob_t PYPLexicalTranslation::Prob(WordID src, WordID trg) const { - return tmodel->Prob(src, letters[trg]); -} - -void PYPLexicalTranslation::Increment(WordID src, WordID trg, MT19937* rng) { - tmodel->Increment(src, letters[trg], rng); -} - -void PYPLexicalTranslation::Decrement(WordID src, WordID trg, MT19937* rng) { - tmodel->Decrement(src, letters[trg], rng); -} - diff --git a/gi/pf/pyp_tm.h b/gi/pf/pyp_tm.h deleted file mode 100644 index 2b076a25..00000000 --- a/gi/pf/pyp_tm.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef PYP_LEX_TRANS -#define PYP_LEX_TRANS - -#include <vector> -#include "wordid.h" -#include "prob.h" -#include "sampler.h" -#include "freqdict.h" -#include "poisson_uniform_word_model.h" - -struct FreqBinner; -template <typename T, class B> struct ConditionalPYPWordModel; - -struct PYPLexicalTranslation { - explicit PYPLexicalTranslation(const std::vector<std::vector<WordID> >& lets, - const unsigned vocab_size, - const unsigned num_letters); - - prob_t Likelihood() const; - - void ResampleHyperparameters(MT19937* rng); - prob_t Prob(WordID src, WordID trg) const; // return p(trg | src) - void Summary() const; - void Increment(WordID src, WordID trg, MT19937* rng); - void Decrement(WordID src, WordID trg, MT19937* rng); - unsigned UniqueConditioningContexts() const; - - private: - const std::vector<std::vector<WordID> >& letters; // spelling dictionary - PoissonUniformWordModel base; // "generator" of English types - ConditionalPYPWordModel<PoissonUniformWordModel, FreqBinner>* tmodel; // translation distributions - // (model English word | French word) - const WordID kX; -}; - -#endif diff --git a/gi/pf/pyp_word_model.h b/gi/pf/pyp_word_model.h deleted file mode 100644 index 0bebb751..00000000 --- a/gi/pf/pyp_word_model.h +++ /dev/null @@ -1,61 +0,0 @@ -#ifndef _PYP_WORD_MODEL_H_ -#define 
_PYP_WORD_MODEL_H_ - -#include <iostream> -#include <cmath> -#include <vector> -#include "prob.h" -#include "ccrp.h" -#include "m.h" -#include "tdict.h" -#include "os_phrase.h" - -// PYP(d,s,poisson-uniform) represented as a CRP -template <class Base> -struct PYPWordModel { - explicit PYPWordModel(Base* b) : - base(*b), - r(1,1,1,1,0.66,50.0) - {} - - void ResampleHyperparameters(MT19937* rng) { - r.resample_hyperparameters(rng); - std::cerr << " PYPWordModel(d=" << r.discount() << ",s=" << r.strength() << ")\n"; - } - - inline prob_t operator()(const std::vector<WordID>& s) const { - return r.prob(s, base(s)); - } - - inline void Increment(const std::vector<WordID>& s, MT19937* rng) { - if (r.increment(s, base(s), rng)) - base.Increment(s, rng); - } - - inline void Decrement(const std::vector<WordID>& s, MT19937 *rng) { - if (r.decrement(s, rng)) - base.Decrement(s, rng); - } - - inline prob_t Likelihood() const { - prob_t p; p.logeq(r.log_crp_prob()); - p *= base.Likelihood(); - return p; - } - - void Summary() const { - std::cerr << "PYPWordModel: generations=" << r.num_customers() - << " PYP(d=" << r.discount() << ",s=" << r.strength() << ')' << std::endl; - for (typename CCRP<std::vector<WordID> >::const_iterator it = r.begin(); it != r.end(); ++it) { - std::cerr << " " << it->second - << TD::GetString(it->first) << std::endl; - } - } - - private: - - Base& base; // keeps track of the draws from the base distribution - CCRP<std::vector<WordID> > r; -}; - -#endif diff --git a/gi/pf/quasi_model2.h b/gi/pf/quasi_model2.h deleted file mode 100644 index 4075affe..00000000 --- a/gi/pf/quasi_model2.h +++ /dev/null @@ -1,177 +0,0 @@ -#ifndef _QUASI_MODEL2_H_ -#define _QUASI_MODEL2_H_ - -#include <vector> -#include <cmath> -#include <tr1/unordered_map> -#include "boost/functional.hpp" -#include "prob.h" -#include "array2d.h" -#include "slice_sampler.h" -#include "m.h" -#include "have_64_bits.h" - -struct AlignmentObservation { - AlignmentObservation() : src_len(), trg_len(), j(), a_j() {} - AlignmentObservation(unsigned sl, unsigned tl, unsigned tw, unsigned sw) : - src_len(sl), trg_len(tl), j(tw), a_j(sw) {} - unsigned short src_len; - unsigned short trg_len; - unsigned short j; - unsigned short a_j; -}; - -#ifdef HAVE_64_BITS -inline size_t hash_value(const AlignmentObservation& o) { - return reinterpret_cast<const size_t&>(o); -} -inline bool operator==(const AlignmentObservation& a, const AlignmentObservation& b) { - return hash_value(a) == hash_value(b); -} -#else -inline size_t hash_value(const AlignmentObservation& o) { - size_t h = 1; - boost::hash_combine(h, o.src_len); - boost::hash_combine(h, o.trg_len); - boost::hash_combine(h, o.j); - boost::hash_combine(h, o.a_j); - return h; -} -#endif - -struct QuasiModel2 { - explicit QuasiModel2(double alpha, double pnull = 0.1) : - alpha_(alpha), - pnull_(pnull), - pnotnull_(1 - pnull) {} - - // a_j = 0 => NULL; src_len does *not* include null - prob_t Prob(unsigned a_j, unsigned j, unsigned src_len, unsigned trg_len) const { - if (!a_j) return pnull_; - return pnotnull_ * - prob_t(UnnormalizedProb(a_j, j, src_len, trg_len, alpha_) / GetOrComputeZ(j, src_len, trg_len)); - } - - void Increment(unsigned a_j, unsigned j, unsigned src_len, unsigned trg_len) { - assert(a_j <= src_len); - assert(j < trg_len); - ++obs_[AlignmentObservation(src_len, trg_len, j, a_j)]; - } - - void Decrement(unsigned a_j, unsigned j, unsigned src_len, unsigned trg_len) { - const AlignmentObservation ao(src_len, trg_len, j, a_j); - int &cc = obs_[ao]; - assert(cc > 
0); - --cc; - if (!cc) obs_.erase(ao); - } - - struct PNullResampler { - PNullResampler(const QuasiModel2& m) : m_(m) {} - const QuasiModel2& m_; - double operator()(const double& proposed_pnull) const { - return log(m_.Likelihood(m_.alpha_, proposed_pnull)); - } - }; - - struct AlphaResampler { - AlphaResampler(const QuasiModel2& m) : m_(m) {} - const QuasiModel2& m_; - double operator()(const double& proposed_alpha) const { - return log(m_.Likelihood(proposed_alpha, m_.pnull_.as_float())); - } - }; - - void ResampleHyperparameters(MT19937* rng, const unsigned nloop = 5, const unsigned niterations = 10) { - const PNullResampler dr(*this); - const AlphaResampler ar(*this); - for (unsigned i = 0; i < nloop; ++i) { - double pnull = slice_sampler1d(dr, pnull_.as_float(), *rng, 0.00000001, - 1.0, 0.0, niterations, 100*niterations); - pnull_ = prob_t(pnull); - alpha_ = slice_sampler1d(ar, alpha_, *rng, 0.00000001, - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - } - std::cerr << "QuasiModel2(alpha=" << alpha_ << ",p_null=" - << pnull_.as_float() << ") = " << Likelihood() << std::endl; - zcache_.clear(); - } - - prob_t Likelihood() const { - return Likelihood(alpha_, pnull_.as_float()); - } - - prob_t Likelihood(double alpha, double ppnull) const { - const prob_t pnull(ppnull); - const prob_t pnotnull(1 - ppnull); - - prob_t p; - p.logeq(Md::log_gamma_density(alpha, 0.1, 25)); // TODO configure - assert(!p.is_0()); - prob_t prob_of_ppnull; prob_of_ppnull.logeq(Md::log_beta_density(ppnull, 2, 10)); - assert(!prob_of_ppnull.is_0()); - p *= prob_of_ppnull; - for (ObsCount::const_iterator it = obs_.begin(); it != obs_.end(); ++it) { - const AlignmentObservation& ao = it->first; - if (ao.a_j) { - prob_t u = XUnnormalizedProb(ao.a_j, ao.j, ao.src_len, ao.trg_len, alpha); - prob_t z = XComputeZ(ao.j, ao.src_len, ao.trg_len, alpha); - prob_t pa(u / z); - pa *= pnotnull; - pa.poweq(it->second); - p *= pa; - } else { - p *= pnull.pow(it->second); - } - } - return p; - } - - private: - static prob_t XUnnormalizedProb(unsigned a_j, unsigned j, unsigned src_len, unsigned trg_len, double alpha) { - prob_t p; - p.logeq(-fabs(double(a_j - 1) / src_len - double(j) / trg_len) * alpha); - return p; - } - - static prob_t XComputeZ(unsigned j, unsigned src_len, unsigned trg_len, double alpha) { - prob_t z = prob_t::Zero(); - for (int a_j = 1; a_j <= src_len; ++a_j) - z += XUnnormalizedProb(a_j, j, src_len, trg_len, alpha); - return z; - } - - static double UnnormalizedProb(unsigned a_j, unsigned j, unsigned src_len, unsigned trg_len, double alpha) { - return exp(-fabs(double(a_j - 1) / src_len - double(j) / trg_len) * alpha); - } - - static double ComputeZ(unsigned j, unsigned src_len, unsigned trg_len, double alpha) { - double z = 0; - for (int a_j = 1; a_j <= src_len; ++a_j) - z += UnnormalizedProb(a_j, j, src_len, trg_len, alpha); - return z; - } - - const double& GetOrComputeZ(unsigned j, unsigned src_len, unsigned trg_len) const { - if (src_len >= zcache_.size()) - zcache_.resize(src_len + 1); - if (trg_len >= zcache_[src_len].size()) - zcache_[src_len].resize(trg_len + 1); - std::vector<double>& zv = zcache_[src_len][trg_len]; - if (zv.size() == 0) - zv.resize(trg_len); - double& z = zv[j]; - if (!z) - z = ComputeZ(j, src_len, trg_len, alpha_); - return z; - } - - double alpha_; - prob_t pnull_; - prob_t pnotnull_; - mutable std::vector<std::vector<std::vector<double> > > zcache_; - typedef std::tr1::unordered_map<AlignmentObservation, int, boost::hash<AlignmentObservation> > 
ObsCount; - ObsCount obs_; -}; - -#endif diff --git a/gi/pf/reachability.cc b/gi/pf/reachability.cc deleted file mode 100644 index 7d0d04ac..00000000 --- a/gi/pf/reachability.cc +++ /dev/null @@ -1,74 +0,0 @@ -#include "reachability.h" - -#include <vector> -#include <iostream> - -using namespace std; - -struct SState { - SState() : prev_src_covered(), prev_trg_covered() {} - SState(int i, int j) : prev_src_covered(i), prev_trg_covered(j) {} - int prev_src_covered; - int prev_trg_covered; -}; - -void Reachability::ComputeReachability(int srclen, int trglen, int src_max_phrase_len, int trg_max_phrase_len) { - typedef boost::multi_array<vector<SState>, 2> array_type; - array_type a(boost::extents[srclen + 1][trglen + 1]); - a[0][0].push_back(SState()); - for (int i = 0; i < srclen; ++i) { - for (int j = 0; j < trglen; ++j) { - if (a[i][j].size() == 0) continue; - const SState prev(i,j); - for (int k = 1; k <= src_max_phrase_len; ++k) { - if ((i + k) > srclen) continue; - for (int l = 1; l <= trg_max_phrase_len; ++l) { - if ((j + l) > trglen) continue; - a[i + k][j + l].push_back(prev); - } - } - } - } - a[0][0].clear(); - //cerr << srclen << "," << trglen << ": Final cell contains " << a[srclen][trglen].size() << " back pointers\n"; - if (a[srclen][trglen].empty()) { - cerr << "Sequence pair with lengths (" << srclen << ',' << trglen << ") violates reachability constraints\n"; - nodes = 0; - return; - } - - typedef boost::multi_array<bool, 2> rarray_type; - rarray_type r(boost::extents[srclen + 1][trglen + 1]); - r[srclen][trglen] = true; - nodes = 0; - for (int i = srclen; i >= 0; --i) { - for (int j = trglen; j >= 0; --j) { - vector<SState>& prevs = a[i][j]; - if (!r[i][j]) { prevs.clear(); } - for (int k = 0; k < prevs.size(); ++k) { - r[prevs[k].prev_src_covered][prevs[k].prev_trg_covered] = true; - int src_delta = i - prevs[k].prev_src_covered; - edges[prevs[k].prev_src_covered][prevs[k].prev_trg_covered][src_delta][j - prevs[k].prev_trg_covered] = true; - valid_deltas[prevs[k].prev_src_covered][prevs[k].prev_trg_covered].push_back(make_pair<short,short>(src_delta,j - prevs[k].prev_trg_covered)); - short &msd = max_src_delta[prevs[k].prev_src_covered][prevs[k].prev_trg_covered]; - if (src_delta > msd) msd = src_delta; - } - } - } - assert(!edges[0][0][1][0]); - assert(!edges[0][0][0][1]); - assert(!edges[0][0][0][0]); - assert(max_src_delta[0][0] > 0); - nodes = 0; - for (int i = 0; i < srclen; ++i) { - for (int j = 0; j < trglen; ++j) { - if (valid_deltas[i][j].size() > 0) { - node_addresses[i][j] = nodes++; - } else { - node_addresses[i][j] = -1; - } - } - } - cerr << "Sequence pair with lengths (" << srclen << ',' << trglen << ") has " << valid_deltas[0][0].size() << " out edges in its root node, " << nodes << " nodes in total, and outside estimate matrix will require " << sizeof(float)*nodes << " bytes\n"; - } - diff --git a/gi/pf/reachability.h b/gi/pf/reachability.h deleted file mode 100644 index 1e22c76a..00000000 --- a/gi/pf/reachability.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef _REACHABILITY_H_ -#define _REACHABILITY_H_ - -#include "boost/multi_array.hpp" - -// determines minimum and maximum lengths of outgoing edges from all -// coverage positions such that the alignment path respects src and -// trg maximum phrase sizes -// -// runs in O(n^2 * src_max * trg_max) time but should be relatively fast -// -// currently forbids 0 -> n and n -> 0 alignments - -struct Reachability { - unsigned nodes; - boost::multi_array<bool, 4> edges; // 
edges[src_covered][trg_covered][src_delta][trg_delta] is this edge worth exploring? - boost::multi_array<short, 2> max_src_delta; // msd[src_covered][trg_covered] -- the largest src delta that's valid - boost::multi_array<short, 2> node_addresses; // na[src_covered][trg_covered] -- the index of the node in a one-dimensional array (of size "nodes") - boost::multi_array<std::vector<std::pair<short,short> >, 2> valid_deltas; // valid_deltas[src_covered][trg_covered] list of valid transitions leaving a particular node - - Reachability(int srclen, int trglen, int src_max_phrase_len, int trg_max_phrase_len) : - nodes(), - edges(boost::extents[srclen][trglen][src_max_phrase_len+1][trg_max_phrase_len+1]), - max_src_delta(boost::extents[srclen][trglen]), - node_addresses(boost::extents[srclen][trglen]), - valid_deltas(boost::extents[srclen][trglen]) { - ComputeReachability(srclen, trglen, src_max_phrase_len, trg_max_phrase_len); - } - - private: - void ComputeReachability(int srclen, int trglen, int src_max_phrase_len, int trg_max_phrase_len); -}; - -#endif diff --git a/gi/pf/tied_resampler.h b/gi/pf/tied_resampler.h deleted file mode 100644 index a4f4af36..00000000 --- a/gi/pf/tied_resampler.h +++ /dev/null @@ -1,122 +0,0 @@ -#ifndef _TIED_RESAMPLER_H_ -#define _TIED_RESAMPLER_H_ - -#include <set> -#include <vector> -#include "sampler.h" -#include "slice_sampler.h" -#include "m.h" - -template <class CRP> -struct TiedResampler { - explicit TiedResampler(double da, double db, double ss, double sr, double d=0.5, double s=1.0) : - d_alpha(da), - d_beta(db), - s_shape(ss), - s_rate(sr), - discount(d), - strength(s) {} - - void Add(CRP* crp) { - crps.insert(crp); - crp->set_discount(discount); - crp->set_strength(strength); - assert(!crp->has_discount_prior()); - assert(!crp->has_strength_prior()); - } - - void Remove(CRP* crp) { - crps.erase(crp); - } - - size_t size() const { - return crps.size(); - } - - double LogLikelihood(double d, double s) const { - if (s <= -d) return -std::numeric_limits<double>::infinity(); - double llh = Md::log_beta_density(d, d_alpha, d_beta) + - Md::log_gamma_density(d + s, s_shape, s_rate); - for (typename std::set<CRP*>::iterator it = crps.begin(); it != crps.end(); ++it) - llh += (*it)->log_crp_prob(d, s); - return llh; - } - - double LogLikelihood() const { - return LogLikelihood(discount, strength); - } - - struct DiscountResampler { - DiscountResampler(const TiedResampler& m) : m_(m) {} - const TiedResampler& m_; - double operator()(const double& proposed_discount) const { - return m_.LogLikelihood(proposed_discount, m_.strength); - } - }; - - struct AlphaResampler { - AlphaResampler(const TiedResampler& m) : m_(m) {} - const TiedResampler& m_; - double operator()(const double& proposed_strength) const { - return m_.LogLikelihood(m_.discount, proposed_strength); - } - }; - - void ResampleHyperparameters(MT19937* rng, const unsigned nloop = 5, const unsigned niterations = 10) { - if (size() == 0) { std::cerr << "EMPTY - not resampling\n"; return; } - const DiscountResampler dr(*this); - const AlphaResampler ar(*this); - for (int iter = 0; iter < nloop; ++iter) { - strength = slice_sampler1d(ar, strength, *rng, -discount + std::numeric_limits<double>::min(), - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - double min_discount = std::numeric_limits<double>::min(); - if (strength < 0.0) min_discount -= strength; - discount = slice_sampler1d(dr, discount, *rng, min_discount, - 1.0, 0.0, niterations, 100*niterations); - } - strength = 
slice_sampler1d(ar, strength, *rng, -discount + std::numeric_limits<double>::min(), - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - std::cerr << "TiedCRPs(d=" << discount << ",s=" - << strength << ") = " << LogLikelihood(discount, strength) << std::endl; - for (typename std::set<CRP*>::iterator it = crps.begin(); it != crps.end(); ++it) - (*it)->set_hyperparameters(discount, strength); - } - private: - std::set<CRP*> crps; - const double d_alpha, d_beta, s_shape, s_rate; - double discount, strength; -}; - -// split according to some criterion -template <class CRP> -struct BinTiedResampler { - explicit BinTiedResampler(unsigned nbins) : - resamplers(nbins, TiedResampler<CRP>(1,1,1,1)) {} - - void Add(unsigned bin, CRP* crp) { - resamplers[bin].Add(crp); - } - - void Remove(unsigned bin, CRP* crp) { - resamplers[bin].Remove(crp); - } - - void ResampleHyperparameters(MT19937* rng) { - for (unsigned i = 0; i < resamplers.size(); ++i) { - std::cerr << "BIN " << i << " (" << resamplers[i].size() << " CRPs): " << std::flush; - resamplers[i].ResampleHyperparameters(rng); - } - } - - double LogLikelihood() const { - double llh = 0; - for (unsigned i = 0; i < resamplers.size(); ++i) - llh += resamplers[i].LogLikelihood(); - return llh; - } - - private: - std::vector<TiedResampler<CRP> > resamplers; -}; - -#endif diff --git a/gi/pf/tpf.cc b/gi/pf/tpf.cc deleted file mode 100644 index 7348d21c..00000000 --- a/gi/pf/tpf.cc +++ /dev/null @@ -1,99 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include "sampler.h" - -using namespace std; -using namespace tr1; - -shared_ptr<MT19937> prng; - -struct Particle { - Particle() : weight(prob_t::One()) {} - vector<int> states; - prob_t weight; - prob_t gamma_last; -}; - -ostream& operator<<(ostream& os, const Particle& p) { - os << "["; - for (int i = 0; i < p.states.size(); ++i) os << p.states[i] << ' '; - os << "| w=" << log(p.weight) << ']'; - return os; -} - -void Rejuvenate(vector<Particle>& pps) { - SampleSet<prob_t> ss; - vector<Particle> nps(pps.size()); - for (int i = 0; i < pps.size(); ++i) { -// cerr << pps[i] << endl; - ss.add(pps[i].weight); - } -// cerr << "REJUVINATING...\n"; - for (int i = 0; i < pps.size(); ++i) { - nps[i] = pps[prng->SelectSample(ss)]; - nps[i].weight = prob_t(1.0 / pps.size()); -// cerr << nps[i] << endl; - } - nps.swap(pps); -// exit(1); -} - -int main(int argc, char** argv) { - const unsigned particles = 100; - prng.reset(new MT19937); - MT19937& rng = *prng; - - // q(a) = 0.8 - // q(b) = 0.8 - // q(c) = 0.4 - SampleSet<double> ssq; - ssq.add(0.4); - ssq.add(0.6); - ssq.add(0); - double qz = 1; - - // p(a) = 0.2 - // p(b) = 0.8 - vector<double> p(3); - p[0] = 0.2; - p[1] = 0.8; - p[2] = 0; - - vector<int> counts(3); - int tot = 0; - - vector<Particle> pps(particles); - SampleSet<prob_t> ppss; - int LEN = 12; - int PP = 1; - while (pps[0].states.size() < LEN) { - for (int pi = 0; pi < particles; ++pi) { - Particle& prt = pps[pi]; - - bool redo = true; - const Particle savedp = prt; - while (redo) { - redo = false; - for (int i = 0; i < PP; ++i) { - int s = rng.SelectSample(ssq); - double gamma_last = p[s]; - if (!gamma_last) { redo = true; break; } - double q = ssq[s] / qz; - prt.states.push_back(s); - prt.weight *= prob_t(gamma_last / q); - } - if (redo) { prt = savedp; continue; } - } - } - Rejuvenate(pps); - } - ppss.clear(); - for (int i = 0; i < particles; ++i) { ppss.add(pps[i].weight); } - int sp = rng.SelectSample(ppss); - cerr << pps[sp] << endl; - - return 
0;
-}
-
diff --git a/gi/pf/transliterations.cc b/gi/pf/transliterations.cc
deleted file mode 100644
index b2996f65..00000000
--- a/gi/pf/transliterations.cc
+++ /dev/null
@@ -1,334 +0,0 @@
-#include "transliterations.h"
-
-#include <iostream>
-#include <vector>
-
-#include "boost/shared_ptr.hpp"
-
-#include "backward.h"
-#include "filelib.h"
-#include "tdict.h"
-#include "trule.h"
-#include "ccrp_nt.h"
-#include "m.h"
-#include "reachability.h"
-
-using namespace std;
-using namespace std::tr1;
-
-struct TruncatedConditionalLengthModel {
-  TruncatedConditionalLengthModel(unsigned max_src_size, unsigned max_trg_size, double expected_src_to_trg_ratio) :
-      plens(max_src_size+1, vector<prob_t>(max_trg_size+1, 0.0)) {
-    for (unsigned i = 1; i <= max_src_size; ++i) {
-      prob_t z = prob_t::Zero();
-      for (unsigned j = 1; j <= max_trg_size; ++j)
-        z += (plens[i][j] = prob_t(0.01 + exp(Md::log_poisson(j, i * expected_src_to_trg_ratio))));
-      for (unsigned j = 1; j <= max_trg_size; ++j)
-        plens[i][j] /= z;
-      //for (unsigned j = 1; j <= max_trg_size; ++j)
-      //  cerr << "P(trg_len=" << j << " | src_len=" << i << ") = " << plens[i][j] << endl;
-    }
-  }
-
-  // return p(tlen | slen) for *chunks* not full words
-  inline const prob_t& operator()(int slen, int tlen) const {
-    return plens[slen][tlen];
-  }
-
-  vector<vector<prob_t> > plens;
-};
-
-struct CondBaseDist {
-  CondBaseDist(unsigned max_src_size, unsigned max_trg_size, double expected_src_to_trg_ratio) :
-      tclm(max_src_size, max_trg_size, expected_src_to_trg_ratio) {}
-
-  prob_t operator()(const vector<WordID>& src, unsigned sf, unsigned st,
-                    const vector<WordID>& trg, unsigned tf, unsigned tt) const {
-    prob_t p = tclm(st - sf, tt - tf);  // target len | source length ~ TCLM(source len)
-    assert(!"not impl");
-    return p;
-  }
-  inline prob_t operator()(const vector<WordID>& src, const vector<WordID>& trg) const {
-    return (*this)(src, 0, src.size(), trg, 0, trg.size());
-  }
-  TruncatedConditionalLengthModel tclm;
-};
-
-// represents transliteration phrase probabilities, e.g.
-//   p( a l - | A l ) , p( o | A w ) , ... 
-struct TransliterationChunkConditionalModel { - explicit TransliterationChunkConditionalModel(const CondBaseDist& pp0) : - d(0.0), - strength(1.0), - rp0(pp0) { - } - - void Summary() const { - std::cerr << "Number of conditioning contexts: " << r.size() << std::endl; - for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) { - std::cerr << TD::GetString(it->first) << " \t(\\alpha = " << it->second.alpha() << ") --------------------------" << std::endl; - for (CCRP_NoTable<TRule>::const_iterator i2 = it->second.begin(); i2 != it->second.end(); ++i2) - std::cerr << " " << i2->second << '\t' << i2->first << std::endl; - } - } - - int DecrementRule(const TRule& rule) { - RuleModelHash::iterator it = r.find(rule.f_); - assert(it != r.end()); - int count = it->second.decrement(rule); - if (count) { - if (it->second.num_customers() == 0) r.erase(it); - } - return count; - } - - int IncrementRule(const TRule& rule) { - RuleModelHash::iterator it = r.find(rule.f_); - if (it == r.end()) { - it = r.insert(make_pair(rule.f_, CCRP_NoTable<TRule>(strength))).first; - } - int count = it->second.increment(rule); - return count; - } - - void IncrementRules(const std::vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - IncrementRule(*rules[i]); - } - - void DecrementRules(const std::vector<TRulePtr>& rules) { - for (int i = 0; i < rules.size(); ++i) - DecrementRule(*rules[i]); - } - - prob_t RuleProbability(const TRule& rule) const { - prob_t p; - RuleModelHash::const_iterator it = r.find(rule.f_); - if (it == r.end()) { - p = rp0(rule.f_, rule.e_); - } else { - p = it->second.prob(rule, rp0(rule.f_, rule.e_)); - } - return p; - } - - double LogLikelihood(const double& dd, const double& aa) const { - if (aa <= -dd) return -std::numeric_limits<double>::infinity(); - //double llh = Md::log_beta_density(dd, 10, 3) + Md::log_gamma_density(aa, 1, 1); - double llh = //Md::log_beta_density(dd, 1, 1) + - Md::log_gamma_density(dd + aa, 1, 1); - std::tr1::unordered_map<std::vector<WordID>, CCRP_NoTable<TRule>, boost::hash<std::vector<WordID> > >::const_iterator it; - for (it = r.begin(); it != r.end(); ++it) - llh += it->second.log_crp_prob(aa); - return llh; - } - - struct AlphaResampler { - AlphaResampler(const TransliterationChunkConditionalModel& m) : m_(m) {} - const TransliterationChunkConditionalModel& m_; - double operator()(const double& proposed_strength) const { - return m_.LogLikelihood(m_.d, proposed_strength); - } - }; - - void ResampleHyperparameters(MT19937* rng) { - std::tr1::unordered_map<std::vector<WordID>, CCRP_NoTable<TRule>, boost::hash<std::vector<WordID> > >::iterator it; - //const unsigned nloop = 5; - const unsigned niterations = 10; - //DiscountResampler dr(*this); - AlphaResampler ar(*this); -#if 0 - for (int iter = 0; iter < nloop; ++iter) { - strength = slice_sampler1d(ar, strength, *rng, -d + std::numeric_limits<double>::min(), - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - double min_discount = std::numeric_limits<double>::min(); - if (strength < 0.0) min_discount -= strength; - d = slice_sampler1d(dr, d, *rng, min_discount, - 1.0, 0.0, niterations, 100*niterations); - } -#endif - strength = slice_sampler1d(ar, strength, *rng, -d, - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - std::cerr << "CTMModel(alpha=" << strength << ") = " << LogLikelihood(d, strength) << std::endl; - for (it = r.begin(); it != r.end(); ++it) { -#if 0 - it->second.set_discount(d); -#endif - 
it->second.set_alpha(strength); - } - } - - prob_t Likelihood() const { - prob_t p; p.logeq(LogLikelihood(d, strength)); - return p; - } - - const CondBaseDist& rp0; - typedef std::tr1::unordered_map<std::vector<WordID>, - CCRP_NoTable<TRule>, - boost::hash<std::vector<WordID> > > RuleModelHash; - RuleModelHash r; - double d, strength; -}; - -struct GraphStructure { - GraphStructure() : r() {} - // leak memory - these are basically static - const Reachability* r; - bool IsReachable() const { return r->nodes > 0; } -}; - -struct ProbabilityEstimates { - ProbabilityEstimates() : gs(), backward() {} - explicit ProbabilityEstimates(const GraphStructure& g) : - gs(&g), backward() { - if (g.r->nodes > 0) - backward = new float[g.r->nodes]; - } - // leak memory, these are static - - // returns an estimate of the marginal probability - double MarginalEstimate() const { - if (!backward) return 0; - return backward[0]; - } - - // returns an backward estimate - double Backward(int src_covered, int trg_covered) const { - if (!backward) return 0; - int ind = gs->r->node_addresses[src_covered][trg_covered]; - if (ind < 0) return 0; - return backward[ind]; - } - - prob_t estp; - float* backward; - private: - const GraphStructure* gs; -}; - -struct TransliterationsImpl { - TransliterationsImpl(int max_src, int max_trg, double sr, const BackwardEstimator& b) : - cp0(max_src, max_trg, sr), - tccm(cp0), - be(b), - kMAX_SRC_CHUNK(max_src), - kMAX_TRG_CHUNK(max_trg), - kS2T_RATIO(sr), - tot_pairs(), tot_mem() { - } - const CondBaseDist cp0; - TransliterationChunkConditionalModel tccm; - const BackwardEstimator& be; - - void Initialize(WordID src, const vector<WordID>& src_lets, WordID trg, const vector<WordID>& trg_lets) { - const size_t src_len = src_lets.size(); - const size_t trg_len = trg_lets.size(); - - // init graph structure - if (src_len >= graphs.size()) graphs.resize(src_len + 1); - if (trg_len >= graphs[src_len].size()) graphs[src_len].resize(trg_len + 1); - GraphStructure& gs = graphs[src_len][trg_len]; - if (!gs.r) { - double rat = exp(fabs(log(trg_len / (src_len * kS2T_RATIO)))); - if (rat > 1.5 || (rat > 2.4 && src_len < 6)) { - cerr << " ** Forbidding transliterations of size " << src_len << "," << trg_len << ": " << rat << endl; - gs.r = new Reachability(src_len, trg_len, 0, 0); - } else { - gs.r = new Reachability(src_len, trg_len, kMAX_SRC_CHUNK, kMAX_TRG_CHUNK); - } - } - - const Reachability& r = *gs.r; - - // init backward estimates - if (src >= ests.size()) ests.resize(src + 1); - unordered_map<WordID, ProbabilityEstimates>::iterator it = ests[src].find(trg); - if (it != ests[src].end()) return; // already initialized - - it = ests[src].insert(make_pair(trg, ProbabilityEstimates(gs))).first; - ProbabilityEstimates& est = it->second; - if (!gs.r->nodes) return; // not derivable subject to length constraints - - be.InitializeGrid(src_lets, trg_lets, r, kS2T_RATIO, est.backward); - cerr << TD::GetString(src_lets) << " ||| " << TD::GetString(trg_lets) << " ||| " << (est.backward[0] / trg_lets.size()) << endl; - tot_pairs++; - tot_mem += sizeof(float) * gs.r->nodes; - } - - void Forbid(WordID src, const vector<WordID>& src_lets, WordID trg, const vector<WordID>& trg_lets) { - const size_t src_len = src_lets.size(); - const size_t trg_len = trg_lets.size(); - // TODO - } - - prob_t EstimateProbability(WordID s, const vector<WordID>& src, WordID t, const vector<WordID>& trg) const { - assert(src.size() < graphs.size()); - const vector<GraphStructure>& tv = graphs[src.size()]; - 
assert(trg.size() < tv.size()); - const GraphStructure& gs = tv[trg.size()]; - if (gs.r->nodes == 0) - return prob_t::Zero(); - const unordered_map<WordID, ProbabilityEstimates>::const_iterator it = ests[s].find(t); - assert(it != ests[s].end()); - return it->second.estp; - } - - void GraphSummary() const { - double to = 0; - double tn = 0; - double tt = 0; - for (int i = 0; i < graphs.size(); ++i) { - const vector<GraphStructure>& vt = graphs[i]; - for (int j = 0; j < vt.size(); ++j) { - const GraphStructure& gs = vt[j]; - if (!gs.r) continue; - tt++; - for (int k = 0; k < i; ++k) { - for (int l = 0; l < j; ++l) { - size_t c = gs.r->valid_deltas[k][l].size(); - if (c) { - tn += 1; - to += c; - } - } - } - } - } - cerr << " Average nodes = " << (tn / tt) << endl; - cerr << "Average out-degree = " << (to / tn) << endl; - cerr << " Unique structures = " << tt << endl; - cerr << " Unique pairs = " << tot_pairs << endl; - cerr << " BEs size = " << (tot_mem / (1024.0*1024.0)) << " MB" << endl; - } - - const int kMAX_SRC_CHUNK; - const int kMAX_TRG_CHUNK; - const double kS2T_RATIO; - unsigned tot_pairs; - size_t tot_mem; - vector<vector<GraphStructure> > graphs; // graphs[src_len][trg_len] - vector<unordered_map<WordID, ProbabilityEstimates> > ests; // ests[src][trg] -}; - -Transliterations::Transliterations(int max_src, int max_trg, double sr, const BackwardEstimator& be) : - pimpl_(new TransliterationsImpl(max_src, max_trg, sr, be)) {} -Transliterations::~Transliterations() { delete pimpl_; } - -void Transliterations::Initialize(WordID src, const vector<WordID>& src_lets, WordID trg, const vector<WordID>& trg_lets) { - pimpl_->Initialize(src, src_lets, trg, trg_lets); -} - -prob_t Transliterations::EstimateProbability(WordID s, const vector<WordID>& src, WordID t, const vector<WordID>& trg) const { - return pimpl_->EstimateProbability(s, src,t, trg); -} - -void Transliterations::Forbid(WordID src, const vector<WordID>& src_lets, WordID trg, const vector<WordID>& trg_lets) { - pimpl_->Forbid(src, src_lets, trg, trg_lets); -} - -void Transliterations::GraphSummary() const { - pimpl_->GraphSummary(); -} - diff --git a/gi/pf/transliterations.h b/gi/pf/transliterations.h deleted file mode 100644 index 49d14684..00000000 --- a/gi/pf/transliterations.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef _TRANSLITERATIONS_H_ -#define _TRANSLITERATIONS_H_ - -#include <vector> -#include "wordid.h" -#include "prob.h" - -struct BackwardEstimator; -struct TransliterationsImpl; -struct Transliterations { - // max_src and max_trg indicate how big the transliteration phrases can be - // see reachability.h for information about filter_ratio - explicit Transliterations(int max_src, int max_trg, double s2t_rat, const BackwardEstimator& be); - ~Transliterations(); - void Initialize(WordID src, const std::vector<WordID>& src_lets, WordID trg, const std::vector<WordID>& trg_lets); - void Forbid(WordID src, const std::vector<WordID>& src_lets, WordID trg, const std::vector<WordID>& trg_lets); - void GraphSummary() const; - prob_t EstimateProbability(WordID s, const std::vector<WordID>& src, WordID t, const std::vector<WordID>& trg) const; - private: - TransliterationsImpl* pimpl_; -}; - -#endif - diff --git a/gi/pf/unigrams.cc b/gi/pf/unigrams.cc deleted file mode 100644 index 40829775..00000000 --- a/gi/pf/unigrams.cc +++ /dev/null @@ -1,80 +0,0 @@ -#include "unigrams.h" - -#include <string> -#include <cmath> - -#include "stringlib.h" -#include "filelib.h" - -using namespace std; - -void UnigramModel::LoadUnigrams(const 
string& fname) { - cerr << "Loading unigram probabilities from " << fname << " ..." << endl; - ReadFile rf(fname); - string line; - istream& in = *rf.stream(); - assert(in); - getline(in, line); - assert(line.empty()); - getline(in, line); - assert(line == "\\data\\"); - getline(in, line); - size_t pos = line.find("ngram 1="); - assert(pos == 0); - assert(line.size() > 8); - const size_t num_unigrams = atoi(&line[8]); - getline(in, line); - assert(line.empty()); - getline(in, line); - assert(line == "\\1-grams:"); - for (size_t i = 0; i < num_unigrams; ++i) { - getline(in, line); - assert(line.size() > 0); - pos = line.find('\t'); - assert(pos > 0); - assert(pos + 1 < line.size()); - const WordID w = TD::Convert(line.substr(pos + 1)); - line[pos] = 0; - float p = atof(&line[0]); - if (w < probs_.size()) probs_[w].logeq(p * log(10)); else cerr << "WARNING: don't know about '" << TD::Convert(w) << "'\n"; - } -} - -void UnigramWordModel::LoadUnigrams(const string& fname) { - cerr << "Loading unigram probabilities from " << fname << " ..." << endl; - ReadFile rf(fname); - string line; - istream& in = *rf.stream(); - assert(in); - getline(in, line); - assert(line.empty()); - getline(in, line); - assert(line == "\\data\\"); - getline(in, line); - size_t pos = line.find("ngram 1="); - assert(pos == 0); - assert(line.size() > 8); - const size_t num_unigrams = atoi(&line[8]); - getline(in, line); - assert(line.empty()); - getline(in, line); - assert(line == "\\1-grams:"); - for (size_t i = 0; i < num_unigrams; ++i) { - getline(in, line); - assert(line.size() > 0); - pos = line.find('\t'); - assert(pos > 0); - assert(pos + 1 < line.size()); - size_t cur = pos + 1; - vector<WordID> w; - while (cur < line.size()) { - const size_t len = UTF8Len(line[cur]); - w.push_back(TD::Convert(line.substr(cur, len))); - cur += len; - } - line[pos] = 0; - float p = atof(&line[0]); - probs_[w].logeq(p * log(10.0)); - } -} - diff --git a/gi/pf/unigrams.h b/gi/pf/unigrams.h deleted file mode 100644 index 1660d1ed..00000000 --- a/gi/pf/unigrams.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef _UNIGRAMS_H_ -#define _UNIGRAMS_H_ - -#include <vector> -#include <string> -#include <tr1/unordered_map> -#include <boost/functional.hpp> - -#include "wordid.h" -#include "prob.h" -#include "tdict.h" - -struct UnigramModel { - explicit UnigramModel(const std::string& fname, unsigned vocab_size) : - use_uniform_(fname.size() == 0), - uniform_(1.0 / vocab_size), - probs_() { - if (fname.size() > 0) { - probs_.resize(TD::NumWords() + 1); - LoadUnigrams(fname); - } - } - - const prob_t& operator()(const WordID& w) const { - assert(w); - if (use_uniform_) return uniform_; - return probs_[w]; - } - - private: - void LoadUnigrams(const std::string& fname); - - const bool use_uniform_; - const prob_t uniform_; - std::vector<prob_t> probs_; -}; - - -// reads an ARPA unigram file and converts words like 'cat' into a string 'c a t' -struct UnigramWordModel { - explicit UnigramWordModel(const std::string& fname) : - use_uniform_(false), - uniform_(1.0), - probs_() { - LoadUnigrams(fname); - } - - explicit UnigramWordModel(const unsigned vocab_size) : - use_uniform_(true), - uniform_(1.0 / vocab_size), - probs_() {} - - const prob_t& operator()(const std::vector<WordID>& s) const { - if (use_uniform_) return uniform_; - const VectorProbHash::const_iterator it = probs_.find(s); - assert(it != probs_.end()); - return it->second; - } - - private: - void LoadUnigrams(const std::string& fname); - - const bool use_uniform_; - const prob_t uniform_; - 
typedef std::tr1::unordered_map<std::vector<WordID>, prob_t, boost::hash<std::vector<WordID> > > VectorProbHash;
-  VectorProbHash probs_;
-};
-
-#endif
diff --git a/gi/pipeline/OLD.clsp.config b/gi/pipeline/OLD.clsp.config
deleted file mode 100644
index cd0f9d65..00000000
--- a/gi/pipeline/OLD.clsp.config
+++ /dev/null
@@ -1,9 +0,0 @@
-# THIS FILE GIVES THE LOCATIONS OF THE CORPORA USED
-# name path aligned-corpus LM xfeats.grammar dev dev-refs test1 test1-eval.sh ...
-btec /export/ws10smt/data/btec/ split.zh-en.al lm/en.3gram.lm.gz xgrammar/grammar.gz devtest/devset1_2.zh devtest/devset1_2.lc.en* devtest/devset3.zh eval-devset3.sh
-fbis /export/ws10smt/data/chinese-english.fbis corpus.zh-en.al
-zhen /export/ws10smt/data/chinese-english corpus.zh-en.al
-aren /export/ws10smt/data/arabic-english corpus.ar-en.al
-uren /export/ws10smt/data/urdu-english corpus.ur-en.al
-nlfr /export/ws10smt/data/dutch-french corpus.nl-fr.al
-
diff --git a/gi/pipeline/OLD.evaluation-pipeline.pl b/gi/pipeline/OLD.evaluation-pipeline.pl
deleted file mode 100755
index 49c303eb..00000000
--- a/gi/pipeline/OLD.evaluation-pipeline.pl
+++ /dev/null
@@ -1,277 +0,0 @@
-#!/usr/bin/perl -w
-use strict;
-use Getopt::Long;
-use Cwd;
-my $CWD = getcwd;
-
-my $SCRIPT_DIR; BEGIN { use Cwd qw/ abs_path /; use File::Basename; $SCRIPT_DIR = dirname(abs_path($0)); push @INC, $SCRIPT_DIR; }
-
-my @DEFAULT_FEATS = qw(
-  LogRuleCount SingletonRule LexE2F LexF2E WordPenalty
-  LogFCount LanguageModel Glue GlueTop PassThrough SingletonF
-);
-
-my %init_weights = qw(
-  LogRuleCount 0.2
-  LexE2F -0.3
-  LexF2E -0.3
-  LogFCount 0.1
-  WordPenalty -1.5
-  LanguageModel 1.2
-  Glue -1.0
-  GlueTop 0.00001
-  PassThrough -10.0
-  SingletonRule -0.1
-  X_EGivenF -0.3
-  X_FGivenE -0.3
-  X_LogECount -1
-  X_LogFCount -0.1
-  X_LogRuleCount 0.3
-  X_SingletonE -0.1
-  X_SingletonF -0.1
-  X_SingletonRule -0.5
-);
-
-my $CDEC = "$SCRIPT_DIR/../../decoder/cdec";
-my $PARALLELIZE = "$SCRIPT_DIR/../../vest/parallelize.pl";
-my $EXTOOLS = "$SCRIPT_DIR/../../extools";
-die "Can't find extools: $EXTOOLS" unless -e $EXTOOLS && -d $EXTOOLS;
-my $VEST = "$SCRIPT_DIR/../../vest";
-die "Can't find vest: $VEST" unless -e $VEST && -d $VEST;
-my $DISTVEST = "$VEST/dist-vest.pl";
-my $FILTSCORE = "$EXTOOLS/filter_score_grammar";
-my $ADDXFEATS = "$SCRIPT_DIR/scripts/xfeats.pl";
-assert_exec($CDEC, $PARALLELIZE, $FILTSCORE, $DISTVEST, $ADDXFEATS);
-
-my $config = "$SCRIPT_DIR/OLD.clsp.config";
-print STDERR "CORPORA CONFIGURATION: $config\n";
-open CONF, "<$config" or die "Can't read $config: $!";
-my %paths;
-my %corpora;
-my %lms;
-my %devs;
-my %devrefs;
-my %tests;
-my %testevals;
-my %xgrammars;
-print STDERR "  LANGUAGE PAIRS:";
-while(<CONF>) {
-  chomp;
-  next if /^#/;
-  next if /^\s*$/;
-  s/^\s+//;
-  s/\s+$//;
-  my ($name, $path, $corpus, $lm, $xgrammar, $dev, $devref, @xtests) = split /\s+/;
-  $paths{$name} = $path;
-  $corpora{$name} = $corpus;
-  $lms{$name} = $lm;
-  $xgrammars{$name} = $xgrammar;
-  $devs{$name} = $dev;
-  $devrefs{$name} = $devref;
-  $tests{$name} = $xtests[0];
-  $testevals{$name} = $xtests[1];
-  print STDERR " $name";
-}
-print STDERR "\n";
-
-my %langpairs = map { $_ => 1 } qw( btec zhen fbis aren uren nlfr );
-
-my $outdir = "$CWD/exp";
-my $help;
-my $XFEATS;
-my $EXTRA_FILTER = '';
-my $dataDir = '/export/ws10smt/data';
-if (GetOptions(
-        "data=s" => \$dataDir,
-        "xfeats" => \$XFEATS,
-) == 0 || @ARGV!=2 || $help) {
-  print_help();
-  exit;
-}
-my $lp = $ARGV[0];
-my $grammar = $ARGV[1];
-print STDERR "    CORPUS REPO: $dataDir\n";
-print STDERR " 
LANGUAGE PAIR: $lp\n"; -die "I don't know about that language pair\n" unless $paths{$lp}; -my $corpdir = "$dataDir"; -if ($paths{$lp} =~ /^\//) { $corpdir = $paths{$lp}; } else { $corpdir .= '/' . $paths{$lp}; } -die "I can't find the corpora directory: $corpdir" unless -d $corpdir; -print STDERR " GRAMMAR: $grammar\n"; -my $LANG_MODEL = mydircat($corpdir, $lms{$lp}); -print STDERR " LM: $LANG_MODEL\n"; -my $CORPUS = mydircat($corpdir, $corpora{$lp}); -die "Can't find corpus: $CORPUS" unless -f $CORPUS; - -my $dev = mydircat($corpdir, $devs{$lp}); -my $drefs = $devrefs{$lp}; -die "Can't find dev: $dev\n" unless -f $dev; -die "Dev refs not set" unless $drefs; -$drefs = mydircat($corpdir, $drefs); - -my $test = mydircat($corpdir, $tests{$lp}); -my $teval = mydircat($corpdir, $testevals{$lp}); -die "Can't find test: $test\n" unless -f $test; -assert_exec($teval); - -if ($XFEATS) { - my $xgram = mydircat($corpdir, $xgrammars{$lp}); - die "Can't find x-grammar: $xgram" unless -f $xgram; - $EXTRA_FILTER = "$ADDXFEATS $xgram |"; - print STDERR "ADDING X-FEATS FROM $xgram\n"; -} - -# MAKE DEV -print STDERR "\nFILTERING FOR dev...\n"; -print STDERR "DEV: $dev (REFS=$drefs)\n"; -`mkdir -p $outdir`; -my $devgrammar = filter($grammar, $dev, 'dev', $outdir); -my $devini = mydircat($outdir, "cdec-dev.ini"); -write_cdec_ini($devini, $devgrammar); - - -# MAKE TEST -print STDERR "\nFILTERING FOR test...\n"; -print STDERR "TEST: $test (EVAL=$teval)\n"; -`mkdir -p $outdir`; -my $testgrammar = filter($grammar, $test, 'test', $outdir); -my $testini = mydircat($outdir, "cdec-test.ini"); -write_cdec_ini($testini, $testgrammar); - - -# CREATE INIT WEIGHTS -print STDERR "\nCREATING INITIAL WEIGHTS FILE: weights.init\n"; -my $weights = mydircat($outdir, "weights.init"); -write_random_weights_file($weights); - - -# VEST -print STDERR "\nMINIMUM ERROR TRAINING\n"; -my $tuned_weights = mydircat($outdir, 'weights.tuned'); -if (-f $tuned_weights) { - print STDERR "TUNED WEIGHTS $tuned_weights EXISTS: REUSING\n"; -} else { - my $cmd = "$DISTVEST --ref-files=$drefs --source-file=$dev --weights $weights $devini"; - print STDERR "MERT COMMAND: $cmd\n"; - `rm -rf $outdir/vest 2> /dev/null`; - chdir $outdir or die "Can't chdir to $outdir: $!"; - $weights = `$cmd`; - die "MERT reported non-zero exit code" unless $? == 0; - chomp $weights; - safesystem($tuned_weights, "cp $weights $tuned_weights"); - print STDERR "TUNED WEIGHTS: $tuned_weights\n"; - die "$tuned_weights is missing!" 
unless -f $tuned_weights; -} - -# DECODE -print STDERR "\nDECODE TEST SET\n"; -my $decolog = mydircat($outdir, "test-decode.log"); -my $testtrans = mydircat($outdir, "test.trans"); -my $cmd = "cat $test | $PARALLELIZE -j 20 -e $decolog -- $CDEC -c $testini -w $tuned_weights > $testtrans"; -safesystem($testtrans, $cmd) or die "Failed to decode test set!"; - - -# EVALUATE -print STDERR "\nEVALUATE TEST SET\n"; -print STDERR "TEST: $testtrans\n"; -$cmd = "$teval $testtrans"; -safesystem(undef, $cmd) or die "Failed to evaluate!"; -exit 0; - - -sub write_random_weights_file { - my ($file, @extras) = @_; - open F, ">$file" or die "Can't write $file: $!"; - my @feats = (@DEFAULT_FEATS, @extras); - if ($XFEATS) { - my @xfeats = qw( - X_LogRuleCount X_LogECount X_LogFCount X_EGivenF X_FGivenE X_SingletonRule X_SingletonE X_SingletonF - ); - @feats = (@feats, @xfeats); - } - for my $feat (@feats) { - my $r = rand(1.6); - my $w = $init_weights{$feat} * $r; - if ($w == 0) { $w = 0.0001; print STDERR "WARNING: $feat had no initial weight!\n"; } - print F "$feat $w\n"; - } - close F; -} - -sub filter { - my ($grammar, $set, $name, $outdir) = @_; - my $outgrammar = mydircat($outdir, "$name.scfg.gz"); - if (-f $outgrammar) { print STDERR "$outgrammar exists - REUSING!\n"; } else { - my $cmd = "gunzip -c $grammar | $FILTSCORE -c $CORPUS -t $set | $EXTRA_FILTER gzip > $outgrammar"; - safesystem($outgrammar, $cmd) or die "Can't filter and score grammar!"; - } - return $outgrammar; -} - -sub mydircat { - my ($base, $suffix) = @_; - if ($suffix =~ /^\//) { return $suffix; } - my $res = $base . '/' . $suffix; - $res =~ s/\/\//\//g; - return $res; -} - -sub write_cdec_ini { - my ($filename, $grammar_path) = (@_); - open CDECINI, ">$filename" or die "Can't write $filename: $!"; - print CDECINI <<EOT; -formalism=scfg -cubepruning_pop_limit=100 -add_pass_through_rules=true -scfg_extra_glue_grammar=/export/ws10smt/data/glue/glue.scfg.gz -grammar=$grammar_path -feature_function=WordPenalty -feature_function=LanguageModel -o 3 $LANG_MODEL -EOT - close CDECINI; -}; - -sub print_help { - print STDERR<<EOT; - -Usage: $0 [OPTIONS] language-pair unfiltered-grammar.gz - -Given an induced grammar for an entire corpus (i.e., generated by -local-gi-pipeline.pl), filter and featurize it for a dev and test set, -run MERT, report scores. - -EOT -} - -sub safesystem { - my $output = shift @_; - print STDERR "Executing: @_\n"; - system(@_); - if ($? == -1) { - print STDERR "ERROR: Failed to execute: @_\n $!\n"; - if (defined $output && -e $output) { printf STDERR "Removing $output\n"; `rm -rf $output`; } - exit(1); - } - elsif ($? & 127) { - printf STDERR "ERROR: Execution of: @_\n died with signal %d, %s coredump\n", - ($? & 127), ($? & 128) ? 'with' : 'without'; - if (defined $output && -e $output) { printf STDERR "Removing $output\n"; `rm -rf $output`; } - exit(1); - } - else { - my $exitcode = $? >> 8; - if ($exitcode) { - print STDERR "Exit code: $exitcode\n"; - if (defined $output && -e $output) { printf STDERR "Removing $output\n"; `rm -rf $output`; } - } - return ! 
$exitcode;
-  }
-}
-
-sub assert_exec {
-  my @files = @_;
-  for my $file (@files) {
-    die "Can't find $file - did you run make?\n" unless -e $file;
-    die "Can't execute $file" unless -x $file;
-  }
-};
-
diff --git a/gi/pipeline/backoff-pipe.pl b/gi/pipeline/backoff-pipe.pl
deleted file mode 100644
index ac103c8b..00000000
--- a/gi/pipeline/backoff-pipe.pl
+++ /dev/null
@@ -1,215 +0,0 @@
-#!/usr/bin/perl -w
-use strict;
-
-use Getopt::Long "GetOptions";
-
-my @grammars;
-my $OUTPUTPREFIX = './giwork/bo.hier.grammar';
-safemkdir($OUTPUTPREFIX);
-my $backoff_levels = 1;
-my $glue_levels = 1;
-
-usage() unless &GetOptions('grmr=s@' => \ @grammars,
-                           'outprefix=s' => \ $OUTPUTPREFIX,
-                           'bo-lvls=i' => \ $backoff_levels,
-                           'glue-lvls=i' => \ $glue_levels,
-);
-
-my $OUTDIR = $OUTPUTPREFIX . '/hier';
-print STDERR "@grammars\n";
-
-
-my %grmr = ();
-foreach my $grammar (@grammars) {
-    $grammar =~ m/\/[^\/]*\.t(\d+)\.[^\/]*/;
-    $grmr{$1} = $grammar;
-}
-
-my @index = sort keys %grmr;
-$OUTDIR = $OUTDIR . join('-',@index);
-safemkdir($OUTDIR);
-my $BACKOFF_GRMR = $OUTDIR . '/backoff.hier.gz';
-safesystem("echo \"\" | gzip > $BACKOFF_GRMR");
-my $GLUE_GRMR = $OUTDIR . '/glue.hier.gz';
-safesystem("echo \"\" | gzip > $GLUE_GRMR");
-my $joinedgrammars = $OUTDIR . '/grammar.hier.gz';
-
-join_grammars();
-
-for my $i (0..(scalar @index)-2) {
-    my $freqs = extract_freqs($index[$i], $index[$i+1]);
-    if ($i < $backoff_levels) {
-        create_backoff_rules($index[$i],$index[$i+1],$freqs);
-    }
-    if ($i < $glue_levels) {
-        add_glue_rules($index[$i]);
-    }
-}
-
-output_grammar_info();
-
-
-sub usage {
-  print <<EOT;
-
-Usage: $0 -grmr=grammar1.gz [-grmr=grammar2.gz ...] [OPTIONS]
-
-Joins grammars induced at several levels of granularity and creates
-hierarchical backoff and glue rules relating their nonterminal labels.
-
-EOT
-  exit 1;
-};
-
-sub safemkdir {
-  my $dir = shift;
-  if (-d $dir) { return 1; }
-  return mkdir($dir);
-}
-
-
-sub safesystem {
-  print STDERR "Executing: @_\n";
-  system(@_);
-  if ($? == -1) {
-    print STDERR "ERROR: Failed to execute: @_\n  $!\n";
-    exit(1);
-  }
-  elsif ($? & 127) {
-    printf STDERR "ERROR: Execution of: @_\n  died with signal %d, %s coredump\n",
-      ($? & 127),  ($? & 128) ? 'with' : 'without';
-    exit(1);
-  }
-  else {
-    my $exitcode = $? >> 8;
-    print STDERR "Exit code: $exitcode\n" if $exitcode;
-    return ! $exitcode;
-  }
-}
-
-
-sub join_grammars {
-    print STDERR "\n!!! JOINING GRAMMARS\n";
-    if(-e $joinedgrammars) {
-        print STDERR "$joinedgrammars exists, reusing...\n";
-        return;
-    }
-    safesystem("echo \"\" | gzip > $joinedgrammars");
-    foreach my $i (@index) {
-        my $g = $grmr{$i};
-        safesystem("zcat $g | sed -r -e 's/X([0-9]+)/X$i\\1/g' - | gzip > $g.2.gz");
-        safesystem("zcat $joinedgrammars $g.2.gz | gzip > $joinedgrammars.2.gz");
-        safesystem("mv $joinedgrammars.2.gz $joinedgrammars");
-    }
-}
-
-
-sub extract_freqs {
-    my($grmr1,$grmr2) = @_;
-    print STDERR "\n!!!EXTRACTING FREQUENCIES: $grmr1->$grmr2\n";
-    my $IN_COARSE = substr($grmr{$grmr1},0,index($grmr{$grmr1},".grammar/")) . "/labeled_spans.txt";
-    my $IN_FINE = substr($grmr{$grmr2},0,index($grmr{$grmr2},".grammar/")) . 
"/labeled_spans.txt"; - my $OUT_SPANS = "$OUTDIR/labeled_spans.hier$grmr1-$grmr2.txt"; - my $FREQS = "$OUTDIR/label_freq.hier$grmr1-$grmr2.txt"; - if(-e $OUT_SPANS && -e $FREQS) { - print STDERR "$OUT_SPANS exists, reusing...\n"; - print STDERR "$FREQS exists, reusing...\n"; - return $FREQS; - } - - safesystem("paste -d ' ' $IN_COARSE $IN_FINE > $OUT_SPANS"); - - my %FREQ_HIER = (); - my %finehier = (); - - open SPANS, $OUT_SPANS or die $!; - while (<SPANS>) { - my ($tmp, $coarse, $fine) = split /\|\|\|/; - my @coarse_spans = $coarse =~ /\d+-\d+:X(\d+)/g; - my @fine_spans = $fine =~ /\d+-\d+:X(\d+)/g; - - foreach my $i (0..(scalar @coarse_spans)-1) { - my $coarse_cat = $coarse_spans[$i]; - my $fine_cat = $fine_spans[$i]; - - $FREQ_HIER{$coarse_cat}{$fine_cat}++; - } - } - close SPANS; - foreach (values %FREQ_HIER) { - my $coarse_freq = $_; - my $total = 0; - $total+=$_ for (values %{ $coarse_freq }); - $coarse_freq->{$_}=log($coarse_freq->{$_}/$total) for (keys %{ $coarse_freq }); - } - open FREQS, ">", $FREQS or die $!; - foreach my $coarse_cat (keys %FREQ_HIER) { - print FREQS "$coarse_cat |||"; - foreach my $fine_cat (keys %{$FREQ_HIER{$coarse_cat}}) { - my $freq = $FREQ_HIER{$coarse_cat}{$fine_cat}; - print FREQS " $fine_cat:$freq"; - if(! exists $finehier{$fine_cat} || $finehier{$fine_cat} < $freq) { - $finehier{$fine_cat} = $coarse_cat; - } - } - print FREQS "\n"; - } -# foreach my $fine_cat (keys %finehier) { -# print FREQS "$fine_cat -> $finehier{$fine_cat}\n"; -# } - close FREQS; - return $FREQS; -} - - -sub create_backoff_rules { - print STDERR "\n!!! CREATING BACKOFF RULES\n"; - my ($grmr1, $grmr2, $freq) = @_; - my $OUTFILE = "$OUTDIR/backoff.hier$grmr1-$grmr2.txt"; - if(-e $OUTFILE) { - print STDERR "$OUTFILE exists, reusing...\n"; - return; - } - open FREQS, $freq or die $!; - open TMP, ">", $OUTFILE or die $!; - while (<FREQS>) { - my $line = $_; - $line = m/^(\d+) \|\|\| (.+)$/; - my $coarse = $1; - $line = $2; - my @finefreq = $line =~ m/(\d+):(\S+)/g; - for(my $i = 0; $i < scalar @finefreq; $i+=2) { - my $finecat = $finefreq[$i]; - my $finefreq = $finefreq[$i+1]; - print TMP "[X$grmr1$coarse] ||| [X$grmr2$finecat,1]\t[1] ||| BackoffRule=$finefreq A=0-0\n"; - } - } - close TMP; - close FREQS; - safesystem("zcat $BACKOFF_GRMR | cat - $OUTFILE | gzip > $BACKOFF_GRMR.2.gz"); - safesystem("mv $BACKOFF_GRMR.2.gz $BACKOFF_GRMR"); -} - -sub add_glue_rules { - print STDERR "\n!!! CREATING GLUE RULES\n"; - my ($grmr) = @_; - my $OUTFILE = "$OUTDIR/glue.$grmr.gz"; - if (-e $OUTFILE) { - print STDERR "$OUTFILE exists, reusing...\n"; - return; - } - open TMP, ">", $OUTFILE or die $!; - for my $i (0..($grmr-1)) { - print TMP "[S] ||| [S,1] [X$grmr$i,2] ||| [1] [2] ||| Glue=1\n"; - print TMP "[S] ||| [X$grmr$i,1] ||| [1] ||| GlueTop=1\n"; - } - close TMP; - safesystem("zcat $GLUE_GRMR | cat - $OUTFILE | gzip > $GLUE_GRMR.2.gz"); - safesystem("mv $GLUE_GRMR.2.gz $GLUE_GRMR"); -} - -sub output_grammar_info { - print STDERR "\n!!! GRAMMAR INFORMATION\n"; - print STDOUT "GRAMMAR: \t$joinedgrammars\n"; - print STDOUT "GLUE: \t$GLUE_GRMR\n"; - print STDOUT "BACKOFF: \t$BACKOFF_GRMR\n"; -} diff --git a/gi/pipeline/blacklight.config b/gi/pipeline/blacklight.config deleted file mode 100644 index fc59a604..00000000 --- a/gi/pipeline/blacklight.config +++ /dev/null @@ -1,9 +0,0 @@ -# THIS FILE GIVES THE LOCATIONS OF THE CORPORA USED -# name path aligned-corpus LM dev dev-refs test1 testt-eval.sh ... 
-/usr/users/0/cdyer/ws10smt/data
-btec /home/cdyer/ws10smt-data/btec/ split.zh-en.al lm/en.3gram.lm.gz devtest/devset1_2.zh devtest/devset1_2.lc.en* devtest/devset3.zh eval-devset3.sh
-zhen /home/cdyer/ws10smt-data/chinese-english corpus.zh-en.al lm/c2e.3gram.lm.gz dev_and_test/mt02.src.txt dev_and_test/mt02.ref.* dev_and_test/mt03.src.txt eval-mt03.sh
-aren /home/cdyer/ws10smt-data/arabic-english corpus.ar-en-al lm/a2e.3gram.lm.gz dev_and_test/dev.src.txt dev_and_test/dev.ref.txt.* dev_and_test/mt05.src.txt eval-mt05.sh
-uren /usr/users/0/cdyer/ws10smt/data/urdu-english corpus.ur-en.al lm/u2e.en.lm.gz dev/dev.ur dev/dev.en* devtest/devtest.ur eval-devtest.sh
-nlfr /home/cdyer/ws10smt-data/dutch-french corpus.nl-fr.al
-
diff --git a/gi/pipeline/clsp.config b/gi/pipeline/clsp.config
deleted file mode 100644
index c23d409f..00000000
--- a/gi/pipeline/clsp.config
+++ /dev/null
@@ -1,10 +0,0 @@
-# THIS FILE GIVES THE LOCATIONS OF THE CORPORA USED
-# name path aligned-corpus LM dev dev-refs test1 test1-eval.sh ...
-/export/ws10smt/data
-btec /export/ws10smt/data/btec/ split.zh-en.al lm/en.3gram.lm.gz devtest/devset1_2.zh devtest/devset1_2.lc.en* devtest/devset3.zh eval-devset3.sh
-fbis /export/ws10smt/data/chinese-english.fbis corpus.zh-en.al
-zhen /export/ws10smt/data/chinese-english corpus.zh-en.al lm/c2e.3gram.lm.gz dev_and_test/mt02.src.txt dev_and_test/mt02.ref.* dev_and_test/mt03.src.txt eval-mt03.sh
-aren /export/ws10smt/data/arabic-english corpus.ar-en-al lm/a2e.3gram.lm.gz dev_and_test/dev.src.txt dev_and_test/dev.ref.txt.* dev_and_test/mt05.src.txt eval-mt05.sh
-uren /export/ws10smt/data/urdu-english corpus.ur-en.al lm/u2e.en.lm.gz dev/dev.ur dev/dev.en* devtest/devtest.ur eval-devtest.sh
-nlfr /export/ws10smt/data/dutch-french corpus.nl-fr.al
-
diff --git a/gi/pipeline/evaluation-pipeline.pl b/gi/pipeline/evaluation-pipeline.pl
deleted file mode 100755
index 4b4529d9..00000000
--- a/gi/pipeline/evaluation-pipeline.pl
+++ /dev/null
@@ -1,364 +0,0 @@
-#!/usr/bin/perl -w
-use strict;
-use Getopt::Long;
-use Cwd;
-my $CWD = getcwd;
-
-my $SCRIPT_DIR; BEGIN { use Cwd qw/ abs_path /; use File::Basename; $SCRIPT_DIR = dirname(abs_path($0)); push @INC, $SCRIPT_DIR, "$SCRIPT_DIR/../../environment"; }
-use LocalConfig;
-
-my $JOBS = 15;
-my $PMEM = "9G";
-my $NUM_TRANSLATIONS = 50;
-my $GOAL = "S";
-
-# featurize_grammar may add multiple features from a single feature extractor
-# the key in this map is the extractor name, the value is a list of the extracted features
-my $feat_map = {
-  "LogRuleCount" => [ "LogRuleCount", "SingletonRule" ] ,
-#  "XFeatures" => [ "XFE","XEF" ] ,
-  "XFeatures" => [ "XFE","XEF","LabelledEF","LabelledFE"], # ,"XE_Singleton","XF_Singleton"] ,
-  "LabelledRuleConditionals" => [ "LabelledFE","LabelledEF" ] ,
-  "LexProb" => [ "LexE2F", "LexF2E" ] ,
-  "BackoffRule" => [ "BackoffRule" ] ,
-  "RulePenalty" => [ "RulePenalty" ] ,
-  "LHSProb" => [ "LHSProb" ] ,
-  "LabellingShape" => [ "LabellingShape" ] ,
-  "GenerativeProb" => [ "GenerativeProb" ] ,
-};
-
-my %init_weights = qw(
-  EGivenF -0.735245
-  FGivenE -0.219391
-  Glue -0.306709
-  GlueTop 0.0473331
-  LanguageModel 2.40403
-  LexE2F -0.266989
-  LexF2E -0.550373
-  LogECount -0.129853
-  LogFCount -0.194037
-  LogRuleCount 0.256706
-  BackoffRule 0.5
-  XFE -0.256706
-  XEF -0.256706
-  XF_Singleton -0.05
-  XE_Singleton -0.8
-  LabelledFE -0.256706
-  LabelledEF -0.256706
-  PassThrough -0.9304905
-  SingletonE -3.04161
-  SingletonF 0.0714027
-  SingletonRule -0.889377
-  WordPenalty -1.99495
-  RulePenalty -0.1
-  LabellingShape -0.1
- 
LHSProb -0.1 - GenerativeProb -0.1 -); - - -# these features are included by default -my @DEFAULT_FEATS = qw( PassThrough Glue GlueTop LanguageModel WordPenalty ); - - - -my $FILTERBYF = "$SCRIPT_DIR/scripts/filter-by-f.pl"; -my $CDEC = "$SCRIPT_DIR/../../decoder/cdec"; -my $PARALLELIZE = "$SCRIPT_DIR/../../vest/parallelize.pl"; -my $EXTOOLS = "$SCRIPT_DIR/../../extools"; -die "Can't find extools: $EXTOOLS" unless -e $EXTOOLS && -d $EXTOOLS; -my $VEST = "$SCRIPT_DIR/../../vest"; -die "Can't find vest: $VEST" unless -e $VEST && -d $VEST; -my $DISTVEST = "$VEST/dist-vest.pl"; -my $FILTER = "$EXTOOLS/filter_grammar"; -my $FEATURIZE = "$EXTOOLS/featurize_grammar"; -assert_exec($CDEC, $PARALLELIZE, $FILTER, $FEATURIZE, $DISTVEST, $FILTERBYF); - -my $numtopics = 25; - -my $config = "$SCRIPT_DIR/" . (lc environment_name()) . '.config'; -print STDERR "CORPORA CONFIGURATION: $config\n"; -open CONF, "<$config" or die "Can't read $config: $!"; -my %paths; -my %corpora; -my %lms; -my %devs; -my %devrefs; -my %tests; -my %testevals; -my $datadir; -print STDERR " LANGUAGE PAIRS:"; -while(<CONF>) { - chomp; - next if /^#/; - next if /^\s*$/; - s/^\s+//; - s/\s+$//; - if (! defined $datadir) { $datadir = $_; next; } - my ($name, $path, $corpus, $lm, $dev, $devref, @xtests) = split /\s+/; - $paths{$name} = $path; - $corpora{$name} = $corpus; - $lms{$name} = $lm; - $devs{$name} = $dev; - $devrefs{$name} = $devref; - $tests{$name} = $xtests[0]; - $testevals{$name} = $xtests[1]; - print STDERR " $name"; -} -print STDERR "\n"; - -my %langpairs = map { $_ => 1 } qw( btec zhen fbis aren uren nlfr ); - -my $outdir = "$CWD/exp"; -my $help; -my $FEATURIZER_OPTS = ''; -my $dataDir = '/export/ws10smt/data'; -my @features; -my $bkoffgram; -my $gluegram; -my $oovgram; -my $usefork; -my $lmorder = 3; -my $density; -if (GetOptions( - "backoff-grammar=s" => \$bkoffgram, - "density-prune=f" => \$density, - "glue-grammar=s" => \$gluegram, - "oov-grammar=s" => \$oovgram, - "data=s" => \$dataDir, - "pmem=s" => \$PMEM, - "n=i" => \$NUM_TRANSLATIONS, - "features=s@" => \@features, - "use-fork" => \$usefork, - "jobs=i" => \$JOBS, - "out-dir=s" => \$outdir, - "lmorder=i" => \$lmorder, - "goal=s" => \$GOAL, -) == 0 || @ARGV!=2 || $help) { - print_help(); - exit; -} -my $DENSITY_PRUNE = ''; -if ($density) { - $DENSITY_PRUNE = "--density-prune $density"; -} -if ($usefork) { $usefork="--use-fork"; } else { $usefork = ''; } -my @fkeys = keys %$feat_map; -die "You must specify one or more features with -f. Known features: @fkeys\n" unless scalar @features > 0; -my @xfeats; -for my $feat (@features) { - my $rs = $feat_map->{$feat}; - if (!defined $rs) { die "DON'T KNOW ABOUT FEATURE $feat\n"; } - my @xfs = @$rs; - @xfeats = (@xfeats, @xfs); - $FEATURIZER_OPTS .= " -f $feat" unless $feat eq "BackoffRule"; -} -print STDERR "X-FEATS: @xfeats\n"; - -my $lp = $ARGV[0]; -my $grammar = $ARGV[1]; -print STDERR " CORPUS REPO: $dataDir\n"; -print STDERR " LANGUAGE PAIR: $lp\n"; -die "I don't know about that language pair\n" unless $paths{$lp}; -my $corpdir = "$dataDir"; -if ($paths{$lp} =~ /^\//) { $corpdir = $paths{$lp}; } else { $corpdir .= '/' . 
$paths{$lp}; } -die "I can't find the corpora directory: $corpdir" unless -d $corpdir; -print STDERR " GRAMMAR: $grammar\n"; -my $LANG_MODEL = mydircat($corpdir, $lms{$lp}); -print STDERR " LM: $LANG_MODEL\n"; -my $CORPUS = mydircat($corpdir, $corpora{$lp}); -die "Can't find corpus: $CORPUS" unless -f $CORPUS; - -my $dev = mydircat($corpdir, $devs{$lp}); -my $drefs = $devrefs{$lp}; -die "Can't find dev: $dev\n" unless -f $dev; -die "Dev refs not set" unless $drefs; -$drefs = mydircat($corpdir, $drefs); - -my $test = mydircat($corpdir, $tests{$lp}); -my $teval = mydircat($corpdir, $testevals{$lp}); -#die "Can't find test: $test\n" unless -f $test; -#assert_exec($teval); - -`mkdir -p $outdir`; - -# CREATE INIT WEIGHTS -print STDERR "\nCREATING INITIAL WEIGHTS FILE: weights.init\n"; -my $weights = mydircat($outdir, "weights.init"); -write_random_weights_file($weights, @xfeats); - -my $bkoff_grmr; -my $glue_grmr; -if($bkoffgram) { - print STDERR "Placing backoff grammar…\n"; - $bkoff_grmr = mydircat($outdir, "backoff.scfg.gz"); - print STDERR "cp $bkoffgram $bkoff_grmr\n"; - safesystem(undef,"cp $bkoffgram $bkoff_grmr"); -} -if($gluegram) { - print STDERR "Placing glue grammar…\n"; - $glue_grmr = mydircat($outdir, "glue.bo.scfg.gz"); - print STDERR "cp $gluegram $glue_grmr\n"; - safesystem(undef,"cp $gluegram $glue_grmr"); -} - -# MAKE DEV -print STDERR "\nFILTERING FOR dev...\n"; -print STDERR "DEV: $dev (REFS=$drefs)\n"; -my $devgrammar = filter($grammar, $dev, 'dev', $outdir); -my $devini = mydircat($outdir, "cdec-dev.ini"); -write_cdec_ini($devini, $devgrammar); - - -# MAKE TEST -print STDERR "\nFILTERING FOR test...\n"; -print STDERR "TEST: $test (EVAL=$teval)\n"; -`mkdir -p $outdir`; -my $testgrammar = filter($grammar, $test, 'test', $outdir); -my $testini = mydircat($outdir, "cdec-test.ini"); -write_cdec_ini($testini, $testgrammar); - - -# VEST -print STDERR "\nMINIMUM ERROR TRAINING\n"; -my $tuned_weights = mydircat($outdir, 'weights.tuned'); -if (-f $tuned_weights) { - print STDERR "TUNED WEIGHTS $tuned_weights EXISTS: REUSING\n"; -} else { - my $cmd = "$DISTVEST $usefork $DENSITY_PRUNE --decode-nodes $JOBS --pmem=$PMEM --ref-files=$drefs --source-file=$dev --weights $weights $devini"; - print STDERR "MERT COMMAND: $cmd\n"; - `rm -rf $outdir/vest 2> /dev/null`; - chdir $outdir or die "Can't chdir to $outdir: $!"; - $weights = `$cmd`; - die "MERT reported non-zero exit code" unless $? == 0; - chomp $weights; - safesystem($tuned_weights, "cp $weights $tuned_weights"); - print STDERR "TUNED WEIGHTS: $tuned_weights\n"; - die "$tuned_weights is missing!" 
unless -f $tuned_weights;
-}
-
-# DECODE
-print STDERR "\nDECODE TEST SET\n";
-my $decolog = mydircat($outdir, "test-decode.log");
-my $testtrans = mydircat($outdir, "test.trans");
-my $cmd = "cat $test | $PARALLELIZE $usefork -j $JOBS -e $decolog -- $CDEC -c $testini -w $tuned_weights > $testtrans";
-safesystem($testtrans, $cmd) or die "Failed to decode test set!";
-
-
-# EVALUATE
-print STDERR "\nEVALUATE TEST SET\n";
-print STDERR "TEST: $testtrans\n";
-$cmd = "$teval $testtrans";
-safesystem(undef, $cmd) or die "Failed to evaluate!";
-exit 0;
-
-
-sub write_random_weights_file {
-  my ($file, @extras) = @_;
-  if (-f $file) {
-    print STDERR "$file exists - REUSING!\n";
-    return;
-  }
-  open F, ">$file" or die "Can't write $file: $!";
-  my @feats = (@DEFAULT_FEATS, @extras);
-  for my $feat (@feats) {
-    my $r = rand(0.4) + 0.8;
-    my $w = $init_weights{$feat} * $r;
-    if ($w == 0) { $w = 0.0001; print STDERR "WARNING: $feat had no initial weight!\n"; }
-    print F "$feat $w\n";
-  }
-  close F;
-}
-
-sub filter {
-  my ($grammar, $set, $name, $outdir) = @_;
-  my $out1 = mydircat($outdir, "$name.filt.gz");
-  my $out2 = mydircat($outdir, "$name.f_feat.gz");
-  my $outgrammar = mydircat($outdir, "$name.scfg.gz");
-  if (-f $outgrammar) { print STDERR "$outgrammar exists - REUSING!\n"; } else {
-    my $cmd = "gunzip -c $grammar | $FILTER -t $set | gzip > $out1";
-    safesystem($out1, $cmd) or die "Filtering failed.";
-    $cmd = "gunzip -c $out1 | $FEATURIZE $FEATURIZER_OPTS -g $out1 -c $CORPUS | gzip > $out2";
-    safesystem($out2, $cmd) or die "Featurizing failed";
-    $cmd = "$FILTERBYF $NUM_TRANSLATIONS $out2 $outgrammar";
-    safesystem($outgrammar, $cmd) or die "Secondary filtering failed";
-  }
-  return $outgrammar;
-}
-
-sub mydircat {
-  my ($base, $suffix) = @_;
-  if ($suffix =~ /^\//) { return $suffix; }
-  my $res = $base . '/' . $suffix;
-  $res =~ s/\/\//\//g;
-  return $res;
-}
-
-sub write_cdec_ini {
-  my ($filename, $grammar_path) = (@_);
-  open CDECINI, ">$filename" or die "Can't write $filename: $!";
-  my $glue = ($gluegram ? "$glue_grmr" : "$datadir/glue/glue.scfg.gz");
-  my $oov = ($oovgram ? "$oovgram" : "$datadir/oov.scfg.gz");
-  print CDECINI <<EOT;
-formalism=scfg
-cubepruning_pop_limit=100
-add_pass_through_rules=true
-scfg_extra_glue_grammar=$glue
-grammar=$oov
-grammar=$grammar_path
-scfg_default_nt=OOV
-scfg_no_hiero_glue_grammar=true
-feature_function=WordPenalty
-feature_function=LanguageModel -o $lmorder $LANG_MODEL
-goal=$GOAL
-EOT
-  print CDECINI "grammar=$bkoff_grmr\n" if $bkoffgram;
-  close CDECINI;
-};
-
-sub print_help {
-  print STDERR<<EOT;
-
-Usage: $0 [-c data-config-file] [-n N] language-pair grammar.bidir.gz [OPTIONS]
-
-Given an induced grammar for an entire corpus (i.e., generated by
-local-gi-pipeline.pl), filter and featurize it for a dev and test set,
-run MERT, report scores. Use -n to specify the number of translations
-to keep for a given source (50 is default).
-
-EOT
-}
-
-sub safesystem {
-  my $output = shift @_;
-  print STDERR "Executing: @_\n";
-  system(@_);
-  if ($? == -1) {
-    print STDERR "ERROR: Failed to execute: @_\n  $!\n";
-    if (defined $output && -e $output) { printf STDERR "Removing $output\n"; `rm -rf $output`; }
-    exit(1);
-  }
-  elsif ($? & 127) {
-    printf STDERR "ERROR: Execution of: @_\n  died with signal %d, %s coredump\n",
-      ($? & 127),  ($? & 128) ? 'with' : 'without';
-    if (defined $output && -e $output) { printf STDERR "Removing $output\n"; `rm -rf $output`; }
-    exit(1);
-  }
-  else {
-    my $exitcode = $? 
>> 8;
-    if ($exitcode) {
-      print STDERR "Exit code: $exitcode\n";
-      if (defined $output && -e $output) { printf STDERR "Removing $output\n"; `rm -rf $output`; }
-    }
-    return ! $exitcode;
-  }
-}
-
-sub assert_exec {
-  my @files = @_;
-  for my $file (@files) {
-    die "Can't find $file - did you run make?\n" unless -e $file;
-    die "Can't execute $file" unless -x $file;
-  }
-};
-
diff --git a/gi/pipeline/local-gi-pipeline.pl b/gi/pipeline/local-gi-pipeline.pl
deleted file mode 100755
index e31167a2..00000000
--- a/gi/pipeline/local-gi-pipeline.pl
+++ /dev/null
@@ -1,465 +0,0 @@
-#!/usr/bin/perl -w
-use strict;
-use File::Copy;
-
-my $SCRIPT_DIR; BEGIN { use Cwd qw/ abs_path cwd /; use File::Basename; $SCRIPT_DIR = dirname(abs_path($0)); push @INC, $SCRIPT_DIR; }
-
-use Getopt::Long "GetOptions";
-
-my $GZIP = 'gzip';
-my $ZCAT = 'gunzip -c';
-my $SED = 'sed -e';
-my $BASE_PHRASE_MAX_SIZE = 10;
-my $COMPLETE_CACHE = 1;
-my $ITEMS_IN_MEMORY = 10000000; # cache size in extractors
-my $NUM_TOPICS = 50;
-my $NUM_TOPICS_COARSE;
-my $NUM_TOPICS_FINE = $NUM_TOPICS;
-my $NUM_SAMPLES = 1000;
-my $CONTEXT_SIZE = 1;
-my $BIDIR = 0;
-my $TOPICS_CONFIG = "pyp-topics.conf";
-my $LANGUAGE = "target";
-my $LABEL_THRESHOLD = "0";
-my $PRESERVE_PHRASES;
-
-my $MODEL = "pyp";
-my $NUM_ITERS = 100;
-my $PR_SCALE_P = 0;
-my $PR_SCALE_C = 0;
-my $PR_FLAGS = "";
-my $MORFMARK = "";
-
-my $EXTOOLS = "$SCRIPT_DIR/../../extools";
-die "Can't find extools: $EXTOOLS" unless -e $EXTOOLS && -d $EXTOOLS;
-my $PYPTOOLS = "$SCRIPT_DIR/../pyp-topics/src";
-die "Can't find pyp-topics: $PYPTOOLS" unless -e $PYPTOOLS && -d $PYPTOOLS;
-my $PYPSCRIPTS = "$SCRIPT_DIR/../pyp-topics/scripts";
-die "Can't find pyp-topics: $PYPSCRIPTS" unless -e $PYPSCRIPTS && -d $PYPSCRIPTS;
-my $PRTOOLS = "$SCRIPT_DIR/../posterior-regularisation";
-die "Can't find posterior-regularisation: $PRTOOLS" unless -e $PRTOOLS && -d $PRTOOLS;
-my $REDUCER = "$EXTOOLS/mr_stripe_rule_reduce";
-my $C2D = "$PYPSCRIPTS/contexts2documents.py";
-my $S2L = "$PYPSCRIPTS/spans2labels.py";
-my $SPLIT = "$SCRIPT_DIR/../posterior-regularisation/split-languages.py";
-
-my $PREM_TRAIN="$PRTOOLS/prjava/train-PR-cluster.sh";
-
-my $SORT_KEYS = "$SCRIPT_DIR/scripts/sort-by-key.sh";
-my $PATCH_CORPUS = "$SCRIPT_DIR/scripts/patch-corpus.pl";
-my $REMOVE_TAGS_CORPUS = "$SCRIPT_DIR/scripts/remove-tags-from-corpus.pl";
-my $REMOVE_TAGS_CONTEXT = "$SCRIPT_DIR/scripts/remove-tags-from-contexts.pl";
-my $EXTRACTOR = "$EXTOOLS/extractor";
-my $TOPIC_TRAIN = "$PYPTOOLS/pyp-contexts-train";
-my $MORF_DOC_FILTER = "$SCRIPT_DIR/../morf-segmentation/filter_docs.pl";
-
-assert_exec($PATCH_CORPUS, $SORT_KEYS, $REDUCER, $EXTRACTOR,
-            $S2L, $C2D, $TOPIC_TRAIN, $SPLIT, $REMOVE_TAGS_CONTEXT, $REMOVE_TAGS_CORPUS, $MORF_DOC_FILTER);
-
-my $BACKOFF_GRAMMAR;
-my $DEFAULT_CAT;
-my $HIER_CAT;
-my %FREQ_HIER = ();
-my $TAGGED_CORPUS;
-
-my $NAME_SHORTCUT;
-
-my $OUTPUT = './giwork';
-usage() unless &GetOptions('base_phrase_max_size=i' => \$BASE_PHRASE_MAX_SIZE,
-                           'backoff_grammar' => \$BACKOFF_GRAMMAR,
-                           'output=s' => \$OUTPUT,
-                           'model=s' => \$MODEL,
-                           'topics=i' => \$NUM_TOPICS_FINE,
-                           'coarse_topics=i' => \$NUM_TOPICS_COARSE,
-                           'trg_context=i' => \$CONTEXT_SIZE,
-                           'samples=i' => \$NUM_SAMPLES,
-                           'label_threshold=f' => \$LABEL_THRESHOLD,
-                           'use_default_cat' => \$DEFAULT_CAT,
-                           'topics-config=s' => \$TOPICS_CONFIG,
-                           'iterations=i' => \$NUM_ITERS,
-                           'pr-scale-phrase=f' => \$PR_SCALE_P,
-                           'pr-scale-context=f' => \$PR_SCALE_C,
-                           'pr-flags=s' => \$PR_FLAGS,
-                           'tagged_corpus=s' => \$TAGGED_CORPUS,
- 
'language=s' => \$LANGUAGE, - 'get_name_only' => \$NAME_SHORTCUT, - 'preserve_phrases' => \$PRESERVE_PHRASES, - 'morf=s' => \$MORFMARK, - ); -if ($NAME_SHORTCUT) { - $NUM_TOPICS = $NUM_TOPICS_FINE; - print STDERR labeled_dir(); - exit 0; -} -usage() unless scalar @ARGV == 1; -my $CORPUS = $ARGV[0]; -open F, "<$CORPUS" or die "Can't read $CORPUS: $!"; close F; - -$NUM_TOPICS = $NUM_TOPICS_FINE; - -$HIER_CAT = ( $NUM_TOPICS_COARSE ? 1 : 0 ); - -print STDERR " Output: $OUTPUT\n"; -my $DATA_DIR = $OUTPUT . '/corpora'; -my $LEX_NAME = "corpus.f_e_a.$LANGUAGE.lex"; -my $CORPUS_LEX = $DATA_DIR . '/' . $LEX_NAME; # corpus used to extract rules -my $CORPUS_CLUSTER = $DATA_DIR . "/corpus.f_e_a.$LANGUAGE.cluster"; # corpus used for clustering (often identical) - -my $CONTEXT_DIR = $OUTPUT . '/' . context_dir(); -my $CLUSTER_DIR = $OUTPUT . '/' . cluster_dir(); -my $LABELED_DIR = $OUTPUT . '/' . labeled_dir(); -my $CLUSTER_DIR_C; -my $CLUSTER_DIR_F; -my $LABELED_DIR_C; -my $LABELED_DIR_F; -if($HIER_CAT) { - $CLUSTER_DIR_F = $CLUSTER_DIR; - $LABELED_DIR_F = $LABELED_DIR; - $NUM_TOPICS = $NUM_TOPICS_COARSE; - $CLUSTER_DIR_C = $OUTPUT . '/' . cluster_dir(); - $LABELED_DIR_C = $OUTPUT . '/' . labeled_dir(); - $NUM_TOPICS = $NUM_TOPICS_FINE; -} -my $GRAMMAR_DIR = $OUTPUT . '/' . grammar_dir(); -print STDERR " Context: $CONTEXT_DIR\n Cluster: $CLUSTER_DIR\n Labeled: $LABELED_DIR\n Grammar: $GRAMMAR_DIR\n"; -safemkdir($OUTPUT) or die "Couldn't create output directory $OUTPUT: $!"; -safemkdir($DATA_DIR) or die "Couldn't create output directory $DATA_DIR: $!"; -safemkdir($CONTEXT_DIR) or die "Couldn't create output directory $CONTEXT_DIR: $!"; -safemkdir($CLUSTER_DIR) or die "Couldn't create output directory $CLUSTER_DIR: $!"; -if($HIER_CAT) { - safemkdir($CLUSTER_DIR_C) or die "Couldn't create output directory $CLUSTER_DIR_C: $!"; - safemkdir($LABELED_DIR_C) or die "Couldn't create output directory $LABELED_DIR_C: $!"; -} -safemkdir($LABELED_DIR) or die "Couldn't create output directory $LABELED_DIR: $!"; -safemkdir($GRAMMAR_DIR) or die "Couldn't create output directory $GRAMMAR_DIR: $!"; -if(-e $TOPICS_CONFIG) { - copy($TOPICS_CONFIG, $CLUSTER_DIR) or die "Copy failed: $!"; -} - -setup_data(); - -if (lc($MODEL) eq "blagree") { - extract_bilingual_context(); -} else { - extract_context(); -} - -if (lc($MODEL) eq "pyp") { - if($HIER_CAT) { - $NUM_TOPICS = $NUM_TOPICS_COARSE; - $CLUSTER_DIR = $CLUSTER_DIR_C; - topic_train(); - $NUM_TOPICS = $NUM_TOPICS_FINE; - $CLUSTER_DIR = $CLUSTER_DIR_F; - topic_train(); - } else { - topic_train(); - } -} elsif (lc($MODEL) =~ /pr|em|agree/) { - prem_train(); -} else { die "Unsupported model type: $MODEL. 
Must be one of PYP, PR, EM, AGREE, or BLAGREE.\n"; }
-if($HIER_CAT) {
-    $NUM_TOPICS = $NUM_TOPICS_COARSE;
-    $CLUSTER_DIR = $CLUSTER_DIR_C;
-    $LABELED_DIR = $LABELED_DIR_C;
-    label_spans_with_topics();
-    $NUM_TOPICS = $NUM_TOPICS_FINE;
-    $CLUSTER_DIR = $CLUSTER_DIR_F;
-    $LABELED_DIR = $LABELED_DIR_F;
-    label_spans_with_topics();
-    extract_freqs();
-} else {
-    label_spans_with_topics();
-}
-my $res;
-if ($BIDIR) {
-  $res = grammar_extract_bidir();
-} else {
-  $res = grammar_extract();
-}
-print STDERR "\n!!!COMPLETE!!!\n";
-print STDERR "GRAMMAR: $res\nYou should probably run: $SCRIPT_DIR/evaluation-pipeline.pl LANGPAIR giwork/ct1s0.L10.PYP.t4.s20.grammar/grammar.gz -f FEAT1 -f FEAT2\n\n";
-exit 0;
-
-sub setup_data {
-  print STDERR "\n!!!PREPARE CORPORA!!!\n";
-  if (-f $CORPUS_LEX && -f $CORPUS_CLUSTER) {
-    print STDERR "$CORPUS_LEX and $CORPUS_CLUSTER exist, reusing...\n";
-    return;
-  }
-  copy($CORPUS, $CORPUS_LEX);
-  if ($TAGGED_CORPUS) {
-    die "Can't find $TAGGED_CORPUS" unless -f $TAGGED_CORPUS;
-    my $opt="";
-    $opt = "-s" if ($LANGUAGE eq "source");
-    $opt = $opt . " -a" if ($PRESERVE_PHRASES);
-    my $cmd="$PATCH_CORPUS $opt $TAGGED_CORPUS $CORPUS_LEX > $CORPUS_CLUSTER";
-    safesystem($cmd) or die "Failed to extract contexts.";
-  } else {
-    symlink($LEX_NAME, $CORPUS_CLUSTER);
-  }
-}
-
-sub context_dir {
-  return "ct${CONTEXT_SIZE}s0.L$BASE_PHRASE_MAX_SIZE.l$LANGUAGE";
-}
-
-sub cluster_dir {
-  if (lc($MODEL) eq "pyp") {
-    return context_dir() . ".PYP.t$NUM_TOPICS.s$NUM_SAMPLES";
-  } elsif (lc($MODEL) eq "em") {
-    return context_dir() . ".EM.t$NUM_TOPICS.i$NUM_ITERS";
-  } elsif (lc($MODEL) eq "pr") {
-    return context_dir() . ".PR.t$NUM_TOPICS.i$NUM_ITERS.sp$PR_SCALE_P.sc$PR_SCALE_C";
-  } elsif (lc($MODEL) eq "agree") {
-    return context_dir() . ".AGREE.t$NUM_TOPICS.i$NUM_ITERS";
-  } elsif (lc($MODEL) eq "blagree") {
-    return context_dir() . ".BLAGREE.t$NUM_TOPICS.i$NUM_ITERS";
-  }
-}
-
-sub labeled_dir {
-  if (lc($MODEL) eq "pyp" && $LABEL_THRESHOLD ne "0") {
-    return cluster_dir() . "_lt$LABEL_THRESHOLD";
-  } else {
-    return cluster_dir();
-  }
-}
-
-sub grammar_dir {
-  # TODO add grammar config options -- adjacent NTs, etc
-  if($HIER_CAT) {
-    return cluster_dir() . ".hier$NUM_TOPICS_COARSE-$NUM_TOPICS_FINE.grammar";
-  } else {
-    return labeled_dir() . ".grammar";
-  }
-}
-
-
-
-sub safemkdir {
-  my $dir = shift;
-  if (-d $dir) { return 1; }
-  return mkdir($dir);
-}
-
-sub usage {
-  print <<EOT;
-
-Usage: $0 [OPTIONS] corpus.fr-en-al
-
-Induces a grammar using Pitman-Yor topic modeling or Posterior Regularisation.
-
-EOT
-  exit 1;
-};
-
-sub assert_exec {
-  my @files = @_;
-  for my $file (@files) {
-    die "Can't find $file - did you run make?\n" unless -e $file;
-    die "Can't execute $file" unless -x $file;
-  }
-};
-
-sub extract_context {
-  print STDERR "\n!!!CONTEXT EXTRACTION\n";
-  my $OUT_CONTEXTS = "$CONTEXT_DIR/context.txt.gz";
-  if (-e $OUT_CONTEXTS) {
-    print STDERR "$OUT_CONTEXTS exists, reusing...\n";
-  } else {
-    my $ccopt = "-c $ITEMS_IN_MEMORY";
-    my $postsort = "| $REDUCER ";
-    if ($COMPLETE_CACHE) {
-      print STDERR "COMPLETE_CACHE is set: removing memory limits on cache.\n";
-      $ccopt = "-c 0";
-      $postsort = "" unless ($PRESERVE_PHRASES);
-    }
-
-    my $presort = ($PRESERVE_PHRASES ? "| $REMOVE_TAGS_CONTEXT --phrase=tok --context=tag " : "");
-
-    if ($MORFMARK ne "") {
-      $presort = $presort . 
"| $MORF_DOC_FILTER \"$MORFMARK\" "; - } - - my $cmd = "$EXTRACTOR -i $CORPUS_CLUSTER $ccopt -L $BASE_PHRASE_MAX_SIZE -C -S $CONTEXT_SIZE --phrase_language $LANGUAGE --context_language $LANGUAGE $presort | $SORT_KEYS $postsort | $GZIP > $OUT_CONTEXTS"; - safesystem($cmd) or die "Failed to extract contexts."; - } -} - -sub extract_bilingual_context { - print STDERR "\n!!!CONTEXT EXTRACTION\n"; - my $OUT_SRC_CONTEXTS = "$CONTEXT_DIR/context.source"; - my $OUT_TGT_CONTEXTS = "$CONTEXT_DIR/context.target"; - - if (-e $OUT_SRC_CONTEXTS . ".gz" and -e $OUT_TGT_CONTEXTS . ".gz") { - print STDERR "$OUT_SRC_CONTEXTS.gz and $OUT_TGT_CONTEXTS.gz exist, reusing...\n"; - } else { - my $OUT_BI_CONTEXTS = "$CONTEXT_DIR/context.bilingual.txt.gz"; - my $cmd = "$EXTRACTOR -i $CORPUS_CLUSTER -c $ITEMS_IN_MEMORY -L $BASE_PHRASE_MAX_SIZE -C -S $CONTEXT_SIZE --phrase_language both --context_language both | $SORT_KEYS | $REDUCER | $GZIP > $OUT_BI_CONTEXTS"; - if ($COMPLETE_CACHE) { - print STDERR "COMPLETE_CACHE is set: removing memory limits on cache.\n"; - $cmd = "$EXTRACTOR -i $CORPUS_CLUSTER -c 0 -L $BASE_PHRASE_MAX_SIZE -C -S $CONTEXT_SIZE --phrase_language both --context_language both | $SORT_KEYS | $GZIP > $OUT_BI_CONTEXTS"; - } - safesystem($cmd) or die "Failed to extract contexts."; - - safesystem("$ZCAT $OUT_BI_CONTEXTS | $SPLIT $OUT_SRC_CONTEXTS $OUT_TGT_CONTEXTS") or die "Failed to split contexts.\n"; - safesystem("$GZIP -f $OUT_SRC_CONTEXTS") or die "Failed to zip output contexts.\n"; - safesystem("$GZIP -f $OUT_TGT_CONTEXTS") or die "Failed to zip output contexts.\n"; - } -} - - -sub topic_train { - print STDERR "\n!!!TRAIN PYP TOPICS\n"; - my $IN_CONTEXTS = "$CONTEXT_DIR/context.txt.gz"; - my $OUT_CLUSTERS = "$CLUSTER_DIR/docs.txt.gz"; - if (-e $OUT_CLUSTERS) { - print STDERR "$OUT_CLUSTERS exists, reusing...\n"; - } else { - safesystem("$TOPIC_TRAIN --data $IN_CONTEXTS --backoff-type simple -t $NUM_TOPICS -s $NUM_SAMPLES -o $OUT_CLUSTERS -c $TOPICS_CONFIG -w /dev/null") or die "Topic training failed.\n"; - } -} - -sub prem_train { - print STDERR "\n!!!TRAIN PR/EM model\n"; - my $OUT_CLUSTERS = "$CLUSTER_DIR/docs.txt.gz"; - if (-e $OUT_CLUSTERS) { - print STDERR "$OUT_CLUSTERS exists, reusing...\n"; - } else { - my $in = "--in $CONTEXT_DIR/context.txt.gz"; - my $opts = ""; - if (lc($MODEL) eq "pr") { - $opts = "--scale-phrase $PR_SCALE_P --scale-context $PR_SCALE_C"; - } elsif (lc($MODEL) eq "agree") { - $opts = "--agree-direction"; - } elsif (lc($MODEL) eq "blagree") { - $in = "--in $CONTEXT_DIR/context.source.gz --in1 $CONTEXT_DIR/context.target.gz"; - $opts = "--agree-language"; - } - safesystem("$PREM_TRAIN $in --topics $NUM_TOPICS --out $OUT_CLUSTERS --iterations $NUM_ITERS $opts $PR_FLAGS") or die "Topic training failed.\n"; - } -} - -sub label_spans_with_topics { - my ($file) = (@_); - print STDERR "\n!!!LABEL SPANS\n"; - my $IN_CLUSTERS = "$CLUSTER_DIR/docs.txt.gz"; - my $OUT_SPANS = "$LABELED_DIR/labeled_spans.txt"; - if (-e $OUT_SPANS) { - print STDERR "$OUT_SPANS exists, reusing...\n"; - } else { - my $extra = "tt"; - if ($LANGUAGE eq "source") { - $extra = "ss"; - } elsif ($LANGUAGE eq "both") { - $extra = "bb"; - } else { die "Invalid language specifier $LANGUAGE\n" unless $LANGUAGE eq "target" }; - $extra = $extra . 
" tok,tag" if ($PRESERVE_PHRASES); - safesystem("$ZCAT $IN_CLUSTERS > $CLUSTER_DIR/clusters.txt") or die "Failed to unzip"; - safesystem("$EXTRACTOR --base_phrase_spans -i $CORPUS_CLUSTER -c $ITEMS_IN_MEMORY -L $BASE_PHRASE_MAX_SIZE -S $CONTEXT_SIZE | $S2L $CLUSTER_DIR/clusters.txt $CONTEXT_SIZE $LABEL_THRESHOLD $extra > $OUT_SPANS") or die "Failed to label spans"; - unlink("$CLUSTER_DIR/clusters.txt") or warn "Failed to remove $CLUSTER_DIR/clusters.txt"; - safesystem("paste -d ' ' $CORPUS_LEX $OUT_SPANS | sed 's/ *||| *\$//' > $LABELED_DIR/corpus.src_trg_al_label") or die "Couldn't paste"; - } -} - -sub extract_freqs { - print STDERR "\n!!!EXTRACTING FREQUENCIES\n"; - my $IN_COARSE = "$LABELED_DIR_C/labeled_spans.txt"; - my $IN_FINE = "$LABELED_DIR_F/labeled_spans.txt"; - my $OUT_SPANS = "$LABELED_DIR_F/labeled_spans.hier$NUM_TOPICS_COARSE-$NUM_TOPICS_FINE.txt"; - my $FREQS = "$LABELED_DIR_F/label_freq.hier$NUM_TOPICS_COARSE-$NUM_TOPICS_FINE.txt"; - my $COARSE_EXPR = "\'s/\\(X[0-9][0-9]*\\)/\\1c/g\'"; #' - my $FINE_EXPR = "\'s/\\(X[0-9][0-9]*\\)/\\1f/g\'"; #' - my %finehier = (); - if (-e $OUT_SPANS) { - print STDERR "$OUT_SPANS exists, reusing...\n"; - } else { - safesystem("paste -d ' ' $IN_COARSE $IN_FINE > $OUT_SPANS"); - } - open SPANS, $OUT_SPANS or die $!; - while (<SPANS>) { - my ($tmp, $coarse, $fine) = split /\|\|\|/; - my @coarse_spans = $coarse =~ /\d+-\d+:X(\d+)/g; - my @fine_spans = $fine =~ /\d+-\d+:X(\d+)/g; - - foreach my $i (0..(scalar @coarse_spans)-1) { - my $coarse_cat = $coarse_spans[$i]; - my $fine_cat = $fine_spans[$i]; - - $FREQ_HIER{$coarse_cat}{$fine_cat}++; - } - } - close SPANS; - foreach (values %FREQ_HIER) { - my $coarse_freq = $_; - my $total = 0; - $total+=$_ for (values %{ $coarse_freq }); - $coarse_freq->{$_}=log($coarse_freq->{$_}/$total) for (keys %{ $coarse_freq }); - } - open FREQS, ">", $FREQS or die $!; - foreach my $coarse_cat (keys %FREQ_HIER) { - print FREQS "$coarse_cat |||"; - foreach my $fine_cat (keys %{$FREQ_HIER{$coarse_cat}}) { - my $res = $FREQ_HIER{$coarse_cat}{$fine_cat}; - print FREQS " $fine_cat:$res"; - if(! exists $finehier{$fine_cat} || $finehier{$fine_cat} < $res) { - $finehier{$fine_cat} = $coarse_cat; - } - } - print FREQS "\n"; - } -# foreach my $fine_cat (keys %finehier) { -# print FREQS "$fine_cat -> $finehier{$fine_cat}\n"; -# } - close FREQS; - $CLUSTER_DIR = $CLUSTER_DIR_F; -} - -sub grammar_extract { - my $LABELED = "$LABELED_DIR/corpus.src_trg_al_label"; - print STDERR "\n!!!EXTRACTING GRAMMAR\n"; - my $OUTGRAMMAR = "$GRAMMAR_DIR/grammar.gz"; - if (-e $OUTGRAMMAR) { - print STDERR "$OUTGRAMMAR exists, reusing...\n"; - } else { - my $BACKOFF_ARG = ($BACKOFF_GRAMMAR ? "-g" : ""); - my $DEFAULT_CAT_ARG = ($DEFAULT_CAT ? "-d X" : ""); - safesystem("$EXTRACTOR -i $LABELED -c $ITEMS_IN_MEMORY -L $BASE_PHRASE_MAX_SIZE -t $NUM_TOPICS $BACKOFF_ARG $DEFAULT_CAT_ARG | $SORT_KEYS | $REDUCER -p | $GZIP > $OUTGRAMMAR") or die "Couldn't extract grammar"; - } - return $OUTGRAMMAR; -} - -sub grammar_extract_bidir { -#gzcat ex.output.gz | ./mr_stripe_rule_reduce -p -b | sort -t $'\t' -k 1 | ./mr_stripe_rule_reduce | gzip > phrase-table.gz - my $LABELED = "$LABELED_DIR/corpus.src_trg_al_label"; - print STDERR "\n!!!EXTRACTING GRAMMAR\n"; - my $OUTGRAMMAR = "$GRAMMAR_DIR/grammar.bidir.gz"; - if (-e $OUTGRAMMAR) { - print STDERR "$OUTGRAMMAR exists, reusing...\n"; - } else { - my $BACKOFF_ARG = ($BACKOFF_GRAMMAR ? 
"-g" : ""); - safesystem("$EXTRACTOR -i $LABELED -c $ITEMS_IN_MEMORY -L $BASE_PHRASE_MAX_SIZE -b -t $NUM_TOPICS $BACKOFF_ARG | $SORT_KEYS | $REDUCER -p -b | $SORT_KEYS | $REDUCER | $GZIP > $OUTGRAMMAR") or die "Couldn't extract grammar"; - } - return $OUTGRAMMAR; -} - -sub safesystem { - print STDERR "Executing: @_\n"; - system(@_); - if ($? == -1) { - print STDERR "ERROR: Failed to execute: @_\n $!\n"; - exit(1); - } - elsif ($? & 127) { - printf STDERR "ERROR: Execution of: @_\n died with signal %d, %s coredump\n", - ($? & 127), ($? & 128) ? 'with' : 'without'; - exit(1); - } - else { - my $exitcode = $? >> 8; - print STDERR "Exit code: $exitcode\n" if $exitcode; - return ! $exitcode; - } -} - diff --git a/gi/pipeline/lticluster.config b/gi/pipeline/lticluster.config deleted file mode 100644 index 3e23c8cb..00000000 --- a/gi/pipeline/lticluster.config +++ /dev/null @@ -1,9 +0,0 @@ -# THIS FILE GIVES THE LOCATIONS OF THE CORPORA USED -# name path aligned-corpus LM dev dev-refs test1 testt-eval.sh ... -/home/cdyer/ws10smt-data -btec /home/cdyer/ws10smt-data/btec/ split.zh-en.al lm/en.3gram.lm.gz devtest/devset1_2.zh devtest/devset1_2.lc.en* devtest/devset3.zh eval-devset3.sh -zhen /home/cdyer/ws10smt-data/chinese-english corpus.zh-en.al lm/c2e.3gram.lm.gz dev_and_test/mt02.src.txt dev_and_test/mt02.ref.* dev_and_test/mt03.src.txt eval-mt03.sh -aren /home/cdyer/ws10smt-data/arabic-english corpus.ar-en-al lm/a2e.3gram.lm.gz dev_and_test/dev.src.txt dev_and_test/dev.ref.txt.* dev_and_test/mt05.src.txt eval-mt05.sh -uren /home/cdyer/ws10smt-data/urdu-english corpus.ur-en.al lm/u2e.en.lm.gz dev/dev.ur dev/dev.en* devtest/devtest.ur eval-devtest.sh -nlfr /home/cdyer/ws10smt-data/dutch-french corpus.nl-fr.al - diff --git a/gi/pipeline/scripts/filter-by-f.pl b/gi/pipeline/scripts/filter-by-f.pl deleted file mode 100755 index 0cef0606..00000000 --- a/gi/pipeline/scripts/filter-by-f.pl +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/perl -w -use strict; - -my $SCRIPT_DIR; BEGIN { use Cwd qw/ abs_path /; use File::Basename; $SCRIPT_DIR = dirname(abs_path($0)); push @INC, $SCRIPT_DIR; } - -my $REKEY="$SCRIPT_DIR/rekey.pl"; -my $REFILTER="$SCRIPT_DIR/refilter.pl"; -my $SORT="$SCRIPT_DIR/sort-by-key.sh"; -assert_exec($REKEY, $REFILTER, $SORT); - - -die "Usage: $0 NUM-TRANSLATIONS ingrammar.gz outgrammar.gz\n" unless scalar @ARGV == 3; -my $translations = shift @ARGV; -die "Need number: $translations" unless $translations > 0; -die unless $ARGV[0] =~ /\.gz$/; -die unless $ARGV[1] =~ /\.gz$/; -die if $ARGV[0] eq $ARGV[1]; -die "Can't find $ARGV[0]" unless -f $ARGV[0]; - -my $cmd = "gunzip -c $ARGV[0] | $REKEY | $SORT | $REFILTER $translations | gzip > $ARGV[1]"; -safesystem($ARGV[1], $cmd) or die "Filtering failed"; -exit 0; - -sub assert_exec { - my @files = @_; - for my $file (@files) { - die "Can't find $file - did you run make?\n" unless -e $file; - die "Can't execute $file" unless -e $file; - } -}; - -sub safesystem { - my $output = shift @_; - print STDERR "Executing: @_\n"; - system(@_); - if ($? == -1) { - print STDERR "ERROR: Failed to execute: @_\n $!\n"; - if (defined $output && -e $output) { printf STDERR "Removing $output\n"; `rm -rf $output`; } - exit(1); - } - elsif ($? & 127) { - printf STDERR "ERROR: Execution of: @_\n died with signal %d, %s coredump\n", - ($? & 127), ($? & 128) ? 'with' : 'without'; - if (defined $output && -e $output) { printf STDERR "Removing $output\n"; `rm -rf $output`; } - exit(1); - } - else { - my $exitcode = $? 
>> 8; - if ($exitcode) { - print STDERR "Exit code: $exitcode\n"; - if (defined $output && -e $output) { printf STDERR "Removing $output\n"; `rm -rf $output`; } - } - return ! $exitcode; - } -} - diff --git a/gi/pipeline/scripts/patch-corpus.pl b/gi/pipeline/scripts/patch-corpus.pl deleted file mode 100755 index c0eec43e..00000000 --- a/gi/pipeline/scripts/patch-corpus.pl +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/perl -w -use strict; - -my $PATCH = shift @ARGV; -my $TGT = 1; -my $APPEND; -while ($PATCH eq "-s" || $PATCH eq "-a") { - if ($PATCH eq "-s") { - undef $TGT; - } else { - $APPEND = 1; - } - $PATCH = shift @ARGV; -} - -die "Usage: $0 [-s] [-a] tagged.en[_fr] < lexical.en_fr_al[_...]\n" unless $PATCH; - -open P, "<$PATCH" or die "Can't read tagged corpus $PATCH: $!"; -my $first=<P>; close P; -my @fields = split / \|\|\| /, $first; -die "Bad format!" if (scalar @fields > 2); - -if (scalar @fields != 1) { - # TODO support this - die "Patching source and target not supported yet!"; -} - -my $line = 0; -open P, "<$PATCH" or die "Can't read tagged corpus $PATCH: $!"; -while(my $pline = <P>) { - chomp $pline; - $line++; - my $line = <>; - die "Too few lines in lexical corpus!" unless $line; - chomp $line; - @fields = split / \|\|\| /, $line; - my @pwords = split /\s+/, $pline; - if ($TGT) { - my @lwords = split /\s+/, $fields[1]; - die "Length mismatch in line $line!\n" unless (scalar @pwords == scalar @lwords); - if ($APPEND) { - foreach my $i (0..(scalar @pwords-1)) { - $lwords[$i] = $lwords[$i] . '_' . $pwords[$i]; - } - $fields[1] = join ' ', @lwords; - } else { - $fields[1] = $pline; - } - } else { # source side - my @lwords = split /\s+/, $fields[0]; - die "Length mismatch in line $line!\n" unless (scalar @pwords == scalar @lwords); - if ($APPEND) { - foreach my $i (0..(scalar @pwords-1)) { - $lwords[$i] = $lwords[$i] . '_' . 
$pwords[$i]; - } - $fields[0] = join ' ', @lwords; - } else { - $fields[0] = $pline; - } - } - print join ' ||| ', @fields; - print "\n"; -} - - diff --git a/gi/pipeline/scripts/refilter.pl b/gi/pipeline/scripts/refilter.pl deleted file mode 100755 index a783eb4e..00000000 --- a/gi/pipeline/scripts/refilter.pl +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/perl -w -use strict; - -my $NUM_TRANSLATIONS = shift @ARGV; -unless ($NUM_TRANSLATIONS) { $NUM_TRANSLATIONS=30; } -print STDERR "KEEPING $NUM_TRANSLATIONS TRANSLATIONS FOR SOURCE\n"; - -my $pk = ''; -my %dict; -while(<>) { - s/^(.+)\t//; - my $key = $1; - if ($key ne $pk) { - if ($pk) { - emit_dict(); - } - %dict = (); - $pk = $key; - } - my ($lhs, $f, $e, $s) = split / \|\|\| /; - my $score = 0; - if ($s =~ /XEF=([^ ]+)/) { - $score += $1; - } else { die; } - if ($s =~ /GenerativeProb=([^ ]+)/) { - $score += ($1 / 10); - } else { die; } - $dict{"$lhs ||| $f ||| $e ||| $s"} = $score; -} -emit_dict(); - -sub emit_dict { - my $cc = 0; - for my $k (sort { $dict{$a} <=> $dict{$b} } keys %dict) { - print "$k"; - $cc++; - if ($cc >= $NUM_TRANSLATIONS) { last; } - } -} - diff --git a/gi/pipeline/scripts/rekey.pl b/gi/pipeline/scripts/rekey.pl deleted file mode 100755 index 31eb86b8..00000000 --- a/gi/pipeline/scripts/rekey.pl +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/perl - -while(<>) { - my ($lhs, $f, $e, $s) = split / \|\|\| /; - $f =~ s/\[X[0-9]+\]/\[X\]/g; - print "$f\t$_"; -} - diff --git a/gi/pipeline/scripts/remove-tags-from-contexts.pl b/gi/pipeline/scripts/remove-tags-from-contexts.pl deleted file mode 100755 index 20698816..00000000 --- a/gi/pipeline/scripts/remove-tags-from-contexts.pl +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/perl -w -use strict; - -use Getopt::Long "GetOptions"; - -my $PHRASE = 'tok'; -my $CONTEXT = 'tag'; - -die "Usage: $0 [--phrase=tok|tag] [--context=tok|tag] < corpus" - unless &GetOptions('phrase=s' => \$PHRASE, 'context=s' => \$CONTEXT); - -my $lno = 0; -while(my $line = <>) { - $lno++; - chomp $line; - my @top = split /\t/, $line; - die unless (scalar @top == 2); - - my @pwords = split /\s+/, $top[0]; - foreach my $token (@pwords) { - #print $token . "\n"; - my @parts = split /_(?!.*_)/, $token; - die unless (scalar @parts == 2); - if ($PHRASE eq "tok") { - $token = $parts[0] - } elsif ($PHRASE eq "tag") { - $token = $parts[1] - } - } - - my @fields = split / \|\|\| /, $top[1]; - foreach my $i (0..((scalar @fields) / 2 - 1)) { - #print $i . ": " . $fields[2*$i] . " of " . (scalar @fields) . "\n"; - my @cwords = split /\s+/, $fields[2*$i]; - foreach my $token (@cwords) { - #print $i . ": " . $token . 
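The Perl idiom `split /_(?!.*_)/, $token` used above splits a `word_TAG` pair at its last underscore only: the negative lookahead rejects any underscore that is followed by another one, so underscores inside the word survive. An equivalent Python sketch:

    def split_tagged(token):
        # rpartition splits at the last underscore, like the lookahead regex
        word, _, tag = token.rpartition("_")
        return word, tag

    assert split_tagged("new_york_NNP") == ("new_york", "NNP")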
"\n"; - my @parts = split /_(?!.*_)/, $token; - if (scalar @parts == 2) { - if ($CONTEXT eq "tok") { - $token = $parts[0] - } elsif ($CONTEXT eq "tag") { - $token = $parts[1] - } - } - } - $fields[2*$i] = join ' ', @cwords; - } - - print join ' ', @pwords; - print "\t"; - print join ' ||| ', @fields; - print "\n"; -} diff --git a/gi/pipeline/scripts/remove-tags-from-corpus.pl b/gi/pipeline/scripts/remove-tags-from-corpus.pl deleted file mode 100755 index be3e97c0..00000000 --- a/gi/pipeline/scripts/remove-tags-from-corpus.pl +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/perl -w -use strict; - -use Getopt::Long "GetOptions"; - -my $LANGUAGE = shift @ARGV; -$LANGUAGE = 'target' unless ($LANGUAGE); - -my $lno = 0; -while(my $line = <>) { - $lno++; - chomp $line; - - my @fields = split / \|\|\| /, $line; - - if ($LANGUAGE eq "source" or $LANGUAGE eq "both") { - my @cwords = split /\s+/, $fields[0]; - foreach my $token (@cwords) { - my @parts = split /_(?!.*_)/, $token; - if (scalar @parts == 2) { - $token = $parts[0] - } else { - print STDERR "WARNING: invalid tagged token $token\n"; - } - } - $fields[0] = join ' ', @cwords; - } - - if ($LANGUAGE eq "target" or $LANGUAGE eq "both") { - my @cwords = split /\s+/, $fields[1]; - foreach my $token (@cwords) { - my @parts = split /_(?!.*_)/, $token; - if (scalar @parts == 2) { - $token = $parts[1] - } else { - print STDERR "WARNING: invalid tagged token $token\n"; - } - } - $fields[0] = join ' ', @cwords; - } - - print join ' ||| ', @fields; - print "\n"; -} diff --git a/gi/pipeline/scripts/sort-by-key.sh b/gi/pipeline/scripts/sort-by-key.sh deleted file mode 100755 index 7ae33e03..00000000 --- a/gi/pipeline/scripts/sort-by-key.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -export LANG=C -sort -t $'\t' -k 1 -T /tmp -S 6000000000 - diff --git a/gi/pipeline/scripts/xfeats.pl b/gi/pipeline/scripts/xfeats.pl deleted file mode 100755 index dc578513..00000000 --- a/gi/pipeline/scripts/xfeats.pl +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/perl -w -use strict; - -die "Usage: $0 x-grammar.scfg[.gz] < cat-grammar.scfg\n" unless scalar @ARGV > 0; - -my $xgrammar = shift @ARGV; -die "Can't find $xgrammar" unless -f $xgrammar; -my $fh; -if ($xgrammar =~ /\.gz$/) { - open $fh, "gunzip -c $xgrammar|" or die "Can't fork: $!"; -} else { - open $fh, "<$xgrammar" or die "Can't read $xgrammar: $!"; -} -print STDERR "Reading X-feats from $xgrammar...\n"; -my %dict; -while(<$fh>) { - chomp; - my ($lhs, $f, $e, $feats) = split / \|\|\| /; - my $xfeats; - my $cc = 0; - my @xfeats = (); - while ($feats =~ /(EGivenF|FGivenE|LogRuleCount|LogECount|LogFCount|SingletonRule|SingletonE|SingletonF)=([^ ]+)( |$)/og) { - push @xfeats, "X_$1=$2"; - } - #print "$lhs ||| $f ||| $e ||| @xfeats\n"; - $dict{"$lhs ||| $f ||| $e"} = "@xfeats"; -} -close $fh; - -print STDERR "Add features...\n"; -while(<>) { - chomp; - my ($lhs, $f, $e) = split / \|\|\| /; - $f=~ s/\[[^]]+,([12])\]/\[X,$1\]/g; - my $xfeats = $dict{"[X] ||| $f ||| $e"}; - die "Can't find x features for: $_\n" unless $xfeats; - print "$_ $xfeats\n"; -} - diff --git a/gi/pipeline/valhalla.config b/gi/pipeline/valhalla.config deleted file mode 100644 index e00a8485..00000000 --- a/gi/pipeline/valhalla.config +++ /dev/null @@ -1,9 +0,0 @@ -# THIS FILE GIVES THE LOCATIONS OF THE CORPORA USED -# name path aligned-corpus LM dev dev-refs test1 testt-eval.sh ... 
-/home/chris/ws10smt/data -btec /home/chris/ws10smt/data/btec/ split.zh-en.al lm/en.3gram.lm.gz devtest/devset1_2.zh devtest/devset1_2.lc.en* devtest/devset3.zh eval-devset3.sh -fbis /home/chris/ws10smt/data/chinese-english.fbis corpus.zh-en.al -zhen /home/chris/ws10smt/data/chinese-english corpus.zh-en.al -aren /home/chris/ws10smt/data/arabic-english corpus.ar-en.al -uren /home/chris/ws10smt/data/urdu-english corpus.ur-en.al lm/u2e.en.lm.gz dev/dev.ur dev/dev.en* devtest/devtest.ur eval-devtest.sh -nlfr /home/chris/ws10smt/data/dutch-french corpus.nl-fr.al diff --git a/gi/posterior-regularisation/Corpus.java b/gi/posterior-regularisation/Corpus.java deleted file mode 100644 index 07b27387..00000000 --- a/gi/posterior-regularisation/Corpus.java +++ /dev/null @@ -1,167 +0,0 @@ -import gnu.trove.TIntArrayList; - -import java.io.*; -import java.util.*; -import java.util.regex.Pattern; - -public class Corpus -{ - private Lexicon<String> tokenLexicon = new Lexicon<String>(); - private Lexicon<TIntArrayList> phraseLexicon = new Lexicon<TIntArrayList>(); - private Lexicon<TIntArrayList> contextLexicon = new Lexicon<TIntArrayList>(); - private List<Edge> edges = new ArrayList<Edge>(); - private List<List<Edge>> phraseToContext = new ArrayList<List<Edge>>(); - private List<List<Edge>> contextToPhrase = new ArrayList<List<Edge>>(); - - public class Edge - { - Edge(int phraseId, int contextId, int count) - { - this.phraseId = phraseId; - this.contextId = contextId; - this.count = count; - } - public int getPhraseId() - { - return phraseId; - } - public TIntArrayList getPhrase() - { - return phraseLexicon.lookup(phraseId); - } - public String getPhraseString() - { - StringBuffer b = new StringBuffer(); - for (int tid: getPhrase().toNativeArray()) - { - if (b.length() > 0) - b.append(" "); - b.append(tokenLexicon.lookup(tid)); - } - return b.toString(); - } - public int getContextId() - { - return contextId; - } - public TIntArrayList getContext() - { - return contextLexicon.lookup(contextId); - } - public String getContextString() - { - StringBuffer b = new StringBuffer(); - for (int tid: getContext().toNativeArray()) - { - if (b.length() > 0) - b.append(" "); - b.append(tokenLexicon.lookup(tid)); - } - return b.toString(); - } - public int getCount() - { - return count; - } - private int phraseId; - private int contextId; - private int count; - } - - List<Edge> getEdges() - { - return edges; - } - - int getNumEdges() - { - return edges.size(); - } - - int getNumPhrases() - { - return phraseLexicon.size(); - } - - List<Edge> getEdgesForPhrase(int phraseId) - { - return phraseToContext.get(phraseId); - } - - int getNumContexts() - { - return contextLexicon.size(); - } - - List<Edge> getEdgesForContext(int contextId) - { - return contextToPhrase.get(contextId); - } - - int getNumTokens() - { - return tokenLexicon.size(); - } - - static Corpus readFromFile(Reader in) throws IOException - { - Corpus c = new Corpus(); - - // read in line-by-line - BufferedReader bin = new BufferedReader(in); - String line; - Pattern separator = Pattern.compile(" \\|\\|\\| "); - - while ((line = bin.readLine()) != null) - { - // split into phrase and contexts - StringTokenizer st = new StringTokenizer(line, "\t"); - assert (st.hasMoreTokens()); - String phraseToks = st.nextToken(); - assert (st.hasMoreTokens()); - String rest = st.nextToken(); - assert (!st.hasMoreTokens()); - - // process phrase - st = new StringTokenizer(phraseToks, " "); - TIntArrayList ptoks = new TIntArrayList(); - while (st.hasMoreTokens()) - 
ptoks.add(c.tokenLexicon.insert(st.nextToken())); - int phraseId = c.phraseLexicon.insert(ptoks); - if (phraseId == c.phraseToContext.size()) - c.phraseToContext.add(new ArrayList<Edge>()); - - // process contexts - String[] parts = separator.split(rest); - assert (parts.length % 2 == 0); - for (int i = 0; i < parts.length; i += 2) - { - // process pairs of strings - context and count - TIntArrayList ctx = new TIntArrayList(); - String ctxString = parts[i]; - String countString = parts[i + 1]; - StringTokenizer ctxStrtok = new StringTokenizer(ctxString, " "); - while (ctxStrtok.hasMoreTokens()) - { - String token = ctxStrtok.nextToken(); - if (!token.equals("<PHRASE>")) - ctx.add(c.tokenLexicon.insert(token)); - } - int contextId = c.contextLexicon.insert(ctx); - if (contextId == c.contextToPhrase.size()) - c.contextToPhrase.add(new ArrayList<Edge>()); - - assert (countString.startsWith("C=")); - Edge e = c.new Edge(phraseId, contextId, - Integer.parseInt(countString.substring(2).trim())); - c.edges.add(e); - - // index the edge for fast phrase, context lookup - c.phraseToContext.get(phraseId).add(e); - c.contextToPhrase.get(contextId).add(e); - } - } - - return c; - } -} diff --git a/gi/posterior-regularisation/Lexicon.java b/gi/posterior-regularisation/Lexicon.java deleted file mode 100644 index 9f0245ee..00000000 --- a/gi/posterior-regularisation/Lexicon.java +++ /dev/null @@ -1,32 +0,0 @@ -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class Lexicon<T> -{ - public int insert(T word) - { - Integer i = wordToIndex.get(word); - if (i == null) - { - i = indexToWord.size(); - wordToIndex.put(word, i); - indexToWord.add(word); - } - return i; - } - - public T lookup(int index) - { - return indexToWord.get(index); - } - - public int size() - { - return indexToWord.size(); - } - - private Map<T, Integer> wordToIndex = new HashMap<T, Integer>(); - private List<T> indexToWord = new ArrayList<T>(); -}
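`Lexicon<T>` above is the usual interning pattern: a bidirectional map between items and dense integer ids, which lets phrases and contexts be stored as int arrays. The same structure as a Python sketch:

    class Lexicon:
        """Bidirectional map between items and dense integer ids."""
        def __init__(self):
            self.word_to_index = {}
            self.index_to_word = []

        def insert(self, word):
            i = self.word_to_index.get(word)
            if i is None:
                i = len(self.index_to_word)
                self.word_to_index[word] = i
                self.index_to_word.append(word)
            return i

        def lookup(self, index):
            return self.index_to_word[index]

    lex = Lexicon()
    assert lex.insert("the") == 0 and lex.insert("cat") == 1
    assert lex.insert("the") == 0   # re-inserting returns the existing id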
\ No newline at end of file diff --git a/gi/posterior-regularisation/PhraseContextModel.java b/gi/posterior-regularisation/PhraseContextModel.java deleted file mode 100644 index 85bcfb89..00000000 --- a/gi/posterior-regularisation/PhraseContextModel.java +++ /dev/null @@ -1,466 +0,0 @@ -// Input of the form: -// " the phantom of the opera " tickets for <PHRASE> tonight ? ||| C=1 ||| seats for <PHRASE> ? </s> ||| C=1 ||| i see <PHRASE> ? </s> ||| C=1 -// phrase TAB [context]+ -// where context = phrase ||| C=... which are separated by ||| - -// Model parameterised as follows: -// - each phrase, p, is allocated a latent state, t -// - this is used to generate the contexts, c -// - each context is generated using 4 independent multinomials, one for each position LL, L, R, RR - -// Training with EM: -// - e-step is estimating q(t) = P(t|p,c) for all x,c -// - m-step is estimating model parameters P(c,t|p) = P(t) P(c|t) -// - PR uses alternate e-step, which first optimizes lambda -// min_q KL(q||p) + delta sum_pt max_c E_q[phi_ptc] -// where -// q(t|p,c) propto p(t,c|p) exp( -phi_ptc ) -// Then q is used to obtain expectations for vanilla M-step. - -// Sexing it up: -// - learn p-specific conditionals P(t|p) -// - or generate phrase internals, e.g., generate edge words from -// different distribution to central words -// - agreement between phrase->context model and context->phrase model - -import java.io.*; -import optimization.gradientBasedMethods.*; -import optimization.gradientBasedMethods.stats.OptimizerStats; -import optimization.gradientBasedMethods.stats.ProjectedOptimizerStats; -import optimization.linesearch.ArmijoLineSearchMinimizationAlongProjectionArc; -import optimization.linesearch.GenericPickFirstStep; -import optimization.linesearch.InterpolationPickFirstStep; -import optimization.linesearch.LineSearchMethod; -import optimization.linesearch.WolfRuleLineSearch; -import optimization.projections.SimplexProjection; -import optimization.stopCriteria.CompositeStopingCriteria; -import optimization.stopCriteria.NormalizedProjectedGradientL2Norm; -import optimization.stopCriteria.NormalizedValueDifference; -import optimization.stopCriteria.ProjectedGradientL2Norm; -import optimization.stopCriteria.StopingCriteria; -import optimization.stopCriteria.ValueDifference; -import optimization.util.MathUtils; -import java.util.*; -import java.util.regex.*; -import gnu.trove.TDoubleArrayList; -import gnu.trove.TIntArrayList; -import static java.lang.Math.*; - -class PhraseContextModel -{ - // model/optimisation configuration parameters - int numTags; - boolean posteriorRegularisation = true; - double constraintScale = 3; // FIXME: make configurable - - // copied from L1LMax in depparsing code - final double c1= 0.0001, c2=0.9, stoppingPrecision = 1e-5, maxStep = 10; - final int maxZoomEvals = 10, maxExtrapolationIters = 200; - int maxProjectionIterations = 200; - int minOccurrencesForProjection = 0; - - // book keeping - int numPositions; - Random rng = new Random(); - - // training set - Corpus training; - - // model parameters (learnt) - double emissions[][][]; // position in 0 .. 
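The model described in the header comments above factorises as P(t, c | p) = P(t | p) * prod_k P(c_k | t, k): a phrase-specific tag prior times one independent emission multinomial per context position. A tiny Python sketch of the resulting normalised posterior, with made-up parameters:

    def posterior(prior_t, emissions, context):
        """prior_t[t] = P(t | phrase); emissions[k][t][w] = P(w | t, position k)."""
        probs = list(prior_t)
        for k, w in enumerate(context):
            for t in range(len(probs)):
                probs[t] *= emissions[k][t][w]
        z = sum(probs)
        return [p / z for p in probs], z

    probs, z = posterior(
        [0.5, 0.5],
        [[[0.9, 0.1], [0.2, 0.8]],   # position 0: P(w | t, 0)
         [[0.6, 0.4], [0.5, 0.5]]],  # position 1: P(w | t, 1)
        context=[0, 1])
    # probs ~ [0.78, 0.22]; summing log(z) over edges gives the EM log-likelihood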
3 x tag x word Pr(word | tag, position) - double prior[][]; // phrase x tag Pr(tag | phrase) - double lambda[]; // edge = (phrase, context) x tag flattened lagrange multipliers - - PhraseContextModel(Corpus training, int tags) - { - this.training = training; - this.numTags = tags; - assert (!training.getEdges().isEmpty()); - assert (numTags > 1); - - // now initialise emissions - numPositions = training.getEdges().get(0).getContext().size(); - assert (numPositions > 0); - - emissions = new double[numPositions][numTags][training.getNumTokens()]; - prior = new double[training.getNumEdges()][numTags]; - if (posteriorRegularisation) - lambda = new double[training.getNumEdges() * numTags]; - - for (double[][] emissionTW : emissions) - { - for (double[] emissionW : emissionTW) - { - randomise(emissionW); -// for (int i = 0; i < emissionW.length; ++i) -// emissionW[i] = i+1; -// normalise(emissionW); - } - } - - for (double[] priorTag : prior) - { - randomise(priorTag); -// for (int i = 0; i < priorTag.length; ++i) -// priorTag[i] = i+1; -// normalise(priorTag); - } - } - - void expectationMaximisation(int numIterations) - { - double lastLlh = Double.NEGATIVE_INFINITY; - - for (int iteration = 0; iteration < numIterations; ++iteration) - { - double emissionsCounts[][][] = new double[numPositions][numTags][training.getNumTokens()]; - double priorCounts[][] = new double[training.getNumPhrases()][numTags]; - - // E-step - double llh = 0; - if (posteriorRegularisation) - { - EStepDualObjective objective = new EStepDualObjective(); - - // copied from x2y2withconstraints -// LineSearchMethod ls = new ArmijoLineSearchMinimizationAlongProjectionArc(new InterpolationPickFirstStep(1)); -// OptimizerStats stats = new OptimizerStats(); -// ProjectedGradientDescent optimizer = new ProjectedGradientDescent(ls); -// CompositeStopingCriteria compositeStop = new CompositeStopingCriteria(); -// compositeStop.add(new ProjectedGradientL2Norm(0.001)); -// compositeStop.add(new ValueDifference(0.001)); -// optimizer.setMaxIterations(50); -// boolean succeed = optimizer.optimize(objective,stats,compositeStop); - - // copied from depparser l1lmaxobjective - ProjectedOptimizerStats stats = new ProjectedOptimizerStats(); - GenericPickFirstStep pickFirstStep = new GenericPickFirstStep(1); - LineSearchMethod linesearch = new WolfRuleLineSearch(pickFirstStep, c1, c2); - ProjectedGradientDescent optimizer = new ProjectedGradientDescent(linesearch); - optimizer.setMaxIterations(maxProjectionIterations); - CompositeStopingCriteria stop = new CompositeStopingCriteria(); - stop.add(new NormalizedProjectedGradientL2Norm(stoppingPrecision)); - stop.add(new NormalizedValueDifference(stoppingPrecision)); - boolean succeed = optimizer.optimize(objective, stats, stop); - - System.out.println("Ended optimzation Projected Gradient Descent\n" + stats.prettyPrint(1)); - //System.out.println("Solution: " + objective.parameters); - if (!succeed) - System.out.println("Failed to optimize"); - //System.out.println("Ended optimization in " + optimizer.getCurrentIteration()); - - //lambda = objective.getParameters(); - llh = objective.primal(); - - for (int i = 0; i < training.getNumPhrases(); ++i) - { - List<Corpus.Edge> edges = training.getEdgesForPhrase(i); - for (int j = 0; j < edges.size(); ++j) - { - Corpus.Edge e = edges.get(j); - for (int t = 0; t < numTags; t++) - { - double p = objective.q.get(i).get(j).get(t); - priorCounts[i][t] += e.getCount() * p; - TIntArrayList tokens = e.getContext(); - for (int k = 0; k < tokens.size(); ++k) - 
emissionsCounts[k][t][tokens.get(k)] += e.getCount() * p; - } - } - } - } - else - { - for (int i = 0; i < training.getNumPhrases(); ++i) - { - List<Corpus.Edge> edges = training.getEdgesForPhrase(i); - for (int j = 0; j < edges.size(); ++j) - { - Corpus.Edge e = edges.get(j); - double probs[] = posterior(i, e); - double z = normalise(probs); - llh += log(z) * e.getCount(); - - TIntArrayList tokens = e.getContext(); - for (int t = 0; t < numTags; ++t) - { - priorCounts[i][t] += e.getCount() * probs[t]; - for (int k = 0; k < tokens.size(); ++k) - emissionsCounts[j][t][tokens.get(k)] += e.getCount() * probs[t]; - } - } - } - } - - // M-step: normalise - for (double[][] emissionTW : emissionsCounts) - for (double[] emissionW : emissionTW) - normalise(emissionW); - - for (double[] priorTag : priorCounts) - normalise(priorTag); - - emissions = emissionsCounts; - prior = priorCounts; - - System.out.println("Iteration " + iteration + " llh " + llh); - -// if (llh - lastLlh < 1e-4) -// break; -// else -// lastLlh = llh; - } - } - - static double normalise(double probs[]) - { - double z = 0; - for (double p : probs) - z += p; - for (int i = 0; i < probs.length; ++i) - probs[i] /= z; - return z; - } - - void randomise(double probs[]) - { - double z = 0; - for (int i = 0; i < probs.length; ++i) - { - probs[i] = 10 + rng.nextDouble(); - z += probs[i]; - } - - for (int i = 0; i < probs.length; ++i) - probs[i] /= z; - } - - static int argmax(double probs[]) - { - double m = Double.NEGATIVE_INFINITY; - int mi = -1; - for (int i = 0; i < probs.length; ++i) - { - if (probs[i] > m) - { - m = probs[i]; - mi = i; - } - } - return mi; - } - - double[] posterior(int phraseId, Corpus.Edge e) // unnormalised - { - double probs[] = new double[numTags]; - TIntArrayList tokens = e.getContext(); - for (int t = 0; t < numTags; ++t) - { - probs[t] = prior[phraseId][t]; - for (int k = 0; k < tokens.size(); ++k) - probs[t] *= emissions[k][t][tokens.get(k)]; - } - return probs; - } - - void displayPosterior() - { - for (int i = 0; i < training.getNumPhrases(); ++i) - { - List<Corpus.Edge> edges = training.getEdgesForPhrase(i); - for (Corpus.Edge e: edges) - { - double probs[] = posterior(i, e); - normalise(probs); - - // emit phrase - System.out.print(e.getPhraseString()); - System.out.print("\t"); - System.out.print(e.getContextString()); - System.out.print("||| C=" + e.getCount() + " |||"); - - int t = argmax(probs); - System.out.print(" " + t + " ||| " + probs[t]); - // for (int t = 0; t < numTags; ++t) - // System.out.print(" " + probs[t]); - System.out.println(); - } - } - } - - public static void main(String[] args) - { - assert (args.length >= 2); - try - { - Corpus corpus = Corpus.readFromFile(new FileReader(new File(args[0]))); - PhraseContextModel model = new PhraseContextModel(corpus, Integer.parseInt(args[1])); - model.expectationMaximisation(Integer.parseInt(args[2])); - model.displayPosterior(); - } - catch (IOException e) - { - System.out.println("Failed to read input file: " + args[0]); - e.printStackTrace(); - } - } - - class EStepDualObjective extends ProjectedObjective - { - List<List<TDoubleArrayList>> conditionals; // phrase id x context # x tag - precomputed - List<List<TDoubleArrayList>> q; // ditto, but including exp(-lambda) terms - double objective = 0; // log(z) - // Objective.gradient = d log(z) / d lambda = E_q[phi] - double llh = 0; - - public EStepDualObjective() - { - super(); - // compute conditionals p(context, tag | phrase) for all training instances - conditionals = new 
ArrayList<List<TDoubleArrayList>>(training.getNumPhrases()); - q = new ArrayList<List<TDoubleArrayList>>(training.getNumPhrases()); - for (int i = 0; i < training.getNumPhrases(); ++i) - { - List<Corpus.Edge> edges = training.getEdgesForPhrase(i); - - conditionals.add(new ArrayList<TDoubleArrayList>(edges.size())); - q.add(new ArrayList<TDoubleArrayList>(edges.size())); - - for (int j = 0; j < edges.size(); ++j) - { - Corpus.Edge e = edges.get(j); - double probs[] = posterior(i, e); - double z = normalise(probs); - llh += log(z) * e.getCount(); - conditionals.get(i).add(new TDoubleArrayList(probs)); - q.get(i).add(new TDoubleArrayList(probs)); - } - } - - gradient = new double[training.getNumEdges()*numTags]; - setInitialParameters(lambda); - computeObjectiveAndGradient(); - } - - @Override - public double[] projectPoint(double[] point) - { - SimplexProjection p = new SimplexProjection(constraintScale); - - double[] newPoint = point.clone(); - int edgeIndex = 0; - for (int i = 0; i < training.getNumPhrases(); ++i) - { - List<Corpus.Edge> edges = training.getEdgesForPhrase(i); - - for (int t = 0; t < numTags; t++) - { - double[] subPoint = new double[edges.size()]; - for (int j = 0; j < edges.size(); ++j) - subPoint[j] = point[edgeIndex+j*numTags+t]; - - p.project(subPoint); - for (int j = 0; j < edges.size(); ++j) - newPoint[edgeIndex+j*numTags+t] = subPoint[j]; - } - - edgeIndex += edges.size() * numTags; - } -// System.out.println("Proj from: " + Arrays.toString(point)); -// System.out.println("Proj to: " + Arrays.toString(newPoint)); - return newPoint; - } - - @Override - public void setParameters(double[] params) - { - super.setParameters(params); - computeObjectiveAndGradient(); - } - - @Override - public double[] getGradient() - { - gradientCalls += 1; - return gradient; - } - - @Override - public double getValue() - { - functionCalls += 1; - return objective; - } - - public void computeObjectiveAndGradient() - { - int edgeIndex = 0; - objective = 0; - Arrays.fill(gradient, 0); - for (int i = 0; i < training.getNumPhrases(); ++i) - { - List<Corpus.Edge> edges = training.getEdgesForPhrase(i); - - for (int j = 0; j < edges.size(); ++j) - { - Corpus.Edge e = edges.get(j); - - double z = 0; - for (int t = 0; t < numTags; t++) - { - double v = conditionals.get(i).get(j).get(t) * exp(-parameters[edgeIndex+t]); - q.get(i).get(j).set(t, v); - z += v; - } - objective += log(z) * e.getCount(); - - for (int t = 0; t < numTags; t++) - { - double v = q.get(i).get(j).get(t) / z; - q.get(i).get(j).set(t, v); - gradient[edgeIndex+t] -= e.getCount() * v; - } - - edgeIndex += numTags; - } - } -// System.out.println("computeObjectiveAndGradient logz=" + objective); -// System.out.println("lambda= " + Arrays.toString(parameters)); -// System.out.println("gradient=" + Arrays.toString(gradient)); - } - - public String toString() - { - StringBuilder sb = new StringBuilder(); - sb.append(getClass().getCanonicalName()).append(" with "); - sb.append(parameters.length).append(" parameters and "); - sb.append(training.getNumPhrases() * numTags).append(" constraints"); - return sb.toString(); - } - - double primal() - { - // primal = llh + KL(q||p) + scale * sum_pt max_c E_q[phi_pct] - // kl = sum_Y q(Y) log q(Y) / p(Y|X) - // = sum_Y q(Y) { -lambda . phi(Y) - log Z } - // = -log Z - lambda . E_q[phi] - // = -objective + lambda . 
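`projectPoint()` above projects, for each (phrase, tag) block, the corresponding slice of the multipliers onto a simplex scaled by `constraintScale`. A Python sketch of that projection, assuming the budget form {x : x >= 0, sum(x) <= z} and the sort-based algorithm of Duchi et al. (2008); this is an independent reimplementation, not the `SimplexProjection` code:

    def project_simplex(v, z):
        w = [max(x, 0.0) for x in v]
        if sum(w) <= z:              # clipping alone is already feasible
            return w
        u = sorted(v, reverse=True)
        cumsum, theta = 0.0, 0.0
        for i, ui in enumerate(u, start=1):
            cumsum += ui
            t = (cumsum - z) / i
            if ui - t > 0:           # holds for a prefix of i; keep the last theta
                theta = t
        return [max(x - theta, 0.0) for x in v]

    assert project_simplex([0.2, 0.1], 1.0) == [0.2, 0.1]
    assert project_simplex([0.8, 0.6, -0.2], 1.0) == [0.6, 0.4, 0.0]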
gradient - - double kl = -objective + MathUtils.dotProduct(parameters, gradient); - double l1lmax = 0; - for (int i = 0; i < training.getNumPhrases(); ++i) - { - List<Corpus.Edge> edges = training.getEdgesForPhrase(i); - for (int t = 0; t < numTags; t++) - { - double lmax = Double.NEGATIVE_INFINITY; - for (int j = 0; j < edges.size(); ++j) - lmax = max(lmax, q.get(i).get(j).get(t)); - l1lmax += lmax; - } - } - - return llh + kl + constraintScale * l1lmax; - } - } -} diff --git a/gi/posterior-regularisation/README b/gi/posterior-regularisation/README deleted file mode 100644 index a3d54ffc..00000000 --- a/gi/posterior-regularisation/README +++ /dev/null @@ -1,3 +0,0 @@ - 557 ./cdec_extools/extractor -i btec/split.zh-en.al -c 500000 -L 12 -C | sort -t $'\t' -k 1 | ./cdec_extools/mr_stripe_rule_reduce > btec.concordance - 559 wc -l btec.concordance - 588 cat btec.concordance | sed 's/.* //' | awk '{ for (i=1; i < NF; i++) { x=substr($i, 1, 2); if (x == "C=") printf "\n"; else if (x != "||") printf "%s ", $i; }; printf "\n"; }' | sort | uniq | wc -l diff --git a/gi/posterior-regularisation/alphabet.hh b/gi/posterior-regularisation/alphabet.hh deleted file mode 100644 index 1db928da..00000000 --- a/gi/posterior-regularisation/alphabet.hh +++ /dev/null @@ -1,61 +0,0 @@ -#ifndef _alphabet_hh -#define _alphabet_hh - -#include <cassert> -#include <iosfwd> -#include <map> -#include <string> -#include <vector> - -// Alphabet: indexes a set of types -template <typename T> -class Alphabet: protected std::map<T, int> -{ -public: - Alphabet() {}; - - bool empty() const { return std::map<T,int>::empty(); } - int size() const { return std::map<T,int>::size(); } - - int operator[](const T &k) const - { - typename std::map<T,int>::const_iterator cit = find(k); - if (cit != std::map<T,int>::end()) - return cit->second; - else - return -1; - } - - int lookup(const T &k) const { return (*this)[k]; } - - int insert(const T &k) - { - int sz = size(); - assert((unsigned) sz == _items.size()); - - std::pair<typename std::map<T,int>::iterator, bool> - ins = std::map<T,int>::insert(make_pair(k, sz)); - - if (ins.second) - _items.push_back(k); - - return ins.first->second; - } - - const T &type(int i) const - { - assert(i >= 0); - assert(i < size()); - return _items[i]; - } - - std::ostream &display(std::ostream &out, int i) const - { - return out << type(i); - } - -private: - std::vector<T> _items; -}; - -#endif diff --git a/gi/posterior-regularisation/canned.concordance b/gi/posterior-regularisation/canned.concordance deleted file mode 100644 index 710973ff..00000000 --- a/gi/posterior-regularisation/canned.concordance +++ /dev/null @@ -1,4 +0,0 @@ -a 0 0 <PHRASE> 0 0 ||| C=1 ||| 1 1 <PHRASE> 1 1 ||| C=1 ||| 2 2 <PHRASE> 2 2 ||| C=1 -b 0 0 <PHRASE> 0 0 ||| C=1 ||| 1 1 <PHRASE> 1 1 ||| C=1 -c 2 2 <PHRASE> 2 2 ||| C=1 ||| 4 4 <PHRASE> 4 4 ||| C=1 ||| 5 5 <PHRASE> 5 5 ||| C=1 -d 4 4 <PHRASE> 4 4 ||| C=1 ||| 5 5 <PHRASE> 5 5 ||| C=1 diff --git a/gi/posterior-regularisation/em.cc b/gi/posterior-regularisation/em.cc deleted file mode 100644 index f6c9fd68..00000000 --- a/gi/posterior-regularisation/em.cc +++ /dev/null @@ -1,830 +0,0 @@ -// Input of the form: -// " the phantom of the opera " tickets for <PHRASE> tonight ? ||| C=1 ||| seats for <PHRASE> ? </s> ||| C=1 ||| i see <PHRASE> ? </s> ||| C=1 -// phrase TAB [context]+ -// where context = phrase ||| C=... 
which are separated by ||| - -// Model parameterised as follows: -// - each phrase, p, is allocated a latent state, t -// - this is used to generate the contexts, c -// - each context is generated using 4 independent multinomials, one for each position LL, L, R, RR - -// Training with EM: -// - e-step is estimating P(t|p,c) for all x,c -// - m-step is estimating model parameters P(p,c,t) = P(t) P(p|t) P(c|t) - -// Sexing it up: -// - constrain the posteriors P(t|c) and P(t|p) to have few high-magnitude entries -// - improve the generation of phrase internals, e.g., generate edge words from -// different distribution to central words - -#include "alphabet.hh" -#include "log_add.hh" -#include <algorithm> -#include <fstream> -#include <iostream> -#include <iterator> -#include <map> -#include <sstream> -#include <stdexcept> -#include <vector> -#include <tr1/random> -#include <tr1/tuple> -#include <nlopt.h> - -using namespace std; -using namespace std::tr1; - -const int numTags = 5; -const int numIterations = 100; -const bool posterior_regularisation = true; -const double PHRASE_VIOLATION_WEIGHT = 10; -const double CONTEXT_VIOLATION_WEIGHT = 0; -const bool includePhraseProb = false; - -// Data structures: -Alphabet<string> lexicon; -typedef vector<int> Phrase; -typedef tuple<int, int, int, int> Context; -Alphabet<Phrase> phrases; -Alphabet<Context> contexts; - -typedef map<int, int> ContextCounts; -typedef map<int, int> PhraseCounts; -typedef map<int, ContextCounts> PhraseToContextCounts; -typedef map<int, PhraseCounts> ContextToPhraseCounts; - -PhraseToContextCounts concordancePhraseToContexts; -ContextToPhraseCounts concordanceContextToPhrases; - -typedef vector<double> Dist; -typedef vector<Dist> ConditionalDist; -Dist prior; // class -> P(class) -vector<ConditionalDist> probCtx; // word -> class -> P(word | class), for each position of context word -ConditionalDist probPhrase; // class -> P(word | class) -Dist probPhraseLength; // class -> P(length | class) expressed as geometric distribution parameter - -mt19937 randomGenerator((size_t) time(NULL)); -uniform_real<double> uniDist(0.0, 1e-1); -variate_generator< mt19937, uniform_real<double> > rng(randomGenerator, uniDist); - -void addRandomNoise(Dist &d); -void normalise(Dist &d); -void addTo(Dist &d, const Dist &e); -int argmax(const Dist &d); - -map<Phrase, map<Context, int> > lambda_indices; - -Dist conditional_probs(const Phrase &phrase, const Context &context, double *normalisation = 0); -template <typename T> -Dist -penalised_conditionals(const Phrase &phrase, const Context &context, - const T &lambda, double *normalisation); -//Dist penalised_conditionals(const Phrase &phrase, const Context &context, const double *lambda, double *normalisation = 0); -double penalised_log_likelihood(int n, const double *lambda, double *gradient, void *data); -void optimise_lambda(double delta, double gamma, vector<double> &lambda); -double expected_violation_phrases(const double *lambda); -double expected_violation_contexts(const double *lambda); -double primal_kl_divergence(const double *lambda); -double dual(const double *lambda); -void print_primal_dual(const double *lambda, double delta, double gamma); - -ostream &operator<<(ostream &, const Phrase &); -ostream &operator<<(ostream &, const Context &); -ostream &operator<<(ostream &, const Dist &); -ostream &operator<<(ostream &, const ConditionalDist &); - -int -main(int argc, char *argv[]) -{ - randomGenerator.seed(time(NULL)); - - int edges = 0; - istream &input = cin; - while (input.good()) - { 
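The input format the header above describes (and which `canned.concordance` illustrates) is `phrase<TAB>context ||| C=n ||| context ||| C=n ...`. A Python sketch of reading one line into (phrase tokens, [(context tokens, count), ...]):

    def parse_line(line):
        phrase, rest = line.rstrip("\n").split("\t", 1)
        parts = rest.split(" ||| ")
        assert len(parts) % 2 == 0, "contexts and C= counts must alternate"
        pairs = []
        for ctx, cnt in zip(parts[0::2], parts[1::2]):
            assert cnt.startswith("C=")
            pairs.append((ctx.split(), int(cnt[2:])))
        return phrase.split(), pairs

    toks, ctxs = parse_line("a b\t0 0 <PHRASE> 0 0 ||| C=1 ||| 1 1 <PHRASE> 1 1 ||| C=2\n")
    assert ctxs[1] == (["1", "1", "<PHRASE>", "1", "1"], 2)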
- // read the phrase - string phraseString; - Phrase phrase; - getline(input, phraseString, '\t'); - istringstream pinput(phraseString); - string token; - while (pinput >> token) - phrase.push_back(lexicon.insert(token)); - int phraseId = phrases.insert(phrase); - - // read the rest, storing each context - string remainder; - getline(input, remainder, '\n'); - istringstream rinput(remainder); - Context context(-1, -1, -1, -1); - int index = 0; - while (rinput >> token) - { - if (token != "|||" && token != "<PHRASE>") - { - if (index < 4) - { - // eugh! damn templates - switch (index) - { - case 0: get<0>(context) = lexicon.insert(token); break; - case 1: get<1>(context) = lexicon.insert(token); break; - case 2: get<2>(context) = lexicon.insert(token); break; - case 3: get<3>(context) = lexicon.insert(token); break; - default: assert(false); - } - index += 1; - } - else if (token.find("C=") == 0) - { - int contextId = contexts.insert(context); - int count = atoi(token.substr(strlen("C=")).c_str()); - concordancePhraseToContexts[phraseId][contextId] += count; - concordanceContextToPhrases[contextId][phraseId] += count; - index = 0; - context = Context(-1, -1, -1, -1); - edges += 1; - } - } - } - - // trigger EOF - input >> ws; - } - - cout << "Read in " << phrases.size() << " phrases" - << " and " << contexts.size() << " contexts" - << " and " << edges << " edges" - << " and " << lexicon.size() << " word types\n"; - - // FIXME: filter out low count phrases and low count contexts (based on individual words?) - // now populate model parameters with uniform + random noise - prior.resize(numTags, 1.0); - addRandomNoise(prior); - normalise(prior); - - probCtx.resize(4, ConditionalDist(numTags, Dist(lexicon.size(), 1.0))); - if (includePhraseProb) - probPhrase.resize(numTags, Dist(lexicon.size(), 1.0)); - for (int t = 0; t < numTags; ++t) - { - for (int j = 0; j < 4; ++j) - { - addRandomNoise(probCtx[j][t]); - normalise(probCtx[j][t]); - } - if (includePhraseProb) - { - addRandomNoise(probPhrase[t]); - normalise(probPhrase[t]); - } - } - if (includePhraseProb) - { - probPhraseLength.resize(numTags, 0.5); // geometric distribution p=0.5 - addRandomNoise(probPhraseLength); - } - - cout << "\tprior: " << prior << "\n"; - //cout << "\tcontext: " << probCtx << "\n"; - //cout << "\tphrase: " << probPhrase << "\n"; - //cout << "\tphraseLen: " << probPhraseLength << endl; - - vector<double> lambda; - - // now do EM training - for (int iteration = 0; iteration < numIterations; ++iteration) - { - cout << "EM iteration " << iteration << endl; - - if (posterior_regularisation) - optimise_lambda(PHRASE_VIOLATION_WEIGHT, CONTEXT_VIOLATION_WEIGHT, lambda); - //cout << "\tlambda " << lambda << endl; - - Dist countsPrior(numTags, 0.0); - vector<ConditionalDist> countsCtx(4, ConditionalDist(numTags, Dist(lexicon.size(), 1e-10))); - ConditionalDist countsPhrase(numTags, Dist(lexicon.size(), 1e-10)); - Dist countsPhraseLength(numTags, 0.0); - Dist nPhrases(numTags, 0.0); - - double llh = 0; - for (PhraseToContextCounts::iterator pcit = concordancePhraseToContexts.begin(); - pcit != concordancePhraseToContexts.end(); ++pcit) - { - const Phrase &phrase = phrases.type(pcit->first); - - // e-step: estimate latent class probs; compile (class,word) stats for m-step - for (ContextCounts::iterator ccit = pcit->second.begin(); - ccit != pcit->second.end(); ++ccit) - { - const Context &context = contexts.type(ccit->first); - - double z = 0; - Dist tagCounts; - if (!posterior_regularisation) - tagCounts = 
conditional_probs(phrase, context, &z); - else - tagCounts = penalised_conditionals(phrase, context, lambda, &z); - - llh += log(z) * ccit->second; - addTo(countsPrior, tagCounts); // FIXME: times ccit->secon - - for (int t = 0; t < numTags; ++t) - { - for (int j = 0; j < 4; ++j) - countsCtx[j][t][get<0>(context)] += tagCounts[t] * ccit->second; - - if (includePhraseProb) - { - for (Phrase::const_iterator pit = phrase.begin(); pit != phrase.end(); ++pit) - countsPhrase[t][*pit] += tagCounts[t] * ccit->second; - countsPhraseLength[t] += phrase.size() * tagCounts[t] * ccit->second; - nPhrases[t] += tagCounts[t] * ccit->second; - } - } - } - } - - cout << "M-step\n"; - - // m-step: normalise prior and (class,word) stats and assign to model parameters - normalise(countsPrior); - prior = countsPrior; - for (int t = 0; t < numTags; ++t) - { - //cout << "\t\tt " << t << " prior " << countsPrior[t] << "\n"; - for (int j = 0; j < 4; ++j) - normalise(countsCtx[j][t]); - if (includePhraseProb) - { - normalise(countsPhrase[t]); - countsPhraseLength[t] = nPhrases[t] / countsPhraseLength[t]; - } - } - probCtx = countsCtx; - if (includePhraseProb) - { - probPhrase = countsPhrase; - probPhraseLength = countsPhraseLength; - } - - double *larray = new double[lambda.size()]; - copy(lambda.begin(), lambda.end(), larray); - print_primal_dual(larray, PHRASE_VIOLATION_WEIGHT, CONTEXT_VIOLATION_WEIGHT); - delete [] larray; - - //cout << "\tllh " << llh << endl; - //cout << "\tprior: " << prior << "\n"; - //cout << "\tcontext: " << probCtx << "\n"; - //cout << "\tphrase: " << probPhrase << "\n"; - //cout << "\tphraseLen: " << probPhraseLength << "\n"; - } - - // output class membership - for (PhraseToContextCounts::iterator pcit = concordancePhraseToContexts.begin(); - pcit != concordancePhraseToContexts.end(); ++pcit) - { - const Phrase &phrase = phrases.type(pcit->first); - for (ContextCounts::iterator ccit = pcit->second.begin(); - ccit != pcit->second.end(); ++ccit) - { - const Context &context = contexts.type(ccit->first); - Dist tagCounts = conditional_probs(phrase, context, 0); - cout << phrase << " ||| " << context << " ||| " << argmax(tagCounts) << "\n"; - } - } - - return 0; -} - -void addRandomNoise(Dist &d) -{ - for (Dist::iterator dit = d.begin(); dit != d.end(); ++dit) - *dit += rng(); -} - -void normalise(Dist &d) -{ - double z = 0; - for (Dist::iterator dit = d.begin(); dit != d.end(); ++dit) - z += *dit; - for (Dist::iterator dit = d.begin(); dit != d.end(); ++dit) - *dit /= z; -} - -void addTo(Dist &d, const Dist &e) -{ - assert(d.size() == e.size()); - for (int i = 0; i < (int) d.size(); ++i) - d[i] += e[i]; -} - -int argmax(const Dist &d) -{ - double best = d[0]; - int index = 0; - for (int i = 1; i < (int) d.size(); ++i) - { - if (d[i] > best) - { - best = d[i]; - index = i; - } - } - return index; -} - -ostream &operator<<(ostream &out, const Phrase &phrase) -{ - for (Phrase::const_iterator pit = phrase.begin(); pit != phrase.end(); ++pit) - lexicon.display(((pit == phrase.begin()) ? out : out << " "), *pit); - return out; -} - -ostream &operator<<(ostream &out, const Context &context) -{ - lexicon.display(out, get<0>(context)); - lexicon.display(out << " ", get<1>(context)); - lexicon.display(out << " <PHRASE> ", get<2>(context)); - lexicon.display(out << " ", get<3>(context)); - return out; -} - -ostream &operator<<(ostream &out, const Dist &dist) -{ - for (Dist::const_iterator dit = dist.begin(); dit != dist.end(); ++dit) - out << ((dit == dist.begin()) ? 
"" : " ") << *dit; - return out; -} - -ostream &operator<<(ostream &out, const ConditionalDist &dist) -{ - for (ConditionalDist::const_iterator dit = dist.begin(); dit != dist.end(); ++dit) - out << ((dit == dist.begin()) ? "" : "; ") << *dit; - return out; -} - -// FIXME: slow - just use the phrase index, context index to do the mapping -// (n.b. it's a sparse setup, not just equal to 3d array index) -int -lambda_index(const Phrase &phrase, const Context &context, int tag) -{ - return lambda_indices[phrase][context] + tag; -} - -template <typename T> -Dist -penalised_conditionals(const Phrase &phrase, const Context &context, - const T &lambda, double *normalisation) -{ - Dist d = conditional_probs(phrase, context, 0); - - double z = 0; - for (int t = 0; t < numTags; ++t) - { - d[t] *= exp(-lambda[lambda_index(phrase, context, t)]); - z += d[t]; - } - - if (normalisation) - *normalisation = z; - - for (int t = 0; t < numTags; ++t) - d[t] /= z; - - return d; -} - -Dist -conditional_probs(const Phrase &phrase, const Context &context, double *normalisation) -{ - Dist tagCounts(numTags, 0.0); - double z = 0; - for (int t = 0; t < numTags; ++t) - { - double prob = prior[t]; - prob *= (probCtx[0][t][get<0>(context)] * probCtx[1][t][get<1>(context)] * - probCtx[2][t][get<2>(context)] * probCtx[3][t][get<3>(context)]); - - if (includePhraseProb) - { - prob *= pow(1 - probPhraseLength[t], phrase.size() - 1) * probPhraseLength[t]; - for (Phrase::const_iterator pit = phrase.begin(); pit != phrase.end(); ++pit) - prob *= probPhrase[t][*pit]; - } - - tagCounts[t] = prob; - z += prob; - } - if (normalisation) - *normalisation = z; - - for (int t = 0; t < numTags; ++t) - tagCounts[t] /= z; - - return tagCounts; -} - -double -penalised_log_likelihood(int n, const double *lambda, double *grad, void *) -{ - // return log Z(lambda, theta) over the corpus - // where theta are the global parameters (prior, probCtx*, probPhrase*) - // and lambda are lagrange multipliers for the posterior sparsity constraints - // - // this is formulated as: - // f = log Z(lambda) = sum_i log ( sum_i p_theta(t_i|p_i,c_i) exp [-lambda_{t_i,p_i,c_i}] ) - // where i indexes the training examples - specifying the (p, c) pair (which may occur with count > 1) - // - // with derivative: - // f'_{tpc} = frac { - count(t,p,c) p_theta(t|p,c) exp (-lambda_{t,p,c}) } - // { sum_t' p_theta(t'|p,c) exp (-lambda_{t',p,c}) } - - //cout << "penalised_log_likelihood with lambda "; - //copy(lambda, lambda+n, ostream_iterator<double>(cout, " ")); - //cout << "\n"; - - double f = 0; - if (grad) - { - for (int i = 0; i < n; ++i) - grad[i] = 0.0; - } - - for (int p = 0; p < phrases.size(); ++p) - { - const Phrase &phrase = phrases.type(p); - PhraseToContextCounts::const_iterator pcit = concordancePhraseToContexts.find(p); - for (ContextCounts::const_iterator ccit = pcit->second.begin(); - ccit != pcit->second.end(); ++ccit) - { - const Context &context = contexts.type(ccit->first); - double z = 0; - Dist scores = penalised_conditionals(phrase, context, lambda, &z); - - f += ccit->second * log(z); - //cout << "\tphrase: " << phrase << " context: " << context << " count: " << ccit->second << " z " << z << endl; - //cout << "\t\tscores: " << scores << "\n"; - - if (grad) - { - for (int t = 0; t < numTags; ++t) - { - int i = lambda_index(phrase, context, t); // FIXME: redundant lookups - assert(grad[i] == 0.0); - grad[i] = - ccit->second * scores[t]; - } - } - } - } - - //cout << "penalised_log_likelihood returning " << f; - //if (grad) - //{ - //cout << 
"\ngradient: "; - //copy(grad, grad+n, ostream_iterator<double>(cout, " ")); - //} - //cout << "\n"; - - return f; -} - -typedef struct -{ - // one of p or c should be set to -1, in which case it will be marginalised out - // i.e. sum_p' lambda_{p'ct} <= threshold - // or sum_c' lambda_{pc't} <= threshold - int p, c, t, threshold; -} constraint_data; - -double -constraint_and_gradient(int n, const double *lambda, double *grad, void *data) -{ - constraint_data *d = (constraint_data *) data; - assert(d->t >= 0); - assert(d->threshold >= 0); - - //cout << "constraint_and_gradient: t " << d->t << " p " << d->p << " c " << d->c << " tau " << d->threshold << endl; - //cout << "\tlambda "; - //copy(lambda, lambda+n, ostream_iterator<double>(cout, " ")); - //cout << "\n"; - - // FIXME: it's crazy to use a dense gradient here => will only have a handful of non-zero entries - if (grad) - { - for (int i = 0; i < n; ++i) - grad[i] = 0.0; - } - - //cout << "constraint_and_gradient: " << d->p << "; " << d->c << "; " << d->t << "; " << d->threshold << endl; - - if (d->p >= 0) - { - assert(d->c < 0); - // sum_c lambda_pct <= delta [a.k.a. threshold] - // => sum_c lambda_pct - delta <= 0 - // derivative_pct = { 1, if p and t match; 0, otherwise } - - double val = -d->threshold; - - const Phrase &phrase = phrases.type(d->p); - PhraseToContextCounts::const_iterator pcit = concordancePhraseToContexts.find(d->p); - assert(pcit != concordancePhraseToContexts.end()); - for (ContextCounts::const_iterator ccit = pcit->second.begin(); - ccit != pcit->second.end(); ++ccit) - { - const Context &context = contexts.type(ccit->first); - int i = lambda_index(phrase, context, d->t); - val += lambda[i]; - if (grad) grad[i] = 1; - } - //cout << "\treturning " << val << endl; - - return val; - } - else - { - assert(d->c >= 0); - assert(d->p < 0); - // sum_p lambda_pct <= gamma [a.k.a. threshold] - // => sum_p lambda_pct - gamma <= 0 - // derivative_pct = { 1, if c and t match; 0, otherwise } - - double val = -d->threshold; - - const Context &context = contexts.type(d->c); - ContextToPhraseCounts::iterator cpit = concordanceContextToPhrases.find(d->c); - assert(cpit != concordanceContextToPhrases.end()); - for (PhraseCounts::iterator pcit = cpit->second.begin(); - pcit != cpit->second.end(); ++pcit) - { - const Phrase &phrase = phrases.type(pcit->first); - int i = lambda_index(phrase, context, d->t); - val += lambda[i]; - if (grad) grad[i] = 1; - } - //cout << "\treturning " << val << endl; - - return val; - } -} - -void -optimise_lambda(double delta, double gamma, vector<double> &lambdav) -{ - int num_lambdas = lambdav.size(); - if (lambda_indices.empty() || lambdav.empty()) - { - lambda_indices.clear(); - lambdav.clear(); - - int i = 0; - for (int p = 0; p < phrases.size(); ++p) - { - const Phrase &phrase = phrases.type(p); - PhraseToContextCounts::iterator pcit = concordancePhraseToContexts.find(p); - for (ContextCounts::iterator ccit = pcit->second.begin(); - ccit != pcit->second.end(); ++ccit) - { - const Context &context = contexts.type(ccit->first); - lambda_indices[phrase][context] = i; - i += numTags; - } - } - num_lambdas = i; - lambdav.resize(num_lambdas); - } - //cout << "optimise_lambda: #langrange multipliers " << num_lambdas << endl; - - // FIXME: better to work with an implicit representation to save memory usage - int num_constraints = (((delta > 0) ? phrases.size() : 0) + ((gamma > 0) ? 
contexts.size() : 0)) * numTags; - //cout << "optimise_lambda: #constraints " << num_constraints << endl; - constraint_data *data = new constraint_data[num_constraints]; - int i = 0; - if (delta > 0) - { - for (int p = 0; p < phrases.size(); ++p) - { - for (int t = 0; t < numTags; ++t, ++i) - { - constraint_data &d = data[i]; - d.p = p; - d.c = -1; - d.t = t; - d.threshold = delta; - } - } - } - - if (gamma > 0) - { - for (int c = 0; c < contexts.size(); ++c) - { - for (int t = 0; t < numTags; ++t, ++i) - { - constraint_data &d = data[i]; - d.p = -1; - d.c = c; - d.t = t; - d.threshold = gamma; - } - } - } - assert(i == num_constraints); - - double lambda[num_lambdas]; - double lb[num_lambdas], ub[num_lambdas]; - for (i = 0; i < num_lambdas; ++i) - { - lambda[i] = lambdav[i]; // starting value - lb[i] = 0; // lower bound - if (delta <= 0) // upper bound - ub[i] = gamma; - else if (gamma <= 0) - ub[i] = delta; - else - assert(false); - } - - //print_primal_dual(lambda, delta, gamma); - - double minf; - int error_code = nlopt_minimize_constrained(NLOPT_LN_COBYLA, num_lambdas, penalised_log_likelihood, NULL, - num_constraints, constraint_and_gradient, data, sizeof(constraint_data), - lb, ub, lambda, &minf, -HUGE_VAL, 0.0, 0.0, 1e-4, NULL, 0, 0.0); - //cout << "optimise error code " << error_code << endl; - - //print_primal_dual(lambda, delta, gamma); - - delete [] data; - - if (error_code < 0) - cout << "WARNING: optimisation failed with error code: " << error_code << endl; - //else - //{ - //cout << "success; minf " << minf << endl; - //print_primal_dual(lambda, delta, gamma); - //} - - lambdav = vector<double>(&lambda[0], &lambda[0] + num_lambdas); -} - -// FIXME: inefficient - cache the scores -double -expected_violation_phrases(const double *lambda) -{ - // sum_pt max_c E_q[phi_pct] - double violation = 0; - - for (int p = 0; p < phrases.size(); ++p) - { - const Phrase &phrase = phrases.type(p); - PhraseToContextCounts::const_iterator pcit = concordancePhraseToContexts.find(p); - - for (int t = 0; t < numTags; ++t) - { - double best = 0; - for (ContextCounts::const_iterator ccit = pcit->second.begin(); - ccit != pcit->second.end(); ++ccit) - { - const Context &context = contexts.type(ccit->first); - Dist scores = penalised_conditionals(phrase, context, lambda, 0); - best = max(best, scores[t]); - } - violation += best; - } - } - - return violation; -} - -// FIXME: inefficient - cache the scores -double -expected_violation_contexts(const double *lambda) -{ - // sum_ct max_p E_q[phi_pct] - double violation = 0; - - for (int c = 0; c < contexts.size(); ++c) - { - const Context &context = contexts.type(c); - ContextToPhraseCounts::iterator cpit = concordanceContextToPhrases.find(c); - - for (int t = 0; t < numTags; ++t) - { - double best = 0; - for (PhraseCounts::iterator pit = cpit->second.begin(); - pit != cpit->second.end(); ++pit) - { - const Phrase &phrase = phrases.type(pit->first); - Dist scores = penalised_conditionals(phrase, context, lambda, 0); - best = max(best, scores[t]); - } - violation += best; - } - } - - return violation; -} - -// FIXME: possibly inefficient -double -primal_likelihood() // FIXME: primal evaluation needs to use lambda and calculate l1linf terms -{ - double llh = 0; - for (int p = 0; p < phrases.size(); ++p) - { - const Phrase &phrase = phrases.type(p); - PhraseToContextCounts::const_iterator pcit = concordancePhraseToContexts.find(p); - for (ContextCounts::const_iterator ccit = pcit->second.begin(); - ccit != pcit->second.end(); ++ccit) - { - const Context 
&context = contexts.type(ccit->first); - double z = 0; - Dist scores = conditional_probs(phrase, context, &z); - llh += ccit->second * log(z); - } - } - return llh; -} - -// FIXME: inefficient - cache the scores -double -primal_kl_divergence(const double *lambda) -{ - // return KL(q || p) = sum_y q(y) { log q(y) - log p(y | x) } - // = sum_y q(y) { log p(y | x) - lambda . phi(x, y) - log Z - log p(y | x) } - // = sum_y q(y) { - lambda . phi(x, y) } - log Z - // and q(y) factors with each edge, ditto for Z - - double feature_sum = 0, log_z = 0; - for (int p = 0; p < phrases.size(); ++p) - { - const Phrase &phrase = phrases.type(p); - PhraseToContextCounts::const_iterator pcit = concordancePhraseToContexts.find(p); - for (ContextCounts::const_iterator ccit = pcit->second.begin(); - ccit != pcit->second.end(); ++ccit) - { - const Context &context = contexts.type(ccit->first); - - double local_z = 0; - double local_f = 0; - Dist d = conditional_probs(phrase, context, 0); - for (int t = 0; t < numTags; ++t) - { - int i = lambda_index(phrase, context, t); - double s = d[t] * exp(-lambda[i]); - local_f += lambda[i] * s; - local_z += s; - } - - log_z += ccit->second * log(local_z); - feature_sum += ccit->second * (local_f / local_z); - } - } - - return -feature_sum - log_z; -} - -// FIXME: inefficient - cache the scores -double -dual(const double *lambda) -{ - // return log(Z) = - log { sum_y p(y | x) exp( - lambda . phi(x, y) } - // n.b. have flipped the sign as we're minimising - - double z = 0; - for (int p = 0; p < phrases.size(); ++p) - { - const Phrase &phrase = phrases.type(p); - PhraseToContextCounts::const_iterator pcit = concordancePhraseToContexts.find(p); - for (ContextCounts::const_iterator ccit = pcit->second.begin(); - ccit != pcit->second.end(); ++ccit) - { - const Context &context = contexts.type(ccit->first); - double lz = 0; - Dist scores = penalised_conditionals(phrase, context, lambda, &z); - z += lz * ccit->second; - } - } - return log(z); -} - -void -print_primal_dual(const double *lambda, double delta, double gamma) -{ - double likelihood = primal_likelihood(); - double kl = primal_kl_divergence(lambda); - double sum_pt = expected_violation_phrases(lambda); - double sum_ct = expected_violation_contexts(lambda); - //double d = dual(lambda); - - cout << "\tllh=" << likelihood - << " kl=" << kl - << " violations phrases=" << sum_pt - << " contexts=" << sum_ct - //<< " primal=" << (kl + delta * sum_pt + gamma * sum_ct) - //<< " dual=" << d - << " objective=" << (likelihood - kl + delta * sum_pt + gamma * sum_ct) - << endl; -} diff --git a/gi/posterior-regularisation/invert.hh b/gi/posterior-regularisation/invert.hh deleted file mode 100644 index d06356e9..00000000 --- a/gi/posterior-regularisation/invert.hh +++ /dev/null @@ -1,45 +0,0 @@ -// The following code inverts the matrix input using LU-decomposition with -// backsubstitution of unit vectors. Reference: Numerical Recipies in C, 2nd -// ed., by Press, Teukolsky, Vetterling & Flannery. -// Code written by Fredrik Orderud. 
-// http://www.crystalclearsoftware.com/cgi-bin/boost_wiki/wiki.pl?LU_Matrix_Inversion - -#ifndef INVERT_MATRIX_HPP -#define INVERT_MATRIX_HPP - -// REMEMBER to update "lu.hpp" header includes from boost-CVS -#include <boost/numeric/ublas/vector.hpp> -#include <boost/numeric/ublas/vector_proxy.hpp> -#include <boost/numeric/ublas/matrix.hpp> -#include <boost/numeric/ublas/triangular.hpp> -#include <boost/numeric/ublas/lu.hpp> -#include <boost/numeric/ublas/io.hpp> - -namespace ublas = boost::numeric::ublas; - -/* Matrix inversion routine. - Uses lu_factorize and lu_substitute in uBLAS to invert a matrix */ -template<class T> -bool invert_matrix(const ublas::matrix<T>& input, ublas::matrix<T>& inverse) -{ - using namespace boost::numeric::ublas; - typedef permutation_matrix<std::size_t> pmatrix; - // create a working copy of the input - matrix<T> A(input); - // create a permutation matrix for the LU-factorization - pmatrix pm(A.size1()); - - // perform LU-factorization - int res = lu_factorize(A,pm); - if( res != 0 ) return false; - - // create identity matrix of "inverse" - inverse.assign(ublas::identity_matrix<T>(A.size1())); - - // backsubstitute to get the inverse - lu_substitute(A, pm, inverse); - - return true; -} - -#endif //INVERT_MATRIX_HPP diff --git a/gi/posterior-regularisation/linesearch.py b/gi/posterior-regularisation/linesearch.py deleted file mode 100644 index 5a3f2e9c..00000000 --- a/gi/posterior-regularisation/linesearch.py +++ /dev/null @@ -1,58 +0,0 @@ -## Automatically adapted for scipy Oct 07, 2005 by convertcode.py - -from scipy.optimize import minpack2 -import numpy - -import __builtin__ -pymin = __builtin__.min - -def line_search(f, myfprime, xk, pk, gfk, old_fval, old_old_fval, - args=(), c1=1e-4, c2=0.9, amax=50): - - fc = 0 - gc = 0 - phi0 = old_fval - derphi0 = numpy.dot(gfk,pk) - alpha1 = pymin(1.0,1.01*2*(phi0-old_old_fval)/derphi0) - # trevor: added this test - alpha1 = pymin(alpha1,amax) - - if isinstance(myfprime,type(())): - eps = myfprime[1] - fprime = myfprime[0] - newargs = (f,eps) + args - gradient = False - else: - fprime = myfprime - newargs = args - gradient = True - - xtol = 1e-14 - amin = 1e-8 - isave = numpy.zeros((2,), numpy.intc) - dsave = numpy.zeros((13,), float) - task = 'START' - fval = old_fval - gval = gfk - - while 1: - stp,fval,derphi,task = minpack2.dcsrch(alpha1, phi0, derphi0, c1, c2, - xtol, task, amin, amax,isave,dsave) - #print 'minpack2.dcsrch', alpha1, phi0, derphi0, c1, c2, xtol, task, amin, amax,isave,dsave - #print 'returns', stp,fval,derphi,task - - if task[:2] == 'FG': - alpha1 = stp - fval = f(xk+stp*pk,*args) - fc += 1 - gval = fprime(xk+stp*pk,*newargs) - if gradient: gc += 1 - else: fc += len(xk) + 1 - phi0 = fval - derphi0 = numpy.dot(gval,pk) - else: - break - - if task[:5] == 'ERROR' or task[1:4] == 'WARN': - stp = None # failed - return stp, fc, gc, fval, old_fval, gval diff --git a/gi/posterior-regularisation/log_add.hh b/gi/posterior-regularisation/log_add.hh deleted file mode 100644 index e0620c5a..00000000 --- a/gi/posterior-regularisation/log_add.hh +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef log_add_hh -#define log_add_hh - -#include <limits> -#include <iostream> -#include <cassert> -#include <cmath> - -template <typename T> -struct Log -{ - static T zero() { return -std::numeric_limits<T>::infinity(); } - - static T add(T l1, T l2) - { - if (l1 == zero()) return l2; - if (l1 > l2) - return l1 + std::log(1 + exp(l2 - l1)); - else - return l2 + std::log(1 + exp(l1 - l2)); - } - - static T subtract(T l1, T l2) - { - 
// n.b. std::assert does not exist; the intended check is assert(l1 >= l2) from <cassert> - return l1 + log(1 - exp(l2 - l1)); - } -}; - -#endif diff --git a/gi/posterior-regularisation/prjava.jar b/gi/posterior-regularisation/prjava.jar deleted file mode 120000 index da8bf761..00000000 --- a/gi/posterior-regularisation/prjava.jar +++ /dev/null @@ -1 +0,0 @@ -prjava/prjava-20100708.jar
\ No newline at end of file diff --git a/gi/posterior-regularisation/prjava/Makefile b/gi/posterior-regularisation/prjava/Makefile deleted file mode 100755 index bd3bfca0..00000000 --- a/gi/posterior-regularisation/prjava/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -all: - ant dist - -check: - echo no tests - -clean: - ant clean diff --git a/gi/posterior-regularisation/prjava/build.xml b/gi/posterior-regularisation/prjava/build.xml deleted file mode 100644 index 7222b3c8..00000000 --- a/gi/posterior-regularisation/prjava/build.xml +++ /dev/null @@ -1,38 +0,0 @@ -<project name="prjava" default="dist" basedir="."> - <!-- set global properties for this build --> - <property name="src" location="src"/> - <property name="build" location="build"/> - <property name="dist" location="lib"/> - <path id="classpath"> - <pathelement location="lib/trove-2.0.2.jar"/> - <pathelement location="lib/optimization.jar"/> - <pathelement location="lib/jopt-simple-3.2.jar"/> - <pathelement location="lib/commons-math-2.1.jar"/> - </path> - - <target name="init"> - <!-- Create the time stamp --> - <tstamp/> - <!-- Create the build directory structure used by compile --> - <mkdir dir="${build}"/> - </target> - - <target name="compile" depends="init" - description="compile the source " > - <!-- Compile the java code from ${src} into ${build} --> - <javac srcdir="${src}" destdir="${build}" includeantruntime="false"> - <classpath refid="classpath"/> - </javac> - </target> - - <target name="dist" depends="compile" - description="generate the distribution" > - <jar jarfile="${dist}/prjava-${DSTAMP}.jar" basedir="${build}"/> - <symlink link="./prjava.jar" resource="${dist}/prjava-${DSTAMP}.jar" overwrite="true"/> - </target> - - <target name="clean" - description="clean up" > - <delete dir="${build}"/> - </target> -</project> diff --git a/gi/posterior-regularisation/prjava/lib/commons-math-2.1.jar b/gi/posterior-regularisation/prjava/lib/commons-math-2.1.jar Binary files differdeleted file mode 100644 index 43b4b369..00000000 --- a/gi/posterior-regularisation/prjava/lib/commons-math-2.1.jar +++ /dev/null diff --git a/gi/posterior-regularisation/prjava/lib/jopt-simple-3.2.jar b/gi/posterior-regularisation/prjava/lib/jopt-simple-3.2.jar Binary files differdeleted file mode 100644 index 56373621..00000000 --- a/gi/posterior-regularisation/prjava/lib/jopt-simple-3.2.jar +++ /dev/null diff --git a/gi/posterior-regularisation/prjava/lib/trove-2.0.2.jar b/gi/posterior-regularisation/prjava/lib/trove-2.0.2.jar Binary files differdeleted file mode 100644 index 3e59fbf3..00000000 --- a/gi/posterior-regularisation/prjava/lib/trove-2.0.2.jar +++ /dev/null diff --git a/gi/posterior-regularisation/prjava/src/arr/F.java b/gi/posterior-regularisation/prjava/src/arr/F.java deleted file mode 100644 index be0a6ed6..00000000 --- a/gi/posterior-regularisation/prjava/src/arr/F.java +++ /dev/null @@ -1,99 +0,0 @@ -package arr;
-
-import java.util.Arrays;
-import java.util.Random;
-
-public class F {
- public static Random rng = new Random();
-
- public static void randomise(double probs[])
- {
- randomise(probs, true);
- }
-
- public static void randomise(double probs[], boolean normalise)
- {
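- // draw values in [10, 11) so that, after normalisation, the distribution
- // is close to uniform with a small random perturbation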
- double z = 0;
- for (int i = 0; i < probs.length; ++i)
- {
- probs[i] = 10 + rng.nextDouble();
- if (normalise)
- z += probs[i];
- }
-
- if (normalise)
- for (int i = 0; i < probs.length; ++i)
- probs[i] /= z;
- }
-
- public static void uniform(double probs[])
- {
- for (int i = 0; i < probs.length; ++i)
- probs[i] = 1.0 / probs.length;
- }
-
- public static void l1normalize(double [] a){
- double sum=0;
- for(int i=0;i<a.length;i++){
- sum+=a[i];
- }
- if(sum==0)
- Arrays.fill(a, 1.0/a.length);
- else
- {
- for(int i=0;i<a.length;i++){
- a[i]/=sum;
- }
- }
- }
-
- public static void l1normalize(double [][] a){
- double sum=0;
- for(int i=0;i<a.length;i++){
- for(int j=0;j<a[i].length;j++){
- sum+=a[i][j];
- }
- }
- if(sum==0){
- return;
- }
- for(int i=0;i<a.length;i++){
- for(int j=0;j<a[i].length;j++){
- a[i][j]/=sum;
- }
- }
- }
-
- public static double l1norm(double a[]){
- // take absolute values so this is a true l1 norm even when entries are negative
- double norm=0;
- for(int i=0;i<a.length;i++){
- norm += Math.abs(a[i]);
- }
- return norm;
- }
-
- public static double l2norm(double a[]){
- double norm=0;
- for(int i=0;i<a.length;i++){
- norm += a[i]*a[i];
- }
- return Math.sqrt(norm);
- }
-
- public static int argmax(double probs[])
- {
- double m = Double.NEGATIVE_INFINITY;
- int mi = -1;
- for (int i = 0; i < probs.length; ++i)
- {
- if (probs[i] > m)
- {
- m = probs[i];
- mi = i;
- }
- }
- return mi;
- }
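-
- // Hypothetical usage sketch (added for illustration, not in the original source):
- // double[] p = new double[5];
- // F.randomise(p); // random distribution summing to 1
- // int best = F.argmax(p); // index of the most probable entry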
-
-}
diff --git a/gi/posterior-regularisation/prjava/src/data/Corpus.java b/gi/posterior-regularisation/prjava/src/data/Corpus.java deleted file mode 100644 index 425ede11..00000000 --- a/gi/posterior-regularisation/prjava/src/data/Corpus.java +++ /dev/null @@ -1,233 +0,0 @@ -package data;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Scanner;
-
-public class Corpus {
-
- public static final String alphaFilename="../posdata/corpus.alphabet";
- public static final String tagalphaFilename="../posdata/corpus.tag.alphabet";
-
-// public static final String START_SYM="<s>";
- public static final String END_SYM="<e>";
- public static final String NUM_TOK="<NUM>";
-
- public static final String UNK_TOK="<unk>";
-
- private ArrayList<String[]>sent;
- private ArrayList<int[]>data;
-
- public ArrayList<String[]>tag;
- public ArrayList<int[]>tagData;
-
- public static boolean convertNumTok=true;
-
- private HashMap<String,Integer>freq;
- public HashMap<String,Integer>vocab;
-
- public HashMap<String,Integer>tagVocab;
- private int tagV;
-
- private int V;
-
- public static void main(String[] args) {
- Corpus c=new Corpus("../posdata/en_test.conll");
- System.out.println(
- Arrays.toString(c.get(0))
- );
- System.out.println(
- Arrays.toString(c.getInt(0))
- );
-
- System.out.println(
- Arrays.toString(c.get(1))
- );
- System.out.println(
- Arrays.toString(c.getInt(1))
- );
- }
-
- public Corpus(String filename,HashMap<String,Integer>dict){
- V=0;
- tagV=0;
- freq=new HashMap<String,Integer>();
- tagVocab=new HashMap<String,Integer>();
- vocab=dict;
-
- sent=new ArrayList<String[]>();
- tag=new ArrayList<String[]>();
-
- Scanner sc=io.FileUtil.openInFile(filename);
- ArrayList<String>s=new ArrayList<String>();
- // s.add(START_SYM);
- while(sc.hasNextLine()){
- String line=sc.nextLine();
- String toks[]=line.split("\t");
- if(toks.length<2){
- s.add(END_SYM);
- sent.add(s.toArray(new String[0]));
- s=new ArrayList<String>();
- // s.add(START_SYM);
- continue;
- }
- String tok=toks[1].toLowerCase();
- // apply the same <NUM> conversion as the main constructor so that test
- // tokens containing digits match the training vocabulary
- if(convertNumTok && tok.matches(".*\\d.*")){
- tok=NUM_TOK;
- }
- s.add(tok);
- }
- sc.close();
-
- buildData();
- }
-
- public Corpus(String filename){
- V=0;
- freq=new HashMap<String,Integer>();
- vocab=new HashMap<String,Integer>();
- tagVocab=new HashMap<String,Integer>();
-
- sent=new ArrayList<String[]>();
- tag=new ArrayList<String[]>();
-
- System.out.println("Reading:"+filename);
-
- Scanner sc=io.FileUtil.openInFile(filename);
- ArrayList<String>s=new ArrayList<String>();
- ArrayList<String>tags=new ArrayList<String>();
- //s.add(START_SYM);
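- // Expected input is CoNLL-style: one token per line with tab-separated
- // columns, the word form in column 2 and (when present) a coarse tag in
- // column 4; a blank or short line terminates a sentence.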
- while(sc.hasNextLine()){
- String line=sc.nextLine();
- String toks[]=line.split("\t");
- if(toks.length<2){
- s.add(END_SYM);
- tags.add(END_SYM);
- if(s.size()>2){
- sent.add(s.toArray(new String[0]));
- tag.add(tags.toArray(new String [0]));
- }
- s=new ArrayList<String>();
- tags=new ArrayList<String>();
- // s.add(START_SYM);
- continue;
- }
-
- String tok=toks[1].toLowerCase();
- if(convertNumTok && tok.matches(".*\\d.*")){
- tok=NUM_TOK;
- }
- s.add(tok);
-
- if(toks.length>3){
- tok=toks[3].toLowerCase();
- }else{
- tok="_";
- }
- tags.add(tok);
-
- }
- sc.close();
-
- for(int i=0;i<sent.size();i++){
- String[]toks=sent.get(i);
- for(int j=0;j<toks.length;j++){
- addVocab(toks[j]);
- addTag(tag.get(i)[j]);
- }
- }
-
- buildVocab();
- buildData();
- System.out.println(data.size()+" sentences, "+vocab.keySet().size()+" word types");
- }
-
- public String[] get(int idx){
- return sent.get(idx);
- }
-
- private void addVocab(String s){
- Integer integer=freq.get(s);
- if(integer==null){
- integer=0;
- }
- freq.put(s, integer+1);
- }
-
- public int tokIdx(String tok){
- Integer integer=vocab.get(tok);
- if(integer==null){
- return V;
- }
- return integer;
- }
-
- public int tagIdx(String tok){
- Integer integer=tagVocab.get(tok);
- if(integer==null){
- return tagV;
- }
- return integer;
- }
-
- private void buildData(){
- data=new ArrayList<int[]>();
- for(int i=0;i<sent.size();i++){
- String s[]=sent.get(i);
- data.add(new int [s.length]);
- for(int j=0;j<s.length;j++){
- data.get(i)[j]=tokIdx(s[j]);
- }
- }
-
- tagData=new ArrayList<int[]>();
- for(int i=0;i<tag.size();i++){
- String s[]=tag.get(i);
- tagData.add(new int [s.length]);
- for(int j=0;j<s.length;j++){
- tagData.get(i)[j]=tagIdx(s[j]);
- }
- }
- sent=null;
- tag=null;
- System.gc();
- }
-
- public int [] getInt(int idx){
- return data.get(idx);
- }
-
- /**
- *
- * @return size of vocabulary
- */
- public int getVocabSize(){
- return V;
- }
-
- public int [][]getAllData(){
- return data.toArray(new int [0][]);
- }
-
- public int [][]getTagData(){
- return tagData.toArray(new int [0][]);
- }
-
- private void buildVocab(){
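- // keep only words seen more than twice; anything rarer is left out of the
- // vocabulary and maps to the out-of-vocabulary index V in tokIdx()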
- for (String key:freq.keySet()){
- if(freq.get(key)>2){
- vocab.put(key, V);
- V++;
- }
- }
- io.SerializedObjects.writeSerializedObject(vocab, alphaFilename);
- io.SerializedObjects.writeSerializedObject(tagVocab,tagalphaFilename);
- }
-
- private void addTag(String tag){
- Integer i=tagVocab.get(tag);
- if(i==null){
- tagVocab.put(tag, tagV);
- tagV++;
- }
- }
-
-}
diff --git a/gi/posterior-regularisation/prjava/src/hmm/HMM.java b/gi/posterior-regularisation/prjava/src/hmm/HMM.java deleted file mode 100644 index 17a4679f..00000000 --- a/gi/posterior-regularisation/prjava/src/hmm/HMM.java +++ /dev/null @@ -1,579 +0,0 @@ -package hmm;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.Scanner;
-
-public class HMM {
-
-
- //trans[i][j]=prob of going FROM i to j
- double [][]trans;
- double [][]emit;
- double []pi;
- int [][]data;
- int [][]tagdata;
-
- double logtrans[][];
-
- public HMMObjective o;
-
- public static void main(String[] args) {
-
- }
-
- public HMM(int n_state,int n_emit,int [][]data){
- trans=new double [n_state][n_state];
- emit=new double[n_state][n_emit];
- pi=new double [n_state];
- System.out.println(" random initial parameters");
- fillRand(trans);
- fillRand(emit);
- fillRand(pi);
-
- this.data=data;
-
- }
-
- private void fillRand(double [][] a){
- for(int i=0;i<a.length;i++){
- for(int j=0;j<a[i].length;j++){
- a[i][j]=Math.random();
- }
- l1normalize(a[i]);
- }
- }
- private void fillRand(double []a){
- for(int i=0;i<a.length;i++){
- a[i]=Math.random();
- }
- l1normalize(a);
- }
-
- private double loglikely=0;
-
- public void EM(){
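- // one EM iteration: accumulate expected transition/emission/start counts
- // from the posteriors (E-step), then renormalise them into probabilities
- // with add-one style smoothing on the emissions (M-step)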
- double trans_exp_cnt[][]=new double [trans.length][trans.length];
- double emit_exp_cnt[][]=new double[trans.length][emit[0].length];
- double start_exp_cnt[]=new double[trans.length];
- loglikely=0;
-
- //E
- for(int i=0;i<data.length;i++){
-
- double [][][] post=forwardBackward(data[i]);
- incrementExpCnt(post, data[i],
- trans_exp_cnt,
- emit_exp_cnt,
- start_exp_cnt);
-
-
- if(i%100==0){
- System.out.print(".");
- }
- if(i%1000==0){
- System.out.println(i);
- }
-
- }
- System.out.println("Log likelihood: "+loglikely);
-
- //M
- addOneSmooth(emit_exp_cnt);
- for(int i=0;i<trans.length;i++){
-
- //transition probs
- double sum=0;
- for(int j=0;j<trans.length;j++){
- sum+=trans_exp_cnt[i][j];
- }
- //avoid NaN when all counts are zero
- if(sum==0){
- sum=1;
- }
- for(int j=0;j<trans[i].length;j++){
- trans[i][j]=trans_exp_cnt[i][j]/sum;
- }
-
- //emission probs
-
- sum=0;
- for(int j=0;j<emit[i].length;j++){
- sum+=emit_exp_cnt[i][j];
- }
- //avoid NaN when all counts are zero
- if(sum==0){
- sum=1;
- }
- for(int j=0;j<emit[i].length;j++){
- emit[i][j]=emit_exp_cnt[i][j]/sum;
- }
-
-
- //initial probs
- for(int j=0;j<pi.length;j++){
- pi[j]=start_exp_cnt[j];
- }
- l1normalize(pi);
- }
- }
-
- private double [][][]forwardBackward(int [] seq){
- double a[][]=new double [seq.length][trans.length];
- double b[][]=new double [seq.length][trans.length];
-
- int len=seq.length;
- //initialize the first step
- for(int i=0;i<trans.length;i++){
- a[0][i]=emit[i][seq[0]]*pi[i];
- b[len-1][i]=1;
- }
-
- //log of denominator for the likelihood
- double c=Math.log(l1norm(a[0]));
-
- l1normalize(a[0]);
- l1normalize(b[len-1]);
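-
- // alpha and beta are renormalised at every position to avoid underflow;
- // the log normalisers of the forward pass are accumulated in c, whose
- // sum recovers the sentence log-likelihood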
-
-
-
- //forward
- for(int n=1;n<len;n++){
- for(int i=0;i<trans.length;i++){
- for(int j=0;j<trans.length;j++){
- a[n][i]+=trans[j][i]*a[n-1][j];
- }
- a[n][i]*=emit[i][seq[n]];
- }
- c+=Math.log(l1norm(a[n]));
- l1normalize(a[n]);
- }
-
- loglikely+=c;
-
- //backward
- for(int n=len-2;n>=0;n--){
- for(int i=0;i<trans.length;i++){
- for(int j=0;j<trans.length;j++){
- b[n][i]+=trans[i][j]*b[n+1][j]*emit[j][seq[n+1]];
- }
- }
- l1normalize(b[n]);
- }
-
-
- //expected transition
- double p[][][]=new double [seq.length][trans.length][trans.length];
- for(int n=0;n<len-1;n++){
- for(int i=0;i<trans.length;i++){
- for(int j=0;j<trans.length;j++){
- p[n][i][j]=a[n][i]*trans[i][j]*emit[j][seq[n+1]]*b[n+1][j];
-
- }
- }
-
- l1normalize(p[n]);
- }
- return p;
- }
-
- private void incrementExpCnt(
- double post[][][],int [] seq,
- double trans_exp_cnt[][],
- double emit_exp_cnt[][],
- double start_exp_cnt[])
- {
-
- for(int n=0;n<post.length;n++){
- for(int i=0;i<trans.length;i++){
- double py=0;
- for(int j=0;j<trans.length;j++){
- py+=post[n][i][j];
- trans_exp_cnt[i][j]+=post[n][i][j];
- }
-
- emit_exp_cnt[i][seq[n]]+=py;
-
- }
- }
-
- //the first state
- for(int i=0;i<trans.length;i++){
- double py=0;
- for(int j=0;j<trans.length;j++){
- py+=post[0][i][j];
- }
- start_exp_cnt[i]+=py;
- }
-
-
- //the last state
- int len=post.length;
- for(int i=0;i<trans.length;i++){
- double py=0;
- for(int j=0;j<trans.length;j++){
- py+=post[len-2][j][i];
- }
- emit_exp_cnt[i][seq[len-1]]+=py;
- }
- }
-
- public void l1normalize(double [] a){
- double sum=0;
- for(int i=0;i<a.length;i++){
- sum+=a[i];
- }
- if(sum==0){
- return ;
- }
- for(int i=0;i<a.length;i++){
- a[i]/=sum;
- }
- }
-
- public void l1normalize(double [][] a){
- double sum=0;
- for(int i=0;i<a.length;i++){
- for(int j=0;j<a[i].length;j++){
- sum+=a[i][j];
- }
- }
- if(sum==0){
- return;
- }
- for(int i=0;i<a.length;i++){
- for(int j=0;j<a[i].length;j++){
- a[i][j]/=sum;
- }
- }
- }
-
- public void writeModel(String modelFilename) throws FileNotFoundException, IOException{
- PrintStream ps=io.FileUtil.printstream(new File(modelFilename));
- ps.println(trans.length);
- ps.println("Initial Probabilities:");
- for(int i=0;i<pi.length;i++){
- ps.print(pi[i]+"\t");
- }
- ps.println();
- ps.println("Transition Probabilities:");
- for(int i=0;i<trans.length;i++){
- for(int j=0;j<trans[i].length;j++){
- ps.print(trans[i][j]+"\t");
- }
- ps.println();
- }
- ps.println("Emission Probabilities:");
- ps.println(emit[0].length);
- for(int i=0;i<trans.length;i++){
- for(int j=0;j<emit[i].length;j++){
- ps.println(emit[i][j]);
- }
- ps.println();
- }
- ps.close();
- }
-
- public HMM(){
-
- }
-
- public void readModel(String modelFilename){
- Scanner sc=io.FileUtil.openInFile(modelFilename);
-
- int n_state=sc.nextInt();
- sc.nextLine();
- sc.nextLine();
- pi=new double [n_state];
- for(int i=0;i<n_state;i++){
- pi[i]=sc.nextDouble();
- }
- sc.nextLine();
- sc.nextLine();
- trans=new double[n_state][n_state];
- for(int i=0;i<trans.length;i++){
- for(int j=0;j<trans[i].length;j++){
- trans[i][j]=sc.nextDouble();
- }
- }
- sc.nextLine();
- sc.nextLine();
-
- int n_obs=sc.nextInt();
- emit=new double[n_state][n_obs];
- for(int i=0;i<trans.length;i++){
- for(int j=0;j<emit[i].length;j++){
- emit[i][j]=sc.nextDouble();
- }
- }
- sc.close();
- }
-
- public int []viterbi(int [] seq){
- double [][]p=new double [seq.length][trans.length];
- int backp[][]=new int [seq.length][trans.length];
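- // standard Viterbi decoding in log space: p[n][i] is the best log score
- // of any state sequence ending in state i at position n, and backp[n][i]
- // records the predecessor state that achieves it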
-
- for(int i=0;i<trans.length;i++){
- p[0][i]=Math.log(emit[i][seq[0]]*pi[i]);
- }
-
- double a[][]=logtrans;
- if(logtrans==null){
- a=new double [trans.length][trans.length];
- for(int i=0;i<trans.length;i++){
- for(int j=0;j<trans.length;j++){
- a[i][j]=Math.log(trans[i][j]);
- }
- }
- logtrans=a;
- }
-
- double maxprob=0;
- for(int n=1;n<seq.length;n++){
- for(int i=0;i<trans.length;i++){
- maxprob=p[n-1][0]+a[0][i];
- backp[n][i]=0;
- for(int j=1;j<trans.length;j++){
- double prob=p[n-1][j]+a[j][i];
- if(maxprob<prob){
- backp[n][i]=j;
- maxprob=prob;
- }
- }
- p[n][i]=maxprob+Math.log(emit[i][seq[n]]);
- }
- }
-
- maxprob=p[seq.length-1][0];
- int maxIdx=0;
- for(int i=1;i<trans.length;i++){
- if(p[seq.length-1][i]>maxprob){
- maxprob=p[seq.length-1][i];
- maxIdx=i;
- }
- }
- int ans[]=new int [seq.length];
- ans[seq.length-1]=maxIdx;
- for(int i=seq.length-2;i>=0;i--){
- ans[i]=backp[i+1][ans[i+1]];
- }
- return ans;
- }
-
- public double l1norm(double a[]){
- double norm=0;
- for(int i=0;i<a.length;i++){
- norm += a[i];
- }
- return norm;
- }
-
- public double [][]getEmitProb(){
- return emit;
- }
-
- public int [] sample(int terminalSym){
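- // ancestral sampling: draw a start state from pi, then alternate emission
- // and transition draws until the terminal symbol is generated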
- ArrayList<Integer > s=new ArrayList<Integer>();
- int state=sample(pi);
- int sym=sample(emit[state]);
- while(sym!=terminalSym){
- s.add(sym);
- state=sample(trans[state]);
- sym=sample(emit[state]);
- }
-
- int ans[]=new int [s.size()];
- for(int i=0;i<ans.length;i++){
- ans[i]=s.get(i);
- }
- return ans;
- }
-
- public int sample(double p[]){
- double r=Math.random();
- double sum=0;
- for(int i=0;i<p.length;i++){
- sum+=p[i];
- if(sum>=r){
- return i;
- }
- }
- return p.length-1;
- }
-
- public void train(int tagdata[][]){
- double trans_exp_cnt[][]=new double [trans.length][trans.length];
- double emit_exp_cnt[][]=new double[trans.length][emit[0].length];
- double start_exp_cnt[]=new double[trans.length];
-
- for(int i=0;i<tagdata.length;i++){
- start_exp_cnt[tagdata[i][0]]++;
-
- for(int j=0;j<tagdata[i].length;j++){
- if(j+1<tagdata[i].length){
- trans_exp_cnt[ tagdata[i][j] ] [ tagdata[i][j+1] ]++;
- }
- emit_exp_cnt[tagdata[i][j]][data[i][j]]++;
- }
-
- }
-
- //M
- addOneSmooth(emit_exp_cnt);
- for(int i=0;i<trans.length;i++){
-
- //transition probs
- double sum=0;
- for(int j=0;j<trans.length;j++){
- sum+=trans_exp_cnt[i][j];
- }
- if(sum==0){
- sum=1;
- }
- for(int j=0;j<trans[i].length;j++){
- trans[i][j]=trans_exp_cnt[i][j]/sum;
- }
-
- //emission probs
-
- sum=0;
- for(int j=0;j<emit[i].length;j++){
- sum+=emit_exp_cnt[i][j];
- }
- if(sum==0){
- sum=1;
- }
- for(int j=0;j<emit[i].length;j++){
- emit[i][j]=emit_exp_cnt[i][j]/sum;
- }
-
-
- //initial probs
- for(int j=0;j<pi.length;j++){
- pi[j]=start_exp_cnt[j];
- }
- l1normalize(pi);
- }
- }
-
- private void addOneSmooth(double a[][]){
- for(int i=0;i<a.length;i++){
- for(int j=0;j<a[i].length;j++){
- a[i][j]+=0.01;
- }
- //l1normalize(a[i]);
- }
- }
-
- public void PREM(){
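- // EM with posterior regularisation: first fit the dual parameters lambda
- // by projected gradient (HMMObjective), then run an M-step on the
- // exp(-lambda)-reweighted posteriors from o.forwardBackward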
-
- o.optimizeWithProjectedGradientDescent();
-
- double trans_exp_cnt[][]=new double [trans.length][trans.length];
- double emit_exp_cnt[][]=new double[trans.length][emit[0].length];
- double start_exp_cnt[]=new double[trans.length];
-
- o.loglikelihood=0;
- //E
- for(int sentNum=0;sentNum<data.length;sentNum++){
-
- double [][][] post=o.forwardBackward(sentNum);
- incrementExpCnt(post, data[sentNum],
- trans_exp_cnt,
- emit_exp_cnt,
- start_exp_cnt);
-
-
- if(sentNum%100==0){
- System.out.print(".");
- }
- if(sentNum%1000==0){
- System.out.println(sentNum);
- }
-
- }
-
- System.out.println("Log likelihood: "+o.getValue());
-
- //M
- addOneSmooth(emit_exp_cnt);
- for(int i=0;i<trans.length;i++){
-
- //transition probs
- double sum=0;
- for(int j=0;j<trans.length;j++){
- sum+=trans_exp_cnt[i][j];
- }
- //avoid NaN when all counts are zero
- if(sum==0){
- sum=1;
- }
- for(int j=0;j<trans[i].length;j++){
- trans[i][j]=trans_exp_cnt[i][j]/sum;
- }
-
- //emission probs
-
- sum=0;
- for(int j=0;j<emit[i].length;j++){
- sum+=emit_exp_cnt[i][j];
- }
- //avoid NaN when all counts are zero
- if(sum==0){
- sum=1;
- }
- for(int j=0;j<emit[i].length;j++){
- emit[i][j]=emit_exp_cnt[i][j]/sum;
- }
-
-
- //initial probs
- for(int j=0;j<pi.length;j++){
- pi[j]=start_exp_cnt[j];
- }
- l1normalize(pi);
- }
-
- }
-
- public void computeMaxwt(double[][]maxwt, int[][] d){
-
- for(int sentNum=0;sentNum<d.length;sentNum++){
- double post[][][]=forwardBackward(d[sentNum]);
-
- for(int n=0;n<post.length;n++){
- for(int i=0;i<trans.length;i++){
- double py=0;
- for(int j=0;j<trans.length;j++){
- py+=post[n][i][j];
- }
-
- if(py>maxwt[i][d[sentNum][n]]){
- maxwt[i][d[sentNum][n]]=py;
- }
-
- }
- }
-
- //the last state
- int len=post.length;
- for(int i=0;i<trans.length;i++){
- double py=0;
- for(int j=0;j<trans.length;j++){
- py+=post[len-2][j][i];
- }
-
- if(py>maxwt[i][d[sentNum][len-1]]){
- maxwt[i][d[sentNum][len-1]]=py;
- }
-
- }
-
- }
-
- }
-
-}//end of class
diff --git a/gi/posterior-regularisation/prjava/src/hmm/HMMObjective.java b/gi/posterior-regularisation/prjava/src/hmm/HMMObjective.java deleted file mode 100644 index 70b6c966..00000000 --- a/gi/posterior-regularisation/prjava/src/hmm/HMMObjective.java +++ /dev/null @@ -1,351 +0,0 @@ -package hmm;
-
-import gnu.trove.TIntArrayList;
-import optimization.gradientBasedMethods.ProjectedGradientDescent;
-import optimization.gradientBasedMethods.ProjectedObjective;
-import optimization.gradientBasedMethods.stats.OptimizerStats;
-import optimization.linesearch.ArmijoLineSearchMinimizationAlongProjectionArc;
-import optimization.linesearch.InterpolationPickFirstStep;
-import optimization.linesearch.LineSearchMethod;
-import optimization.projections.SimplexProjection;
-import optimization.stopCriteria.CompositeStopingCriteria;
-import optimization.stopCriteria.ProjectedGradientL2Norm;
-import optimization.stopCriteria.StopingCriteria;
-import optimization.stopCriteria.ValueDifference;
-
-public class HMMObjective extends ProjectedObjective{
-
-
- private static final double GRAD_DIFF = 3;
- public static double INIT_STEP_SIZE=10;
- public static double VAL_DIFF=1000;
-
- private HMM hmm;
- double[] newPoint ;
-
- //posterior[sent num][tok num][tag]=index into lambda
- private int posteriorMap[][][];
- //projection[word][tag].get(occurrence)=index into lambda
- private TIntArrayList projectionMap[][];
-
- //Size of the simplex
- public double scale=10;
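- // each constrained (word,state) block of lambdas is projected onto a
- // simplex of size `scale`, which enforces the L1/Linf budget on that
- // word's tag posteriors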
- private SimplexProjection projection;
-
- private int wordFreq[];
- private static int MIN_FREQ=10;
- private int numWordsToProject=0;
-
- private int n_param;
-
- public double loglikelihood;
-
- public HMMObjective(HMM h){
- hmm=h;
-
- countWords();
- buildMap();
-
- gradient=new double [n_param];
- projection = new SimplexProjection(scale);
- newPoint = new double[n_param];
- setInitialParameters(new double[n_param]);
-
- }
-
- /**
- * Counts word frequencies over the whole corpus.
- */
- private void countWords(){
- wordFreq=new int [hmm.emit[0].length];
- for(int i=0;i<hmm.data.length;i++){
- for(int j=0;j<hmm.data[i].length;j++){
- wordFreq[hmm.data[i][j]]++;
- }
- }
- }
-
- /**
- * Builds the posterior and projection index maps.
- */
- private void buildMap(){
- //numbers of hidden states, words and sentences
- int n_states=hmm.trans.length;
- int n_words=hmm.emit[0].length;
- int n_sents=hmm.data.length;
-
- n_param=0;
- posteriorMap=new int[n_sents][][];
- projectionMap=new TIntArrayList[n_words][];
- for(int sentNum=0;sentNum<n_sents;sentNum++){
- int [] data=hmm.data[sentNum];
- posteriorMap[sentNum]=new int[data.length][n_states];
- numWordsToProject=0;
- for(int i=0;i<data.length;i++){
- int word=data[i];
- for(int state=0;state<n_states;state++){
- if(wordFreq[word]>MIN_FREQ){
- if(projectionMap[word]==null){
- projectionMap[word]=new TIntArrayList[n_states];
- }
- // if(posteriorMap[sentNum][i]==null){
- // posteriorMap[sentNum][i]=new int[n_states];
- // }
-
- posteriorMap[sentNum][i][state]=n_param;
- if(projectionMap[word][state]==null){
- projectionMap[word][state]=new TIntArrayList();
- numWordsToProject++;
- }
- projectionMap[word][state].add(n_param);
- n_param++;
- }
- else{
- posteriorMap[sentNum][i][state]=-1;
- }
- }
- }
- }
- }
-
- @Override
- public double[] projectPoint(double[] point) {
- // project each constrained (word,state) block of lambdas back onto its simplex
- for(int i=0;i<projectionMap.length;i++){
-
- if(projectionMap[i]==null){
- //this word is not constrained
- continue;
- }
-
- for(int j=0;j<projectionMap[i].length;j++){
- TIntArrayList instances=projectionMap[i][j];
- double[] toProject = new double[instances.size()];
-
- for (int k = 0; k < toProject.length; k++) {
- // System.out.print(instances.get(k) + " ");
- toProject[k] = point[instances.get(k)];
- }
-
- projection.project(toProject);
- for (int k = 0; k < toProject.length; k++) {
- newPoint[instances.get(k)]=toProject[k];
- }
- }
- }
- return newPoint;
- }
-
- @Override
- public double[] getGradient() {
- // the gradient is maintained by updateFunction(); just record the access
- gradientCalls++;
- return gradient;
- }
-
- @Override
- public double getValue() {
- // the objective value is the log-likelihood accumulated in updateFunction()
- functionCalls++;
- return loglikelihood;
- }
-
-
- @Override
- public String toString() {
- // print the current parameters, breaking the line every 100 entries
- StringBuffer sb = new StringBuffer();
- for (int i = 0; i < parameters.length; i++) {
- sb.append(parameters[i]+" ");
- if(i%100==0){
- sb.append("\n");
- }
- }
- sb.append("\n");
- /*
- for (int i = 0; i < gradient.length; i++) {
- sb.append(gradient[i]+" ");
- if(i%100==0){
- sb.append("\n");
- }
- }
- sb.append("\n");
- */
- return sb.toString();
- }
-
-
- /**
- * @param seq
- * @return posterior probability of each transition
- */
- public double [][][]forwardBackward(int sentNum){
- int [] seq=hmm.data[sentNum];
- int n_states=hmm.trans.length;
- double a[][]=new double [seq.length][n_states];
- double b[][]=new double [seq.length][n_states];
-
- int len=seq.length;
-
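- // same scaled forward-backward as HMM.forwardBackward, except that the
- // emission score of every constrained word is reweighted by
- // exp(-lambda), yielding the projected posteriors q of the PR objective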
- boolean constrained=
- (projectionMap[seq[0]]!=null);
-
- //initialize the first step
- for(int i=0;i<n_states;i++){
- a[0][i]=hmm.emit[i][seq[0]]*hmm.pi[i];
- if(constrained){
- a[0][i]*=
- Math.exp(- parameters[ posteriorMap[sentNum][0][i] ] );
- }
- b[len-1][i]=1;
- }
-
- loglikelihood+=Math.log(hmm.l1norm(a[0]));
- hmm.l1normalize(a[0]);
- hmm.l1normalize(b[len-1]);
-
- //forward
- for(int n=1;n<len;n++){
-
- constrained=
- (projectionMap[seq[n]]!=null);
-
- for(int i=0;i<n_states;i++){
- for(int j=0;j<n_states;j++){
- a[n][i]+=hmm.trans[j][i]*a[n-1][j];
- }
- a[n][i]*=hmm.emit[i][seq[n]];
-
- if(constrained){
- a[n][i]*=
- Math.exp(- parameters[ posteriorMap[sentNum][n][i] ] );
- }
-
- }
- loglikelihood+=Math.log(hmm.l1norm(a[n]));
- hmm.l1normalize(a[n]);
- }
-
- //temp variable for e^{-\lambda}
- double factor=1;
- //backward
- for(int n=len-2;n>=0;n--){
-
- constrained=
- (projectionMap[seq[n+1]]!=null);
-
- for(int i=0;i<n_states;i++){
- for(int j=0;j<n_states;j++){
-
- if(constrained){
- factor=
- Math.exp(- parameters[ posteriorMap[sentNum][n+1][j] ] );
- }else{
- factor=1;
- }
-
- b[n][i]+=hmm.trans[i][j]*b[n+1][j]*hmm.emit[j][seq[n+1]]*factor;
-
- }
- }
- hmm.l1normalize(b[n]);
- }
-
- //expected transition
- double p[][][]=new double [seq.length][n_states][n_states];
- for(int n=0;n<len-1;n++){
-
- constrained=
- (projectionMap[seq[n+1]]!=null);
-
- for(int i=0;i<n_states;i++){
- for(int j=0;j<n_states;j++){
-
- if(constrained){
- factor=
- Math.exp(- parameters[ posteriorMap[sentNum][n+1][j] ] );
- }else{
- factor=1;
- }
-
- p[n][i][j]=a[n][i]*hmm.trans[i][j]*
- hmm.emit[j][seq[n+1]]*b[n+1][j]*factor;
-
- }
- }
-
- hmm.l1normalize(p[n]);
- }
- return p;
- }
-
- public void optimizeWithProjectedGradientDescent(){
- LineSearchMethod ls =
- new ArmijoLineSearchMinimizationAlongProjectionArc
- (new InterpolationPickFirstStep(INIT_STEP_SIZE));
-
- OptimizerStats stats = new OptimizerStats();
-
-
- ProjectedGradientDescent optimizer = new ProjectedGradientDescent(ls);
- StopingCriteria stopGrad = new ProjectedGradientL2Norm(GRAD_DIFF);
- StopingCriteria stopValue = new ValueDifference(VAL_DIFF);
- CompositeStopingCriteria compositeStop = new CompositeStopingCriteria();
- compositeStop.add(stopGrad);
- compositeStop.add(stopValue);
-
- optimizer.setMaxIterations(10);
- updateFunction();
- boolean succeeded = optimizer.optimize(this,stats,compositeStop);
- System.out.println("Ended optimization Projected Gradient Descent\n" + stats.prettyPrint(1));
- if(succeeded){
- System.out.println("Ended optimization in " + optimizer.getCurrentIteration());
- }else{
- System.out.println("Failed to optimize");
- }
- }
-
- @Override
- public void setParameters(double[] params) {
- super.setParameters(params);
- updateFunction();
- }
-
- private void updateFunction(){
-
- updateCalls++;
- loglikelihood=0;
-
- for(int sentNum=0;sentNum<hmm.data.length;sentNum++){
- double [][][]p=forwardBackward(sentNum);
-
- for(int n=0;n<p.length-1;n++){
- for(int i=0;i<p[n].length;i++){
- if(projectionMap[hmm.data[sentNum][n]]!=null){
- double posterior=0;
- for(int j=0;j<p[n][i].length;j++){
- posterior+=p[n][i][j];
- }
- gradient[posteriorMap[sentNum][n][i]]=-posterior;
- }
- }
- }
-
- //the last state
- int n=p.length-2;
- for(int i=0;i<p[n].length;i++){
- if(projectionMap[hmm.data[sentNum][n+1]]!=null){
-
- double posterior=0;
- for(int j=0;j<p[n].length;j++){
- posterior+=p[n][j][i];
- }
- gradient[posteriorMap[sentNum][n+1][i]]=-posterior;
-
- }
- }
- }
-
- }
-
-}
diff --git a/gi/posterior-regularisation/prjava/src/hmm/POS.java b/gi/posterior-regularisation/prjava/src/hmm/POS.java deleted file mode 100644 index bdcbc683..00000000 --- a/gi/posterior-regularisation/prjava/src/hmm/POS.java +++ /dev/null @@ -1,120 +0,0 @@ -package hmm;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.HashMap;
-
-import data.Corpus;
-
-public class POS {
-
- //public String trainFilename="../posdata/en_train.conll";
- public static String trainFilename="../posdata/small_train.txt";
-// public static String trainFilename="../posdata/en_test.conll";
-// public static String trainFilename="../posdata/trial1.txt";
-
- public static String testFilename="../posdata/en_test.conll";
- //public static String testFilename="../posdata/trial1.txt";
-
- public static String predFilename="../posdata/en_test.predict.conll";
- public static String modelFilename="../posdata/posModel.out";
- public static final int ITER=20;
- public static final int N_STATE=30;
-
- public static void main(String[] args) {
- //POS p=new POS();
- //POS p=new POS(true);
- try {
- PRPOS();
- } catch (FileNotFoundException e) {
- e.printStackTrace();
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
-
- public POS() throws FileNotFoundException, IOException{
- Corpus c= new Corpus(trainFilename);
- //size of vocabulary +1 for unknown tokens
- HMM hmm =new HMM(N_STATE, c.getVocabSize()+1,c.getAllData());
- for(int i=0;i<ITER;i++){
- System.out.println("Iter"+i);
- hmm.EM();
- if((i+1)%10==0){
- hmm.writeModel(modelFilename+i);
- }
- }
-
- hmm.writeModel(modelFilename);
-
- Corpus test=new Corpus(testFilename,c.vocab);
-
- PrintStream ps= io.FileUtil.printstream(new File(predFilename));
-
- int [][]data=test.getAllData();
- for(int i=0;i<data.length;i++){
- int []tag=hmm.viterbi(data[i]);
- String sent[]=test.get(i);
- for(int j=0;j<data[i].length;j++){
- ps.println(sent[j]+"\t"+tag[j]);
- }
- ps.println();
- }
- ps.close();
- }
-
- //Unsupervised POS induction with posterior regularisation: L1/Linf sparsity constraints on word-tag posteriors
- public static void PRPOS() throws FileNotFoundException, IOException{
- Corpus c= new Corpus(trainFilename);
- //size of vocabulary +1 for unknown tokens
- HMM hmm =new HMM(N_STATE, c.getVocabSize()+1,c.getAllData());
- hmm.o=new HMMObjective(hmm);
- for(int i=0;i<ITER;i++){
- System.out.println("Iter: "+i);
- hmm.PREM();
- if((i+1)%10==0){
- hmm.writeModel(modelFilename+i);
- }
- }
-
- hmm.writeModel(modelFilename);
- }
-
-
- public POS(boolean supervised) throws FileNotFoundException, IOException{
- Corpus c= new Corpus(trainFilename);
- //size of vocabulary +1 for unknown tokens
- HMM hmm =new HMM(c.tagVocab.size() , c.getVocabSize()+1,c.getAllData());
- hmm.train(c.getTagData());
-
- hmm.writeModel(modelFilename);
-
- Corpus test=new Corpus(testFilename,c.vocab);
-
- HashMap<String, Integer>tagVocab=
- (HashMap<String, Integer>) io.SerializedObjects.readSerializedObject(Corpus.tagalphaFilename);
- String [] tagdict=new String [tagVocab.size()+1];
- for(String key:tagVocab.keySet()){
- tagdict[tagVocab.get(key)]=key;
- }
- tagdict[tagdict.length-1]=Corpus.UNK_TOK;
-
- System.out.println(c.vocab.get("<e>"));
-
- PrintStream ps= io.FileUtil.printstream(new File(predFilename));
-
- int [][]data=test.getAllData();
- for(int i=0;i<data.length;i++){
- int []tag=hmm.viterbi(data[i]);
- String sent[]=test.get(i);
- for(int j=0;j<data[i].length;j++){
- ps.println(sent[j]+"\t"+tagdict[tag[j]]);
- }
- ps.println();
- }
- ps.close();
- }
-}
diff --git a/gi/posterior-regularisation/prjava/src/io/FileUtil.java b/gi/posterior-regularisation/prjava/src/io/FileUtil.java deleted file mode 100644 index 6720d087..00000000 --- a/gi/posterior-regularisation/prjava/src/io/FileUtil.java +++ /dev/null @@ -1,48 +0,0 @@ -package io;
-import java.util.*;
-import java.util.zip.GZIPInputStream;
-import java.util.zip.GZIPOutputStream;
-import java.io.*;
-public class FileUtil
-{
- public static BufferedReader reader(File file) throws FileNotFoundException, IOException
- {
- if (file.getName().endsWith(".gz"))
- return new BufferedReader(new InputStreamReader(new GZIPInputStream(new FileInputStream(file)), "UTF8"));
- else
- return new BufferedReader(new InputStreamReader(new FileInputStream(file), "UTF8"));
- }
-
- public static PrintStream printstream(File file) throws FileNotFoundException, IOException
- {
- if (file.getName().endsWith(".gz"))
- return new PrintStream(new GZIPOutputStream(new FileOutputStream(file)), true, "UTF8");
- else
- return new PrintStream(new FileOutputStream(file), true, "UTF8");
- }
-
- public static Scanner openInFile(String filename)
- {
- Scanner localsc=null;
- try
- {
- localsc=new Scanner(new FileInputStream(filename), "UTF8");
-
- }catch(IOException ioe){
- System.out.println(ioe.getMessage());
- }
- return localsc;
- }
-
- public static FileInputStream openInputStream(String infilename)
- {
- FileInputStream fis=null;
- try {
- fis = new FileInputStream(infilename);
-
- } catch (IOException ioe) {
- System.out.println(ioe.getMessage());
- }
- return fis;
- }
-}
diff --git a/gi/posterior-regularisation/prjava/src/io/SerializedObjects.java b/gi/posterior-regularisation/prjava/src/io/SerializedObjects.java deleted file mode 100644 index d1631b51..00000000 --- a/gi/posterior-regularisation/prjava/src/io/SerializedObjects.java +++ /dev/null @@ -1,83 +0,0 @@ -package io; - - - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.ObjectInput; -import java.io.ObjectInputStream; -import java.io.ObjectOutput; -import java.io.ObjectOutputStream; -import java.io.OutputStream; - -public class SerializedObjects -{ - public static void writeSerializedObject(Object object, String outFile) - { - ObjectOutput output = null; - try{ - //use buffering - OutputStream file = new FileOutputStream(outFile); - OutputStream buffer = new BufferedOutputStream( file ); - output = new ObjectOutputStream( buffer ); - output.writeObject(object); - buffer.close(); - file.close(); - } - catch(IOException ex){ - ex.printStackTrace(); - } - finally{ - try { - if (output != null) { - //flush and close "output" and its underlying streams - output.close(); - } - } - catch (IOException ex ){ - ex.printStackTrace(); - } - } - } - - public static Object readSerializedObject(String inputFile) - { - ObjectInput input = null; - Object recoveredObject=null; - try{ - //use buffering - InputStream file = new FileInputStream(inputFile); - InputStream buffer = new BufferedInputStream(file); - input = new ObjectInputStream(buffer); - //deserialize the List - recoveredObject = input.readObject(); - } - catch(IOException ex){ - ex.printStackTrace(); - } - catch (ClassNotFoundException ex){ - ex.printStackTrace(); - } - catch(Exception ex) - { - ex.printStackTrace(); - } - finally{ - try { - if ( input != null ) { - //close "input" and its underlying streams - input.close(); - } - } - catch (IOException ex){ - ex.printStackTrace(); - } - } - return recoveredObject; - } - -}
\ No newline at end of file diff --git a/gi/posterior-regularisation/prjava/src/optimization/examples/GeneralizedRosenbrock.java b/gi/posterior-regularisation/prjava/src/optimization/examples/GeneralizedRosenbrock.java deleted file mode 100644 index 25fa7f09..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/examples/GeneralizedRosenbrock.java +++ /dev/null @@ -1,110 +0,0 @@ -package optimization.examples; - - -import optimization.gradientBasedMethods.ConjugateGradient; -import optimization.gradientBasedMethods.GradientDescent; -import optimization.gradientBasedMethods.LBFGS; -import optimization.gradientBasedMethods.Objective; -import optimization.gradientBasedMethods.Optimizer; -import optimization.gradientBasedMethods.stats.OptimizerStats; -import optimization.linesearch.ArmijoLineSearchMinimization; -import optimization.linesearch.LineSearchMethod; -import optimization.stopCriteria.GradientL2Norm; -import optimization.stopCriteria.StopingCriteria; -import optimization.util.MathUtils; - -/** - * - * @author javg - * f(x) = \sum_{i=1}^{N-1} \left[ (1-x_i)^2+ 100 (x_{i+1} - x_i^2 )^2 \right] \quad \forall x\in\mathbb{R}^N. - */ -public class GeneralizedRosenbrock extends Objective{ - - - - public GeneralizedRosenbrock(int dimensions){ - parameters = new double[dimensions]; - java.util.Arrays.fill(parameters, 0); - gradient = new double[dimensions]; - - } - - public GeneralizedRosenbrock(int dimensions, double[] params){ - parameters = params; - gradient = new double[dimensions]; - } - - - public double getValue() { - functionCalls++; - double value = 0; - for(int i = 0; i < parameters.length-1; i++){ - value += MathUtils.square(1-parameters[i]) + 100*MathUtils.square(parameters[i+1] - MathUtils.square(parameters[i])); - } - - return value; - } - - /** - * gx = -2(1-x) -2x200(y-x^2) - * gy = 200(y-x^2) - */ - public double[] getGradient() { - gradientCalls++; - java.util.Arrays.fill(gradient,0); - for(int i = 0; i < parameters.length-1; i++){ - gradient[i]+=-2*(1-parameters[i]) - 400*parameters[i]*(parameters[i+1] - MathUtils.square(parameters[i])); - gradient[i+1]+=200*(parameters[i+1] - MathUtils.square(parameters[i])); - } - return gradient; - } - - - - - - - - public String toString(){ - String res =""; - for(int i = 0; i < parameters.length; i++){ - res += "P" + i+ " " + parameters[i]; - } - res += " Value " + getValue(); - return res; - } - - public static void main(String[] args) { - - GeneralizedRosenbrock o = new GeneralizedRosenbrock(2); - System.out.println("Starting optimization " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1]); - ; - - System.out.println("Doing Gradient descent"); - //LineSearchMethod wolfe = new WolfRuleLineSearch(new InterpolationPickFirstStep(1),100,0.001,0.1); - StopingCriteria stop = new GradientL2Norm(0.001); - LineSearchMethod ls = new ArmijoLineSearchMinimization(); - Optimizer optimizer = new GradientDescent(ls); - OptimizerStats stats = new OptimizerStats(); - optimizer.setMaxIterations(1000); - boolean succed = optimizer.optimize(o,stats, stop); - System.out.println("Suceess " + succed + "/n"+stats.prettyPrint(1)); - System.out.println("Doing Conjugate Gradient descent"); - o = new GeneralizedRosenbrock(2); - // wolfe = new WolfRuleLineSearch(new InterpolationPickFirstStep(1),100,0.001,0.1); - optimizer = new ConjugateGradient(ls); - stats = new OptimizerStats(); - optimizer.setMaxIterations(1000); - succed = optimizer.optimize(o,stats,stop); - System.out.println("Suceess " + succed + "/n"+stats.prettyPrint(1)); - 
System.out.println("Doing Quasi newton descent"); - o = new GeneralizedRosenbrock(2); - optimizer = new LBFGS(ls,10); - stats = new OptimizerStats(); - optimizer.setMaxIterations(1000); - succed = optimizer.optimize(o,stats,stop); - System.out.println("Suceess " + succed + "/n"+stats.prettyPrint(1)); - - } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/examples/x2y2.java b/gi/posterior-regularisation/prjava/src/optimization/examples/x2y2.java deleted file mode 100644 index f087681e..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/examples/x2y2.java +++ /dev/null @@ -1,128 +0,0 @@ -package optimization.examples; - - -import optimization.gradientBasedMethods.ConjugateGradient; - -import optimization.gradientBasedMethods.GradientDescent; -import optimization.gradientBasedMethods.LBFGS; -import optimization.gradientBasedMethods.Objective; -import optimization.gradientBasedMethods.stats.OptimizerStats; -import optimization.linesearch.GenericPickFirstStep; -import optimization.linesearch.LineSearchMethod; -import optimization.linesearch.WolfRuleLineSearch; -import optimization.stopCriteria.GradientL2Norm; -import optimization.stopCriteria.StopingCriteria; - - -/** - * @author javg - * - */ -public class x2y2 extends Objective{ - - - //Implements function ax2+ by2 - double a, b; - public x2y2(double a, double b){ - this.a = a; - this.b = b; - parameters = new double[2]; - parameters[0] = 4; - parameters[1] = 4; - gradient = new double[2]; - } - - public double getValue() { - functionCalls++; - return a*parameters[0]*parameters[0]+b*parameters[1]*parameters[1]; - } - - public double[] getGradient() { - gradientCalls++; - gradient[0]=2*a*parameters[0]; - gradient[1]=2*b*parameters[1]; - return gradient; -// if(debugLevel >=2){ -// double[] numericalGradient = DebugHelpers.getNumericalGradient(this, parameters, 0.000001); -// for(int i = 0; i < parameters.length; i++){ -// double diff = Math.abs(gradient[i]-numericalGradient[i]); -// if(diff > 0.00001){ -// System.out.println("Numerical Gradient does not match"); -// System.exit(1); -// } -// } -// } - } - - - - public void optimizeWithGradientDescent(LineSearchMethod ls, OptimizerStats stats, x2y2 o){ - GradientDescent optimizer = new GradientDescent(ls); - StopingCriteria stop = new GradientL2Norm(0.001); -// optimizer.setGradientConvergenceValue(0.001); - optimizer.setMaxIterations(100); - boolean succed = optimizer.optimize(o,stats,stop); - System.out.println("Ended optimzation Gradient Descent\n" + stats.prettyPrint(1)); - System.out.println("Solution: " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1]); - if(succed){ - System.out.println("Ended optimization in " + optimizer.getCurrentIteration()); - }else{ - System.out.println("Failed to optimize"); - } - } - - public void optimizeWithConjugateGradient(LineSearchMethod ls, OptimizerStats stats, x2y2 o){ - ConjugateGradient optimizer = new ConjugateGradient(ls); - StopingCriteria stop = new GradientL2Norm(0.001); - - optimizer.setMaxIterations(10); - boolean succed = optimizer.optimize(o,stats,stop); - System.out.println("Ended optimzation Conjugate Gradient\n" + stats.prettyPrint(1)); - System.out.println("Solution: " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1]); - if(succed){ - System.out.println("Ended optimization in " + optimizer.getCurrentIteration()); - }else{ - System.out.println("Failed to optimize"); - } - } - - public void optimizeWithLBFGS(LineSearchMethod ls, OptimizerStats stats, x2y2 o){ - LBFGS optimizer = new LBFGS(ls,10); 
- StopingCriteria stop = new GradientL2Norm(0.001); - optimizer.setMaxIterations(10); - boolean succed = optimizer.optimize(o,stats,stop); - System.out.println("Ended optimzation LBFGS\n" + stats.prettyPrint(1)); - System.out.println("Solution: " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1]); - if(succed){ - System.out.println("Ended optimization in " + optimizer.getCurrentIteration()); - }else{ - System.out.println("Failed to optimize"); - } - } - - public static void main(String[] args) { - x2y2 o = new x2y2(1,10); - System.out.println("Starting optimization " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1]); - o.setDebugLevel(4); - LineSearchMethod wolfe = new WolfRuleLineSearch(new GenericPickFirstStep(1),0.001,0.9);; -// LineSearchMethod ls = new ArmijoLineSearchMinimization(); - OptimizerStats stats = new OptimizerStats(); - o.optimizeWithGradientDescent(wolfe, stats, o); - o = new x2y2(1,10); - System.out.println("Starting optimization " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1]); -// ls = new ArmijoLineSearchMinimization(); - stats = new OptimizerStats(); - o.optimizeWithConjugateGradient(wolfe, stats, o); - o = new x2y2(1,10); - System.out.println("Starting optimization " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1]); -// ls = new ArmijoLineSearchMinimization(); - stats = new OptimizerStats(); - o.optimizeWithLBFGS(wolfe, stats, o); - } - - public String toString(){ - return "P1: " + parameters[0] + " P2: " + parameters[1] + " value " + getValue(); - } - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/examples/x2y2WithConstraints.java b/gi/posterior-regularisation/prjava/src/optimization/examples/x2y2WithConstraints.java deleted file mode 100644 index 391775b7..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/examples/x2y2WithConstraints.java +++ /dev/null @@ -1,127 +0,0 @@ -package optimization.examples; - - -import optimization.gradientBasedMethods.ProjectedGradientDescent; -import optimization.gradientBasedMethods.ProjectedObjective; -import optimization.gradientBasedMethods.stats.OptimizerStats; -import optimization.linesearch.ArmijoLineSearchMinimizationAlongProjectionArc; -import optimization.linesearch.InterpolationPickFirstStep; -import optimization.linesearch.LineSearchMethod; -import optimization.projections.BoundsProjection; -import optimization.projections.Projection; -import optimization.projections.SimplexProjection; -import optimization.stopCriteria.CompositeStopingCriteria; -import optimization.stopCriteria.GradientL2Norm; -import optimization.stopCriteria.ProjectedGradientL2Norm; -import optimization.stopCriteria.StopingCriteria; -import optimization.stopCriteria.ValueDifference; - - -/** - * @author javg - * - * - *ax2+ b(y2 -displacement) - */ -public class x2y2WithConstraints extends ProjectedObjective{ - - - double a, b; - double dx; - double dy; - Projection projection; - - - public x2y2WithConstraints(double a, double b, double[] params, double dx, double dy, Projection proj){ - //projection = new BoundsProjection(0.2,Double.MAX_VALUE); - super(); - projection = proj; - this.a = a; - this.b = b; - this.dx = dx; - this.dy = dy; - setInitialParameters(params); - System.out.println("Function " +a+"(x-"+dx+")^2 + "+b+"(y-"+dy+")^2"); - System.out.println("Gradient " +(2*a)+"(x-"+dx+") ; "+(b*2)+"(y-"+dy+")"); - printParameters(); - projection.project(parameters); - printParameters(); - gradient = new double[2]; - } - - public double getValue() { - functionCalls++; - return 
a*(parameters[0]-dx)*(parameters[0]-dx)+b*((parameters[1]-dy)*(parameters[1]-dy)); - } - - public double[] getGradient() { - if(gradient == null){ - gradient = new double[2]; - } - gradientCalls++; - gradient[0]=2*a*(parameters[0]-dx); - gradient[1]=2*b*(parameters[1]-dy); - return gradient; - } - - - public double[] projectPoint(double[] point) { - double[] newPoint = point.clone(); - projection.project(newPoint); - return newPoint; - } - - public void optimizeWithProjectedGradientDescent(LineSearchMethod ls, OptimizerStats stats, x2y2WithConstraints o){ - ProjectedGradientDescent optimizer = new ProjectedGradientDescent(ls); - StopingCriteria stopGrad = new ProjectedGradientL2Norm(0.001); - StopingCriteria stopValue = new ValueDifference(0.001); - CompositeStopingCriteria compositeStop = new CompositeStopingCriteria(); - compositeStop.add(stopGrad); - compositeStop.add(stopValue); - - optimizer.setMaxIterations(5); - boolean succed = optimizer.optimize(o,stats,compositeStop); - System.out.println("Ended optimzation Projected Gradient Descent\n" + stats.prettyPrint(1)); - System.out.println("Solution: " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1]); - if(succed){ - System.out.println("Ended optimization in " + optimizer.getCurrentIteration()); - }else{ - System.out.println("Failed to optimize"); - } - } - - - - public String toString(){ - - return "P1: " + parameters[0] + " P2: " + parameters[1] + " value " + getValue() + " grad (" + getGradient()[0] + ":" + getGradient()[1]+")"; - } - - public static void main(String[] args) { - double a = 1; - double b=1; - double x0 = 0; - double y0 =1; - double dx = 0.5; - double dy = 0.5 ; - double [] parameters = new double[2]; - parameters[0] = x0; - parameters[1] = y0; - x2y2WithConstraints o = new x2y2WithConstraints(a,b,parameters,dx,dy, new SimplexProjection(0.5)); - System.out.println("Starting optimization " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1] + " a " + a + " b "+b ); - o.setDebugLevel(4); - - LineSearchMethod ls = new ArmijoLineSearchMinimizationAlongProjectionArc(new InterpolationPickFirstStep(1)); - - OptimizerStats stats = new OptimizerStats(); - o.optimizeWithProjectedGradientDescent(ls, stats, o); - -// o = new x2y2WithConstraints(a,b,x0,y0,dx,dy); -// stats = new OptimizerStats(); -// o.optimizeWithSpectralProjectedGradientDescent(stats, o); - } - - - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/AbstractGradientBaseMethod.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/AbstractGradientBaseMethod.java deleted file mode 100644 index 2fcb7990..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/AbstractGradientBaseMethod.java +++ /dev/null @@ -1,120 +0,0 @@ -package optimization.gradientBasedMethods; - -import optimization.gradientBasedMethods.stats.OptimizerStats; -import optimization.linesearch.DifferentiableLineSearchObjective; -import optimization.linesearch.LineSearchMethod; -import optimization.stopCriteria.StopingCriteria; -import optimization.util.MathUtils; - -/** - * - * @author javg - * - */ -public abstract class AbstractGradientBaseMethod implements Optimizer{ - - protected int maxNumberOfIterations=10000; - - - - protected int currentProjectionIteration; - protected double currValue; - protected double previousValue = Double.MAX_VALUE;; - protected double step; - protected double[] gradient; - public double[] direction; - - //Original values - protected double originalGradientL2Norm; - - 
protected LineSearchMethod lineSearch; - DifferentiableLineSearchObjective lso; - - - public void reset(){ - direction = null; - gradient = null; - previousValue = Double.MAX_VALUE; - currentProjectionIteration = 0; - originalGradientL2Norm = 0; - step = 0; - currValue = 0; - } - - public void initializeStructures(Objective o,OptimizerStats stats, StopingCriteria stop){ - lso = new DifferentiableLineSearchObjective(o); - } - public void updateStructuresBeforeStep(Objective o,OptimizerStats stats, StopingCriteria stop){ - } - - public void updateStructuresAfterStep(Objective o,OptimizerStats stats, StopingCriteria stop){ - } - - public boolean optimize(Objective o,OptimizerStats stats, StopingCriteria stop){ - //Initialize structures - - stats.collectInitStats(this, o); - direction = new double[o.getNumParameters()]; - initializeStructures(o, stats, stop); - for (currentProjectionIteration = 1; currentProjectionIteration < maxNumberOfIterations; currentProjectionIteration++){ - //System.out.println("\tgradient descent iteration " + currentProjectionIteration); - //System.out.print("\tparameters:" ); - //o.printParameters(); - previousValue = currValue; - currValue = o.getValue(); - gradient = o.getGradient(); - if(stop.stopOptimization(o)){ - stats.collectFinalStats(this, o); - return true; - } - - getDirection(); - if(MathUtils.dotProduct(gradient, direction) > 0){ - System.out.println("Not a descent direction"); - System.out.println(" current stats " + stats.prettyPrint(1)); - System.exit(-1); - } - updateStructuresBeforeStep(o, stats, stop); - lso.reset(direction); - step = lineSearch.getStepSize(lso); - //System.out.println("\t\tLeave with step: " + step); - if(step==-1){ - System.out.println("Failed to find step"); - stats.collectFinalStats(this, o); - return false; - } - updateStructuresAfterStep( o, stats, stop); -// previousValue = currValue; -// currValue = o.getValue(); -// gradient = o.getGradient(); - stats.collectIterationStats(this, o); - } - stats.collectFinalStats(this, o); - return false; - } - - - public int getCurrentIteration() { - return currentProjectionIteration; - } - - - /** - * Method specific - */ - public abstract double[] getDirection(); - - public double getCurrentStep() { - return step; - } - - - - public void setMaxIterations(int max) { - maxNumberOfIterations = max; - } - - public double getCurrentValue() { - return currValue; - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ConjugateGradient.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ConjugateGradient.java deleted file mode 100644 index 28295729..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ConjugateGradient.java +++ /dev/null @@ -1,92 +0,0 @@ -package optimization.gradientBasedMethods; - -import optimization.gradientBasedMethods.stats.OptimizerStats; -import optimization.linesearch.DifferentiableLineSearchObjective; -import optimization.linesearch.LineSearchMethod; -import optimization.stopCriteria.StopingCriteria; -import optimization.util.MathUtils; - - - -public class ConjugateGradient extends AbstractGradientBaseMethod{ - - - double[] previousGradient; - double[] previousDirection; - - public ConjugateGradient(LineSearchMethod lineSearch) { - this.lineSearch = lineSearch; - } - - public void reset(){ - super.reset(); - java.util.Arrays.fill(previousDirection, 0); - java.util.Arrays.fill(previousGradient, 0); - } - - public void initializeStructures(Objective o,OptimizerStats 
stats, StopingCriteria stop){ - super.initializeStructures(o, stats, stop); - previousGradient = new double[o.getNumParameters()]; - previousDirection = new double[o.getNumParameters()]; - } - public void updateStructuresBeforeStep(Objective o,OptimizerStats stats, StopingCriteria stop){ - System.arraycopy(gradient, 0, previousGradient, 0, gradient.length); - System.arraycopy(direction, 0, previousDirection, 0, direction.length); - } - -// public boolean optimize(Objective o,OptimizerStats stats, StopingCriteria stop){ -// DifferentiableLineSearchObjective lso = new DifferentiableLineSearchObjective(o); -// stats.collectInitStats(this, o); -// direction = new double[o.getNumParameters()]; -// initializeStructures(o, stats, stop); -// for (currentProjectionIteration = 0; currentProjectionIteration < maxNumberOfIterations; currentProjectionIteration++){ -// previousValue = currValue; -// currValue = o.getValue(); -// gradient =o.getGradient(); -// if(stop.stopOptimization(gradient)){ -// stats.collectFinalStats(this, o); -// return true; -// } -// getDirection(); -// updateStructures(o, stats, stop); -// lso.reset(direction); -// step = lineSearch.getStepSize(lso); -// if(step==-1){ -// System.out.println("Failed to find a step size"); -// System.out.println("Failed to find step"); -// stats.collectFinalStats(this, o); -// return false; -// } -// -// stats.collectIterationStats(this, o); -// } -// stats.collectFinalStats(this, o); -// return false; -// } - - public double[] getDirection(){ - direction = MathUtils.negation(gradient); - if(currentProjectionIteration != 1){ - //Using Polak-Ribiere method (book equation 5.45) - double b = MathUtils.dotProduct(gradient, MathUtils.arrayMinus(gradient, previousGradient)) - /MathUtils.dotProduct(previousGradient, previousGradient); - if(b<0){ - System.out.println("Defaulting to gradient descent"); - b = Math.max(b, 0); - } - MathUtils.plusEquals(direction, previousDirection, b); - //Debug code - if(MathUtils.dotProduct(direction, gradient) > 0){ - System.out.println("Not an descent direction reseting to gradien"); - direction = MathUtils.negation(gradient); - } - } - return direction; - } - - - - - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/DebugHelpers.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/DebugHelpers.java deleted file mode 100644 index 6dc4ef6c..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/DebugHelpers.java +++ /dev/null @@ -1,65 +0,0 @@ -package optimization.gradientBasedMethods; - -import java.util.ArrayList; - -import optimization.util.MathUtils; - - - -public class DebugHelpers { - public static void getLineSearchGraph(Objective o, double[] direction, - double[] parameters, double originalObj, - double originalDot, double c1, double c2){ - ArrayList<Double> stepS = new ArrayList<Double>(); - ArrayList<Double> obj = new ArrayList<Double>(); - ArrayList<Double> norm = new ArrayList<Double>(); - double[] gradient = new double[o.getNumParameters()]; - double[] newParameters = parameters.clone(); - MathUtils.plusEquals(newParameters,direction,0); - o.setParameters(newParameters); - double minValue = o.getValue(); - int valuesBiggerThanMax = 0; - for(double step = 0; step < 2; step +=0.01 ){ - newParameters = parameters.clone(); - MathUtils.plusEquals(newParameters,direction,step); - o.setParameters(newParameters); - double newValue = o.getValue(); - gradient = o.getGradient(); - double newgradDirectionDot = 
MathUtils.dotProduct(gradient,direction); - stepS.add(step); - obj.add(newValue); - norm.add(newgradDirectionDot); - if(newValue <= minValue){ - minValue = newValue; - }else{ - valuesBiggerThanMax++; - } - - if(valuesBiggerThanMax > 10){ - break; - } - - } - System.out.println("step\torigObj\tobj\tsuffdec\tnorm\tcurvature1"); - for(int i = 0; i < stepS.size(); i++){ - double cnorm= norm.get(i); - System.out.println(stepS.get(i)+"\t"+originalObj +"\t"+obj.get(i) + "\t" + - (originalObj + originalDot*((Double)stepS.get(i))*c1) +"\t"+Math.abs(cnorm) +"\t"+c2*Math.abs(originalDot)); - } - } - - public static double[] getNumericalGradient(Objective o, double[] parameters, double epsilon){ - int nrParameters = o.getNumParameters(); - double[] gradient = new double[nrParameters]; - double[] newParameters; - double originalValue = o.getValue(); - for(int parameter = 0; parameter < nrParameters; parameter++){ - newParameters = parameters.clone(); - newParameters[parameter]+=epsilon; - o.setParameters(newParameters); - double newValue = o.getValue(); - gradient[parameter]=(newValue-originalValue)/epsilon; - } - return gradient; - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/GradientDescent.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/GradientDescent.java deleted file mode 100644 index 9a53cef4..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/GradientDescent.java +++ /dev/null @@ -1,19 +0,0 @@ -package optimization.gradientBasedMethods; - -import optimization.linesearch.LineSearchMethod; - - - -public class GradientDescent extends AbstractGradientBaseMethod{ - - public GradientDescent(LineSearchMethod lineSearch) { - this.lineSearch = lineSearch; - } - - public double[] getDirection(){ - for(int i = 0; i< gradient.length; i++){ - direction[i] = -gradient[i]; - } - return direction; - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/LBFGS.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/LBFGS.java deleted file mode 100644 index dedbc942..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/LBFGS.java +++ /dev/null @@ -1,234 +0,0 @@ -package optimization.gradientBasedMethods; - - -import optimization.gradientBasedMethods.stats.OptimizerStats; -import optimization.linesearch.DifferentiableLineSearchObjective; -import optimization.linesearch.LineSearchMethod; -import optimization.stopCriteria.StopingCriteria; -import optimization.util.MathUtils; - -public class LBFGS extends AbstractGradientBaseMethod{ - - //How many previous values are being saved - int history; - double[][] skList; - double[][] ykList; - double initialHessianParameters; - double[] previousGradient; - double[] previousParameters; - - //auxiliar structures - double q[]; - double[] roi; - double[] alphai; - - public LBFGS(LineSearchMethod ls, int history) { - lineSearch = ls; - this.history = history; - skList = new double[history][]; - ykList = new double[history][]; - - } - - public void reset(){ - super.reset(); - initialHessianParameters = 0; - previousParameters = null; - previousGradient = null; - skList = new double[history][]; - ykList = new double[history][]; - q = null; - roi = null; - alphai = null; - } - - public double[] LBFGSTwoLoopRecursion(double hessianConst){ - //Only create array once - if(q == null){ - q = new double[gradient.length]; - } - System.arraycopy(gradient, 0, q, 0, gradient.length); 
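// (For reference, a sketch of what follows, which matches the standard
// L-BFGS two-loop recursion, cf. Nocedal & Wright, Algorithm 7.4: the
// first loop walks the (s_i, y_i) pairs from newest to oldest, computing
// alpha_i = rho_i * s_i^T q with rho_i = 1/(y_i^T s_i) and updating
// q -= alpha_i * y_i; q is then scaled by the initial Hessian constant;
// the second loop walks oldest to newest, adding s_i * (alpha_i - beta_i)
// with beta_i = rho_i * y_i^T q. The result approximates H_k * grad f,
// where H_k is the inverse-Hessian estimate, and is negated by
// getDirection() to give a descent direction.)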
- //Only create array once - if(roi == null){ - roi = new double[history]; - } - //Only create array once - if(alphai == null){ - alphai = new double[history]; - } - - for(int i = history-1; i >=0 && skList[i]!= null && ykList[i]!=null; i-- ){ - // System.out.println("New to Old proj " + currentProjectionIteration + " history "+history + " index " + i); - double[] si = skList[i]; - double[] yi = ykList[i]; - roi[i]= 1.0/MathUtils.dotProduct(yi,si); - alphai[i] = MathUtils.dotProduct(si, q)*roi[i]; - MathUtils.plusEquals(q, yi, -alphai[i]); - } - //Initial Hessian is just a constant - MathUtils.scalarMultiplication(q, hessianConst); - for(int i = 0; i <history && skList[i]!= null && ykList[i]!=null; i++ ){ - // System.out.println("Old to New proj " + currentProjectionIteration + " history "+history + " index " + i); - double beta = MathUtils.dotProduct(ykList[i], q)*roi[i]; - MathUtils.plusEquals(q, skList[i], (alphai[i]-beta)); - } - return q; - } - - - - - @Override - public double[] getDirection() { - - calculateInitialHessianParameter(); -// System.out.println("Initial hessian " + initialHessianParameters); - return direction = MathUtils.negation(LBFGSTwoLoopRecursion(initialHessianParameters)); - } - - public void calculateInitialHessianParameter(){ - if(currentProjectionIteration == 1){ - //Use gradient - initialHessianParameters = 1; - }else if(currentProjectionIteration <= history){ - double[] sk = skList[currentProjectionIteration-2]; - double[] yk = ykList[currentProjectionIteration-2]; - initialHessianParameters = MathUtils.dotProduct(sk, yk)/MathUtils.dotProduct(yk, yk); - }else{ - //get the last one - double[] sk = skList[history-1]; - double[] yk = ykList[history-1]; - initialHessianParameters = MathUtils.dotProduct(sk, yk)/MathUtils.dotProduct(yk, yk); - } - } - - //TODO if structures exit just reset them to zero - public void initializeStructures(Objective o,OptimizerStats stats, StopingCriteria stop){ - super.initializeStructures(o, stats, stop); - previousParameters = new double[o.getNumParameters()]; - previousGradient = new double[o.getNumParameters()]; - } - public void updateStructuresBeforeStep(Objective o,OptimizerStats stats, StopingCriteria stop){ - super.initializeStructures(o, stats, stop); - System.arraycopy(o.getParameters(), 0, previousParameters, 0, previousParameters.length); - System.arraycopy(gradient, 0, previousGradient, 0, gradient.length); - } - - public void updateStructuresAfterStep( Objective o,OptimizerStats stats, StopingCriteria stop){ - double[] diffX = MathUtils.arrayMinus(o.getParameters(), previousParameters); - double[] diffGrad = MathUtils.arrayMinus(gradient, previousGradient); - //Save new values and discard new ones - if(currentProjectionIteration > history){ - for(int i = 0; i < history-1;i++){ - skList[i]=skList[i+1]; - ykList[i]=ykList[i+1]; - } - skList[history-1]=diffX; - ykList[history-1]=diffGrad; - }else{ - skList[currentProjectionIteration-1]=diffX; - ykList[currentProjectionIteration-1]=diffGrad; - } - } - -// public boolean optimize(Objective o, OptimizerStats stats, StopingCriteria stop) { -// DifferentiableLineSearchObjective lso = new DifferentiableLineSearchObjective(o); -// gradient = o.getGradient(); -// direction = new double[o.getNumParameters()]; -// previousGradient = new double[o.getNumParameters()]; -// -// previousParameters = new double[o.getNumParameters()]; -// -// stats.collectInitStats(this, o); -// previousValue = Double.MAX_VALUE; -// currValue= o.getValue(); -// //Used for stopping criteria -// double[] 
originalGradient = o.getGradient(); -// -// originalGradientL2Norm = MathUtils.L2Norm(originalGradient); -// if(stop.stopOptimization(originalGradient)){ -// stats.collectFinalStats(this, o); -// return true; -// } -// for (currentProjectionIteration = 1; currentProjectionIteration < maxNumberOfIterations; currentProjectionIteration++){ -// -// -// currValue = o.getValue(); -// gradient = o.getGradient(); -// currParameters = o.getParameters(); -// -// -// if(currentProjectionIteration == 1){ -// //Use gradient -// initialHessianParameters = 1; -// }else if(currentProjectionIteration <= history){ -// double[] sk = skList[currentProjectionIteration-2]; -// double[] yk = ykList[currentProjectionIteration-2]; -// initialHessianParameters = MathUtils.dotProduct(sk, yk)/MathUtils.dotProduct(yk, yk); -// }else{ -// //get the last one -// double[] sk = skList[history-1]; -// double[] yk = ykList[history-1]; -// initialHessianParameters = MathUtils.dotProduct(sk, yk)/MathUtils.dotProduct(yk, yk); -// } -// -// getDirection(); -// -// //MatrixOutput.printDoubleArray(direction, "direction"); -// double dot = MathUtils.dotProduct(direction, gradient); -// if(dot > 0){ -// throw new RuntimeException("Not a descent direction"); -// } if (Double.isNaN(dot)){ -// throw new RuntimeException("dot is not a number!!"); -// } -// System.arraycopy(currParameters, 0, previousParameters, 0, currParameters.length); -// System.arraycopy(gradient, 0, previousGradient, 0, gradient.length); -// lso.reset(direction); -// step = lineSearch.getStepSize(lso); -// if(step==-1){ -// System.out.println("Failed to find a step size"); -//// lso.printLineSearchSteps(); -//// System.out.println(stats.prettyPrint(1)); -// stats.collectFinalStats(this, o); -// return false; -// } -// stats.collectIterationStats(this, o); -// -// //We are not updating the alpha since it is done in line search already -// currParameters = o.getParameters(); -// gradient = o.getGradient(); -// -// if(stop.stopOptimization(gradient)){ -// stats.collectFinalStats(this, o); -// return true; -// } -// double[] diffX = MathUtils.arrayMinus(currParameters, previousParameters); -// double[] diffGrad = MathUtils.arrayMinus(gradient, previousGradient); -// //Save new values and discard new ones -// if(currentProjectionIteration > history){ -// for(int i = 0; i < history-1;i++){ -// skList[i]=skList[i+1]; -// ykList[i]=ykList[i+1]; -// } -// skList[history-1]=diffX; -// ykList[history-1]=diffGrad; -// }else{ -// skList[currentProjectionIteration-1]=diffX; -// ykList[currentProjectionIteration-1]=diffGrad; -// } -// previousValue = currValue; -// } -// stats.collectFinalStats(this, o); -// return false; -// } - - - - - - - - - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/Objective.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/Objective.java deleted file mode 100644 index 6be01bf9..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/Objective.java +++ /dev/null @@ -1,87 +0,0 @@ -package optimization.gradientBasedMethods; - - -/** - * Defines an optimization objective: - * - * - * @author javg - * - */ -public abstract class Objective { - - protected int functionCalls = 0; - protected int gradientCalls = 0; - protected int updateCalls = 0; - - protected double[] parameters; - - //Contains a cache with the gradient - public double[] gradient; - int debugLevel = 0; - - public void setDebugLevel(int level){ - debugLevel = level; - } - - public int 
getNumParameters() { - return parameters.length; - } - - public double getParameter(int index) { - return parameters[index]; - } - - public double[] getParameters() { - return parameters; - } - - public abstract double[] getGradient( ); - - public void setParameter(int index, double value) { - parameters[index]=value; - } - - public void setParameters(double[] params) { - if(parameters == null){ - parameters = new double[params.length]; - } - updateCalls++; - System.arraycopy(params, 0, parameters, 0, params.length); - } - - - public int getNumberFunctionCalls() { - return functionCalls; - } - - public int getNumberGradientCalls() { - return gradientCalls; - } - - public int getNumberUpdateCalls() { - return updateCalls; - } - - public String finalInfoString() { - return "FE: " + functionCalls + " GE " + gradientCalls + " Params updates" + - updateCalls; - } - public void printParameters() { - System.out.println(toString()); - } - - public abstract String toString(); - public abstract double getValue (); - - /** - * Sets the initial objective parameters - * For unconstrained models this just sets the objective params = argument no copying - * For a constrained objective project the parameters and then set - * @param params - */ - public void setInitialParameters(double[] params){ - parameters = params; - } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/Optimizer.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/Optimizer.java deleted file mode 100644 index 96fce5b0..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/Optimizer.java +++ /dev/null @@ -1,19 +0,0 @@ -package optimization.gradientBasedMethods; - -import optimization.gradientBasedMethods.stats.OptimizerStats; -import optimization.stopCriteria.StopingCriteria; - -public interface Optimizer { - public boolean optimize(Objective o,OptimizerStats stats, StopingCriteria stoping); - - - public double[] getDirection(); - public double getCurrentStep(); - public double getCurrentValue(); - public int getCurrentIteration(); - public void reset(); - - public void setMaxIterations(int max); - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedAbstractGradientBaseMethod.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedAbstractGradientBaseMethod.java deleted file mode 100644 index afb29d04..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedAbstractGradientBaseMethod.java +++ /dev/null @@ -1,11 +0,0 @@ -package optimization.gradientBasedMethods; - - -/** - * - * @author javg - * - */ -public abstract class ProjectedAbstractGradientBaseMethod extends AbstractGradientBaseMethod implements ProjectedOptimizer{ - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedGradientDescent.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedGradientDescent.java deleted file mode 100644 index 0186e945..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedGradientDescent.java +++ /dev/null @@ -1,154 +0,0 @@ -package optimization.gradientBasedMethods; - -import java.io.IOException; - -import optimization.gradientBasedMethods.stats.OptimizerStats; -import optimization.linesearch.DifferentiableLineSearchObjective; -import optimization.linesearch.LineSearchMethod; -import 
optimization.linesearch.ProjectedDifferentiableLineSearchObjective; -import optimization.stopCriteria.StopingCriteria; -import optimization.util.MathUtils; - - -/** - * This class implements the projected gradiend - * as described in Bertsekas "Non Linear Programming" - * section 2.3. - * - * The update is given by: - * x_k+1 = x_k + alpha^k(xbar_k-x_k) - * Where xbar is: - * xbar = [x_k -s_k grad(f(x_k))]+ - * where []+ is the projection into the feasibility set - * - * alpha is the step size - * s_k - is a positive scalar which can be view as a step size as well, by - * setting alpha to 1, then x_k+1 = [x_k -s_k grad(f(x_k))]+ - * This is called taking a step size along the projection arc (Bertsekas) which - * we will use by default. - * - * Note that the only place where we actually take a step size is on pick a step size - * so this is going to be just like a normal gradient descent but use a different - * armijo line search where we project after taking a step. - * - * - * @author javg - * - */ -public class ProjectedGradientDescent extends ProjectedAbstractGradientBaseMethod{ - - - - - public ProjectedGradientDescent(LineSearchMethod lineSearch) { - this.lineSearch = lineSearch; - } - - //Use projected differential objective instead - public void initializeStructures(Objective o, OptimizerStats stats, StopingCriteria stop) { - lso = new ProjectedDifferentiableLineSearchObjective(o); - }; - - - ProjectedObjective obj; - public boolean optimize(ProjectedObjective o,OptimizerStats stats, StopingCriteria stop){ - obj = o; - return super.optimize(o, stats, stop); - } - - public double[] getDirection(){ - for(int i = 0; i< gradient.length; i++){ - direction[i] = -gradient[i]; - } - return direction; - } - - - - -} - - - - - - - -///OLD CODE - -//Use projected gradient norm -//public boolean stopCriteria(double[] gradient){ -// if(originalDirenctionL2Norm == 0){ -// System.out.println("Leaving original direction norm is zero"); -// return true; -// } -// if(MathUtils.L2Norm(direction)/originalDirenctionL2Norm < gradientConvergenceValue){ -// System.out.println("Leaving projected gradient Norm smaller than epsilon"); -// return true; -// } -// if((previousValue - currValue)/Math.abs(previousValue) < valueConvergenceValue) { -// System.out.println("Leaving value change below treshold " + previousValue + " - " + currValue); -// System.out.println(previousValue/currValue + " - " + currValue/currValue -// + " = " + (previousValue - currValue)/Math.abs(previousValue)); -// return true; -// } -// return false; -//} -// - -//public boolean optimize(ProjectedObjective o,OptimizerStats stats, StopingCriteria stop){ -// stats.collectInitStats(this, o); -// obj = o; -// step = 0; -// currValue = o.getValue(); -// previousValue = Double.MAX_VALUE; -// gradient = o.getGradient(); -// originalGradientL2Norm = MathUtils.L2Norm(gradient); -// parameterChange = new double[gradient.length]; -// getDirection(); -// ProjectedDifferentiableLineSearchObjective lso = new ProjectedDifferentiableLineSearchObjective(o,direction); -// -// originalDirenctionL2Norm = MathUtils.L2Norm(direction); -// //MatrixOutput.printDoubleArray(currParameters, "parameters"); -// for (currentProjectionIteration = 0; currentProjectionIteration < maxNumberOfIterations; currentProjectionIteration++){ -// // System.out.println("Iter " + currentProjectionIteration); -// //o.printParameters(); -// -// -// -// if(stop.stopOptimization(gradient)){ -// stats.collectFinalStats(this, o); -// lastStepUsed = step; -// return true; -// } -// 
lso.reset(direction); -// step = lineSearch.getStepSize(lso); -// if(step==-1){ -// System.out.println("Failed to find step"); -// stats.collectFinalStats(this, o); -// return false; -// -// } -// -// //Update the direction for stopping criteria -// previousValue = currValue; -// currValue = o.getValue(); -// gradient = o.getGradient(); -// direction = getDirection(); -// if(MathUtils.dotProduct(gradient, direction) > 0){ -// System.out.println("Not a descent direction"); -// System.out.println(" current stats " + stats.prettyPrint(1)); -// System.exit(-1); -// } -// stats.collectIterationStats(this, o); -// } -// lastStepUsed = step; -// stats.collectFinalStats(this, o); -// return false; -// } - -//public boolean optimize(Objective o,OptimizerStats stats, StopingCriteria stop){ -// System.out.println("Objective is not a projected objective"); -// throw new RuntimeException(); -//} - diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedObjective.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedObjective.java deleted file mode 100644 index c3d21393..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedObjective.java +++ /dev/null @@ -1,29 +0,0 @@ -package optimization.gradientBasedMethods; - -import optimization.util.MathUtils; - - -/** - * Computes a projected objective - * When we tell it to set some parameters it automatically projects the parameters back into the simplex: - * - * - * When we tell it to get the gradient in automatically returns the projected gradient: - * @author javg - * - */ -public abstract class ProjectedObjective extends Objective{ - - public abstract double[] projectPoint (double[] point); - - public double[] auxParameters; - - - public void setInitialParameters(double[] params){ - setParameters(projectPoint(params)); - } - - - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedOptimizer.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedOptimizer.java deleted file mode 100644 index 81d8403e..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/ProjectedOptimizer.java +++ /dev/null @@ -1,10 +0,0 @@ -package optimization.gradientBasedMethods; - - - -public interface ProjectedOptimizer extends Optimizer{ - - - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/stats/OptimizerStats.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/stats/OptimizerStats.java deleted file mode 100644 index 6340ef73..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/stats/OptimizerStats.java +++ /dev/null @@ -1,86 +0,0 @@ -package optimization.gradientBasedMethods.stats; - -import java.util.ArrayList; - -import optimization.gradientBasedMethods.Objective; -import optimization.gradientBasedMethods.Optimizer; -import optimization.util.MathUtils; -import optimization.util.StaticTools; - - -public class OptimizerStats { - - double start = 0; - double totalTime = 0; - - String objectiveFinalStats; - - ArrayList<Double> gradientNorms = new ArrayList<Double>(); - ArrayList<Double> steps = new ArrayList<Double>(); - ArrayList<Double> value = new ArrayList<Double>(); - ArrayList<Integer> iterations = new ArrayList<Integer>(); - double prevValue =0; - - public void reset(){ - start = 0; - totalTime = 0; - - objectiveFinalStats=""; - - 
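		//(The clears below reset the per-iteration histories that
		// collectInitStats/collectIterationStats append to and that
		// prettyPrint reports: gradient norms, step sizes, objective
		// values and iteration indices.)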
gradientNorms.clear(); - steps.clear(); - value.clear(); - iterations.clear(); - prevValue =0; - } - - public void startTime() { - start = System.currentTimeMillis(); - } - public void stopTime() { - totalTime += System.currentTimeMillis() - start; - } - - public String prettyPrint(int level){ - StringBuffer res = new StringBuffer(); - res.append("Total time " + totalTime/1000 + " seconds \n" + "Iterations " + iterations.size() + "\n"); - res.append(objectiveFinalStats+"\n"); - if(level > 0){ - if(iterations.size() > 0){ - res.append("\tIteration"+iterations.get(0)+"\tstep: "+StaticTools.prettyPrint(steps.get(0), "0.00E00", 6)+ "\tgradientNorm "+ - StaticTools.prettyPrint(gradientNorms.get(0), "0.00000E00", 10)+ "\tvalue "+ StaticTools.prettyPrint(value.get(0), "0.000000E00",11)+"\n"); - } - for(int i = 1; i < iterations.size(); i++){ - res.append("\tIteration:\t"+iterations.get(i)+"\tstep:"+StaticTools.prettyPrint(steps.get(i), "0.00E00", 6)+ "\tgradientNorm "+ - StaticTools.prettyPrint(gradientNorms.get(i), "0.00000E00", 10)+ - "\tvalue:\t"+ StaticTools.prettyPrint(value.get(i), "0.000000E00",11)+ - "\tvalueDiff:\t"+ StaticTools.prettyPrint((value.get(i-1)-value.get(i)), "0.000000E00",11)+ - "\n"); - } - } - return res.toString(); - } - - - public void collectInitStats(Optimizer optimizer, Objective objective){ - startTime(); - iterations.add(-1); - gradientNorms.add(MathUtils.L2Norm(objective.getGradient())); - steps.add(0.0); - value.add(objective.getValue()); - } - - public void collectIterationStats(Optimizer optimizer, Objective objective){ - iterations.add(optimizer.getCurrentIteration()); - gradientNorms.add(MathUtils.L2Norm(objective.getGradient())); - steps.add(optimizer.getCurrentStep()); - value.add(optimizer.getCurrentValue()); - } - - - public void collectFinalStats(Optimizer optimizer, Objective objective){ - stopTime(); - objectiveFinalStats = objective.finalInfoString(); - } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/stats/ProjectedOptimizerStats.java b/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/stats/ProjectedOptimizerStats.java deleted file mode 100644 index d65a1267..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/gradientBasedMethods/stats/ProjectedOptimizerStats.java +++ /dev/null @@ -1,70 +0,0 @@ -package optimization.gradientBasedMethods.stats; - -import java.util.ArrayList; - -import optimization.gradientBasedMethods.Objective; -import optimization.gradientBasedMethods.Optimizer; -import optimization.gradientBasedMethods.ProjectedObjective; -import optimization.gradientBasedMethods.ProjectedOptimizer; -import optimization.util.MathUtils; -import optimization.util.StaticTools; - - -public class ProjectedOptimizerStats extends OptimizerStats{ - - - - public void reset(){ - super.reset(); - projectedGradientNorms.clear(); - } - - ArrayList<Double> projectedGradientNorms = new ArrayList<Double>(); - - public String prettyPrint(int level){ - StringBuffer res = new StringBuffer(); - res.append("Total time " + totalTime/1000 + " seconds \n" + "Iterations " + iterations.size() + "\n"); - res.append(objectiveFinalStats+"\n"); - if(level > 0){ - if(iterations.size() > 0){ - res.append("\tIteration"+iterations.get(0)+"\tstep: "+ - StaticTools.prettyPrint(steps.get(0), "0.00E00", 6)+ "\tgradientNorm "+ - StaticTools.prettyPrint(gradientNorms.get(0), "0.00000E00", 10) - + "\tdirection"+ - StaticTools.prettyPrint(projectedGradientNorms.get(0), "0.00000E00", 10)+ - "\tvalue "+ 
StaticTools.prettyPrint(value.get(0), "0.000000E00",11)+"\n"); - } - for(int i = 1; i < iterations.size(); i++){ - res.append("\tIteration"+iterations.get(i)+"\tstep: "+StaticTools.prettyPrint(steps.get(i), "0.00E00", 6)+ "\tgradientNorm "+ - StaticTools.prettyPrint(gradientNorms.get(i), "0.00000E00", 10)+ - "\t direction "+ - StaticTools.prettyPrint(projectedGradientNorms.get(i), "0.00000E00", 10)+ - "\tvalue "+ StaticTools.prettyPrint(value.get(i), "0.000000E00",11)+ - "\tvalueDiff "+ StaticTools.prettyPrint((value.get(i-1)-value.get(i)), "0.000000E00",11)+ - "\n"); - } - } - return res.toString(); - } - - - public void collectInitStats(Optimizer optimizer, Objective objective){ - startTime(); - } - - public void collectIterationStats(Optimizer optimizer, Objective objective){ - iterations.add(optimizer.getCurrentIteration()); - gradientNorms.add(MathUtils.L2Norm(objective.getGradient())); - projectedGradientNorms.add(MathUtils.L2Norm(optimizer.getDirection())); - steps.add(optimizer.getCurrentStep()); - value.add(optimizer.getCurrentValue()); - } - - - - public void collectFinalStats(Optimizer optimizer, Objective objective){ - stopTime(); - objectiveFinalStats = objective.finalInfoString(); - } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/linesearch/ArmijoLineSearchMinimization.java b/gi/posterior-regularisation/prjava/src/optimization/linesearch/ArmijoLineSearchMinimization.java deleted file mode 100644 index c9f9b8df..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/linesearch/ArmijoLineSearchMinimization.java +++ /dev/null @@ -1,102 +0,0 @@ -package optimization.linesearch; - -import optimization.util.Interpolation; - - -/** - * Implements Back Tracking Line Search as described on page 37 of Numerical Optimization. - * Also known as armijo rule - * @author javg - * - */ -public class ArmijoLineSearchMinimization implements LineSearchMethod{ - - /** - * How much should the step size decrease at each iteration. 
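 * (This factor is used when the quadratic-interpolation candidate is
 * rejected: the loop below then contracts geometrically, alpha <- 0.5 *
 * alpha, and accepts the first alpha meeting the sufficient-decrease
 * condition f(x + alpha*d) <= f(x) + c1 * alpha * grad f(x)^T d.)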
- */ - double contractionFactor = 0.5; - double c1 = 0.0001; - - double sigma1 = 0.1; - double sigma2 = 0.9; - - - - double initialStep; - int maxIterations = 10; - - - public ArmijoLineSearchMinimization(){ - this.initialStep = 1; - } - - //Experiment - double previousStepPicked = -1;; - double previousInitGradientDot = -1; - double currentInitGradientDot = -1; - - - public void reset(){ - previousStepPicked = -1;; - previousInitGradientDot = -1; - currentInitGradientDot = -1; - } - - public void setInitialStep(double initial){ - initialStep = initial; - } - - /** - * - */ - - public double getStepSize(DifferentiableLineSearchObjective o) { - currentInitGradientDot = o.getInitialGradient(); - //Should update all in the objective - o.updateAlpha(initialStep); - int nrIterations = 0; - //System.out.println("tried alpha" + initialStep + " value " + o.getCurrentValue()); - while(!WolfeConditions.suficientDecrease(o,c1)){ - if(nrIterations >= maxIterations){ - o.printLineSearchSteps(); - return -1; - } - double alpha=o.getAlpha(); - double alphaTemp = - Interpolation.quadraticInterpolation(o.getOriginalValue(), o.getInitialGradient(), alpha, o.getCurrentValue()); - if(alphaTemp >= sigma1 || alphaTemp <= sigma2*o.getAlpha()){ -// System.out.println("using alpha temp " + alphaTemp); - alpha = alphaTemp; - }else{ -// System.out.println("Discarding alpha temp " + alphaTemp); - alpha = alpha*contractionFactor; - } -// double alpha =o.getAlpha()*contractionFactor; - - o.updateAlpha(alpha); - //System.out.println("tried alpha" + alpha+ " value " + o.getCurrentValue()); - nrIterations++; - } - - //System.out.println("Leavning line search used:"); - //o.printLineSearchSteps(); - - previousInitGradientDot = currentInitGradientDot; - previousStepPicked = o.getAlpha(); - return o.getAlpha(); - } - - public double getInitialGradient() { - return currentInitGradientDot; - - } - - public double getPreviousInitialGradient() { - return previousInitGradientDot; - } - - public double getPreviousStepUsed() { - return previousStepPicked; - } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/linesearch/ArmijoLineSearchMinimizationAlongProjectionArc.java b/gi/posterior-regularisation/prjava/src/optimization/linesearch/ArmijoLineSearchMinimizationAlongProjectionArc.java deleted file mode 100644 index e153f2da..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/linesearch/ArmijoLineSearchMinimizationAlongProjectionArc.java +++ /dev/null @@ -1,141 +0,0 @@ -package optimization.linesearch; - -import optimization.gradientBasedMethods.ProjectedObjective; -import optimization.util.Interpolation; -import optimization.util.MathUtils; - - - - - -/** - * Implements Armijo Rule Line search along the projection arc (Non-Linear Programming page 230) - * To be used with Projected gradient Methods. - * - * Recall that armijo tries successive step sizes alpha until the sufficient decrease is satisfied: - * f(x+alpha*direction) < f(x) + alpha*c1*grad(f)*direction - * - * In this case we are optimizing over a convex set X so we must guarantee that the new point stays inside the - * constraints. - * First the direction as to be feasible (inside constraints) and will be define as: - * d = (x_k_f - x_k) where x_k_f is a feasible point. - * so the armijo condition can be rewritten as: - * f(x+alpha(x_k_f - x_k)) < f(x) + c1*grad(f)*(x_k_f - x_k) - * and x_k_f is defined as: - * [x_k-alpha*grad(f)]+ - * where []+ mean a projection to the feasibility set. 
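 * A tiny worked instance, for concreteness: with X the probability
 * simplex, x_k = (0.7, 0.3), grad f(x_k) = (1, -1) and step s_k = 0.2,
 * x_k - s_k*grad f(x_k) = (0.5, 0.5) is already feasible, so the
 * projection is the identity and d = x_k_f - x_k = (-0.2, 0.2).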
- * So this means that we take a step on the negative gradient (gradient descent) and then obtain then project - * that point to the feasibility set. - * Note that if the point is already feasible then we are back to the normal armijo rule. - * - * @author javg - * - */ -public class ArmijoLineSearchMinimizationAlongProjectionArc implements LineSearchMethod{ - - /** - * How much should the step size decrease at each iteration. - */ - double contractionFactor = 0.5; - double c1 = 0.0001; - - - double initialStep; - int maxIterations = 100; - - - double sigma1 = 0.1; - double sigma2 = 0.9; - - //Experiment - double previousStepPicked = -1;; - double previousInitGradientDot = -1; - double currentInitGradientDot = -1; - - GenericPickFirstStep strategy; - - - public void reset(){ - previousStepPicked = -1;; - previousInitGradientDot = -1; - currentInitGradientDot = -1; - } - - - public ArmijoLineSearchMinimizationAlongProjectionArc(){ - this.initialStep = 1; - } - - public ArmijoLineSearchMinimizationAlongProjectionArc(GenericPickFirstStep strategy){ - this.strategy = strategy; - this.initialStep = strategy.getFirstStep(this); - } - - - public void setInitialStep(double initial){ - this.initialStep = initial; - } - - /** - * - */ - - public double getStepSize(DifferentiableLineSearchObjective o) { - - - //Should update all in the objective - initialStep = strategy.getFirstStep(this); - o.updateAlpha(initialStep); - previousInitGradientDot=currentInitGradientDot; - currentInitGradientDot=o.getCurrentGradient(); - int nrIterations = 0; - - //Armijo rule, the current value has to be smaller than the original value plus a small step of the gradient - while(o.getCurrentValue() > - o.getOriginalValue() + c1*(o.getCurrentGradient())){ -// System.out.println("curr value "+o.getCurrentValue()); -// System.out.println("original value "+o.getOriginalValue()); -// System.out.println("GRADIENT decrease" +(MathUtils.dotProduct(o.o.gradient, -// MathUtils.arrayMinus(o.originalParameters,((ProjectedObjective)o.o).auxParameters)))); -// System.out.println("GRADIENT SAVED" + o.getCurrentGradient()); - if(nrIterations >= maxIterations){ - System.out.println("Could not find a step leaving line search with -1"); - o.printLineSearchSteps(); - return -1; - } - double alpha=o.getAlpha(); - double alphaTemp = - Interpolation.quadraticInterpolation(o.getOriginalValue(), o.getInitialGradient(), alpha, o.getCurrentValue()); - if(alphaTemp >= sigma1 || alphaTemp <= sigma2*o.getAlpha()){ - alpha = alphaTemp; - }else{ - alpha = alpha*contractionFactor; - } -// double alpha =obj.getAlpha()*contractionFactor; - o.updateAlpha(alpha); - nrIterations++; - } -// System.out.println("curr value "+o.getCurrentValue()); -// System.out.println("original value "+o.getOriginalValue()); -// System.out.println("sufficient decrease" +c1*o.getCurrentGradient()); -// System.out.println("Leavning line search used:"); -// o.printSmallLineSearchSteps(); - - previousStepPicked = o.getAlpha(); - return o.getAlpha(); - } - - public double getInitialGradient() { - return currentInitGradientDot; - - } - - public double getPreviousInitialGradient() { - return previousInitGradientDot; - } - - public double getPreviousStepUsed() { - return previousStepPicked; - } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/linesearch/DifferentiableLineSearchObjective.java b/gi/posterior-regularisation/prjava/src/optimization/linesearch/DifferentiableLineSearchObjective.java deleted file mode 100644 index a5bc958e..00000000 --- 
a/gi/posterior-regularisation/prjava/src/optimization/linesearch/DifferentiableLineSearchObjective.java
+++ /dev/null
@@ -1,185 +0,0 @@
-package optimization.linesearch;
-
-import gnu.trove.TDoubleArrayList;
-import gnu.trove.TIntArrayList;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-
-import optimization.gradientBasedMethods.Objective;
-import optimization.util.MathUtils;
-import optimization.util.StaticTools;
-
-import util.MathUtil;
-import util.Printing;
-
-/**
- * A wrapper class around the actual objective, used to perform line
- * search. It caches the entire history of evaluations, which keeps the
- * optimization code legible and, for the applications we use it for,
- * should be a win.
- *
- * Note: the lastEvaluatedAt value is very important, since we will use
- * it to avoid doing an evaluation of the gradient after the line search.
- *
- * The differentiable line search objective defines a search along the ray
- * given by a direction of the main objective.
- * It defines the following function,
- * where f is the original objective function:
- *   g(alpha) = f(x_0 + alpha*direction)
- *   g'(alpha) = f'(x_0 + alpha*direction)*direction
- *
- * @author joao
- */
-public class DifferentiableLineSearchObjective {
-
-	Objective o;
-	int nrIterations;
-	TDoubleArrayList steps;
-	TDoubleArrayList values;
-	TDoubleArrayList gradients;
-
-	//These variables cannot change
-	public double[] originalParameters;
-	public double[] searchDirection;
-
-	/**
-	 * Defines a line search objective. Receives the objective over which
-	 * we are performing the line search, used to calculate values and
-	 * gradients; the direction of the ray search is supplied later via
-	 * reset, and depends on the method rather than on the objective.
-	 * @param o
-	 */
-	public DifferentiableLineSearchObjective(Objective o) {
-		this.o = o;
-		originalParameters = new double[o.getNumParameters()];
-		searchDirection = new double[o.getNumParameters()];
-		steps = new TDoubleArrayList();
-		values = new TDoubleArrayList();
-		gradients = new TDoubleArrayList();
-	}
-
-	/**
-	 * Called whenever we start a new iteration.
-	 * Receives the ray along which we are searching and resets all values.
-	 */
-	public void reset(double[] direction){
-		//Copy initial values
-		System.arraycopy(o.getParameters(), 0, originalParameters, 0, o.getNumParameters());
-		System.arraycopy(direction, 0, searchDirection, 0, o.getNumParameters());
-
-		//Initialize variables
-		nrIterations = 0;
-		steps.clear();
-		values.clear();
-		gradients.clear();
-
-		values.add(o.getValue());
-		gradients.add(MathUtils.dotProduct(o.getGradient(),direction));
-		steps.add(0);
-	}
-
-	/**
-	 * Updates the current value of alpha: takes a step of that size along
-	 * the search direction, then gets the real objective value and
-	 * gradient and calculates all required information.
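	 * Each call appends one (step, value, directional derivative) triple
	 * to the cached history, i.e. g(alpha) = f(x_0 + alpha*d) and
	 * g'(alpha) = grad f(x_0 + alpha*d)^T d for every alpha tried so far,
	 * so getValue(iter) and getGradient(iter) can be read back later
	 * without re-evaluating the objective.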
- */ - public void updateAlpha(double alpha){ - if(alpha < 0){ - System.out.println("alpha may not be smaller that zero"); - throw new RuntimeException(); - } - nrIterations++; - steps.add(alpha); - //x_t+1 = x_t + alpha*direction - System.arraycopy(originalParameters,0, o.getParameters(), 0, originalParameters.length); - MathUtils.plusEquals(o.getParameters(), searchDirection, alpha); - o.setParameters(o.getParameters()); -// System.out.println("Took a step of " + alpha + " new value " + o.getValue()); - values.add(o.getValue()); - gradients.add(MathUtils.dotProduct(o.getGradient(),searchDirection)); - } - - - - public int getNrIterations(){ - return nrIterations; - } - - /** - * return g(alpha) for the current value of alpha - * @param iter - * @return - */ - public double getValue(int iter){ - return values.get(iter); - } - - public double getCurrentValue(){ - return values.get(nrIterations); - } - - public double getOriginalValue(){ - return values.get(0); - } - - /** - * return g'(alpha) for the current value of alpha - * @param iter - * @return - */ - public double getGradient(int iter){ - return gradients.get(iter); - } - - public double getCurrentGradient(){ - return gradients.get(nrIterations); - } - - public double getInitialGradient(){ - return gradients.get(0); - } - - - - - public double getAlpha(){ - return steps.get(nrIterations); - } - - public void printLineSearchSteps(){ - System.out.println( - " Steps size "+steps.size() + - "Values size "+values.size() + - "Gradeients size "+gradients.size()); - for(int i =0; i < steps.size();i++){ - System.out.println("Iter " + i + " step " + steps.get(i) + - " value " + values.get(i) + " grad " + gradients.get(i)); - } - } - - public void printSmallLineSearchSteps(){ - for(int i =0; i < steps.size();i++){ - System.out.print(StaticTools.prettyPrint(steps.get(i), "0.0000E00",8) + " "); - } - System.out.println(); - } - - public static void main(String[] args) { - - } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/linesearch/GenericPickFirstStep.java b/gi/posterior-regularisation/prjava/src/optimization/linesearch/GenericPickFirstStep.java deleted file mode 100644 index a33eb311..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/linesearch/GenericPickFirstStep.java +++ /dev/null @@ -1,20 +0,0 @@ -package optimization.linesearch; - - -public class GenericPickFirstStep{ - double _initValue; - public GenericPickFirstStep(double initValue) { - _initValue = initValue; - } - - public double getFirstStep(LineSearchMethod ls){ - return _initValue; - } - public void collectInitValues(LineSearchMethod ls){ - - } - - public void collectFinalValues(LineSearchMethod ls){ - - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/linesearch/InterpolationPickFirstStep.java b/gi/posterior-regularisation/prjava/src/optimization/linesearch/InterpolationPickFirstStep.java deleted file mode 100644 index 0deebcdb..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/linesearch/InterpolationPickFirstStep.java +++ /dev/null @@ -1,25 +0,0 @@ -package optimization.linesearch; - - -public class InterpolationPickFirstStep extends GenericPickFirstStep{ - public InterpolationPickFirstStep(double initValue) { - super(initValue); - } - - public double getFirstStep(LineSearchMethod ls){ - if(ls.getPreviousStepUsed() != -1 && ls.getPreviousInitialGradient()!=0){ - double newStep = Math.min(300, 1.02*ls.getPreviousInitialGradient()*ls.getPreviousStepUsed()/ls.getInitialGradient()); - // 
System.out.println("proposing " + newStep); - return newStep; - - } - return _initValue; - } - public void collectInitValues(WolfRuleLineSearch ls){ - - } - - public void collectFinalValues(WolfRuleLineSearch ls){ - - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/linesearch/LineSearchMethod.java b/gi/posterior-regularisation/prjava/src/optimization/linesearch/LineSearchMethod.java deleted file mode 100644 index 80cd7f39..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/linesearch/LineSearchMethod.java +++ /dev/null @@ -1,14 +0,0 @@ -package optimization.linesearch; - - -public interface LineSearchMethod { - - double getStepSize(DifferentiableLineSearchObjective o); - - public double getInitialGradient(); - public double getPreviousInitialGradient(); - public double getPreviousStepUsed(); - - public void setInitialStep(double initial); - public void reset(); -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/linesearch/NonNewtonInterpolationPickFirstStep.java b/gi/posterior-regularisation/prjava/src/optimization/linesearch/NonNewtonInterpolationPickFirstStep.java deleted file mode 100644 index 4b354fd9..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/linesearch/NonNewtonInterpolationPickFirstStep.java +++ /dev/null @@ -1,33 +0,0 @@ -package optimization.linesearch; - -/** - * Non newtwon since we don't always try 1... - * Not sure if that is even usefull for newton - * @author javg - * - */ -public class NonNewtonInterpolationPickFirstStep extends GenericPickFirstStep{ - public NonNewtonInterpolationPickFirstStep(double initValue) { - super(initValue); - } - - public double getFirstStep(LineSearchMethod ls){ -// System.out.println("Previous step used " + ls.getPreviousStepUsed()); -// System.out.println("PreviousGradinebt " + ls.getPreviousInitialGradient()); -// System.out.println("CurrentGradinebt " + ls.getInitialGradient()); - if(ls.getPreviousStepUsed() != -1 && ls.getPreviousInitialGradient()!=0){ - double newStep = 1.01*ls.getPreviousInitialGradient()*ls.getPreviousStepUsed()/ls.getInitialGradient(); - //System.out.println("Suggesting " + newStep); - return newStep; - - } - return _initValue; - } - public void collectInitValues(WolfRuleLineSearch ls){ - - } - - public void collectFinalValues(WolfRuleLineSearch ls){ - - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/linesearch/ProjectedDifferentiableLineSearchObjective.java b/gi/posterior-regularisation/prjava/src/optimization/linesearch/ProjectedDifferentiableLineSearchObjective.java deleted file mode 100644 index 29ccbc32..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/linesearch/ProjectedDifferentiableLineSearchObjective.java +++ /dev/null @@ -1,137 +0,0 @@ -package optimization.linesearch; - -import optimization.gradientBasedMethods.Objective; -import optimization.gradientBasedMethods.ProjectedObjective; -import optimization.util.MathUtils; -import optimization.util.MatrixOutput; - - -/** - * See ArmijoLineSearchMinimizationAlongProjectionArc for description - * @author javg - * - */ -public class ProjectedDifferentiableLineSearchObjective extends DifferentiableLineSearchObjective{ - - - - ProjectedObjective obj; - public ProjectedDifferentiableLineSearchObjective(Objective o) { - super(o); - if(!(o instanceof ProjectedObjective)){ - System.out.println("Must receive a projected objective"); - throw new RuntimeException(); - } - obj = (ProjectedObjective) o; - } - - - - public double[] projectPoint (double[] 
point){ - return ((ProjectedObjective)o).projectPoint(point); - } - public void updateAlpha(double alpha){ - if(alpha < 0){ - System.out.println("alpha may not be smaller that zero"); - throw new RuntimeException(); - } - - if(obj.auxParameters == null){ - obj.auxParameters = new double[obj.getParameters().length]; - } - - nrIterations++; - - steps.add(alpha); - System.arraycopy(originalParameters, 0, obj.auxParameters, 0, obj.auxParameters.length); - - //Take a step into the search direction - -// MatrixOutput.printDoubleArray(obj.getGradient(), "gradient"); - -// alpha=gradients.get(0)*alpha/(gradients.get(gradients.size()-1)); - - //x_t+1 = x_t - alpha*gradient = x_t + alpha*direction - MathUtils.plusEquals(obj.auxParameters, searchDirection, alpha); -// MatrixOutput.printDoubleArray(obj.auxParameters, "before projection"); - obj.auxParameters = projectPoint(obj.auxParameters); -// MatrixOutput.printDoubleArray(obj.auxParameters, "after projection"); - o.setParameters(obj.auxParameters); -// System.out.println("new parameters"); -// o.printParameters(); - values.add(o.getValue()); - //Computes the new gradient x_k-[x_k-alpha*Gradient(x_k)]+ - MathUtils.minusEqualsInverse(originalParameters,obj.auxParameters,1); -// MatrixOutput.printDoubleArray(obj.auxParameters, "new gradient"); - //Dot product between the new direction and the new gradient - double gradient = MathUtils.dotProduct(obj.auxParameters,searchDirection); - gradients.add(gradient); - if(gradient > 0){ - System.out.println("Gradient on line search has to be smaller than zero"); - System.out.println("Iter: " + nrIterations); - MatrixOutput.printDoubleArray(obj.auxParameters, "new direction"); - MatrixOutput.printDoubleArray(searchDirection, "search direction"); - throw new RuntimeException(); - - } - - } - - /** - * - */ -// public void updateAlpha(double alpha){ -// -// if(alpha < 0){ -// System.out.println("alpha may not be smaller that zero"); -// throw new RuntimeException(); -// } -// -// nrIterations++; -// steps.add(alpha); -// //x_t+1 = x_t - alpha*direction -// System.arraycopy(originalParameters, 0, parametersChange, 0, parametersChange.length); -//// MatrixOutput.printDoubleArray(parametersChange, "parameters before step"); -//// System.out.println("Step" + alpha); -// MatrixOutput.printDoubleArray(originalGradient, "gradient + " + alpha); -// -// MathUtils.minusEquals(parametersChange, originalGradient, alpha); -// -// //Project the points into the feasibility set -//// MatrixOutput.printDoubleArray(parametersChange, "before projection"); -// //x_k(alpha) = [x_k - alpha*grad f(x_k)]+ -// parametersChange = projectPoint(parametersChange); -//// MatrixOutput.printDoubleArray(parametersChange, "after projection"); -// o.setParameters(parametersChange); -// values.add(o.getValue()); -// //Computes the new direction x_k-[x_k-alpha*Gradient(x_k)]+ -// -// direction=MathUtils.arrayMinus(parametersChange,originalParameters); -//// MatrixOutput.printDoubleArray(direction, "new direction"); -// -// double gradient = MathUtils.dotProduct(originalGradient,direction); -// gradients.add(gradient); -// if(gradient > 1E-10){ -// System.out.println("cosine " + gradient/(MathUtils.L2Norm(originalGradient)*MathUtils.L2Norm(direction))); -// -// -// System.out.println("not a descent direction for alpha " + alpha); -// System.arraycopy(originalParameters, 0, parametersChange, 0, parametersChange.length); -// MathUtils.minusEquals(parametersChange, originalGradient, 1E-20); -// -// parametersChange = projectPoint(parametersChange); -// 
direction=MathUtils.arrayMinus(parametersChange,originalParameters); -// gradient = MathUtils.dotProduct(originalGradient,direction); -// if(gradient > 0){ -// System.out.println("Direction is really non-descent evern for small alphas:" + gradient); -// } -// System.out.println("ProjecteLineSearchObjective: Should be a descent direction at " + nrIterations + ": "+ gradient); -//// System.out.println(Printing.doubleArrayToString(originalGradient, null,"Original gradient")); -//// System.out.println(Printing.doubleArrayToString(originalParameters, null,"Original parameters")); -//// System.out.println(Printing.doubleArrayToString(parametersChange, null,"Projected parameters")); -//// System.out.println(Printing.doubleArrayToString(direction, null,"Direction")); -// throw new RuntimeException(); -// } -// } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/linesearch/WolfRuleLineSearch.java b/gi/posterior-regularisation/prjava/src/optimization/linesearch/WolfRuleLineSearch.java deleted file mode 100644 index 5489f2d0..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/linesearch/WolfRuleLineSearch.java +++ /dev/null @@ -1,300 +0,0 @@ -package optimization.linesearch; - -import java.io.PrintStream; -import java.util.ArrayList; - -import optimization.util.Interpolation; - - - - -/** - * - * @author javg - * - */ -public class WolfRuleLineSearch implements LineSearchMethod{ - - GenericPickFirstStep pickFirstStep; - - double c1 = 1.0E-4; - double c2 = 0.9; - - //Application dependent - double maxStep=100; - - int extrapolationIteration; - int maxExtrapolationIteration = 1000; - - - double minZoomDiffTresh = 10E-10; - - - ArrayList<Double> steps; - ArrayList<Double> gradientDots; - ArrayList<Double> functionVals; - - int debugLevel = 0; - boolean foudStep = false; - - public WolfRuleLineSearch(GenericPickFirstStep pickFirstStep){ - this.pickFirstStep = pickFirstStep; - - } - - - - - public WolfRuleLineSearch(GenericPickFirstStep pickFirstStep, double c1, double c2){ - this.pickFirstStep = pickFirstStep; - initialStep = pickFirstStep.getFirstStep(this); - this.c1 = c1; - this.c2 = c2; - } - - public void setDebugLevel(int level){ - debugLevel = level; - } - - //Experiment - double previousStepPicked = -1;; - double previousInitGradientDot = -1; - double currentInitGradientDot = -1; - - double initialStep; - - - public void reset(){ - previousStepPicked = -1;; - previousInitGradientDot = -1; - currentInitGradientDot = -1; - if(steps != null) - steps.clear(); - if(gradientDots != null) - gradientDots.clear(); - if(functionVals != null) - functionVals.clear(); - } - - public void setInitialStep(double initial){ - initialStep = pickFirstStep.getFirstStep(this); - } - - - - /** - * Implements Wolf Line search as described in nocetal. - * This process consists in two stages. The first stage we try to satisfy the - * biggest step size that still satisfies the curvature condition. We keep increasing - * the initial step size until we find a step satisfying the curvature condition, we return - * success, we failed the sufficient increase so we cannot increase more and we can call zoom with - * that maximum step, or we pass the minimum in which case we can call zoom the same way. 
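 *
 * For reference, the two conditions enforced are the usual strong Wolfe
 * pair: sufficient decrease,
 *   f(x + alpha*d) <= f(x) + c1 * alpha * grad f(x)^T d,
 * and the curvature condition,
 *   |grad f(x + alpha*d)^T d| <= c2 * |grad f(x)^T d|,
 * with c1 = 1.0E-4 and c2 = 0.9 as set above.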
- * - */ - public double getStepSize(DifferentiableLineSearchObjective objective){ - //System.out.println("entering line search"); - - foudStep = false; - if(debugLevel >= 1){ - steps = new ArrayList<Double>(); - gradientDots = new ArrayList<Double>(); - functionVals =new ArrayList<Double>(); - } - - //test - currentInitGradientDot = objective.getInitialGradient(); - - - double previousValue = objective.getCurrentValue(); - double previousStep = 0; - double currentStep =pickFirstStep.getFirstStep(this); - for(extrapolationIteration = 0; - extrapolationIteration < maxExtrapolationIteration; extrapolationIteration++){ - - objective.updateAlpha(currentStep); - double currentValue = objective.getCurrentValue(); - if(debugLevel >= 1){ - steps.add(currentStep); - functionVals.add(currentValue); - gradientDots.add(objective.getCurrentGradient()); - } - - - //The current step does not satisfy the sufficient decrease condition anymore - // so we cannot get bigger than that calling zoom. - if(!WolfeConditions.suficientDecrease(objective,c1)|| - (extrapolationIteration > 0 && currentValue >= previousValue)){ - currentStep = zoom(objective,previousStep,currentStep,objective.nrIterations-1,objective.nrIterations); - break; - } - - //Satisfying both conditions ready to leave - if(WolfeConditions.sufficientCurvature(objective,c1,c2)){ - //Found step - foudStep = true; - break; - } - - /** - * This means that we passed the minimum already since the dot product that should be - * negative (descent direction) is now positive. So we cannot increase more. On the other hand - * since we know the direction is a descent direction the value the objective at the current step - * is for sure smaller than the preivous step so we change the order. - */ - if(objective.getCurrentGradient() >= 0){ - currentStep = zoom(objective,currentStep,previousStep,objective.nrIterations,objective.nrIterations-1); - break; - } - - - //Ok, so we can still get a bigger step, - double aux = currentStep; - //currentStep = currentStep*2; - if(Math.abs(currentStep-maxStep)>1.1e-2){ - currentStep = (currentStep+maxStep)/2; - }else{ - currentStep = currentStep*2; - } - previousStep = aux; - previousValue = currentValue; - //Could be done better - if(currentStep >= maxStep){ - System.out.println("Excedded max step...calling zoom with maxStepSize"); - currentStep = zoom(objective,previousStep,currentStep,objective.nrIterations-1,objective.nrIterations); - } - } - if(!foudStep){ - System.out.println("Wolfe Rule exceed number of iterations"); - if(debugLevel >= 1){ - printSmallWolfeStats(System.out); -// System.out.println("Line search values"); -// DebugHelpers.getLineSearchGraph(o, direction, originalParameters,origValue, origGradDirectionDot,c1,c2); - } - return -1; - } - if(debugLevel >= 1){ - printSmallWolfeStats(System.out); - } - - previousStepPicked = currentStep; - previousInitGradientDot = currentInitGradientDot; -// objective.printLineSearchSteps(); - return currentStep; - } - - - - - - public void printWolfeStats(PrintStream out){ - for(int i = 0; i < steps.size(); i++){ - out.println("Step " + steps.get(i) + " value " + functionVals.get(i) + " dot " + gradientDots.get(i)); - } - } - - public void printSmallWolfeStats(PrintStream out){ - for(int i = 0; i < steps.size(); i++){ - out.print(steps.get(i) + ":"+functionVals.get(i)+":"+gradientDots.get(i)+" "); - } - System.out.println(); - } - - - - /** - * Pick a step satisfying the strong wolfe condition from an given from lowerStep and higherStep - * picked on the routine above. 
- * - * Both lowerStep and higherStep have been evaluated, so we only need to pass the iteration where they have - * been evaluated and save extra evaluations. - * - * We know that lowerStepValue as to be smaller than higherStepValue, and that a point - * satisfying both conditions exists in such interval. - * - * LowerStep always satisfies at least the sufficient decrease - * @return - */ - public double zoom(DifferentiableLineSearchObjective o, double lowerStep, double higherStep, - int lowerStepIter, int higherStepIter){ - - if(debugLevel >=2){ - System.out.println("Entering zoom with " + lowerStep+"-"+higherStep); - } - - double currentStep=-1; - - int zoomIter = 0; - while(zoomIter < 1000){ - if(Math.abs(lowerStep-higherStep) < minZoomDiffTresh){ - o.updateAlpha(lowerStep); - if(debugLevel >= 1){ - steps.add(lowerStep); - functionVals.add(o.getCurrentValue()); - gradientDots.add(o.getCurrentGradient()); - } - foudStep = true; - return lowerStep; - } - - //Cubic interpolation - currentStep = - Interpolation.cubicInterpolation(lowerStep, o.getValue(lowerStepIter), o.getGradient(lowerStepIter), - higherStep, o.getValue(higherStepIter), o.getGradient(higherStepIter)); - - //Safeguard.... should not be required check in what condtions it is required - if(currentStep < 0 ){ - currentStep = (lowerStep+higherStep)/2; - } - if(Double.isNaN(currentStep) || Double.isInfinite(currentStep)){ - currentStep = (lowerStep+higherStep)/2; - } -// currentStep = (lowerStep+higherStep)/2; -// System.out.println("Trying "+currentStep); - o.updateAlpha(currentStep); - if(debugLevel >=1){ - steps.add(currentStep); - functionVals.add(o.getCurrentValue()); - gradientDots.add(o.getCurrentGradient()); - } - if(!WolfeConditions.suficientDecrease(o,c1) - || o.getCurrentValue() >= o.getValue(lowerStepIter)){ - higherStepIter = o.nrIterations; - higherStep = currentStep; - } - //Note when entering here the new step satisfies the sufficent decrease and - // or as a function value that is better than the previous best (lowerStepFunctionValues) - // so we either leave or change the value of the alpha low. - else{ - if(WolfeConditions.sufficientCurvature(o,c1,c2)){ - //Satisfies the both wolf conditions - foudStep = true; - break; - } - //If does not satisfy curvature - if(o.getCurrentGradient()*(higherStep-lowerStep) >= 0){ - higherStep = lowerStep; - higherStepIter = lowerStepIter; - } - lowerStep = currentStep; - lowerStepIter = o.nrIterations; - } - zoomIter++; - } - return currentStep; - } - - public double getInitialGradient() { - return currentInitGradientDot; - - } - - public double getPreviousInitialGradient() { - return previousInitGradientDot; - } - - public double getPreviousStepUsed() { - return previousStepPicked; - } - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/linesearch/WolfeConditions.java b/gi/posterior-regularisation/prjava/src/optimization/linesearch/WolfeConditions.java deleted file mode 100644 index dcc704eb..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/linesearch/WolfeConditions.java +++ /dev/null @@ -1,45 +0,0 @@ -package optimization.linesearch; - - -public class WolfeConditions { - - /** - * Sufficient Increase number. 
Default constant - */ - - - /** - * Value for suficient curvature: - * 0.9 - For newton and quase netwon methods - * 0.1 - Non linear conhugate gradient - */ - - int debugLevel = 0; - public void setDebugLevel(int level){ - debugLevel = level; - } - - public static boolean suficientDecrease(DifferentiableLineSearchObjective o, double c1){ - double value = o.getOriginalValue()+c1*o.getAlpha()*o.getInitialGradient(); -// System.out.println("Sufficient Decrease original "+value+" new "+ o.getCurrentValue()); - return o.getCurrentValue() <= value; - } - - - - - public static boolean sufficientCurvature(DifferentiableLineSearchObjective o, double c1, double c2){ -// if(debugLevel >= 2){ -// double current = Math.abs(o.getCurrentGradient()); -// double orig = -c2*o.getInitialGradient(); -// if(current <= orig){ -// return true; -// }else{ -// System.out.println("Not satistfying curvature condition curvature " + current + " wants " + orig); -// return false; -// } -// } - return Math.abs(o.getCurrentGradient()) <= -c2*o.getInitialGradient(); - } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/projections/BoundsProjection.java b/gi/posterior-regularisation/prjava/src/optimization/projections/BoundsProjection.java deleted file mode 100644 index 0429d531..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/projections/BoundsProjection.java +++ /dev/null @@ -1,104 +0,0 @@ -package optimization.projections; - - -import java.util.Random; - -import optimization.util.MathUtils; -import optimization.util.MatrixOutput; - -/** - * Implements a projection into a box set defined by a and b. - * If either a or b are infinity then that bound is ignored. - * @author javg - * - */ -public class BoundsProjection extends Projection{ - - double a,b; - boolean ignoreA = false; - boolean ignoreB = false; - public BoundsProjection(double lowerBound, double upperBound) { - if(Double.isInfinite(lowerBound)){ - this.ignoreA = true; - }else{ - this.a =lowerBound; - } - if(Double.isInfinite(upperBound)){ - this.ignoreB = true; - }else{ - this.b =upperBound; - } - } - - - - /** - * Projects into the bounds - * a <= x_i <=b - */ - public void project(double[] original){ - for (int i = 0; i < original.length; i++) { - if(!ignoreA && original[i] < a){ - original[i] = a; - }else if(!ignoreB && original[i]>b){ - original[i]=b; - } - } - } - - /** - * Generates a random number between a and b. 
- */ - - Random r = new Random(); - - public double[] samplePoint(int numParams) { - double[] point = new double[numParams]; - for (int i = 0; i < point.length; i++) { - double rand = r.nextDouble(); - if(ignoreA && ignoreB){ - //Use const to avoid number near overflow - point[i] = rand*(1.E100+1.E100)-1.E100; - }else if(ignoreA){ - point[i] = rand*(b-1.E100)-1.E100; - }else if(ignoreB){ - point[i] = rand*(1.E100-a)-a; - }else{ - point[i] = rand*(b-a)-a; - } - } - return point; - } - - public static void main(String[] args) { - BoundsProjection sp = new BoundsProjection(0,Double.POSITIVE_INFINITY); - - - MatrixOutput.printDoubleArray(sp.samplePoint(3), "random 1"); - MatrixOutput.printDoubleArray(sp.samplePoint(3), "random 2"); - MatrixOutput.printDoubleArray(sp.samplePoint(3), "random 3"); - - double[] d = {-1.1,1.2,1.4}; - double[] original = d.clone(); - MatrixOutput.printDoubleArray(d, "before"); - - sp.project(d); - MatrixOutput.printDoubleArray(d, "after"); - System.out.println("Test projection: " + sp.testProjection(original, d)); - } - - double epsilon = 1.E-10; - public double[] perturbePoint(double[] point, int parameter){ - double[] newPoint = point.clone(); - if(!ignoreA && MathUtils.almost(point[parameter], a)){ - newPoint[parameter]+=epsilon; - }else if(!ignoreB && MathUtils.almost(point[parameter], b)){ - newPoint[parameter]-=epsilon; - }else{ - newPoint[parameter]-=epsilon; - } - return newPoint; - } - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/projections/Projection.java b/gi/posterior-regularisation/prjava/src/optimization/projections/Projection.java deleted file mode 100644 index b5a9f92f..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/projections/Projection.java +++ /dev/null @@ -1,72 +0,0 @@ -package optimization.projections; - -import optimization.util.MathUtils; -import optimization.util.MatrixOutput; -import util.ArrayMath; -import util.Printing; - - - -public abstract class Projection { - - - public abstract void project(double[] original); - - - /** - * From the projection theorem "Non-Linear Programming" page - * 201 fact 2. - * - * Given some z in R, and a vector x* in X; - * x* = z+ iif for all x in X - * (z-x*)'(x-x*) <= 0 where 0 is when x*=x - * See figure 2.16 in book - * - * @param original - * @param projected - * @return - */ - public boolean testProjection(double[] original, double[] projected){ - double[] original1 = original.clone(); - //System.out.println(Printing.doubleArrayToString(original1, null, "original")); - //System.out.println(Printing.doubleArrayToString(projected, null, "projected")); - MathUtils.minusEquals(original1, projected, 1); - //System.out.println(Printing.doubleArrayToString(original1, null, "minus1")); - for(int i = 0; i < 10; i++){ - double[] x = samplePoint(original.length); - // System.out.println(Printing.doubleArrayToString(x, null, "sample")); - //If the same this returns zero so we are there. - MathUtils.minusEquals(x, projected, 1); - // System.out.println(Printing.doubleArrayToString(x, null, "minus2")); - double dotProd = MathUtils.dotProduct(original1, x); - - // System.out.println("dot " + dotProd); - if(dotProd > 0) return false; - } - - //Perturbs the point a bit in all possible directions - for(int i = 0; i < original.length; i++){ - double[] x = perturbePoint(projected,i); - // System.out.println(Printing.doubleArrayToString(x, null, "perturbed")); - //If the same this returns zero so we are there. 
- MathUtils.minusEquals(x, projected, 1); - // System.out.println(Printing.doubleArrayToString(x, null, "minus2")); - double dotProd = MathUtils.dotProduct(original1, x); - - // System.out.println("dot " + dotProd); - if(dotProd > 0) return false; - } - - - - return true; - } - - //Samples a point from the constrained set - public abstract double[] samplePoint(int dimensions); - - //Perturbs a point a bit still leaving it at the constraints set - public abstract double[] perturbePoint(double[] point, int parameter); - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/projections/SimplexProjection.java b/gi/posterior-regularisation/prjava/src/optimization/projections/SimplexProjection.java deleted file mode 100644 index f22afcaf..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/projections/SimplexProjection.java +++ /dev/null @@ -1,127 +0,0 @@ -package optimization.projections; - - - -import java.util.Random; - -import optimization.util.MathUtils; -import optimization.util.MatrixOutput; - -public class SimplexProjection extends Projection{ - - double scale; - public SimplexProjection(double scale) { - this.scale = scale; - } - - /** - * projects the numbers of the array - * into a simplex of size. - * We follow the description of the paper - * "Efficient Projetions onto the l1-Ball - * for learning in high dimensions" - */ - public void project(double[] original){ - double[] ds = new double[original.length]; - System.arraycopy(original, 0, ds, 0, ds.length); - //If sum is smaller then zero then its ok - for (int i = 0; i < ds.length; i++) ds[i] = ds[i]>0? ds[i]:0; - double sum = MathUtils.sum(ds); - if (scale - sum >= -1.E-10 ){ - System.arraycopy(ds, 0, original, 0, ds.length); - //System.out.println("Not projecting"); - return; - } - //System.out.println("projecting " + sum + " scontraints " + scale); - util.Array.sortDescending(ds); - double currentSum = 0; - double previousTheta = 0; - double theta = 0; - for (int i = 0; i < ds.length; i++) { - currentSum+=ds[i]; - theta = (currentSum-scale)/(i+1); - if(ds[i]-theta < -1e-10){ - break; - } - previousTheta = theta; - } - //DEBUG - if(previousTheta < 0){ - System.out.println("Simple Projection: Theta is smaller than zero: " + previousTheta); - System.exit(-1); - } - for (int i = 0; i < original.length; i++) { - original[i] = Math.max(original[i]-previousTheta, 0); - } - } - - - - - - - /** - * Samples a point from the simplex of scale. Just sample - * random number from 0-scale and then if - * their sum is bigger then sum make them normalize. - * This is probably not sampling uniformly from the simplex but it is - * enough for our goals in here. 
- */ - Random r = new Random(); - public double[] samplePoint(int dimensions) { - double[] newPoint = new double[dimensions]; - double sum =0; - for (int i = 0; i < newPoint.length; i++) { - double rand = r.nextDouble()*scale; - sum+=rand; - newPoint[i]=rand; - } - //Normalize - if(sum > scale){ - for (int i = 0; i < newPoint.length; i++) { - newPoint[i]=scale*newPoint[i]/sum; - } - } - return newPoint; - } - - public static void main(String[] args) { - SimplexProjection sp = new SimplexProjection(1); - - - double[] point = sp.samplePoint(3); - MatrixOutput.printDoubleArray(point , "random 1 sum:" + MathUtils.sum(point)); - point = sp.samplePoint(3); - MatrixOutput.printDoubleArray(point , "random 2 sum:" + MathUtils.sum(point)); - point = sp.samplePoint(3); - MatrixOutput.printDoubleArray(point , "random 3 sum:" + MathUtils.sum(point)); - - double[] d = {0,1.1,-10}; - double[] original = d.clone(); - MatrixOutput.printDoubleArray(d, "before"); - - sp.project(d); - MatrixOutput.printDoubleArray(d, "after"); - System.out.println("Test projection: " + sp.testProjection(original, d)); - - } - - - double epsilon = 1.E-10; - public double[] perturbePoint(double[] point, int parameter){ - double[] newPoint = point.clone(); - if(MathUtils.almost(MathUtils.sum(point), scale)){ - newPoint[parameter]-=epsilon; - } - else if(point[parameter]==0){ - newPoint[parameter]+=epsilon; - }else if(MathUtils.almost(point[parameter], scale)){ - newPoint[parameter]-=epsilon; - } - else{ - newPoint[parameter]-=epsilon; - } - return newPoint; - } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/CompositeStopingCriteria.java b/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/CompositeStopingCriteria.java deleted file mode 100644 index 15760f18..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/CompositeStopingCriteria.java +++ /dev/null @@ -1,33 +0,0 @@ -package optimization.stopCriteria; - -import java.util.ArrayList; - -import optimization.gradientBasedMethods.Objective; - -public class CompositeStopingCriteria implements StopingCriteria { - - ArrayList<StopingCriteria> criterias; - - public CompositeStopingCriteria() { - criterias = new ArrayList<StopingCriteria>(); - } - - public void add(StopingCriteria criteria){ - criterias.add(criteria); - } - - public boolean stopOptimization(Objective obj){ - for(StopingCriteria criteria: criterias){ - if(criteria.stopOptimization(obj)){ - return true; - } - } - return false; - } - - public void reset(){ - for(StopingCriteria criteria: criterias){ - criteria.reset(); - } - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/GradientL2Norm.java b/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/GradientL2Norm.java deleted file mode 100644 index 534ff833..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/GradientL2Norm.java +++ /dev/null @@ -1,30 +0,0 @@ -package optimization.stopCriteria; - -import optimization.gradientBasedMethods.Objective; -import optimization.util.MathUtils; - -public class GradientL2Norm implements StopingCriteria{ - - /** - * Stop if gradientNorm/(originalGradientNorm) smaller - * than gradientConvergenceValue - */ - protected double gradientConvergenceValue; - - - public GradientL2Norm(double gradientConvergenceValue){ - this.gradientConvergenceValue = gradientConvergenceValue; - } - - public void reset(){} - - public boolean stopOptimization(Objective obj){ - double norm = 
MathUtils.L2Norm(obj.gradient); - if(norm < gradientConvergenceValue){ - System.out.println("Gradient norm below treshold"); - return true; - } - return false; - - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/NormalizedGradientL2Norm.java b/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/NormalizedGradientL2Norm.java deleted file mode 100644 index 4a489641..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/NormalizedGradientL2Norm.java +++ /dev/null @@ -1,48 +0,0 @@ -package optimization.stopCriteria; - -import optimization.gradientBasedMethods.Objective; -import optimization.gradientBasedMethods.ProjectedObjective; -import optimization.util.MathUtils; - -/** - * Divides the norm by the norm at the begining of the iteration - * @author javg - * - */ -public class NormalizedGradientL2Norm extends GradientL2Norm{ - - /** - * Stop if gradientNorm/(originalGradientNorm) smaller - * than gradientConvergenceValue - */ - protected double originalGradientNorm = -1; - - public void reset(){ - originalGradientNorm = -1; - } - public NormalizedGradientL2Norm(double gradientConvergenceValue){ - super(gradientConvergenceValue); - } - - - - - public boolean stopOptimization(Objective obj){ - double norm = MathUtils.L2Norm(obj.gradient); - if(originalGradientNorm == -1){ - originalGradientNorm = norm; - } - if(originalGradientNorm < 1E-10){ - System.out.println("Gradient norm is zero " + originalGradientNorm); - return true; - } - double normalizedNorm = 1.0*norm/originalGradientNorm; - if( normalizedNorm < gradientConvergenceValue){ - System.out.println("Gradient norm below normalized normtreshold: " + norm + " original: " + originalGradientNorm + " normalized norm: " + normalizedNorm); - return true; - }else{ -// System.out.println("projected gradient norm: " + norm); - return false; - } - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/NormalizedProjectedGradientL2Norm.java b/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/NormalizedProjectedGradientL2Norm.java deleted file mode 100644 index 5ae554c2..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/NormalizedProjectedGradientL2Norm.java +++ /dev/null @@ -1,60 +0,0 @@ -package optimization.stopCriteria; - -import optimization.gradientBasedMethods.Objective; -import optimization.gradientBasedMethods.ProjectedObjective; -import optimization.util.MathUtils; - -/** - * Divides the norm by the norm at the begining of the iteration - * @author javg - * - */ -public class NormalizedProjectedGradientL2Norm extends ProjectedGradientL2Norm{ - - /** - * Stop if gradientNorm/(originalGradientNorm) smaller - * than gradientConvergenceValue - */ - double originalProjectedNorm = -1; - - public NormalizedProjectedGradientL2Norm(double gradientConvergenceValue){ - super(gradientConvergenceValue); - } - - public void reset(){ - originalProjectedNorm = -1; - } - - - double[] projectGradient(ProjectedObjective obj){ - - if(obj.auxParameters == null){ - obj.auxParameters = new double[obj.getNumParameters()]; - } - System.arraycopy(obj.getParameters(), 0, obj.auxParameters, 0, obj.getNumParameters()); - MathUtils.minusEquals(obj.auxParameters, obj.gradient, 1); - obj.auxParameters = obj.projectPoint(obj.auxParameters); - MathUtils.minusEquals(obj.auxParameters,obj.getParameters(),1); - return obj.auxParameters; - } - - public boolean stopOptimization(Objective obj){ - if(obj instanceof ProjectedObjective) { 
- ProjectedObjective o = (ProjectedObjective) obj; - double norm = MathUtils.L2Norm(projectGradient(o)); - if(originalProjectedNorm == -1){ - originalProjectedNorm = norm; - } - double normalizedNorm = 1.0*norm/originalProjectedNorm; - if( normalizedNorm < gradientConvergenceValue){ - System.out.println("Gradient norm below normalized normtreshold: " + norm + " original: " + originalProjectedNorm + " normalized norm: " + normalizedNorm); - return true; - }else{ -// System.out.println("projected gradient norm: " + norm); - return false; - } - } - System.out.println("Not a projected objective"); - throw new RuntimeException(); - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/NormalizedValueDifference.java b/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/NormalizedValueDifference.java deleted file mode 100644 index 6dbbc50d..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/NormalizedValueDifference.java +++ /dev/null @@ -1,54 +0,0 @@ -package optimization.stopCriteria; - -import optimization.gradientBasedMethods.Objective; -import optimization.util.MathUtils; - -public class NormalizedValueDifference implements StopingCriteria{ - - /** - * Stop if the different between values is smaller than a treshold - */ - protected double valueConvergenceValue=0.01; - protected double previousValue = Double.NaN; - protected double currentValue = Double.NaN; - - public NormalizedValueDifference(double valueConvergenceValue){ - this.valueConvergenceValue = valueConvergenceValue; - } - - public void reset(){ - previousValue = Double.NaN; - currentValue = Double.NaN; - } - - - public boolean stopOptimization(Objective obj){ - if(Double.isNaN(currentValue)){ - currentValue = obj.getValue(); - return false; - }else { - previousValue = currentValue; - currentValue = obj.getValue(); - if(previousValue != 0){ - double valueDiff = Math.abs(previousValue - currentValue)/Math.abs(previousValue); - if( valueDiff < valueConvergenceValue){ - System.out.println("Leaving different in values is to small: Prev " - + (previousValue/previousValue) + " Curr: " + (currentValue/previousValue) - + " diff: " + valueDiff); - return true; - } - }else{ - double valueDiff = Math.abs(previousValue - currentValue); - if( valueDiff < valueConvergenceValue){ - System.out.println("Leaving different in values is to small: Prev " - + (previousValue) + " Curr: " + (currentValue) - + " diff: " + valueDiff); - return true; - } - } - - return false; - } - - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/ProjectedGradientL2Norm.java b/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/ProjectedGradientL2Norm.java deleted file mode 100644 index aadf1fd5..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/ProjectedGradientL2Norm.java +++ /dev/null @@ -1,51 +0,0 @@ -package optimization.stopCriteria; - -import optimization.gradientBasedMethods.Objective; -import optimization.gradientBasedMethods.ProjectedObjective; -import optimization.util.MathUtils; - -public class ProjectedGradientL2Norm implements StopingCriteria{ - - /** - * Stop if gradientNorm/(originalGradientNorm) smaller - * than gradientConvergenceValue - */ - protected double gradientConvergenceValue; - - - public ProjectedGradientL2Norm(double gradientConvergenceValue){ - this.gradientConvergenceValue = gradientConvergenceValue; - } - - public void reset(){ - - } - - double[] projectGradient(ProjectedObjective obj){ - - 
if(obj.auxParameters == null){ - obj.auxParameters = new double[obj.getNumParameters()]; - } - System.arraycopy(obj.getParameters(), 0, obj.auxParameters, 0, obj.getNumParameters()); - MathUtils.minusEquals(obj.auxParameters, obj.gradient, 1); - obj.auxParameters = obj.projectPoint(obj.auxParameters); - MathUtils.minusEquals(obj.auxParameters,obj.getParameters(),1); - return obj.auxParameters; - } - - public boolean stopOptimization(Objective obj){ - if(obj instanceof ProjectedObjective) { - ProjectedObjective o = (ProjectedObjective) obj; - double norm = MathUtils.L2Norm(projectGradient(o)); - if(norm < gradientConvergenceValue){ - // System.out.println("Gradient norm below treshold: " + norm); - return true; - }else{ -// System.out.println("projected gradient norm: " + norm); - return false; - } - } - System.out.println("Not a projected objective"); - throw new RuntimeException(); - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/StopingCriteria.java b/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/StopingCriteria.java deleted file mode 100644 index 10cf0522..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/StopingCriteria.java +++ /dev/null @@ -1,8 +0,0 @@ -package optimization.stopCriteria; - -import optimization.gradientBasedMethods.Objective; - -public interface StopingCriteria { - public boolean stopOptimization(Objective obj); - public void reset(); -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/ValueDifference.java b/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/ValueDifference.java deleted file mode 100644 index e5d07229..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/stopCriteria/ValueDifference.java +++ /dev/null @@ -1,41 +0,0 @@ -package optimization.stopCriteria; - -import optimization.gradientBasedMethods.Objective; -import optimization.util.MathUtils; - -public class ValueDifference implements StopingCriteria{ - - /** - * Stop if the different between values is smaller than a treshold - */ - protected double valueConvergenceValue=0.01; - protected double previousValue = Double.NaN; - protected double currentValue = Double.NaN; - - public ValueDifference(double valueConvergenceValue){ - this.valueConvergenceValue = valueConvergenceValue; - } - - public void reset(){ - previousValue = Double.NaN; - currentValue = Double.NaN; - } - - public boolean stopOptimization(Objective obj){ - if(Double.isNaN(currentValue)){ - currentValue = obj.getValue(); - return false; - }else { - previousValue = currentValue; - currentValue = obj.getValue(); - if(previousValue - currentValue < valueConvergenceValue){ -// System.out.println("Leaving different in values is to small: Prev " -// + previousValue + " Curr: " + currentValue -// + " diff: " + (previousValue - currentValue)); - return true; - } - return false; - } - - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/util/Interpolation.java b/gi/posterior-regularisation/prjava/src/optimization/util/Interpolation.java deleted file mode 100644 index cdbdefc6..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/util/Interpolation.java +++ /dev/null @@ -1,37 +0,0 @@ -package optimization.util; - -public class Interpolation { - - /** - * Fits a cubic polinomyal to a function given two points, - * such that either gradB is bigger than zero or funcB >= funcA - * - * NonLinear Programming appendix C - * @param funcA - * @param gradA - * @param funcB - * @param 
gradB - */ - public final static double cubicInterpolation(double a, - double funcA, double gradA, double b,double funcB, double gradB ){ - if(gradB < 0 && funcA > funcB){ - System.out.println("Cannot call cubic interpolation"); - return -1; - } - - double z = 3*(funcA-funcB)/(b-a) + gradA + gradB; - double w = Math.sqrt(z*z - gradA*gradB); - double min = b -(gradB+w-z)*(b-a)/(gradB-gradA+2*w); - return min; - } - - public final static double quadraticInterpolation(double initFValue, - double initGrad, double point,double pointFValue){ - double min = -1*initGrad*point*point/(2*(pointFValue-initGrad*point-initFValue)); - return min; - } - - public static void main(String[] args) { - - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/util/Logger.java b/gi/posterior-regularisation/prjava/src/optimization/util/Logger.java deleted file mode 100644 index 5343a39b..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/util/Logger.java +++ /dev/null @@ -1,7 +0,0 @@ -package optimization.util; - -public class Logger { - - - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/util/MathUtils.java b/gi/posterior-regularisation/prjava/src/optimization/util/MathUtils.java deleted file mode 100644 index af66f82c..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/util/MathUtils.java +++ /dev/null @@ -1,339 +0,0 @@ -package optimization.util; - -import java.util.Arrays; - - - -public class MathUtils { - - /** - * - * @param vector - * @return - */ - public static double L2Norm(double[] vector){ - double value = 0; - for(int i = 0; i < vector.length; i++){ - double v = vector[i]; - value+=v*v; - } - return Math.sqrt(value); - } - - public static double sum(double[] v){ - double sum = 0; - for (int i = 0; i < v.length; i++) { - sum+=v[i]; - } - return sum; - } - - - - - /** - * w = w + v - * @param w - * @param v - */ - public static void plusEquals(double[] w, double[] v) { - for(int i=0; i<w.length;i++){ - w[i] += w[i] + v[i]; - } - } - - /** - * w[i] = w[i] + v - * @param w - * @param v - */ - public static void plusEquals(double[] w, double v) { - for(int i=0; i<w.length;i++){ - w[i] += w[i] + v; - } - } - - /** - * w[i] = w[i] - v - * @param w - * @param v - */ - public static void minusEquals(double[] w, double v) { - for(int i=0; i<w.length;i++){ - w[i] -= w[i] + v; - } - } - - /** - * w = w + a*v - * @param w - * @param v - * @param a - */ - public static void plusEquals(double[] w, double[] v, double a) { - for(int i=0; i<w.length;i++){ - w[i] += a*v[i]; - } - } - - /** - * w = w - a*v - * @param w - * @param v - * @param a - */ - public static void minusEquals(double[] w, double[] v, double a) { - for(int i=0; i<w.length;i++){ - w[i] -= a*v[i]; - } - } - /** - * v = w - a*v - * @param w - * @param v - * @param a - */ - public static void minusEqualsInverse(double[] w, double[] v, double a) { - for(int i=0; i<w.length;i++){ - v[i] = w[i] - a*v[i]; - } - } - - public static double dotProduct(double[] w, double[] v){ - double accum = 0; - for(int i=0; i<w.length;i++){ - accum += w[i]*v[i]; - } - return accum; - } - - public static double[] arrayMinus(double[]w, double[]v){ - double result[] = w.clone(); - for(int i=0; i<w.length;i++){ - result[i] -= v[i]; - } - return result; - } - - public static double[] arrayMinus(double[] result , double[]w, double[]v){ - for(int i=0; i<w.length;i++){ - result[i] = w[i]-v[i]; - } - return result; - } - - public static double[] negation(double[]w){ - double result[] = new double[w.length]; - for(int 
i=0; i<w.length;i++){ - result[i] = -w[i]; - } - return result; - } - - public static double square(double value){ - return value*value; - } - public static double[][] outerProduct(double[] w, double[] v){ - double[][] result = new double[w.length][v.length]; - for(int i = 0; i < w.length; i++){ - for(int j = 0; j < v.length; j++){ - result[i][j] = w[i]*v[j]; - } - } - return result; - } - /** - * results = a*W*V - * @param w - * @param v - * @param a - * @return - */ - public static double[][] weightedouterProduct(double[] w, double[] v, double a){ - double[][] result = new double[w.length][v.length]; - for(int i = 0; i < w.length; i++){ - for(int j = 0; j < v.length; j++){ - result[i][j] = a*w[i]*v[j]; - } - } - return result; - } - - public static double[][] identity(int size){ - double[][] result = new double[size][size]; - for(int i = 0; i < size; i++){ - result[i][i] = 1; - } - return result; - } - - /** - * v -= w - * @param v - * @param w - */ - public static void minusEquals(double[][] w, double[][] v){ - for(int i = 0; i < w.length; i++){ - for(int j = 0; j < w[0].length; j++){ - w[i][j] -= v[i][j]; - } - } - } - - /** - * v[i][j] -= a*w[i][j] - * @param v - * @param w - */ - public static void minusEquals(double[][] w, double[][] v, double a){ - for(int i = 0; i < w.length; i++){ - for(int j = 0; j < w[0].length; j++){ - w[i][j] -= a*v[i][j]; - } - } - } - - /** - * v += w - * @param v - * @param w - */ - public static void plusEquals(double[][] w, double[][] v){ - for(int i = 0; i < w.length; i++){ - for(int j = 0; j < w[0].length; j++){ - w[i][j] += v[i][j]; - } - } - } - - /** - * v[i][j] += a*w[i][j] - * @param v - * @param w - */ - public static void plusEquals(double[][] w, double[][] v, double a){ - for(int i = 0; i < w.length; i++){ - for(int j = 0; j < w[0].length; j++){ - w[i][j] += a*v[i][j]; - } - } - } - - - /** - * results = w*v - * @param w - * @param v - * @return - */ - public static double[][] matrixMultiplication(double[][] w,double[][] v){ - int w1 = w.length; - int w2 = w[0].length; - int v1 = v.length; - int v2 = v[0].length; - - if(w2 != v1){ - System.out.println("Matrix dimensions do not agree..."); - System.exit(-1); - } - - double[][] result = new double[w1][v2]; - for(int w_i1 = 0; w_i1 < w1; w_i1++){ - for(int v_i2 = 0; v_i2 < v2; v_i2++){ - double sum = 0; - for(int w_i2 = 0; w_i2 < w2; w_i2++){ - sum += w[w_i1 ][w_i2]*v[w_i2][v_i2]; - } - result[w_i1][v_i2] = sum; - } - } - return result; - } - - /** - * w = w.*v - * @param w - * @param v - */ - public static void matrixScalarMultiplication(double[][] w,double v){ - int w1 = w.length; - int w2 = w[0].length; - for(int w_i1 = 0; w_i1 < w1; w_i1++){ - for(int w_i2 = 0; w_i2 < w2; w_i2++){ - w[w_i1 ][w_i2] *= v; - } - } - } - - public static void scalarMultiplication(double[] w,double v){ - int w1 = w.length; - for(int w_i1 = 0; w_i1 < w1; w_i1++){ - w[w_i1 ] *= v; - } - - } - - public static double[] matrixVector(double[][] w,double[] v){ - int w1 = w.length; - int w2 = w[0].length; - int v1 = v.length; - - if(w2 != v1){ - System.out.println("Matrix dimensions do not agree..."); - System.exit(-1); - } - - double[] result = new double[w1]; - for(int w_i1 = 0; w_i1 < w1; w_i1++){ - double sum = 0; - for(int w_i2 = 0; w_i2 < w2; w_i2++){ - sum += w[w_i1 ][w_i2]*v[w_i2]; - } - result[w_i1] = sum; - } - return result; - } - - public static boolean allPositive(double[] array){ - for (int i = 0; i < array.length; i++) { - if(array[i] < 0) return false; - } - return true; - } - - - - - - public static void 
main(String[] args) { - double[][] m1 = new double[2][2]; - m1[0][0]=2; - m1[1][0]=2; - m1[0][1]=2; - m1[1][1]=2; - MatrixOutput.printDoubleArray(m1, "m1"); - double[][] m2 = new double[2][2]; - m2[0][0]=3; - m2[1][0]=3; - m2[0][1]=3; - m2[1][1]=3; - MatrixOutput.printDoubleArray(m2, "m2"); - double[][] result = matrixMultiplication(m1, m2); - MatrixOutput.printDoubleArray(result, "result"); - matrixScalarMultiplication(result, 3); - MatrixOutput.printDoubleArray(result, "result after multiply by 3"); - } - - public static boolean almost(double a, double b, double prec){ - return Math.abs(a-b)/Math.abs(a+b) <= prec || (almostZero(a) && almostZero(b)); - } - - public static boolean almost(double a, double b){ - return Math.abs(a-b)/Math.abs(a+b) <= 1e-10 || (almostZero(a) && almostZero(b)); - } - - public static boolean almostZero(double a) { - return Math.abs(a) <= 1e-30; - } - -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/util/MatrixOutput.java b/gi/posterior-regularisation/prjava/src/optimization/util/MatrixOutput.java deleted file mode 100644 index 9fbdf955..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/util/MatrixOutput.java +++ /dev/null @@ -1,28 +0,0 @@ -package optimization.util; - - -public class MatrixOutput { - public static void printDoubleArray(double[][] array, String arrayName) { - int size1 = array.length; - int size2 = array[0].length; - System.out.println(arrayName); - for (int i = 0; i < size1; i++) { - for (int j = 0; j < size2; j++) { - System.out.print(" " + StaticTools.prettyPrint(array[i][j], - "00.00E00", 4) + " "); - - } - System.out.println(); - } - System.out.println(); - } - - public static void printDoubleArray(double[] array, String arrayName) { - System.out.println(arrayName); - for (int i = 0; i < array.length; i++) { - System.out.print(" " + StaticTools.prettyPrint(array[i], - "00.00E00", 4) + " "); - } - System.out.println(); - } -} diff --git a/gi/posterior-regularisation/prjava/src/optimization/util/StaticTools.java b/gi/posterior-regularisation/prjava/src/optimization/util/StaticTools.java deleted file mode 100644 index bcabee06..00000000 --- a/gi/posterior-regularisation/prjava/src/optimization/util/StaticTools.java +++ /dev/null @@ -1,180 +0,0 @@ -package optimization.util; - - -import java.io.File; -import java.io.PrintStream; - -public class StaticTools { - - static java.text.DecimalFormat fmt = new java.text.DecimalFormat(); - - public static void createDir(String directory) { - - File dir = new File(directory); - if (!dir.isDirectory()) { - boolean success = dir.mkdirs(); - if (!success) { - System.out.println("Unable to create directory " + directory); - System.exit(0); - } - System.out.println("Created directory " + directory); - } else { - System.out.println("Reusing directory " + directory); - } - } - - /* - * q and p are indexed by source/foreign Sum_S(q) = 1 the same for p KL(q,p) = - * Eq*q/p - */ - public static double KLDistance(double[][] p, double[][] q, int sourceSize, - int foreignSize) { - double totalKL = 0; - // common.StaticTools.printMatrix(q, sourceSize, foreignSize, "q", - // System.out); - // common.StaticTools.printMatrix(p, sourceSize, foreignSize, "p", - // System.out); - for (int i = 0; i < sourceSize; i++) { - double kl = 0; - for (int j = 0; j < foreignSize; j++) { - assert !Double.isNaN(q[i][j]) : "KLDistance q: prob is NaN"; - assert !Double.isNaN(p[i][j]) : "KLDistance p: prob is NaN"; - if (p[i][j] == 0 || q[i][j] == 0) { - continue; - } else { - kl += q[i][j] * 
Math.log(q[i][j] / p[i][j]); - } - - } - totalKL += kl; - } - assert !Double.isNaN(totalKL) : "KLDistance: prob is NaN"; - if (totalKL < -1.0E-10) { - System.out.println("KL Smaller than zero " + totalKL); - System.out.println("Source Size" + sourceSize); - System.out.println("Foreign Size" + foreignSize); - StaticTools.printMatrix(q, sourceSize, foreignSize, "q", - System.out); - StaticTools.printMatrix(p, sourceSize, foreignSize, "p", - System.out); - System.exit(-1); - } - return totalKL / sourceSize; - } - - /* - * indexed the by [fi][si] - */ - public static double KLDistancePrime(double[][] p, double[][] q, - int sourceSize, int foreignSize) { - double totalKL = 0; - for (int i = 0; i < sourceSize; i++) { - double kl = 0; - for (int j = 0; j < foreignSize; j++) { - assert !Double.isNaN(q[j][i]) : "KLDistance q: prob is NaN"; - assert !Double.isNaN(p[j][i]) : "KLDistance p: prob is NaN"; - if (p[j][i] == 0 || q[j][i] == 0) { - continue; - } else { - kl += q[j][i] * Math.log(q[j][i] / p[j][i]); - } - - } - totalKL += kl; - } - assert !Double.isNaN(totalKL) : "KLDistance: prob is NaN"; - return totalKL / sourceSize; - } - - public static double Entropy(double[][] p, int sourceSize, int foreignSize) { - double totalE = 0; - for (int i = 0; i < foreignSize; i++) { - double e = 0; - for (int j = 0; j < sourceSize; j++) { - e += p[i][j] * Math.log(p[i][j]); - } - totalE += e; - } - return totalE / sourceSize; - } - - public static double[][] copyMatrix(double[][] original, int sourceSize, - int foreignSize) { - double[][] result = new double[sourceSize][foreignSize]; - for (int i = 0; i < sourceSize; i++) { - for (int j = 0; j < foreignSize; j++) { - result[i][j] = original[i][j]; - } - } - return result; - } - - public static void printMatrix(double[][] matrix, int sourceSize, - int foreignSize, String info, PrintStream out) { - - java.text.DecimalFormat fmt = new java.text.DecimalFormat(); - fmt.setMaximumFractionDigits(3); - fmt.setMaximumIntegerDigits(3); - fmt.setMinimumFractionDigits(3); - fmt.setMinimumIntegerDigits(3); - - out.println(info); - - for (int i = 0; i < foreignSize; i++) { - for (int j = 0; j < sourceSize; j++) { - out.print(prettyPrint(matrix[j][i], ".00E00", 6) + " "); - } - out.println(); - } - out.println(); - out.println(); - } - - public static void printMatrix(int[][] matrix, int sourceSize, - int foreignSize, String info, PrintStream out) { - - out.println(info); - for (int i = 0; i < foreignSize; i++) { - for (int j = 0; j < sourceSize; j++) { - out.print(matrix[j][i] + " "); - } - out.println(); - } - out.println(); - out.println(); - } - - public static String formatTime(long duration) { - StringBuilder sb = new StringBuilder(); - double d = duration / 1000; - fmt.applyPattern("00"); - sb.append(fmt.format((int) (d / (60 * 60))) + ":"); - d -= ((int) d / (60 * 60)) * 60 * 60; - sb.append(fmt.format((int) (d / 60)) + ":"); - d -= ((int) d / 60) * 60; - fmt.applyPattern("00.0"); - sb.append(fmt.format(d)); - return sb.toString(); - } - - public static String prettyPrint(double d, String patt, int len) { - fmt.applyPattern(patt); - String s = fmt.format(d); - while (s.length() < len) { - s = " " + s; - } - return s; - } - - - public static long getUsedMemory(){ - System.gc(); - return (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory())/ (1024 * 1024); - } - - public final static boolean compareDoubles(double d1, double d2){ - return Math.abs(d1-d2) <= 1.E-10; - } - - -} diff --git a/gi/posterior-regularisation/prjava/src/phrase/Agree.java 
b/gi/posterior-regularisation/prjava/src/phrase/Agree.java
deleted file mode 100644
index 8f7b499e..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/Agree.java
+++ /dev/null
@@ -1,204 +0,0 @@
-package phrase;
-
-import gnu.trove.TIntArrayList;
-
-import io.FileUtil;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.List;
-
-import phrase.Corpus.Edge;
-
-public class Agree {
- PhraseCluster model1;
- C2F model2;
- Corpus c;
-	private int K, n_phrases, n_words, n_contexts, n_positions1, n_positions2;
-
-	/**@brief Sum of the log-likelihoods of the two
-	 * individual models.
-	 */
- public double llh;
- /**@brief Bhattacharyya distance
- *
- */
- public double bdist;
-	/**
-	 * Builds the two component models over the same corpus.
-	 * @param numCluster number of latent tags (clusters) K
-	 * @param corpus concordance to cluster
-	 */
- public Agree(int numCluster, Corpus corpus){
-
- model1=new PhraseCluster(numCluster, corpus);
- model2=new C2F(numCluster,corpus);
- c=corpus;
- n_words=c.getNumWords();
- n_phrases=c.getNumPhrases();
- n_contexts=c.getNumContexts();
- n_positions1=c.getNumContextPositions();
- n_positions2=2;
- K=numCluster;
-
- }
-
-	/**@brief Test driver: runs agreement EM on a canned
-	 * concordance and writes the posteriors to a file.
-	 */
- public static void main(String args[]){
- //String in="../pdata/canned.con";
- String in="../pdata/btec.con";
- String out="../pdata/posterior.out";
- int numCluster=25;
- Corpus corpus = null;
- File infile = new File(in);
- try {
- System.out.println("Reading concordance from " + infile);
- corpus = Corpus.readFromFile(FileUtil.reader(infile));
- corpus.printStats(System.out);
- } catch (IOException e) {
- System.err.println("Failed to open input file: " + infile);
- e.printStackTrace();
- System.exit(1);
- }
-
- Agree agree=new Agree(numCluster, corpus);
- int iter=20;
- for(int i=0;i<iter;i++){
- agree.EM();
- System.out.println("Iter"+i+", llh: "+agree.llh+
- ", divergence:"+agree.bdist+
- " sum: "+(agree.llh+agree.bdist));
- }
-
- File outfile = new File (out);
- try {
- PrintStream ps = FileUtil.printstream(outfile);
- agree.displayPosterior(ps);
- // ps.println();
- // c2f.displayModelParam(ps);
- ps.close();
- } catch (IOException e) {
- System.err.println("Failed to open output file: " + outfile);
- e.printStackTrace();
- System.exit(1);
- }
-
- }
-
- public double EM(){
-
- double [][][]exp_emit1=new double [K][n_positions1][n_words];
- double [][]exp_pi1=new double[n_phrases][K];
-
- double [][][]exp_emit2=new double [K][n_positions2][n_words];
- double [][]exp_pi2=new double[n_contexts][K];
-
- llh=0;
- bdist=0;
-		//E-step: accumulate expected counts under the current posteriors
- for(int context=0; context< n_contexts; context++){
-
- List<Edge> contexts = c.getEdgesForContext(context);
-
- for (int ctx=0; ctx<contexts.size(); ctx++){
- Edge edge = contexts.get(ctx);
- int phrase=edge.getPhraseId();
- double p[]=posterior(edge);
- double z = arr.F.l1norm(p);
- assert z > 0;
- bdist += edge.getCount() * Math.log(z);
- arr.F.l1normalize(p);
-
- double count = edge.getCount();
- //increment expected count
- TIntArrayList phraseToks = edge.getPhrase();
- TIntArrayList contextToks = edge.getContext();
- for(int tag=0;tag<K;tag++){
-
- for(int position=0;position<n_positions1;position++){
- exp_emit1[tag][position][contextToks.get(position)]+=p[tag]*count;
- }
-
- exp_emit2[tag][0][phraseToks.get(0)]+=p[tag]*count;
- exp_emit2[tag][1][phraseToks.get(phraseToks.size()-1)]+=p[tag]*count;
-
- exp_pi1[phrase][tag]+=p[tag]*count;
- exp_pi2[context][tag]+=p[tag]*count;
- }
- }
- }
-
- //System.out.println("Log likelihood: "+loglikelihood);
-
-		//M-step: re-estimate the parameters by normalizing the expected counts
- for(double [][]i:exp_emit1){
- for(double []j:i){
- arr.F.l1normalize(j);
- }
- }
-
- for(double []j:exp_pi1){
- arr.F.l1normalize(j);
- }
-
- for(double [][]i:exp_emit2){
- for(double []j:i){
- arr.F.l1normalize(j);
- }
- }
-
- for(double []j:exp_pi2){
- arr.F.l1normalize(j);
- }
-
- model1.emit=exp_emit1;
- model1.pi=exp_pi1;
- model2.emit=exp_emit2;
- model2.pi=exp_pi2;
-
- return llh;
- }
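The E-step above scores every edge with the current parameters and the M-step renormalizes the expected counts; both lean on a small array-helper class, `arr.F`, whose source sits outside this part of the diff. A minimal sketch of the behavior its call sites appear to assume — names and semantics are inferred, so treat the whole block as an assumption rather than the project's actual implementation:

    // Sketch of arr.F as used by the EM code above (assumed, not verified).
    package arr;

    import java.util.Random;

    public final class F {
        private static final Random rng = new Random();

        /** Sum of entries; callers treat it as the unnormalized total mass. */
        public static double l1norm(double[] v) {
            double s = 0;
            for (double x : v) s += x;
            return s;
        }

        /** Scale in place so the entries sum to 1; leaves an all-zero vector alone. */
        public static void l1normalize(double[] v) {
            double s = l1norm(v);
            if (s == 0) return;
            for (int i = 0; i < v.length; i++) v[i] /= s;
        }

        /** Index of the largest entry; used for the hard cluster decision. */
        public static int argmax(double[] v) {
            int best = 0;
            for (int i = 1; i < v.length; i++) if (v[i] > v[best]) best = i;
            return best;
        }

        /** Random positive initialization followed by normalization. */
        public static void randomise(double[] v) {
            for (int i = 0; i < v.length; i++) v[i] = rng.nextDouble() + 1e-10;
            l1normalize(v);
        }
    }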
-
- public double[] posterior(Corpus.Edge edge)
- {
- double[] prob1=model1.posterior(edge);
- double[] prob2=model2.posterior(edge);
-
- llh+=edge.getCount()*Math.log(arr.F.l1norm(prob1));
- llh+=edge.getCount()*Math.log(arr.F.l1norm(prob2));
- arr.F.l1normalize(prob1);
- arr.F.l1normalize(prob2);
-
- for(int i=0;i<prob1.length;i++){
- prob1[i]*=prob2[i];
- prob1[i]=Math.sqrt(prob1[i]);
- }
-
- return prob1;
- }
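The element-wise product followed by a square root is the agreement step: per tag it takes the geometric mean of the two normalized posteriors, so probability mass survives only where both models place it. A toy check with invented posteriors (the numbers are illustrative only):

    // Geometric-mean combination of two already-normalized posteriors,
    // mirroring the loop in posterior() above.
    double[] p1 = {0.7, 0.2, 0.1};   // model 1 posterior (made up)
    double[] p2 = {0.5, 0.4, 0.1};   // model 2 posterior (made up)
    double[] combined = new double[p1.length];
    double z = 0;
    for (int t = 0; t < p1.length; t++) {
        combined[t] = Math.sqrt(p1[t] * p2[t]);
        z += combined[t];            // z is the Bhattacharyya coefficient, ~0.97 here
    }
    for (int t = 0; t < p1.length; t++)
        combined[t] /= z;            // renormalize into a proper distribution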
-
- public void displayPosterior(PrintStream ps)
- {
- displayPosterior(ps, c.getEdges());
- }
-
- public void displayPosterior(PrintStream ps, List<Edge> test)
- {
- for (Edge edge : test)
- {
- double probs[] = posterior(edge);
- arr.F.l1normalize(probs);
-
- // emit phrase
- ps.print(edge.getPhraseString());
- ps.print("\t");
- ps.print(edge.getContextString(true));
- int t=arr.F.argmax(probs);
- ps.println(" ||| C=" + t);
- }
- }
-
-}
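Reading the accumulators off `EM()` and `posterior(...)`, the quantity the driver prints as `llh + bdist` decomposes, for each edge $e$ with count $c(e)$, as

\[
\texttt{llh} = \sum_e c(e)\,\bigl[\log Z_1(e) + \log Z_2(e)\bigr],
\qquad
\texttt{bdist} = \sum_e c(e)\,\log \sum_{t=1}^{K} \sqrt{p_1(t \mid e)\,p_2(t \mid e)},
\]

where $Z_i(e)$ is the normalizer of model $i$'s unnormalized posterior and the inner sum is the Bhattacharyya coefficient between the two normalized posteriors (equal to 1 when the models agree exactly, smaller otherwise).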
diff --git a/gi/posterior-regularisation/prjava/src/phrase/Agree2Sides.java b/gi/posterior-regularisation/prjava/src/phrase/Agree2Sides.java
deleted file mode 100644
index 031f887f..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/Agree2Sides.java
+++ /dev/null
@@ -1,197 +0,0 @@
-package phrase;
-
-import gnu.trove.TIntArrayList;
-
-import io.FileUtil;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.List;
-
-import phrase.Corpus.Edge;
-
-public class Agree2Sides {
- PhraseCluster model1,model2;
- Corpus c1,c2;
- private int K;
-
-	/**@brief Sum of the log-likelihoods of the two
-	 * individual models.
-	 */
- public double llh;
- /**@brief Bhattacharyya distance
- *
- */
- public double bdist;
-	/**
-	 * Builds one PhraseCluster model per side of the parallel corpus.
-	 * @param numCluster number of latent tags (clusters) K
-	 * @param corpus1 first-side concordance
-	 * @param corpus2 second-side concordance, edge-aligned with corpus1
-	 */
- public Agree2Sides(int numCluster, Corpus corpus1 , Corpus corpus2 ){
-
- model1=new PhraseCluster(numCluster, corpus1);
- model2=new PhraseCluster(numCluster,corpus2);
- c1=corpus1;
- c2=corpus2;
- K=numCluster;
-
- }
-
-	/**@brief Test driver: runs two-sided agreement EM on a pair of
-	 * concordances and writes the posteriors to a file.
-	 */
- public static void main(String args[]){
- //String in="../pdata/canned.con";
- // String in="../pdata/btec.con";
- String in1="../pdata/source.txt";
- String in2="../pdata/target.txt";
- String out="../pdata/posterior.out";
- int numCluster=25;
- Corpus corpus1 = null,corpus2=null;
- File infile1 = new File(in1),infile2=new File(in2);
- try {
- System.out.println("Reading concordance from " + infile1);
- corpus1 = Corpus.readFromFile(FileUtil.reader(infile1));
- System.out.println("Reading concordance from " + infile2);
- corpus2 = Corpus.readFromFile(FileUtil.reader(infile2));
- corpus1.printStats(System.out);
- } catch (IOException e) {
- System.err.println("Failed to open input file: " + infile1);
- e.printStackTrace();
- System.exit(1);
- }
-
- Agree2Sides agree=new Agree2Sides(numCluster, corpus1,corpus2);
- int iter=20;
- for(int i=0;i<iter;i++){
- agree.EM();
- System.out.println("Iter"+i+", llh: "+agree.llh+
- ", divergence:"+agree.bdist+
- " sum: "+(agree.llh+agree.bdist));
- }
-
- File outfile = new File (out);
- try {
- PrintStream ps = FileUtil.printstream(outfile);
- agree.displayPosterior(ps);
- // ps.println();
- // c2f.displayModelParam(ps);
- ps.close();
- } catch (IOException e) {
- System.err.println("Failed to open output file: " + outfile);
- e.printStackTrace();
- System.exit(1);
- }
-
- }
-
- public double EM(){
-
- double [][][]exp_emit1=new double [K][c1.getNumContextPositions()][c1.getNumWords()];
- double [][]exp_pi1=new double[c1.getNumPhrases()][K];
-
- double [][][]exp_emit2=new double [K][c2.getNumContextPositions()][c2.getNumWords()];
- double [][]exp_pi2=new double[c2.getNumPhrases()][K];
-
- llh=0;
- bdist=0;
-		//E-step: accumulate expected counts under the current posteriors
- for(int i=0;i<c1.getEdges().size();i++){
- Edge edge1=c1.getEdges().get(i);
- Edge edge2=c2.getEdges().get(i);
- double p[]=posterior(i);
- double z = arr.F.l1norm(p);
- assert z > 0;
- bdist += edge1.getCount() * Math.log(z);
- arr.F.l1normalize(p);
- double count = edge1.getCount();
- //increment expected count
- TIntArrayList contextToks1 = edge1.getContext();
- TIntArrayList contextToks2 = edge2.getContext();
- int phrase1=edge1.getPhraseId();
- int phrase2=edge2.getPhraseId();
- for(int tag=0;tag<K;tag++){
- for(int position=0;position<c1.getNumContextPositions();position++){
- exp_emit1[tag][position][contextToks1.get(position)]+=p[tag]*count;
- }
- for(int position=0;position<c2.getNumContextPositions();position++){
- exp_emit2[tag][position][contextToks2.get(position)]+=p[tag]*count;
- }
- exp_pi1[phrase1][tag]+=p[tag]*count;
- exp_pi2[phrase2][tag]+=p[tag]*count;
- }
- }
-
- //System.out.println("Log likelihood: "+loglikelihood);
-
-		//M-step: re-estimate the parameters by normalizing the expected counts
- for(double [][]i:exp_emit1){
- for(double []j:i){
- arr.F.l1normalize(j);
- }
- }
-
- for(double []j:exp_pi1){
- arr.F.l1normalize(j);
- }
-
- for(double [][]i:exp_emit2){
- for(double []j:i){
- arr.F.l1normalize(j);
- }
- }
-
- for(double []j:exp_pi2){
- arr.F.l1normalize(j);
- }
-
- model1.emit=exp_emit1;
- model1.pi=exp_pi1;
- model2.emit=exp_emit2;
- model2.pi=exp_pi2;
-
- return llh;
- }
-
- public double[] posterior(int edgeIdx)
- {
- return posterior(c1.getEdges().get(edgeIdx), c2.getEdges().get(edgeIdx));
- }
-
- public double[] posterior(Edge e1, Edge e2)
- {
- double[] prob1=model1.posterior(e1);
- double[] prob2=model2.posterior(e2);
-
- llh+=e1.getCount()*Math.log(arr.F.l1norm(prob1));
- llh+=e2.getCount()*Math.log(arr.F.l1norm(prob2));
- arr.F.l1normalize(prob1);
- arr.F.l1normalize(prob2);
-
- for(int i=0;i<prob1.length;i++){
- prob1[i]*=prob2[i];
- prob1[i]=Math.sqrt(prob1[i]);
- }
-
- return prob1;
- }
-
- public void displayPosterior(PrintStream ps)
- {
- for (int i=0;i<c1.getEdges().size();i++)
- {
- Edge edge=c1.getEdges().get(i);
- double probs[] = posterior(i);
- arr.F.l1normalize(probs);
-
- // emit phrase
- ps.print(edge.getPhraseString());
- ps.print("\t");
- ps.print(edge.getContextString(true));
- int t=arr.F.argmax(probs);
- ps.println(" ||| C=" + t);
- }
- }
-}
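`Agree2Sides` pairs edge `i` of `corpus1` with edge `i` of `corpus2`, so the two concordance files must be line-aligned; nothing in the class verifies this. A cheap guard one could run before training (a sketch, not part of the original code):

    // Sanity check: EM() and posterior(int) pair edges purely by index.
    if (corpus1.getEdges().size() != corpus2.getEdges().size())
        throw new IllegalArgumentException(
            "concordances are not parallel: " + corpus1.getEdges().size()
            + " vs " + corpus2.getEdges().size() + " edges");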
diff --git a/gi/posterior-regularisation/prjava/src/phrase/C2F.java b/gi/posterior-regularisation/prjava/src/phrase/C2F.java
deleted file mode 100644
index e8783950..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/C2F.java
+++ /dev/null
@@ -1,216 +0,0 @@
-package phrase;
-
-import gnu.trove.TIntArrayList;
-
-import io.FileUtil;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Arrays;
-import java.util.List;
-
-import phrase.Corpus.Edge;
-
-/**
- * @brief context generates phrase
- * @author desaic
- *
- */
-public class C2F {
- public int K;
- private int n_words, n_contexts, n_positions;
- public Corpus c;
-
- /**@brief
- * emit[tag][position][word] = p(word | tag, position in phrase)
- */
- public double emit[][][];
- /**@brief
- * pi[context][tag] = p(tag | context)
- */
- public double pi[][];
-
- public C2F(int numCluster, Corpus corpus){
- K=numCluster;
- c=corpus;
- n_words=c.getNumWords();
- n_contexts=c.getNumContexts();
-
-		//Number of phrase word positions given emission distributions:
-		//currently the first and last word in source and target.
-		//If the phrase has length 1 in either dimension then
-		//the same word is used for both positions.
- n_positions=c.phraseEdges(c.getEdges().get(0).getPhrase()).size();
-
- emit=new double [K][n_positions][n_words];
- pi=new double[n_contexts][K];
-
- for(double [][]i:emit){
- for(double []j:i){
- arr.F.randomise(j);
- }
- }
-
- for(double []j:pi){
- arr.F.randomise(j);
- }
- }
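`Corpus.phraseEdges` is defined elsewhere in the tree. One plausible reading, consistent with the constructor comment above and with `EM()` updating exactly two emission positions, is that it returns the first and last token offsets, duplicating the offset for a length-1 phrase. The sketch below is that assumption only and may differ from the real implementation (which the comment hints also deals with a source/target split):

    // Hypothetical sketch of Corpus.phraseEdges, inferred from call sites only.
    TIntArrayList phraseEdges(TIntArrayList phrase) {
        TIntArrayList offsets = new TIntArrayList();
        offsets.add(0);                  // first token
        offsets.add(phrase.size() - 1);  // last token (same as first for length 1)
        return offsets;
    }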
-
-	/**@brief Test driver: trains C2F by EM on a canned
-	 * concordance and writes the posteriors to a file.
-	 */
- public static void main(String args[]){
- String in="../pdata/canned.con";
- String out="../pdata/posterior.out";
- int numCluster=25;
- Corpus corpus = null;
- File infile = new File(in);
- try {
- System.out.println("Reading concordance from " + infile);
- corpus = Corpus.readFromFile(FileUtil.reader(infile));
- corpus.printStats(System.out);
- } catch (IOException e) {
- System.err.println("Failed to open input file: " + infile);
- e.printStackTrace();
- System.exit(1);
- }
-
- C2F c2f=new C2F(numCluster,corpus);
- int iter=20;
- double llh=0;
- for(int i=0;i<iter;i++){
- llh=c2f.EM();
- System.out.println("Iter"+i+", llh: "+llh);
- }
-
- File outfile = new File (out);
- try {
- PrintStream ps = FileUtil.printstream(outfile);
- c2f.displayPosterior(ps);
- // ps.println();
- // c2f.displayModelParam(ps);
- ps.close();
- } catch (IOException e) {
- System.err.println("Failed to open output file: " + outfile);
- e.printStackTrace();
- System.exit(1);
- }
-
- }
-
- public double EM(){
- double [][][]exp_emit=new double [K][n_positions][n_words];
- double [][]exp_pi=new double[n_contexts][K];
-
- double loglikelihood=0;
-
-		//E-step: accumulate expected counts under the current posteriors
- for(int context=0; context< n_contexts; context++){
-
- List<Edge> contexts = c.getEdgesForContext(context);
-
- for (int ctx=0; ctx<contexts.size(); ctx++){
- Edge edge = contexts.get(ctx);
- double p[]=posterior(edge);
- double z = arr.F.l1norm(p);
- assert z > 0;
- loglikelihood += edge.getCount() * Math.log(z);
- arr.F.l1normalize(p);
-
- double count = edge.getCount();
- //increment expected count
- TIntArrayList phrase= edge.getPhrase();
- for(int tag=0;tag<K;tag++){
-
- exp_emit[tag][0][phrase.get(0)]+=p[tag]*count;
- exp_emit[tag][1][phrase.get(phrase.size()-1)]+=p[tag]*count;
-
- exp_pi[context][tag]+=p[tag]*count;
- }
- }
- }
-
- //System.out.println("Log likelihood: "+loglikelihood);
-
-		//M-step: re-estimate the parameters by normalizing the expected counts
- for(double [][]i:exp_emit){
- for(double []j:i){
- arr.F.l1normalize(j);
- }
- }
-
- emit=exp_emit;
-
- for(double []j:exp_pi){
- arr.F.l1normalize(j);
- }
-
- pi=exp_pi;
-
- return loglikelihood;
- }
-
- public double[] posterior(Corpus.Edge edge)
- {
- double[] prob=Arrays.copyOf(pi[edge.getContextId()], K);
-
- TIntArrayList phrase = edge.getPhrase();
- TIntArrayList offsets = c.phraseEdges(phrase);
- for(int tag=0;tag<K;tag++)
- {
- for (int i=0; i < offsets.size(); ++i)
- prob[tag]*=emit[tag][i][phrase.get(offsets.get(i))];
- }
-
- return prob;
- }
-
- public void displayPosterior(PrintStream ps)
- {
- for (Edge edge : c.getEdges())
- {
- double probs[] = posterior(edge);
- arr.F.l1normalize(probs);
-
- // emit phrase
- ps.print(edge.getPhraseString());
- ps.print("\t");
- ps.print(edge.getContextString(true));
- int t=arr.F.argmax(probs);
- ps.println(" ||| C=" + t);
- }
- }
-
- public void displayModelParam(PrintStream ps)
- {
- final double EPS = 1e-6;
-
- ps.println("P(tag|context)");
- for (int i = 0; i < n_contexts; ++i)
- {
- ps.print(c.getContext(i));
- for(int j=0;j<pi[i].length;j++){
- if (pi[i][j] > EPS)
- ps.print("\t" + j + ": " + pi[i][j]);
- }
- ps.println();
- }
-
- ps.println("P(word|tag,position)");
- for (int i = 0; i < K; ++i)
- {
- for(int position=0;position<n_positions;position++){
- ps.println("tag " + i + " position " + position);
- for(int word=0;word<emit[i][position].length;word++){
- if (emit[i][position][word] > EPS)
- ps.print(c.getWord(word)+"="+emit[i][position][word]+"\t");
- }
- ps.println();
- }
- ps.println();
- }
-
- }
-
-}
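
[Editor sketch, notation mine rather than from the source: the unnormalized posterior that C2F.posterior computes for an edge e, with context c(e) and phrase boundary words w_i(e) selected by phraseEdges, is]

    q(t \mid e) \;\propto\; \pi_{c(e),\,t}\;\prod_{i}\beta_{t,\,i,\,w_i(e)}

[where \pi_{c,t} = pi[c][t] = p(t | c) and \beta_{t,i,w} = emit[t][i][w] = p(w | t, i). EM normalizes q per edge, accumulates count-weighted expected counts for the first and last phrase word, and l1-normalizes them in the M step.]
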
diff --git a/gi/posterior-regularisation/prjava/src/phrase/Corpus.java b/gi/posterior-regularisation/prjava/src/phrase/Corpus.java
deleted file mode 100644
index 4b1939cd..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/Corpus.java
+++ /dev/null
@@ -1,288 +0,0 @@
-package phrase;
-
-import gnu.trove.TIntArrayList;
-
-import java.io.*;
-import java.util.*;
-import java.util.regex.Pattern;
-
-
-public class Corpus
-{
- private Lexicon<String> wordLexicon = new Lexicon<String>();
- private Lexicon<TIntArrayList> phraseLexicon = new Lexicon<TIntArrayList>();
- private Lexicon<TIntArrayList> contextLexicon = new Lexicon<TIntArrayList>();
- private List<Edge> edges = new ArrayList<Edge>();
- private List<List<Edge>> phraseToContext = new ArrayList<List<Edge>>();
- private List<List<Edge>> contextToPhrase = new ArrayList<List<Edge>>();
- public int splitSentinel;
- public int phraseSentinel;
- public int rareSentinel;
-
- public Corpus()
- {
- splitSentinel = wordLexicon.insert("<SPLIT>");
- phraseSentinel = wordLexicon.insert("<PHRASE>");
- rareSentinel = wordLexicon.insert("<RARE>");
- }
-
- public class Edge
- {
-
- Edge(int phraseId, int contextId, double count,int tag)
- {
- this.phraseId = phraseId;
- this.contextId = contextId;
- this.count = count;
- fixTag=tag;
- }
-
- Edge(int phraseId, int contextId, double count)
- {
- this.phraseId = phraseId;
- this.contextId = contextId;
- this.count = count;
- fixTag=-1;
- }
- public int getTag(){
- return fixTag;
- }
-
- public int getPhraseId()
- {
- return phraseId;
- }
- public TIntArrayList getPhrase()
- {
- return Corpus.this.getPhrase(phraseId);
- }
- public String getPhraseString()
- {
- return Corpus.this.getPhraseString(phraseId);
- }
- public int getContextId()
- {
- return contextId;
- }
- public TIntArrayList getContext()
- {
- return Corpus.this.getContext(contextId);
- }
- public String getContextString(boolean insertPhraseSentinel)
- {
- return Corpus.this.getContextString(contextId, insertPhraseSentinel);
- }
- public double getCount()
- {
- return count;
- }
- public boolean equals(Object other)
- {
- if (other instanceof Edge)
- {
- Edge oe = (Edge) other;
- return oe.phraseId == phraseId && oe.contextId == contextId;
- }
- else return false;
- }
- public int hashCode()
- { // this is how boost's hash_combine does it
- int seed = phraseId;
- seed ^= contextId + 0x9e3779b9 + (seed << 6) + (seed >> 2);
- return seed;
- }
- public String toString()
- {
- return getPhraseString() + "\t" + getContextString(true);
- }
-
- private int phraseId;
- private int contextId;
- private double count;
- private int fixTag;
- }
-
- List<Edge> getEdges()
- {
- return edges;
- }
-
- int getNumEdges()
- {
- return edges.size();
- }
-
- int getNumPhrases()
- {
- return phraseLexicon.size();
- }
-
- int getNumContextPositions()
- {
- return contextLexicon.lookup(0).size();
- }
-
- List<Edge> getEdgesForPhrase(int phraseId)
- {
- return phraseToContext.get(phraseId);
- }
-
- int getNumContexts()
- {
- return contextLexicon.size();
- }
-
- List<Edge> getEdgesForContext(int contextId)
- {
- return contextToPhrase.get(contextId);
- }
-
- int getNumWords()
- {
- return wordLexicon.size();
- }
-
- String getWord(int wordId)
- {
- return wordLexicon.lookup(wordId);
- }
-
- public TIntArrayList getPhrase(int phraseId)
- {
- return phraseLexicon.lookup(phraseId);
- }
-
- public String getPhraseString(int phraseId)
- {
- StringBuffer b = new StringBuffer();
- for (int tid: getPhrase(phraseId).toNativeArray())
- {
- if (b.length() > 0)
- b.append(" ");
- b.append(wordLexicon.lookup(tid));
- }
- return b.toString();
- }
-
- public TIntArrayList getContext(int contextId)
- {
- return contextLexicon.lookup(contextId);
- }
-
- public String getContextString(int contextId, boolean insertPhraseSentinel)
- {
- StringBuffer b = new StringBuffer();
- TIntArrayList c = getContext(contextId);
- for (int i = 0; i < c.size(); ++i)
- {
- if (i > 0) b.append(" ");
- //if (i == c.size() / 2) b.append("<PHRASE> ");
- b.append(wordLexicon.lookup(c.get(i)));
- }
- return b.toString();
- }
-
- public boolean isSentinel(int wordId)
- {
- return wordId == splitSentinel || wordId == phraseSentinel;
- }
-
- List<Edge> readEdges(Reader in) throws IOException
- {
- // read in line-by-line
- BufferedReader bin = new BufferedReader(in);
- String line;
- Pattern separator = Pattern.compile(" \\|\\|\\| ");
-
- List<Edge> edges = new ArrayList<Edge>();
- while ((line = bin.readLine()) != null)
- {
- // split into phrase and contexts
- StringTokenizer st = new StringTokenizer(line, "\t");
- assert (st.hasMoreTokens());
- String phraseToks = st.nextToken();
- assert (st.hasMoreTokens());
- String rest = st.nextToken();
- assert (!st.hasMoreTokens());
-
- // process phrase
- st = new StringTokenizer(phraseToks, " ");
- TIntArrayList ptoks = new TIntArrayList();
- while (st.hasMoreTokens())
- ptoks.add(wordLexicon.insert(st.nextToken()));
- int phraseId = phraseLexicon.insert(ptoks);
-
- // process contexts
- String[] parts = separator.split(rest);
- assert (parts.length % 2 == 0);
- for (int i = 0; i < parts.length; i += 2)
- {
- // process pairs of strings - context and count
- String ctxString = parts[i];
- String countString = parts[i + 1];
-
- assert (countString.startsWith("C="));
-
- String []countToks=countString.split(" ");
-
- double count = Double.parseDouble(countToks[0].substring(2).trim());
-
- TIntArrayList ctx = new TIntArrayList();
- StringTokenizer ctxStrtok = new StringTokenizer(ctxString, " ");
- while (ctxStrtok.hasMoreTokens())
- {
- String token = ctxStrtok.nextToken();
- ctx.add(wordLexicon.insert(token));
- }
- int contextId = contextLexicon.insert(ctx);
-
-
- if(countToks.length<2){
- edges.add(new Edge(phraseId, contextId, count));
- }
- else{
- int tag=Integer.parseInt(countToks[1].substring(2));
- edges.add(new Edge(phraseId, contextId, count,tag));
- }
- }
- }
- return edges;
- }
-
- static Corpus readFromFile(Reader in) throws IOException
- {
- Corpus c = new Corpus();
- c.edges = c.readEdges(in);
- for (Edge edge: c.edges)
- {
- while (edge.getPhraseId() >= c.phraseToContext.size())
- c.phraseToContext.add(new ArrayList<Edge>());
- while (edge.getContextId() >= c.contextToPhrase.size())
- c.contextToPhrase.add(new ArrayList<Edge>());
-
- // index the edge for fast phrase, context lookup
- c.phraseToContext.get(edge.getPhraseId()).add(edge);
- c.contextToPhrase.get(edge.getContextId()).add(edge);
- }
- return c;
- }
-
- TIntArrayList phraseEdges(TIntArrayList phrase)
- {
- TIntArrayList r = new TIntArrayList(4);
- for (int p = 0; p < phrase.size(); ++p)
- {
- if (p == 0 || phrase.get(p-1) == splitSentinel)
- r.add(p);
- if (p == phrase.size() - 1 || phrase.get(p+1) == splitSentinel)
- r.add(p);
- }
- return r;
- }
-
- public void printStats(PrintStream out)
- {
- out.println("Corpus has " + edges.size() + " edges " + phraseLexicon.size() + " phrases "
- + contextLexicon.size() + " contexts and " + wordLexicon.size() + " word types");
- }
-}
\ No newline at end of file
diff --git a/gi/posterior-regularisation/prjava/src/phrase/Lexicon.java b/gi/posterior-regularisation/prjava/src/phrase/Lexicon.java
deleted file mode 100644
index a386e4a3..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/Lexicon.java
+++ /dev/null
@@ -1,34 +0,0 @@
-package phrase;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-public class Lexicon<T>
-{
- public int insert(T word)
- {
- Integer i = wordToIndex.get(word);
- if (i == null)
- {
- i = indexToWord.size();
- wordToIndex.put(word, i);
- indexToWord.add(word);
- }
- return i;
- }
-
- public T lookup(int index)
- {
- return indexToWord.get(index);
- }
-
- public int size()
- {
- return indexToWord.size();
- }
-
- private Map<T, Integer> wordToIndex = new HashMap<T, Integer>();
- private List<T> indexToWord = new ArrayList<T>();
-}
\ No newline at end of file
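
[Editor sketch of the interning contract Lexicon provides; the class name LexiconDemo and the example string are invented, not from the source:]

    import phrase.Lexicon;

    public class LexiconDemo {
        public static void main(String[] args) {
            Lexicon<String> lex = new Lexicon<String>();
            int a = lex.insert("chat");   // first insert assigns the next free id (0)
            int b = lex.insert("chat");   // re-inserting the same key returns the same id
            System.out.println(a == b);         // true
            System.out.println(lex.lookup(a));  // chat
            System.out.println(lex.size());     // 1
        }
    }
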
diff --git a/gi/posterior-regularisation/prjava/src/phrase/PhraseCluster.java b/gi/posterior-regularisation/prjava/src/phrase/PhraseCluster.java
deleted file mode 100644
index c032bb2b..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/PhraseCluster.java
+++ /dev/null
@@ -1,540 +0,0 @@
-package phrase;
-
-import gnu.trove.TIntArrayList;
-import org.apache.commons.math.special.Gamma;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.regex.Pattern;
-
-import phrase.Corpus.Edge;
-
-
-public class PhraseCluster {
-
- public int K;
- private int n_phrases, n_words, n_contexts, n_positions;
- public Corpus c;
- public ExecutorService pool;
-
- double[] lambdaPTCT;
- double[][] lambdaPT;
- boolean cacheLambda = true;
-
- // emit[tag][position][word] = p(word | tag, position in context)
- double emit[][][];
- // pi[phrase][tag] = p(tag | phrase)
- double pi[][];
-
- public PhraseCluster(int numCluster, Corpus corpus)
- {
- K=numCluster;
- c=corpus;
- n_words=c.getNumWords();
- n_phrases=c.getNumPhrases();
- n_contexts=c.getNumContexts();
- n_positions=c.getNumContextPositions();
-
- emit=new double [K][n_positions][n_words];
- pi=new double[n_phrases][K];
-
- for(double [][]i:emit)
- for(double []j:i)
- arr.F.randomise(j, true);
-
- for(double []j:pi)
- arr.F.randomise(j, true);
- }
-
- void useThreadPool(ExecutorService pool)
- {
- this.pool = pool;
- }
-
- public double EM(int phraseSizeLimit)
- {
- double [][][]exp_emit=new double [K][n_positions][n_words];
- double []exp_pi=new double[K];
-
- for(double [][]i:exp_emit)
- for(double []j:i)
- Arrays.fill(j, 1e-10);
-
- double loglikelihood=0;
-
- //E
- for(int phrase=0; phrase < n_phrases; phrase++)
- {
- if (phraseSizeLimit >= 1 && c.getPhrase(phrase).size() > phraseSizeLimit)
- continue;
-
- Arrays.fill(exp_pi, 1e-10);
-
- List<Edge> contexts = c.getEdgesForPhrase(phrase);
-
- for (int ctx=0; ctx<contexts.size(); ctx++)
- {
- Edge edge = contexts.get(ctx);
-
- double p[]=posterior(edge);
- double z = arr.F.l1norm(p);
- assert z > 0;
- loglikelihood += edge.getCount() * Math.log(z);
- arr.F.l1normalize(p);
-
- double count = edge.getCount();
- //increment expected count
- TIntArrayList context = edge.getContext();
- for(int tag=0;tag<K;tag++)
- {
- for(int pos=0;pos<n_positions;pos++){
- exp_emit[tag][pos][context.get(pos)]+=p[tag]*count;
- }
- exp_pi[tag]+=p[tag]*count;
- }
- }
- arr.F.l1normalize(exp_pi);
- System.arraycopy(exp_pi, 0, pi[phrase], 0, K);
- }
-
- //M
- for(double [][]i:exp_emit)
- for(double []j:i)
- arr.F.l1normalize(j);
-
- emit=exp_emit;
-
- return loglikelihood;
- }
-
- public double PREM(double scalePT, double scaleCT, int phraseSizeLimit)
- {
- if (scaleCT == 0)
- {
- if (pool != null)
- return PREM_phrase_constraints_parallel(scalePT, phraseSizeLimit);
- else
- return PREM_phrase_constraints(scalePT, phraseSizeLimit);
- }
- else // FIXME: ignores phraseSizeLimit
- return this.PREM_phrase_context_constraints(scalePT, scaleCT);
- }
-
-
- public double PREM_phrase_constraints(double scalePT, int phraseSizeLimit)
- {
- double [][][]exp_emit=new double[K][n_positions][n_words];
- double []exp_pi=new double[K];
-
- for(double [][]i:exp_emit)
- for(double []j:i)
- Arrays.fill(j, 1e-10);
-
- if (lambdaPT == null && cacheLambda)
- lambdaPT = new double[n_phrases][];
-
- double loglikelihood=0, kl=0, l1lmax=0, primal=0;
- int failures=0, iterations=0;
- long start = System.currentTimeMillis();
- //E
- for(int phrase=0; phrase<n_phrases; phrase++)
- {
- if (phraseSizeLimit >= 1 && c.getPhrase(phrase).size() > phraseSizeLimit)
- {
- //System.arraycopy(pi[phrase], 0, exp_pi[phrase], 0, K);
- continue;
- }
-
- Arrays.fill(exp_pi, 1e-10);
-
- // FIXME: add rare edge check to phrase objective & posterior processing
- PhraseObjective po = new PhraseObjective(this, phrase, scalePT, (cacheLambda) ? lambdaPT[phrase] : null);
- boolean ok = po.optimizeWithProjectedGradientDescent();
- if (!ok) ++failures;
- if (cacheLambda) lambdaPT[phrase] = po.getParameters();
- iterations += po.getNumberUpdateCalls();
- double [][] q=po.posterior();
- loglikelihood += po.loglikelihood();
- kl += po.KL_divergence();
- l1lmax += po.l1lmax();
- primal += po.primal(scalePT);
- List<Edge> edges = c.getEdgesForPhrase(phrase);
-
- for(int edge=0;edge<q.length;edge++){
- Edge e = edges.get(edge);
- TIntArrayList context = e.getContext();
- double contextCnt = e.getCount();
- //increment expected count
- for(int tag=0;tag<K;tag++){
- for(int pos=0;pos<n_positions;pos++){
- exp_emit[tag][pos][context.get(pos)]+=q[edge][tag]*contextCnt;
- }
-
- exp_pi[tag]+=q[edge][tag]*contextCnt;
-
- }
- }
- arr.F.l1normalize(exp_pi);
- System.arraycopy(exp_pi, 0, pi[phrase], 0, K);
- }
-
- long end = System.currentTimeMillis();
- if (failures > 0)
- System.out.println("WARNING: failed to converge in " + failures + "/" + n_phrases + " cases");
- System.out.println("\tmean iters: " + iterations/(double)n_phrases + " elapsed time " + (end - start) / 1000.0);
- System.out.println("\tllh: " + loglikelihood);
- System.out.println("\tKL: " + kl);
- System.out.println("\tphrase l1lmax: " + l1lmax);
-
- //M
- for(double [][]i:exp_emit)
- for(double []j:i)
- arr.F.l1normalize(j);
- emit=exp_emit;
-
- return primal;
- }
-
- public double PREM_phrase_constraints_parallel(final double scalePT, int phraseSizeLimit)
- {
- assert(pool != null);
-
- final LinkedBlockingQueue<PhraseObjective> expectations
- = new LinkedBlockingQueue<PhraseObjective>();
-
- double [][][]exp_emit=new double [K][n_positions][n_words];
- double [][]exp_pi=new double[n_phrases][K];
-
- for(double [][]i:exp_emit)
- for(double []j:i)
- Arrays.fill(j, 1e-10);
- for(double []j:exp_pi)
- Arrays.fill(j, 1e-10);
-
- double loglikelihood=0, kl=0, l1lmax=0, primal=0;
- final AtomicInteger failures = new AtomicInteger(0);
- final AtomicLong elapsed = new AtomicLong(0l);
- int iterations=0;
- long start = System.currentTimeMillis();
- List<Future<PhraseObjective>> results = new ArrayList<Future<PhraseObjective>>();
-
- if (lambdaPT == null && cacheLambda)
- lambdaPT = new double[n_phrases][];
-
- //E
- for(int phrase=0;phrase<n_phrases;phrase++) {
- if (phraseSizeLimit >= 1 && c.getPhrase(phrase).size() > phraseSizeLimit) {
- System.arraycopy(pi[phrase], 0, exp_pi[phrase], 0, K);
- continue;
- }
-
- final int p=phrase;
- results.add(pool.submit(new Callable<PhraseObjective>() {
- public PhraseObjective call() {
- //System.out.println("" + Thread.currentThread().getId() + " optimising lambda for " + p);
- long start = System.currentTimeMillis();
- PhraseObjective po = new PhraseObjective(PhraseCluster.this, p, scalePT, (cacheLambda) ? lambdaPT[p] : null);
- boolean ok = po.optimizeWithProjectedGradientDescent();
- if (!ok) failures.incrementAndGet();
- long end = System.currentTimeMillis();
- elapsed.addAndGet(end - start);
- //System.out.println("" + Thread.currentThread().getId() + " done optimising lambda for " + p);
- return po;
- }
- }));
- }
-
- // aggregate the expectations as they become available
- for (Future<PhraseObjective> fpo : results)
- {
- try {
- //System.out.println("" + Thread.currentThread().getId() + " reading queue #" + count);
-
- // wait (blocking) until something is ready
- PhraseObjective po = fpo.get();
- // process
- int phrase = po.phrase;
- if (cacheLambda) lambdaPT[phrase] = po.getParameters();
- //System.out.println("" + Thread.currentThread().getId() + " taken phrase " + phrase);
- double [][] q=po.posterior();
- loglikelihood += po.loglikelihood();
- kl += po.KL_divergence();
- l1lmax += po.l1lmax();
- primal += po.primal(scalePT);
- iterations += po.getNumberUpdateCalls();
-
- List<Edge> edges = c.getEdgesForPhrase(phrase);
- for(int edge=0;edge<q.length;edge++){
- Edge e = edges.get(edge);
- TIntArrayList context = e.getContext();
- double contextCnt = e.getCount();
- //increment expected count
- for(int tag=0;tag<K;tag++){
- for(int pos=0;pos<n_positions;pos++){
- exp_emit[tag][pos][context.get(pos)]+=q[edge][tag]*contextCnt;
- }
- exp_pi[phrase][tag]+=q[edge][tag]*contextCnt;
- }
- }
- } catch (InterruptedException e) {
- System.err.println("M-step thread interrupted. Probably fatal!");
- throw new RuntimeException(e);
- } catch (ExecutionException e) {
- System.err.println("M-step thread execution died. Probably fatal!");
- throw new RuntimeException(e);
- }
- }
-
- long end = System.currentTimeMillis();
-
- if (failures.get() > 0)
- System.out.println("WARNING: failed to converge in " + failures.get() + "/" + n_phrases + " cases");
- System.out.println("\tmean iters: " + iterations/(double)n_phrases + " walltime " + (end-start)/1000.0 + " threads " + elapsed.get() / 1000.0);
- System.out.println("\tllh: " + loglikelihood);
- System.out.println("\tKL: " + kl);
- System.out.println("\tphrase l1lmax: " + l1lmax);
-
- //M
- for(double [][]i:exp_emit)
- for(double []j:i)
- arr.F.l1normalize(j);
- emit=exp_emit;
-
- for(double []j:exp_pi)
- arr.F.l1normalize(j);
- pi=exp_pi;
-
- return primal;
- }
-
- public double PREM_phrase_context_constraints(double scalePT, double scaleCT)
- {
- double[][][] exp_emit = new double [K][n_positions][n_words];
- double[][] exp_pi = new double[n_phrases][K];
-
- //E step
- PhraseContextObjective pco = new PhraseContextObjective(this, lambdaPTCT, pool, scalePT, scaleCT);
- boolean ok = pco.optimizeWithProjectedGradientDescent();
- if (cacheLambda) lambdaPTCT = pco.getParameters();
-
- //now extract expectations
- List<Corpus.Edge> edges = c.getEdges();
- for(int e = 0; e < edges.size(); ++e)
- {
- double [] q = pco.posterior(e);
- Corpus.Edge edge = edges.get(e);
-
- TIntArrayList context = edge.getContext();
- double contextCnt = edge.getCount();
- //increment expected count
- for(int tag=0;tag<K;tag++)
- {
- for(int pos=0;pos<n_positions;pos++)
- exp_emit[tag][pos][context.get(pos)]+=q[tag]*contextCnt;
- exp_pi[edge.getPhraseId()][tag]+=q[tag]*contextCnt;
- }
- }
-
- System.out.println("\tllh: " + pco.loglikelihood());
- System.out.println("\tKL: " + pco.KL_divergence());
- System.out.println("\tphrase l1lmax: " + pco.phrase_l1lmax());
- System.out.println("\tcontext l1lmax: " + pco.context_l1lmax());
-
- //M step
- for(double [][]i:exp_emit)
- for(double []j:i)
- arr.F.l1normalize(j);
- emit=exp_emit;
-
- for(double []j:exp_pi)
- arr.F.l1normalize(j);
- pi=exp_pi;
-
- return pco.primal();
- }
-
- /**
- * @param phrase index of phrase
- * @param ctx array of context
- * @return unnormalized posterior
- */
- public double[] posterior(Corpus.Edge edge)
- {
- double[] prob;
-
- if(edge.getTag()>=0){
- prob=new double[K];
- prob[edge.getTag()]=1;
- return prob;
- }
-
- if (edge.getPhraseId() < n_phrases)
- prob = Arrays.copyOf(pi[edge.getPhraseId()], K);
- else
- {
- prob = new double[K];
- Arrays.fill(prob, 1.0);
- }
-
- TIntArrayList ctx = edge.getContext();
- for(int tag=0;tag<K;tag++)
- {
- for(int c=0;c<n_positions;c++)
- {
- int word = ctx.get(c);
- if (!this.c.isSentinel(word) && word < n_words)
- prob[tag]*=emit[tag][c][word];
- }
- }
-
- return prob;
- }
-
- public void displayPosterior(PrintStream ps, List<Edge> testing)
- {
- for (Edge edge : testing)
- {
- double probs[] = posterior(edge);
- arr.F.l1normalize(probs);
-
- // emit phrase
- ps.print(edge.getPhraseString());
- ps.print("\t");
- ps.print(edge.getContextString(true));
- int t=arr.F.argmax(probs);
- ps.println(" ||| C=" + t + " T=" + edge.getCount() + " P=" + probs[t]);
- //ps.println("# probs " + Arrays.toString(probs));
- }
- }
-
- public void displayModelParam(PrintStream ps)
- {
- final double EPS = 1e-6;
- ps.println("phrases " + n_phrases + " tags " + K + " positions " + n_positions);
-
- for (int i = 0; i < n_phrases; ++i)
- for(int j=0;j<pi[i].length;j++)
- if (pi[i][j] > EPS)
- ps.println(i + " " + j + " " + pi[i][j]);
-
- ps.println();
- for (int i = 0; i < K; ++i)
- {
- for(int position=0;position<n_positions;position++)
- {
- for(int word=0;word<emit[i][position].length;word++)
- {
- if (emit[i][position][word] > EPS)
- ps.println(i + " " + position + " " + word + " " + emit[i][position][word]);
- }
- }
- }
- }
-
- double phrase_l1lmax()
- {
- double sum=0;
- for(int phrase=0; phrase<n_phrases; phrase++)
- {
- double [] maxes = new double[K];
- for (Edge edge : c.getEdgesForPhrase(phrase))
- {
- double p[] = posterior(edge);
- arr.F.l1normalize(p);
- for(int tag=0;tag<K;tag++)
- maxes[tag] = Math.max(maxes[tag], p[tag]);
- }
- for(int tag=0;tag<K;tag++)
- sum += maxes[tag];
- }
- return sum;
- }
-
- double context_l1lmax()
- {
- double sum=0;
- for(int context=0; context<n_contexts; context++)
- {
- double [] maxes = new double[K];
- for (Edge edge : c.getEdgesForContext(context))
- {
- double p[] = posterior(edge);
- arr.F.l1normalize(p);
- for(int tag=0;tag<K;tag++)
- maxes[tag] = Math.max(maxes[tag], p[tag]);
- }
- for(int tag=0;tag<K;tag++)
- sum += maxes[tag];
- }
- return sum;
- }
-
- public void loadParameters(BufferedReader input) throws IOException
- {
- final double EPS = 1e-50;
-
- // overwrite pi, emit with ~zeros
- for(double [][]i:emit)
- for(double []j:i)
- Arrays.fill(j, EPS);
-
- for(double []j:pi)
- Arrays.fill(j, EPS);
-
- String line = input.readLine();
- assert line != null;
-
- Pattern space = Pattern.compile(" +");
- String[] parts = space.split(line);
- assert parts.length == 6;
-
- assert parts[0].equals("phrases");
- int phrases = Integer.parseInt(parts[1]);
- int tags = Integer.parseInt(parts[3]);
- int positions = Integer.parseInt(parts[5]);
-
- assert phrases == n_phrases;
- assert tags == K;
- assert positions == n_positions;
-
- // read in pi
- while ((line = input.readLine()) != null)
- {
- line = line.trim();
- if (line.isEmpty()) break;
-
- String[] tokens = space.split(line);
- assert tokens.length == 3;
- int p = Integer.parseInt(tokens[0]);
- int t = Integer.parseInt(tokens[1]);
- double v = Double.parseDouble(tokens[2]);
-
- pi[p][t] = v;
- }
-
- // read in emissions
- while ((line = input.readLine()) != null)
- {
- String[] tokens = space.split(line);
- assert tokens.length == 4;
- int t = Integer.parseInt(tokens[0]);
- int p = Integer.parseInt(tokens[1]);
- int w = Integer.parseInt(tokens[2]);
- double v = Double.parseDouble(tokens[3]);
-
- emit[t][p][w] = v;
- }
- }
-}
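
[Editor sketch of the basic EM loop around PhraseCluster; not an official entry point (Trainer.java below is the real driver). The class name ClusterDemo is invented, the file paths are borrowed from the C2F and VB test mains, and the class sits in package phrase because readFromFile and getEdges are package-private:]

    package phrase;

    import io.FileUtil;
    import java.io.File;
    import java.io.IOException;
    import java.io.PrintStream;

    public class ClusterDemo {
        public static void main(String[] args) throws IOException {
            Corpus corpus = Corpus.readFromFile(FileUtil.reader(new File("../pdata/btec.con")));
            PhraseCluster cluster = new PhraseCluster(25, corpus);   // K = 25 tags
            for (int i = 0; i < 20; i++)                             // plain EM, no PR constraints
                System.out.println("iter " + i + " llh " + cluster.EM(0));
            PrintStream ps = FileUtil.printstream(new File("../pdata/posterior.out"));
            cluster.displayPosterior(ps, corpus.getEdges());
            ps.close();
        }
    }
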
diff --git a/gi/posterior-regularisation/prjava/src/phrase/PhraseContextObjective.java b/gi/posterior-regularisation/prjava/src/phrase/PhraseContextObjective.java
deleted file mode 100644
index 646ff392..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/PhraseContextObjective.java
+++ /dev/null
@@ -1,436 +0,0 @@
-package phrase;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-
-import optimization.gradientBasedMethods.ProjectedGradientDescent;
-import optimization.gradientBasedMethods.ProjectedObjective;
-import optimization.gradientBasedMethods.stats.OptimizerStats;
-import optimization.linesearch.ArmijoLineSearchMinimizationAlongProjectionArc;
-import optimization.linesearch.InterpolationPickFirstStep;
-import optimization.linesearch.LineSearchMethod;
-import optimization.projections.SimplexProjection;
-import optimization.stopCriteria.CompositeStopingCriteria;
-import optimization.stopCriteria.ProjectedGradientL2Norm;
-import optimization.stopCriteria.StopingCriteria;
-import optimization.stopCriteria.ValueDifference;
-import optimization.util.MathUtils;
-import phrase.Corpus.Edge;
-
-public class PhraseContextObjective extends ProjectedObjective
-{
- private static final double GRAD_DIFF = 0.00002;
- private static double INIT_STEP_SIZE = 300;
- private static double VAL_DIFF = 1e-8;
- private static int ITERATIONS = 20;
- boolean debug = false;
-
- private PhraseCluster c;
-
- // un-regularized unnormalized posterior, p[edge][tag]
- // P(tag|edge) \propto P(tag|phrase)P(context|tag)
- private double p[][];
-
- // regularized unnormalized posterior
- // q[edge][tag] propto p[edge][tag]*exp(-lambda)
- private double q[][];
- private List<Corpus.Edge> data;
-
- // log likelihood under q
- private double loglikelihood;
- private SimplexProjection projectionPhrase;
- private SimplexProjection projectionContext;
-
- double[] newPoint;
- private int n_param;
-
- // likelihood under p
- public double llh;
-
- private static Map<Corpus.Edge, Integer> edgeIndex;
-
- private long projectionTime;
- private long objectiveTime;
- private long actualProjectionTime;
- private ExecutorService pool;
-
- double scalePT;
- double scaleCT;
-
- public PhraseContextObjective(PhraseCluster cluster, double[] startingParameters, ExecutorService pool,
- double scalePT, double scaleCT)
- {
- c=cluster;
- data=c.c.getEdges();
- n_param=data.size()*c.K*2;
- this.pool=pool;
- this.scalePT = scalePT;
- this.scaleCT = scaleCT;
-
- parameters = startingParameters;
- if (parameters == null)
- parameters = new double[n_param];
-
- System.out.println("Num parameters " + n_param);
- newPoint = new double[n_param];
- gradient = new double[n_param];
- initP();
- projectionPhrase = new SimplexProjection(scalePT);
- projectionContext = new SimplexProjection(scaleCT);
- q=new double [data.size()][c.K];
-
- if (edgeIndex == null) {
- edgeIndex = new HashMap<Edge, Integer>();
- for (int e=0; e<data.size(); e++)
- {
- edgeIndex.put(data.get(e), e);
- //if (debug) System.out.println("Edge " + data.get(e) + " index " + e);
- }
- }
-
- setParameters(parameters);
- }
-
- private void initP(){
- p=new double[data.size()][];
- for(int edge=0;edge<data.size();edge++)
- {
- p[edge]=c.posterior(data.get(edge));
- llh += data.get(edge).getCount() * Math.log(arr.F.l1norm(p[edge]));
- arr.F.l1normalize(p[edge]);
- }
- }
-
- @Override
- public void setParameters(double[] params) {
- //System.out.println("setParameters " + Arrays.toString(parameters));
- // TODO: test if params have changed and skip update otherwise
- super.setParameters(params);
- updateFunction();
- }
-
- private void updateFunction()
- {
- updateCalls++;
- loglikelihood=0;
-
- System.out.print(".");
- System.out.flush();
-
- long begin = System.currentTimeMillis();
- for (int e=0; e<data.size(); e++)
- {
- Edge edge = data.get(e);
- for(int tag=0; tag<c.K; tag++)
- {
- int ip = index(e, tag, true);
- int ic = index(e, tag, false);
- q[e][tag] = p[e][tag]*
- Math.exp((-parameters[ip]-parameters[ic]) / edge.getCount());
- //if (debug)
- //System.out.println("\tposterior " + edge + " with tag " + tag + " p " + p[e][tag] + " params " + parameters[ip] + " and " + parameters[ic] + " q " + q[e][tag]);
- }
- }
-
- for(int edge=0;edge<data.size();edge++) {
- loglikelihood+=data.get(edge).getCount() * Math.log(arr.F.l1norm(q[edge]));
- arr.F.l1normalize(q[edge]);
- }
-
- for (int e=0; e<data.size(); e++)
- {
- for(int tag=0; tag<c.K; tag++)
- {
- int ip = index(e, tag, true);
- int ic = index(e, tag, false);
- gradient[ip]=-q[e][tag];
- gradient[ic]=-q[e][tag];
- }
- }
- //if (debug) {
- //System.out.println("objective " + loglikelihood + " ||gradient||_2: " + arr.F.l2norm(gradient));
- //System.out.println("gradient " + Arrays.toString(gradient));
- //}
- objectiveTime += System.currentTimeMillis() - begin;
- }
-
- @Override
- public double[] projectPoint(double[] point)
- {
- long begin = System.currentTimeMillis();
- List<Future<?>> tasks = new ArrayList<Future<?>>();
-
- System.out.print(",");
- System.out.flush();
-
- Arrays.fill(newPoint, 0, newPoint.length, 0);
-
- // first project using the phrase-tag constraints,
- // for all p,t: sum_c lambda_ptc < scaleP
- if (pool == null)
- {
- for (int p = 0; p < c.c.getNumPhrases(); ++p)
- {
- List<Edge> edges = c.c.getEdgesForPhrase(p);
- double[] toProject = new double[edges.size()];
- for(int tag=0;tag<c.K;tag++)
- {
- // FIXME: slow hash lookup for e (twice)
- for(int e=0; e<edges.size(); e++)
- toProject[e] = point[index(edges.get(e), tag, true)];
- long lbegin = System.currentTimeMillis();
- projectionPhrase.project(toProject);
- actualProjectionTime += System.currentTimeMillis() - lbegin;
- for(int e=0; e<edges.size(); e++)
- newPoint[index(edges.get(e), tag, true)] = toProject[e];
- }
- }
- }
- else // do above in parallel using thread pool
- {
- for (int p = 0; p < c.c.getNumPhrases(); ++p)
- {
- final int phrase = p;
- final double[] inPoint = point;
- Runnable task = new Runnable()
- {
- public void run()
- {
- List<Edge> edges = c.c.getEdgesForPhrase(phrase);
- double toProject[] = new double[edges.size()];
- for(int tag=0;tag<c.K;tag++)
- {
- // FIXME: slow hash lookup for e
- for(int e=0; e<edges.size(); e++)
- toProject[e] = inPoint[index(edges.get(e), tag, true)];
- projectionPhrase.project(toProject);
- for(int e=0; e<edges.size(); e++)
- newPoint[index(edges.get(e), tag, true)] = toProject[e];
- }
- }
- };
- tasks.add(pool.submit(task));
- }
- }
- //System.out.println("after PT " + Arrays.toString(newPoint));
-
- // now project using the context-tag constraints,
- // for all c,t: sum_p omega_pct < scaleC
- if (pool == null)
- {
- for (int ctx = 0; ctx < c.c.getNumContexts(); ++ctx)
- {
- List<Edge> edges = c.c.getEdgesForContext(ctx);
- double toProject[] = new double[edges.size()];
- for(int tag=0;tag<c.K;tag++)
- {
- // FIXME: slow hash lookup for e
- for(int e=0; e<edges.size(); e++)
- toProject[e] = point[index(edges.get(e), tag, false)];
- long lbegin = System.currentTimeMillis();
- projectionContext.project(toProject);
- actualProjectionTime += System.currentTimeMillis() - lbegin;
- for(int e=0; e<edges.size(); e++)
- newPoint[index(edges.get(e), tag, false)] = toProject[e];
- }
- }
- }
- else
- {
- // do above in parallel using thread pool
- for (int ctx = 0; ctx < c.c.getNumContexts(); ++ctx)
- {
- final int context = ctx;
- final double[] inPoint = point;
- Runnable task = new Runnable()
- {
- public void run()
- {
- List<Edge> edges = c.c.getEdgesForContext(context);
- double toProject[] = new double[edges.size()];
- for(int tag=0;tag<c.K;tag++)
- {
- // FIXME: slow hash lookup for e
- for(int e=0; e<edges.size(); e++)
- toProject[e] = inPoint[index(edges.get(e), tag, false)];
- projectionContext.project(toProject);
- for(int e=0; e<edges.size(); e++)
- newPoint[index(edges.get(e), tag, false)] = toProject[e];
- }
- }
- };
- tasks.add(pool.submit(task));
- }
- }
-
- if (pool != null)
- {
- // wait for all the jobs to complete
- Exception failure = null;
- for (Future<?> task: tasks)
- {
- try {
- task.get();
- } catch (InterruptedException e) {
- System.err.println("ERROR: Projection thread interrupted");
- e.printStackTrace();
- failure = e;
- } catch (ExecutionException e) {
- System.err.println("ERROR: Projection thread died");
- e.printStackTrace();
- failure = e;
- }
- }
- // rethrow the exception
- if (failure != null)
- {
- pool.shutdownNow();
- throw new RuntimeException(failure);
- }
- }
-
- double[] tmp = newPoint;
- newPoint = point;
- projectionTime += System.currentTimeMillis() - begin;
-
- //if (debug)
- //System.out.println("\t\treturning " + Arrays.toString(tmp));
- return tmp;
- }
-
- private int index(Edge edge, int tag, boolean phrase)
- {
- // NB if indexing changes must also change code in updateFunction and constructor
- if (phrase)
- return tag * edgeIndex.size() + edgeIndex.get(edge);
- else
- return (c.K + tag) * edgeIndex.size() + edgeIndex.get(edge);
- }
-
- private int index(int e, int tag, boolean phrase)
- {
- // NB if indexing changes must also change code in updateFunction and constructor
- if (phrase)
- return tag * edgeIndex.size() + e;
- else
- return (c.K + tag) * edgeIndex.size() + e;
- }
-
- @Override
- public double[] getGradient() {
- gradientCalls++;
- return gradient;
- }
-
- @Override
- public double getValue() {
- functionCalls++;
- return loglikelihood;
- }
-
- @Override
- public String toString() {
- return "No need for pointless toString";
- }
-
- public double []posterior(int edgeIndex){
- return q[edgeIndex];
- }
-
- public boolean optimizeWithProjectedGradientDescent()
- {
- projectionTime = 0;
- actualProjectionTime = 0;
- objectiveTime = 0;
- long start = System.currentTimeMillis();
-
- LineSearchMethod ls =
- new ArmijoLineSearchMinimizationAlongProjectionArc
- (new InterpolationPickFirstStep(INIT_STEP_SIZE));
- //LineSearchMethod ls = new WolfRuleLineSearch(
- // (new InterpolationPickFirstStep(INIT_STEP_SIZE)), c1, c2);
- OptimizerStats stats = new OptimizerStats();
-
-
- ProjectedGradientDescent optimizer = new ProjectedGradientDescent(ls);
- StopingCriteria stopGrad = new ProjectedGradientL2Norm(GRAD_DIFF);
- StopingCriteria stopValue = new ValueDifference(VAL_DIFF*(-llh));
- CompositeStopingCriteria compositeStop = new CompositeStopingCriteria();
- compositeStop.add(stopGrad);
- compositeStop.add(stopValue);
- optimizer.setMaxIterations(ITERATIONS);
- updateFunction();
- boolean success = optimizer.optimize(this,stats,compositeStop);
-
- System.out.println();
- System.out.println(stats.prettyPrint(1));
-
- if (success)
- System.out.print("\toptimization took " + optimizer.getCurrentIteration() + " iterations");
- else
- System.out.print("\toptimization failed to converge");
- long total = System.currentTimeMillis() - start;
- System.out.println(" and " + total + " ms: projection " + projectionTime +
- " actual " + actualProjectionTime + " objective " + objectiveTime);
-
- return success;
- }
-
- double loglikelihood()
- {
- return llh;
- }
-
- double KL_divergence()
- {
- return -loglikelihood + MathUtils.dotProduct(parameters, gradient);
- }
-
- double phrase_l1lmax()
- {
- // \sum_{tag,phrase} max_{context} P(tag|context,phrase)
- double sum=0;
- for (int p = 0; p < c.c.getNumPhrases(); ++p)
- {
- List<Edge> edges = c.c.getEdgesForPhrase(p);
- for(int tag=0;tag<c.K;tag++)
- {
- double max=0;
- for (Edge edge: edges)
- max = Math.max(max, q[edgeIndex.get(edge)][tag]);
- sum+=max;
- }
- }
- return sum;
- }
-
- double context_l1lmax()
- {
- // \sum_{tag,context} max_{phrase} P(tag|context,phrase)
- double sum=0;
- for (int ctx = 0; ctx < c.c.getNumContexts(); ++ctx)
- {
- List<Edge> edges = c.c.getEdgesForContext(ctx);
- for(int tag=0; tag<c.K; tag++)
- {
- double max=0;
- for (Edge edge: edges)
- max = Math.max(max, q[edgeIndex.get(edge)][tag]);
- sum+=max;
- }
- }
- return sum;
- }
-
- // L - KL(q||p) - scalePT * l1lmax_phrase - scaleCT * l1lmax_context
- public double primal()
- {
- return loglikelihood() - KL_divergence() - scalePT * phrase_l1lmax() - scaleCT * context_l1lmax();
- }
-}
\ No newline at end of file
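
[Editor sketch, notation mine: written out, the primal that the comment above primal() abbreviates, with sigma_PT and sigma_CT standing for scalePT and scaleCT and q the regularized posterior, is]

    \mathrm{primal} \;=\; \mathcal{L} \;-\; \mathrm{KL}(q\,\|\,p) \;-\; \sigma_{PT}\sum_{p,t}\max_{c}\,q(t\mid p,c) \;-\; \sigma_{CT}\sum_{c,t}\max_{p}\,q(t\mid p,c)

[where the two max-sums are exactly what phrase_l1lmax() and context_l1lmax() above compute.]
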
diff --git a/gi/posterior-regularisation/prjava/src/phrase/PhraseCorpus.java b/gi/posterior-regularisation/prjava/src/phrase/PhraseCorpus.java
deleted file mode 100644
index 0cf31c1c..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/PhraseCorpus.java
+++ /dev/null
@@ -1,193 +0,0 @@
-package phrase;
-
-import io.FileUtil;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Scanner;
-
-public class PhraseCorpus
-{
- public HashMap<String,Integer>wordLex;
- public HashMap<String,Integer>phraseLex;
-
- public String wordList[];
- public String phraseList[];
-
- //data[phrase][num context][position]
- public int data[][][];
- public int numContexts;
-
- public PhraseCorpus(String filename) throws FileNotFoundException, IOException
- {
- BufferedReader r = FileUtil.reader(new File(filename));
-
- phraseLex=new HashMap<String,Integer>();
- wordLex=new HashMap<String,Integer>();
-
- ArrayList<int[][]>dataList=new ArrayList<int[][]>();
- String line=null;
- numContexts = 0;
-
- while((line=readLine(r))!=null){
-
- String toks[]=line.split("\t");
- String phrase=toks[0];
- addLex(phrase,phraseLex);
-
- toks=toks[1].split(" \\|\\|\\| ");
-
- ArrayList <int[]>ctxList=new ArrayList<int[]>();
-
- for(int i=0;i<toks.length;i+=2){
- String ctx=toks[i];
- String words[]=ctx.split(" ");
- if (numContexts == 0)
- numContexts = words.length - 1;
- else
- assert numContexts == words.length - 1;
-
- int []context=new int [numContexts+1];
- int idx=0;
- for(String word:words){
- if(word.equals("<PHRASE>")){
- continue;
- }
- addLex(word,wordLex);
- context[idx]=wordLex.get(word);
- idx++;
- }
-
- String count=toks[i+1];
- context[idx]=Integer.parseInt(count.trim().substring(2));
-
- ctxList.add(context);
- }
-
- dataList.add(ctxList.toArray(new int [0][]));
-
- }
- try{
- r.close();
- }catch(IOException ioe){
- ioe.printStackTrace();
- }
- data=dataList.toArray(new int[0][][]);
- }
-
- private void addLex(String key, HashMap<String,Integer>lex){
- Integer i=lex.get(key);
- if(i==null){
- lex.put(key, lex.size());
- }
- }
-
- //for debugging
- public void saveLex(String lexFilename) throws FileNotFoundException, IOException
- {
- PrintStream ps = FileUtil.printstream(new File(lexFilename));
- ps.println("Phrase Lexicon");
- ps.println(phraseLex.size());
- printDict(phraseLex,ps);
-
- ps.println("Word Lexicon");
- ps.println(wordLex.size());
- printDict(wordLex,ps);
- ps.close();
- }
-
- private static void printDict(HashMap<String,Integer>lex,PrintStream ps){
- String []dict=buildList(lex);
- for(int i=0;i<dict.length;i++){
- ps.println(dict[i]);
- }
- }
-
- public void loadLex(String lexFilename){
- Scanner sc=io.FileUtil.openInFile(lexFilename);
-
- sc.nextLine();
- int size=sc.nextInt();
- sc.nextLine();
- String[]dict=new String[size];
- for(int i=0;i<size;i++){
- dict[i]=sc.nextLine();
- }
- phraseLex=buildMap(dict);
-
- sc.nextLine();
- size=sc.nextInt();
- sc.nextLine();
- dict=new String[size];
- for(int i=0;i<size;i++){
- dict[i]=sc.nextLine();
- }
- wordLex=buildMap(dict);
- sc.close();
- }
-
- private HashMap<String, Integer> buildMap(String[]dict){
- HashMap<String,Integer> map=new HashMap<String,Integer>();
- for(int i=0;i<dict.length;i++){
- map.put(dict[i], i);
- }
- return map;
- }
-
- public void buildList(){
- if(wordList==null){
- wordList=buildList(wordLex);
- phraseList=buildList(phraseLex);
- }
- }
-
- private static String[]buildList(HashMap<String,Integer>lex){
- String dict[]=new String [lex.size()];
- for(String key:lex.keySet()){
- dict[lex.get(key)]=key;
- }
- return dict;
- }
-
- public String getContextString(int context[], boolean addPhraseMarker)
- {
- StringBuffer b = new StringBuffer();
- for (int i=0;i<context.length-1;i++)
- {
- if (b.length() > 0)
- b.append(" ");
-
- if (i == context.length/2)
- b.append("<PHRASE> ");
-
- b.append(wordList[context[i]]);
- }
- return b.toString();
- }
-
- public static String readLine(BufferedReader r){
- try{
- return r.readLine();
- }
- catch(IOException ioe){
- ioe.printStackTrace();
- }
- return null;
- }
-
- public static void main(String[] args) throws Exception
- {
- String LEX_FILENAME="../pdata/lex.out";
- String DATA_FILENAME="../pdata/btec.con";
- PhraseCorpus c=new PhraseCorpus(DATA_FILENAME);
- c.saveLex(LEX_FILENAME);
- c.loadLex(LEX_FILENAME);
- c.saveLex(LEX_FILENAME);
- }
-}
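
[Editor note: this loader and Corpus.readEdges earlier consume the same concordance format: one phrase per line, a tab, then alternating context / count fields separated by ` ||| `. A hypothetical input line, with invented tokens and <TAB> marking a literal tab:]

    la situation<TAB>de <PHRASE> dans ||| C=4 ||| sur <PHRASE> actuelle ||| C=1

[Corpus.readEdges additionally accepts an optional fixed-tag suffix on the count field (e.g. `C=1 T=2`); PhraseCorpus runs Integer.parseInt on the raw count and would reject that suffix, so tagged counts only work with the newer loader.]
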
diff --git a/gi/posterior-regularisation/prjava/src/phrase/PhraseObjective.java b/gi/posterior-regularisation/prjava/src/phrase/PhraseObjective.java
deleted file mode 100644
index ac73a075..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/PhraseObjective.java
+++ /dev/null
@@ -1,224 +0,0 @@
-package phrase;
-
-import java.util.Arrays;
-import java.util.List;
-
-import optimization.gradientBasedMethods.ProjectedGradientDescent;
-import optimization.gradientBasedMethods.ProjectedObjective;
-import optimization.gradientBasedMethods.stats.OptimizerStats;
-import optimization.linesearch.ArmijoLineSearchMinimizationAlongProjectionArc;
-import optimization.linesearch.InterpolationPickFirstStep;
-import optimization.linesearch.LineSearchMethod;
-import optimization.linesearch.WolfRuleLineSearch;
-import optimization.projections.SimplexProjection;
-import optimization.stopCriteria.CompositeStopingCriteria;
-import optimization.stopCriteria.ProjectedGradientL2Norm;
-import optimization.stopCriteria.StopingCriteria;
-import optimization.stopCriteria.ValueDifference;
-import optimization.util.MathUtils;
-
-public class PhraseObjective extends ProjectedObjective
-{
- static final double GRAD_DIFF = 0.00002;
- static double INIT_STEP_SIZE = 300;
- static double VAL_DIFF = 1e-8; // tuned to BTEC subsample
- static int ITERATIONS = 100;
- private PhraseCluster c;
-
- /**@brief
- * for debugging purposes
- */
- //public static PrintStream ps;
-
- /**@brief current phrase being optimized*/
- public int phrase;
-
- /**@brief un-regularized posterior
- * unnormalized
- * p[edge][tag]
- * P(tag|edge) \propto P(tag|phrase)P(context|tag)
- */
- private double[][]p;
-
- /**@brief regularized posterior
- * q[edge][tag] propto p[edge][tag]*exp(-lambda)
- */
- private double q[][];
- private List<Corpus.Edge> data;
-
- /**@brief log likelihood of the associated phrase
- *
- */
- private double loglikelihood;
- private SimplexProjection projection;
-
- double[] newPoint ;
-
- private int n_param;
-
- /**@brief likelihood under p
- *
- */
- public double llh;
-
- public PhraseObjective(PhraseCluster cluster, int phraseIdx, double scale, double[] lambda){
- phrase=phraseIdx;
- c=cluster;
- data=c.c.getEdgesForPhrase(phrase);
- n_param=data.size()*c.K;
- //System.out.println("Num parameters " + n_param + " for phrase #" + phraseIdx);
-
- if (lambda==null)
- lambda=new double[n_param];
-
- parameters = lambda;
- newPoint = new double[n_param];
- gradient = new double[n_param];
- initP();
- projection=new SimplexProjection(scale);
- q=new double [data.size()][c.K];
-
- setParameters(parameters);
- }
-
- private void initP(){
- p=new double[data.size()][];
- for(int edge=0;edge<data.size();edge++){
- p[edge]=c.posterior(data.get(edge));
- llh += data.get(edge).getCount() * Math.log(arr.F.l1norm(p[edge])); // Was bug here - count inside log!
- arr.F.l1normalize(p[edge]);
- }
- }
-
- @Override
- public void setParameters(double[] params) {
- super.setParameters(params);
- updateFunction();
- }
-
- private void updateFunction(){
- updateCalls++;
- loglikelihood=0;
-
- for(int tag=0;tag<c.K;tag++){
- for(int edge=0;edge<data.size();edge++){
- q[edge][tag]=p[edge][tag]*
- Math.exp(-parameters[tag*data.size()+edge]/data.get(edge).getCount());
- }
- }
-
- for(int edge=0;edge<data.size();edge++){
- loglikelihood+=data.get(edge).getCount() * Math.log(arr.F.l1norm(q[edge]));
- arr.F.l1normalize(q[edge]);
- }
-
- for(int tag=0;tag<c.K;tag++){
- for(int edge=0;edge<data.size();edge++){
- gradient[tag*data.size()+edge]=-q[edge][tag];
- }
- }
- }
-
- @Override
- public double[] projectPoint(double[] point)
- {
- double toProject[]=new double[data.size()];
- for(int tag=0;tag<c.K;tag++){
- for(int edge=0;edge<data.size();edge++){
- toProject[edge]=point[tag*data.size()+edge];
- }
- projection.project(toProject);
- for(int edge=0;edge<data.size();edge++){
- newPoint[tag*data.size()+edge]=toProject[edge];
- }
- }
- return newPoint;
- }
-
- @Override
- public double[] getGradient() {
- gradientCalls++;
- return gradient;
- }
-
- @Override
- public double getValue() {
- functionCalls++;
- return loglikelihood;
- }
-
- @Override
- public String toString() {
- return Arrays.toString(parameters);
- }
-
- public double [][]posterior(){
- return q;
- }
-
- long optimizationTime;
-
- public boolean optimizeWithProjectedGradientDescent(){
- long start = System.currentTimeMillis();
-
- LineSearchMethod ls =
- new ArmijoLineSearchMinimizationAlongProjectionArc
- (new InterpolationPickFirstStep(INIT_STEP_SIZE));
- //LineSearchMethod ls = new WolfRuleLineSearch(
- // (new InterpolationPickFirstStep(INIT_STEP_SIZE)), c1, c2);
- OptimizerStats stats = new OptimizerStats();
-
-
- ProjectedGradientDescent optimizer = new ProjectedGradientDescent(ls);
- StopingCriteria stopGrad = new ProjectedGradientL2Norm(GRAD_DIFF);
- StopingCriteria stopValue = new ValueDifference(VAL_DIFF*(-llh));
- CompositeStopingCriteria compositeStop = new CompositeStopingCriteria();
- compositeStop.add(stopGrad);
- compositeStop.add(stopValue);
- optimizer.setMaxIterations(ITERATIONS);
- updateFunction();
- boolean success = optimizer.optimize(this,stats,compositeStop);
- //System.out.println("Ended optimzation Projected Gradient Descent\n" + stats.prettyPrint(1));
- //if(succed){
- //System.out.println("Ended optimization in " + optimizer.getCurrentIteration());
- //}else{
-// System.out.println("Failed to optimize");
- //}
- //System.out.println(Arrays.toString(parameters));
-
- // for(int edge=0;edge<data.getSize();edge++){
- // ps.println(Arrays.toString(q[edge]));
- // }
-
- return success;
- }
-
- public double KL_divergence()
- {
- return -loglikelihood + MathUtils.dotProduct(parameters, gradient);
- }
-
- public double loglikelihood()
- {
- return llh;
- }
-
- public double l1lmax()
- {
- double sum=0;
- for(int tag=0;tag<c.K;tag++){
- double max=0;
- for(int edge=0;edge<data.size();edge++){
- if(q[edge][tag]>max)
- max=q[edge][tag];
- }
- sum+=max;
- }
- return sum;
- }
-
- public double primal(double scale)
- {
- return loglikelihood() - KL_divergence() - scale * l1lmax();
- }
-}
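
[Editor sketch, notation mine: the per-phrase E-step this class solves is projected gradient ascent over dual variables lambda >= 0 with, for each tag t, sum_e lambda_{t,e} <= scale (the constraint SimplexProjection(scale) enforces, as I read projectPoint above), and the regularized posterior]

    q(t \mid e) \;\propto\; p(t \mid e)\,\exp\!\big(-\lambda_{t,e}/\mathrm{count}(e)\big)

[The division by count(e) presumably makes the penalty act per occurrence rather than per edge type; primal(scale) then reports L - KL(q||p) - scale * l1lmax, mirroring the phrase term of PhraseContextObjective's primal.]
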
diff --git a/gi/posterior-regularisation/prjava/src/phrase/Trainer.java b/gi/posterior-regularisation/prjava/src/phrase/Trainer.java
deleted file mode 100644
index 6f302b20..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/Trainer.java
+++ /dev/null
@@ -1,257 +0,0 @@
-package phrase;
-
-import io.FileUtil;
-import joptsimple.OptionParser;
-import joptsimple.OptionSet;
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-import phrase.Corpus.Edge;
-
-import arr.F;
-
-public class Trainer
-{
- public static void main(String[] args)
- {
- OptionParser parser = new OptionParser();
- parser.accepts("help");
- parser.accepts("in").withRequiredArg().ofType(File.class);
- parser.accepts("in1").withRequiredArg().ofType(File.class);
- parser.accepts("test").withRequiredArg().ofType(File.class);
- parser.accepts("out").withRequiredArg().ofType(File.class);
- parser.accepts("start").withRequiredArg().ofType(File.class);
- parser.accepts("parameters").withRequiredArg().ofType(File.class);
- parser.accepts("topics").withRequiredArg().ofType(Integer.class).defaultsTo(5);
- parser.accepts("iterations").withRequiredArg().ofType(Integer.class).defaultsTo(10);
- parser.accepts("threads").withRequiredArg().ofType(Integer.class).defaultsTo(0);
- parser.accepts("scale-phrase").withRequiredArg().ofType(Double.class).defaultsTo(0.0);
- parser.accepts("scale-context").withRequiredArg().ofType(Double.class).defaultsTo(0.0);
- parser.accepts("seed").withRequiredArg().ofType(Long.class).defaultsTo(0l);
- parser.accepts("convergence-threshold").withRequiredArg().ofType(Double.class).defaultsTo(1e-6);
- parser.accepts("variational-bayes");
- parser.accepts("alpha-emit").withRequiredArg().ofType(Double.class).defaultsTo(0.1);
- parser.accepts("alpha-pi").withRequiredArg().ofType(Double.class).defaultsTo(0.0001);
- parser.accepts("agree-direction");
- parser.accepts("agree-language");
- parser.accepts("no-parameter-cache");
- parser.accepts("skip-large-phrases").withRequiredArg().ofType(Integer.class).defaultsTo(5);
- OptionSet options = parser.parse(args);
-
- if (options.has("help") || !options.has("in"))
- {
- try {
- parser.printHelpOn(System.err);
- } catch (IOException e) {
- System.err.println("This should never happen.");
- e.printStackTrace();
- }
- System.exit(1);
- }
-
- int tags = (Integer) options.valueOf("topics");
- int iterations = (Integer) options.valueOf("iterations");
- double scale_phrase = (Double) options.valueOf("scale-phrase");
- double scale_context = (Double) options.valueOf("scale-context");
- int threads = (Integer) options.valueOf("threads");
- double threshold = (Double) options.valueOf("convergence-threshold");
- boolean vb = options.has("variational-bayes");
- double alphaEmit = (vb) ? (Double) options.valueOf("alpha-emit") : 0;
- double alphaPi = (vb) ? (Double) options.valueOf("alpha-pi") : 0;
- int skip = (Integer) options.valueOf("skip-large-phrases");
-
- if (options.has("seed"))
- F.rng = new Random((Long) options.valueOf("seed"));
-
- ExecutorService threadPool = null;
- if (threads > 0)
- threadPool = Executors.newFixedThreadPool(threads);
-
- if (tags <= 1 || scale_phrase < 0 || scale_context < 0 || threshold < 0)
- {
- System.err.println("Invalid arguments. Try again!");
- System.exit(1);
- }
-
- Corpus corpus = null;
- File infile = (File) options.valueOf("in");
- Corpus corpus1 = null;
- File infile1 = (File) options.valueOf("in1");
- try {
- System.out.println("Reading concordance from " + infile);
- corpus = Corpus.readFromFile(FileUtil.reader(infile));
- corpus.printStats(System.out);
- if(options.has("in1")){
- corpus1 = Corpus.readFromFile(FileUtil.reader(infile1));
- corpus1.printStats(System.out);
- }
- } catch (IOException e) {
- System.err.println("Failed to open input file: " + infile);
- e.printStackTrace();
- System.exit(1);
- }
-
- if (!(options.has("agree-direction")||options.has("agree-language")))
- System.out.println("Running with " + tags + " tags " +
- "for " + iterations + " iterations " +
- ((skip > 0) ? "skipping large phrases for first " + skip + " iterations " : "") +
- "with scale " + scale_phrase + " phrase and " + scale_context + " context " +
- "and " + threads + " threads");
- else
- System.out.println("Running agreement model with " + tags + " tags " +
- "for " + iterations);
-
- System.out.println();
-
- PhraseCluster cluster = null;
- Agree2Sides agree2sides = null;
- Agree agree= null;
- VB vbModel=null;
- if (options.has("agree-language"))
- agree2sides = new Agree2Sides(tags, corpus,corpus1);
- else if (options.has("agree-direction"))
- agree = new Agree(tags, corpus);
- else
- {
- if (vb)
- {
- vbModel=new VB(tags,corpus);
- vbModel.alpha=alphaPi;
- vbModel.lambda=alphaEmit;
- if (threadPool != null) vbModel.useThreadPool(threadPool);
- }
- else
- {
- cluster = new PhraseCluster(tags, corpus);
- if (threadPool != null) cluster.useThreadPool(threadPool);
-
- if (options.has("no-parameter-cache"))
- cluster.cacheLambda = false;
- if (options.has("start"))
- {
- try {
- System.err.println("Reading starting parameters from " + options.valueOf("start"));
- cluster.loadParameters(FileUtil.reader((File)options.valueOf("start")));
- } catch (IOException e) {
- System.err.println("Failed to open input file: " + options.valueOf("start"));
- e.printStackTrace();
- }
- }
- }
- }
-
- double last = 0;
- for (int i=0; i < iterations; i++)
- {
- double o;
- if (agree != null)
- o = agree.EM();
- else if(agree2sides!=null)
- o = agree2sides.EM();
- else
- {
- if (i < skip)
- System.out.println("Skipping phrases of length > " + (i+1));
-
- if (scale_phrase <= 0 && scale_context <= 0)
- {
- if (!vb)
- o = cluster.EM((i < skip) ? i+1 : 0);
- else
- o = vbModel.EM();
- }
- else
- o = cluster.PREM(scale_phrase, scale_context, (i < skip) ? i+1 : 0);
- }
-
- System.out.println("ITER: "+i+" objective: " + o);
-
- // sometimes takes a few iterations to break the ties
- if (i > 5 && Math.abs((o - last) / o) < threshold)
- {
- last = o;
- break;
- }
- last = o;
- }
-
- double pl1lmax = 0, cl1lmax = 0;
- if (cluster != null)
- {
- pl1lmax = cluster.phrase_l1lmax();
- cl1lmax = cluster.context_l1lmax();
- }
- else if (agree != null)
- {
- // fairly arbitrary choice of model1 cf model2
- pl1lmax = agree.model1.phrase_l1lmax();
- cl1lmax = agree.model1.context_l1lmax();
- }
- else if (agree2sides != null)
- {
- // fairly arbitrary choice of model1 cf model2
- pl1lmax = agree2sides.model1.phrase_l1lmax();
- cl1lmax = agree2sides.model1.context_l1lmax();
- }
-
- System.out.println("\nFinal posterior phrase l1lmax " + pl1lmax + " context l1lmax " + cl1lmax);
-
- if (options.has("out"))
- {
- File outfile = (File) options.valueOf("out");
- try {
- PrintStream ps = FileUtil.printstream(outfile);
- List<Edge> test;
- if (!options.has("test")) // just use the training
- test = corpus.getEdges();
- else
- { // if --test supplied, load up the file
- infile = (File) options.valueOf("test");
- System.out.println("Reading testing concordance from " + infile);
- test = corpus.readEdges(FileUtil.reader(infile));
- }
- if(vb) {
- assert !options.has("test");
- vbModel.displayPosterior(ps);
- } else if (cluster != null)
- cluster.displayPosterior(ps, test);
- else if (agree != null)
- agree.displayPosterior(ps, test);
- else if (agree2sides != null) {
- assert !options.has("test");
- agree2sides.displayPosterior(ps);
- }
-
- ps.close();
- } catch (IOException e) {
- System.err.println("Failed to open either testing file or output file");
- e.printStackTrace();
- System.exit(1);
- }
- }
-
- if (options.has("parameters"))
- {
- assert !vb;
- File outfile = (File) options.valueOf("parameters");
- PrintStream ps;
- try {
- ps = FileUtil.printstream(outfile);
- cluster.displayModelParam(ps);
- ps.close();
- } catch (IOException e) {
- System.err.println("Failed to open output parameters file: " + outfile);
- e.printStackTrace();
- System.exit(1);
- }
- }
-
- if (cluster != null && cluster.pool != null)
- cluster.pool.shutdown();
- }
-}
diff --git a/gi/posterior-regularisation/prjava/src/phrase/VB.java b/gi/posterior-regularisation/prjava/src/phrase/VB.java
deleted file mode 100644
index cd3f4966..00000000
--- a/gi/posterior-regularisation/prjava/src/phrase/VB.java
+++ /dev/null
@@ -1,419 +0,0 @@
-package phrase;
-
-import gnu.trove.TIntArrayList;
-
-import io.FileUtil;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-
-import org.apache.commons.math.special.Gamma;
-
-import phrase.Corpus.Edge;
-
-public class VB {
-
- public static int MAX_ITER=400;
-
- /**@brief
- * hyper param for beta
- * where beta is multinomial
- * for generating words from a topic
- */
- public double lambda=0.1;
- /**@brief
- * hyper param for theta
- * where theta is dirichlet for z
- */
- public double alpha=0.0001;
- /**@brief
- * variational param for beta
- */
- private double rho[][][];
- private double digamma_rho[][][];
- private double rho_sum[][];
- /**@brief
- * variational param for z
- */
- //private double phi[][];
- /**@brief
- * variational param for theta
- */
- private double gamma[];
- private static double VAL_DIFF_RATIO=0.005;
-
- private int n_positions;
- private int n_words;
- private int K;
- private ExecutorService pool;
-
- private Corpus c;
- public static void main(String[] args) {
- // String in="../pdata/canned.con";
- String in="../pdata/btec.con";
- String out="../pdata/vb.out";
- int numCluster=25;
- Corpus corpus = null;
- File infile = new File(in);
- try {
- System.out.println("Reading concordance from " + infile);
- corpus = Corpus.readFromFile(FileUtil.reader(infile));
- corpus.printStats(System.out);
- } catch (IOException e) {
- System.err.println("Failed to open input file: " + infile);
- e.printStackTrace();
- System.exit(1);
- }
-
- VB vb=new VB(numCluster, corpus);
- int iter=20;
- for(int i=0;i<iter;i++){
- double obj=vb.EM();
- System.out.println("Iter "+i+": "+obj);
- }
-
- File outfile = new File (out);
- try {
- PrintStream ps = FileUtil.printstream(outfile);
- vb.displayPosterior(ps);
- // ps.println();
- // c2f.displayModelParam(ps);
- ps.close();
- } catch (IOException e) {
- System.err.println("Failed to open output file: " + outfile);
- e.printStackTrace();
- System.exit(1);
- }
- }
-
- public VB(int numCluster, Corpus corpus){
- c=corpus;
- K=numCluster;
- n_positions=c.getNumContextPositions();
- n_words=c.getNumWords();
- rho=new double[K][n_positions][n_words];
- //to init rho
- //loop through data and count up words
- double[] phi_tmp=new double[K];
- for(int i=0;i<K;i++){
- for(int pos=0;pos<n_positions;pos++){
- Arrays.fill(rho[i][pos], lambda);
- }
- }
- for(int d=0;d<c.getNumPhrases();d++){
- List<Edge>doc=c.getEdgesForPhrase(d);
- for(int n=0;n<doc.size();n++){
- TIntArrayList context=doc.get(n).getContext();
- arr.F.randomise(phi_tmp);
- for(int i=0;i<K;i++){
- for(int pos=0;pos<n_positions;pos++){
- rho[i][pos][context.get(pos)]+=phi_tmp[i];
- }
- }
- }
- }
-
- }
-
- private double inference(int phraseID, double[][] phi, double[] gamma)
- {
- List<Edge > doc=c.getEdgesForPhrase(phraseID);
- for(int i=0;i<phi.length;i++){
- for(int j=0;j<phi[i].length;j++){
- phi[i][j]=1.0/K;
- }
- }
- Arrays.fill(gamma,alpha+1.0/K);
-
- double digamma_gamma[]=new double[K];
-
- double gamma_sum=digamma(arr.F.l1norm(gamma));
- for(int i=0;i<K;i++){
- digamma_gamma[i]=digamma(gamma[i]);
- }
- double gammaSum[]=new double [K];
- double prev_val=0;
- double obj=0;
-
- for(int iter=0;iter<MAX_ITER;iter++){
- prev_val=obj;
- obj=0;
- Arrays.fill(gammaSum,0.0);
- for(int n=0;n<doc.size();n++){
- TIntArrayList context=doc.get(n).getContext();
- double phisum=0;
- for(int i=0;i<K;i++){
- double sum=0;
- for(int pos=0;pos<n_positions;pos++){
- int word=context.get(pos);
- sum+=digamma_rho[i][pos][word]-rho_sum[i][pos];
- }
- sum+= digamma_gamma[i]-gamma_sum;
- phi[n][i]=sum;
-
- if (i > 0){
- phisum = log_sum(phisum, phi[n][i]);
- }
- else{
- phisum = phi[n][i];
- }
-
- }//end of a word
-
- for(int i=0;i<K;i++){
- phi[n][i]=Math.exp(phi[n][i]-phisum);
- gammaSum[i]+=phi[n][i];
- }
-
- }//end of doc
-
- for(int i=0;i<K;i++){
- gamma[i]=alpha+gammaSum[i];
- }
- gamma_sum=digamma(arr.F.l1norm(gamma));
- for(int i=0;i<K;i++){
- digamma_gamma[i]=digamma(gamma[i]);
- }
- //compute objective for reporting
-
- obj=0;
-
- for(int i=0;i<K;i++){
- obj+=(alpha-1)*(digamma_gamma[i]-gamma_sum);
- }
-
-
- for(int n=0;n<doc.size();n++){
- TIntArrayList context=doc.get(n).getContext();
-
- for(int i=0;i<K;i++){
- //entropy of phi + expected log likelihood of z
- obj+=phi[n][i]*(digamma_gamma[i]-gamma_sum);
-
- if(phi[n][i]>1e-10){
- obj+=phi[n][i]*Math.log(phi[n][i]);
- }
-
- double beta_sum=0;
- for(int pos=0;pos<n_positions;pos++){
- int word=context.get(pos);
- beta_sum+=(digamma(rho[i][pos][word])-rho_sum[i][pos]);
- }
- obj+=phi[n][i]*beta_sum;
- }
- }
-
- obj-=log_gamma(arr.F.l1norm(gamma));
- for(int i=0;i<K;i++){
- obj+=Gamma.logGamma(gamma[i]);
- obj-=(gamma[i]-1)*(digamma_gamma[i]-gamma_sum);
- }
-
-// System.out.println(phraseID+": "+obj);
- if(iter>0 && (obj-prev_val)/Math.abs(obj)<VAL_DIFF_RATIO){
- break;
- }
- }//end of inference loop
-
- return obj;
- }//end of inference
-
- /**
- * @return objective of this iteration
- */
- public double EM(){
- double emObj=0;
- if(digamma_rho==null){
- digamma_rho=new double[K][n_positions][n_words];
- }
- for(int i=0;i<K;i++){
- for (int pos=0;pos<n_positions;pos++){
- for(int j=0;j<n_words;j++){
- digamma_rho[i][pos][j]= digamma(rho[i][pos][j]);
- }
- }
- }
-
- if(rho_sum==null){
- rho_sum=new double [K][n_positions];
- }
- for(int i=0;i<K;i++){
- for(int pos=0;pos<n_positions;pos++){
- rho_sum[i][pos]=digamma(arr.F.l1norm(rho[i][pos]));
- }
- }
-
- //E
- double exp_rho[][][]=new double[K][n_positions][n_words];
- if (pool == null)
- {
- for (int d=0;d<c.getNumPhrases();d++)
- {
- List<Edge > doc=c.getEdgesForPhrase(d);
- double[][] phi = new double[doc.size()][K];
- double[] gamma = new double[K];
-
- emObj += inference(d, phi, gamma);
-
- for(int n=0;n<doc.size();n++){
- TIntArrayList context=doc.get(n).getContext();
- for(int pos=0;pos<n_positions;pos++){
- int word=context.get(pos);
- for(int i=0;i<K;i++){
- exp_rho[i][pos][word]+=phi[n][i];
- }
- }
- }
- //if(d!=0 && d%100==0) System.out.print(".");
- //if(d!=0 && d%1000==0) System.out.println(d);
- }
- }
- else // multi-threaded version of above loop
- {
- class PartialEStep implements Callable<PartialEStep>
- {
- double[][] phi;
- double[] gamma;
- double obj;
- int d;
- PartialEStep(int d) { this.d = d; }
-
- public PartialEStep call()
- {
- phi = new double[c.getEdgesForPhrase(d).size()][K];
- gamma = new double[K];
- obj = inference(d, phi, gamma);
- return this;
- }
- }
-
- List<Future<PartialEStep>> jobs = new ArrayList<Future<PartialEStep>>();
- for (int d=0;d<c.getNumPhrases();d++)
- jobs.add(pool.submit(new PartialEStep(d)));
-
- for (Future<PartialEStep> job: jobs)
- {
- try {
- PartialEStep e = job.get();
-
- emObj += e.obj;
- List<Edge> doc = c.getEdgesForPhrase(e.d);
- for(int n=0;n<doc.size();n++){
- TIntArrayList context=doc.get(n).getContext();
- for(int pos=0;pos<n_positions;pos++){
- int word=context.get(pos);
- for(int i=0;i<K;i++){
- exp_rho[i][pos][word]+=e.phi[n][i];
- }
- }
- }
- } catch (ExecutionException e) {
- System.err.println("ERROR: E-step thread execution failed.");
- throw new RuntimeException(e);
- } catch (InterruptedException e) {
- System.err.println("ERROR: Failed to join E-step thread.");
- throw new RuntimeException(e);
- }
- }
- }
- // System.out.println("EM Objective:"+emObj);
-
- //M
- for(int i=0;i<K;i++){
- for(int pos=0;pos<n_positions;pos++){
- for(int j=0;j<n_words;j++){
- rho[i][pos][j]=lambda+exp_rho[i][pos][j];
- }
- }
- }
-
- //E[\log p(\beta|\lambda)] - E[\log q(\beta)]
- for(int i=0;i<K;i++){
- for(int pos=0;pos<n_positions;pos++){
- double rhoSum=0; // normaliser of the per-position Dirichlet
- for(int j=0;j<n_words;j++){
- rhoSum+=rho[i][pos][j];
- }
- double digamma_rhoSum=Gamma.digamma(rhoSum);
- emObj-=Gamma.logGamma(rhoSum);
- for(int j=0;j<n_words;j++){
- emObj+=(lambda-rho[i][pos][j])*(Gamma.digamma(rho[i][pos][j])-digamma_rhoSum);
- emObj+=Gamma.logGamma(rho[i][pos][j]);
- }
- }
- }
-
- return emObj;
- }//end of EM
-
- public void displayPosterior(PrintStream ps)
- {
- for(int d=0;d<c.getNumPhrases();d++){
- List<Edge > doc=c.getEdgesForPhrase(d);
- double[][] phi = new double[doc.size()][K];
- for(int i=0;i<phi.length;i++)
- for(int j=0;j<phi[i].length;j++)
- phi[i][j]=1.0/K;
- double[] gamma = new double[K];
-
- inference(d, phi, gamma);
-
- for(int n=0;n<doc.size();n++){
- Edge edge=doc.get(n);
- int tag=arr.F.argmax(phi[n]);
- ps.print(edge.getPhraseString());
- ps.print("\t");
- ps.print(edge.getContextString(true));
-
- ps.println(" ||| C=" + tag);
- }
- }
- }
-
- double log_sum(double log_a, double log_b)
- {
- double v;
-
- if (log_a < log_b)
- v = log_b+Math.log(1 + Math.exp(log_a-log_b));
- else
- v = log_a+Math.log(1 + Math.exp(log_b-log_a));
- return(v);
- }
-
- double digamma(double x)
- {
- double p;
- x=x+6;
- p=1/(x*x);
- p=(((0.004166666666667*p-0.003968253986254)*p+
- 0.008333333333333)*p-0.083333333333333)*p;
- p=p+Math.log(x)-0.5/x-1/(x-1)-1/(x-2)-1/(x-3)-1/(x-4)-1/(x-5)-1/(x-6);
- return p;
- }
-
- double log_gamma(double x)
- {
- double z=1/(x*x);
-
- x=x+6;
- z=(((-0.000595238095238*z+0.000793650793651)
- *z-0.002777777777778)*z+0.083333333333333)/x;
- z=(x-0.5)*Math.log(x)-x+0.918938533204673+z-Math.log(x-1)-
- Math.log(x-2)-Math.log(x-3)-Math.log(x-4)-Math.log(x-5)-Math.log(x-6);
- return z;
- }
-
- public void useThreadPool(ExecutorService threadPool)
- {
- pool = threadPool;
- }
-}//End of class
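The `log_sum` helper above is the standard log-sum-exp trick: it factors out the larger of the two exponents so that `log(exp(a) + exp(b))` is computed without overflow. A minimal, self-contained sketch of the same idea (class and method names here are illustrative, not part of the deleted sources):

    // LogSumDemo.java -- illustrative sketch, not part of the deleted prjava code.
    public class LogSumDemo {
        // Stable log(exp(a) + exp(b)): subtract the larger argument so the
        // remaining exponent is <= 0 and cannot overflow a double.
        static double logSum(double logA, double logB) {
            if (logA == Double.NEGATIVE_INFINITY) return logB;
            if (logB == Double.NEGATIVE_INFINITY) return logA;
            double max = Math.max(logA, logB);
            double min = Math.min(logA, logB);
            return max + Math.log1p(Math.exp(min - max));
        }

        public static void main(String[] args) {
            // The naive form overflows: exp(1000) is Infinity in double precision.
            System.out.println(Math.log(Math.exp(1000) + Math.exp(999))); // Infinity
            System.out.println(logSum(1000, 999)); // ~1000.3133, the correct value
        }
    }

`Math.log1p` is a marginally more accurate drop-in for the `Math.log(1 + ...)` used by the deleted `log_sum` when the exponent is tiny; the two are otherwise equivalent.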
diff --git a/gi/posterior-regularisation/prjava/src/test/CorpusTest.java b/gi/posterior-regularisation/prjava/src/test/CorpusTest.java
deleted file mode 100644
index b4c3041f..00000000
--- a/gi/posterior-regularisation/prjava/src/test/CorpusTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-package test;
-
-import java.util.Arrays;
-import java.util.HashMap;
-
-import data.Corpus;
-import hmm.POS;
-
-public class CorpusTest {
-
- public static void main(String[] args) {
- Corpus c=new Corpus(POS.trainFilename);
-
-
- int idx=30;
-
-
- HashMap<String, Integer>vocab=
- (HashMap<String, Integer>) io.SerializedObjects.readSerializedObject(Corpus.alphaFilename);
-
- HashMap<String, Integer>tagVocab=
- (HashMap<String, Integer>) io.SerializedObjects.readSerializedObject(Corpus.tagalphaFilename);
-
-
- String [] dict=new String [vocab.size()+1];
- for(String key:vocab.keySet()){
- dict[vocab.get(key)]=key;
- }
- dict[dict.length-1]=Corpus.UNK_TOK;
-
- String [] tagdict=new String [tagVocab.size()+1];
- for(String key:tagVocab.keySet()){
- tagdict[tagVocab.get(key)]=key;
- }
- tagdict[tagdict.length-1]=Corpus.UNK_TOK;
-
- String[] sent=c.get(idx);
- int []data=c.getInt(idx);
-
-
- String []roundtrip=new String [sent.length];
- for(int i=0;i<sent.length;i++){
- roundtrip[i]=dict[data[i]];
- }
- System.out.println(Arrays.toString(sent));
- System.out.println(Arrays.toString(roundtrip));
-
- sent=c.tag.get(idx);
- data=c.tagData.get(idx);
-
-
- roundtrip=new String [sent.length];
- for(int i=0;i<sent.length;i++){
- roundtrip[i]=tagdict[data[i]];
- }
- System.out.println(Arrays.toString(sent));
- System.out.println(Arrays.toString(roundtrip));
- }
-
-}
diff --git a/gi/posterior-regularisation/prjava/src/test/HMMModelStats.java b/gi/posterior-regularisation/prjava/src/test/HMMModelStats.java
deleted file mode 100644
index d54525c8..00000000
--- a/gi/posterior-regularisation/prjava/src/test/HMMModelStats.java
+++ /dev/null
@@ -1,105 +0,0 @@
-package test;
-
-import hmm.HMM;
-import hmm.POS;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-
-import data.Corpus;
-
-public class HMMModelStats {
-
- public static String modelFilename="../posdata/posModel.out";
- public static String alphaFilename="../posdata/corpus.alphabet";
- public static String statsFilename="../posdata/model.stats";
-
- public static final int NUM_WORD=50;
-
- public static String testFilename="../posdata/en_test.conll";
-
- public static double [][]maxwt;
-
- public static void main(String[] args) {
- HashMap<String, Integer>vocab=
- (HashMap<String, Integer>) io.SerializedObjects.readSerializedObject(alphaFilename);
-
- Corpus test=new Corpus(testFilename,vocab);
-
- String [] dict=new String [vocab.size()+1];
- for(String key:vocab.keySet()){
- dict[vocab.get(key)]=key;
- }
- dict[dict.length-1]=Corpus.UNK_TOK;
-
- HMM hmm=new HMM();
- hmm.readModel(modelFilename);
-
-
-
- PrintStream ps = null;
- try {
- ps = io.FileUtil.printstream(new File(statsFilename));
- } catch (IOException e) {
- e.printStackTrace();
- System.exit(1);
- }
-
- double [][] emit=hmm.getEmitProb();
- for(int i=0;i<emit.length;i++){
- ArrayList<IntDoublePair>l=new ArrayList<IntDoublePair>();
- for(int j=0;j<emit[i].length;j++){
- l.add(new IntDoublePair(j,emit[i][j]));
- }
- Collections.sort(l);
- ps.println(i);
- for(int j=0;j<NUM_WORD;j++){
- if(j>=dict.length){
- break;
- }
- ps.print(dict[l.get(j).idx]+"\t");
- if((1+j)%10==0){
- ps.println();
- }
- }
- ps.println("\n");
- }
-
- checkMaxwt(hmm,ps,test.getAllData());
-
- int terminalSym=vocab.get(Corpus.END_SYM);
- //sample 10 sentences
- for(int i=0;i<10;i++){
- int []sent=hmm.sample(terminalSym);
- for(int j=0;j<sent.length;j++){
- ps.print(dict[sent[j]]+"\t");
- }
- ps.println();
- }
-
- ps.close();
-
- }
-
- public static void checkMaxwt(HMM hmm,PrintStream ps,int [][]data){
- double [][]emit=hmm.getEmitProb();
- maxwt=new double[emit.length][emit[0].length];
-
- hmm.computeMaxwt(maxwt,data);
- double sum=0;
- for(int i=0;i<maxwt.length;i++){
- for(int j=0;j<maxwt[i].length;j++){
- sum+=maxwt[i][j];
- }
- }
-
- ps.println("max w t P(w_i|t): "+sum);
-
- }
-
-}
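`HMMModelStats` fully sorts every emission row just to print the top `NUM_WORD` words per state. For reference, the same top-k can be taken with a bounded min-heap in O(V log k) rather than O(V log V); a hypothetical sketch, not tied to the deleted `HMM` API:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.PriorityQueue;

    public class TopKDemo {
        // Indices of the k largest values in row, highest first.
        static List<Integer> topK(double[] row, int k) {
            // Min-heap on the value, so the smallest of the current top-k
            // sits at the head and is evicted first.
            PriorityQueue<Integer> heap =
                new PriorityQueue<>((a, b) -> Double.compare(row[a], row[b]));
            for (int j = 0; j < row.length; j++) {
                heap.offer(j);
                if (heap.size() > k) heap.poll(); // drop the current minimum
            }
            List<Integer> result = new ArrayList<>(heap);
            result.sort((a, b) -> Double.compare(row[b], row[a])); // highest first
            return result;
        }

        public static void main(String[] args) {
            double[] emit = {0.1, 0.4, 0.05, 0.3, 0.15};
            System.out.println(topK(emit, 3)); // [1, 3, 4]
        }
    }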
diff --git a/gi/posterior-regularisation/prjava/src/test/IntDoublePair.java b/gi/posterior-regularisation/prjava/src/test/IntDoublePair.java
deleted file mode 100644
index 3f9f0ad7..00000000
--- a/gi/posterior-regularisation/prjava/src/test/IntDoublePair.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package test;
-
-public class IntDoublePair implements Comparable{
- double val;
- int idx;
- public int compareTo(Object o){
- if(o instanceof IntDoublePair){
- IntDoublePair pair=(IntDoublePair)o;
- if(pair.val>val){
- return 1;
- }
- if(pair.val<val){
- return -1;
- }
- return 0;
- }
- return -1;
- }
- public IntDoublePair(int i,double v){
- val=v;
- idx=i;
- }
-}
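Note that `IntDoublePair` implements a raw `Comparable` whose ordering is descending by `val` (a larger `val` compares as smaller), which is easy to misread at the sort site. A generics-based sketch that makes the ordering explicit instead (illustrative only, not part of the deleted sources):

    import java.util.Arrays;
    import java.util.Comparator;

    public class PairSortDemo {
        static class IdxVal {
            final int idx;
            final double val;
            IdxVal(int idx, double val) { this.idx = idx; this.val = val; }
            public String toString() { return idx + ":" + val; }
        }

        public static void main(String[] args) {
            IdxVal[] pairs = { new IdxVal(0, 0.2), new IdxVal(1, 0.7), new IdxVal(2, 0.1) };
            // Descending by val -- the same contract IntDoublePair bakes into compareTo.
            Arrays.sort(pairs, Comparator.comparingDouble((IdxVal p) -> p.val).reversed());
            System.out.println(Arrays.toString(pairs)); // [1:0.7, 0:0.2, 2:0.1]
        }
    }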
diff --git a/gi/posterior-regularisation/prjava/src/test/X2y2WithConstraints.java b/gi/posterior-regularisation/prjava/src/test/X2y2WithConstraints.java
deleted file mode 100644
index 9059a59e..00000000
--- a/gi/posterior-regularisation/prjava/src/test/X2y2WithConstraints.java
+++ /dev/null
@@ -1,131 +0,0 @@
-package test;
-
-
-
-import optimization.gradientBasedMethods.ProjectedGradientDescent;
-import optimization.gradientBasedMethods.ProjectedObjective;
-import optimization.gradientBasedMethods.stats.OptimizerStats;
-import optimization.linesearch.ArmijoLineSearchMinimizationAlongProjectionArc;
-import optimization.linesearch.InterpolationPickFirstStep;
-import optimization.linesearch.LineSearchMethod;
-import optimization.projections.BoundsProjection;
-import optimization.projections.Projection;
-import optimization.projections.SimplexProjection;
-import optimization.stopCriteria.CompositeStopingCriteria;
-import optimization.stopCriteria.GradientL2Norm;
-import optimization.stopCriteria.ProjectedGradientL2Norm;
-import optimization.stopCriteria.StopingCriteria;
-import optimization.stopCriteria.ValueDifference;
-
-
-/**
- * @author javg
- *
- * Minimises a(x-dx)^2 + b(y-dy)^2, subject to a projection
- * (simplex or box) constraint on (x, y).
- */
-public class X2y2WithConstraints extends ProjectedObjective{
-
-
- double a, b;
- double dx;
- double dy;
- Projection projection;
-
-
- public X2y2WithConstraints(double a, double b, double[] params, double dx, double dy, Projection proj){
- //projection = new BoundsProjection(0.2,Double.MAX_VALUE);
- super();
- projection = proj;
- this.a = a;
- this.b = b;
- this.dx = dx;
- this.dy = dy;
- setInitialParameters(params);
- System.out.println("Function " +a+"(x-"+dx+")^2 + "+b+"(y-"+dy+")^2");
- System.out.println("Gradient " +(2*a)+"(x-"+dx+") ; "+(b*2)+"(y-"+dy+")");
- printParameters();
- projection.project(parameters);
- printParameters();
- gradient = new double[2];
- }
-
- public double getValue() {
- functionCalls++;
- return a*(parameters[0]-dx)*(parameters[0]-dx)+b*((parameters[1]-dy)*(parameters[1]-dy));
- }
-
- public double[] getGradient() {
- if(gradient == null){
- gradient = new double[2];
- }
- gradientCalls++;
- gradient[0]=2*a*(parameters[0]-dx);
- gradient[1]=2*b*(parameters[1]-dy);
- return gradient;
- }
-
-
- public double[] projectPoint(double[] point) {
- double[] newPoint = point.clone();
- projection.project(newPoint);
- return newPoint;
- }
-
- public void optimizeWithProjectedGradientDescent(LineSearchMethod ls, OptimizerStats stats, X2y2WithConstraints o){
- ProjectedGradientDescent optimizer = new ProjectedGradientDescent(ls);
- StopingCriteria stopGrad = new ProjectedGradientL2Norm(0.001);
- StopingCriteria stopValue = new ValueDifference(0.001);
- CompositeStopingCriteria compositeStop = new CompositeStopingCriteria();
- compositeStop.add(stopGrad);
- compositeStop.add(stopValue);
-
- optimizer.setMaxIterations(5);
- boolean succeeded = optimizer.optimize(o,stats,compositeStop);
- System.out.println("Ended optimization Projected Gradient Descent\n" + stats.prettyPrint(1));
- System.out.println("Solution: " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1]);
- if(succeeded){
- System.out.println("Ended optimization in " + optimizer.getCurrentIteration());
- }else{
- System.out.println("Failed to optimize");
- }
- }
-
-
-
- public String toString(){
-
- return "P1: " + parameters[0] + " P2: " + parameters[1] + " value " + getValue() + " grad (" + getGradient()[0] + ":" + getGradient()[1]+")";
- }
-
- public static void main(String[] args) {
- double a = 1;
- double b=1;
- double x0 = 0;
- double y0 =1;
- double dx = 0.5;
- double dy = 0.2 ;
- double [] parameters = new double[2];
- parameters[0] = x0;
- parameters[1] = y0;
- X2y2WithConstraints o = new X2y2WithConstraints(a,b,parameters,dx,dy,
- new SimplexProjection(0.5)
- //new BoundsProjection(0.0,0.4)
- );
- System.out.println("Starting optimization " + " x0 " + o.parameters[0]+ " x1 " + o.parameters[1] + " a " + a + " b "+b );
- o.setDebugLevel(4);
-
- LineSearchMethod ls = new ArmijoLineSearchMinimizationAlongProjectionArc(new InterpolationPickFirstStep(1));
-
- OptimizerStats stats = new OptimizerStats();
- o.optimizeWithProjectedGradientDescent(ls, stats, o);
-
-// o = new x2y2WithConstraints(a,b,x0,y0,dx,dy);
-// stats = new OptimizerStats();
-// o.optimizeWithSpectralProjectedGradientDescent(stats, o);
- }
-
-
-
-
-}
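The driver above leans on `ProjectedGradientDescent` and the line-search classes from the bundled `optimization` package. The core projected-gradient iteration is simple enough to sketch standalone; a minimal version for the same objective a(x-dx)^2 + b(y-dy)^2 under the box projection, with a fixed step size instead of the Armijo search (all names here are illustrative):

    public class ProjectedGradientDemo {
        // Minimise a(x-dx)^2 + b(y-dy)^2 subject to 0 <= x, y <= 0.4:
        // take a gradient step, then project back onto the box.
        public static void main(String[] args) {
            double a = 1, b = 1, dx = 0.5, dy = 0.2;
            double[] p = {0.0, 1.0};  // starting point (x0, y0)
            double step = 0.25;       // fixed step; the deleted code uses Armijo
            for (int iter = 0; iter < 50; iter++) {
                p[0] -= step * 2 * a * (p[0] - dx);
                p[1] -= step * 2 * b * (p[1] - dy);
                // projection onto [0, 0.4]^2, cf. BoundsProjection(0.0, 0.4)
                p[0] = Math.min(0.4, Math.max(0.0, p[0]));
                p[1] = Math.min(0.4, Math.max(0.0, p[1]));
            }
            // The unconstrained optimum (0.5, 0.2) violates x <= 0.4, so the
            // constrained solution is (0.4, 0.2).
            System.out.println("x = " + p[0] + ", y = " + p[1]);
        }
    }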
diff --git a/gi/posterior-regularisation/prjava/src/util/Array.java b/gi/posterior-regularisation/prjava/src/util/Array.java deleted file mode 100644 index cc4725af..00000000 --- a/gi/posterior-regularisation/prjava/src/util/Array.java +++ /dev/null @@ -1,41 +0,0 @@ -package util; - -import java.util.Arrays; - -public class Array { - - - - public static void sortDescending(double[] ds){ - for (int i = 0; i < ds.length; i++) ds[i] = -ds[i]; - Arrays.sort(ds); - for (int i = 0; i < ds.length; i++) ds[i] = -ds[i]; - } - - /** - * Return a new reversed array - * @param array - * @return - */ - public static int[] reverseIntArray(int[] array){ - int[] reversed = new int[array.length]; - for (int i = 0; i < reversed.length; i++) { - reversed[i] = array[reversed.length-1-i]; - } - return reversed; - } - - public static String[] sumArray(String[] in, int from){ - String[] res = new String[in.length-from]; - for (int i = from; i < in.length; i++) { - res[i-from] = in[i]; - } - return res; - } - - public static void main(String[] args) { - int[] i = {1,2,3,4}; - util.Printing.printIntArray(i, null, "original"); - util.Printing.printIntArray(reverseIntArray(i), null, "reversed"); - } -} diff --git a/gi/posterior-regularisation/prjava/src/util/ArrayMath.java b/gi/posterior-regularisation/prjava/src/util/ArrayMath.java deleted file mode 100644 index 398a13a2..00000000 --- a/gi/posterior-regularisation/prjava/src/util/ArrayMath.java +++ /dev/null @@ -1,186 +0,0 @@ -package util; - -import java.util.Arrays; - -public class ArrayMath { - - public static double dotProduct(double[] v1, double[] v2) { - assert(v1.length == v2.length); - double result = 0; - for(int i = 0; i < v1.length; i++) - result += v1[i]*v2[i]; - return result; - } - - public static double twoNormSquared(double[] v) { - double result = 0; - for(double d : v) - result += d*d; - return result; - } - - public static boolean containsInvalid(double[] v) { - for(int i = 0; i < v.length; i++) - if(Double.isNaN(v[i]) || Double.isInfinite(v[i])) - return true; - return false; - } - - - - public static double safeAdd(double[] toAdd) { - // Make sure there are no positive infinities - double sum = 0; - for(int i = 0; i < toAdd.length; i++) { - assert(!(Double.isInfinite(toAdd[i]) && toAdd[i] > 0)); - assert(!Double.isNaN(toAdd[i])); - sum += toAdd[i]; - } - - return sum; - } - - /* Methods for filling integer and double arrays (of up to four dimensions) with the given value. 
*/ - - public static void set(int[][][][] array, int value) { - for(int i = 0; i < array.length; i++) { - set(array[i], value); - } - } - - public static void set(int[][][] array, int value) { - for(int i = 0; i < array.length; i++) { - set(array[i], value); - } - } - - public static void set(int[][] array, int value) { - for(int i = 0; i < array.length; i++) { - set(array[i], value); - } - } - - public static void set(int[] array, int value) { - Arrays.fill(array, value); - } - - - public static void set(double[][][][] array, double value) { - for(int i = 0; i < array.length; i++) { - set(array[i], value); - } - } - - public static void set(double[][][] array, double value) { - for(int i = 0; i < array.length; i++) { - set(array[i], value); - } - } - - public static void set(double[][] array, double value) { - for(int i = 0; i < array.length; i++) { - set(array[i], value); - } - } - - public static void set(double[] array, double value) { - Arrays.fill(array, value); - } - - public static void setEqual(double[][][][] dest, double[][][][] source){ - for (int i = 0; i < source.length; i++) { - setEqual(dest[i],source[i]); - } - } - - - public static void setEqual(double[][][] dest, double[][][] source){ - for (int i = 0; i < source.length; i++) { - set(dest[i],source[i]); - } - } - - - public static void set(double[][] dest, double[][] source){ - for (int i = 0; i < source.length; i++) { - setEqual(dest[i],source[i]); - } - } - - public static void setEqual(double[] dest, double[] source){ - System.arraycopy(source, 0, dest, 0, source.length); - } - - public static void plusEquals(double[][][][] array, double val){ - for (int i = 0; i < array.length; i++) { - plusEquals(array[i], val); - } - } - - public static void plusEquals(double[][][] array, double val){ - for (int i = 0; i < array.length; i++) { - plusEquals(array[i], val); - } - } - - public static void plusEquals(double[][] array, double val){ - for (int i = 0; i < array.length; i++) { - plusEquals(array[i], val); - } - } - - public static void plusEquals(double[] array, double val){ - for (int i = 0; i < array.length; i++) { - array[i] += val; - } - } - - - public static double sum(double[] array) { - double res = 0; - for (int i = 0; i < array.length; i++) res += array[i]; - return res; - } - - - - public static double[][] deepclone(double[][] in){ - double[][] res = new double[in.length][]; - for (int i = 0; i < res.length; i++) { - res[i] = in[i].clone(); - } - return res; - } - - - public static double[][][] deepclone(double[][][] in){ - double[][][] res = new double[in.length][][]; - for (int i = 0; i < res.length; i++) { - res[i] = deepclone(in[i]); - } - return res; - } - - public static double cosine(double[] a, - double[] b) { - return (dotProduct(a, b)+1e-5)/(Math.sqrt(dotProduct(a, a)+1e-5)*Math.sqrt(dotProduct(b, b)+1e-5)); - } - - public static double max(double[] ds) { - double max = Double.NEGATIVE_INFINITY; - for(double d:ds) max = Math.max(d,max); - return max; - } - - public static void exponentiate(double[] a) { - for (int i = 0; i < a.length; i++) { - a[i] = Math.exp(a[i]); - } - } - - public static int sum(int[] array) { - int res = 0; - for (int i = 0; i < array.length; i++) res += array[i]; - return res; - } -} diff --git a/gi/posterior-regularisation/prjava/src/util/DifferentiableObjective.java b/gi/posterior-regularisation/prjava/src/util/DifferentiableObjective.java deleted file mode 100644 index 1ff1ae4a..00000000 --- a/gi/posterior-regularisation/prjava/src/util/DifferentiableObjective.java +++ /dev/null 
@@ -1,14 +0,0 @@ -package util; - -public interface DifferentiableObjective { - - public double getValue(); - - public void getGradient(double[] gradient); - - public void getParameters(double[] params); - - public void setParameters(double[] newParameters); - - public int getNumParameters(); -} diff --git a/gi/posterior-regularisation/prjava/src/util/DigammaFunction.java b/gi/posterior-regularisation/prjava/src/util/DigammaFunction.java deleted file mode 100644 index ff1478ad..00000000 --- a/gi/posterior-regularisation/prjava/src/util/DigammaFunction.java +++ /dev/null @@ -1,21 +0,0 @@ -package util; - -public class DigammaFunction { - public static double expDigamma(double number){ - if(number==0)return number; - return Math.exp(digamma(number)); - } - - public static double digamma(double number){ - if(number > 7){ - return digammApprox(number-0.5); - }else{ - return digamma(number+1) - 1.0/number; - } - } - - private static double digammApprox(double value){ - return Math.log(value) + 0.04167*Math.pow(value, -2) - 0.00729*Math.pow(value, -4) - + 0.00384*Math.pow(value, -6) - 0.00413*Math.pow(value, -8); - } -} diff --git a/gi/posterior-regularisation/prjava/src/util/FileSystem.java b/gi/posterior-regularisation/prjava/src/util/FileSystem.java deleted file mode 100644 index d7812e40..00000000 --- a/gi/posterior-regularisation/prjava/src/util/FileSystem.java +++ /dev/null @@ -1,21 +0,0 @@ -package util; - -import java.io.File; - -public class FileSystem { - public static boolean createDir(String directory) { - - File dir = new File(directory); - if (!dir.isDirectory()) { - boolean success = dir.mkdirs(); - if (!success) { - System.out.println("Unable to create directory " + directory); - return false; - } - System.out.println("Created directory " + directory); - } else { - System.out.println("Reusing directory " + directory); - } - return true; - } -} diff --git a/gi/posterior-regularisation/prjava/src/util/InputOutput.java b/gi/posterior-regularisation/prjava/src/util/InputOutput.java deleted file mode 100644 index da7f71bf..00000000 --- a/gi/posterior-regularisation/prjava/src/util/InputOutput.java +++ /dev/null @@ -1,67 +0,0 @@ -package util; - -import java.io.BufferedReader; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.io.PrintStream; -import java.io.UnsupportedEncodingException; -import java.util.Properties; -import java.util.zip.GZIPInputStream; -import java.util.zip.GZIPOutputStream; - -public class InputOutput { - - /** - * Opens a file either compress with gzip or not compressed. 
- */ - public static BufferedReader openReader(String fileName) throws UnsupportedEncodingException, FileNotFoundException, IOException{ - System.out.println("Reading: " + fileName); - BufferedReader reader; - fileName = fileName.trim(); - if(fileName.endsWith("gz")){ - reader = new BufferedReader( - new InputStreamReader(new GZIPInputStream(new FileInputStream(fileName)),"UTF8")); - }else{ - reader = new BufferedReader(new InputStreamReader( - new FileInputStream(fileName), "UTF8")); - } - - return reader; - } - - - public static PrintStream openWriter(String fileName) - throws UnsupportedEncodingException, FileNotFoundException, IOException{ - System.out.println("Writting to file: " + fileName); - PrintStream writter; - fileName = fileName.trim(); - if(fileName.endsWith("gz")){ - writter = new PrintStream(new GZIPOutputStream(new FileOutputStream(fileName)), - true, "UTF-8"); - - }else{ - writter = new PrintStream(new FileOutputStream(fileName), - true, "UTF-8"); - - } - - return writter; - } - - public static Properties readPropertiesFile(String fileName) { - Properties properties = new Properties(); - try { - properties.load(new FileInputStream(fileName)); - } catch (IOException e) { - e.printStackTrace(); - throw new AssertionError("Wrong properties file " + fileName); - } - System.out.println(properties.toString()); - - return properties; - } -} diff --git a/gi/posterior-regularisation/prjava/src/util/LogSummer.java b/gi/posterior-regularisation/prjava/src/util/LogSummer.java deleted file mode 100644 index 117393b9..00000000 --- a/gi/posterior-regularisation/prjava/src/util/LogSummer.java +++ /dev/null @@ -1,86 +0,0 @@ -package util; - -import java.lang.Math; - -/* - * Math tool for computing logs of sums, when the terms of the sum are already in log form. - * (Useful if the terms of the sum are very small numbers.) - */ -public class LogSummer { - - private LogSummer() { - } - - /** - * Given log(a) and log(b), computes log(a + b). - * - * @param loga log of first sum term - * @param logb log of second sum term - * @return log(sum), where sum = a + b - */ - public static double sum(double loga, double logb) { - assert(!Double.isNaN(loga)); - assert(!Double.isNaN(logb)); - - if(Double.isInfinite(loga)) - return logb; - if(Double.isInfinite(logb)) - return loga; - - double maxLog; - double difference; - if(loga > logb) { - difference = logb - loga; - maxLog = loga; - } - else { - difference = loga - logb; - maxLog = logb; - } - - return Math.log1p(Math.exp(difference)) + maxLog; - } - - /** - * Computes log(exp(array[index]) + b), and - * modifies array[index] to contain this new value. - * - * @param array array to modify - * @param index index at which to modify - * @param logb log of the second sum term - */ - public static void sum(double[] array, int index, double logb) { - array[index] = sum(array[index], logb); - } - - /** - * Computes log(a + b + c + ...) from log(a), log(b), log(c), ... - * by recursively splitting the input and delegating to the sum method. - * - * @param terms an array containing the log of all the terms for the sum - * @return log(sum), where sum = exp(terms[0]) + exp(terms[1]) + ... - */ - public static double sumAll(double... terms) { - return sumAllHelper(terms, 0, terms.length); - } - - /** - * Computes log(a_0 + a_1 + ...) from a_0 = exp(terms[begin]), - * a_1 = exp(terms[begin + 1]), ..., a_{end - 1 - begin} = exp(terms[end - 1]). 
- * - * @param terms an array containing the log of all the terms for the sum, - * and possibly some other terms that will not go into the sum - * @return log of the sum of the elements in the [begin, end) region of the terms array - */ - private static double sumAllHelper(final double[] terms, final int begin, final int end) { - int length = end - begin; - switch(length) { - case 0: return Double.NEGATIVE_INFINITY; - case 1: return terms[begin]; - default: - int midIndex = begin + length/2; - return sum(sumAllHelper(terms, begin, midIndex), sumAllHelper(terms, midIndex, end)); - } - } - -}
\ No newline at end of file diff --git a/gi/posterior-regularisation/prjava/src/util/MathUtil.java b/gi/posterior-regularisation/prjava/src/util/MathUtil.java deleted file mode 100644 index 799b1faf..00000000 --- a/gi/posterior-regularisation/prjava/src/util/MathUtil.java +++ /dev/null @@ -1,148 +0,0 @@ -package util; - -import java.util.Random; - -public class MathUtil { - public static final boolean closeToOne(double number){ - return Math.abs(number-1) < 1.E-10; - } - - public static final boolean closeToZero(double number){ - return Math.abs(number) < 1.E-5; - } - - /** - * Return a ramdom multinominal distribution. - * - * @param size - * @return - */ - public static final double[] randomVector(int size, Random r){ - double[] random = new double[size]; - double sum=0; - for(int i = 0; i < size; i++){ - double number = r.nextDouble(); - random[i] = number; - sum+=number; - } - for(int i = 0; i < size; i++){ - random[i] = random[i]/sum; - } - return random; - } - - - - public static double sum(double[] ds) { - double res = 0; - for (int i = 0; i < ds.length; i++) { - res+=ds[i]; - } - return res; - } - - public static double max(double[] ds) { - double res = Double.NEGATIVE_INFINITY; - for (int i = 0; i < ds.length; i++) { - res = Math.max(res, ds[i]); - } - return res; - } - - public static double min(double[] ds) { - double res = Double.POSITIVE_INFINITY; - for (int i = 0; i < ds.length; i++) { - res = Math.min(res, ds[i]); - } - return res; - } - - - public static double KLDistance(double[] p, double[] q) { - int len = p.length; - double kl = 0; - for (int j = 0; j < len; j++) { - if (p[j] == 0 || q[j] == 0) { - continue; - } else { - kl += q[j] * Math.log(q[j] / p[j]); - } - - } - return kl; - } - - public static double L2Distance(double[] p, double[] q) { - int len = p.length; - double l2 = 0; - for (int j = 0; j < len; j++) { - if (p[j] == 0 || q[j] == 0) { - continue; - } else { - l2 += (q[j] - p[j])*(q[j] - p[j]); - } - - } - return Math.sqrt(l2); - } - - public static double L1Distance(double[] p, double[] q) { - int len = p.length; - double l1 = 0; - for (int j = 0; j < len; j++) { - if (p[j] == 0 || q[j] == 0) { - continue; - } else { - l1 += Math.abs(q[j] - p[j]); - } - - } - return l1; - } - - public static double dot(double[] ds, double[] ds2) { - double res = 0; - for (int i = 0; i < ds2.length; i++) { - res+= ds[i]*ds2[i]; - } - return res; - } - - public static double expDigamma(double number){ - return Math.exp(digamma(number)); - } - - public static double digamma(double number){ - if(number > 7){ - return digammApprox(number-0.5); - }else{ - return digamma(number+1) - 1.0/number; - } - } - - private static double digammApprox(double value){ - return Math.log(value) + 0.04167*Math.pow(value, -2) - 0.00729*Math.pow(value, -4) - + 0.00384*Math.pow(value, -6) - 0.00413*Math.pow(value, -8); - } - - public static double eulerGamma = 0.57721566490152386060651209008240243; - // FIXME -- so far just the initialization from Minka's paper "Estimating a Dirichlet distribution". 
- public static double invDigamma(double y) { - if (y>= -2.22) return Math.exp(y)+0.5; - return -1.0/(y+eulerGamma); - } - - - - public static void main(String[] args) { - for(double i = 0; i < 10 ; i+=0.1){ - System.out.println(i+"\t"+expDigamma(i)+"\t"+(i-0.5)); - } -// double gammaValue = (expDigamma(3)/expDigamma(10) + expDigamma(3)/expDigamma(10) + expDigamma(4)/expDigamma(10)); -// double normalValue = 3/10+3/4+10/10; -// System.out.println("Gamma " + gammaValue + " normal " + normalValue); - } - - - -} diff --git a/gi/posterior-regularisation/prjava/src/util/Matrix.java b/gi/posterior-regularisation/prjava/src/util/Matrix.java deleted file mode 100644 index 8fb6d911..00000000 --- a/gi/posterior-regularisation/prjava/src/util/Matrix.java +++ /dev/null @@ -1,16 +0,0 @@ -package util; - -public class Matrix { - int x; - int y; - double[][] values; - - public Matrix(int x, int y){ - this.x = x; - this.y=y; - values = new double[x][y]; - } - - - -} diff --git a/gi/posterior-regularisation/prjava/src/util/MemoryTracker.java b/gi/posterior-regularisation/prjava/src/util/MemoryTracker.java deleted file mode 100644 index 83a65611..00000000 --- a/gi/posterior-regularisation/prjava/src/util/MemoryTracker.java +++ /dev/null @@ -1,47 +0,0 @@ -package util; - - -public class MemoryTracker { - - double initM,finalM; - boolean start = false,finish = false; - - public MemoryTracker(){ - - } - - public void start(){ - System.gc(); - System.gc(); - System.gc(); - initM = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory())/(1024*1024); - start = true; - } - - public void finish(){ - if(!start){ - throw new RuntimeException("Canot stop before starting"); - } - System.gc(); - System.gc(); - System.gc(); - finalM = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory())/(1024*1024); - finish = true; - } - - public String print(){ - if(!finish){ - throw new RuntimeException("Canot print before stopping"); - } - return "Used: " + (finalM - initM) + "MB"; - } - - public void clear(){ - initM = 0; - finalM = 0; - finish = false; - start = false; - } - - -} diff --git a/gi/posterior-regularisation/prjava/src/util/Pair.java b/gi/posterior-regularisation/prjava/src/util/Pair.java deleted file mode 100644 index 7b1f108d..00000000 --- a/gi/posterior-regularisation/prjava/src/util/Pair.java +++ /dev/null @@ -1,31 +0,0 @@ -package util; - -public class Pair<O1, O2> { - public O1 _first; - public O2 _second; - - public final O1 first() { - return _first; - } - - public final O2 second() { - return _second; - } - - public final void setFirst(O1 value){ - _first = value; - } - - public final void setSecond(O2 value){ - _second = value; - } - - public Pair(O1 first, O2 second) { - _first = first; - _second = second; - } - - public String toString(){ - return _first + " " + _second; - } -} diff --git a/gi/posterior-regularisation/prjava/src/util/Printing.java b/gi/posterior-regularisation/prjava/src/util/Printing.java deleted file mode 100644 index 14fcbe91..00000000 --- a/gi/posterior-regularisation/prjava/src/util/Printing.java +++ /dev/null @@ -1,158 +0,0 @@ -package util; - -public class Printing { - static java.text.DecimalFormat fmt = new java.text.DecimalFormat(); - - public static String padWithSpace(String s, int len){ - StringBuffer sb = new StringBuffer(); - while(sb.length() +s.length() < len){ - sb.append(" "); - } - sb.append(s); - return sb.toString(); - } - - public static String prettyPrint(double d, String patt, int len) { - fmt.applyPattern(patt); - String s = 
fmt.format(d); - while (s.length() < len) { - s = " " + s; - } - return s; - } - - public static String formatTime(long duration) { - StringBuilder sb = new StringBuilder(); - double d = duration / 1000; - fmt.applyPattern("00"); - sb.append(fmt.format((int) (d / (60 * 60))) + ":"); - d -= ((int) d / (60 * 60)) * 60 * 60; - sb.append(fmt.format((int) (d / 60)) + ":"); - d -= ((int) d / 60) * 60; - fmt.applyPattern("00.0"); - sb.append(fmt.format(d)); - return sb.toString(); - } - - - public static String doubleArrayToString(double[] array, String[] labels, String arrayName) { - StringBuffer res = new StringBuffer(); - res.append(arrayName); - res.append("\n"); - for (int i = 0; i < array.length; i++) { - if (labels == null){ - res.append(i+" \t"); - }else{ - res.append(labels[i]+ "\t"); - } - } - res.append("sum\n"); - double sum = 0; - for (int i = 0; i < array.length; i++) { - res.append(prettyPrint(array[i], - "0.00000E00", 8) + "\t"); - sum+=array[i]; - } - res.append(prettyPrint(sum, - "0.00000E00", 8)+"\n"); - return res.toString(); - } - - - - public static void printDoubleArray(double[] array, String labels[], String arrayName) { - System.out.println(doubleArrayToString(array, labels,arrayName)); - } - - - public static String doubleArrayToString(double[][] array, String[] labels1, String[] labels2, - String arrayName){ - StringBuffer res = new StringBuffer(); - res.append(arrayName); - res.append("\n\t"); - //Calculates the column sum to keeps the sums - double[] sums = new double[array[0].length+1]; - //Prints rows headings - for (int i = 0; i < array[0].length; i++) { - if (labels1 == null){ - res.append(i+" \t"); - }else{ - res.append(labels1[i]+" \t"); - } - } - res.append("sum\n"); - double sum = 0; - //For each row print heading - for (int i = 0; i < array.length; i++) { - if (labels2 == null){ - res.append(i+"\t"); - }else{ - res.append(labels2[i]+"\t"); - } - //Print values for that row - for (int j = 0; j < array[0].length; j++) { - res.append(" " + prettyPrint(array[i][j], - "0.00000E00", 8) + "\t"); - sums[j] += array[i][j]; - sum+=array[i][j]; //Sum all values of that row - } - //Print row sum - res.append(prettyPrint(sum,"0.00000E00", 8)+"\n"); - sums[array[0].length]+=sum; - sum=0; - } - res.append("sum\t"); - //Print values for colums sum - for (int i = 0; i < array[0].length+1; i++) { - res.append(prettyPrint(sums[i],"0.00000E00", 8)+"\t"); - } - res.append("\n"); - return res.toString(); - } - - public static void printDoubleArray(double[][] array, String[] labels1, String[] labels2 - , String arrayName) { - System.out.println(doubleArrayToString(array, labels1,labels2,arrayName)); - } - - - public static void printIntArray(int[][] array, String[] labels1, String[] labels2, String arrayName, - int size1, int size2) { - System.out.println(arrayName); - for (int i = 0; i < size1; i++) { - for (int j = 0; j < size2; j++) { - System.out.print(" " + array[i][j] + " "); - - } - System.out.println(); - } - System.out.println(); - } - - public static String intArrayToString(int[] array, String[] labels, String arrayName) { - StringBuffer res = new StringBuffer(); - res.append(arrayName); - for (int i = 0; i < array.length; i++) { - res.append(" " + array[i] + " "); - - } - res.append("\n"); - return res.toString(); - } - - public static void printIntArray(int[] array, String[] labels, String arrayName) { - System.out.println(intArrayToString(array, labels,arrayName)); - } - - public static String toString(double[][] d){ - StringBuffer sb = new StringBuffer(); - for (int i 
= 0; i < d.length; i++) { - for (int j = 0; j < d[0].length; j++) { - sb.append(prettyPrint(d[i][j], "0.00E0", 10)); - } - sb.append("\n"); - } - return sb.toString(); - } - -} diff --git a/gi/posterior-regularisation/prjava/src/util/Sorters.java b/gi/posterior-regularisation/prjava/src/util/Sorters.java deleted file mode 100644 index 836444e5..00000000 --- a/gi/posterior-regularisation/prjava/src/util/Sorters.java +++ /dev/null @@ -1,39 +0,0 @@ -package util; - -import java.util.Comparator; - -public class Sorters { - public static class sortWordsCounts implements Comparator{ - - /** - * Sorter for a pair of word id, counts. Sort ascending by counts - */ - public int compare(Object arg0, Object arg1) { - Pair<Integer,Integer> p1 = (Pair<Integer,Integer>)arg0; - Pair<Integer,Integer> p2 = (Pair<Integer,Integer>)arg1; - if(p1.second() > p2.second()){ - return 1; - }else{ - return -1; - } - } - - } - -public static class sortWordsDouble implements Comparator{ - - /** - * Sorter for a pair of word id, counts. Sort by counts - */ - public int compare(Object arg0, Object arg1) { - Pair<Integer,Double> p1 = (Pair<Integer,Double>)arg0; - Pair<Integer,Double> p2 = (Pair<Integer,Double>)arg1; - if(p1.second() < p2.second()){ - return 1; - }else{ - return -1; - } - } - - } -} diff --git a/gi/posterior-regularisation/prjava/train-PR-cluster.sh b/gi/posterior-regularisation/prjava/train-PR-cluster.sh deleted file mode 100755 index 67552c00..00000000 --- a/gi/posterior-regularisation/prjava/train-PR-cluster.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -d=`dirname $0` -java -ea -Xmx30g -cp $d/prjava.jar:$d/lib/trove-2.0.2.jar:$d/lib/optimization.jar:$d/lib/jopt-simple-3.2.jar:$d/lib/commons-math-2.1.jar phrase.Trainer $* diff --git a/gi/posterior-regularisation/projected_gradient.cc b/gi/posterior-regularisation/projected_gradient.cc deleted file mode 100644 index f7c39817..00000000 --- a/gi/posterior-regularisation/projected_gradient.cc +++ /dev/null @@ -1,87 +0,0 @@ -// -// Minimises given functional using the projected gradient method. Based on -// algorithm and demonstration example in Linear and Nonlinear Programming, -// Luenberger and Ye, 3rd ed., p 370. 
-// - -#include "invert.hh" -#include <iostream> - -using namespace std; - -double -f(double x1, double x2, double x3, double x4) -{ - return x1 * x1 + x2 * x2 + x3 * x3 + x4 * x4 - 2 * x1 - 3 * x4; -} - -ublas::vector<double> -g(double x1, double x2, double x3, double x4) -{ - ublas::vector<double> v(4); - v(0) = 2 * x1 - 2; - v(1) = 2 * x2; - v(2) = 2 * x3; - v(3) = 2 * x4 - 3; - return v; -} - -ublas::matrix<double> -activeConstraints(double x1, double x2, double x3, double x4) -{ - int n = 2; - if (x1 == 0) ++n; - if (x2 == 0) ++n; - if (x3 == 0) ++n; - if (x4 == 0) ++n; - - ublas::matrix<double> a(n,4); - a(0, 0) = 2; a(0, 1) = 1; a(0, 2) = 1; a(0, 3) = 4; - a(1, 0) = 1; a(1, 1) = 1; a(1, 2) = 2; a(1, 3) = 1; - - int c = 2; - if (x1 == 0) a(c++, 0) = 1; - if (x2 == 0) a(c++, 1) = 1; - if (x3 == 0) a(c++, 2) = 1; - if (x4 == 0) a(c++, 3) = 1; - - return a; -} - -ublas::matrix<double> -projection(const ublas::matrix<double> &a) -{ - ublas::matrix<double> aT = ublas::trans(a); - ublas::matrix<double> inv(a.size1(), a.size1()); - bool ok = invert_matrix(ublas::matrix<double>(ublas::prod(a, aT)), inv); - assert(ok && "Failed to invert matrix"); - return ublas::identity_matrix<double>(4) - - ublas::prod(aT, ublas::matrix<double>(ublas::prod(inv, a))); -} - -int main(int argc, char *argv[]) -{ - double x1 = 2, x2 = 2, x3 = 1, x4 = 0; - - double fval = f(x1, x2, x3, x4); - cout << "f = " << fval << endl; - ublas::vector<double> grad = g(x1, x2, x3, x4); - cout << "g = " << grad << endl; - ublas::matrix<double> A = activeConstraints(x1, x2, x3, x4); - cout << "A = " << A << endl; - ublas::matrix<double> P = projection(A); - cout << "P = " << P << endl; - // the direction of movement - ublas::vector<double> d = prod(P, grad); - cout << "d = " << (d / d(0)) << endl; - - // special case for d = 0 - - // next solve for limits on the line search - - // then use golden rule technique between these values (if bounded) - - // or simple Armijo's rule technique - - return 0; -} diff --git a/gi/posterior-regularisation/simplex_pg.py b/gi/posterior-regularisation/simplex_pg.py deleted file mode 100644 index 5da796d3..00000000 --- a/gi/posterior-regularisation/simplex_pg.py +++ /dev/null @@ -1,55 +0,0 @@ -# -# Following Leunberger and Ye, Linear and Nonlinear Progamming, 3rd ed. p367 -# "The gradient projection method" -# applied to an equality constraint for a simplex. -# -# min f(x) -# s.t. x >= 0, sum_i x = d -# -# FIXME: enforce the positivity constraint - a limit on the line search? -# - -from numpy import * -from scipy import * -from linesearch import line_search -# local copy of scipy's Amijo line_search - wasn't enforcing alpha max correctly -import sys - -dims = 4 - -def f(x): - fv = x[0]*x[0] + x[1]*x[1] + x[2]*x[2] + x[3]*x[3] - 2*x[0] - 3*x[3] - # print 'evaluating f at', x, 'value', fv - return fv - -def g(x): - return array([2*x[0] - 2, 2*x[1], 2*x[2], 2*x[3]-3]) - -def pg(x): - gv = g(x) - return gv - sum(gv) / dims - -x = ones(dims) / dims -old_fval = None - -while True: - fv = f(x) - gv = g(x) - dv = pg(x) - - print 'x', x, 'f', fv, 'g', gv, 'd', dv - - if old_fval == None: - old_fval = fv + 0.1 - - # solve for maximum step size i.e. when positivity constraints kick in - # x - alpha d = 0 => alpha = x/d - amax = max(x/dv) - if amax < 1e-8: break - - stuff = line_search(f, pg, x, -dv, dv, fv, old_fval, amax=amax) - alpha = stuff[0] # Nb. 
can avoid next evaluation of f,g,d using 'stuff' - if alpha < 1e-8: break - x -= alpha * dv - - old_fval = fv diff --git a/gi/posterior-regularisation/split-languages.py b/gi/posterior-regularisation/split-languages.py deleted file mode 100755 index 206da661..00000000 --- a/gi/posterior-regularisation/split-languages.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/python - -import sys - -sout = open(sys.argv[1], 'w') -tout = open(sys.argv[2], 'w') -for line in sys.stdin: - phrase, contexts = line.rstrip().split('\t') - sp, tp = phrase.split(' <SPLIT> ') - sout.write('%s\t' % sp) - tout.write('%s\t' % tp) - parts = contexts.split(' ||| ') - for i in range(0, len(parts), 2): - sc, tc = parts[i].split(' <SPLIT> ') - if i != 0: - sout.write(' ||| ') - tout.write(' ||| ') - sout.write('%s ||| %s' % (sc, parts[i+1])) - tout.write('%s ||| %s' % (tc, parts[i+1])) - sout.write('\n') - tout.write('\n') -sout.close() -tout.close() diff --git a/gi/posterior-regularisation/train_pr_agree.py b/gi/posterior-regularisation/train_pr_agree.py deleted file mode 100644 index 9d41362d..00000000 --- a/gi/posterior-regularisation/train_pr_agree.py +++ /dev/null @@ -1,400 +0,0 @@ -import sys -import scipy.optimize -from scipy.stats import geom -from numpy import * -from numpy.random import random, seed - -style = sys.argv[1] -if len(sys.argv) >= 3: - seed(int(sys.argv[2])) - -# -# Step 1: load the concordance counts -# - -edges = [] -word_types = {} -phrase_types = {} -context_types = {} - -for line in sys.stdin: - phrase, rest = line.strip().split('\t') - ptoks = tuple(map(lambda t: word_types.setdefault(t, len(word_types)), phrase.split())) - pid = phrase_types.setdefault(ptoks, len(phrase_types)) - - parts = rest.split('|||') - for i in range(0, len(parts), 2): - context, count = parts[i:i+2] - - ctx = filter(lambda x: x != '<PHRASE>', context.split()) - ctoks = tuple(map(lambda t: word_types.setdefault(t, len(word_types)), ctx)) - cid = context_types.setdefault(ctoks, len(context_types)) - - cnt = int(count.strip()[2:]) - edges.append((pid, cid, cnt)) - -word_type_list = [None] * len(word_types) -for typ, index in word_types.items(): - word_type_list[index] = typ - -phrase_type_list = [None] * len(phrase_types) -for typ, index in phrase_types.items(): - phrase_type_list[index] = typ - -context_type_list = [None] * len(context_types) -for typ, index in context_types.items(): - context_type_list[index] = typ - -num_tags = 5 -num_types = len(word_types) -num_phrases = len(phrase_types) -num_contexts = len(context_types) -num_edges = len(edges) - -print 'Read in', num_edges, 'edges', num_phrases, 'phrases', num_contexts, 'contexts and', num_types, 'word types' - -# -# Step 2: expectation maximisation -# - -def normalise(a): - return a / float(sum(a)) - -class PhraseToContextModel: - def __init__(self): - # Pr(tag | phrase) - self.tagDist = [normalise(random(num_tags)+1) for p in range(num_phrases)] - # Pr(context at pos i = w | tag) indexed by i, tag, word - self.contextWordDist = [[normalise(random(num_types)+1) for t in range(num_tags)] for i in range(4)] - - def prob(self, pid, cid): - # return distribution p(tag, context | phrase) as vector of length |tags| - context = context_type_list[cid] - dist = zeros(num_tags) - for t in range(num_tags): - prob = self.tagDist[pid][t] - for k, tokid in enumerate(context): - prob *= self.contextWordDist[k][t][tokid] - dist[t] = prob - return dist - - def expectation_maximisation_step(self, lamba=None): - tagCounts = zeros((num_phrases, num_tags)) - contextWordCounts = 
zeros((4, num_tags, num_types)) - - # E-step - llh = 0 - for pid, cid, cnt in edges: - q = self.prob(pid, cid) - z = sum(q) - q /= z - llh += log(z) - context = context_type_list[cid] - if lamba != None: - q *= exp(lamba) - q /= sum(q) - for t in range(num_tags): - tagCounts[pid][t] += cnt * q[t] - for i in range(4): - for t in range(num_tags): - contextWordCounts[i][t][context[i]] += cnt * q[t] - - # M-step - for p in range(num_phrases): - self.tagDist[p] = normalise(tagCounts[p]) - for i in range(4): - for t in range(num_tags): - self.contextWordDist[i][t] = normalise(contextWordCounts[i,t]) - - return llh - -class ContextToPhraseModel: - def __init__(self): - # Pr(tag | context) = Multinomial - self.tagDist = [normalise(random(num_tags)+1) for p in range(num_contexts)] - # Pr(phrase = w | tag) = Multinomial - self.phraseSingleDist = [normalise(random(num_types)+1) for t in range(num_tags)] - # Pr(phrase_1 = w | tag) = Multinomial - self.phraseLeftDist = [normalise(random(num_types)+1) for t in range(num_tags)] - # Pr(phrase_-1 = w | tag) = Multinomial - self.phraseRightDist = [normalise(random(num_types)+1) for t in range(num_tags)] - # Pr(|phrase| = l | tag) = Geometric - self.phraseLengthDist = [0.5] * num_tags - # n.b. internal words for phrases of length >= 3 are drawn from uniform distribution - - def prob(self, pid, cid): - # return distribution p(tag, phrase | context) as vector of length |tags| - phrase = phrase_type_list[pid] - dist = zeros(num_tags) - for t in range(num_tags): - prob = self.tagDist[cid][t] - f = self.phraseLengthDist[t] - prob *= geom.pmf(len(phrase), f) - if len(phrase) == 1: - prob *= self.phraseSingleDist[t][phrase[0]] - else: - prob *= self.phraseLeftDist[t][phrase[0]] - prob *= self.phraseRightDist[t][phrase[-1]] - dist[t] = prob - return dist - - def expectation_maximisation_step(self, lamba=None): - tagCounts = zeros((num_contexts, num_tags)) - phraseSingleCounts = zeros((num_tags, num_types)) - phraseLeftCounts = zeros((num_tags, num_types)) - phraseRightCounts = zeros((num_tags, num_types)) - phraseLength = zeros(num_types) - - # E-step - llh = 0 - for pid, cid, cnt in edges: - q = self.prob(pid, cid) - z = sum(q) - q /= z - llh += log(z) - if lamba != None: - q *= exp(lamba) - q /= sum(q) - #print 'p', phrase_type_list[pid], 'c', context_type_list[cid], 'q', q - phrase = phrase_type_list[pid] - for t in range(num_tags): - tagCounts[cid][t] += cnt * q[t] - phraseLength[t] += cnt * len(phrase) * q[t] - if len(phrase) == 1: - phraseSingleCounts[t][phrase[0]] += cnt * q[t] - else: - phraseLeftCounts[t][phrase[0]] += cnt * q[t] - phraseRightCounts[t][phrase[-1]] += cnt * q[t] - - # M-step - for t in range(num_tags): - self.phraseLengthDist[t] = min(max(sum(tagCounts[:,t]) / phraseLength[t], 1e-6), 1-1e-6) - self.phraseSingleDist[t] = normalise(phraseSingleCounts[t]) - self.phraseLeftDist[t] = normalise(phraseLeftCounts[t]) - self.phraseRightDist[t] = normalise(phraseRightCounts[t]) - for c in range(num_contexts): - self.tagDist[c] = normalise(tagCounts[c]) - - #print 't', self.tagDist - #print 'l', self.phraseLengthDist - #print 's', self.phraseSingleDist - #print 'L', self.phraseLeftDist - #print 'R', self.phraseRightDist - - return llh - -class ProductModel: - """ - WARNING: I haven't verified the maths behind this model. It's quite likely to be incorrect. 
-    """
-
-    def __init__(self):
-        self.pcm = PhraseToContextModel()
-        self.cpm = ContextToPhraseModel()
-
-    def prob(self, pid, cid):
-        p1 = self.pcm.prob(pid, cid)
-        p2 = self.cpm.prob(pid, cid)
-        return (p1 / sum(p1)) * (p2 / sum(p2))
-
-    def expectation_maximisation_step(self):
-        tagCountsGivenPhrase = zeros((num_phrases, num_tags))
-        contextWordCounts = zeros((4, num_tags, num_types))
-
-        tagCountsGivenContext = zeros((num_contexts, num_tags))
-        phraseSingleCounts = zeros((num_tags, num_types))
-        phraseLeftCounts = zeros((num_tags, num_types))
-        phraseRightCounts = zeros((num_tags, num_types))
-        phraseLength = zeros(num_tags)
-
-        kl = llh1 = llh2 = 0
-        for pid, cid, cnt in edges:
-            p1 = self.pcm.prob(pid, cid)
-            llh1 += log(sum(p1)) * cnt
-            p2 = self.cpm.prob(pid, cid)
-            llh2 += log(sum(p2)) * cnt
-
-            q = (p1 / sum(p1)) * (p2 / sum(p2))
-            kl += log(sum(q)) * cnt
-            qi = sqrt(q)
-            qi /= sum(qi)
-
-            phrase = phrase_type_list[pid]
-            context = context_type_list[cid]
-            for t in range(num_tags):
-                tagCountsGivenPhrase[pid][t] += cnt * qi[t]
-                tagCountsGivenContext[cid][t] += cnt * qi[t]
-                phraseLength[t] += cnt * len(phrase) * qi[t]
-                if len(phrase) == 1:
-                    phraseSingleCounts[t][phrase[0]] += cnt * qi[t]
-                else:
-                    phraseLeftCounts[t][phrase[0]] += cnt * qi[t]
-                    phraseRightCounts[t][phrase[-1]] += cnt * qi[t]
-                for i in range(4):
-                    contextWordCounts[i][t][context[i]] += cnt * qi[t]
-
-        kl *= -2
-
-        for t in range(num_tags):
-            for i in range(4):
-                self.pcm.contextWordDist[i][t] = normalise(contextWordCounts[i,t])
-            self.cpm.phraseLengthDist[t] = min(max(sum(tagCountsGivenContext[:,t]) / phraseLength[t], 1e-6), 1-1e-6)
-            self.cpm.phraseSingleDist[t] = normalise(phraseSingleCounts[t])
-            self.cpm.phraseLeftDist[t] = normalise(phraseLeftCounts[t])
-            self.cpm.phraseRightDist[t] = normalise(phraseRightCounts[t])
-        for p in range(num_phrases):
-            self.pcm.tagDist[p] = normalise(tagCountsGivenPhrase[p])
-        for c in range(num_contexts):
-            self.cpm.tagDist[c] = normalise(tagCountsGivenContext[c])
-
-        # return the overall objective
-        return llh1 + llh2 + kl
-
-class RegularisedProductModel:
-    # as above, but with a slack regularisation term which kills the
-    # closed-form solution for the E-step
-
-    def __init__(self, epsilon):
-        self.pcm = PhraseToContextModel()
-        self.cpm = ContextToPhraseModel()
-        self.epsilon = epsilon
-        self.lamba = zeros(num_tags)
-
-    def prob(self, pid, cid):
-        p1 = self.pcm.prob(pid, cid)
-        p2 = self.cpm.prob(pid, cid)
-        return (p1 / sum(p1)) * (p2 / sum(p2))
-
-    def dual(self, lamba):
-        return self.logz(lamba) + self.epsilon * dot(lamba, lamba) ** 0.5
-
-    def dual_gradient(self, lamba):
-        return self.expected_features(lamba) + self.epsilon * 2 * lamba
-
-    def expectation_maximisation_step(self):
-        # PR-step: optimise lambda to minimise log(z_lambda) + eps ||lambda||_2
-        self.lamba = scipy.optimize.fmin_slsqp(self.dual, self.lamba,
-                                               bounds=[(0, 1e100)] * num_tags,
-                                               fprime=self.dual_gradient, iprint=1)
-
-        # E,M-steps: collect expected counts under q_lambda and normalise
-        llh1 = self.pcm.expectation_maximisation_step(self.lamba)
-        llh2 = self.cpm.expectation_maximisation_step(-self.lamba)
-
-        # return the overall objective: llh - KL(q||p1.p2)
-        #   llh = llh1 + llh2
-        #   kl = sum q log q / p1 p2 = sum q { lambda . phi } - log Z
-        return llh1 + llh2 + self.logz(self.lamba) \
-            - dot(self.lamba, self.expected_features(self.lamba))
-
-    def logz(self, lamba):
-        lz = 0
-        for pid, cid, cnt in edges:
-            p1 = self.pcm.prob(pid, cid)
-            z1 = dot(p1 / sum(p1), exp(lamba))
-            lz += log(z1) * cnt
-
-            p2 = self.cpm.prob(pid, cid)
-            z2 = dot(p2 / sum(p2), exp(-lamba))
-            lz += log(z2) * cnt
-        return lz
-
-    def expected_features(self, lamba):
-        fs = zeros(num_tags)
-        for pid, cid, cnt in edges:
-            p1 = self.pcm.prob(pid, cid)
-            q1 = (p1 / sum(p1)) * exp(lamba)
-            fs += cnt * q1 / sum(q1)
-
-            p2 = self.cpm.prob(pid, cid)
-            q2 = (p2 / sum(p2)) * exp(-lamba)
-            fs -= cnt * q2 / sum(q2)
-        return fs
-
-
-class InterpolatedModel:
-    def __init__(self, epsilon):
-        self.pcm = PhraseToContextModel()
-        self.cpm = ContextToPhraseModel()
-        self.epsilon = epsilon
-        self.lamba = zeros(num_tags)
-
-    def prob(self, pid, cid):
-        p1 = self.pcm.prob(pid, cid)
-        p2 = self.cpm.prob(pid, cid)
-        return (p1 + p2) / 2
-
-    def dual(self, lamba):
-        return self.logz(lamba) + self.epsilon * dot(lamba, lamba) ** 0.5
-
-    def dual_gradient(self, lamba):
-        return self.expected_features(lamba) + self.epsilon * 2 * lamba
-
-    def expectation_maximisation_step(self):
-        # PR-step: optimise lambda to minimise log(z_lambda) + eps ||lambda||_2
-        self.lamba = scipy.optimize.fmin_slsqp(self.dual, self.lamba,
-                                               bounds=[(0, 1e100)] * num_tags,
-                                               fprime=self.dual_gradient, iprint=2)
-
-        # E,M-steps: collect expected counts under q_lambda and normalise
-        llh1 = self.pcm.expectation_maximisation_step(self.lamba)
-        llh2 = self.cpm.expectation_maximisation_step(self.lamba)
-
-        # return the overall objective: llh1 + llh2 - KL(q||p1.p2)
-        #   kl = sum_y q log q / 0.5 * (p1 + p2) = sum_y q(y) { -lambda . phi(y) } - log Z
-        #      = -log Z + lambda . (E_q1[-phi] + E_q2[-phi]) / 2
-        kl = -self.logz(self.lamba) + dot(self.lamba, self.expected_features(self.lamba))
-        return llh1 + llh2 - kl, llh1, llh2, kl
-        # FIXME: KL comes out negative...
-
-    def logz(self, lamba):
-        lz = 0
-        for pid, cid, cnt in edges:
-            p1 = self.pcm.prob(pid, cid)
-            q1 = p1 / sum(p1) * exp(-lamba)
-            q1z = sum(q1)
-
-            p2 = self.cpm.prob(pid, cid)
-            q2 = p2 / sum(p2) * exp(-lamba)
-            q2z = sum(q2)
-
-            lz += log(0.5 * (q1z + q2z)) * cnt
-        return lz
-
-    # z = 1/2 * (sum_y p1(y|x) exp (-lambda . phi(y)) + sum_y p2(y|x) exp (-lambda . phi(y)))
-    #   = 1/2 (z1 + z2)
-    # d (log z) / dlambda = 1/2 (E_q1 [ -phi ] + E_q2 [ -phi ] )
-    def expected_features(self, lamba):
-        fs = zeros(num_tags)
-        for pid, cid, cnt in edges:
-            p1 = self.pcm.prob(pid, cid)
-            q1 = (p1 / sum(p1)) * exp(-lamba)
-            fs -= 0.5 * cnt * q1 / sum(q1)
-
-            p2 = self.cpm.prob(pid, cid)
-            q2 = (p2 / sum(p2)) * exp(-lamba)
-            fs -= 0.5 * cnt * q2 / sum(q2)
-        return fs
-
-if style == 'p2c':
-    m = PhraseToContextModel()
-elif style == 'c2p':
-    m = ContextToPhraseModel()
-elif style == 'prod':
-    m = ProductModel()
-elif style == 'prodslack':
-    m = RegularisedProductModel(0.5)
-elif style == 'sum':
-    m = InterpolatedModel(0.5)
-
-for iteration in range(30):
-    obj = m.expectation_maximisation_step()
-    print 'iteration', iteration, 'objective', obj
-
-for pid, cid, cnt in edges:
-    p = m.prob(pid, cid)
-    phrase = phrase_type_list[pid]
-    phrase_str = ' '.join(map(word_type_list.__getitem__, phrase))
-    context = context_type_list[cid]
-    context_str = ' '.join(map(word_type_list.__getitem__, context))
-    print '%s\t%s ||| C=%d' % (phrase_str, context_str, argmax(p))
diff --git a/gi/posterior-regularisation/train_pr_global.py b/gi/posterior-regularisation/train_pr_global.py
deleted file mode 100644
index 8521bccb..00000000
--- a/gi/posterior-regularisation/train_pr_global.py
+++ /dev/null
@@ -1,296 +0,0 @@
-import sys
-import scipy.optimize
-from numpy import *
-from numpy.random import random
-
-#
-# Step 1: load the concordance counts
-#
-
-edges_phrase_to_context = []
-edges_context_to_phrase = []
-types = {}
-context_types = {}
-num_edges = 0
-
-for line in sys.stdin:
-    phrase, rest = line.strip().split('\t')
-    parts = rest.split('|||')
-    edges_phrase_to_context.append((phrase, []))
-    for i in range(0, len(parts), 2):
-        context, count = parts[i:i+2]
-
-        ctx = tuple(filter(lambda x: x != '<PHRASE>', context.split()))
-        cnt = int(count.strip()[2:])
-        edges_phrase_to_context[-1][1].append((ctx, cnt))
-
-        cid = context_types.get(ctx, len(context_types))
-        if cid == len(context_types):
-            context_types[ctx] = cid
-            edges_context_to_phrase.append((ctx, []))
-        edges_context_to_phrase[cid][1].append((phrase, cnt))
-
-        for token in ctx:
-            types.setdefault(token, len(types))
-        for token in phrase.split():
-            types.setdefault(token, len(types))
-
-        num_edges += 1
-
-print 'Read in', num_edges, 'edges and', len(types), 'word types'
-
-print 'edges_phrase_to_context', edges_phrase_to_context
-
-#
-# Step 2: initialise the model parameters
-#
-
-num_tags = 10
-num_types = len(types)
-num_phrases = len(edges_phrase_to_context)
-num_contexts = len(edges_context_to_phrase)
-delta = int(sys.argv[1])
-gamma = int(sys.argv[2])
-
-def normalise(a):
-    return a / float(sum(a))
-
-# Pr(tag | phrase)
-tagDist = [normalise(random(num_tags)+1) for p in range(num_phrases)]
-#tagDist = [normalise(array(range(1,num_tags+1))) for p in range(num_phrases)]
-# Pr(context at pos i = w | tag) indexed by i, tag, word
-#contextWordDist = [[normalise(array(range(1,num_types+1))) for t in range(num_tags)] for i in range(4)]
-contextWordDist = [[normalise(random(num_types)+1) for t in range(num_tags)] for i in range(4)]
-# PR Lagrange multipliers
-lamba = zeros(2 * num_edges * num_tags)
-omega_offset = num_edges * num_tags
-lamba_index = {}
-next = 0
-for phrase, ccs in edges_phrase_to_context:
-    for context, count in ccs:
-        lamba_index[phrase,context] = next
-        next += num_tags
-#print lamba_index
-
-#
-# Step 3: expectation maximisation
-#
-
-for iteration in range(20):
-    tagCounts = [zeros(num_tags) for p in
range(num_phrases)] - contextWordCounts = [[zeros(num_types) for t in range(num_tags)] for i in range(4)] - - #print 'tagDist', tagDist - #print 'contextWordCounts[0][0]', contextWordCounts[0][0] - - # Tune lambda - # dual: min log Z(lamba) s.t. lamba >= 0; - # sum_c lamba_pct <= delta; sum_p lamba_pct <= gamma - def dual(ls): - logz = 0 - for p, (phrase, ccs) in enumerate(edges_phrase_to_context): - for context, count in ccs: - conditionals = zeros(num_tags) - for t in range(num_tags): - prob = tagDist[p][t] - for i in range(4): - prob *= contextWordDist[i][t][types[context[i]]] - conditionals[t] = prob - cz = sum(conditionals) - conditionals /= cz - - #print 'dual', phrase, context, count, 'p =', conditionals - - local_z = 0 - for t in range(num_tags): - li = lamba_index[phrase,context] + t - local_z += conditionals[t] * exp(-ls[li] - ls[omega_offset+li]) - logz += log(local_z) * count - - #print 'ls', ls - #print 'lambda', list(ls) - #print 'dual', logz - return logz - - def loglikelihood(): - llh = 0 - for p, (phrase, ccs) in enumerate(edges_phrase_to_context): - for context, count in ccs: - conditionals = zeros(num_tags) - for t in range(num_tags): - prob = tagDist[p][t] - for i in range(4): - prob *= contextWordDist[i][t][types[context[i]]] - conditionals[t] = prob - cz = sum(conditionals) - llh += log(cz) * count - return llh - - def primal(ls): - # FIXME: returns negative values for KL (impossible) - logz = dual(ls) - expectations = -dual_deriv(ls) - kl = -logz - dot(ls, expectations) - llh = loglikelihood() - - pt_l1linf = 0 - for phrase, ccs in edges_phrase_to_context: - for t in range(num_tags): - best = -1e500 - for context, count in ccs: - li = lamba_index[phrase,context] + t - s = expectations[li] - if s > best: best = s - pt_l1linf += best - - ct_l1linf = 0 - for context, pcs in edges_context_to_phrase: - for t in range(num_tags): - best = -1e500 - for phrase, count in pcs: - li = omega_offset + lamba_index[phrase,context] + t - s = expectations[li] - if s > best: best = s - ct_l1linf += best - - return llh, kl, pt_l1linf, ct_l1linf, llh - kl - delta * pt_l1linf - gamma * ct_l1linf - - def dual_deriv(ls): - # d/dl log(z) = E_q[phi] - deriv = zeros(2 * num_edges * num_tags) - for p, (phrase, ccs) in enumerate(edges_phrase_to_context): - for context, count in ccs: - conditionals = zeros(num_tags) - for t in range(num_tags): - prob = tagDist[p][t] - for i in range(4): - prob *= contextWordDist[i][t][types[context[i]]] - conditionals[t] = prob - cz = sum(conditionals) - conditionals /= cz - - scores = zeros(num_tags) - for t in range(num_tags): - li = lamba_index[phrase,context] + t - scores[t] = conditionals[t] * exp(-ls[li] - ls[omega_offset + li]) - local_z = sum(scores) - - #print 'ddual', phrase, context, count, 'q =', scores / local_z - - for t in range(num_tags): - deriv[lamba_index[phrase,context] + t] -= count * scores[t] / local_z - deriv[omega_offset + lamba_index[phrase,context] + t] -= count * scores[t] / local_z - - #print 'ddual', list(deriv) - return deriv - - def constraints(ls): - cons = zeros(num_phrases * num_tags + num_edges * num_tags) - - index = 0 - for phrase, ccs in edges_phrase_to_context: - for t in range(num_tags): - if delta > 0: - total = delta - for cprime, count in ccs: - total -= ls[lamba_index[phrase, cprime] + t] - cons[index] = total - index += 1 - - for context, pcs in edges_context_to_phrase: - for t in range(num_tags): - if gamma > 0: - total = gamma - for pprime, count in pcs: - total -= ls[omega_offset + lamba_index[pprime, context] + t] 
- cons[index] = total - index += 1 - - #print 'cons', cons - return cons - - def constraints_deriv(ls): - cons = zeros((num_phrases * num_tags + num_edges * num_tags, 2 * num_edges * num_tags)) - - index = 0 - for phrase, ccs in edges_phrase_to_context: - for t in range(num_tags): - if delta > 0: - d = cons[index,:]#zeros(num_edges * num_tags) - for cprime, count in ccs: - d[lamba_index[phrase, cprime] + t] = -1 - #cons[index] = d - index += 1 - - for context, pcs in edges_context_to_phrase: - for t in range(num_tags): - if gamma > 0: - d = cons[index,:]#d = zeros(num_edges * num_tags) - for pprime, count in pcs: - d[omega_offset + lamba_index[pprime, context] + t] = -1 - #cons[index] = d - index += 1 - #print 'dcons', cons - return cons - - print 'Pre lambda optimisation dual', dual(lamba), 'primal', primal(lamba) - #print 'lambda', lamba, lamba.shape - #print 'bounds', [(0, max(delta, gamma))] * (2 * num_edges * num_tags) - - lamba = scipy.optimize.fmin_slsqp(dual, lamba, - bounds=[(0, max(delta, gamma))] * (2 * num_edges * num_tags), - f_ieqcons=constraints, - fprime=dual_deriv, - fprime_ieqcons=constraints_deriv, - iprint=0) - print 'Post lambda optimisation dual', dual(lamba), 'primal', primal(lamba) - - # E-step - llh = log_z = 0 - for p, (phrase, ccs) in enumerate(edges_phrase_to_context): - for context, count in ccs: - conditionals = zeros(num_tags) - for t in range(num_tags): - prob = tagDist[p][t] - for i in range(4): - prob *= contextWordDist[i][t][types[context[i]]] - conditionals[t] = prob - cz = sum(conditionals) - conditionals /= cz - llh += log(cz) * count - - q = zeros(num_tags) - li = lamba_index[phrase, context] - for t in range(num_tags): - q[t] = conditionals[t] * exp(-lamba[li + t] - lamba[omega_offset + li + t]) - qz = sum(q) - log_z += count * log(qz) - - for t in range(num_tags): - tagCounts[p][t] += count * q[t] / qz - - for i in range(4): - for t in range(num_tags): - contextWordCounts[i][t][types[context[i]]] += count * q[t] / qz - - print 'iteration', iteration, 'llh', llh, 'logz', log_z - - # M-step - for p in range(num_phrases): - tagDist[p] = normalise(tagCounts[p]) - for i in range(4): - for t in range(num_tags): - contextWordDist[i][t] = normalise(contextWordCounts[i][t]) - -for p, (phrase, ccs) in enumerate(edges_phrase_to_context): - for context, count in ccs: - conditionals = zeros(num_tags) - for t in range(num_tags): - prob = tagDist[p][t] - for i in range(4): - prob *= contextWordDist[i][t][types[context[i]]] - conditionals[t] = prob - cz = sum(conditionals) - conditionals /= cz - - print '%s\t%s ||| C=%d |||' % (phrase, context, argmax(conditionals)), conditionals diff --git a/gi/posterior-regularisation/train_pr_parallel.py b/gi/posterior-regularisation/train_pr_parallel.py deleted file mode 100644 index 3b9cefed..00000000 --- a/gi/posterior-regularisation/train_pr_parallel.py +++ /dev/null @@ -1,333 +0,0 @@ -import sys -import scipy.optimize -from numpy import * -from numpy.random import random, seed - -# -# Step 1: load the concordance counts -# - -edges_phrase_to_context = [] -edges_context_to_phrase = [] -types = {} -context_types = {} -num_edges = 0 - -for line in sys.stdin: - phrase, rest = line.strip().split('\t') - parts = rest.split('|||') - edges_phrase_to_context.append((phrase, [])) - for i in range(0, len(parts), 2): - context, count = parts[i:i+2] - - ctx = tuple(filter(lambda x: x != '<PHRASE>', context.split())) - cnt = int(count.strip()[2:]) - edges_phrase_to_context[-1][1].append((ctx, cnt)) - - cid = context_types.get(ctx, 
len(context_types)) - if cid == len(context_types): - context_types[ctx] = cid - edges_context_to_phrase.append((ctx, [])) - edges_context_to_phrase[cid][1].append((phrase, cnt)) - - for token in ctx: - types.setdefault(token, len(types)) - for token in phrase.split(): - types.setdefault(token, len(types)) - - num_edges += 1 - -# -# Step 2: initialise the model parameters -# - -num_tags = 25 -num_types = len(types) -num_phrases = len(edges_phrase_to_context) -num_contexts = len(edges_context_to_phrase) -delta = float(sys.argv[1]) -assert sys.argv[2] in ('local', 'global') -local = sys.argv[2] == 'local' -if len(sys.argv) >= 2: - seed(int(sys.argv[3])) - -print 'Read in', num_edges, 'edges', num_phrases, 'phrases', num_contexts, 'contexts and', len(types), 'word types' - -def normalise(a): - return a / float(sum(a)) - -# Pr(tag | phrase) -tagDist = [normalise(random(num_tags)+1) for p in range(num_phrases)] -# Pr(context at pos i = w | tag) indexed by i, tag, word -contextWordDist = [[normalise(random(num_types)+1) for t in range(num_tags)] for i in range(4)] - -# -# Step 3: expectation maximisation -# - -class GlobalDualObjective: - """ - Objective, log(z), for all phrases s.t. lambda >= 0, sum_c lambda_pct <= scale - """ - - def __init__(self, scale): - self.scale = scale - self.posterior = zeros((num_edges, num_tags)) - self.q = zeros((num_edges, num_tags)) - self.llh = 0 - - index = 0 - for j, (phrase, edges) in enumerate(edges_phrase_to_context): - for context, count in edges: - for t in range(num_tags): - prob = tagDist[j][t] - for k, token in enumerate(context): - prob *= contextWordDist[k][t][types[token]] - self.posterior[index,t] = prob - z = sum(self.posterior[index,:]) - self.posterior[index,:] /= z - self.llh += log(z) * count - index += 1 - - def objective(self, ls): - ls = ls.reshape((num_edges, num_tags)) - logz = 0 - - index = 0 - for j, (phrase, edges) in enumerate(edges_phrase_to_context): - for context, count in edges: - for t in range(num_tags): - self.q[index,t] = self.posterior[index,t] * exp(-ls[index,t]) - local_z = sum(self.q[index,:]) - self.q[index,:] /= local_z - logz += log(local_z) * count - index += 1 - - return logz - - # FIXME: recomputes q many more times than necessary - - def gradient(self, ls): - ls = ls.reshape((num_edges, num_tags)) - gradient = zeros((num_edges, num_tags)) - - index = 0 - for j, (phrase, edges) in enumerate(edges_phrase_to_context): - for context, count in edges: - for t in range(num_tags): - self.q[index,t] = self.posterior[index,t] * exp(-ls[index,t]) - local_z = sum(self.q[index,:]) - self.q[index,:] /= local_z - for t in range(num_tags): - gradient[index,t] -= self.q[index,t] * count - index += 1 - - return gradient.ravel() - - def constraints(self, ls): - ls = ls.reshape((num_edges, num_tags)) - cons = ones((num_phrases, num_tags)) * self.scale - index = 0 - for j, (phrase, edges) in enumerate(edges_phrase_to_context): - for i, (context, count) in enumerate(edges): - for t in range(num_tags): - cons[j,t] -= ls[index,t] * count - index += 1 - return cons.ravel() - - def constraints_gradient(self, ls): - ls = ls.reshape((num_edges, num_tags)) - gradient = zeros((num_phrases, num_tags, num_edges, num_tags)) - index = 0 - for j, (phrase, edges) in enumerate(edges_phrase_to_context): - for i, (context, count) in enumerate(edges): - for t in range(num_tags): - gradient[j,t,index,t] -= count - index += 1 - return gradient.reshape((num_phrases*num_tags, num_edges*num_tags)) - - def optimize(self): - ls = zeros(num_edges * num_tags) - 
#print '\tpre lambda optimisation dual', self.objective(ls) #, 'primal', primal(lamba) - ls = scipy.optimize.fmin_slsqp(self.objective, ls, - bounds=[(0, self.scale)] * num_edges * num_tags, - f_ieqcons=self.constraints, - fprime=self.gradient, - fprime_ieqcons=self.constraints_gradient, - iprint=0) # =2 for verbose - #print '\tpost lambda optimisation dual', self.objective(ls) #, 'primal', primal(lamba) - - # returns llh, kl and l1lmax contribution - l1lmax = 0 - index = 0 - for j, (phrase, edges) in enumerate(edges_phrase_to_context): - for t in range(num_tags): - lmax = None - for i, (context, count) in enumerate(edges): - lmax = max(lmax, self.q[index+i,t]) - l1lmax += lmax - index += len(edges) - - return self.llh, -self.objective(ls) + dot(ls, self.gradient(ls)), l1lmax - -class LocalDualObjective: - """ - Local part of objective, log(z) relevant to lambda_p**. - Optimised subject to lambda >= 0, sum_c lambda_pct <= scale forall t - """ - - def __init__(self, phraseId, scale): - self.phraseId = phraseId - self.scale = scale - edges = edges_phrase_to_context[self.phraseId][1] - self.posterior = zeros((len(edges), num_tags)) - self.q = zeros((len(edges), num_tags)) - self.llh = 0 - - for i, (context, count) in enumerate(edges): - for t in range(num_tags): - prob = tagDist[phraseId][t] - for j, token in enumerate(context): - prob *= contextWordDist[j][t][types[token]] - self.posterior[i,t] = prob - z = sum(self.posterior[i,:]) - self.posterior[i,:] /= z - self.llh += log(z) * count - - def objective(self, ls): - edges = edges_phrase_to_context[self.phraseId][1] - ls = ls.reshape((len(edges), num_tags)) - logz = 0 - - for i, (context, count) in enumerate(edges): - for t in range(num_tags): - self.q[i,t] = self.posterior[i,t] * exp(-ls[i,t]) - local_z = sum(self.q[i,:]) - self.q[i,:] /= local_z - logz += log(local_z) * count - - return logz - - # FIXME: recomputes q many more times than necessary - - def gradient(self, ls): - edges = edges_phrase_to_context[self.phraseId][1] - ls = ls.reshape((len(edges), num_tags)) - gradient = zeros((len(edges), num_tags)) - - for i, (context, count) in enumerate(edges): - for t in range(num_tags): - self.q[i,t] = self.posterior[i,t] * exp(-ls[i,t]) - local_z = sum(self.q[i,:]) - self.q[i,:] /= local_z - for t in range(num_tags): - gradient[i,t] -= self.q[i,t] * count - - return gradient.ravel() - - def constraints(self, ls): - edges = edges_phrase_to_context[self.phraseId][1] - ls = ls.reshape((len(edges), num_tags)) - cons = ones(num_tags) * self.scale - for t in range(num_tags): - for i, (context, count) in enumerate(edges): - cons[t] -= ls[i,t] * count - return cons - - def constraints_gradient(self, ls): - edges = edges_phrase_to_context[self.phraseId][1] - ls = ls.reshape((len(edges), num_tags)) - gradient = zeros((num_tags, len(edges), num_tags)) - for t in range(num_tags): - for i, (context, count) in enumerate(edges): - gradient[t,i,t] -= count - return gradient.reshape((num_tags, len(edges)*num_tags)) - - def optimize(self, ls=None): - edges = edges_phrase_to_context[self.phraseId][1] - if ls == None: - ls = zeros(len(edges) * num_tags) - #print '\tpre lambda optimisation dual', self.objective(ls) #, 'primal', primal(lamba) - ls = scipy.optimize.fmin_slsqp(self.objective, ls, - bounds=[(0, self.scale)] * len(edges) * num_tags, - f_ieqcons=self.constraints, - fprime=self.gradient, - fprime_ieqcons=self.constraints_gradient, - iprint=0) # =2 for verbose - #print '\tlambda', list(ls) - #print '\tpost lambda optimisation dual', self.objective(ls) 
#, 'primal', primal(lamba) - - # returns llh, kl and l1lmax contribution - l1lmax = 0 - for t in range(num_tags): - lmax = None - for i, (context, count) in enumerate(edges): - lmax = max(lmax, self.q[i,t]) - l1lmax += lmax - - return self.llh, -self.objective(ls) + dot(ls, self.gradient(ls)), l1lmax, ls - -ls = [None] * num_phrases -for iteration in range(20): - tagCounts = [zeros(num_tags) for p in range(num_phrases)] - contextWordCounts = [[zeros(num_types) for t in range(num_tags)] for i in range(4)] - - # E-step - llh = kl = l1lmax = 0 - if local: - for p in range(num_phrases): - o = LocalDualObjective(p, delta) - #print '\toptimising lambda for phrase', p, '=', edges_phrase_to_context[p][0] - #print '\toptimising lambda for phrase', p, 'ls', ls[p] - obj = o.optimize(ls[p]) - #print '\tphrase', p, 'deltas', obj - llh += obj[0] - kl += obj[1] - l1lmax += obj[2] - ls[p] = obj[3] - - edges = edges_phrase_to_context[p][1] - for j, (context, count) in enumerate(edges): - for t in range(num_tags): - tagCounts[p][t] += count * o.q[j,t] - for i in range(4): - for t in range(num_tags): - contextWordCounts[i][t][types[context[i]]] += count * o.q[j,t] - - #print 'iteration', iteration, 'LOCAL objective', (llh + kl + delta * l1lmax), 'llh', llh, 'kl', kl, 'l1lmax', l1lmax - else: - o = GlobalDualObjective(delta) - obj = o.optimize() - llh, kl, l1lmax = o.optimize() - - index = 0 - for p, (phrase, edges) in enumerate(edges_phrase_to_context): - for context, count in edges: - for t in range(num_tags): - tagCounts[p][t] += count * o.q[index,t] - for i in range(4): - for t in range(num_tags): - contextWordCounts[i][t][types[context[i]]] += count * o.q[index,t] - index += 1 - - print 'iteration', iteration, 'objective', (llh - kl - delta * l1lmax), 'llh', llh, 'kl', kl, 'l1lmax', l1lmax - - # M-step - for p in range(num_phrases): - tagDist[p] = normalise(tagCounts[p]) - for i in range(4): - for t in range(num_tags): - contextWordDist[i][t] = normalise(contextWordCounts[i][t]) - -for p, (phrase, ccs) in enumerate(edges_phrase_to_context): - for context, count in ccs: - conditionals = zeros(num_tags) - for t in range(num_tags): - prob = tagDist[p][t] - for i in range(4): - prob *= contextWordDist[i][t][types[context[i]]] - conditionals[t] = prob - cz = sum(conditionals) - conditionals /= cz - - print '%s\t%s ||| C=%d |||' % (phrase, context, argmax(conditionals)), conditionals diff --git a/gi/pyp-topics/scripts/contexts2documents.py b/gi/pyp-topics/scripts/contexts2documents.py deleted file mode 100755 index 9be4ebbb..00000000 --- a/gi/pyp-topics/scripts/contexts2documents.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/python - -import sys -from operator import itemgetter - -if len(sys.argv) > 3: - print "Usage: contexts2documents.py [contexts_index_out] [phrases_index_out]" - exit(1) - -context_index = {} -phrase_index = {} -for line in sys.stdin: - phrase, line_tail = line.split('\t') - - raw_contexts = line_tail.split('|||') - contexts = [c.strip() for x,c in enumerate(raw_contexts) if x%2 == 0] - counts = [int(c.split('=')[1].strip()) for x,c in enumerate(raw_contexts) if x%2 != 0] - phrase_index.setdefault(phrase, len(phrase_index)) - print len(contexts), - for context,count in zip(contexts,counts): - c = context_index.setdefault(context, len(context_index)) - print "%d:%d" % (c,count), - print -if 1 < len(sys.argv) < 4: - contexts_out = open(sys.argv[1],'w') - contexts = context_index.items() - contexts.sort(key = itemgetter(1)) - for context in contexts: - print >>contexts_out, context[0] - 
contexts_out.close() -if len(sys.argv) == 3: - phrases_out = open(sys.argv[2],'w') - phrases = phrase_index.items() - phrases.sort(key = itemgetter(1)) - for phrase in phrases: - print >>phrases_out, phrase[0] - phrases_out.close() diff --git a/gi/pyp-topics/scripts/extract_contexts.py b/gi/pyp-topics/scripts/extract_contexts.py deleted file mode 100755 index b2723f2a..00000000 --- a/gi/pyp-topics/scripts/extract_contexts.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/python - -import sys,collections - -def extract_backoff(context_list, order): - assert len(context_list) == (2*order) - backoffs = [] - for i in range(1,order+1): - if i == order: - backoffs.append(([context_list[i-1]+"|"], ["|"+context_list[i]])) - else: - right_limit = 2*order-i - core = context_list[i:right_limit] - left = [context_list[i-1]+"|"*(order-i+1)] - right = ["|"*(order-i+1)+context_list[right_limit]] - backoffs.append((core, left, right)) -# print context_list, backoffs - return backoffs - -def tuple_to_str(t): - s="" - for i,x in enumerate(t): - if i > 0: s += "|" - s += str(x) - return s - -if len(sys.argv) < 3: - print "Usage: extract-contexts.py output_filename order cutoff lowercase" - exit(1) - -output_filename = sys.argv[1] -order = int(sys.argv[2]) -cutoff = 0 -if len(sys.argv) > 3: - cutoff = int(sys.argv[3]) -lowercase = False -if len(sys.argv) > 4: - lowercase = bool(sys.argv[4]) - -contexts_dict={} -contexts_list=[] -contexts_freq=collections.defaultdict(int) -contexts_backoff={} - -token_dict={} -token_list=[] -documents_dict=collections.defaultdict(dict) - -contexts_at_order = [i for i in range(order+1)] - -prefix = ["<s%d>|<s>"%i for i in range(order)] -suffix = ["</s%d>|</s>"%i for i in range(order)] - -for line in sys.stdin: - tokens = list(prefix) - tokens.extend(line.split()) - tokens.extend(suffix) - if lowercase: - tokens = map(lambda x: x.lower(), tokens) - - for i in range(order, len(tokens)-order): - context_list = [] - term="" - for j in range(i-order, i+order+1): - token,tag = tokens[j].rsplit('|',2) - if j != i: - context_list.append(token) - else: - if token not in token_dict: - token_dict[token] = len(token_dict) - token_list.append(token) - term = token_dict[token] - - context = tuple_to_str(tuple(context_list)) - - if context not in contexts_dict: - context_index = len(contexts_dict) - contexts_dict[context] = context_index - contexts_list.append(context) - contexts_at_order[0] += 1 - - # handle backoff - backoff_contexts = extract_backoff(context_list, order) - bo_indexes=[(context_index,)] -# bo_indexes=[(context,)] - for i,bo in enumerate(backoff_contexts): - factor_indexes=[] - for factor in bo: - bo_tuple = tuple_to_str(tuple(factor)) - if bo_tuple not in contexts_dict: - contexts_dict[bo_tuple] = len(contexts_dict) - contexts_list.append(bo_tuple) - contexts_at_order[i+1] += 1 -# factor_indexes.append(bo_tuple) - factor_indexes.append(contexts_dict[bo_tuple]) - bo_indexes.append(tuple(factor_indexes)) - - for i in range(len(bo_indexes)-1): - contexts_backoff[bo_indexes[i][0]] = bo_indexes[i+1] - - context_index = contexts_dict[context] - contexts_freq[context_index] += 1 - - if context_index not in documents_dict[term]: - documents_dict[term][context_index] = 1 - else: - documents_dict[term][context_index] += 1 - -term_file = open(output_filename+".terms",'w') -for t in token_list: print >>term_file, t -term_file.close() - -contexts_file = open(output_filename+".contexts",'w') -for c in contexts_list: - print >>contexts_file, c -contexts_file.close() - -data_file = 
open(output_filename+".data",'w') -for t in range(len(token_list)): - line="" - num_active=0 - for c in documents_dict[t]: - count = documents_dict[t][c] - if contexts_freq[c] >= cutoff: - line += (' ' + str(c) + ':' + str(count)) - num_active += 1 - if num_active > 0: - print >>data_file, "%d%s" % (num_active,line) -data_file.close() - -contexts_backoff_file = open(output_filename+".contexts_backoff",'w') -print >>contexts_backoff_file, len(contexts_list), order, -#for x in contexts_at_order: -# print >>contexts_backoff_file, x, -#print >>contexts_backoff_file -for x in range(order-1): - print >>contexts_backoff_file, 3, -print >>contexts_backoff_file, 2 - -for x in contexts_backoff: - print >>contexts_backoff_file, x, - for y in contexts_backoff[x]: print >>contexts_backoff_file, y, - print >>contexts_backoff_file -contexts_backoff_file.close() diff --git a/gi/pyp-topics/scripts/extract_contexts_test.py b/gi/pyp-topics/scripts/extract_contexts_test.py deleted file mode 100755 index 693b6e0b..00000000 --- a/gi/pyp-topics/scripts/extract_contexts_test.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python - -import sys,collections - -def tuple_to_str(t): - s="" - for i,x in enumerate(t): - if i > 0: s += "|" - s += str(x) - return s - -if len(sys.argv) < 5: - print "Usage: extract-contexts_test.py output_filename vocab contexts order lowercase" - exit(1) - -output_filename = sys.argv[1] -output = open(output_filename+".test_data",'w') - -unk_term="-UNK-" -vocab_dict={} -for i,x in enumerate(file(sys.argv[2], 'r').readlines()): - vocab_dict[x.strip()]=i - -contexts_dict={} -contexts_list=[] -for i,x in enumerate(file(sys.argv[3], 'r').readlines()): - contexts_dict[x.strip()]=i - contexts_list.append(x.strip()) - -order = int(sys.argv[4]) - -lowercase = False -if len(sys.argv) > 5: - lowercase = bool(sys.argv[5]) -if lowercase: unk_term = unk_term.lower() - -prefix = ["<s%d>|<s>"%i for i in range(order)] -suffix = ["</s%d>|</s>"%i for i in range(order)] - -assert unk_term in vocab_dict -for line in sys.stdin: - tokens = list(prefix) - tokens.extend(line.split()) - tokens.extend(suffix) - if lowercase: - tokens = map(lambda x: x.lower(), tokens) - - for i in range(order, len(tokens)-order): - context_list=[] - term="" - for j in range(i-order, i+order+1): - token,tag = tokens[j].rsplit('|',2) - if j != i: - context_list.append(token) - else: - if token not in vocab_dict: - term = vocab_dict[unk_term] - else: - term = vocab_dict[token] - context = tuple_to_str(context_list) - if context not in contexts_dict: - contexts_dict[context] = len(contexts_dict) - contexts_list.append(context) - context_index = contexts_dict[context] - print >>output, "%d:%d" % (term,context_index), - print >>output -output.close() - -contexts_file = open(output_filename+".test_contexts",'w') -for c in contexts_list: - print >>contexts_file, c -contexts_file.close() diff --git a/gi/pyp-topics/scripts/extract_leaves.py b/gi/pyp-topics/scripts/extract_leaves.py deleted file mode 100755 index 14783b36..00000000 --- a/gi/pyp-topics/scripts/extract_leaves.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/python - -import nltk -import nltk.probability -import sys -import getopt - -lexicalise=False -rm_traces=False -cutoff=100 -length_cutoff=10000 -try: - opts, args = getopt.getopt(sys.argv[1:], "hs:c:l", ["help", "lexicalise", "cutoff","sentence-length","remove-traces"]) -except getopt.GetoptError: - print "Usage: extract_leaves.py [-lsc]" - sys.exit(2) -for opt, arg in opts: - if opt in ("-h", "--help"): - print "Usage: 
extract_leaves.py [-lsc]" - sys.exit() - elif opt in ("-l", "--lexicalise"): - lexicalise = True - elif opt in ("-c", "--cutoff"): - cutoff = int(arg) - elif opt in ("-s", "--sentence-length"): - length_cutoff = int(arg) - elif opt in ("--remove-traces"): - rm_traces = True - -token_freq = nltk.probability.FreqDist() -lines = [] -for line in sys.stdin: - t = nltk.Tree.parse(line) - pos = t.pos() - if len(pos) <= length_cutoff: - lines.append(pos) - for token, tag in pos: - token_freq.inc(token) - -for line in lines: - for token,tag in line: - if not (rm_traces and tag == "-NONE-"): - if lexicalise: - if token_freq[token] < cutoff: - token = '-UNK-' - print '%s|%s' % (token,tag), - else: - print '%s' % tag, - print diff --git a/gi/pyp-topics/scripts/map-documents.py b/gi/pyp-topics/scripts/map-documents.py deleted file mode 100755 index 703de312..00000000 --- a/gi/pyp-topics/scripts/map-documents.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python - -import sys - -if len(sys.argv) != 2: - print "Usage: map-documents.py vocab-file" - exit(1) - -vocab = file(sys.argv[1], 'r').readlines() -term_dict = map(lambda x: x.strip(), vocab) - -for line in sys.stdin: - tokens = line.split() - for token in tokens: - elements = token.split(':') - if len(elements) == 1: - print "%s" % (term_dict[int(elements[0])]), - else: - print "%s:%s" % (term_dict[int(elements[0])], elements[1]), - print diff --git a/gi/pyp-topics/scripts/map-terms.py b/gi/pyp-topics/scripts/map-terms.py deleted file mode 100755 index eb0298d7..00000000 --- a/gi/pyp-topics/scripts/map-terms.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python - -import sys - -if len(sys.argv) != 2: - print "Usage: map-terms.py vocab-file" - exit(1) - -vocab = file(sys.argv[1], 'r').readlines() -term_dict = map(lambda x: x.strip().replace(' ','_'), vocab) - -for line in sys.stdin: - tokens = line.split() - for token in tokens: - elements = token.split(':') - if len(elements) == 1: - print "%s" % (term_dict[int(elements[0])]), - else: - print "%s:%s" % (term_dict[int(elements[0])], elements[1]), - print diff --git a/gi/pyp-topics/scripts/run.sh b/gi/pyp-topics/scripts/run.sh deleted file mode 100644 index 19e625b1..00000000 --- a/gi/pyp-topics/scripts/run.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh - - -./simple-extract-context.sh ~/workspace/clsp2010/jhuws2010/data/btec/split.zh-en.al 1 | ~/workspace/pyp-topics/scripts/contexts2documents.py > split.zh-en.data - -~/workspace/pyp-topics/bin/pyp-topics-train -d split.zh-en.data -t 50 -s 100 -o split.zh-en.documents.gz -w split.zh-en.topics.gz -gunzip split.zh-en.documents.gz - -~/workspace/cdec/extools/extractor -i ../jhuws2010/data/btec/split.zh-en.al -S 1 -c 500000 -L 12 --base_phrase_spans | ~/workspace/pyp-topics/scripts/spans2labels.py split.zh-en.phrases split.zh-en.contexts split.zh-en.documents > corpus.zh-en.labelled_spans - -paste -d " " ~/workspace/clsp2010/jhuws2010/data/btec/split.zh-en.al corpus.labelled_spans > split.zh-en.labelled_spans - -./simple-extract.sh ~/workspace/clsp2010/scratch/split.zh-en.labelled_spans diff --git a/gi/pyp-topics/scripts/score-mkcls.py b/gi/pyp-topics/scripts/score-mkcls.py deleted file mode 100755 index 6bd33fc5..00000000 --- a/gi/pyp-topics/scripts/score-mkcls.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/python - -import sys -from collections import defaultdict - -def dict_max(d): - max_val=-1 - max_key=None - for k in d: - if d[k] > max_val: - max_val = d[k] - max_key = k - assert max_key - return max_key - -if len(sys.argv) != 3: - print "Usage: 
score-mkcls.py gold classes"
-    exit(1)
-
-gold_file=open(sys.argv[1],'r')
-
-term_to_topics = {}
-for line in open(sys.argv[2],'r'):
-    term,cls = line.split()
-    term_to_topics[term] = cls
-
-gold_to_topics = defaultdict(dict)
-topics_to_gold = defaultdict(dict)
-
-for gold_line in gold_file:
-    gold_tokens = gold_line.split()
-    for gold_token in gold_tokens:
-        gold_term,gold_tag = gold_token.rsplit('|',1)
-        pred_token = term_to_topics[gold_term]
-        gold_to_topics[gold_tag][pred_token] \
-            = gold_to_topics[gold_tag].get(pred_token, 0) + 1
-        topics_to_gold[pred_token][gold_tag] \
-            = topics_to_gold[pred_token].get(gold_tag, 0) + 1
-
-pred=0
-correct=0
-gold_file=open(sys.argv[1],'r')
-for gold_line in gold_file:
-    gold_tokens = gold_line.split()
-
-    for gold_token in gold_tokens:
-        gold_term,gold_tag = gold_token.rsplit('|',1)
-        pred_token = term_to_topics[gold_term]
-        print "%s|%s|%s" % (gold_token, pred_token, dict_max(topics_to_gold[pred_token])),
-        pred += 1
-        if gold_tag == dict_max(topics_to_gold[pred_token]):
-            correct += 1
-    print
-print >>sys.stderr, "Many-to-One Accuracy = %f" % (float(correct) / pred)
-#for x in gold_to_topics:
-#    print x,dict_max(gold_to_topics[x])
-#print "###################################################"
-#for x in range(len(topics_to_gold)):
-#    print x,dict_max(topics_to_gold[str(x)])
-#    print x,topics_to_gold[str(x)]
-#print term_to_topics
diff --git a/gi/pyp-topics/scripts/score-topics.py b/gi/pyp-topics/scripts/score-topics.py
deleted file mode 100755
index 1d8a1fcd..00000000
--- a/gi/pyp-topics/scripts/score-topics.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/python
-
-import sys
-from collections import defaultdict
-
-def dict_max(d):
-    max_val=-1
-    max_key=None
-    for k in d:
-        if d[k] > max_val:
-            max_val = d[k]
-            max_key = k
-    assert max_key
-    return max_key
-
-if len(sys.argv) != 3:
-    print "Usage: score-topics.py gold pred"
-    exit(1)
-
-gold_file=open(sys.argv[1],'r')
-pred_file=open(sys.argv[2],'r')
-
-gold_to_topics = defaultdict(dict)
-topics_to_gold = defaultdict(dict)
-term_to_topics = defaultdict(dict)
-
-for gold_line,pred_line in zip(gold_file,pred_file):
-    gold_tokens = gold_line.split()
-    pred_tokens = pred_line.split()
-    assert len(gold_tokens) == len(pred_tokens)
-
-    for gold_token,pred_token in zip(gold_tokens,pred_tokens):
-        gold_term,gold_tag = gold_token.rsplit('|',1)
-        gold_to_topics[gold_tag][pred_token] \
-            = gold_to_topics[gold_tag].get(pred_token, 0) + 1
-        term_to_topics[gold_term][pred_token] \
-            = term_to_topics[gold_term].get(pred_token, 0) + 1
-        topics_to_gold[pred_token][gold_tag] \
-            = topics_to_gold[pred_token].get(gold_tag, 0) + 1
-
-pred=0
-correct=0
-gold_file=open(sys.argv[1],'r')
-pred_file=open(sys.argv[2],'r')
-for gold_line,pred_line in zip(gold_file,pred_file):
-    gold_tokens = gold_line.split()
-    pred_tokens = pred_line.split()
-
-    for gold_token,pred_token in zip(gold_tokens,pred_tokens):
-        gold_term,gold_tag = gold_token.rsplit('|',1)
-#        print "%s|%s" % (gold_token, dict_max(gold_to_topics[gold_tag])),
-        print "%s|%s|%s" % (gold_token, pred_token, dict_max(topics_to_gold[pred_token])),
-        pred += 1
-        if gold_tag == dict_max(topics_to_gold[pred_token]):
-            correct += 1
-    print
-print >>sys.stderr, "Many-to-One Accuracy = %f" % (float(correct) / pred)
-#for x in gold_to_topics:
-#    print x,dict_max(gold_to_topics[x])
-#print "###################################################"
-#for x in range(len(topics_to_gold)):
-#    print x,dict_max(topics_to_gold[str(x)])
-#    print x,topics_to_gold[str(x)]
-#print term_to_topics
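Both scoring scripts above implement the same many-to-one evaluation: each induced class is mapped to the gold tag it co-occurs with most often, and token-level accuracy is then computed against that mapping. A minimal, self-contained sketch of that computation (the `pairs` list below is an illustrative stand-in for the (gold tag, predicted class) token stream the scripts read from their input files; `many_to_one_accuracy` is not a function in the tree):

    from collections import defaultdict

    def many_to_one_accuracy(pairs):
        # count how often each predicted class co-occurs with each gold tag
        cooc = defaultdict(lambda: defaultdict(int))
        for gold, pred in pairs:
            cooc[pred][gold] += 1
        # map every predicted class to its most frequent gold tag
        best = dict((pred, max(tags, key=tags.get)) for pred, tags in cooc.items())
        correct = sum(1 for gold, pred in pairs if best[pred] == gold)
        return float(correct) / len(pairs)

    print(many_to_one_accuracy([('NN', 'C7'), ('NN', 'C7'), ('VB', 'C7'), ('DT', 'C3')]))
    # 0.75: C7 maps to NN (right for 2 of its 3 tokens), C3 maps to DT

The scripts make two passes over the gold data for the same reason the sketch builds `best` before scoring: the class-to-tag mapping has to be complete before any token can be judged correct.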
diff --git a/gi/pyp-topics/scripts/spans2labels.py b/gi/pyp-topics/scripts/spans2labels.py deleted file mode 100755 index 50fa8106..00000000 --- a/gi/pyp-topics/scripts/spans2labels.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/python - -import sys -from operator import itemgetter - -if len(sys.argv) <= 2: - print "Usage: spans2labels.py phrase_context_index [order] [threshold] [languages={s,t,b}{s,t,b}] [type={tag,tok,both},{tag,tok,both}]" - exit(1) - -order=1 -threshold = 0 -cutoff_cat = "<UNK>" -if len(sys.argv) > 2: - order = int(sys.argv[2]) -if len(sys.argv) > 3: - threshold = float(sys.argv[3]) -phr=ctx='t' -if len(sys.argv) > 4: - phr, ctx = sys.argv[4] - assert phr in 'stb' - assert ctx in 'stb' -phr_typ = ctx_typ = 'both' -if len(sys.argv) > 5: - phr_typ, ctx_typ = sys.argv[5].split(',') - assert phr_typ in ('tag', 'tok', 'both') - assert ctx_typ in ('tag', 'tok', 'both') - -#print >>sys.stderr, "Loading phrase index" -phrase_context_index = {} -for line in file(sys.argv[1], 'r'): - phrase,tail= line.split('\t') - contexts = tail.split(" ||| ") - try: # remove Phil's bizarre integer pair - x,y = contexts[0].split() - x=int(x); y=int(y) - contexts = contexts[1:] - except: - pass - if len(contexts) == 1: continue - assert len(contexts) % 2 == 0 - for i in range(0, len(contexts), 2): - #parse contexts[i+1] = " C=1 P=0.8 ... " - features=dict([ keyval.split('=') for keyval in contexts[i+1].split()]) - category = features['C'] - if features.has_key('P') and float(features['P']) < threshold: - category = cutoff_cat - - phrase_context_index[(phrase,contexts[i])] = category - #print (phrase,contexts[i]), category - -#print >>sys.stderr, "Labelling spans" -for line in sys.stdin: - #print >>sys.stderr, "line", line.strip() - line_segments = line.split(' ||| ') - assert len(line_segments) >= 3 - source = ['<s>' for x in range(order)] + line_segments[0].split() + ['</s>' for x in range(order)] - target = ['<s>' for x in range(order)] + line_segments[1].split() + ['</s>' for x in range(order)] - phrases = [ [int(i) for i in x.split('-')] for x in line_segments[2].split()] - - if phr_typ != 'both' or ctx_typ != 'both': - if phr in 'tb' or ctx in 'tb': - target_toks = ['<s>' for x in range(order)] + map(lambda x: x.rsplit('_', 1)[0], line_segments[1].split()) + ['</s>' for x in range(order)] - target_tags = ['<s>' for x in range(order)] + map(lambda x: x.rsplit('_', 1)[-1], line_segments[1].split()) + ['</s>' for x in range(order)] - - if phr in 'tb': - if phr_typ == 'tok': - targetP = target_toks - elif phr_typ == 'tag': - targetP = target_tags - if ctx in 'tb': - if ctx_typ == 'tok': - targetC = target_toks - elif ctx_typ == 'tag': - targetC = target_tags - - if phr in 'sb' or ctx in 'sb': - source_toks = ['<s>' for x in range(order)] + map(lambda x: x.rsplit('_', 1)[0], line_segments[0].split()) + ['</s>' for x in range(order)] - source_tags = ['<s>' for x in range(order)] + map(lambda x: x.rsplit('_', 1)[-1], line_segments[0].split()) + ['</s>' for x in range(order)] - - if phr in 'sb': - if phr_typ == 'tok': - sourceP = source_toks - elif phr_typ == 'tag': - sourceP = source_tags - if ctx in 'sb': - if ctx_typ == 'tok': - sourceC = source_toks - elif ctx_typ == 'tag': - sourceC = source_tags - else: - sourceP = sourceC = source - targetP = targetC = target - - #print >>sys.stderr, "line", source, '---', target, 'phrases', phrases - - print "|||", - - for s1,s2,t1,t2 in phrases: - s1 += order - s2 += order - t1 += order - t2 += order - - phraset = phrases = contextt = contexts = '' - if 
phr in 'tb': - phraset = reduce(lambda x, y: x+y+" ", targetP[t1:t2], "").strip() - if phr in 'sb': - phrases = reduce(lambda x, y: x+y+" ", sourceP[s1:s2], "").strip() - - if ctx in 'tb': - left_context = reduce(lambda x, y: x+y+" ", targetC[t1-order:t1], "") - right_context = reduce(lambda x, y: x+y+" ", targetC[t2:t2+order], "").strip() - contextt = "%s<PHRASE> %s" % (left_context, right_context) - if ctx in 'sb': - left_context = reduce(lambda x, y: x+y+" ", sourceC[s1-order:s1], "") - right_context = reduce(lambda x, y: x+y+" ", sourceC[s2:s2+order], "").strip() - contexts = "%s<PHRASE> %s" % (left_context, right_context) - - if phr == 'b': - phrase = phraset + ' <SPLIT> ' + phrases - elif phr == 's': - phrase = phrases - else: - phrase = phraset - - if ctx == 'b': - context = contextt + ' <SPLIT> ' + contexts - elif ctx == 's': - context = contexts - else: - context = contextt - - #print "%d-%d-%d-%d looking up" % (s1-order,s2-order,t1-order,t2-order), (phrase, context) - label = phrase_context_index.get((phrase,context), cutoff_cat) - if label != cutoff_cat: #cutoff'd spans are left unlabelled - print "%d-%d-%d-%d:X%s" % (s1-order,s2-order,t1-order,t2-order,label), - print diff --git a/gi/pyp-topics/scripts/tokens2classes.py b/gi/pyp-topics/scripts/tokens2classes.py deleted file mode 100755 index 33df255f..00000000 --- a/gi/pyp-topics/scripts/tokens2classes.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/python - -import sys - -if len(sys.argv) != 3: - print "Usage: tokens2classes.py source_classes target_classes" - exit(1) - -source_to_topics = {} -for line in open(sys.argv[1],'r'): - term,cls = line.split() - source_to_topics[term] = cls - -target_to_topics = {} -for line in open(sys.argv[2],'r'): - term,cls = line.split() - target_to_topics[term] = cls - -for line in sys.stdin: - source, target, tail = line.split(" ||| ") - - for token in source.split(): - print source_to_topics[token], - print "|||", - for token in target.split(): - print target_to_topics[token], - print "|||", tail, diff --git a/gi/pyp-topics/scripts/topics.py b/gi/pyp-topics/scripts/topics.py deleted file mode 100755 index 0db1af71..00000000 --- a/gi/pyp-topics/scripts/topics.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python - -import sys - -if len(sys.argv) != 2: - print "Usage: topics.py words-per-topic" - exit(1) - -for t,line in enumerate(sys.stdin): - tokens = line.split() - terms = [] - for token in tokens: - elements = token.rsplit(':',1) - terms.append((int(elements[1]),elements[0])) - terms.sort() - terms.reverse() - - print "Topic %d:" % t - map(lambda (x,y) : sys.stdout.write(" %s:%s\n" % (y,x)), terms[:int(sys.argv[1])]) - print diff --git a/gi/pyp-topics/src/Makefile.am b/gi/pyp-topics/src/Makefile.am deleted file mode 100644 index d3f95d0b..00000000 --- a/gi/pyp-topics/src/Makefile.am +++ /dev/null @@ -1,16 +0,0 @@ -bin_PROGRAMS = pyp-topics-train pyp-contexts-train #mpi-pyp-contexts-train - -contexts_lexer.cc: contexts_lexer.l - $(LEX) -s -CF -8 -o$@ $< - -pyp_topics_train_SOURCES = mt19937ar.c corpus.cc gzstream.cc pyp-topics.cc train.cc contexts_lexer.cc contexts_corpus.cc -pyp_topics_train_LDADD = $(top_srcdir)/utils/libutils.a -lz - -pyp_contexts_train_SOURCES = mt19937ar.c corpus.cc gzstream.cc pyp-topics.cc contexts_lexer.cc contexts_corpus.cc train-contexts.cc -pyp_contexts_train_LDADD = $(top_srcdir)/utils/libutils.a -lz - -#mpi_pyp_contexts_train_SOURCES = mt19937ar.c corpus.cc gzstream.cc mpi-pyp-topics.cc contexts_lexer.cc contexts_corpus.cc mpi-train-contexts.cc 
-#mpi_pyp_contexts_train_LDADD = $(top_srcdir)/utils/libutils.a -lz - -AM_CPPFLAGS = -W -Wall -Wno-sign-compare -funroll-loops -I../../../utils - diff --git a/gi/pyp-topics/src/Makefile.mpi b/gi/pyp-topics/src/Makefile.mpi deleted file mode 100644 index b7b8a290..00000000 --- a/gi/pyp-topics/src/Makefile.mpi +++ /dev/null @@ -1,26 +0,0 @@ -BLD_ARCH=$(shell uname -s) --include macros.${BLD_ARCH} - -local_objs = mt19937ar.o corpus.o gzstream.o mpi-pyp-topics.o contexts_lexer.o contexts_corpus.o mpi-train-contexts.o - -all: mpi-pyp-contexts-train - --include makefile.depend - -#-----------------------# -# Local stuff -#-----------------------# - -mpi-pyp-contexts-train: mpi-train-contexts.o $(local_objs) - $(CXX) -o $@ $^ $(LDFLAGS) - -.PHONY: depend echo -depend: -#$(CXX) -MM $(CXXFLAGS) *.cc *.c | sed 's/^\(.*\.o:\)/obj\/\1/' > makefile.depend - $(CXX) -MM $(CXXFLAGS) *.cc *.c > makefile.depend - -clean: - rm -f *.o - -#clobber: clean -# rm makefile.depend ../bin/${ARCH}/* diff --git a/gi/pyp-topics/src/clock_gettime_stub.c b/gi/pyp-topics/src/clock_gettime_stub.c deleted file mode 100644 index 4883b7c1..00000000 --- a/gi/pyp-topics/src/clock_gettime_stub.c +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c), MM Weiss - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without modification, - * are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3. Neither the name of the MM Weiss nor the names of its contributors - * may be used to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - * SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT - * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR - * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -/* - * clock_gettime_stub.c - * gcc -Wall -c clock_gettime_stub.c - * posix realtime functions; MacOS user space glue - */ - -/* @comment - * other possible implementation using intel builtin rdtsc - * rdtsc-workaround: http://www.mcs.anl.gov/~kazutomo/rdtsc.html - * - * we could get the ticks by doing this - * - * __asm __volatile("mov %%ebx, %%esi\n\t" - * "cpuid\n\t" - * "xchg %%esi, %%ebx\n\t" - * "rdtsc" - * : "=a" (a), - * "=d" (d) - * ); - - * we could even replace our tricky sched_yield call by assembly code to get a better accurency, - * anyway the following C stub will satisfy 99% of apps using posix clock_gettime call, - * moreover, the setter version (clock_settime) could be easly written using mach primitives: - * http://www.opensource.apple.com/source/xnu/xnu-${VERSION}/osfmk/man/ (clock_[set|get]_time) - * - * hackers don't be crackers, don't you use a flush toilet? - * - * - * @see draft: ./posix-realtime-stub/posix-realtime-stub.c - * - */ - - -#ifdef __APPLE__ - -#pragma weak clock_gettime - -#include <sys/time.h> -#include <sys/resource.h> -#include <mach/mach.h> -#include <mach/clock.h> -#include <mach/mach_time.h> -#include <errno.h> -#include <unistd.h> -#include <sched.h> - -typedef enum { - CLOCK_REALTIME, - CLOCK_MONOTONIC, - CLOCK_PROCESS_CPUTIME_ID, - CLOCK_THREAD_CPUTIME_ID -} clockid_t; - -static mach_timebase_info_data_t __clock_gettime_inf; - -static int clock_gettime(clockid_t clk_id, struct timespec *tp) { - kern_return_t ret; - clock_serv_t clk; - clock_id_t clk_serv_id; - mach_timespec_t tm; - - uint64_t start, end, delta, nano; - - //task_basic_info_data_t tinfo; - //task_thread_times_info_data_t ttinfo; - //mach_msg_type_number_t tflag; - - int retval = -1; - switch (clk_id) { - case CLOCK_REALTIME: - case CLOCK_MONOTONIC: - clk_serv_id = clk_id == CLOCK_REALTIME ? 
CALENDAR_CLOCK : SYSTEM_CLOCK; - if (KERN_SUCCESS == (ret = host_get_clock_service(mach_host_self(), clk_serv_id, &clk))) { - if (KERN_SUCCESS == (ret = clock_get_time(clk, &tm))) { - tp->tv_sec = tm.tv_sec; - tp->tv_nsec = tm.tv_nsec; - retval = 0; - } - } - if (KERN_SUCCESS != ret) { - errno = EINVAL; - retval = -1; - } - break; - case CLOCK_PROCESS_CPUTIME_ID: - case CLOCK_THREAD_CPUTIME_ID: - start = mach_absolute_time(); - if (clk_id == CLOCK_PROCESS_CPUTIME_ID) { - getpid(); - } else { - sched_yield(); - } - end = mach_absolute_time(); - delta = end - start; - if (0 == __clock_gettime_inf.denom) { - mach_timebase_info(&__clock_gettime_inf); - } - nano = delta * __clock_gettime_inf.numer / __clock_gettime_inf.denom; - tp->tv_sec = nano * 1e-9; - tp->tv_nsec = nano - (tp->tv_sec * 1e9); - retval = 0; - break; - default: - errno = EINVAL; - retval = -1; - } - return retval; -} - -#endif // __APPLE__ - -/* EOF */ diff --git a/gi/pyp-topics/src/contexts_corpus.cc b/gi/pyp-topics/src/contexts_corpus.cc deleted file mode 100644 index 92b1b34c..00000000 --- a/gi/pyp-topics/src/contexts_corpus.cc +++ /dev/null @@ -1,164 +0,0 @@ -#include <sstream> -#include <iostream> -#include <set> - -#include "contexts_corpus.hh" -#include "gzstream.hh" -#include "contexts_lexer.h" - -#include <boost/tuple/tuple.hpp> - - -using namespace std; - -////////////////////////////////////////////////// -// ContextsCorpus -////////////////////////////////////////////////// - -bool read_callback_binary_contexts = false; - -void read_callback(const ContextsLexer::PhraseContextsType& new_contexts, void* extra) { - assert(new_contexts.contexts.size() == new_contexts.counts.size()); - - boost::tuple<ContextsCorpus*, BackoffGenerator*, map<string,int>* >* extra_pair - = static_cast< boost::tuple<ContextsCorpus*, BackoffGenerator*, map<string,int>* >* >(extra); - - ContextsCorpus* corpus_ptr = extra_pair->get<0>(); - BackoffGenerator* backoff_gen = extra_pair->get<1>(); - //map<string,int>* counts = extra_pair->get<2>(); - - Document* doc(new Document()); - - //cout << "READ: " << new_contexts.phrase << "\t"; - for (int i=0; i < (int)new_contexts.counts.size(); ++i) { - int cache_word_count = corpus_ptr->m_dict.max(); - - //string context_str = corpus_ptr->m_dict.toString(new_contexts.contexts[i]); - int context_index = new_contexts.counts.at(i).first; - string context_str = corpus_ptr->m_dict.toString(new_contexts.contexts[context_index]); - - // filter out singleton contexts - //if (!counts->empty()) { - // map<string,int>::const_iterator find_it = counts->find(context_str); - // if (find_it == counts->end() || find_it->second < 2) - // continue; - //} - - WordID id = corpus_ptr->m_dict.Convert(context_str); - if (cache_word_count != corpus_ptr->m_dict.max()) { - corpus_ptr->m_backoff->terms_at_level(0)++; - corpus_ptr->m_num_types++; - } - - //int count = new_contexts.counts[i]; - int count = new_contexts.counts.at(i).second; - if (read_callback_binary_contexts) { - doc->push_back(id); - corpus_ptr->m_num_terms++; - } - else { - for (int j=0; j<count; ++j) - doc->push_back(id); - corpus_ptr->m_num_terms += count; - } - - // generate the backoff map - if (backoff_gen) { - int order = 1; - WordID backoff_id = id; - //ContextsLexer::Context backedoff_context = new_contexts.contexts[i]; - ContextsLexer::Context backedoff_context = new_contexts.contexts[context_index]; - while (true) { - if (!corpus_ptr->m_backoff->has_backoff(backoff_id)) { - //cerr << "Backing off from " << corpus_ptr->m_dict.Convert(backoff_id) << " to 
"; - backedoff_context = (*backoff_gen)(backedoff_context); - - if (backedoff_context.empty()) { - //cerr << "Nothing." << endl; - (*corpus_ptr->m_backoff)[backoff_id] = -1; - break; - } - - if (++order > corpus_ptr->m_backoff->order()) - corpus_ptr->m_backoff->order(order); - - int cache_word_count = corpus_ptr->m_dict.max(); - int new_backoff_id = corpus_ptr->m_dict.Convert(backedoff_context); - if (cache_word_count != corpus_ptr->m_dict.max()) - corpus_ptr->m_backoff->terms_at_level(order-1)++; - - //cerr << corpus_ptr->m_dict.Convert(new_backoff_id) << " ." << endl; - - backoff_id = ((*corpus_ptr->m_backoff)[backoff_id] = new_backoff_id); - } - else break; - } - } - //cout << context_str << " (" << id << ") ||| C=" << count << " ||| "; - } - //cout << endl; - - //if (!doc->empty()) { - corpus_ptr->m_documents.push_back(doc); - corpus_ptr->m_keys.push_back(new_contexts.phrase); - //} -} - -void filter_callback(const ContextsLexer::PhraseContextsType& new_contexts, void* extra) { - assert(new_contexts.contexts.size() == new_contexts.counts.size()); - - map<string,int>* context_counts = (static_cast<map<string,int>*>(extra)); - - for (int i=0; i < (int)new_contexts.counts.size(); ++i) { - int context_index = new_contexts.counts.at(i).first; - int count = new_contexts.counts.at(i).second; - //if (read_callback_binary_contexts) count = 1; - //int count = new_contexts.counts[i]; - pair<map<string,int>::iterator,bool> result - = context_counts->insert(make_pair(Dict::toString(new_contexts.contexts[context_index]),count)); - //= context_counts->insert(make_pair(Dict::toString(new_contexts.contexts[i]),count)); - if (!result.second) - result.first->second += count; - } -} - - -unsigned ContextsCorpus::read_contexts(const string &filename, - BackoffGenerator* backoff_gen_ptr, - bool /*filter_singeltons*/, - bool binary_contexts) { - read_callback_binary_contexts = binary_contexts; - - map<string,int> counts; - //if (filter_singeltons) - { - // cerr << "--- Filtering singleton contexts ---" << endl; - - igzstream in(filename.c_str()); - ContextsLexer::ReadContexts(&in, filter_callback, &counts); - } - - m_num_terms = 0; - m_num_types = 0; - - igzstream in(filename.c_str()); - boost::tuple<ContextsCorpus*, BackoffGenerator*, map<string,int>* > extra_pair(this,backoff_gen_ptr,&counts); - ContextsLexer::ReadContexts(&in, read_callback, &extra_pair); - - //m_num_types = m_dict.max(); - - cerr << "Read backoff with order " << m_backoff->order() << "\n"; - for (int o=0; o<m_backoff->order(); o++) - cerr << " Terms at " << o << " = " << m_backoff->terms_at_level(o) << endl; - //cerr << endl; - - int i=0; double av_freq=0; - for (map<string,int>::const_iterator it=counts.begin(); it != counts.end(); ++it, ++i) { - WordID id = m_dict.Convert(it->first); - m_context_counts[id] = it->second; - av_freq += it->second; - } - cerr << " Average term frequency = " << av_freq / (double) i << endl; - - return m_documents.size(); -} diff --git a/gi/pyp-topics/src/contexts_corpus.hh b/gi/pyp-topics/src/contexts_corpus.hh deleted file mode 100644 index 2527f655..00000000 --- a/gi/pyp-topics/src/contexts_corpus.hh +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef _CONTEXTS_CORPUS_HH -#define _CONTEXTS_CORPUS_HH - -#include <vector> -#include <string> -#include <map> -#include <tr1/unordered_map> - -#include <boost/ptr_container/ptr_vector.hpp> - -#include "corpus.hh" -#include "contexts_lexer.h" -#include "dict.h" - - -class BackoffGenerator { -public: - virtual ContextsLexer::Context - operator()(const ContextsLexer::Context& 
c) = 0; - -protected: - ContextsLexer::Context strip_edges(const ContextsLexer::Context& c) { - if (c.size() <= 1) return ContextsLexer::Context(); - assert(c.size() % 2 == 1); - return ContextsLexer::Context(c.begin() + 1, c.end() - 1); - } -}; - -class NullBackoffGenerator : public BackoffGenerator { - virtual ContextsLexer::Context - operator()(const ContextsLexer::Context&) - { return ContextsLexer::Context(); } -}; - -class SimpleBackoffGenerator : public BackoffGenerator { - virtual ContextsLexer::Context - operator()(const ContextsLexer::Context& c) { - if (c.size() <= 3) - return ContextsLexer::Context(); - return strip_edges(c); - } -}; - - -//////////////////////////////////////////////////////////////// -// ContextsCorpus -//////////////////////////////////////////////////////////////// - -class ContextsCorpus : public Corpus { - friend void read_callback(const ContextsLexer::PhraseContextsType&, void*); - -public: - ContextsCorpus() : m_backoff(new TermBackoff) {} - virtual ~ContextsCorpus() {} - - virtual unsigned read_contexts(const std::string &filename, - BackoffGenerator* backoff_gen=0, - bool filter_singeltons=false, - bool binary_contexts=false); - - TermBackoffPtr backoff_index() { - return m_backoff; - } - - std::vector<std::string> context2string(const WordID& id) const { - std::vector<std::string> res; - assert (id >= 0); - m_dict.AsVector(id, &res); - return res; - } - - virtual int context_count(const WordID& id) const { - return m_context_counts.find(id)->second; - } - - - const std::string& key(const int& i) const { - return m_keys.at(i); - } - - const Dict& dict() const { return m_dict; } - -protected: - TermBackoffPtr m_backoff; - Dict m_dict; - std::vector<std::string> m_keys; - std::tr1::unordered_map<int,int> m_context_counts; -}; - -#endif // _CONTEXTS_CORPUS_HH diff --git a/gi/pyp-topics/src/contexts_lexer.h b/gi/pyp-topics/src/contexts_lexer.h deleted file mode 100644 index 66004990..00000000 --- a/gi/pyp-topics/src/contexts_lexer.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _CONTEXTS_LEXER_H_ -#define _CONTEXTS_LEXER_H_ - -#include <iostream> -#include <vector> -#include <string> - -#include "dict.h" - -struct ContextsLexer { - typedef std::vector<std::string> Context; - struct PhraseContextsType { - std::string phrase; - std::vector<Context> contexts; - std::vector< std::pair<int,int> > counts; - }; - - typedef void (*ContextsCallback)(const PhraseContextsType& new_contexts, void* extra); - static void ReadContexts(std::istream* in, ContextsCallback func, void* extra); -}; - -#endif diff --git a/gi/pyp-topics/src/contexts_lexer.l b/gi/pyp-topics/src/contexts_lexer.l deleted file mode 100644 index 64cd7ca3..00000000 --- a/gi/pyp-topics/src/contexts_lexer.l +++ /dev/null @@ -1,113 +0,0 @@ -%{ -#include "contexts_lexer.h" - -#include <string> -#include <iostream> -#include <sstream> -#include <cstring> -#include <cassert> -#include <algorithm> - -int lex_line = 0; -std::istream* contextslex_stream = NULL; -ContextsLexer::ContextsCallback contexts_callback = NULL; -void* contexts_callback_extra = NULL; - -#undef YY_INPUT -#define YY_INPUT(buf, result, max_size) (result = contextslex_stream->read(buf, max_size).gcount()) - -#define YY_SKIP_YYWRAP 1 -int num_phrases = 0; -int yywrap() { return 1; } - -#define MAX_TOKEN_SIZE 255 -std::string contextslex_tmp_token(MAX_TOKEN_SIZE, '\0'); -ContextsLexer::PhraseContextsType current_contexts; - -#define MAX_CONTEXT_SIZE 255 -//std::string tmp_context[MAX_CONTEXT_SIZE]; -ContextsLexer::Context tmp_context; - - -void 
contextslex_reset() { - current_contexts.phrase.clear(); - current_contexts.contexts.clear(); - current_contexts.counts.clear(); - tmp_context.clear(); -} - -%} - -INT [\-+]?[0-9]+|inf|[\-+]inf - -%x CONTEXT COUNT COUNT_END -%% - -<INITIAL>[^\t]+ { - contextslex_reset(); - current_contexts.phrase.assign(yytext, yyleng); - BEGIN(CONTEXT); - } -<INITIAL>\t { - ; - } - -<INITIAL,CONTEXT,COUNT>\n { - std::cerr << "ERROR: contexts_lexer.l: unexpected newline while trying to read phrase|context|count." << std::endl; - abort(); - } - -<CONTEXT>\|\|\| { - current_contexts.contexts.push_back(tmp_context); - tmp_context.clear(); - BEGIN(COUNT); - } -<CONTEXT>[^ \t]+ { - contextslex_tmp_token.assign(yytext, yyleng); - tmp_context.push_back(contextslex_tmp_token); - } -<CONTEXT>[ \t]+ { ; } - -<COUNT>[ \t]+ { ; } -<COUNT>C={INT} { - current_contexts.counts.push_back(std::make_pair(current_contexts.counts.size(), atoi(yytext+2))); - BEGIN(COUNT_END); - } -<COUNT>. { - std::cerr << "ERROR: contexts_lexer.l: unexpected content while reading count." << std::endl; - abort(); - } - -<COUNT_END>[ \t]+ { ; } -<COUNT_END>\|\|\| { - BEGIN(CONTEXT); - } -<COUNT_END>\n { - //std::cerr << "READ:" << current_contexts.phrase << " with " << current_contexts.contexts.size() - // << " contexts, and " << current_contexts.counts.size() << " counts." << std::endl; - std::sort(current_contexts.counts.rbegin(), current_contexts.counts.rend()); - - contexts_callback(current_contexts, contexts_callback_extra); - current_contexts.phrase.clear(); - current_contexts.contexts.clear(); - current_contexts.counts.clear(); - BEGIN(INITIAL); - } -<COUNT_END>. { - contextslex_tmp_token.assign(yytext, yyleng); - std::cerr << "ERROR: contexts_lexer.l: unexpected content while looking for ||| closing count." 
<< std::endl; - abort(); - } - -%% - -#include "filelib.h" - -void ContextsLexer::ReadContexts(std::istream* in, ContextsLexer::ContextsCallback func, void* extra) { - lex_line = 1; - contextslex_stream = in; - contexts_callback_extra = extra, - contexts_callback = func; - yylex(); -} - diff --git a/gi/pyp-topics/src/corpus.cc b/gi/pyp-topics/src/corpus.cc deleted file mode 100644 index f182381f..00000000 --- a/gi/pyp-topics/src/corpus.cc +++ /dev/null @@ -1,104 +0,0 @@ -#include <sstream> -#include <iostream> -#include <set> - -#include "corpus.hh" -#include "gzstream.hh" - -using namespace std; - -////////////////////////////////////////////////// -// Corpus -////////////////////////////////////////////////// - -Corpus::Corpus() : m_num_terms(0), m_num_types(0) {} - -unsigned Corpus::read(const std::string &filename) { - m_num_terms = 0; - m_num_types = 0; - std::set<int> seen_types; - - igzstream in(filename.c_str()); - - string buf; - int token; - unsigned doc_count=0; - while (getline(in, buf)) { - Document* doc(new Document()); - istringstream ss(buf); - - ss >> token; // the number of unique terms - - char delimeter; - int count; - while(ss >> token >> delimeter >> count) { - for (int i=0; i<count; ++i) - doc->push_back(token); - m_num_terms += count; - seen_types.insert(token); - } - - m_documents.push_back(doc); - doc_count++; - } - - m_num_types = seen_types.size(); - - return doc_count; -} - - -////////////////////////////////////////////////// -// TestCorpus -////////////////////////////////////////////////// - -TestCorpus::TestCorpus() {} - -void TestCorpus::read(const std::string &filename) { - igzstream in(filename.c_str()); - - string buf; - Term term; - DocumentId doc; - char delimeter; - while (getline(in, buf)) { - DocumentTerms* line(new DocumentTerms()); - istringstream ss(buf); - - while(ss >> doc >> delimeter >> term) - line->push_back(DocumentTerm(doc, term)); - - m_lines.push_back(line); - } -} - -////////////////////////////////////////////////// -// TermBackoff -////////////////////////////////////////////////// - -void TermBackoff::read(const std::string &filename) { - igzstream in(filename.c_str()); - - string buf; - int num_terms; - getline(in, buf); - istringstream ss(buf); - ss >> num_terms >> m_backoff_order; - - m_dict.resize(num_terms, -1); - for (int i=0; i<m_backoff_order; ++i) { - int count; ss >> count; - m_terms_at_order.push_back(count); - } - - Term term, backoff; - while (getline(in, buf)) { - istringstream ss(buf); - ss >> term >> backoff; - - assert(term < num_terms); - assert(term >= 0); - - m_dict[term] = backoff; - } -} diff --git a/gi/pyp-topics/src/corpus.hh b/gi/pyp-topics/src/corpus.hh deleted file mode 100644 index 2aa03527..00000000 --- a/gi/pyp-topics/src/corpus.hh +++ /dev/null @@ -1,133 +0,0 @@ -#ifndef _CORPUS_HH -#define _CORPUS_HH - -#include <vector> -#include <string> -#include <map> -#include <limits> - -#include <boost/shared_ptr.hpp> -#include <boost/ptr_container/ptr_vector.hpp> - -//////////////////////////////////////////////////////////////// -// Corpus -//////////////////////////////////////////////////////////////// -typedef int Term; - -typedef std::vector<Term> Document; -typedef std::vector<Term> Terms; - -class Corpus { -public: - typedef boost::ptr_vector<Document>::const_iterator const_iterator; - -public: - Corpus(); - virtual ~Corpus() {} - - virtual unsigned read(const std::string &filename); - - const_iterator begin() const { return m_documents.begin(); } - const_iterator end() const { return 
m_documents.end(); } - - const Document& at(size_t i) const { return m_documents.at(i); } - - int num_documents() const { return m_documents.size(); } - int num_terms() const { return m_num_terms; } - int num_types() const { return m_num_types; } - - virtual int context_count(const int&) const { - return std::numeric_limits<int>::max(); - } - -protected: - int m_num_terms, m_num_types; - boost::ptr_vector<Document> m_documents; -}; - -typedef int DocumentId; -struct DocumentTerm { - DocumentTerm(DocumentId d, Term t) : term(t), doc(d) {} - Term term; - DocumentId doc; -}; -typedef std::vector<DocumentTerm> DocumentTerms; - -class TestCorpus { -public: - typedef boost::ptr_vector<DocumentTerms>::const_iterator const_iterator; - -public: - TestCorpus(); - ~TestCorpus() {} - - void read(const std::string &filename); - - const_iterator begin() const { return m_lines.begin(); } - const_iterator end() const { return m_lines.end(); } - - int num_instances() const { return m_lines.size(); } - -protected: - boost::ptr_vector<DocumentTerms> m_lines; -}; - -class TermBackoff { -public: - typedef std::vector<Term> dictionary_type; - typedef dictionary_type::const_iterator const_iterator; - const static int NullBackoff=-1; - -public: - TermBackoff() { order(1); } - ~TermBackoff() {} - - void read(const std::string &filename); - - const_iterator begin() const { return m_dict.begin(); } - const_iterator end() const { return m_dict.end(); } - - const Term& operator[](const Term& t) const { - assert(t < static_cast<int>(m_dict.size())); - return m_dict[t]; - } - - Term& operator[](const Term& t) { - if (t >= static_cast<int>(m_dict.size())) - m_dict.resize(t+1, -1); - return m_dict[t]; - } - - bool has_backoff(const Term& t) { - return t >= 0 && t < static_cast<int>(m_dict.size()) && m_dict[t] >= 0; - } - - int order() const { return m_backoff_order; } - void order(int o) { - if (o >= (int)m_terms_at_order.size()) - m_terms_at_order.resize(o, 0); - m_backoff_order = o; - } - -// int levels() const { return m_terms_at_order.size(); } - bool is_null(const Term& term) const { return term < 0; } - int terms_at_level(int level) const { - assert (level < (int)m_terms_at_order.size()); - return m_terms_at_order.at(level); - } - - int& terms_at_level(int level) { - assert (level < (int)m_terms_at_order.size()); - return m_terms_at_order.at(level); - } - - int size() const { return m_dict.size(); } - -protected: - dictionary_type m_dict; - int m_backoff_order; - std::vector<int> m_terms_at_order; -}; -typedef boost::shared_ptr<TermBackoff> TermBackoffPtr; - -#endif // _CORPUS_HH diff --git a/gi/pyp-topics/src/gammadist.c b/gi/pyp-topics/src/gammadist.c deleted file mode 100644 index 4e260db8..00000000 --- a/gi/pyp-topics/src/gammadist.c +++ /dev/null @@ -1,247 +0,0 @@ -/* gammadist.c -- computes probability of samples under / produces samples from a Gamma distribution - * - * Mark Johnson, 22nd March 2008 - * - * WARNING: you need to set the flag -std=c99 to compile - * - * gammavariate() was translated from random.py in Python library - * - * The Gamma distribution is: - * - * Gamma(x | alpha, beta) = pow(x/beta, alpha-1) * exp(-x/beta) / (gamma(alpha)*beta) - * - * shape parameter alpha > 0 (also called c), scale parameter beta > 0 (also called s); - * mean is alpha*beta, variance is alpha*beta**2 - * - * Note that many parameterizations of the Gamma function are in terms of an _inverse_ - * scale parameter beta, which is the inverse of the beta given here. 
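
As a quick sanity check of the density formula quoted above, here is a small standalone C++ sketch (an editor's illustration, not repository code; gamma_pdf and gamma_log_pdf are hypothetical names) confirming that the direct density and its log form agree:

    // Checks that Gamma(x|alpha,beta) = (x/beta)^(alpha-1) * exp(-x/beta) / (tgamma(alpha)*beta)
    // agrees with its log form across a few parameter settings.
    #include <cassert>
    #include <cmath>
    #include <initializer_list>

    static double gamma_pdf(double x, double a, double b) {
      return std::pow(x / b, a - 1) * std::exp(-x / b) / (std::tgamma(a) * b);
    }
    static double gamma_log_pdf(double x, double a, double b) {
      return (a - 1) * std::log(x) - a * std::log(b) - x / b - std::lgamma(a);
    }
    int main() {
      for (double x : {0.5, 1.0, 3.0})
        for (double a : {0.5, 1.0, 2.5})
          for (double b : {0.5, 1.0, 2.0})
            assert(std::fabs(gamma_pdf(x, a, b) - std::exp(gamma_log_pdf(x, a, b))) < 1e-12);
    }
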
- * - * To define a main() that tests the routines, uncomment the following #define: - */ -/* #define GAMMATEST */ - -#include <assert.h> -#include <math.h> - -#include "gammadist.h" -#include "mt19937ar.h" - -/* gammadist() returns the probability density of x under a Gamma(alpha,beta) - * distribution - */ - -long double gammadist(long double x, long double alpha, long double beta) { - assert(alpha > 0); - assert(beta > 0); - return pow(x/beta, alpha-1) * exp(-x/beta) / (tgamma(alpha)*beta); -} - -/* lgammadist() returns the log probability density of x under a Gamma(alpha,beta) - * distribution - */ - -long double lgammadist(long double x, long double alpha, long double beta) { - assert(alpha > 0); - assert(beta > 0); - return (alpha-1)*log(x) - alpha*log(beta) - x/beta - lgamma(alpha); -} - -/* This definition of gammavariate is from Python code in - * the Python random module. - */ - -long double gammavariate(long double alpha, long double beta) { - - assert(alpha > 0); - assert(beta > 0); - - if (alpha > 1.0) { - - /* Uses R.C.H. Cheng, "The generation of Gamma variables with - non-integral shape parameters", Applied Statistics, (1977), 26, - No. 1, p71-74 */ - - long double ainv = sqrt(2.0 * alpha - 1.0); - long double bbb = alpha - log(4.0); - long double ccc = alpha + ainv; - - while (1) { - long double u1 = mt_genrand_real3(); - if (u1 > 1e-7 || u1 < 0.9999999) { - long double u2 = 1.0 - mt_genrand_real3(); - long double v = log(u1/(1.0-u1))/ainv; - long double x = alpha*exp(v); - long double z = u1*u1*u2; - long double r = bbb+ccc*v-x; - if (r + (1.0+log(4.5)) - 4.5*z >= 0.0 || r >= log(z)) - return x * beta; - } - } - } - else if (alpha == 1.0) { - long double u = mt_genrand_real3(); - while (u <= 1e-7) - u = mt_genrand_real3(); - return -log(u) * beta; - } - else { - /* alpha is between 0 and 1 (exclusive) - Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle */ - - while (1) { - long double u = mt_genrand_real3(); - long double b = (exp(1) + alpha)/exp(1); - long double p = b*u; - long double x = (p <= 1.0) ? pow(p, 1.0/alpha) : -log((b-p)/alpha); - long double u1 = mt_genrand_real3(); - if (! (((p <= 1.0) && (u1 > exp(-x))) || - ((p > 1.0) && (u1 > pow(x, alpha - 1.0))))) - return x * beta; - } - } -} - -/* betadist() returns the probability density of x under a Beta(alpha,beta) - * distribution. - */ - -long double betadist(long double x, long double alpha, long double beta) { - assert(x >= 0); - assert(x <= 1); - assert(alpha > 0); - assert(beta > 0); - return pow(x,alpha-1)*pow(1-x,beta-1)*tgamma(alpha+beta)/(tgamma(alpha)*tgamma(beta)); -} - -/* lbetadist() returns the log probability density of x under a Beta(alpha,beta) - * distribution. - */ - -long double lbetadist(long double x, long double alpha, long double beta) { - assert(x > 0); - assert(x < 1); - assert(alpha > 0); - assert(beta > 0); - return (alpha-1)*log(x)+(beta-1)*log(1-x)+lgamma(alpha+beta)-lgamma(alpha)-lgamma(beta); -} - -/* betavariate() generates a sample from a Beta distribution with - * parameters alpha and beta. 
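
betavariate(), defined just below, uses the standard two-Gamma construction: draw X ~ Gamma(alpha, 1) and Y ~ Gamma(beta, 1), then return X/(X+Y). The same idea with the C++ standard library, as an independent sketch:

    // Beta(alpha,beta) sampling via two Gamma draws; the sample mean should
    // approach alpha / (alpha + beta).
    #include <iostream>
    #include <random>

    int main() {
      std::mt19937 rng(42);
      const double alpha = 2.0, beta = 5.0;
      std::gamma_distribution<double> ga(alpha, 1.0), gb(beta, 1.0);
      double sum = 0.0;
      const int n = 100000;
      for (int i = 0; i < n; ++i) {
        const double x = ga(rng), y = gb(rng);
        sum += x / (x + y);           // one Beta(alpha, beta) draw
      }
      std::cout << sum / n << " vs " << alpha / (alpha + beta) << "\n";
    }
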
- * - * 0 < alpha < 1, 0 < beta < 1, mean is alpha/(alpha+beta) - */ - -long double betavariate(long double alpha, long double beta) { - long double x = gammavariate(alpha, 1); - long double y = gammavariate(beta, 1); - return x/(x+y); -} - -#ifdef GAMMATEST -#include <stdio.h> - -int main(int argc, char **argv) { - int iteration, niterations = 1000; - - for (iteration = 0; iteration < niterations; ++iteration) { - long double alpha = 100*mt_genrand_real3(); - long double gv = gammavariate(alpha, 1); - long double pgv = gammadist(gv, alpha, 1); - long double pgvl = exp(lgammadist(gv, alpha, 1)); - fprintf(stderr, "iteration = %d, gammavariate(%lg,1) = %lg, gammadist(%lg,%lg,1) = %lg, exp(lgammadist(%lg,%lg,1) = %lg\n", - iteration, alpha, gv, gv, alpha, pgv, gv, alpha, pgvl); - } - return 0; -} - -#endif /* GAMMATEST */ - - -/* Other routines I tried, but which weren't as good as the ones above */ - -#if 0 - -/*! gammavariate() returns samples from a Gamma distribution - *! where alpha is the shape parameter and beta is the scale - *! parameter, using the algorithm described on p. 94 of - *! Gentle (1998) Random Number Generation and Monte Carlo Methods, - *! Springer. - */ - -long double gammavariate(long double alpha) { - - assert(alpha > 0); - - if (alpha > 1.0) { - while (1) { - long double u1 = mt_genrand_real3(); - long double u2 = mt_genrand_real3(); - long double v = (alpha - 1/(6*alpha))*u1/(alpha-1)*u2; - if (2*(u2-1)/(alpha-1) + v + 1/v <= 2 - || 2*log(u2)/(alpha-1) - log(v) + v <= 1) - return (alpha-1)*v; - } - } else if (alpha < 1.0) { - while (1) { - long double t = 0.07 + 0.75*sqrt(1-alpha); - long double b = alpha + exp(-t)*alpha/t; - long double u1 = mt_genrand_real3(); - long double u2 = mt_genrand_real3(); - long double v = b*u1; - if (v <= 1) { - long double x = t*pow(v, 1/alpha); - if (u2 <= (2 - x)/(2 + x)) - return x; - if (u2 <= exp(-x)) - return x; - } - else { - long double x = log(t*(b-v)/alpha); - long double y = x/t; - if (u2*(alpha + y*(1-alpha)) <= 1) - return x; - if (u2 <= pow(y,alpha-1)) - return x; - } - } - } - else - return -log(mt_genrand_real3()); -} - - -/*! gammavariate() returns a deviate distributed as a gamma - *! distribution of order alpha, beta, i.e., a waiting time to the alpha'th - *! event in a Poisson process of unit mean. - *! - *! 
Code from Numerical Recipes - */ - -long double nr_gammavariate(long double ia) { - int j; - long double am,e,s,v1,v2,x,y; - assert(ia > 0); - if (ia < 10) { - x=1.0; - for (j=1;j<=ia;j++) - x *= mt_genrand_real3(); - x = -log(x); - } else { - do { - do { - do { - v1=mt_genrand_real3(); - v2=2.0*mt_genrand_real3()-1.0; - } while (v1*v1+v2*v2 > 1.0); - y=v2/v1; - am=ia-1; - s=sqrt(2.0*am+1.0); - x=s*y+am; - } while (x <= 0.0); - e=(1.0+y*y)*exp(am*log(x/am)-s*y); - } while (mt_genrand_real3() > e); - } - return x; -} - -#endif diff --git a/gi/pyp-topics/src/gammadist.h b/gi/pyp-topics/src/gammadist.h deleted file mode 100644 index b6ad6c40..00000000 --- a/gi/pyp-topics/src/gammadist.h +++ /dev/null @@ -1,72 +0,0 @@ -/* gammadist.h -- computes probability of samples under / produces samples from a Gamma distribution - * - * Mark Johnson, 22nd March 2008 - * - * gammavariate() was translated from random.py in Python library - * - * The Gamma distribution is: - * - * Gamma(x | alpha, beta) = pow(x/beta, alpha-1) * exp(-x/beta) / (gamma(alpha)*beta) - * - * shape parameter alpha > 0 (also called c), scale parameter beta > 0 (also called s); - * mean is alpha*beta, variance is alpha*beta**2 - * - * Note that many parameterizations of the Gamma function are in terms of an _inverse_ - * scale parameter beta, which is the inverse of the beta given here. - */ - -#ifndef GAMMADIST_H -#define GAMMADIST_H - -#ifdef __cplusplus -extern "C" { -#endif - - /* gammadist() returns the probability density of x under a Gamma(alpha,beta) - * distribution - */ - - long double gammadist(long double x, long double alpha, long double beta); - - /* lgammadist() returns the log probability density of x under a Gamma(alpha,beta) - * distribution - */ - - long double lgammadist(long double x, long double alpha, long double beta); - - /* gammavariate() generates samples from a Gamma distribution - * conditioned on the parameters alpha and beta. - * - * alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 - * - * Warning: a few older sources define the gamma distribution in terms - * of alpha > -1.0 - */ - - long double gammavariate(long double alpha, long double beta); - - /* betadist() returns the probability density of x under a Beta(alpha,beta) - * distribution. - */ - - long double betadist(long double x, long double alpha, long double beta); - - /* lbetadist() returns the log probability density of x under a Beta(alpha,beta) - * distribution. - */ - - long double lbetadist(long double x, long double alpha, long double beta); - - /* betavariate() generates a sample from a Beta distribution with - * parameters alpha and beta. - * - * 0 < alpha < 1, 0 < beta < 1, mean is alpha/(alpha+beta) - */ - - long double betavariate(long double alpha, long double beta); - -#ifdef __cplusplus -}; -#endif - -#endif /* GAMMADIST_H */ diff --git a/gi/pyp-topics/src/gzstream.cc b/gi/pyp-topics/src/gzstream.cc deleted file mode 100644 index 7c4d3a12..00000000 --- a/gi/pyp-topics/src/gzstream.cc +++ /dev/null @@ -1,165 +0,0 @@ -// ============================================================================ -// gzstream, C++ iostream classes wrapping the zlib compression library. -// Copyright (C) 2001 Deepak Bandyopadhyay, Lutz Kettner -// -// This library is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. 
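
Returning to the gammadist.h interface above: a minimal driver (an editor's sketch, not part of the tree; link against the deleted gammadist.c and its mt19937ar sources) that spot-checks the moments documented in the header comments:

    // Empirical mean/variance of gammavariate(alpha, beta) should approach
    // alpha*beta and alpha*beta^2, per the header comments.
    #include <cstdio>
    #include "gammadist.h"

    int main() {
      const long double alpha = 3.0L, beta = 2.0L;
      const int n = 200000;
      long double sum = 0.0L, sumsq = 0.0L;
      for (int i = 0; i < n; ++i) {
        const long double g = gammavariate(alpha, beta);
        sum += g; sumsq += g * g;
      }
      const long double mean = sum / n, var = sumsq / n - mean * mean;
      std::printf("mean %.4Lf (expect %.4Lf)  var %.4Lf (expect %.4Lf)\n",
                  mean, alpha * beta, var, alpha * beta * beta);
    }
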
-// -// This library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -// Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License along with this library; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -// ============================================================================ -// -// File : gzstream.C -// Revision : $Revision: 1.1 $ -// Revision_date : $Date: 2006/03/30 04:05:52 $ -// Author(s) : Deepak Bandyopadhyay, Lutz Kettner -// -// Standard streambuf implementation following Nicolai Josuttis, "The -// Standard C++ Library". -// ============================================================================ - -#include "gzstream.hh" -#include <iostream> -#include <string.h> // for memcpy - -#ifdef GZSTREAM_NAMESPACE -namespace GZSTREAM_NAMESPACE { -#endif - -// ---------------------------------------------------------------------------- -// Internal classes to implement gzstream. See header file for user classes. -// ---------------------------------------------------------------------------- - -// -------------------------------------- -// class gzstreambuf: -// -------------------------------------- - -gzstreambuf* gzstreambuf::open( const char* name, int open_mode) { - if ( is_open()) - return (gzstreambuf*)0; - mode = open_mode; - // no append nor read/write mode - if ((mode & std::ios::ate) || (mode & std::ios::app) - || ((mode & std::ios::in) && (mode & std::ios::out))) - return (gzstreambuf*)0; - char fmode[10]; - char* fmodeptr = fmode; - if ( mode & std::ios::in) - *fmodeptr++ = 'r'; - else if ( mode & std::ios::out) - *fmodeptr++ = 'w'; - *fmodeptr++ = 'b'; - *fmodeptr = '\0'; - file = gzopen( name, fmode); - if (file == 0) - return (gzstreambuf*)0; - opened = 1; - return this; -} - -gzstreambuf * gzstreambuf::close() { - if ( is_open()) { - sync(); - opened = 0; - if ( gzclose( file) == Z_OK) - return this; - } - return (gzstreambuf*)0; -} - -int gzstreambuf::underflow() { // used for input buffer only - if ( gptr() && ( gptr() < egptr())) - return * reinterpret_cast<unsigned char *>( gptr()); - - if ( ! (mode & std::ios::in) || ! opened) - return EOF; - // Josuttis' implementation of inbuf - int n_putback = gptr() - eback(); - if ( n_putback > 4) - n_putback = 4; - memcpy( buffer + (4 - n_putback), gptr() - n_putback, n_putback); - - int num = gzread( file, buffer+4, bufferSize-4); - if (num <= 0) // ERROR or EOF - return EOF; - - // reset buffer pointers - setg( buffer + (4 - n_putback), // beginning of putback area - buffer + 4, // read position - buffer + 4 + num); // end of buffer - - // return next character - return * reinterpret_cast<unsigned char *>( gptr()); -} - -int gzstreambuf::flush_buffer() { - // Separate the writing of the buffer from overflow() and - // sync() operation. - int w = pptr() - pbase(); - if ( gzwrite( file, pbase(), w) != w) - return EOF; - pbump( -w); - return w; -} - -int gzstreambuf::overflow( int c) { // used for output buffer only - if ( ! ( mode & std::ios::out) || ! 
opened) - return EOF; - if (c != EOF) { - *pptr() = c; - pbump(1); - } - if ( flush_buffer() == EOF) - return EOF; - return c; -} - -int gzstreambuf::sync() { - // Changed to use flush_buffer() instead of overflow( EOF) - // which caused improper behavior with std::endl and flush(), - // bug reported by Vincent Ricard. - if ( pptr() && pptr() > pbase()) { - if ( flush_buffer() == EOF) - return -1; - } - return 0; -} - -// -------------------------------------- -// class gzstreambase: -// -------------------------------------- - -gzstreambase::gzstreambase( const char* name, int mode) { - init( &buf); - open( name, mode); -} - -gzstreambase::~gzstreambase() { - buf.close(); -} - -void gzstreambase::open( const char* name, int open_mode) { - if ( ! buf.open( name, open_mode)) - clear( rdstate() | std::ios::badbit); -} - -void gzstreambase::close() { - if ( buf.is_open()) - if ( ! buf.close()) - clear( rdstate() | std::ios::badbit); -} - -#ifdef GZSTREAM_NAMESPACE -} // namespace GZSTREAM_NAMESPACE -#endif - -// ============================================================================ -// EOF // diff --git a/gi/pyp-topics/src/gzstream.hh b/gi/pyp-topics/src/gzstream.hh deleted file mode 100644 index ad9785fd..00000000 --- a/gi/pyp-topics/src/gzstream.hh +++ /dev/null @@ -1,121 +0,0 @@ -// ============================================================================ -// gzstream, C++ iostream classes wrapping the zlib compression library. -// Copyright (C) 2001 Deepak Bandyopadhyay, Lutz Kettner -// -// This library is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// -// This library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -// Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License along with this library; if not, write to the Free Software -// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -// ============================================================================ -// -// File : gzstream.h -// Revision : $Revision: 1.1 $ -// Revision_date : $Date: 2006/03/30 04:05:52 $ -// Author(s) : Deepak Bandyopadhyay, Lutz Kettner -// -// Standard streambuf implementation following Nicolai Josuttis, "The -// Standard C++ Library". -// ============================================================================ - -#ifndef GZSTREAM_H -#define GZSTREAM_H 1 - -// standard C++ with new header file names and std:: namespace -#include <iostream> -#include <fstream> -#include <zlib.h> - -#ifdef GZSTREAM_NAMESPACE -namespace GZSTREAM_NAMESPACE { -#endif - -// ---------------------------------------------------------------------------- -// Internal classes to implement gzstream. See below for user classes. -// ---------------------------------------------------------------------------- - -class gzstreambuf : public std::streambuf { -private: - static const int bufferSize = 47+256; // size of data buff - // totals 512 bytes under g++ for igzstream at the end. 
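
The user-facing igzstream/ogzstream classes declared further below drop in for std::ifstream/std::ofstream on gzip files. A round-trip usage sketch (illustrative file name; compile with gzstream.cc and -lz):

    // Write a gzip-compressed file with ogzstream, read it back with igzstream.
    #include <iostream>
    #include <string>
    #include "gzstream.hh"

    int main() {
      {
        ogzstream out("demo.txt.gz");
        if (!out.good()) return 1;
        out << "hello compressed world\n";
      }  // ogzstream destructor flushes and closes the gz file
      igzstream in("demo.txt.gz");
      std::string line;
      while (std::getline(in, line))
        std::cout << line << '\n';
      return 0;
    }
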
- - gzFile file; // file handle for compressed file - char buffer[bufferSize]; // data buffer - char opened; // open/close state of stream - int mode; // I/O mode - - int flush_buffer(); -public: - gzstreambuf() : opened(0) { - setp( buffer, buffer + (bufferSize-1)); - setg( buffer + 4, // beginning of putback area - buffer + 4, // read position - buffer + 4); // end position - // ASSERT: both input & output capabilities will not be used together - } - int is_open() { return opened; } - gzstreambuf* open( const char* name, int open_mode); - gzstreambuf* close(); - ~gzstreambuf() { close(); } - - virtual int overflow( int c = EOF); - virtual int underflow(); - virtual int sync(); -}; - -class gzstreambase : virtual public std::ios { -protected: - gzstreambuf buf; -public: - gzstreambase() { init(&buf); } - gzstreambase( const char* name, int open_mode); - ~gzstreambase(); - void open( const char* name, int open_mode); - void close(); - gzstreambuf* rdbuf() { return &buf; } -}; - -// ---------------------------------------------------------------------------- -// User classes. Use igzstream and ogzstream analogously to ifstream and -// ofstream respectively. They read and write files based on the gz* -// function interface of the zlib. Files are compatible with gzip compression. -// ---------------------------------------------------------------------------- - -class igzstream : public gzstreambase, public std::istream { -public: - igzstream() : std::istream( &buf) {} - igzstream( const char* name, int open_mode = std::ios::in) - : gzstreambase( name, open_mode), std::istream( &buf) {} - gzstreambuf* rdbuf() { return gzstreambase::rdbuf(); } - void open( const char* name, int open_mode = std::ios::in) { - gzstreambase::open( name, open_mode); - } -}; - -class ogzstream : public gzstreambase, public std::ostream { -public: - ogzstream() : std::ostream( &buf) {} - ogzstream( const char* name, int mode = std::ios::out) - : gzstreambase( name, mode), std::ostream( &buf) {} - gzstreambuf* rdbuf() { return gzstreambase::rdbuf(); } - void open( const char* name, int open_mode = std::ios::out) { - gzstreambase::open( name, open_mode); - } -}; - -#ifdef GZSTREAM_NAMESPACE -} // namespace GZSTREAM_NAMESPACE -#endif - -#endif // GZSTREAM_H -// ============================================================================ -// EOF // - diff --git a/gi/pyp-topics/src/log_add.h b/gi/pyp-topics/src/log_add.h deleted file mode 100644 index e0620c5a..00000000 --- a/gi/pyp-topics/src/log_add.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef log_add_hh -#define log_add_hh - -#include <limits> -#include <iostream> -#include <cassert> -#include <cmath> - -template <typename T> -struct Log -{ - static T zero() { return -std::numeric_limits<T>::infinity(); } - - static T add(T l1, T l2) - { - if (l1 == zero()) return l2; - if (l1 > l2) - return l1 + std::log(1 + exp(l2 - l1)); - else - return l2 + std::log(1 + exp(l1 - l2)); - } - - static T subtract(T l1, T l2) - { - //std::assert(l1 >= l2); - return l1 + log(1 - exp(l2 - l1)); - } -}; - -#endif diff --git a/gi/pyp-topics/src/macros.Linux b/gi/pyp-topics/src/macros.Linux deleted file mode 100644 index 7c6e7fa7..00000000 --- a/gi/pyp-topics/src/macros.Linux +++ /dev/null @@ -1,18 +0,0 @@ -CC = /home/pblunsom/software/bin/mpicc -CXX = /home/pblunsom/software/bin/mpicxx -LD = /home/pblunsom/software/bin/mpicxx -FC = /home/pblunsom/software/bin/mpif77 - -SOFTWARE_DIR=/export/ws10smt/software - -CXXFLAGS = -Wall -I${SOFTWARE_DIR}/include -CFLAGS = -Wall -I${SOFTWARE_DIR}/include 
-FFLAGS = -Wall -LDFLAGS = -lm -lz -L${SOFTWARE_DIR}/lib \ - -lboost_program_options -lboost_mpi -lboost_serialization \ - -lboost_regex -L../../../decoder -lcdec - -FFLAGS += -g -O6 -march=native -CFLAGS += -g -O6 -march=native -CXXFLAGS += -g -O6 -march=native -LDFLAGS += -g -O6 -march=native diff --git a/gi/pyp-topics/src/makefile.darwin b/gi/pyp-topics/src/makefile.darwin deleted file mode 100644 index af608fd8..00000000 --- a/gi/pyp-topics/src/makefile.darwin +++ /dev/null @@ -1,15 +0,0 @@ -CC = /usr/bin/gcc -CXX = /usr/bin/g++ -LD = /usr/bin/g++ -FC=/usr/bin/g77 - -ARCH=i686-m64 -CXXFLAGS = -m64 -Wall -I/Users/pblunsom/packages/include -CFLAGS = -m64 -Wall -I/Users/pblunsom/packages/include -FFLAGS = -m64 -Wall -LDFLAGS = -L/Users/pblunsom/packages/lib -lboost_program_options -lm -lz - -FFLAGS += -g -O3 -funroll-loops #-pg -CFLAGS += -g -O3 -funroll-loops #-pg -CXXFLAGS += -g -O3 -funroll-loops #-pg -LDFLAGS += -g -O3 -funroll-loops #-pg diff --git a/gi/pyp-topics/src/makefile.depend b/gi/pyp-topics/src/makefile.depend deleted file mode 100644 index 9b8e306c..00000000 --- a/gi/pyp-topics/src/makefile.depend +++ /dev/null @@ -1,4042 +0,0 @@ -contexts_corpus.o: contexts_corpus.cc contexts_corpus.hh \ - /home/pblunsom/packages/include/boost/ptr_container/ptr_vector.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/ptr_sequence_adapter.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/reversible_ptr_container.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/throw_exception.hpp \ - /home/pblunsom/packages/include/boost/assert.hpp \ - /home/pblunsom/packages/include/boost/config.hpp \ - /home/pblunsom/packages/include/boost/config/user.hpp \ - /home/pblunsom/packages/include/boost/config/select_compiler_config.hpp \ - /home/pblunsom/packages/include/boost/config/compiler/gcc.hpp \ - /home/pblunsom/packages/include/boost/config/select_stdlib_config.hpp \ - /home/pblunsom/packages/include/boost/config/no_tr1/utility.hpp \ - /home/pblunsom/packages/include/boost/config/stdlib/libstdcpp3.hpp \ - /home/pblunsom/packages/include/boost/config/select_platform_config.hpp \ - /home/pblunsom/packages/include/boost/config/platform/linux.hpp \ - /home/pblunsom/packages/include/boost/config/posix_features.hpp \ - /home/pblunsom/packages/include/boost/config/suffix.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/scoped_deleter.hpp \ - /home/pblunsom/packages/include/boost/scoped_array.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/scoped_array.hpp \ - /home/pblunsom/packages/include/boost/checked_delete.hpp \ - /home/pblunsom/packages/include/boost/detail/workaround.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/operator_bool.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/static_move_ptr.hpp \ - /home/pblunsom/packages/include/boost/compressed_pair.hpp \ - /home/pblunsom/packages/include/boost/detail/compressed_pair.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_cv.hpp \ - /home/pblunsom/packages/include/boost/type_traits/broken_compiler_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/lambda_support.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/ttp.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/msvc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/gcc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/workaround.hpp \ - 
[makefile.depend continues for roughly 4,000 auto-generated lines: each object file (contexts_corpus.o, contexts_lexer.o, corpus.o, ...) lists its project headers plus the transitive Boost headers under /home/pblunsom/packages/include; the repetitive listing is omitted here.]
/home/pblunsom/packages/include/boost/mpl/aux_/preprocessor/params.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/preprocessor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comma_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/comma_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/iif.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bool.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/empty.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/comma.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repeat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/repeat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/debug/error.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/auto_rec.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/eat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/inc.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/inc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/overload_resolution.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/type_trait_undef.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_empty.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_convertible.hpp \ - /home/pblunsom/packages/include/boost/type_traits/intrinsics.hpp \ - /home/pblunsom/packages/include/boost/type_traits/config.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_same.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/bool_trait_def.hpp \ - /home/pblunsom/packages/include/boost/type_traits/integral_constant.hpp \ - /home/pblunsom/packages/include/boost/mpl/bool.hpp \ - /home/pblunsom/packages/include/boost/mpl/bool_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/integral_c.hpp \ - /home/pblunsom/packages/include/boost/mpl/integral_c_fwd.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/bool_trait_undef.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_reference.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_volatile.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/yes_no_type.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_array.hpp \ - /home/pblunsom/packages/include/boost/type_traits/add_reference.hpp \ - /home/pblunsom/packages/include/boost/type_traits/ice.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_or.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_and.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_not.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_eq.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_arithmetic.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_integral.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_float.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_void.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_abstract.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_class.hpp \ - /home/pblunsom/packages/include/boost/call_traits.hpp \ - /home/pblunsom/packages/include/boost/detail/call_traits.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_pointer.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_member_pointer.hpp \ - 
/home/pblunsom/packages/include/boost/type_traits/is_member_function_pointer.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/is_mem_fun_pointer_impl.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/default_deleter.hpp \ - /home/pblunsom/packages/include/boost/mpl/if.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/value_wknd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/integral.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/eti.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/lambda_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/void_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/lambda_arity_param.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/arity.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/dtp.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessor/enum.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessor/def_params_tail.hpp \ - /home/pblunsom/packages/include/boost/mpl/limits/arity.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/and.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bitand.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/identity.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/identity.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/empty.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/add.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/dec.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/while.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/fold_left.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/detail/fold_left.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/expr_iif.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/adt.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/is_binary.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/check.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/compl.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/fold_right.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/detail/fold_right.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/reverse.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/detail/while.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/elem.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/sub.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_bounds.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/is_convertible.hpp \ - /home/pblunsom/packages/include/boost/mpl/and.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/use_preprocessed.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/nested_type_wknd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/include_preprocessed.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/compiler.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/stringize.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/and.hpp \ - /home/pblunsom/packages/include/boost/mpl/identity.hpp \ - 
/home/pblunsom/packages/include/boost/utility/enable_if.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/move.hpp \ - /home/pblunsom/packages/include/boost/static_assert.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/exception.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/clone_allocator.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/nullable.hpp \ - /home/pblunsom/packages/include/boost/mpl/eval_if.hpp \ - /home/pblunsom/packages/include/boost/range/functions.hpp \ - /home/pblunsom/packages/include/boost/range/begin.hpp \ - /home/pblunsom/packages/include/boost/range/config.hpp \ - /home/pblunsom/packages/include/boost/range/iterator.hpp \ - /home/pblunsom/packages/include/boost/range/mutable_iterator.hpp \ - /home/pblunsom/packages/include/boost/range/detail/extract_optional_type.hpp \ - /home/pblunsom/packages/include/boost/iterator/iterator_traits.hpp \ - /home/pblunsom/packages/include/boost/detail/iterator.hpp \ - /home/pblunsom/packages/include/boost/range/const_iterator.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_const.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_const.hpp \ - /home/pblunsom/packages/include/boost/range/end.hpp \ - /home/pblunsom/packages/include/boost/range/detail/implementation_help.hpp \ - /home/pblunsom/packages/include/boost/range/detail/common.hpp \ - /home/pblunsom/packages/include/boost/range/detail/sfinae.hpp \ - /home/pblunsom/packages/include/boost/range/size.hpp \ - /home/pblunsom/packages/include/boost/range/difference_type.hpp \ - /home/pblunsom/packages/include/boost/range/distance.hpp \ - /home/pblunsom/packages/include/boost/range/empty.hpp \ - /home/pblunsom/packages/include/boost/range/rbegin.hpp \ - /home/pblunsom/packages/include/boost/range/reverse_iterator.hpp \ - /home/pblunsom/packages/include/boost/iterator/reverse_iterator.hpp \ - /home/pblunsom/packages/include/boost/iterator.hpp \ - /home/pblunsom/packages/include/boost/utility.hpp \ - /home/pblunsom/packages/include/boost/utility/addressof.hpp \ - /home/pblunsom/packages/include/boost/utility/base_from_member.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_binary_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/rem.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/repeat_from_to.hpp \ - /home/pblunsom/packages/include/boost/utility/binary.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/deduce_d.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/cat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/fold_left.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/seq.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/elem.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/size.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/transform.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/mod.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/detail/div_base.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/less_equal.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/not.hpp \ - /home/pblunsom/packages/include/boost/next_prior.hpp \ - /home/pblunsom/packages/include/boost/noncopyable.hpp \ - /home/pblunsom/packages/include/boost/iterator/iterator_adaptor.hpp \ - 
/home/pblunsom/packages/include/boost/iterator/iterator_categories.hpp \ - /home/pblunsom/packages/include/boost/iterator/detail/config_def.hpp \ - /home/pblunsom/packages/include/boost/mpl/placeholders.hpp \ - /home/pblunsom/packages/include/boost/mpl/arg.hpp \ - /home/pblunsom/packages/include/boost/mpl/arg_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na_assert.hpp \ - /home/pblunsom/packages/include/boost/mpl/assert.hpp \ - /home/pblunsom/packages/include/boost/mpl/not.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/yes_no.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/arrays.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/pp_counter.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/arity_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/arg_typedef.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/arg.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/placeholders.hpp \ - /home/pblunsom/packages/include/boost/iterator/detail/config_undef.hpp \ - /home/pblunsom/packages/include/boost/iterator/iterator_facade.hpp \ - /home/pblunsom/packages/include/boost/iterator/interoperable.hpp \ - /home/pblunsom/packages/include/boost/mpl/or.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/or.hpp \ - /home/pblunsom/packages/include/boost/iterator/detail/facade_iterator_category.hpp \ - /home/pblunsom/packages/include/boost/detail/indirect_traits.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_function.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/false_result.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/is_function_ptr_helper.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_reference.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_pointer.hpp \ - /home/pblunsom/packages/include/boost/iterator/detail/enable_if.hpp \ - /home/pblunsom/packages/include/boost/implicit_cast.hpp \ - /home/pblunsom/packages/include/boost/type_traits/add_const.hpp \ - /home/pblunsom/packages/include/boost/type_traits/add_pointer.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_pod.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_scalar.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_enum.hpp \ - /home/pblunsom/packages/include/boost/mpl/always.hpp \ - /home/pblunsom/packages/include/boost/mpl/apply.hpp \ - /home/pblunsom/packages/include/boost/mpl/apply_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/apply_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/apply_wrap.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/has_apply.hpp \ - /home/pblunsom/packages/include/boost/mpl/has_xxx.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/type_wrapper.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/has_xxx.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/msvc_typename.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/has_apply.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/msvc_never_true.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/apply_wrap.hpp \ - /home/pblunsom/packages/include/boost/mpl/lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/bind.hpp \ - /home/pblunsom/packages/include/boost/mpl/bind_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/bind.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/bind_fwd.hpp \ - 
/home/pblunsom/packages/include/boost/mpl/next.hpp \ - /home/pblunsom/packages/include/boost/mpl/next_prior.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/common_name_wknd.hpp \ - /home/pblunsom/packages/include/boost/mpl/protect.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/bind.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/full_lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/quote.hpp \ - /home/pblunsom/packages/include/boost/mpl/void.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/has_type.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/bcc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/quote.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/template_arity.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/template_arity.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/full_lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/apply.hpp \ - /home/pblunsom/packages/include/boost/range/rend.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/indirect_fun.hpp \ - /home/pblunsom/packages/include/boost/utility/result_of.hpp \ - /home/pblunsom/packages/include/boost/type.hpp \ - /home/pblunsom/packages/include/boost/preprocessor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/library.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/div.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/mul.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/data.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/elem.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/size.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/insert.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/push_back.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/not_equal.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/pop_back.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/deduce_z.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/pop_front.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/push_front.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/remove.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/replace.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/reverse.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/reverse.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/equal.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/greater.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/less.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/greater_equal.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/config/limits.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/expr_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/debug.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/debug/assert.hpp \ - 
/home/pblunsom/packages/include/boost/preprocessor/debug/line.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/iterate.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/slot/slot.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/slot/detail/def.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/apply.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/is_unary.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/expand.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/intercept.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/local.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/self.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/append.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/at.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/rest_n.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/cat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/enum.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/for_each_i.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/for.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/detail/for.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/filter.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/first_n.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/for_each.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/for_each_product.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/to_tuple.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/to_list.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/size.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/transform.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bitnor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bitor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bitxor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/nor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/or.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/xor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/paren.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/paren_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/deduce_r.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_params_with_a_default.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_params_with_defaults.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_shifted.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_shifted_binary_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_shifted_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_trailing.hpp \ - 
/home/pblunsom/packages/include/boost/preprocessor/repetition/enum_trailing_binary_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_trailing_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/selection.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/selection/max.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/selection/min.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/enum.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/filter.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/first_n.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/detail/split.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/fold_right.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/reverse.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/for_each.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/for_each_i.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/for_each_product.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/insert.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/rest_n.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/pop_back.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/pop_front.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/push_back.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/push_front.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/remove.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/replace.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/subseq.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/to_array.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/to_tuple.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/slot.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/to_seq.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/detail/iter/forward1.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/detail/bounds/lower1.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/slot/detail/shared.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/detail/bounds/upper1.hpp \ - /home/pblunsom/packages/include/boost/utility/detail/result_of_iterate.hpp \ - /home/pblunsom/packages/include/boost/pointee.hpp \ - /home/pblunsom/packages/include/boost/detail/is_incrementable.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/void_ptr_iterator.hpp \ - gzstream.hh -gzstream.o: gzstream.cc gzstream.hh -mpi-pyp-topics.o: mpi-pyp-topics.cc \ - /home/pblunsom/packages/include/boost/mpi/communicator.hpp \ - /home/pblunsom/packages/include/boost/mpi/config.hpp \ - /home/pblunsom/packages/include/mpi.h \ - /home/pblunsom/packages/include/mpio.h \ - /home/pblunsom/packages/include/mpi.h \ - /home/pblunsom/packages/include/mpicxx.h \ - /home/pblunsom/packages/include/boost/config.hpp \ - /home/pblunsom/packages/include/boost/config/user.hpp \ - /home/pblunsom/packages/include/boost/config/select_compiler_config.hpp \ - /home/pblunsom/packages/include/boost/config/compiler/gcc.hpp \ - /home/pblunsom/packages/include/boost/config/select_stdlib_config.hpp \ - /home/pblunsom/packages/include/boost/config/no_tr1/utility.hpp \ - 
/home/pblunsom/packages/include/boost/config/stdlib/libstdcpp3.hpp \ - /home/pblunsom/packages/include/boost/config/select_platform_config.hpp \ - /home/pblunsom/packages/include/boost/config/platform/linux.hpp \ - /home/pblunsom/packages/include/boost/config/posix_features.hpp \ - /home/pblunsom/packages/include/boost/config/suffix.hpp \ - /home/pblunsom/packages/include/boost/config/auto_link.hpp \ - /home/pblunsom/packages/include/boost/mpi/exception.hpp \ - /home/pblunsom/packages/include/boost/throw_exception.hpp \ - /home/pblunsom/packages/include/boost/exception/detail/attribute_noreturn.hpp \ - /home/pblunsom/packages/include/boost/detail/workaround.hpp \ - /home/pblunsom/packages/include/boost/exception/exception.hpp \ - /home/pblunsom/packages/include/boost/current_function.hpp \ - /home/pblunsom/packages/include/boost/optional.hpp \ - /home/pblunsom/packages/include/boost/optional/optional.hpp \ - /home/pblunsom/packages/include/boost/assert.hpp \ - /home/pblunsom/packages/include/boost/type.hpp \ - /home/pblunsom/packages/include/boost/type_traits/alignment_of.hpp \ - /home/pblunsom/packages/include/boost/type_traits/intrinsics.hpp \ - /home/pblunsom/packages/include/boost/type_traits/config.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_same.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/bool_trait_def.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/template_arity_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/int.hpp \ - /home/pblunsom/packages/include/boost/mpl/int_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/adl_barrier.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/adl.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/msvc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/intel.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/gcc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/workaround.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/nttp_decl.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/nttp.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/integral_wrapper.hpp \ - /home/pblunsom/packages/include/boost/mpl/integral_c_tag.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/static_constant.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/static_cast.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/cat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/config/config.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/template_arity_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessor/params.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/preprocessor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comma_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/comma_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/iif.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bool.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/empty.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/comma.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repeat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/repeat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/debug/error.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/auto_rec.hpp 
\ - /home/pblunsom/packages/include/boost/preprocessor/tuple/eat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/inc.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/inc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/ttp.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/ctps.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/overload_resolution.hpp \ - /home/pblunsom/packages/include/boost/type_traits/integral_constant.hpp \ - /home/pblunsom/packages/include/boost/mpl/bool.hpp \ - /home/pblunsom/packages/include/boost/mpl/bool_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/integral_c.hpp \ - /home/pblunsom/packages/include/boost/mpl/integral_c_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/lambda_support.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/bool_trait_undef.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_reference.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_volatile.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/cv_traits_impl.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/size_t_trait_def.hpp \ - /home/pblunsom/packages/include/boost/mpl/size_t.hpp \ - /home/pblunsom/packages/include/boost/mpl/size_t_fwd.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/size_t_trait_undef.hpp \ - /home/pblunsom/packages/include/boost/type_traits/type_with_alignment.hpp \ - /home/pblunsom/packages/include/boost/mpl/if.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/value_wknd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/integral.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/eti.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/lambda_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/void_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/lambda_arity_param.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/arity.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/dtp.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessor/enum.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessor/def_params_tail.hpp \ - /home/pblunsom/packages/include/boost/mpl/limits/arity.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/and.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bitand.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/identity.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/identity.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/empty.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/add.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/dec.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/while.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/fold_left.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/detail/fold_left.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/expr_iif.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/adt.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/is_binary.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/check.hpp \ - 
/home/pblunsom/packages/include/boost/preprocessor/logical/compl.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/fold_right.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/detail/fold_right.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/reverse.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/detail/while.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/elem.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/sub.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/for_each_i.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/for.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/detail/for.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/rem.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/to_list.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/transform.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/append.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_pod.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_void.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_scalar.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_arithmetic.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_integral.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_float.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_or.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_enum.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_pointer.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_member_pointer.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_member_function_pointer.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/is_mem_fun_pointer_impl.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_cv.hpp \ - /home/pblunsom/packages/include/boost/type_traits/broken_compiler_spec.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/type_trait_def.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/type_trait_undef.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_and.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_not.hpp \ - /home/pblunsom/packages/include/boost/static_assert.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_reference.hpp \ - /home/pblunsom/packages/include/boost/mpl/not.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/nested_type_wknd.hpp \ - /home/pblunsom/packages/include/boost/detail/reference_content.hpp \ - /home/pblunsom/packages/include/boost/type_traits/has_nothrow_copy.hpp \ - /home/pblunsom/packages/include/boost/type_traits/has_trivial_copy.hpp \ - /home/pblunsom/packages/include/boost/mpl/void.hpp \ - /home/pblunsom/packages/include/boost/none.hpp \ - /home/pblunsom/packages/include/boost/none_t.hpp \ - /home/pblunsom/packages/include/boost/utility/compare_pointees.hpp \ - /home/pblunsom/packages/include/boost/optional/optional_fwd.hpp \ - /home/pblunsom/packages/include/boost/shared_ptr.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/shared_ptr.hpp \ - /home/pblunsom/packages/include/boost/config/no_tr1/memory.hpp \ - /home/pblunsom/packages/include/boost/checked_delete.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/shared_count.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/bad_weak_ptr.hpp \ - 
/home/pblunsom/packages/include/boost/smart_ptr/detail/sp_counted_base.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/sp_has_sync.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/sp_counted_base_gcc_x86.hpp \ - /home/pblunsom/packages/include/boost/detail/sp_typeinfo.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/sp_counted_impl.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/sp_convertible.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/spinlock_pool.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/spinlock.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/spinlock_sync.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/yield_k.hpp \ - /home/pblunsom/packages/include/boost/memory_order.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/operator_bool.hpp \ - /home/pblunsom/packages/include/boost/mpi/datatype.hpp \ - /home/pblunsom/packages/include/boost/mpi/datatype_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/or.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/use_preprocessed.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/include_preprocessed.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/compiler.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/stringize.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/or.hpp \ - /home/pblunsom/packages/include/boost/mpl/and.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/and.hpp \ - /home/pblunsom/packages/include/boost/mpi/detail/mpi_datatype_cache.hpp \ - /home/pblunsom/packages/include/boost/mpi/detail/mpi_datatype_oarchive.hpp \ - /home/pblunsom/packages/include/boost/archive/detail/oserializer.hpp \ - /home/pblunsom/packages/include/boost/mpl/eval_if.hpp \ - /home/pblunsom/packages/include/boost/mpl/equal_to.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/comparison_op.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/numeric_op.hpp \ - /home/pblunsom/packages/include/boost/mpl/numeric_cast.hpp \ - /home/pblunsom/packages/include/boost/mpl/apply_wrap.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/has_apply.hpp \ - /home/pblunsom/packages/include/boost/mpl/has_xxx.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/type_wrapper.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/yes_no.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/arrays.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/has_xxx.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/msvc_typename.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/has_apply.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/msvc_never_true.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/apply_wrap.hpp \ - /home/pblunsom/packages/include/boost/mpl/tag.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/has_tag.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/numeric_cast_utils.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/forwarding.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/msvc_eti_base.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/is_msvc_eti_arg.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/equal_to.hpp \ - /home/pblunsom/packages/include/boost/mpl/greater_equal.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/greater_equal.hpp \ - 
/home/pblunsom/packages/include/boost/mpl/identity.hpp \ - /home/pblunsom/packages/include/boost/serialization/extended_type_info_typeid.hpp \ - /home/pblunsom/packages/include/boost/serialization/static_warning.hpp \ - /home/pblunsom/packages/include/boost/mpl/print.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_polymorphic.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_const.hpp \ - /home/pblunsom/packages/include/boost/serialization/singleton.hpp \ - /home/pblunsom/packages/include/boost/noncopyable.hpp \ - /home/pblunsom/packages/include/boost/serialization/force_include.hpp \ - /home/pblunsom/packages/include/boost/serialization/extended_type_info.hpp \ - /home/pblunsom/packages/include/boost/serialization/config.hpp \ - /home/pblunsom/packages/include/boost/config/abi_prefix.hpp \ - /home/pblunsom/packages/include/boost/config/abi_suffix.hpp \ - /home/pblunsom/packages/include/boost/serialization/factory.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/greater.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/less.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/less_equal.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/not.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/not_equal.hpp \ - /home/pblunsom/packages/include/boost/serialization/access.hpp \ - /home/pblunsom/packages/include/boost/serialization/pfto.hpp \ - /home/pblunsom/packages/include/boost/serialization/throw_exception.hpp \ - /home/pblunsom/packages/include/boost/serialization/smart_cast.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_base_and_derived.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_pointer.hpp \ - /home/pblunsom/packages/include/boost/serialization/assume_abstract.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_abstract.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_const.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_extent.hpp \ - /home/pblunsom/packages/include/boost/serialization/serialization.hpp \ - /home/pblunsom/packages/include/boost/serialization/strong_typedef.hpp \ - /home/pblunsom/packages/include/boost/operators.hpp \ - /home/pblunsom/packages/include/boost/iterator.hpp \ - /home/pblunsom/packages/include/boost/serialization/nvp.hpp \ - /home/pblunsom/packages/include/boost/serialization/level.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_fundamental.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_array.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_class.hpp \ - /home/pblunsom/packages/include/boost/serialization/level_enum.hpp \ - /home/pblunsom/packages/include/boost/serialization/tracking.hpp \ - /home/pblunsom/packages/include/boost/mpl/greater.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/greater.hpp \ - /home/pblunsom/packages/include/boost/serialization/tracking_enum.hpp \ - /home/pblunsom/packages/include/boost/serialization/type_info_implementation.hpp \ - /home/pblunsom/packages/include/boost/serialization/traits.hpp \ - /home/pblunsom/packages/include/boost/serialization/split_member.hpp \ - /home/pblunsom/packages/include/boost/serialization/base_object.hpp \ - /home/pblunsom/packages/include/boost/serialization/void_cast_fwd.hpp \ - /home/pblunsom/packages/include/boost/serialization/wrapper.hpp \ - /home/pblunsom/packages/include/boost/serialization/version.hpp \ - 
/home/pblunsom/packages/include/boost/mpl/assert.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/pp_counter.hpp \ - /home/pblunsom/packages/include/boost/mpl/less.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/less.hpp \ - /home/pblunsom/packages/include/boost/mpl/comparison.hpp \ - /home/pblunsom/packages/include/boost/mpl/not_equal_to.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/not_equal_to.hpp \ - /home/pblunsom/packages/include/boost/mpl/less_equal.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/less_equal.hpp \ - /home/pblunsom/packages/include/boost/serialization/void_cast.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_virtual_base_of.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_base_of.hpp \ - /home/pblunsom/packages/include/boost/serialization/array.hpp \ - /home/pblunsom/packages/include/boost/mpl/always.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/arity_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/apply.hpp \ - /home/pblunsom/packages/include/boost/mpl/apply_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/apply_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/placeholders.hpp \ - /home/pblunsom/packages/include/boost/mpl/arg.hpp \ - /home/pblunsom/packages/include/boost/mpl/arg_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na_assert.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/arg_typedef.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/arg.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/placeholders.hpp \ - /home/pblunsom/packages/include/boost/mpl/lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/bind.hpp \ - /home/pblunsom/packages/include/boost/mpl/bind_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/bind.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/bind_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/next.hpp \ - /home/pblunsom/packages/include/boost/mpl/next_prior.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/common_name_wknd.hpp \ - /home/pblunsom/packages/include/boost/mpl/protect.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/bind.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/full_lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/quote.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/has_type.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/bcc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/quote.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/template_arity.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/template_arity.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/full_lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/apply.hpp \ - /home/pblunsom/packages/include/boost/array.hpp \ - /home/pblunsom/packages/include/boost/swap.hpp \ - /home/pblunsom/packages/include/boost/utility/swap.hpp \ - /home/pblunsom/packages/include/boost/detail/iterator.hpp \ - /home/pblunsom/packages/include/boost/serialization/collection_size_type.hpp \ - /home/pblunsom/packages/include/boost/archive/archive_exception.hpp \ - /home/pblunsom/packages/include/boost/archive/detail/decl.hpp \ - /home/pblunsom/packages/include/boost/archive/detail/abi_prefix.hpp \ - 
-  [... several hundred lines of auto-generated header dependencies under
-  /home/pblunsom/packages/include elided; the rule's local inputs are
-  timing.h, clock_gettime_stub.c, mpi-pyp-topics.hh, mpi-pyp.hh, pyp.hh,
-  slice-sampler.h, log_add.h, mt19937ar.h and corpus.hh ...]
-mpi-train-contexts.o: mpi-train-contexts.cc \
-  [... a similar auto-generated list of Boost program_options, MPI,
-  serialization, random, graph and preprocessor header dependencies ...]
/home/pblunsom/packages/include/boost/mpi/nonblocking.hpp \ - /home/pblunsom/packages/include/boost/mpi/skeleton_and_content.hpp \ - /home/pblunsom/packages/include/boost/mpi/detail/forward_skeleton_iarchive.hpp \ - /home/pblunsom/packages/include/boost/mpi/detail/forward_skeleton_oarchive.hpp \ - /home/pblunsom/packages/include/boost/mpi/detail/ignore_iprimitive.hpp \ - /home/pblunsom/packages/include/boost/mpi/detail/ignore_oprimitive.hpp \ - /home/pblunsom/packages/include/boost/mpi/detail/content_oarchive.hpp \ - /home/pblunsom/packages/include/boost/mpi/detail/broadcast_sc.hpp \ - /home/pblunsom/packages/include/boost/mpi/detail/communicator_sc.hpp \ - /home/pblunsom/packages/include/boost/mpi/timer.hpp pyp.hh \ - slice-sampler.h log_add.h mt19937ar.h corpus.hh mpi-corpus.hh \ - contexts_corpus.hh contexts_lexer.h ../../../decoder/dict.h \ - /home/pblunsom/packages/include/boost/functional/hash.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/hash.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/hash_fwd.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/detail/hash_float.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/detail/float_functions.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/detail/limits.hpp \ - /home/pblunsom/packages/include/boost/integer/static_log2.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/detail/hash_float_generic.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/extensions.hpp \ - /home/pblunsom/packages/include/boost/detail/container_fwd.hpp \ - ../../../decoder/wordid.h gzstream.hh -pyp-topics.o: pyp-topics.cc timing.h clock_gettime_stub.c pyp-topics.hh \ - /home/pblunsom/packages/include/boost/ptr_container/ptr_vector.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/ptr_sequence_adapter.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/reversible_ptr_container.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/throw_exception.hpp \ - /home/pblunsom/packages/include/boost/assert.hpp \ - /home/pblunsom/packages/include/boost/config.hpp \ - /home/pblunsom/packages/include/boost/config/user.hpp \ - /home/pblunsom/packages/include/boost/config/select_compiler_config.hpp \ - /home/pblunsom/packages/include/boost/config/compiler/gcc.hpp \ - /home/pblunsom/packages/include/boost/config/select_stdlib_config.hpp \ - /home/pblunsom/packages/include/boost/config/no_tr1/utility.hpp \ - /home/pblunsom/packages/include/boost/config/stdlib/libstdcpp3.hpp \ - /home/pblunsom/packages/include/boost/config/select_platform_config.hpp \ - /home/pblunsom/packages/include/boost/config/platform/linux.hpp \ - /home/pblunsom/packages/include/boost/config/posix_features.hpp \ - /home/pblunsom/packages/include/boost/config/suffix.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/scoped_deleter.hpp \ - /home/pblunsom/packages/include/boost/scoped_array.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/scoped_array.hpp \ - /home/pblunsom/packages/include/boost/checked_delete.hpp \ - /home/pblunsom/packages/include/boost/detail/workaround.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/operator_bool.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/static_move_ptr.hpp \ - /home/pblunsom/packages/include/boost/compressed_pair.hpp \ - /home/pblunsom/packages/include/boost/detail/compressed_pair.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_cv.hpp \ - 
/home/pblunsom/packages/include/boost/type_traits/broken_compiler_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/lambda_support.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/ttp.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/msvc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/gcc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/workaround.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/ctps.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/cv_traits_impl.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/type_trait_def.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/template_arity_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/int.hpp \ - /home/pblunsom/packages/include/boost/mpl/int_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/adl_barrier.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/adl.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/intel.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/nttp_decl.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/nttp.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/integral_wrapper.hpp \ - /home/pblunsom/packages/include/boost/mpl/integral_c_tag.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/static_constant.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/static_cast.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/cat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/config/config.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/template_arity_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessor/params.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/preprocessor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comma_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/comma_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/iif.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bool.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/empty.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/comma.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repeat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/repeat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/debug/error.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/auto_rec.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/eat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/inc.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/inc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/overload_resolution.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/type_trait_undef.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_empty.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_convertible.hpp \ - /home/pblunsom/packages/include/boost/type_traits/intrinsics.hpp \ - /home/pblunsom/packages/include/boost/type_traits/config.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_same.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/bool_trait_def.hpp \ - /home/pblunsom/packages/include/boost/type_traits/integral_constant.hpp 
\ - /home/pblunsom/packages/include/boost/mpl/bool.hpp \ - /home/pblunsom/packages/include/boost/mpl/bool_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/integral_c.hpp \ - /home/pblunsom/packages/include/boost/mpl/integral_c_fwd.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/bool_trait_undef.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_reference.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_volatile.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/yes_no_type.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_array.hpp \ - /home/pblunsom/packages/include/boost/type_traits/add_reference.hpp \ - /home/pblunsom/packages/include/boost/type_traits/ice.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_or.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_and.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_not.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/ice_eq.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_arithmetic.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_integral.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_float.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_void.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_abstract.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_class.hpp \ - /home/pblunsom/packages/include/boost/call_traits.hpp \ - /home/pblunsom/packages/include/boost/detail/call_traits.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_pointer.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_member_pointer.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_member_function_pointer.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/is_mem_fun_pointer_impl.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/default_deleter.hpp \ - /home/pblunsom/packages/include/boost/mpl/if.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/value_wknd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/integral.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/eti.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/lambda_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/void_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/lambda_arity_param.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/arity.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/dtp.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessor/enum.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessor/def_params_tail.hpp \ - /home/pblunsom/packages/include/boost/mpl/limits/arity.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/and.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bitand.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/identity.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/identity.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/empty.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/add.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/dec.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/while.hpp \ - 
/home/pblunsom/packages/include/boost/preprocessor/list/fold_left.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/detail/fold_left.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/expr_iif.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/adt.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/is_binary.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/check.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/compl.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/fold_right.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/detail/fold_right.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/reverse.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/detail/while.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/elem.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/sub.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_bounds.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/is_convertible.hpp \ - /home/pblunsom/packages/include/boost/mpl/and.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/use_preprocessed.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/nested_type_wknd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/include_preprocessed.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/compiler.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/stringize.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/and.hpp \ - /home/pblunsom/packages/include/boost/mpl/identity.hpp \ - /home/pblunsom/packages/include/boost/utility/enable_if.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/move.hpp \ - /home/pblunsom/packages/include/boost/static_assert.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/exception.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/clone_allocator.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/nullable.hpp \ - /home/pblunsom/packages/include/boost/mpl/eval_if.hpp \ - /home/pblunsom/packages/include/boost/range/functions.hpp \ - /home/pblunsom/packages/include/boost/range/begin.hpp \ - /home/pblunsom/packages/include/boost/range/config.hpp \ - /home/pblunsom/packages/include/boost/range/iterator.hpp \ - /home/pblunsom/packages/include/boost/range/mutable_iterator.hpp \ - /home/pblunsom/packages/include/boost/range/detail/extract_optional_type.hpp \ - /home/pblunsom/packages/include/boost/iterator/iterator_traits.hpp \ - /home/pblunsom/packages/include/boost/detail/iterator.hpp \ - /home/pblunsom/packages/include/boost/range/const_iterator.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_const.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_const.hpp \ - /home/pblunsom/packages/include/boost/range/end.hpp \ - /home/pblunsom/packages/include/boost/range/detail/implementation_help.hpp \ - /home/pblunsom/packages/include/boost/range/detail/common.hpp \ - /home/pblunsom/packages/include/boost/range/detail/sfinae.hpp \ - /home/pblunsom/packages/include/boost/range/size.hpp \ - /home/pblunsom/packages/include/boost/range/difference_type.hpp \ - /home/pblunsom/packages/include/boost/range/distance.hpp \ - /home/pblunsom/packages/include/boost/range/empty.hpp \ - /home/pblunsom/packages/include/boost/range/rbegin.hpp \ - /home/pblunsom/packages/include/boost/range/reverse_iterator.hpp \ - 
/home/pblunsom/packages/include/boost/iterator/reverse_iterator.hpp \ - /home/pblunsom/packages/include/boost/iterator.hpp \ - /home/pblunsom/packages/include/boost/utility.hpp \ - /home/pblunsom/packages/include/boost/utility/addressof.hpp \ - /home/pblunsom/packages/include/boost/utility/base_from_member.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_binary_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/rem.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/repeat_from_to.hpp \ - /home/pblunsom/packages/include/boost/utility/binary.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/deduce_d.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/cat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/fold_left.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/seq.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/elem.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/size.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/transform.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/mod.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/detail/div_base.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/less_equal.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/not.hpp \ - /home/pblunsom/packages/include/boost/next_prior.hpp \ - /home/pblunsom/packages/include/boost/noncopyable.hpp \ - /home/pblunsom/packages/include/boost/iterator/iterator_adaptor.hpp \ - /home/pblunsom/packages/include/boost/iterator/iterator_categories.hpp \ - /home/pblunsom/packages/include/boost/iterator/detail/config_def.hpp \ - /home/pblunsom/packages/include/boost/mpl/placeholders.hpp \ - /home/pblunsom/packages/include/boost/mpl/arg.hpp \ - /home/pblunsom/packages/include/boost/mpl/arg_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/na_assert.hpp \ - /home/pblunsom/packages/include/boost/mpl/assert.hpp \ - /home/pblunsom/packages/include/boost/mpl/not.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/yes_no.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/arrays.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/pp_counter.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/arity_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/arg_typedef.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/arg.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/placeholders.hpp \ - /home/pblunsom/packages/include/boost/iterator/detail/config_undef.hpp \ - /home/pblunsom/packages/include/boost/iterator/iterator_facade.hpp \ - /home/pblunsom/packages/include/boost/iterator/interoperable.hpp \ - /home/pblunsom/packages/include/boost/mpl/or.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/or.hpp \ - /home/pblunsom/packages/include/boost/iterator/detail/facade_iterator_category.hpp \ - /home/pblunsom/packages/include/boost/detail/indirect_traits.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_function.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/false_result.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/is_function_ptr_helper.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_reference.hpp \ - 
/home/pblunsom/packages/include/boost/type_traits/remove_pointer.hpp \ - /home/pblunsom/packages/include/boost/iterator/detail/enable_if.hpp \ - /home/pblunsom/packages/include/boost/implicit_cast.hpp \ - /home/pblunsom/packages/include/boost/type_traits/add_const.hpp \ - /home/pblunsom/packages/include/boost/type_traits/add_pointer.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_pod.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_scalar.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_enum.hpp \ - /home/pblunsom/packages/include/boost/mpl/always.hpp \ - /home/pblunsom/packages/include/boost/mpl/apply.hpp \ - /home/pblunsom/packages/include/boost/mpl/apply_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/apply_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/apply_wrap.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/has_apply.hpp \ - /home/pblunsom/packages/include/boost/mpl/has_xxx.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/type_wrapper.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/has_xxx.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/msvc_typename.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/has_apply.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/msvc_never_true.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/apply_wrap.hpp \ - /home/pblunsom/packages/include/boost/mpl/lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/bind.hpp \ - /home/pblunsom/packages/include/boost/mpl/bind_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/bind.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/bind_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/next.hpp \ - /home/pblunsom/packages/include/boost/mpl/next_prior.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/common_name_wknd.hpp \ - /home/pblunsom/packages/include/boost/mpl/protect.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/bind.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/full_lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/quote.hpp \ - /home/pblunsom/packages/include/boost/mpl/void.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/has_type.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/bcc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/quote.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/template_arity.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/template_arity.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/full_lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessed/gcc/apply.hpp \ - /home/pblunsom/packages/include/boost/range/rend.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/indirect_fun.hpp \ - /home/pblunsom/packages/include/boost/utility/result_of.hpp \ - /home/pblunsom/packages/include/boost/type.hpp \ - /home/pblunsom/packages/include/boost/preprocessor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/library.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/div.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/arithmetic/mul.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/data.hpp \ - 
/home/pblunsom/packages/include/boost/preprocessor/array/elem.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/size.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/insert.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/push_back.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/not_equal.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/pop_back.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/deduce_z.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/pop_front.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/push_front.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/remove.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/replace.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/array/reverse.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/reverse.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/equal.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/greater.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/less.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comparison/greater_equal.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/config/limits.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/expr_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/debug.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/debug/assert.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/debug/line.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/iterate.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/slot/slot.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/slot/detail/def.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/apply.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/detail/is_unary.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/expand.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/intercept.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/local.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/self.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/append.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/at.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/rest_n.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/cat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/enum.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/for_each_i.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/for.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/detail/for.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/filter.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/first_n.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/for_each.hpp \ - 
/home/pblunsom/packages/include/boost/preprocessor/list/for_each_product.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/to_tuple.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/to_list.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/size.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/list/transform.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bitnor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bitor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bitxor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/nor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/or.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/xor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/paren.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/paren_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/deduce_r.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_params_with_a_default.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_params_with_defaults.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_shifted.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_shifted_binary_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_shifted_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_trailing.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_trailing_binary_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repetition/enum_trailing_params.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/selection.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/selection/max.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/selection/min.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/enum.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/filter.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/first_n.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/detail/split.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/fold_right.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/reverse.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/for_each.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/for_each_i.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/for_each_product.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/insert.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/rest_n.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/pop_back.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/pop_front.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/push_back.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/push_front.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/remove.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/replace.hpp \ - 
/home/pblunsom/packages/include/boost/preprocessor/seq/subseq.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/to_array.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/seq/to_tuple.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/slot.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/tuple/to_seq.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/detail/iter/forward1.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/detail/bounds/lower1.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/slot/detail/shared.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iteration/detail/bounds/upper1.hpp \ - /home/pblunsom/packages/include/boost/utility/detail/result_of_iterate.hpp \ - /home/pblunsom/packages/include/boost/pointee.hpp \ - /home/pblunsom/packages/include/boost/detail/is_incrementable.hpp \ - /home/pblunsom/packages/include/boost/ptr_container/detail/void_ptr_iterator.hpp \ - /home/pblunsom/packages/include/boost/random/uniform_real.hpp \ - /home/pblunsom/packages/include/boost/limits.hpp \ - /home/pblunsom/packages/include/boost/random/detail/config.hpp \ - /home/pblunsom/packages/include/boost/random/variate_generator.hpp \ - /home/pblunsom/packages/include/boost/random/uniform_01.hpp \ - /home/pblunsom/packages/include/boost/random/detail/pass_through_engine.hpp \ - /home/pblunsom/packages/include/boost/random/detail/ptr_helper.hpp \ - /home/pblunsom/packages/include/boost/random/detail/disable_warnings.hpp \ - /home/pblunsom/packages/include/boost/random/detail/enable_warnings.hpp \ - /home/pblunsom/packages/include/boost/random/detail/uniform_int_float.hpp \ - /home/pblunsom/packages/include/boost/random/mersenne_twister.hpp \ - /home/pblunsom/packages/include/boost/integer_traits.hpp \ - /home/pblunsom/packages/include/boost/cstdint.hpp \ - /home/pblunsom/packages/include/boost/random/linear_congruential.hpp \ - /home/pblunsom/packages/include/boost/random/detail/const_mod.hpp \ - /home/pblunsom/packages/include/boost/random/detail/seed.hpp pyp.hh \ - slice-sampler.h log_add.h mt19937ar.h corpus.hh \ - /home/pblunsom/packages/include/boost/shared_ptr.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/shared_ptr.hpp \ - /home/pblunsom/packages/include/boost/config/no_tr1/memory.hpp \ - /home/pblunsom/packages/include/boost/throw_exception.hpp \ - /home/pblunsom/packages/include/boost/exception/detail/attribute_noreturn.hpp \ - /home/pblunsom/packages/include/boost/exception/exception.hpp \ - /home/pblunsom/packages/include/boost/current_function.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/shared_count.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/bad_weak_ptr.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/sp_counted_base.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/sp_has_sync.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/sp_counted_base_gcc_x86.hpp \ - /home/pblunsom/packages/include/boost/detail/sp_typeinfo.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/sp_counted_impl.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/sp_convertible.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/spinlock_pool.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/spinlock.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/detail/spinlock_sync.hpp \ - 
/home/pblunsom/packages/include/boost/smart_ptr/detail/yield_k.hpp \ - /home/pblunsom/packages/include/boost/memory_order.hpp workers.hh \ - /home/pblunsom/packages/include/boost/bind.hpp \ - /home/pblunsom/packages/include/boost/bind/bind.hpp \ - /home/pblunsom/packages/include/boost/ref.hpp \ - /home/pblunsom/packages/include/boost/mem_fn.hpp \ - /home/pblunsom/packages/include/boost/bind/mem_fn.hpp \ - /home/pblunsom/packages/include/boost/get_pointer.hpp \ - /home/pblunsom/packages/include/boost/bind/mem_fn_template.hpp \ - /home/pblunsom/packages/include/boost/bind/mem_fn_cc.hpp \ - /home/pblunsom/packages/include/boost/is_placeholder.hpp \ - /home/pblunsom/packages/include/boost/bind/arg.hpp \ - /home/pblunsom/packages/include/boost/visit_each.hpp \ - /home/pblunsom/packages/include/boost/bind/storage.hpp \ - /home/pblunsom/packages/include/boost/bind/bind_template.hpp \ - /home/pblunsom/packages/include/boost/bind/bind_cc.hpp \ - /home/pblunsom/packages/include/boost/bind/bind_mf_cc.hpp \ - /home/pblunsom/packages/include/boost/bind/bind_mf2_cc.hpp \ - /home/pblunsom/packages/include/boost/bind/placeholders.hpp \ - /home/pblunsom/packages/include/boost/function.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/iterate.hpp \ - /home/pblunsom/packages/include/boost/function/detail/prologue.hpp \ - /home/pblunsom/packages/include/boost/config/no_tr1/functional.hpp \ - /home/pblunsom/packages/include/boost/function/function_base.hpp \ - /home/pblunsom/packages/include/boost/integer.hpp \ - /home/pblunsom/packages/include/boost/integer_fwd.hpp \ - /home/pblunsom/packages/include/boost/type_traits/has_trivial_copy.hpp \ - /home/pblunsom/packages/include/boost/type_traits/has_trivial_destructor.hpp \ - /home/pblunsom/packages/include/boost/type_traits/composite_traits.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_union.hpp \ - /home/pblunsom/packages/include/boost/type_traits/alignment_of.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/size_t_trait_def.hpp \ - /home/pblunsom/packages/include/boost/mpl/size_t.hpp \ - /home/pblunsom/packages/include/boost/mpl/size_t_fwd.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/size_t_trait_undef.hpp \ - /home/pblunsom/packages/include/boost/function_equal.hpp \ - /home/pblunsom/packages/include/boost/function/function_fwd.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/enum.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/enum_params.hpp \ - /home/pblunsom/packages/include/boost/function/detail/function_iterate.hpp \ - /home/pblunsom/packages/include/boost/function/detail/maybe_include.hpp \ - /home/pblunsom/packages/include/boost/function/function_template.hpp \ - /home/pblunsom/packages/include/boost/detail/no_exceptions_support.hpp \ - /home/pblunsom/packages/include/boost/thread/thread.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/platform.hpp \ - /home/pblunsom/packages/include/boost/config/requires_threads.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/thread_data.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/config.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/platform.hpp \ - /home/pblunsom/packages/include/boost/config/auto_link.hpp \ - /home/pblunsom/packages/include/boost/thread/exceptions.hpp \ - /home/pblunsom/packages/include/boost/config/abi_prefix.hpp \ - /home/pblunsom/packages/include/boost/config/abi_suffix.hpp \ - /home/pblunsom/packages/include/boost/enable_shared_from_this.hpp \ - 
/home/pblunsom/packages/include/boost/smart_ptr/enable_shared_from_this.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/weak_ptr.hpp \ - /home/pblunsom/packages/include/boost/thread/mutex.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/mutex.hpp \ - /home/pblunsom/packages/include/boost/thread/locks.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/move.hpp \ - /home/pblunsom/packages/include/boost/thread/thread_time.hpp \ - /home/pblunsom/packages/include/boost/date_time/microsec_time_clock.hpp \ - /home/pblunsom/packages/include/boost/date_time/compiler_config.hpp \ - /home/pblunsom/packages/include/boost/date_time/locale_config.hpp \ - /home/pblunsom/packages/include/boost/date_time/c_time.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_clock.hpp \ - /home/pblunsom/packages/include/boost/date_time/filetime_functions.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/posix_time_types.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/ptime.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/posix_time_system.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/posix_time_config.hpp \ - /home/pblunsom/packages/include/boost/config/no_tr1/cmath.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_duration.hpp \ - /home/pblunsom/packages/include/boost/operators.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_defs.hpp \ - /home/pblunsom/packages/include/boost/date_time/special_defs.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_resolution_traits.hpp \ - /home/pblunsom/packages/include/boost/date_time/int_adapter.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/gregorian_types.hpp \ - /home/pblunsom/packages/include/boost/date_time/date.hpp \ - /home/pblunsom/packages/include/boost/date_time/year_month_day.hpp \ - /home/pblunsom/packages/include/boost/date_time/period.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_calendar.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_weekday.hpp \ - /home/pblunsom/packages/include/boost/date_time/constrained_value.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_base_of.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_base_and_derived.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_defs.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_day_of_year.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian_calendar.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian_calendar.ipp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_ymd.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_day.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_year.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_month.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_duration.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_duration.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_duration_types.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_duration_types.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_date.hpp \ - /home/pblunsom/packages/include/boost/date_time/adjust_functors.hpp \ - /home/pblunsom/packages/include/boost/date_time/wrapping_int.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_generators.hpp \ - 
/home/pblunsom/packages/include/boost/date_time/date_clock_device.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_iterator.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_system_split.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_system_counted.hpp \ - /home/pblunsom/packages/include/boost/date_time/time.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/date_duration_operators.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/posix_time_duration.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/time_period.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_iterator.hpp \ - /home/pblunsom/packages/include/boost/date_time/dst_rules.hpp \ - /home/pblunsom/packages/include/boost/thread/xtime.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/conversion.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/conversion.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/timespec.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/pthread_mutex_scoped_lock.hpp \ - /home/pblunsom/packages/include/boost/optional.hpp \ - /home/pblunsom/packages/include/boost/optional/optional.hpp \ - /home/pblunsom/packages/include/boost/type_traits/type_with_alignment.hpp \ - /home/pblunsom/packages/include/boost/detail/reference_content.hpp \ - /home/pblunsom/packages/include/boost/type_traits/has_nothrow_copy.hpp \ - /home/pblunsom/packages/include/boost/none.hpp \ - /home/pblunsom/packages/include/boost/none_t.hpp \ - /home/pblunsom/packages/include/boost/utility/compare_pointees.hpp \ - /home/pblunsom/packages/include/boost/optional/optional_fwd.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/condition_variable_fwd.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/thread.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/thread_heap_alloc.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/thread_heap_alloc.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/thread_interruption.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/thread_group.hpp \ - /home/pblunsom/packages/include/boost/thread/shared_mutex.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/shared_mutex.hpp \ - /home/pblunsom/packages/include/boost/thread/condition_variable.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/condition_variable.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/thread_data.hpp \ - /home/pblunsom/packages/include/boost/thread/future.hpp \ - /home/pblunsom/packages/include/boost/exception_ptr.hpp \ - /home/pblunsom/packages/include/boost/exception/detail/exception_ptr.hpp \ - /home/pblunsom/packages/include/boost/scoped_ptr.hpp \ - /home/pblunsom/packages/include/boost/smart_ptr/scoped_ptr.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_fundamental.hpp \ - /home/pblunsom/packages/include/boost/thread/condition.hpp -train-contexts.o: train-contexts.cc \ - /home/pblunsom/packages/include/boost/program_options/parsers.hpp \ - /home/pblunsom/packages/include/boost/program_options/config.hpp \ - /home/pblunsom/packages/include/boost/config.hpp \ - /home/pblunsom/packages/include/boost/config/user.hpp \ - /home/pblunsom/packages/include/boost/config/select_compiler_config.hpp \ - /home/pblunsom/packages/include/boost/config/compiler/gcc.hpp \ - /home/pblunsom/packages/include/boost/config/select_stdlib_config.hpp \ - /home/pblunsom/packages/include/boost/config/no_tr1/utility.hpp 
\ - /home/pblunsom/packages/include/boost/config/stdlib/libstdcpp3.hpp \ - /home/pblunsom/packages/include/boost/config/select_platform_config.hpp \ - /home/pblunsom/packages/include/boost/config/platform/linux.hpp \ - /home/pblunsom/packages/include/boost/config/posix_features.hpp \ - /home/pblunsom/packages/include/boost/config/suffix.hpp \ - /home/pblunsom/packages/include/boost/version.hpp \ - /home/pblunsom/packages/include/boost/config/auto_link.hpp \ - /home/pblunsom/packages/include/boost/program_options/option.hpp \ - /home/pblunsom/packages/include/boost/program_options/detail/cmdline.hpp \ - /home/pblunsom/packages/include/boost/program_options/errors.hpp \ - /home/pblunsom/packages/include/boost/program_options/cmdline.hpp \ - /home/pblunsom/packages/include/boost/program_options/options_description.hpp \ - /home/pblunsom/packages/include/boost/program_options/value_semantic.hpp \ - /home/pblunsom/packages/include/boost/any.hpp \ - /home/pblunsom/packages/include/boost/type_traits/remove_reference.hpp \ - /home/pblunsom/packages/include/boost/type_traits/broken_compiler_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/lambda_support.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/lambda.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/ttp.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/msvc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/gcc.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/workaround.hpp \ - /home/pblunsom/packages/include/boost/detail/workaround.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/ctps.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/type_trait_def.hpp \ - /home/pblunsom/packages/include/boost/type_traits/detail/template_arity_spec.hpp \ - /home/pblunsom/packages/include/boost/mpl/int.hpp \ - /home/pblunsom/packages/include/boost/mpl/int_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/adl_barrier.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/adl.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/intel.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/nttp_decl.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/nttp.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/integral_wrapper.hpp \ - /home/pblunsom/packages/include/boost/mpl/integral_c_tag.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/static_constant.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/static_cast.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/cat.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/config/config.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/template_arity_fwd.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/preprocessor/params.hpp \ - /home/pblunsom/packages/include/boost/mpl/aux_/config/preprocessor.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/comma_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/comma_if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/if.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/control/iif.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/logical/bool.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/facilities/empty.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/punctuation/comma.hpp \ - /home/pblunsom/packages/include/boost/preprocessor/repeat.hpp \ - 
[elided: auto-generated make dependency rules from a deleted Makefile fragment.
The span removed here finishes the preceding object's rule and begins a
"train.o: train.cc" rule; each enumerates, one header per line, several
hundred Boost headers under /home/pblunsom/packages/include/boost/
(preprocessor, mpl, type_traits, function, smart_ptr, program_options,
ptr_container, range, iterator, random, bind, thread, date_time,
functional/hash) together with the local headers pyp-topics.hh, pyp.hh,
slice-sampler.h, log_add.h, mt19937ar.h, corpus.hh, workers.hh, timing.h,
clock_gettime_stub.c, contexts_corpus.hh, contexts_lexer.h, gzstream.hh,
../../../decoder/dict.h and ../../../decoder/wordid.h. The Boost header list
in the train.o rule repeats the first rule's list verbatim.]
/home/pblunsom/packages/include/boost/date_time/microsec_time_clock.hpp \ - /home/pblunsom/packages/include/boost/date_time/compiler_config.hpp \ - /home/pblunsom/packages/include/boost/date_time/locale_config.hpp \ - /home/pblunsom/packages/include/boost/date_time/c_time.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_clock.hpp \ - /home/pblunsom/packages/include/boost/date_time/filetime_functions.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/posix_time_types.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/ptime.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/posix_time_system.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/posix_time_config.hpp \ - /home/pblunsom/packages/include/boost/config/no_tr1/cmath.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_duration.hpp \ - /home/pblunsom/packages/include/boost/operators.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_defs.hpp \ - /home/pblunsom/packages/include/boost/date_time/special_defs.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_resolution_traits.hpp \ - /home/pblunsom/packages/include/boost/date_time/int_adapter.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/gregorian_types.hpp \ - /home/pblunsom/packages/include/boost/date_time/date.hpp \ - /home/pblunsom/packages/include/boost/date_time/year_month_day.hpp \ - /home/pblunsom/packages/include/boost/date_time/period.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_calendar.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_weekday.hpp \ - /home/pblunsom/packages/include/boost/date_time/constrained_value.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_base_of.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_base_and_derived.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_defs.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_day_of_year.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian_calendar.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian_calendar.ipp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_ymd.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_day.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_year.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_month.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_duration.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_duration.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_duration_types.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_duration_types.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/greg_date.hpp \ - /home/pblunsom/packages/include/boost/date_time/adjust_functors.hpp \ - /home/pblunsom/packages/include/boost/date_time/wrapping_int.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_generators.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_clock_device.hpp \ - /home/pblunsom/packages/include/boost/date_time/date_iterator.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_system_split.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_system_counted.hpp \ - /home/pblunsom/packages/include/boost/date_time/time.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/date_duration_operators.hpp \ - 
/home/pblunsom/packages/include/boost/date_time/posix_time/posix_time_duration.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/time_period.hpp \ - /home/pblunsom/packages/include/boost/date_time/time_iterator.hpp \ - /home/pblunsom/packages/include/boost/date_time/dst_rules.hpp \ - /home/pblunsom/packages/include/boost/thread/xtime.hpp \ - /home/pblunsom/packages/include/boost/date_time/posix_time/conversion.hpp \ - /home/pblunsom/packages/include/boost/date_time/gregorian/conversion.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/timespec.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/pthread_mutex_scoped_lock.hpp \ - /home/pblunsom/packages/include/boost/optional.hpp \ - /home/pblunsom/packages/include/boost/optional/optional.hpp \ - /home/pblunsom/packages/include/boost/type_traits/type_with_alignment.hpp \ - /home/pblunsom/packages/include/boost/detail/reference_content.hpp \ - /home/pblunsom/packages/include/boost/type_traits/has_nothrow_copy.hpp \ - /home/pblunsom/packages/include/boost/none.hpp \ - /home/pblunsom/packages/include/boost/none_t.hpp \ - /home/pblunsom/packages/include/boost/utility/compare_pointees.hpp \ - /home/pblunsom/packages/include/boost/optional/optional_fwd.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/condition_variable_fwd.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/thread.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/thread_heap_alloc.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/thread_heap_alloc.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/thread_interruption.hpp \ - /home/pblunsom/packages/include/boost/thread/detail/thread_group.hpp \ - /home/pblunsom/packages/include/boost/thread/shared_mutex.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/shared_mutex.hpp \ - /home/pblunsom/packages/include/boost/thread/condition_variable.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/condition_variable.hpp \ - /home/pblunsom/packages/include/boost/thread/pthread/thread_data.hpp \ - /home/pblunsom/packages/include/boost/thread/future.hpp \ - /home/pblunsom/packages/include/boost/exception_ptr.hpp \ - /home/pblunsom/packages/include/boost/exception/detail/exception_ptr.hpp \ - /home/pblunsom/packages/include/boost/type_traits/is_fundamental.hpp \ - /home/pblunsom/packages/include/boost/thread/condition.hpp timing.h \ - clock_gettime_stub.c contexts_corpus.hh contexts_lexer.h \ - ../../../decoder/dict.h \ - /home/pblunsom/packages/include/boost/functional/hash.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/hash.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/hash_fwd.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/detail/hash_float.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/detail/float_functions.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/detail/limits.hpp \ - /home/pblunsom/packages/include/boost/integer/static_log2.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/detail/hash_float_generic.hpp \ - /home/pblunsom/packages/include/boost/functional/hash/extensions.hpp \ - /home/pblunsom/packages/include/boost/detail/container_fwd.hpp \ - ../../../decoder/wordid.h gzstream.hh -clock_gettime_stub.o: clock_gettime_stub.c -gammadist.o: gammadist.c gammadist.h mt19937ar.h -mt19937ar.o: mt19937ar.c mt19937ar.h diff --git a/gi/pyp-topics/src/mpi-corpus.hh b/gi/pyp-topics/src/mpi-corpus.hh deleted file mode 100644 index f5c478a9..00000000 --- 
a/gi/pyp-topics/src/mpi-corpus.hh +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef _MPI_CORPUS_HH -#define _MPI_CORPUS_HH - -#include <vector> -#include <string> -#include <map> -#include <tr1/unordered_map> - -#include <boost/ptr_container/ptr_vector.hpp> -#include <boost/mpi/environment.hpp> -#include <boost/mpi/communicator.hpp> - -#include "contexts_corpus.hh" - - -//////////////////////////////////////////////////////////////// -// MPICorpus -//////////////////////////////////////////////////////////////// - -class MPICorpus : public ContextsCorpus { -public: - MPICorpus() : ContextsCorpus() { - boost::mpi::communicator world; - m_rank = world.rank(); - m_size = world.size(); - m_start = -1; - m_end = -1; - } - virtual ~MPICorpus() {} - - virtual unsigned read_contexts(const std::string &filename, - BackoffGenerator* backoff_gen=0, - bool filter_singletons=false, - bool binary_contexts=false) { - unsigned result = ContextsCorpus::read_contexts(filename, backoff_gen, filter_singletons, binary_contexts); - - if (m_rank == 0) std::cerr << "\tLoad balancing terms per mpi segment:" << std::endl; - float segment_size = num_terms() / (float)m_size; - float term_threshold = segment_size; - int seen_terms = 0; - std::vector<int> end_points; - for (int i=0; i < num_documents(); ++i) { - seen_terms += m_documents.at(i).size(); - if (seen_terms >= term_threshold) { - end_points.push_back(i+1); - term_threshold += segment_size; - if (m_rank == 0) std::cerr << "\t\t" << i+1 << ": " << seen_terms << " terms, " << 100*seen_terms / (float)num_terms() << "%" << std::endl; - } - } - m_start = (m_rank == 0 ? 0 : end_points.at(m_rank-1)); - m_end = (m_rank == m_size-1 ? num_documents() : end_points.at(m_rank)); - - return result; - } - - void - bounds(int* start, int* end) const { - *start = m_start; - *end = m_end; - } - - - -protected: - int m_rank, m_size; - int m_start, m_end; -}; - -#endif // _MPI_CORPUS_HH diff --git a/gi/pyp-topics/src/mpi-pyp-topics.cc b/gi/pyp-topics/src/mpi-pyp-topics.cc deleted file mode 100644 index d6e22af6..00000000 --- a/gi/pyp-topics/src/mpi-pyp-topics.cc +++ /dev/null @@ -1,466 +0,0 @@ -#include <boost/mpi/communicator.hpp> - -#include "timing.h" -#include "mpi-pyp-topics.hh" - -//#include <boost/date_time/posix_time/posix_time_types.hpp> -void MPIPYPTopics::sample_corpus(const MPICorpus& corpus, int samples, - int freq_cutoff_start, int freq_cutoff_end, - int freq_cutoff_interval, - int max_contexts_per_document) { - Timer timer; - - //int documents = corpus.num_documents(); - /* - m_mpi_start = 0; - m_mpi_end = documents; - if (m_size != 1) { - assert(documents < std::numeric_limits<int>::max()); - m_mpi_start = (documents / m_size) * m_rank; - if (m_rank == m_size-1) m_mpi_end = documents; - else m_mpi_end = (documents / m_size)*(m_rank+1); - } - */ - corpus.bounds(&m_mpi_start, &m_mpi_end); - int local_documents = m_mpi_end - m_mpi_start; - - if (!m_backoff.get()) { - m_word_pyps.clear(); - m_word_pyps.push_back(MPIPYPs()); - } - - if (m_am_root) std::cerr << "\n Training with " << m_word_pyps.size()-1 << " backoff level" - << (m_word_pyps.size()==2 ?
":" : "s:") << std::endl; - - for (int i=0; i<(int)m_word_pyps.size(); ++i) { - m_word_pyps.at(i).reserve(m_num_topics); - for (int j=0; j<m_num_topics; ++j) - m_word_pyps.at(i).push_back(new MPIPYP<int>(0.5, 1.0)); - } - if (m_am_root) std::cerr << std::endl; - - m_document_pyps.reserve(local_documents); - //m_document_pyps.reserve(corpus.num_documents()); - //for (int j=0; j<corpus.num_documents(); ++j) - for (int j=0; j<local_documents; ++j) - m_document_pyps.push_back(new PYP<int>(0.5, 1.0)); - - m_topic_p0 = 1.0/m_num_topics; - m_term_p0 = 1.0/corpus.num_types(); - m_backoff_p0 = 1.0/corpus.num_documents(); - - if (m_am_root) std::cerr << " Documents: " << corpus.num_documents() << "(" - << local_documents << ")" << " Terms: " << corpus.num_types() << std::endl; - - int frequency_cutoff = freq_cutoff_start; - if (m_am_root) std::cerr << " Context frequency cutoff set to " << frequency_cutoff << std::endl; - - timer.Reset(); - // Initialisation pass - int document_id=0, topic_counter=0; - for (int i=0; i<local_documents; ++i) { - document_id = i+m_mpi_start; - - //for (Corpus::const_iterator corpusIt=corpus.begin(); - // corpusIt != corpus.end(); ++corpusIt, ++document_id) { - m_corpus_topics.push_back(DocumentTopics(corpus.at(document_id).size(), 0)); - - int term_index=0; - for (Document::const_iterator docIt=corpus.at(document_id).begin(); - docIt != corpus.at(document_id).end(); ++docIt, ++term_index) { - topic_counter++; - Term term = *docIt; - - // sample a new_topic - //int new_topic = (topic_counter % m_num_topics); - int freq = corpus.context_count(term); - int new_topic = -1; - if (freq > frequency_cutoff - && (!max_contexts_per_document || term_index < max_contexts_per_document)) { - new_topic = sample(i, term); - //new_topic = document_id % m_num_topics; - - // add the new topic to the PYPs - increment(term, new_topic); - - if (m_use_topic_pyp) { - F p0 = m_topic_pyp.prob(new_topic, m_topic_p0); - int table_delta = m_document_pyps.at(i).increment(new_topic, p0); - if (table_delta) - m_topic_pyp.increment(new_topic, m_topic_p0, rnd); - } - else m_document_pyps.at(i).increment(new_topic, m_topic_p0); - } - - m_corpus_topics.at(i).at(term_index) = new_topic; - } - } - - // Synchronise the topic->word counds across the processes. 
- synchronise(); - - if (m_am_root) std::cerr << " Initialized in " << timer.Elapsed() << " seconds\n"; - - int* randomDocIndices = new int[local_documents]; - for (int i = 0; i < local_documents; ++i) - randomDocIndices[i] = i; - - // Sampling phase - for (int curr_sample=0; curr_sample < samples; ++curr_sample) { - if (freq_cutoff_interval > 0 && curr_sample != 1 - && curr_sample % freq_cutoff_interval == 1 - && frequency_cutoff > freq_cutoff_end) { - frequency_cutoff--; - if (m_am_root) std::cerr << "\n Context frequency cutoff set to " << frequency_cutoff << std::endl; - } - - if (m_am_root) std::cerr << "\n -- Sample " << curr_sample << " "; std::cerr.flush(); - - // Randomize the corpus indexing array - int tmp; - int processed_terms=0; - for (int i = (local_documents-1); i > 0; --i) { - //i+1 since j \in [0,i] but rnd() \in [0,1) - int j = (int)(rnd() * (i+1)); - assert(j >= 0 && j <= i); - tmp = randomDocIndices[i]; - randomDocIndices[i] = randomDocIndices[j]; - randomDocIndices[j] = tmp; - } - - // for each document in the corpus - for (int rand_doc=0; rand_doc<local_documents; ++rand_doc) { - int doc_index = randomDocIndices[rand_doc]; - int document_id = doc_index + m_mpi_start; - const Document& doc = corpus.at(document_id); - - // for each term in the document - int term_index=0; - Document::const_iterator docEnd = doc.end(); - for (Document::const_iterator docIt=doc.begin(); - docIt != docEnd; ++docIt, ++term_index) { - - if (max_contexts_per_document && term_index > max_contexts_per_document) - break; - - Term term = *docIt; - int freq = corpus.context_count(term); - if (freq < frequency_cutoff) - continue; - - processed_terms++; - - // remove the previous topic from the PYPs - int current_topic = m_corpus_topics.at(doc_index).at(term_index); - // a negative label means that the term hasn't been sampled yet - if (current_topic >= 0) { - decrement(term, current_topic); - - int table_delta = m_document_pyps.at(doc_index).decrement(current_topic); - if (m_use_topic_pyp && table_delta < 0) - m_topic_pyp.decrement(current_topic, rnd); - } - - // sample a new_topic - int new_topic = sample(doc_index, term); - - // add the new topic to the PYPs - m_corpus_topics.at(doc_index).at(term_index) = new_topic; - increment(term, new_topic); - - if (m_use_topic_pyp) { - F p0 = m_topic_pyp.prob(new_topic, m_topic_p0); - int table_delta = m_document_pyps.at(doc_index).increment(new_topic, p0); - if (table_delta) - m_topic_pyp.increment(new_topic, m_topic_p0, rnd); - } - else m_document_pyps.at(doc_index).increment(new_topic, m_topic_p0); - } - if (document_id && document_id % 10000 == 0) { - if (m_am_root) std::cerr << "."; std::cerr.flush(); - } - } - std::cerr << "|"; std::cerr.flush(); - - // Synchronise the topic->word counts across the processes. - synchronise(); - - if (m_am_root) std::cerr << " ||| sampled " << processed_terms << " terms."; - - if (curr_sample != 0 && curr_sample % 10 == 0) { - if (m_am_root) std::cerr << " ||| time=" << (timer.Elapsed() / 10.0) << " sec/sample" << std::endl; - timer.Reset(); - if (m_am_root) std::cerr << " ... 
Resampling hyperparameters"; std::cerr.flush(); - - // resample the hyperparameters - F log_p=0.0; - for (std::vector<MPIPYPs>::iterator levelIt=m_word_pyps.begin(); - levelIt != m_word_pyps.end(); ++levelIt) { - for (MPIPYPs::iterator pypIt=levelIt->begin(); - pypIt != levelIt->end(); ++pypIt) { - pypIt->resample_prior(rnd); - log_p += pypIt->log_restaurant_prob(); - } - } - - for (PYPs::iterator pypIt=m_document_pyps.begin(); - pypIt != m_document_pyps.end(); ++pypIt) { - pypIt->resample_prior(rnd); - log_p += pypIt->log_restaurant_prob(); - } - - if (m_use_topic_pyp) { - m_topic_pyp.resample_prior(rnd); - log_p += m_topic_pyp.log_restaurant_prob(); - } - - std::cerr.precision(10); - if (m_am_root) std::cerr << " ||| LLH=" << log_p << " ||| resampling time=" << timer.Elapsed() << " sec" << std::endl; - timer.Reset(); - - int k=0; - if (m_am_root) std::cerr << "Topics distribution: "; - std::cerr.precision(2); - for (MPIPYPs::iterator pypIt=m_word_pyps.front().begin(); - pypIt != m_word_pyps.front().end(); ++pypIt, ++k) { - if (m_am_root && k % 5 == 0) std::cerr << std::endl << '\t'; - if (m_am_root) std::cerr << "<" << k << ":" << pypIt->num_customers() << "," - << pypIt->num_types() << "," << m_topic_pyp.prob(k, m_topic_p0) << "> "; - } - std::cerr.precision(4); - if (m_am_root) std::cerr << std::endl; - } - } - delete [] randomDocIndices; -} - -void MPIPYPTopics::synchronise() { - // Synchronise the topic->word counts across the processes. - //for (std::vector<MPIPYPs>::iterator levelIt=m_word_pyps.begin(); - // levelIt != m_word_pyps.end(); ++levelIt) { -// std::vector<MPIPYPs>::iterator levelIt=m_word_pyps.begin(); -// { -// for (MPIPYPs::iterator pypIt=levelIt->begin(); pypIt != levelIt->end(); ++pypIt) { - for (size_t label=0; label < m_word_pyps.at(0).size(); ++label) { - MPIPYP<int>& pyp = m_word_pyps.at(0).at(label); - - //if (!m_am_root) boost::mpi::communicator().barrier(); - //std::cerr << "Before Sync Process " << m_rank << ":"; - //pyp.debug_info(std::cerr); std::cerr << std::endl; - //if (m_am_root) boost::mpi::communicator().barrier(); - - MPIPYP<int>::dish_delta_type delta; - pyp.synchronise(&delta); - - for (MPIPYP<int>::dish_delta_type::const_iterator it=delta.begin(); it != delta.end(); ++it) { - int count = it->second; - if (count > 0) - for (int i=0; i < count; ++i) increment(it->first, label); - if (count < 0) - for (int i=0; i > count; --i) decrement(it->first, label); - } - pyp.reset_deltas(); - - //if (!m_am_root) boost::mpi::communicator().barrier(); - //std::cerr << "After Sync Process " << m_rank << ":"; - //pyp.debug_info(std::cerr); std::cerr << std::endl; - //if (m_am_root) boost::mpi::communicator().barrier(); - } -// } - // Synchronise the hierarchical topic pyp - MPIPYP<int>::dish_delta_type topic_delta; - m_topic_pyp.synchronise(&topic_delta); - for (MPIPYP<int>::dish_delta_type::const_iterator it=topic_delta.begin(); it != topic_delta.end(); ++it) { - int count = it->second; - if (count > 0) - for (int i=0; i < count; ++i) - m_topic_pyp.increment(it->first, m_topic_p0, rnd); - if (count < 0) - for (int i=0; i > count; --i) - m_topic_pyp.decrement(it->first, rnd); - } - m_topic_pyp.reset_deltas(); -} - -void MPIPYPTopics::decrement(const Term& term, int topic, int level) { - //std::cerr << "MPIPYPTopics::decrement(" << term << "," << topic << "," << level << ")" << std::endl; - m_word_pyps.at(level).at(topic).decrement(term, rnd); - if (m_backoff.get()) { - Term backoff_term = (*m_backoff)[term]; - if (!m_backoff->is_null(backoff_term)) - 
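- // recurse: the same customer also occupies the coarser (backed-off)
- // restaurant one level up the term hierarchy, mirroring increment() below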
decrement(backoff_term, topic, level+1); - } -} - -void MPIPYPTopics::increment(const Term& term, int topic, int level) { - //std::cerr << "MPIPYPTopics::increment(" << term << "," << topic << "," << level << ")" << std::endl; - m_word_pyps.at(level).at(topic).increment(term, word_pyps_p0(term, topic, level), rnd); - - if (m_backoff.get()) { - Term backoff_term = (*m_backoff)[term]; - if (!m_backoff->is_null(backoff_term)) - increment(backoff_term, topic, level+1); - } -} - -int MPIPYPTopics::sample(const DocumentId& doc, const Term& term) { - // First pass: collect probs - F sum=0.0; - std::vector<F> sums; - for (int k=0; k<m_num_topics; ++k) { - F p_w_k = prob(term, k); - - F topic_prob = m_topic_p0; - if (m_use_topic_pyp) topic_prob = m_topic_pyp.prob(k, m_topic_p0); - - //F p_k_d = m_document_pyps[doc].prob(k, topic_prob); - F p_k_d = m_document_pyps.at(doc).unnormalised_prob(k, topic_prob); - - sum += (p_w_k*p_k_d); - sums.push_back(sum); - } - // Second pass: sample a topic - F cutoff = rnd() * sum; - for (int k=0; k<m_num_topics; ++k) { - if (cutoff <= sums[k]) - return k; - } - std::cerr << cutoff << " " << sum << std::endl; - assert(false); -} - -MPIPYPTopics::F MPIPYPTopics::word_pyps_p0(const Term& term, int topic, int level) const { - //for (int i=0; i<level+1; ++i) std::cerr << " "; - //std::cerr << "MPIPYPTopics::word_pyps_p0(" << term << "," << topic << "," << level << ")" << std::endl; - - F p0 = m_term_p0; - if (m_backoff.get()) { - //static F fudge=m_backoff_p0; // TODO - - Term backoff_term = (*m_backoff)[term]; - if (!m_backoff->is_null(backoff_term)) { - assert (level < m_backoff->order()); - //p0 = (1.0/(double)m_backoff->terms_at_level(level))*prob(backoff_term, topic, level+1); - p0 = prob(backoff_term, topic, level+1); - } - else - p0 = m_term_p0; - } - //for (int i=0; i<level+1; ++i) std::cerr << " "; - //std::cerr << "MPIPYPTopics::word_pyps_p0(" << term << "," << topic << "," << level << ") = " << p0 << std::endl; - return p0; -} - -MPIPYPTopics::F MPIPYPTopics::prob(const Term& term, int topic, int level) const { - //for (int i=0; i<level+1; ++i) std::cerr << " "; - //std::cerr << "MPIPYPTopics::prob(" << term << "," << topic << "," << level << " " << factor << ")" << std::endl; - - F p0 = word_pyps_p0(term, topic, level); - F p_w_k = m_word_pyps.at(level).at(topic).prob(term, p0); - - //for (int i=0; i<level+1; ++i) std::cerr << " "; - //std::cerr << "MPIPYPTopics::prob(" << term << "," << topic << "," << level << ") = " << p_w_k << std::endl; - - return p_w_k; -} - -int MPIPYPTopics::max_topic() const { - if (!m_use_topic_pyp) - return -1; - - F current_max=0.0; - int current_topic=-1; - for (int k=0; k<m_num_topics; ++k) { - F prob = m_topic_pyp.prob(k, m_topic_p0); - if (prob > current_max) { - current_max = prob; - current_topic = k; - } - } - assert(current_topic >= 0); - assert(current_max >= 0); - return current_topic; -} - -std::pair<int,MPIPYPTopics::F> MPIPYPTopics::max(const DocumentId& true_doc) const { - //std::cerr << "MPIPYPTopics::max(" << doc << "," << term << ")" << std::endl; - // collect probs - F current_max=0.0; - DocumentId local_doc = true_doc - m_mpi_start; - int current_topic=-1; - for (int k=0; k<m_num_topics; ++k) { - //F p_w_k = prob(term, k); - - F topic_prob = m_topic_p0; - if (m_use_topic_pyp) - topic_prob = m_topic_pyp.prob(k, m_topic_p0); - - F prob = 0; - if (local_doc < 0) prob = topic_prob; - else prob = m_document_pyps.at(local_doc).prob(k, topic_prob); - - if (prob > current_max) { - current_max = prob; - current_topic = k;
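- // keep the running argmax: current_topic is the most probable topic so far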
- } - } - assert(current_topic >= 0); - assert(current_max >= 0); - return std::make_pair(current_topic, current_max); -} - -std::pair<int,MPIPYPTopics::F> MPIPYPTopics::max(const DocumentId& true_doc, const Term& term) const { - //std::cerr << "MPIPYPTopics::max(" << doc << "," << term << ")" << std::endl; - // collect probs - F current_max=0.0; - DocumentId local_doc = true_doc - m_mpi_start; - int current_topic=-1; - for (int k=0; k<m_num_topics; ++k) { - F p_w_k = prob(term, k); - - F topic_prob = m_topic_p0; - if (m_use_topic_pyp) - topic_prob = m_topic_pyp.prob(k, m_topic_p0); - - F p_k_d = 0; - if (local_doc < 0) p_k_d = topic_prob; - else p_k_d = m_document_pyps.at(local_doc).prob(k, topic_prob); - - F prob = (p_w_k*p_k_d); - if (prob > current_max) { - current_max = prob; - current_topic = k; - } - } - assert(current_topic >= 0); - assert(current_max >= 0); - return std::make_pair(current_topic, current_max); -} - -std::ostream& MPIPYPTopics::print_document_topics(std::ostream& out) const { - for (CorpusTopics::const_iterator corpusIt=m_corpus_topics.begin(); - corpusIt != m_corpus_topics.end(); ++corpusIt) { - int term_index=0; - for (DocumentTopics::const_iterator docIt=corpusIt->begin(); - docIt != corpusIt->end(); ++docIt, ++term_index) { - if (term_index) out << " "; - out << *docIt; - } - out << std::endl; - } - return out; -} - -std::ostream& MPIPYPTopics::print_topic_terms(std::ostream& out) const { - for (PYPs::const_iterator pypsIt=m_word_pyps.front().begin(); - pypsIt != m_word_pyps.front().end(); ++pypsIt) { - int term_index=0; - for (PYP<int>::const_iterator termIt=pypsIt->begin(); - termIt != pypsIt->end(); ++termIt, ++term_index) { - if (term_index) out << " "; - out << termIt->first << ":" << termIt->second; - } - out << std::endl; - } - return out; -} diff --git a/gi/pyp-topics/src/mpi-pyp-topics.hh b/gi/pyp-topics/src/mpi-pyp-topics.hh deleted file mode 100644 index d96bc4e5..00000000 --- a/gi/pyp-topics/src/mpi-pyp-topics.hh +++ /dev/null @@ -1,106 +0,0 @@ -#ifndef MPI_PYP_TOPICS_HH -#define MPI_PYP_TOPICS_HH - -#include <vector> -#include <iostream> - -#include <boost/ptr_container/ptr_vector.hpp> -#include <boost/random/uniform_real.hpp> -#include <boost/random/variate_generator.hpp> -#include <boost/random/mersenne_twister.hpp> -#include <boost/random/inversive_congruential.hpp> -#include <boost/random/linear_congruential.hpp> -#include <boost/random/lagged_fibonacci.hpp> -#include <boost/mpi/environment.hpp> -#include <boost/mpi/communicator.hpp> - - -#include "mpi-pyp.hh" -#include "mpi-corpus.hh" - -class MPIPYPTopics { -public: - typedef std::vector<int> DocumentTopics; - typedef std::vector<DocumentTopics> CorpusTopics; - typedef double F; - -public: - MPIPYPTopics(int num_topics, bool use_topic_pyp=false, unsigned long seed = 0) - : m_num_topics(num_topics), m_word_pyps(1), - m_topic_pyp(0.5,1.0), m_use_topic_pyp(use_topic_pyp), - m_seed(seed), - uni_dist(0,1), rng(seed == 0 ? 
(unsigned long)this : seed), - rnd(rng, uni_dist), m_mpi_start(-1), m_mpi_end(-1) { - m_rank = m_world.rank(); - m_size = m_world.size(); - m_am_root = (m_rank == 0); - } - - void sample_corpus(const MPICorpus& corpus, int samples, - int freq_cutoff_start=0, int freq_cutoff_end=0, - int freq_cutoff_interval=0, - int max_contexts_per_document=0); - - int sample(const DocumentId& doc, const Term& term); - std::pair<int,F> max(const DocumentId& doc, const Term& term) const; - std::pair<int,F> max(const DocumentId& doc) const; - int max_topic() const; - - void set_backoff(const std::string& filename) { - m_backoff.reset(new TermBackoff); - m_backoff->read(filename); - m_word_pyps.clear(); - m_word_pyps.resize(m_backoff->order(), MPIPYPs()); - } - void set_backoff(TermBackoffPtr backoff) { - m_backoff = backoff; - m_word_pyps.clear(); - m_word_pyps.resize(m_backoff->order(), MPIPYPs()); - } - - F prob(const Term& term, int topic, int level=0) const; - void decrement(const Term& term, int topic, int level=0); - void increment(const Term& term, int topic, int level=0); - - std::ostream& print_document_topics(std::ostream& out) const; - std::ostream& print_topic_terms(std::ostream& out) const; - - void synchronise(); - -private: - F word_pyps_p0(const Term& term, int topic, int level) const; - - int m_num_topics; - F m_term_p0, m_topic_p0, m_backoff_p0; - - CorpusTopics m_corpus_topics; - typedef boost::ptr_vector< PYP<int> > PYPs; - typedef boost::ptr_vector< MPIPYP<int> > MPIPYPs; - PYPs m_document_pyps; - std::vector<MPIPYPs> m_word_pyps; - MPIPYP<int> m_topic_pyp; - bool m_use_topic_pyp; - - unsigned long m_seed; - - //typedef boost::mt19937 base_generator_type; - //typedef boost::hellekalek1995 base_generator_type; - typedef boost::lagged_fibonacci607 base_generator_type; - typedef boost::uniform_real<> uni_dist_type; - typedef boost::variate_generator<base_generator_type&, uni_dist_type> gen_type; - - uni_dist_type uni_dist; - base_generator_type rng; //this gets the seed - gen_type rnd; //instantiate: rnd(rng, uni_dist) - //call: rnd() generates uniform on [0,1) - - TermBackoffPtr m_backoff; - - boost::mpi::communicator m_world; - bool m_am_root; - int m_rank, m_size; - int m_mpi_start, m_mpi_end; -}; - -#endif // MPI_PYP_TOPICS_HH diff --git a/gi/pyp-topics/src/mpi-pyp.hh b/gi/pyp-topics/src/mpi-pyp.hh deleted file mode 100644 index c2341b9e..00000000 --- a/gi/pyp-topics/src/mpi-pyp.hh +++ /dev/null @@ -1,447 +0,0 @@ -#ifndef _mpipyp_hh -#define _mpipyp_hh - -#include <math.h> -#include <map> -#include <tr1/unordered_map> -//#include <google/sparse_hash_map> - -#include <boost/random/uniform_real.hpp> -#include <boost/random/variate_generator.hpp> -#include <boost/random/mersenne_twister.hpp> -#include <boost/tuple/tuple.hpp> -#include <boost/serialization/map.hpp> -#include <boost/mpi.hpp> -#include <boost/mpi/environment.hpp> -#include <boost/mpi/communicator.hpp> -#include <boost/mpi/operations.hpp> - - -#include "pyp.hh" - -// -// Pitman-Yor process with customer and table tracking -// - -template <typename Dish, typename Hash=std::tr1::hash<Dish> > -class MPIPYP : public PYP<Dish, Hash> { -public: - typedef std::map<Dish, int> dish_delta_type; - - MPIPYP(double a, double b, Hash hash=Hash()); - - template < typename Uniform01 > - int increment(Dish d, double p0, Uniform01& rnd); - template < typename Uniform01 > - int decrement(Dish d, Uniform01& rnd); - - void clear(); - void reset_deltas(); - - void synchronise(dish_delta_type* result); - -private: -
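- // Net changes accumulated since the last reset_deltas(): per-dish customer
- // counts and table histograms, so synchronise() only has to exchange deltas
- // across ranks rather than the full restaurant state.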
typedef std::map<Dish, typename PYP<Dish,Hash>::TableCounter> table_delta_type; - - dish_delta_type m_count_delta; - table_delta_type m_table_delta; -}; - -template <typename Dish, typename Hash> -MPIPYP<Dish,Hash>::MPIPYP(double a, double b, Hash h) -: PYP<Dish,Hash>(a, b, 0, h) {} - -template <typename Dish, typename Hash> - template <typename Uniform01> -int -MPIPYP<Dish,Hash>::increment(Dish dish, double p0, Uniform01& rnd) { - //std::cerr << "-----INCREMENT DISH " << dish << std::endl; - int delta = 0; - int table_joined=-1; - typename PYP<Dish,Hash>::TableCounter &tc = PYP<Dish,Hash>::_dish_tables[dish]; - - // seated on a new or existing table? - int c = PYP<Dish,Hash>::count(dish); - int t = PYP<Dish,Hash>::num_tables(dish); - int T = PYP<Dish,Hash>::num_tables(); - double& a = PYP<Dish,Hash>::_a; - double& b = PYP<Dish,Hash>::_b; - double pshare = (c > 0) ? (c - a*t) : 0.0; - double pnew = (b + a*T) * p0; - if (pshare < 0.0) { - std::cerr << pshare << " " << c << " " << a << " " << t << std::endl; - assert(false); - } - - if (rnd() < pnew / (pshare + pnew)) { - // assign to a new table - tc.tables += 1; - tc.table_histogram[1] += 1; - PYP<Dish,Hash>::_total_tables += 1; - delta = 1; - table_joined = 1; - } - else { - // randomly assign to an existing table - // remove constant denominator from inner loop - double r = rnd() * (c - a*t); - for (std::map<int,int>::iterator - hit = tc.table_histogram.begin(); - hit != tc.table_histogram.end(); ++hit) { - r -= ((hit->first - a) * hit->second); - if (r <= 0) { - tc.table_histogram[hit->first+1] += 1; - hit->second -= 1; - table_joined = hit->first+1; - if (hit->second == 0) - tc.table_histogram.erase(hit); - break; - } - } - if (r > 0) { - std::cerr << r << " " << c << " " << a << " " << t << std::endl; - assert(false); - } - delta = 0; - } - - std::tr1::unordered_map<Dish,int,Hash>::operator[](dish) += 1; - //google::sparse_hash_map<Dish,int,Hash>::operator[](dish) += 1; - PYP<Dish,Hash>::_total_customers += 1; - - // MPI Delta handling - // track the customer entering - typename dish_delta_type::iterator customer_it; - bool customer_insert_result; - boost::tie(customer_it, customer_insert_result) - = m_count_delta.insert(std::make_pair(dish,0)); - - customer_it->second += 1; - if (customer_it->second == 0) - m_count_delta.erase(customer_it); - - // increment the histogram bar for the table joined - /* - typename PYP<Dish,Hash>::TableCounter &delta_tc = m_table_delta[dish]; - - std::map<int,int> &histogram = delta_tc.table_histogram; - assert (table_joined > 0); - - typename std::map<int,int>::iterator table_it; bool table_insert_result; - boost::tie(table_it, table_insert_result) = histogram.insert(std::make_pair(table_joined,0)); - table_it->second += 1; - if (delta == 0) { - // decrement the histogram bar for the table left - typename std::map<int,int>::iterator left_table_it; - boost::tie(left_table_it, table_insert_result) - = histogram.insert(std::make_pair(table_joined-1,0)); - left_table_it->second -= 1; - if (left_table_it->second == 0) histogram.erase(left_table_it); - } - else delta_tc.tables += 1; - - if (table_it->second == 0) histogram.erase(table_it); - - //std::cerr << "Added (" << delta << ") " << dish << " to table " << table_joined << "\n"; - //std::cerr << "Dish " << dish << " has " << count(dish) << " customers, and is sitting at " << PYP<Dish,Hash>::num_tables(dish) << " tables.\n"; - //for (std::map<int,int>::const_iterator - // hit = delta_tc.table_histogram.begin(); - // hit != delta_tc.table_histogram.end(); 
++hit) { - // std::cerr << " " << hit->second << " tables with " << hit->first << " customers." << std::endl; - //} - //std::cerr << "Added (" << delta << ") " << dish << " to table " << table_joined << "\n"; - //std::cerr << "Dish " << dish << " has " << count(dish) << " customers, and is sitting at " << PYP<Dish,Hash>::num_tables(dish) << " tables.\n"; - int x_num_customers=0, x_num_table=0; - for (std::map<int,int>::const_iterator - hit = delta_tc.table_histogram.begin(); - hit != delta_tc.table_histogram.end(); ++hit) { - x_num_table += hit->second; - x_num_customers += (hit->second*hit->first); - } - int tmp_c = PYP<Dish,Hash>::count(dish); - int tmp_t = PYP<Dish,Hash>::num_tables(dish); - assert (x_num_customers <= tmp_c); - assert (x_num_table <= tmp_t); - - if (delta_tc.table_histogram.empty()) { - assert (delta_tc.tables == 0); - m_table_delta.erase(dish); - } - */ - - //PYP<Dish,Hash>::debug_info(std::cerr); - //std::cerr << " Dish " << dish << " has count " << PYP<Dish,Hash>::count(dish) << " tables " << PYP<Dish,Hash>::num_tables(dish) << std::endl; - - return delta; -} - -template <typename Dish, typename Hash> - template <typename Uniform01> -int -MPIPYP<Dish,Hash>::decrement(Dish dish, Uniform01& rnd) -{ - //std::cerr << "-----DECREMENT DISH " << dish << std::endl; - typename std::tr1::unordered_map<Dish, int>::iterator dcit = find(dish); - //typename google::sparse_hash_map<Dish, int>::iterator dcit = find(dish); - if (dcit == PYP<Dish,Hash>::end()) { - std::cerr << dish << std::endl; - assert(false); - } - - int delta = 0, table_left=-1; - - typename std::tr1::unordered_map<Dish, typename PYP<Dish,Hash>::TableCounter>::iterator dtit - = PYP<Dish,Hash>::_dish_tables.find(dish); - //typename google::sparse_hash_map<Dish, TableCounter>::iterator dtit = _dish_tables.find(dish); - if (dtit == PYP<Dish,Hash>::_dish_tables.end()) { - std::cerr << dish << std::endl; - assert(false); - } - typename PYP<Dish,Hash>::TableCounter &tc = dtit->second; - - double r = rnd() * PYP<Dish,Hash>::count(dish); - for (std::map<int,int>::iterator hit = tc.table_histogram.begin(); - hit != tc.table_histogram.end(); ++hit) { - r -= (hit->first * hit->second); - if (r <= 0) { - table_left = hit->first; - if (hit->first > 1) { - tc.table_histogram[hit->first-1] += 1; - } - else { - delta = -1; - tc.tables -= 1; - PYP<Dish,Hash>::_total_tables -= 1; - } - - hit->second -= 1; - if (hit->second == 0) tc.table_histogram.erase(hit); - break; - } - } - if (r > 0) { - std::cerr << r << " " << PYP<Dish,Hash>::count(dish) << " " << PYP<Dish,Hash>::_a << " " - << PYP<Dish,Hash>::num_tables(dish) << std::endl; - assert(false); - } - - // remove the customer - dcit->second -= 1; - PYP<Dish,Hash>::_total_customers -= 1; - assert(dcit->second >= 0); - if (dcit->second == 0) { - PYP<Dish,Hash>::erase(dcit); - PYP<Dish,Hash>::_dish_tables.erase(dtit); - } - - // MPI Delta processing - typename dish_delta_type::iterator it; - bool insert_result; - boost::tie(it, insert_result) = m_count_delta.insert(std::make_pair(dish,0)); - it->second -= 1; - if (it->second == 0) m_count_delta.erase(it); - - assert (table_left > 0); - typename PYP<Dish,Hash>::TableCounter& delta_tc = m_table_delta[dish]; - if (table_left > 1) { - std::map<int,int>::iterator tit; - boost::tie(tit, insert_result) = delta_tc.table_histogram.insert(std::make_pair(table_left-1,0)); - tit->second += 1; - if (tit->second == 0) delta_tc.table_histogram.erase(tit); - } - else delta_tc.tables -= 1; - - std::map<int,int>::iterator tit; - boost::tie(tit, 
insert_result) = delta_tc.table_histogram.insert(std::make_pair(table_left,0)); - tit->second -= 1; - if (tit->second == 0) delta_tc.table_histogram.erase(tit); - - // std::cerr << "Dish " << dish << " has " << count(dish) << " customers, and is sitting at " << PYP<Dish,Hash>::num_tables(dish) << " tables.\n"; - // for (std::map<int,int>::const_iterator - // hit = delta_tc.table_histogram.begin(); - // hit != delta_tc.table_histogram.end(); ++hit) { - // std::cerr << " " << hit->second << " tables with " << hit->first << " customers." << std::endl; - // } - int x_num_customers=0, x_num_table=0; - for (std::map<int,int>::const_iterator - hit = delta_tc.table_histogram.begin(); - hit != delta_tc.table_histogram.end(); ++hit) { - x_num_table += hit->second; - x_num_customers += (hit->second*hit->first); - } - int tmp_c = PYP<Dish,Hash>::count(dish); - int tmp_t = PYP<Dish,Hash>::num_tables(dish); - assert (x_num_customers <= tmp_c); - assert (x_num_table <= tmp_t); - - if (delta_tc.table_histogram.empty()) { - // std::cerr << " DELETING " << dish << std::endl; - assert (delta_tc.tables == 0); - m_table_delta.erase(dish); - } - - //PYP<Dish,Hash>::debug_info(std::cerr); - //std::cerr << " Dish " << dish << " has count " << PYP<Dish,Hash>::count(dish) << " tables " << PYP<Dish,Hash>::num_tables(dish) << std::endl; - return delta; -} - -template <typename Dish, typename Hash> -void -MPIPYP<Dish,Hash>::clear() { - PYP<Dish,Hash>::clear(); - reset_deltas(); -} - -template <typename Dish, typename Hash> -void -MPIPYP<Dish,Hash>::reset_deltas() { - m_count_delta.clear(); - m_table_delta.clear(); -} - -template <typename Dish> -struct sum_maps { - typedef std::map<Dish,int> map_type; - map_type& operator() (map_type& l, map_type const & r) const { - for (typename map_type::const_iterator it=r.begin(); it != r.end(); it++) - l[it->first] += it->second; - return l; - } -}; - -template <typename Dish> -struct subtract_maps { - typedef std::map<Dish,int> map_type; - map_type& operator() (map_type& l, map_type const & r) const { - for (typename map_type::const_iterator it=r.begin(); it != r.end(); it++) - l[it->first] -= it->second; - return l; - } -}; - -// Needed Boost definitions -namespace boost { - namespace mpi { - template <> - struct is_commutative< sum_maps<int>, std::map<int,int> > : mpl::true_ {}; - } - - namespace serialization { - template<class Archive> - void serialize(Archive & ar, PYP<int>::TableCounter& t, const unsigned int version) { - ar & t.table_histogram; - ar & t.tables; - } - - } // namespace serialization -} // namespace boost - -template <typename A, typename B, typename C> -struct triple { - triple() {} - triple(const A& a, const B& b, const C& c) : first(a), second(b), third(c) {} - A first; - B second; - C third; - - template<class Archive> - void serialize(Archive &ar, const unsigned int version){ - ar & first; - ar & second; - ar & third; - } -}; - -BOOST_IS_BITWISE_SERIALIZABLE(MPIPYP<int>::dish_delta_type) -BOOST_CLASS_TRACKING(MPIPYP<int>::dish_delta_type,track_never) - -template <typename Dish, typename Hash> -void -MPIPYP<Dish,Hash>::synchronise(dish_delta_type* result) { - boost::mpi::communicator world; - //int rank = world.rank(), size = world.size(); - - boost::mpi::all_reduce(world, m_count_delta, *result, sum_maps<Dish>()); - subtract_maps<Dish>()(*result, m_count_delta); - -/* - // communicate the customer count deltas - dish_delta_type global_dish_delta; - boost::mpi::all_reduce(world, m_count_delta, global_dish_delta, sum_maps<Dish>()); - - // update this 
restaurant - for (typename dish_delta_type::const_iterator it=global_dish_delta.begin(); - it != global_dish_delta.end(); ++it) { - int global_delta = it->second - m_count_delta[it->first]; - if (global_delta == 0) continue; - typename std::tr1::unordered_map<Dish,int,Hash>::iterator dit; bool inserted; - boost::tie(dit, inserted) - = std::tr1::unordered_map<Dish,int,Hash>::insert(std::make_pair(it->first, 0)); - dit->second += global_delta; - assert(dit->second >= 0); - if (dit->second == 0) { - std::tr1::unordered_map<Dish,int,Hash>::erase(dit); - } - - PYP<Dish,Hash>::_total_customers += (it->second - m_count_delta[it->first]); - int tmp = PYP<Dish,Hash>::_total_customers; - assert(tmp >= 0); - //std::cerr << "Process " << rank << " adding " << (it->second - m_count_delta[it->first]) << " of customer " << it->first << std::endl; - } -*/ -/* - // communicate the table count deltas - for (int process = 0; process < size; ++process) { - typename std::vector< triple<Dish, int, int> > message; - if (rank == process) { - // broadcast deltas - for (typename table_delta_type::const_iterator dish_it=m_table_delta.begin(); - dish_it != m_table_delta.end(); ++dish_it) { - //assert (dish_it->second.tables > 0); - for (std::map<int,int>::const_iterator it=dish_it->second.table_histogram.begin(); - it != dish_it->second.table_histogram.end(); ++it) { - triple<Dish, int, int> m(dish_it->first, it->first, it->second); - message.push_back(m); - } - // append a special message with the total table delta for this dish - triple<Dish, int, int> m(dish_it->first, -1, dish_it->second.tables); - message.push_back(m); - } - boost::mpi::broadcast(world, message, process); - } - else { - // receive deltas - boost::mpi::broadcast(world, message, process); - for (typename std::vector< triple<Dish, int, int> >::const_iterator it=message.begin(); it != message.end(); ++it) { - typename PYP<Dish,Hash>::TableCounter& tc = PYP<Dish,Hash>::_dish_tables[it->first]; - if (it->second >= 0) { - std::map<int,int>::iterator tit; bool inserted; - boost::tie(tit, inserted) = tc.table_histogram.insert(std::make_pair(it->second, 0)); - tit->second += it->third; - if (tit->second < 0) { - std::cerr << tit->first << " " << tit->second << " " << it->first << " " << it->second << " " << it->third << std::endl; - assert(tit->second >= 0); - } - if (tit->second == 0) { - tc.table_histogram.erase(tit); - } - } - else { - tc.tables += it->third; - PYP<Dish,Hash>::_total_tables += it->third; - assert(tc.tables >= 0); - if (tc.tables == 0) assert(tc.table_histogram.empty()); - if (tc.table_histogram.empty()) { - assert (tc.tables == 0); - PYP<Dish,Hash>::_dish_tables.erase(it->first); - } - } - } - } - } -*/ - -// reset_deltas(); -} - -#endif diff --git a/gi/pyp-topics/src/mpi-train-contexts.cc b/gi/pyp-topics/src/mpi-train-contexts.cc deleted file mode 100644 index e05e0eac..00000000 --- a/gi/pyp-topics/src/mpi-train-contexts.cc +++ /dev/null @@ -1,201 +0,0 @@ -// STL -#include <iostream> -#include <fstream> -#include <algorithm> -#include <iterator> - -// Boost -#include <boost/program_options/parsers.hpp> -#include <boost/program_options/variables_map.hpp> -#include <boost/scoped_ptr.hpp> -#include <boost/mpi/environment.hpp> -#include <boost/mpi/communicator.hpp> -#include <boost/lexical_cast.hpp> - -// Local -#include "mpi-pyp-topics.hh" -#include "corpus.hh" -#include "mpi-corpus.hh" -#include "gzstream.hh" - -static const char *REVISION = "$Rev: 170 $"; - -// Namespaces -using namespace boost; -using namespace boost::program_options; 
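- // Driver: each MPI rank reads its slice of the contexts corpus, runs the
- // Gibbs sampler with periodic cross-rank synchronisation, and the root rank
- // merges the per-process output files at the end.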
-using namespace std; - -int main(int argc, char **argv) -{ - mpi::environment env(argc, argv); - mpi::communicator world; - int rank = world.rank(); - bool am_root = (rank==0); - if (am_root) cout << "Pitman Yor topic models: Copyright 2010 Phil Blunsom\n"; - if (am_root) std::cout << "I am process " << world.rank() << " of " << world.size() << "." << std::endl; - if (am_root) cout << REVISION << '\n' <<endl; - - //////////////////////////////////////////////////////////////////////////////////////////// - // Command line processing - variables_map vm; - - // Command line processing - { - options_description cmdline_specific("Command line specific options"); - cmdline_specific.add_options() - ("help,h", "print help message") - ("config,c", value<string>(), "config file specifying additional command line options") - ; - options_description config_options("Allowed options"); - config_options.add_options() - ("help,h", "print help message") - ("data,d", value<string>(), "file containing the documents and context terms") - ("topics,t", value<int>()->default_value(50), "number of topics") - ("document-topics-out,o", value<string>(), "file to write the document topics to") - ("default-topics-out", value<string>(), "file to write default term topic assignments.") - ("topic-words-out,w", value<string>(), "file to write the topic word distribution to") - ("samples,s", value<int>()->default_value(10), "number of sampling passes through the data") - ("backoff-type", value<string>(), "backoff type: none|simple") -// ("filter-singleton-contexts", "filter singleton contexts") - ("hierarchical-topics", "Use a backoff hierarchical PYP as the P0 for the document topics distribution.") - ("binary-counts,b", "Use binary rather than integer counts for contexts.") - ("freq-cutoff-start", value<int>()->default_value(0), "initial frequency cutoff.") - ("freq-cutoff-end", value<int>()->default_value(0), "final frequency cutoff.") - ("freq-cutoff-interval", value<int>()->default_value(0), "number of iterations between frequency decrement.") - ("max-contexts-per-document", value<int>()->default_value(0), "Only sample the n most frequent contexts for a document.") - ; - - cmdline_specific.add(config_options); - - store(parse_command_line(argc, argv, cmdline_specific), vm); - notify(vm); - - if (vm.count("config") > 0) { - ifstream config(vm["config"].as<string>().c_str()); - store(parse_config_file(config, config_options), vm); - } - - if (vm.count("help")) { - cout << cmdline_specific << "\n"; - return 1; - } - } - //////////////////////////////////////////////////////////////////////////////////////////// - - if (!vm.count("data")) { - cerr << "Please specify a file containing the data." << endl; - return 1; - } - - // seed the random number generator: 0 = automatic, specify value otherwise - unsigned long seed = 0; - MPIPYPTopics model(vm["topics"].as<int>(), vm.count("hierarchical-topics"), seed); - - // read the data - BackoffGenerator* backoff_gen=0; - if (vm.count("backoff-type")) { - if (vm["backoff-type"].as<std::string>() == "none") { - backoff_gen = 0; - } - else if (vm["backoff-type"].as<std::string>() == "simple") { - backoff_gen = new SimpleBackoffGenerator(); - } - else { - cerr << "Backoff type (--backoff-type) must be one of none|simple." 
<<endl; - return(1); - } - } - - //ContextsCorpus contexts_corpus; - MPICorpus contexts_corpus; - contexts_corpus.read_contexts(vm["data"].as<string>(), backoff_gen, /*vm.count("filter-singleton-contexts")*/ false, vm.count("binary-counts")); - int mpi_start = 0, mpi_end = 0; - contexts_corpus.bounds(&mpi_start, &mpi_end); - std::cerr << "\tProcess " << rank << " has documents " << mpi_start << " -> " << mpi_end << "." << std::endl; - - model.set_backoff(contexts_corpus.backoff_index()); - - if (backoff_gen) - delete backoff_gen; - - // train the sampler - model.sample_corpus(contexts_corpus, vm["samples"].as<int>(), - vm["freq-cutoff-start"].as<int>(), - vm["freq-cutoff-end"].as<int>(), - vm["freq-cutoff-interval"].as<int>(), - vm["max-contexts-per-document"].as<int>()); - - if (vm.count("document-topics-out")) { - std::ofstream documents_out((vm["document-topics-out"].as<string>() + ".pyp-process-" + boost::lexical_cast<std::string>(rank)).c_str()); - //int documents = contexts_corpus.num_documents(); - /* - int mpi_start = 0, mpi_end = documents; - if (world.size() != 1) { - mpi_start = (documents / world.size()) * rank; - if (rank == world.size()-1) mpi_end = documents; - else mpi_end = (documents / world.size())*(rank+1); - } - */ - - map<int,int> all_terms; - for (int document_id=mpi_start; document_id<mpi_end; ++document_id) { - assert (document_id < contexts_corpus.num_documents()); - const Document& doc = contexts_corpus.at(document_id); - vector<int> unique_terms; - for (Document::const_iterator docIt=doc.begin(); docIt != doc.end(); ++docIt) { - if (unique_terms.empty() || *docIt != unique_terms.back()) - unique_terms.push_back(*docIt); - // increment this term's frequency - pair<map<int,int>::iterator,bool> insert_result = all_terms.insert(make_pair(*docIt,1)); - if (!insert_result.second) - all_terms[*docIt] = all_terms[*docIt] + 1; - } - documents_out << contexts_corpus.key(document_id) << '\t'; - documents_out << model.max(document_id).first << " " << doc.size() << " ||| "; - for (std::vector<int>::const_iterator termIt=unique_terms.begin(); termIt != unique_terms.end(); ++termIt) { - if (termIt != unique_terms.begin()) - documents_out << " ||| "; - vector<std::string> strings = contexts_corpus.context2string(*termIt); - copy(strings.begin(), strings.end(),ostream_iterator<std::string>(documents_out, " ")); - std::pair<int,MPIPYPTopics::F> maxinfo = model.max(document_id, *termIt); - documents_out << "||| C=" << maxinfo.first << " P=" << maxinfo.second; - } - documents_out <<endl; - } - documents_out.close(); - world.barrier(); - - if (am_root) { - ogzstream root_documents_out(vm["document-topics-out"].as<string>().c_str()); - for (int p=0; p < world.size(); ++p) { - std::string rank_p_prefix((vm["document-topics-out"].as<string>() + ".pyp-process-" + boost::lexical_cast<std::string>(p)).c_str()); - std::ifstream rank_p_trees_istream(rank_p_prefix.c_str(), std::ios_base::binary); - root_documents_out << rank_p_trees_istream.rdbuf(); - rank_p_trees_istream.close(); - remove((rank_p_prefix).c_str()); - } - root_documents_out.close(); - } - - if (am_root && vm.count("default-topics-out")) { - ofstream default_topics(vm["default-topics-out"].as<string>().c_str()); - default_topics << model.max_topic() <<endl; - for (std::map<int,int>::const_iterator termIt=all_terms.begin(); termIt != all_terms.end(); ++termIt) { - vector<std::string> strings = contexts_corpus.context2string(termIt->first); - default_topics << model.max(-1, termIt->first).first << " ||| " << termIt->second << " 
||| "; - copy(strings.begin(), strings.end(),ostream_iterator<std::string>(default_topics, " ")); - default_topics <<endl; - } - } - } - - if (am_root && vm.count("topic-words-out")) { - ogzstream topics_out(vm["topic-words-out"].as<string>().c_str()); - model.print_topic_terms(topics_out); - topics_out.close(); - } - - cout <<endl; - - return 0; -} diff --git a/gi/pyp-topics/src/mt19937ar.c b/gi/pyp-topics/src/mt19937ar.c deleted file mode 100644 index 6551ea39..00000000 --- a/gi/pyp-topics/src/mt19937ar.c +++ /dev/null @@ -1,194 +0,0 @@ -/* - A C-program for MT19937, with initialization improved 2002/1/26. - Coded by Takuji Nishimura and Makoto Matsumoto. - - Before using, initialize the state by using mt_init_genrand(seed) - or mt_init_by_array(init_key, key_length). - - Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. The names of its contributors may not be used to endorse or promote - products derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - Any feedback is very welcome. - http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html - email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space) -*/ - -#include "mt19937ar.h" /* XXX MJ 17th March 2006 */ - -/* Period parameters */ -#define N 624 -#define M 397 -#define MATRIX_A 0x9908b0dfUL /* constant vector a */ -#define UPPER_MASK 0x80000000UL /* most significant w-r bits */ -#define LOWER_MASK 0x7fffffffUL /* least significant r bits */ - -static unsigned long mt[N]; /* the array for the state vector */ -static int mti=N+1; /* mti==N+1 means mt[N] is not initialized */ - -/* initializes mt[N] with a seed */ -void mt_init_genrand(unsigned long s) -{ - mt[0]= s & 0xffffffffUL; - for (mti=1; mti<N; mti++) { - mt[mti] = - (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti); - /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ - /* In the previous versions, MSBs of the seed affect */ - /* only MSBs of the array mt[]. 
*/ - /* 2002/01/09 modified by Makoto Matsumoto */ - mt[mti] &= 0xffffffffUL; - /* for >32 bit machines */ - } -} - -/* initialize by an array with array-length */ -/* init_key is the array for initializing keys */ -/* key_length is its length */ -/* slight change for C++, 2004/2/26 */ -void mt_init_by_array(unsigned long init_key[], int key_length) -{ - int i, j, k; - mt_init_genrand(19650218UL); - i=1; j=0; - k = (N>key_length ? N : key_length); - for (; k; k--) { - mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1664525UL)) - + init_key[j] + j; /* non linear */ - mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */ - i++; j++; - if (i>=N) { mt[0] = mt[N-1]; i=1; } - if (j>=key_length) j=0; - } - for (k=N-1; k; k--) { - mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL)) - - i; /* non linear */ - mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */ - i++; - if (i>=N) { mt[0] = mt[N-1]; i=1; } - } - - mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */ -} - -/* generates a random number on [0,0xffffffff]-interval */ -unsigned long mt_genrand_int32(void) -{ - unsigned long y; - static unsigned long mag01[2]={0x0UL, MATRIX_A}; - /* mag01[x] = x * MATRIX_A for x=0,1 */ - - if (mti >= N) { /* generate N words at one time */ - int kk; - - if (mti == N+1) /* if mt_init_genrand() has not been called, */ - mt_init_genrand(5489UL); /* a default initial seed is used */ - - for (kk=0;kk<N-M;kk++) { - y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK); - mt[kk] = mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1UL]; - } - for (;kk<N-1;kk++) { - y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK); - mt[kk] = mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1UL]; - } - y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK); - mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1UL]; - - mti = 0; - } - - y = mt[mti++]; - - /* Tempering */ - y ^= (y >> 11); - y ^= (y << 7) & 0x9d2c5680UL; - y ^= (y << 15) & 0xefc60000UL; - y ^= (y >> 18); - - return y; -} - -/* generates a random number on [0,0x7fffffff]-interval */ -long mt_genrand_int31(void) -{ - return (long)( mt_genrand_int32()>>1); -} - -/* generates a random number on [0,1]-real-interval */ -double mt_genrand_real1(void) -{ - return mt_genrand_int32()*(1.0/4294967295.0); - /* divided by 2^32-1 */ -} - -/* generates a random number on [0,1)-real-interval */ -double mt_genrand_real2(void) -{ - return mt_genrand_int32()*(1.0/4294967296.0); - /* divided by 2^32 */ -} - -/* generates a random number on (0,1)-real-interval */ -double mt_genrand_real3(void) -{ - return (((double) mt_genrand_int32()) + 0.5)*(1.0/4294967296.0); - /* divided by 2^32 */ -} - -/* generates a random number on [0,1) with 53-bit resolution*/ -double mt_genrand_res53(void) -{ - unsigned long a=mt_genrand_int32()>>5, b=mt_genrand_int32()>>6; - return(a*67108864.0+b)*(1.0/9007199254740992.0); -} -/* These real versions are due to Isaku Wada, 2002/01/09 added */ - -/* -#include <stdio.h> - -int main(void) -{ - int i; - unsigned long init[4]={0x123, 0x234, 0x345, 0x456}, length=4; - mt_init_by_array(init, length); - printf("1000 outputs of genrand_int32()\n"); - for (i=0; i<1000; i++) { - printf("%10lu ", mt_genrand_int32()); - if (i%5==4) printf("\n"); - } - printf("\n1000 outputs of genrand_real2()\n"); - for (i=0; i<1000; i++) { - printf("%10.8f ", mt_genrand_real2()); - if (i%5==4) printf("\n"); - } - return 0; -} -*/ diff --git a/gi/pyp-topics/src/mt19937ar.h b/gi/pyp-topics/src/mt19937ar.h deleted file mode 100644 index caab4045..00000000 --- a/gi/pyp-topics/src/mt19937ar.h +++ /dev/null @@ -1,44 
+0,0 @@ -/* mt19937ar.h - * - * Mark Johnson, 17th March 2006 - */ - -#ifndef MT19937AR_H -#define MT19937AR_H - -#ifdef __cplusplus -extern "C" { -#endif - - /* initializes mt[N] with a seed */ - void mt_init_genrand(unsigned long s); - - /* initialize by an array with array-length */ - /* init_key is the array for initializing keys */ - /* key_length is its length */ - /* slight change for C++, 2004/2/26 */ - void mt_init_by_array(unsigned long init_key[], int key_length); - - /* generates a random number on [0,0xffffffff]-interval */ - unsigned long mt_genrand_int32(void); - - /* generates a random number on [0,0x7fffffff]-interval */ - long mt_genrand_int31(void); - - /* generates a random number on [0,1]-real-interval */ - double mt_genrand_real1(void); - - /* generates a random number on [0,1)-real-interval */ - double mt_genrand_real2(void); - - /* generates a random number on (0,1)-real-interval */ - double mt_genrand_real3(void); - - /* generates a random number on [0,1) with 53-bit resolution*/ - double mt_genrand_res53(void); - -#ifdef __cplusplus -}; -#endif - -#endif /* MT19937AR_H */ diff --git a/gi/pyp-topics/src/pyp-topics.cc b/gi/pyp-topics/src/pyp-topics.cc deleted file mode 100644 index 4de52fd7..00000000 --- a/gi/pyp-topics/src/pyp-topics.cc +++ /dev/null @@ -1,499 +0,0 @@ -#include "timing.h" -#include "pyp-topics.hh" -#include "contexts_corpus.hh" - -//Dict const *dict; - -//#include <boost/date_time/posix_time/posix_time_types.hpp> -void PYPTopics::sample_corpus(const Corpus& corpus, int samples, - int freq_cutoff_start, int freq_cutoff_end, - int freq_cutoff_interval, - int max_contexts_per_document, - F temp_start, F temp_end) { - Timer timer; - //dict = &((ContextsCorpus*) &corpus)->dict(); - - if (!m_backoff.get()) { - m_word_pyps.clear(); - m_word_pyps.push_back(PYPs()); - } - - std::cerr << "\n Training with " << m_word_pyps.size()-1 << " backoff level" - << (m_word_pyps.size()==2 ? 
":" : "s:") << std::endl; - - - for (int i=0; i<(int)m_word_pyps.size(); ++i) - { - m_word_pyps.at(i).reserve(m_num_topics); - for (int j=0; j<m_num_topics; ++j) - m_word_pyps.at(i).push_back(new PYP<int>(0.01, 1.0, m_seed)); - } - std::cerr << std::endl; - - m_document_pyps.reserve(corpus.num_documents()); - for (int j=0; j<corpus.num_documents(); ++j) - m_document_pyps.push_back(new PYP<int>(0.01, 1.0, m_seed)); - - m_topic_p0 = 1.0/m_num_topics; - m_term_p0 = 1.0/(F)m_backoff->terms_at_level(m_word_pyps.size()-1); - //m_term_p0 = 1.0/corpus.num_types(); - m_backoff_p0 = 1.0/corpus.num_documents(); - - std::cerr << " Documents: " << corpus.num_documents() << " Terms: " - << corpus.num_types() << std::endl; - - int frequency_cutoff = freq_cutoff_start; - std::cerr << " Context frequency cutoff set to " << frequency_cutoff << std::endl; - - timer.Reset(); - // Initialisation pass - int document_id=0, topic_counter=0; - for (Corpus::const_iterator corpusIt=corpus.begin(); - corpusIt != corpus.end(); ++corpusIt, ++document_id) { - m_corpus_topics.push_back(DocumentTopics(corpusIt->size(), 0)); - - int term_index=0; - for (Document::const_iterator docIt=corpusIt->begin(); - docIt != corpusIt->end(); ++docIt, ++term_index) { - topic_counter++; - Term term = *docIt; - - // sample a new_topic - //int new_topic = (topic_counter % m_num_topics); - int freq = corpus.context_count(term); - int new_topic = -1; - if (freq > frequency_cutoff - && (!max_contexts_per_document || term_index < max_contexts_per_document)) { - //new_topic = sample(document_id, term); - //new_topic = document_id % m_num_topics; - new_topic = (int) (rnd() * m_num_topics); - - // add the new topic to the PYPs - increment(term, new_topic); - - if (m_use_topic_pyp) { - F p0 = m_topic_pyp.prob(new_topic, m_topic_p0); - int table_delta = m_document_pyps[document_id].increment(new_topic, p0); - if (table_delta) - m_topic_pyp.increment(new_topic, m_topic_p0); - } - else m_document_pyps[document_id].increment(new_topic, m_topic_p0); - } - - m_corpus_topics[document_id][term_index] = new_topic; - } - } - std::cerr << " Initialized in " << timer.Elapsed() << " seconds\n"; - - int* randomDocIndices = new int[corpus.num_documents()]; - for (int i = 0; i < corpus.num_documents(); ++i) - randomDocIndices[i] = i; - - if (num_jobs < max_threads) - num_jobs = max_threads; - int job_incr = (int) ( (float)m_document_pyps.size() / float(num_jobs) ); - - // Sampling phase - for (int curr_sample=0; curr_sample < samples; ++curr_sample) { - if (freq_cutoff_interval > 0 && curr_sample != 1 - && curr_sample % freq_cutoff_interval == 1 - && frequency_cutoff > freq_cutoff_end) { - frequency_cutoff--; - std::cerr << "\n Context frequency cutoff set to " << frequency_cutoff << std::endl; - } - - F temp = 1.0 / (temp_start - curr_sample*(temp_start-temp_end)/samples); - std::cerr << "\n -- Sample " << curr_sample << " (T=" << temp << ") "; std::cerr.flush(); - - // Randomize the corpus indexing array - int tmp; - int processed_terms=0; - /* - for (int i = corpus.num_documents()-1; i > 0; --i) - { - //i+1 since j \in [0,i] but rnd() \in [0,1) - int j = (int)(rnd() * (i+1)); - assert(j >= 0 && j <= i); - tmp = randomDocIndices[i]; - randomDocIndices[i] = randomDocIndices[j]; - randomDocIndices[j] = tmp; - } - */ - - // for each document in the corpus - int document_id; - for (int i=0; i<corpus.num_documents(); ++i) { - document_id = randomDocIndices[i]; - - // for each term in the document - int term_index=0; - Document::const_iterator docEnd = 
corpus.at(document_id).end(); - for (Document::const_iterator docIt=corpus.at(document_id).begin(); - docIt != docEnd; ++docIt, ++term_index) { - if (max_contexts_per_document && term_index >= max_contexts_per_document) - break; - - Term term = *docIt; - - int freq = corpus.context_count(term); - if (freq < frequency_cutoff) - continue; - - processed_terms++; - - // remove the previous topic from the PYPs - int current_topic = m_corpus_topics[document_id][term_index]; - // a negative label means that the term hasn't been sampled yet - if (current_topic >= 0) { - decrement(term, current_topic); - - int table_delta = m_document_pyps[document_id].decrement(current_topic); - if (m_use_topic_pyp && table_delta < 0) - m_topic_pyp.decrement(current_topic); - } - - // sample a new_topic - int new_topic = sample(document_id, term, temp); - //std::cerr << "TERM: " << dict->Convert(term) << " (" << term << ") " << " Old Topic: " - // << current_topic << " New Topic: " << new_topic << "\n" << std::endl; - - // add the new topic to the PYPs - m_corpus_topics[document_id][term_index] = new_topic; - increment(term, new_topic); - - if (m_use_topic_pyp) { - F p0 = m_topic_pyp.prob(new_topic, m_topic_p0); - int table_delta = m_document_pyps[document_id].increment(new_topic, p0); - if (table_delta) - m_topic_pyp.increment(new_topic, m_topic_p0); - } - else m_document_pyps[document_id].increment(new_topic, m_topic_p0); - } - if (document_id && document_id % 10000 == 0) { - std::cerr << "."; std::cerr.flush(); - } - } - std::cerr << " ||| LLH= " << log_likelihood(); - - if (curr_sample != 0 && curr_sample % 10 == 0) { - //if (true) { - std::cerr << " ||| time=" << (timer.Elapsed() / 10.0) << " sec/sample" << std::endl; - timer.Reset(); - std::cerr << " ... Resampling hyperparameters ("; - - // resample the hyperparameters - F log_p=0.0; - if (max_threads == 1) - { - std::cerr << "1 thread)" << std::endl; std::cerr.flush(); - log_p += hresample_topics(); - log_p += hresample_docs(0, m_document_pyps.size()); - } - else - { //parallelize - std::cerr << max_threads << " threads, " << num_jobs << " jobs)" << std::endl; std::cerr.flush(); - - WorkerPool<JobReturnsF, F> pool(max_threads); - int i=0, sz = m_document_pyps.size(); - //documents... - while (i <= sz - 2*job_incr) - { - JobReturnsF job = boost::bind(&PYPTopics::hresample_docs, this, i, i+job_incr); - pool.addJob(job); - i += job_incr; - } - // do all remaining documents - JobReturnsF job = boost::bind(&PYPTopics::hresample_docs, this, i,sz); - pool.addJob(job); - - //topics... 
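// Sharding for the worker pool: each job above resamples the hyperparameters
// of one contiguous block of job_incr document PYPs; the loop guard
// (i <= sz - 2*job_incr) leaves between job_incr and 2*job_incr documents for
// the final job, so the last shard is never vanishingly small. One further
// job resamples the topic PYPs, and pool.get_result() blocks until every job
// has finished, accumulating their log probabilities into log_p.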
- JobReturnsF topics_job = boost::bind(&PYPTopics::hresample_topics, this); - pool.addJob(topics_job); - - log_p += pool.get_result(); //blocks - - } - - if (m_use_topic_pyp) { - m_topic_pyp.resample_prior(rnd); - log_p += m_topic_pyp.log_restaurant_prob(); - } - - std::cerr.precision(10); - std::cerr << " ||| LLH=" << log_likelihood() << " ||| resampling time=" << timer.Elapsed() << " sec" << std::endl; - timer.Reset(); - - int k=0; - std::cerr << "Topics distribution: "; - std::cerr.precision(2); - for (PYPs::iterator pypIt=m_word_pyps.front().begin(); - pypIt != m_word_pyps.front().end(); ++pypIt, ++k) { - if (k % 5 == 0) std::cerr << std::endl << '\t'; - std::cerr << "<" << k << ":" << pypIt->num_customers() << "," - << pypIt->num_types() << "," << m_topic_pyp.prob(k, m_topic_p0) << "> "; - } - std::cerr.precision(10); - std::cerr << std::endl; - } - } - delete [] randomDocIndices; -} - -PYPTopics::F PYPTopics::hresample_docs(int start, int end) -{ - int resample_counter=0; - F log_p = 0.0; - assert(start >= 0); - assert(end >= 0); - assert(start <= end); - for (int i=start; i < end; ++i) - { - m_document_pyps[i].resample_prior(rnd); - log_p += m_document_pyps[i].log_restaurant_prob(); - if (resample_counter++ % 5000 == 0) { - std::cerr << "."; std::cerr.flush(); - } - } - return log_p; -} - -PYPTopics::F PYPTopics::hresample_topics() -{ - F log_p = 0.0; - for (std::vector<PYPs>::iterator levelIt=m_word_pyps.begin(); - levelIt != m_word_pyps.end(); ++levelIt) { - for (PYPs::iterator pypIt=levelIt->begin(); - pypIt != levelIt->end(); ++pypIt) { - - pypIt->resample_prior(rnd); - log_p += pypIt->log_restaurant_prob(); - } - std::cerr << log_p << std::endl; - } - return log_p; -} - -PYPTopics::F PYPTopics::log_likelihood() const -{ - F log_p = 0.0; - - // LLH of topic term distribution - size_t i=0; - for (std::vector<PYPs>::const_iterator levelIt=m_word_pyps.begin(); - levelIt != m_word_pyps.end(); ++levelIt, ++i) { - for (PYPs::const_iterator pypIt=levelIt->begin(); - pypIt != levelIt->end(); ++pypIt) { - log_p += pypIt->log_restaurant_prob(); - - if (i == m_word_pyps.size()-1) - log_p += (pypIt->num_tables() * -log(m_backoff->terms_at_level(i))); - else - log_p += (pypIt->num_tables() * log(m_term_p0)); - } - } - std::cerr << " TERM LLH: " << log_p << " "; //std::endl; - - // LLH of document topic distribution - for (size_t i=0; i < m_document_pyps.size(); ++i) { - log_p += m_document_pyps[i].log_restaurant_prob(); - if (!m_use_topic_pyp) log_p += (m_document_pyps[i].num_tables() * log(m_topic_p0)); - } - if (m_use_topic_pyp) { - log_p += m_topic_pyp.log_restaurant_prob(); - log_p += (m_topic_pyp.num_tables() * log(m_topic_p0)); - } - - return log_p; -} - -void PYPTopics::decrement(const Term& term, int topic, int level) { - //std::cerr << "PYPTopics::decrement(" << term << "," << topic << "," << level << ")" << std::endl; - int table_delta = m_word_pyps.at(level).at(topic).decrement(term); - if (table_delta && m_backoff.get()) { - Term backoff_term = (*m_backoff)[term]; - if (!m_backoff->is_null(backoff_term)) - decrement(backoff_term, topic, level+1); - } -} - -void PYPTopics::increment(const Term& term, int topic, int level) { - //std::cerr << "PYPTopics::increment(" << term << "," << topic << "," << level << ")" << std::endl; - int table_delta = m_word_pyps.at(level).at(topic).increment(term, word_pyps_p0(term, topic, level)); - - if (table_delta && m_backoff.get()) { - Term backoff_term = (*m_backoff)[term]; - if (!m_backoff->is_null(backoff_term)) - increment(backoff_term, topic, 
level+1); - } -} - -int PYPTopics::sample(const DocumentId& doc, const Term& term, F inv_temp) { - // First pass: collect probs - F sum=0.0; - std::vector<F> sums; - for (int k=0; k<m_num_topics; ++k) { - F p_w_k = prob(term, k); - - F topic_prob = m_topic_p0; - if (m_use_topic_pyp) topic_prob = m_topic_pyp.prob(k, m_topic_p0); - - //F p_k_d = m_document_pyps[doc].prob(k, topic_prob); - F p_k_d = m_document_pyps[doc].unnormalised_prob(k, topic_prob); - - F prob = p_w_k*p_k_d; - /* - if (prob < 0.0) { std::cerr << "\n\n" << prob << " " << p_w_k << " " << p_k_d << std::endl; assert(false); } - if (prob > 1.0) { std::cerr << "\n\n" << prob << " " << p_w_k << " " << p_k_d << std::endl; assert(false); } - assert (pow(prob, inv_temp) >= 0.0); - assert (pow(prob, inv_temp) <= 1.0); - */ - sum += pow(prob, inv_temp); - sums.push_back(sum); - } - // Second pass: sample a topic - F cutoff = rnd() * sum; - for (int k=0; k<m_num_topics; ++k) { - if (cutoff <= sums[k]) - return k; - } - assert(false); -} - -PYPTopics::F PYPTopics::word_pyps_p0(const Term& term, int topic, int level) const { - //for (int i=0; i<level+1; ++i) std::cerr << " "; - //std::cerr << "PYPTopics::word_pyps_p0(" << term << "," << topic << "," << level << ")" << std::endl; - - F p0 = m_term_p0; - if (m_backoff.get()) { - //static F fudge=m_backoff_p0; // TODO - - Term backoff_term = (*m_backoff)[term]; - //std::cerr << "T: " << term << " BO: " << backoff_term << std::endl; - if (!m_backoff->is_null(backoff_term)) { - assert (level < m_backoff->order()); - //p0 = (1.0/(F)m_backoff->terms_at_level(level))*prob(backoff_term, topic, level+1); - p0 = m_term_p0*prob(backoff_term, topic, level+1); - p0 = prob(backoff_term, topic, level+1); - } - else - p0 = (1.0/(F) m_backoff->terms_at_level(level)); - //p0 = m_term_p0; - } - //for (int i=0; i<level+1; ++i) std::cerr << " "; - //std::cerr << "PYPTopics::word_pyps_p0(" << term << "," << topic << "," << level << ") = " << p0 << std::endl; - return p0; -} - -PYPTopics::F PYPTopics::prob(const Term& term, int topic, int level) const { - //for (int i=0; i<level+1; ++i) std::cerr << " "; - //std::cerr << "PYPTopics::prob(" << dict->Convert(term) << "," << topic << "," << level << ")" << std::endl; - - F p0 = word_pyps_p0(term, topic, level); - F p_w_k = m_word_pyps.at(level).at(topic).prob(term, p0); - - /* - for (int i=0; i<level+1; ++i) std::cerr << " "; - std::cerr << "PYPTopics::prob(" << dict->Convert(term) << "," << topic << "," << level << ") = " << p_w_k << std::endl; - for (int i=0; i<level+1; ++i) std::cerr << " "; - m_word_pyps.at(level).at(topic).debug_info(std::cerr); - */ - return p_w_k; -} - -int PYPTopics::max_topic() const { - if (!m_use_topic_pyp) - return -1; - - F current_max=0.0; - int current_topic=-1; - for (int k=0; k<m_num_topics; ++k) { - F prob = m_topic_pyp.prob(k, m_topic_p0); - if (prob > current_max) { - current_max = prob; - current_topic = k; - } - } - assert(current_topic >= 0); - return current_topic; -} - -std::pair<int,PYPTopics::F> PYPTopics::max(const DocumentId& doc) const { - //std::cerr << "PYPTopics::max(" << doc << "," << term << ")" << std::endl; - // collect probs - F current_max=0.0; - int current_topic=-1; - for (int k=0; k<m_num_topics; ++k) { - //F p_w_k = prob(term, k); - - F topic_prob = m_topic_p0; - if (m_use_topic_pyp) - topic_prob = m_topic_pyp.prob(k, m_topic_p0); - - F prob = 0; - if (doc < 0) prob = topic_prob; - else prob = m_document_pyps[doc].prob(k, topic_prob); - - if (prob > current_max) { - current_max = prob; - current_topic = 
k; - } - } - assert(current_topic >= 0); - assert(current_max >= 0); - return std::make_pair(current_topic, current_max); -} - -std::pair<int,PYPTopics::F> PYPTopics::max(const DocumentId& doc, const Term& term) const { - //std::cerr << "PYPTopics::max(" << doc << "," << term << ")" << std::endl; - // collect probs - F current_max=0.0; - int current_topic=-1; - for (int k=0; k<m_num_topics; ++k) { - F p_w_k = prob(term, k); - - F topic_prob = m_topic_p0; - if (m_use_topic_pyp) - topic_prob = m_topic_pyp.prob(k, m_topic_p0); - - F p_k_d = 0; - if (doc < 0) p_k_d = topic_prob; - else p_k_d = m_document_pyps[doc].prob(k, topic_prob); - - F prob = (p_w_k*p_k_d); - if (prob > current_max) { - current_max = prob; - current_topic = k; - } - } - assert(current_topic >= 0); - assert(current_max >= 0); - return std::make_pair(current_topic,current_max); -} - -std::ostream& PYPTopics::print_document_topics(std::ostream& out) const { - for (CorpusTopics::const_iterator corpusIt=m_corpus_topics.begin(); - corpusIt != m_corpus_topics.end(); ++corpusIt) { - int term_index=0; - for (DocumentTopics::const_iterator docIt=corpusIt->begin(); - docIt != corpusIt->end(); ++docIt, ++term_index) { - if (term_index) out << " "; - out << *docIt; - } - out << std::endl; - } - return out; -} - -std::ostream& PYPTopics::print_topic_terms(std::ostream& out) const { - for (PYPs::const_iterator pypsIt=m_word_pyps.front().begin(); - pypsIt != m_word_pyps.front().end(); ++pypsIt) { - int term_index=0; - for (PYP<int>::const_iterator termIt=pypsIt->begin(); - termIt != pypsIt->end(); ++termIt, ++term_index) { - if (term_index) out << " "; - out << termIt->first << ":" << termIt->second; - } - out << std::endl; - } - return out; -} diff --git a/gi/pyp-topics/src/pyp-topics.hh b/gi/pyp-topics/src/pyp-topics.hh deleted file mode 100644 index 3a910540..00000000 --- a/gi/pyp-topics/src/pyp-topics.hh +++ /dev/null @@ -1,98 +0,0 @@ -#ifndef PYP_TOPICS_HH -#define PYP_TOPICS_HH - -#include <vector> -#include <iostream> -#include <boost/ptr_container/ptr_vector.hpp> - -#include <boost/random/uniform_real.hpp> -#include <boost/random/variate_generator.hpp> -#include <boost/random/mersenne_twister.hpp> - -#include "pyp.hh" -#include "corpus.hh" -#include "workers.hh" - -class PYPTopics { -public: - typedef std::vector<int> DocumentTopics; - typedef std::vector<DocumentTopics> CorpusTopics; - typedef long double F; - -public: - PYPTopics(int num_topics, bool use_topic_pyp=false, unsigned long seed = 0, - int max_threads = 1, int num_jobs = 1) - : m_num_topics(num_topics), m_word_pyps(1), - m_topic_pyp(0.5,1.0,seed), m_use_topic_pyp(use_topic_pyp), - m_seed(seed), - uni_dist(0,1), rng(seed == 0 ? 
(unsigned long)this : seed), - rnd(rng, uni_dist), max_threads(max_threads), num_jobs(num_jobs) {} - - void sample_corpus(const Corpus& corpus, int samples, - int freq_cutoff_start=0, int freq_cutoff_end=0, - int freq_cutoff_interval=0, - int max_contexts_per_document=0, - F temp_start=1.0, F temp_end=1.0); - - int sample(const DocumentId& doc, const Term& term, F inv_temp=1.0); - std::pair<int,F> max(const DocumentId& doc, const Term& term) const; - std::pair<int,F> max(const DocumentId& doc) const; - int max_topic() const; - - void set_backoff(const std::string& filename) { - m_backoff.reset(new TermBackoff); - m_backoff->read(filename); - m_word_pyps.clear(); - m_word_pyps.resize(m_backoff->order(), PYPs()); - } - void set_backoff(TermBackoffPtr backoff) { - m_backoff = backoff; - m_word_pyps.clear(); - m_word_pyps.resize(m_backoff->order(), PYPs()); - } - - F prob(const Term& term, int topic, int level=0) const; - void decrement(const Term& term, int topic, int level=0); - void increment(const Term& term, int topic, int level=0); - - F log_likelihood() const; - - std::ostream& print_document_topics(std::ostream& out) const; - std::ostream& print_topic_terms(std::ostream& out) const; - -private: - F word_pyps_p0(const Term& term, int topic, int level) const; - - int m_num_topics; - F m_term_p0, m_topic_p0, m_backoff_p0; - - CorpusTopics m_corpus_topics; - typedef boost::ptr_vector< PYP<int> > PYPs; - PYPs m_document_pyps; - std::vector<PYPs> m_word_pyps; - PYP<int> m_topic_pyp; - bool m_use_topic_pyp; - - unsigned long m_seed; - - typedef boost::mt19937 base_generator_type; - typedef boost::uniform_real<> uni_dist_type; - typedef boost::variate_generator<base_generator_type&, uni_dist_type> gen_type; - - uni_dist_type uni_dist; - base_generator_type rng; //this gets the seed - gen_type rnd; //instantiate: rnd(rng, uni_dist) - //call: rnd() generates uniform on [0,1) - - typedef boost::function<F()> JobReturnsF; - - F hresample_docs(int start, int end); //does i in [start, end) - - F hresample_topics(); - - int max_threads; - int num_jobs; - TermBackoffPtr m_backoff; -}; - -#endif // PYP_TOPICS_HH diff --git a/gi/pyp-topics/src/pyp.hh b/gi/pyp-topics/src/pyp.hh deleted file mode 100644 index b1cb62be..00000000 --- a/gi/pyp-topics/src/pyp.hh +++ /dev/null @@ -1,566 +0,0 @@ -#ifndef _pyp_hh -#define _pyp_hh - -#include "slice-sampler.h" -#include <math.h> -#include <map> -#include <tr1/unordered_map> -//#include <google/sparse_hash_map> - -#include <boost/random/uniform_real.hpp> -#include <boost/random/variate_generator.hpp> -#include <boost/random/mersenne_twister.hpp> - -#include "log_add.h" -#include "mt19937ar.h" - -// -// Pitman-Yor process with customer and table tracking -// - -template <typename Dish, typename Hash=std::tr1::hash<Dish> > -class PYP : protected std::tr1::unordered_map<Dish, int, Hash> -//class PYP : protected google::sparse_hash_map<Dish, int, Hash> -{ -public: - using std::tr1::unordered_map<Dish,int>::const_iterator; - using std::tr1::unordered_map<Dish,int>::iterator; - using std::tr1::unordered_map<Dish,int>::begin; - using std::tr1::unordered_map<Dish,int>::end; -// using google::sparse_hash_map<Dish,int>::const_iterator; -// using google::sparse_hash_map<Dish,int>::iterator; -// using google::sparse_hash_map<Dish,int>::begin; -// using google::sparse_hash_map<Dish,int>::end; - - PYP(double a, double b, unsigned long seed = 0, Hash hash=Hash()); - - virtual int increment(Dish d, double p0); - virtual int decrement(Dish d); - - // lookup functions - int 
count(Dish d) const; - double prob(Dish dish, double p0) const; - double prob(Dish dish, double dcd, double dca, - double dtd, double dta, double p0) const; - double unnormalised_prob(Dish dish, double p0) const; - - int num_customers() const { return _total_customers; } - int num_types() const { return std::tr1::unordered_map<Dish,int>::size(); } - //int num_types() const { return google::sparse_hash_map<Dish,int>::size(); } - bool empty() const { return _total_customers == 0; } - - double log_prob(Dish dish, double log_p0) const; - // nb. d* are NOT logs - double log_prob(Dish dish, double dcd, double dca, - double dtd, double dta, double log_p0) const; - - int num_tables(Dish dish) const; - int num_tables() const; - - double a() const { return _a; } - void set_a(double a) { _a = a; } - - double b() const { return _b; } - void set_b(double b) { _b = b; } - - virtual void clear(); - std::ostream& debug_info(std::ostream& os) const; - - double log_restaurant_prob() const; - double log_prior() const; - static double log_prior_a(double a, double beta_a, double beta_b); - static double log_prior_b(double b, double gamma_c, double gamma_s); - - template <typename Uniform01> - void resample_prior(Uniform01& rnd); - template <typename Uniform01> - void resample_prior_a(Uniform01& rnd); - template <typename Uniform01> - void resample_prior_b(Uniform01& rnd); - -protected: - double _a, _b; // parameters of the Pitman-Yor distribution - double _a_beta_a, _a_beta_b; // parameters of Beta prior on a - double _b_gamma_s, _b_gamma_c; // parameters of Gamma prior on b - - struct TableCounter { - TableCounter() : tables(0) {}; - int tables; - std::map<int, int> table_histogram; // num customers at table -> number tables - }; - typedef std::tr1::unordered_map<Dish, TableCounter, Hash> DishTableType; - //typedef google::sparse_hash_map<Dish, TableCounter, Hash> DishTableType; - DishTableType _dish_tables; - int _total_customers, _total_tables; - - typedef boost::mt19937 base_generator_type; - typedef boost::uniform_real<> uni_dist_type; - typedef boost::variate_generator<base_generator_type&, uni_dist_type> gen_type; - -// uni_dist_type uni_dist; -// base_generator_type rng; //this gets the seed -// gen_type rnd; //instantiate: rnd(rng, uni_dist) - //call: rnd() generates uniform on [0,1) - - // Function objects for calculating the parts of the log_prob for - // the parameters a and b - struct resample_a_type { - int n, m; double b, a_beta_a, a_beta_b; - const DishTableType& dish_tables; - resample_a_type(int n, int m, double b, double a_beta_a, - double a_beta_b, const DishTableType& dish_tables) - : n(n), m(m), b(b), a_beta_a(a_beta_a), a_beta_b(a_beta_b), dish_tables(dish_tables) {} - - double operator() (double proposed_a) const { - double log_prior = log_prior_a(proposed_a, a_beta_a, a_beta_b); - double log_prob = 0.0; - double lgamma1a = lgamma(1.0 - proposed_a); - for (typename DishTableType::const_iterator dish_it=dish_tables.begin(); dish_it != dish_tables.end(); ++dish_it) - for (std::map<int, int>::const_iterator table_it=dish_it->second.table_histogram.begin(); - table_it !=dish_it->second.table_histogram.end(); ++table_it) - log_prob += (table_it->second * (lgamma(table_it->first - proposed_a) - lgamma1a)); - - log_prob += (proposed_a == 0.0 ? 
(m-1.0)*log(b) - : ((m-1.0)*log(proposed_a) + lgamma((m-1.0) + b/proposed_a) - lgamma(b/proposed_a))); - assert(std::isfinite(log_prob)); - return log_prob + log_prior; - } - }; - - struct resample_b_type { - int n, m; double a, b_gamma_c, b_gamma_s; - resample_b_type(int n, int m, double a, double b_gamma_c, double b_gamma_s) - : n(n), m(m), a(a), b_gamma_c(b_gamma_c), b_gamma_s(b_gamma_s) {} - - double operator() (double proposed_b) const { - double log_prior = log_prior_b(proposed_b, b_gamma_c, b_gamma_s); - double log_prob = 0.0; - log_prob += (a == 0.0 ? (m-1.0)*log(proposed_b) - : ((m-1.0)*log(a) + lgamma((m-1.0) + proposed_b/a) - lgamma(proposed_b/a))); - log_prob += (lgamma(1.0+proposed_b) - lgamma(n+proposed_b)); - return log_prob + log_prior; - } - }; - - /* lbetadist() returns the log probability density of x under a Beta(alpha,beta) - * distribution. - copied from Mark Johnson's gammadist.c - */ - static long double lbetadist(long double x, long double alpha, long double beta); - - /* lgammadist() returns the log probability density of x under a Gamma(alpha,beta) - * distribution - copied from Mark Johnson's gammadist.c - */ - static long double lgammadist(long double x, long double alpha, long double beta); - -}; - -template <typename Dish, typename Hash> -PYP<Dish,Hash>::PYP(double a, double b, unsigned long seed, Hash) -: std::tr1::unordered_map<Dish, int, Hash>(10), _a(a), _b(b), -//: google::sparse_hash_map<Dish, int, Hash>(10), _a(a), _b(b), - _a_beta_a(1), _a_beta_b(1), _b_gamma_s(1), _b_gamma_c(1), - //_a_beta_a(1), _a_beta_b(1), _b_gamma_s(10), _b_gamma_c(0.1), - _total_customers(0), _total_tables(0)//, - //uni_dist(0,1), rng(seed == 0 ? (unsigned long)this : seed), rnd(rng, uni_dist) -{ -// std::cerr << "\t##PYP<Dish,Hash>::PYP(a=" << _a << ",b=" << _b << ")" << std::endl; - //set_deleted_key(-std::numeric_limits<Dish>::max()); -} - -template <typename Dish, typename Hash> -double -PYP<Dish,Hash>::prob(Dish dish, double p0) const -{ - int c = count(dish), t = num_tables(dish); - double r = num_tables() * _a + _b; - //std::cerr << "\t\t\t\tPYP<Dish,Hash>::prob(" << dish << "," << p0 << ") c=" << c << " r=" << r << std::endl; - if (c > 0) - return (c - _a * t + r * p0) / (num_customers() + _b); - else - return r * p0 / (num_customers() + _b); -} - -template <typename Dish, typename Hash> -double -PYP<Dish,Hash>::unnormalised_prob(Dish dish, double p0) const -{ - int c = count(dish), t = num_tables(dish); - double r = num_tables() * _a + _b; - if (c > 0) return (c - _a * t + r * p0); - else return r * p0; -} - -template <typename Dish, typename Hash> -double -PYP<Dish,Hash>::prob(Dish dish, double dcd, double dca, - double dtd, double dta, double p0) -const -{ - int c = count(dish) + dcd, t = num_tables(dish) + dtd; - double r = (num_tables() + dta) * _a + _b; - if (c > 0) - return (c - _a * t + r * p0) / (num_customers() + dca + _b); - else - return r * p0 / (num_customers() + dca + _b); -} - -template <typename Dish, typename Hash> -double -PYP<Dish,Hash>::log_prob(Dish dish, double log_p0) const -{ - using std::log; - int c = count(dish), t = num_tables(dish); - double r = log(num_tables() * _a + _b); - if (c > 0) - return Log<double>::add(log(c - _a * t), r + log_p0) - - log(num_customers() + _b); - else - return r + log_p0 - log(num_customers() + _b); -} - -template <typename Dish, typename Hash> -double -PYP<Dish,Hash>::log_prob(Dish dish, double dcd, double dca, - double dtd, double dta, double log_p0) -const -{ - using std::log; - int c = count(dish) + dcd, t = 
num_tables(dish) + dtd; - double r = log((num_tables() + dta) * _a + _b); - if (c > 0) - return Log<double>::add(log(c - _a * t), r + log_p0) - - log(num_customers() + dca + _b); - else - return r + log_p0 - log(num_customers() + dca + _b); -} - -template <typename Dish, typename Hash> -int -PYP<Dish,Hash>::increment(Dish dish, double p0) { - int delta = 0; - TableCounter &tc = _dish_tables[dish]; - - // seated on a new or existing table? - int c = count(dish), t = num_tables(dish), T = num_tables(); - double pshare = (c > 0) ? (c - _a*t) : 0.0; - double pnew = (_b + _a*T) * p0; - assert (pshare >= 0.0); - //assert (pnew > 0.0); - - //if (rnd() < pnew / (pshare + pnew)) { - if (mt_genrand_res53() < pnew / (pshare + pnew)) { - // assign to a new table - tc.tables += 1; - tc.table_histogram[1] += 1; - _total_tables += 1; - delta = 1; - } - else { - // randomly assign to an existing table - // remove constant denominator from inner loop - //double r = rnd() * (c - _a*t); - double r = mt_genrand_res53() * (c - _a*t); - for (std::map<int,int>::iterator - hit = tc.table_histogram.begin(); - hit != tc.table_histogram.end(); ++hit) { - r -= ((hit->first - _a) * hit->second); - if (r <= 0) { - tc.table_histogram[hit->first+1] += 1; - hit->second -= 1; - if (hit->second == 0) - tc.table_histogram.erase(hit); - break; - } - } - if (r > 0) { - std::cerr << r << " " << c << " " << _a << " " << t << std::endl; - assert(false); - } - delta = 0; - } - - std::tr1::unordered_map<Dish,int,Hash>::operator[](dish) += 1; - //google::sparse_hash_map<Dish,int,Hash>::operator[](dish) += 1; - _total_customers += 1; - - return delta; -} - -template <typename Dish, typename Hash> -int -PYP<Dish,Hash>::count(Dish dish) const -{ - typename std::tr1::unordered_map<Dish, int>::const_iterator - //typename google::sparse_hash_map<Dish, int>::const_iterator - dcit = find(dish); - if (dcit != end()) - return dcit->second; - else - return 0; -} - -template <typename Dish, typename Hash> -int -PYP<Dish,Hash>::decrement(Dish dish) -{ - typename std::tr1::unordered_map<Dish, int>::iterator dcit = find(dish); - //typename google::sparse_hash_map<Dish, int>::iterator dcit = find(dish); - if (dcit == end()) { - std::cerr << dish << std::endl; - assert(false); - } - - int delta = 0; - - typename std::tr1::unordered_map<Dish, TableCounter>::iterator dtit = _dish_tables.find(dish); - //typename google::sparse_hash_map<Dish, TableCounter>::iterator dtit = _dish_tables.find(dish); - if (dtit == _dish_tables.end()) { - std::cerr << dish << std::endl; - assert(false); - } - TableCounter &tc = dtit->second; - - //std::cerr << "\tdecrement for " << dish << "\n"; - //std::cerr << "\tBEFORE histogram: " << tc.table_histogram << " "; - //std::cerr << "count: " << count(dish) << " "; - //std::cerr << "tables: " << tc.tables << "\n"; - - //double r = rnd() * count(dish); - double r = mt_genrand_res53() * count(dish); - for (std::map<int,int>::iterator hit = tc.table_histogram.begin(); - hit != tc.table_histogram.end(); ++hit) - { - //r -= (hit->first - _a) * hit->second; - r -= (hit->first) * hit->second; - if (r <= 0) - { - if (hit->first > 1) - tc.table_histogram[hit->first-1] += 1; - else - { - delta = -1; - tc.tables -= 1; - _total_tables -= 1; - } - - hit->second -= 1; - if (hit->second == 0) tc.table_histogram.erase(hit); - break; - } - } - if (r > 0) { - std::cerr << r << " " << count(dish) << " " << _a << " " << num_tables(dish) << std::endl; - assert(false); - } - - // remove the customer - dcit->second -= 1; - _total_customers -= 1; - 
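// Consistency checks for the removal: a dish's customer count must never go
// negative, and once its last customer leaves, the dish is erased from both
// the count map and _dish_tables so that num_types() and the table
// histograms stay in sync.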
assert(dcit->second >= 0); - if (dcit->second == 0) { - erase(dcit); - _dish_tables.erase(dtit); - //std::cerr << "\tAFTER histogram: Empty\n"; - } - else { - //std::cerr << "\tAFTER histogram: " << _dish_tables[dish].table_histogram << " "; - //std::cerr << "count: " << count(dish) << " "; - //std::cerr << "tables: " << _dish_tables[dish].tables << "\n"; - } - - return delta; -} - -template <typename Dish, typename Hash> -int -PYP<Dish,Hash>::num_tables(Dish dish) const -{ - typename std::tr1::unordered_map<Dish, TableCounter, Hash>::const_iterator - //typename google::sparse_hash_map<Dish, TableCounter, Hash>::const_iterator - dtit = _dish_tables.find(dish); - - //assert(dtit != _dish_tables.end()); - if (dtit == _dish_tables.end()) - return 0; - - return dtit->second.tables; -} - -template <typename Dish, typename Hash> -int -PYP<Dish,Hash>::num_tables() const -{ - return _total_tables; -} - -template <typename Dish, typename Hash> -std::ostream& -PYP<Dish,Hash>::debug_info(std::ostream& os) const -{ - int hists = 0, tables = 0; - for (typename std::tr1::unordered_map<Dish, TableCounter, Hash>::const_iterator - //for (typename google::sparse_hash_map<Dish, TableCounter, Hash>::const_iterator - dtit = _dish_tables.begin(); dtit != _dish_tables.end(); ++dtit) - { - hists += dtit->second.table_histogram.size(); - tables += dtit->second.tables; - -// if (dtit->second.tables <= 0) -// std::cerr << dtit->first << " " << count(dtit->first) << std::endl; - assert(dtit->second.tables > 0); - assert(!dtit->second.table_histogram.empty()); - -// os << "Dish " << dtit->first << " has " << count(dtit->first) << " customers, and is sitting at " << dtit->second.tables << " tables.\n"; - for (std::map<int,int>::const_iterator - hit = dtit->second.table_histogram.begin(); - hit != dtit->second.table_histogram.end(); ++hit) { -// os << " " << hit->second << " tables with " << hit->first << " customers." << std::endl; - assert(hit->second > 0); - } - } - - os << "restaurant has " - << _total_customers << " customers; " - << _total_tables << " tables; " - << tables << " tables'; " - << num_types() << " dishes; " - << _dish_tables.size() << " dishes'; and " - << hists << " histogram entries\n"; - - return os; -} - -template <typename Dish, typename Hash> -void -PYP<Dish,Hash>::clear() -{ - this->std::tr1::unordered_map<Dish,int,Hash>::clear(); - //this->google::sparse_hash_map<Dish,int,Hash>::clear(); - _dish_tables.clear(); - _total_tables = _total_customers = 0; -} - -// log_restaurant_prob returns the log probability of the PYP table configuration. -// Excludes Hierarchical P0 term which must be calculated separately. -template <typename Dish, typename Hash> -double -PYP<Dish,Hash>::log_restaurant_prob() const { - if (_total_customers < 1) - return (double)0.0; - - double log_prob = 0.0; - double lgamma1a = lgamma(1.0-_a); - - //std::cerr << "-------------------\n" << std::endl; - for (typename DishTableType::const_iterator dish_it=_dish_tables.begin(); - dish_it != _dish_tables.end(); ++dish_it) { - for (std::map<int, int>::const_iterator table_it=dish_it->second.table_histogram.begin(); - table_it !=dish_it->second.table_histogram.end(); ++table_it) { - log_prob += (table_it->second * (lgamma(table_it->first - _a) - lgamma1a)); - //std::cerr << "|" << dish_it->first->parent << " --> " << dish_it->first->rhs << " " << table_it->first << " " << table_it->second << " " << log_prob; - } - } - //std::cerr << std::endl; - - log_prob += (_a == (double)0.0 ? 
(_total_tables-1.0)*log(_b) : (_total_tables-1.0)*log(_a) + lgamma((_total_tables-1.0) + _b/_a) - lgamma(_b/_a)); - //std::cerr << "\t\t" << log_prob << std::endl; - log_prob += (lgamma(1.0 + _b) - lgamma(_total_customers + _b)); - - //std::cerr << _total_customers << " " << _total_tables << " " << log_prob << " " << log_prior() << std::endl; - //std::cerr << _a << " " << _b << std::endl; - if (!std::isfinite(log_prob)) { - assert(false); - } - //return log_prob; - if (log_prob > 0.0) - std::cerr << log_prob << std::endl; - return log_prob;// + log_prior(); -} - -template <typename Dish, typename Hash> -double -PYP<Dish,Hash>::log_prior() const { - double prior = 0.0; - if (_a_beta_a > 0.0 && _a_beta_b > 0.0 && _a > 0.0) - prior += log_prior_a(_a, _a_beta_a, _a_beta_b); - if (_b_gamma_s > 0.0 && _b_gamma_c > 0.0) - prior += log_prior_b(_b, _b_gamma_c, _b_gamma_s); - - return prior; -} - -template <typename Dish, typename Hash> -double -PYP<Dish,Hash>::log_prior_a(double a, double beta_a, double beta_b) { - return lbetadist(a, beta_a, beta_b); -} - -template <typename Dish, typename Hash> -double -PYP<Dish,Hash>::log_prior_b(double b, double gamma_c, double gamma_s) { - return lgammadist(b, gamma_c, gamma_s); -} - -template <typename Dish, typename Hash> -long double PYP<Dish,Hash>::lbetadist(long double x, long double alpha, long double beta) { - assert(x > 0); - assert(x < 1); - assert(alpha > 0); - assert(beta > 0); - return (alpha-1)*log(x)+(beta-1)*log(1-x)+lgamma(alpha+beta)-lgamma(alpha)-lgamma(beta); -//boost::math::lgamma -} - -template <typename Dish, typename Hash> -long double PYP<Dish,Hash>::lgammadist(long double x, long double alpha, long double beta) { - assert(alpha > 0); - assert(beta > 0); - return (alpha-1)*log(x) - alpha*log(beta) - x/beta - lgamma(alpha); -} - - -template <typename Dish, typename Hash> - template <typename Uniform01> -void -PYP<Dish,Hash>::resample_prior(Uniform01& rnd) { - for (int num_its=5; num_its >= 0; --num_its) { - resample_prior_b(rnd); - resample_prior_a(rnd); - } - resample_prior_b(rnd); -} - -template <typename Dish, typename Hash> - template <typename Uniform01> -void -PYP<Dish,Hash>::resample_prior_b(Uniform01& rnd) { - if (_total_tables == 0) - return; - - //int niterations = 10; // number of resampling iterations - int niterations = 5; // number of resampling iterations - //std::cerr << "\n## resample_prior_b(), initial a = " << _a << ", b = " << _b << std::endl; - resample_b_type b_log_prob(_total_customers, _total_tables, _a, _b_gamma_c, _b_gamma_s); - _b = slice_sampler1d(b_log_prob, _b, rnd, (double) 0.0, std::numeric_limits<double>::infinity(), - //_b = slice_sampler1d(b_log_prob, _b, mt_genrand_res53, (double) 0.0, std::numeric_limits<double>::infinity(), - (double) 0.0, niterations, 100*niterations); - //std::cerr << "\n## resample_prior_b(), final a = " << _a << ", b = " << _b << std::endl; -} - -template <typename Dish, typename Hash> - template <typename Uniform01> -void -PYP<Dish,Hash>::resample_prior_a(Uniform01& rnd) { - if (_total_tables == 0) - return; - - //int niterations = 10; - int niterations = 5; - //std::cerr << "\n## Initial a = " << _a << ", b = " << _b << std::endl; - resample_a_type a_log_prob(_total_customers, _total_tables, _b, _a_beta_a, _a_beta_b, _dish_tables); - _a = slice_sampler1d(a_log_prob, _a, rnd, std::numeric_limits<double>::min(), - //_a = slice_sampler1d(a_log_prob, _a, mt_genrand_res53, std::numeric_limits<double>::min(), - (double) 1.0, (double) 0.0, niterations, 100*niterations); -} - -#endif 
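The seating rule that `PYP<Dish,Hash>::prob` implements above is the standard Pitman-Yor Chinese restaurant process predictive distribution: a dish w with c_w customers at t_w tables, in a restaurant holding n customers at T tables, with discount a and concentration b, has probability (c_w - a*t_w + (a*T + b)*p0) / (n + b), where p0 is the base distribution. The standalone sketch below just evaluates that formula on hard-coded toy counts; it is not part of the deleted sources, though a=0.01 and b=1.0 match the values the topic PYPs are constructed with in pyp-topics.cc, and the uniform p0 over 50 topics matches the default of --topics:

    #include <cstdio>

    int main() {
      const double a = 0.01, b = 1.0; // discount and concentration, as in sample_corpus
      const double p0 = 1.0 / 50.0;   // uniform base distribution over 50 topics
      // Toy restaurant state: dish w has c=3 customers at t=2 tables;
      // the restaurant holds n=10 customers at T=5 tables overall.
      const int c = 3, t = 2, n = 10, T = 5;
      const double r = a * T + b;               // mass reserved for the base distribution
      const double p_seen   = (c - a * t + r * p0) / (n + b);
      const double p_unseen = r * p0 / (n + b); // the c == 0 branch of PYP::prob
      std::printf("p(seen)=%g p(unseen)=%g\n", p_seen, p_unseen);
      return 0;
    }

Both hyperparameters are subsequently resampled by slice sampling (see slice-sampler.h, next) under the Beta prior on a and the Gamma prior on b declared in the class.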
diff --git a/gi/pyp-topics/src/slice-sampler.h b/gi/pyp-topics/src/slice-sampler.h deleted file mode 100644 index 3108a0f7..00000000 --- a/gi/pyp-topics/src/slice-sampler.h +++ /dev/null @@ -1,192 +0,0 @@ -//! slice-sampler.h is an MCMC slice sampler -//! -//! Mark Johnson, 1st August 2008 - -#ifndef SLICE_SAMPLER_H -#define SLICE_SAMPLER_H - -#include <algorithm> -#include <cassert> -#include <cmath> -#include <iostream> -#include <limits> - -//! slice_sampler_rfc_type{} returns the value of a user-specified -//! function if the argument is within range, or - infinity otherwise -// -template <typename F, typename Fn, typename U> -struct slice_sampler_rfc_type { - F min_x, max_x; - const Fn& f; - U max_nfeval, nfeval; - slice_sampler_rfc_type(F min_x, F max_x, const Fn& f, U max_nfeval) - : min_x(min_x), max_x(max_x), f(f), max_nfeval(max_nfeval), nfeval(0) { } - - F operator() (F x) { - if (min_x < x && x < max_x) { - assert(++nfeval <= max_nfeval); - F fx = f(x); - assert(std::isfinite(fx)); - return fx; - } - else - return -std::numeric_limits<F>::infinity(); - } -}; // slice_sampler_rfc_type{} - -//! slice_sampler1d() implements the univariate "range doubling" slice sampler -//! described in Neal (2003) "Slice Sampling", The Annals of Statistics 31(3), 705-767. -// -template <typename F, typename LogF, typename Uniform01> -F slice_sampler1d(const LogF& logF0, //!< log of function to sample - F x, //!< starting point - Uniform01& u01, //!< uniform [0,1) random number generator - F min_x = -std::numeric_limits<F>::infinity(), //!< minimum value of support - F max_x = std::numeric_limits<F>::infinity(), //!< maximum value of support - F w = 0.0, //!< guess at initial width - unsigned nsamples=1, //!< number of samples to draw - unsigned max_nfeval=200) //!< max number of function evaluations -{ - typedef unsigned U; - slice_sampler_rfc_type<F,LogF,U> logF(min_x, max_x, logF0, max_nfeval); - - assert(std::isfinite(x)); - - if (w <= 0.0) { // set w to a default width - if (min_x > -std::numeric_limits<F>::infinity() && max_x < std::numeric_limits<F>::infinity()) - w = (max_x - min_x)/4; - else - w = std::max(((x < 0.0) ? -x : x)/4, (F) 0.1); - } - assert(std::isfinite(w)); - - F logFx = logF(x); - for (U sample = 0; sample < nsamples; ++sample) { - F logY = logFx + log(u01()+1e-100); //! slice logFx at this value - assert(std::isfinite(logY)); - - F xl = x - w*u01(); //! lower bound on slice interval - F logFxl = logF(xl); - F xr = xl + w; //! upper bound on slice interval - F logFxr = logF(xr); - - while (logY < logFxl || logY < logFxr) // doubling procedure - if (u01() < 0.5) - logFxl = logF(xl -= xr - xl); - else - logFxr = logF(xr += xr - xl); - - F xl1 = xl; - F xr1 = xr; - while (true) { // shrinking procedure - F x1 = xl1 + u01()*(xr1 - xl1); - if (logY < logF(x1)) { - F xl2 = xl; // acceptance procedure - F xr2 = xr; - bool d = false; - while (xr2 - xl2 > 1.1*w) { - F xm = (xl2 + xr2)/2; - if ((x < xm && x1 >= xm) || (x >= xm && x1 < xm)) - d = true; - if (x1 < xm) - xr2 = xm; - else - xl2 = xm; - if (d && logY >= logF(xl2) && logY >= logF(xr2)) - goto unacceptable; - } - x = x1; - goto acceptable; - } - goto acceptable; - unacceptable: - if (x1 < x) // rest of shrinking procedure - xl1 = x1; - else - xr1 = x1; - } - acceptable: - w = (4*w + (xr1 - xl1))/5; // update width estimate - } - return x; -} - -/* -//! slice_sampler1d() implements a 1-d MCMC slice sampler. -//! It should be correct for unimodal distributions, but -//! not for multimodal ones. 
-// -template <typename F, typename LogP, typename Uniform01> -F slice_sampler1d(const LogP& logP, //!< log of distribution to sample - F x, //!< initial sample - Uniform01& u01, //!< uniform random number generator - F min_x = -std::numeric_limits<F>::infinity(), //!< minimum value of support - F max_x = std::numeric_limits<F>::infinity(), //!< maximum value of support - F w = 0.0, //!< guess at initial width - unsigned nsamples=1, //!< number of samples to draw - unsigned max_nfeval=200) //!< max number of function evaluations -{ - typedef unsigned U; - assert(std::isfinite(x)); - if (w <= 0.0) { - if (min_x > -std::numeric_limits<F>::infinity() && max_x < std::numeric_limits<F>::infinity()) - w = (max_x - min_x)/4; - else - w = std::max(((x < 0.0) ? -x : x)/4, 0.1); - } - // TRACE4(x, min_x, max_x, w); - F logPx = logP(x); - assert(std::isfinite(logPx)); - U nfeval = 1; - for (U sample = 0; sample < nsamples; ++sample) { - F x0 = x; - F logU = logPx + log(u01()+1e-100); - assert(std::isfinite(logU)); - F r = u01(); - F xl = std::max(min_x, x - r*w); - F xr = std::min(max_x, x + (1-r)*w); - // TRACE3(x, logPx, logU); - while (xl > min_x && logP(xl) > logU) { - xl -= w; - w *= 2; - ++nfeval; - if (nfeval >= max_nfeval) - std::cerr << "## Error: nfeval = " << nfeval << ", max_nfeval = " << max_nfeval << ", sample = " << sample << ", nsamples = " << nsamples << ", r = " << r << ", w = " << w << ", xl = " << xl << std::endl; - assert(nfeval < max_nfeval); - } - xl = std::max(xl, min_x); - while (xr < max_x && logP(xr) > logU) { - xr += w; - w *= 2; - ++nfeval; - if (nfeval >= max_nfeval) - std::cerr << "## Error: nfeval = " << nfeval << ", max_nfeval = " << max_nfeval << ", sample = " << sample << ", nsamples = " << nsamples << ", r = " << r << ", w = " << w << ", xr = " << xr << std::endl; - assert(nfeval < max_nfeval); - } - xr = std::min(xr, max_x); - while (true) { - r = u01(); - x = r*xl + (1-r)*xr; - assert(std::isfinite(x)); - logPx = logP(x); - // TRACE4(logPx, x, xl, xr); - assert(std::isfinite(logPx)); - ++nfeval; - if (nfeval >= max_nfeval) - std::cerr << "## Error: nfeval = " << nfeval << ", max_nfeval = " << max_nfeval << ", sample = " << sample << ", nsamples = " << nsamples << ", r = " << r << ", w = " << w << ", xl = " << xl << ", xr = " << xr << ", x = " << x << std::endl; - assert(nfeval < max_nfeval); - if (logPx > logU) - break; - else if (x > x0) - xr = x; - else - xl = x; - } - // w = (4*w + (xr-xl))/5; // gradually adjust w - } - // TRACE2(logPx, x); - return x; -} // slice_sampler1d() -*/ - -#endif // SLICE_SAMPLER_H diff --git a/gi/pyp-topics/src/timing.h b/gi/pyp-topics/src/timing.h deleted file mode 100644 index 08360b0f..00000000 --- a/gi/pyp-topics/src/timing.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef TIMING_H -#define TIMING_H - -#ifdef __CYGWIN__ -# ifndef _POSIX_MONOTONIC_CLOCK -# define _POSIX_MONOTONIC_CLOCK -// this modifies <time.h> -# endif -// in case someone included <time.h> before we got here (this is lifted from time.h>) -# ifndef CLOCK_MONOTONIC -# define CLOCK_MONOTONIC (clockid_t)4 -# endif -#endif - - -#include <time.h> -#include <sys/time.h> -#include "clock_gettime_stub.c" - -struct Timer { - Timer() { Reset(); } - void Reset() - { - clock_gettime(CLOCK_MONOTONIC, &start_t); - } - double Elapsed() const { - timespec end_t; - clock_gettime(CLOCK_MONOTONIC, &end_t); - const double elapsed = (end_t.tv_sec - start_t.tv_sec) - + (end_t.tv_nsec - start_t.tv_nsec) / 1000000000.0; - return elapsed; - } - private: - timespec start_t; -}; - -#endif diff 
--git a/gi/pyp-topics/src/train-contexts.cc b/gi/pyp-topics/src/train-contexts.cc deleted file mode 100644 index 9463f9fc..00000000 --- a/gi/pyp-topics/src/train-contexts.cc +++ /dev/null @@ -1,174 +0,0 @@ -// STL -#include <iostream> -#include <fstream> -#include <algorithm> -#include <iterator> - -// Boost -#include <boost/program_options/parsers.hpp> -#include <boost/program_options/variables_map.hpp> -#include <boost/scoped_ptr.hpp> - -// Local -#include "pyp-topics.hh" -#include "corpus.hh" -#include "contexts_corpus.hh" -#include "gzstream.hh" - -static const char *REVISION = "$Rev$"; - -// Namespaces -using namespace boost; -using namespace boost::program_options; -using namespace std; - -int main(int argc, char **argv) -{ - cout << "Pitman Yor topic models: Copyright 2010 Phil Blunsom\n"; - cout << REVISION << '\n' <<endl; - - //////////////////////////////////////////////////////////////////////////////////////////// - // Command line processing - variables_map vm; - - // Command line processing - { - options_description cmdline_specific("Command line specific options"); - cmdline_specific.add_options() - ("help,h", "print help message") - ("config,c", value<string>(), "config file specifying additional command line options") - ; - options_description config_options("Allowed options"); - config_options.add_options() - ("data,d", value<string>(), "file containing the documents and context terms") - ("topics,t", value<int>()->default_value(50), "number of topics") - ("document-topics-out,o", value<string>(), "file to write the document topics to") - ("default-topics-out", value<string>(), "file to write default term topic assignments.") - ("topic-words-out,w", value<string>(), "file to write the topic word distribution to") - ("samples,s", value<int>()->default_value(10), "number of sampling passes through the data") - ("backoff-type", value<string>(), "backoff type: none|simple") -// ("filter-singleton-contexts", "filter singleton contexts") - ("hierarchical-topics", "Use a backoff hierarchical PYP as the P0 for the document topics distribution.") - ("freq-cutoff-start", value<int>()->default_value(0), "initial frequency cutoff.") - ("freq-cutoff-end", value<int>()->default_value(0), "final frequency cutoff.") - ("freq-cutoff-interval", value<int>()->default_value(0), "number of iterations between frequency decrement.") - ("max-threads", value<int>()->default_value(1), "maximum number of simultaneous threads allowed") - ("max-contexts-per-document", value<int>()->default_value(0), "Only sample the n most frequent contexts for a document.") - ("num-jobs", value<int>()->default_value(1), "allows finer control over parallelization") - ("temp-start", value<double>()->default_value(1.0), "starting annealing temperature.") - ("temp-end", value<double>()->default_value(1.0), "end annealing temperature.") - ; - - cmdline_specific.add(config_options); - - store(parse_command_line(argc, argv, cmdline_specific), vm); - notify(vm); - - if (vm.count("config") > 0) { - ifstream config(vm["config"].as<string>().c_str()); - store(parse_config_file(config, config_options), vm); - } - - if (vm.count("help")) { - cout << cmdline_specific << "\n"; - return 1; - } - } - //////////////////////////////////////////////////////////////////////////////////////////// - - if (!vm.count("data")) { - cerr << "Please specify a file containing the data." 
<< endl; - return 1; - } - assert(vm["max-threads"].as<int>() > 0); - assert(vm["num-jobs"].as<int>() > -1); - // seed the random number generator: 0 = automatic, specify value otherwise - unsigned long seed = 0; - PYPTopics model(vm["topics"].as<int>(), vm.count("hierarchical-topics"), seed, vm["max-threads"].as<int>(), vm["num-jobs"].as<int>()); - - // read the data - BackoffGenerator* backoff_gen=0; - if (vm.count("backoff-type")) { - if (vm["backoff-type"].as<std::string>() == "none") { - backoff_gen = 0; - } - else if (vm["backoff-type"].as<std::string>() == "simple") { - backoff_gen = new SimpleBackoffGenerator(); - } - else { - cerr << "Backoff type (--backoff-type) must be one of none|simple." <<endl; - return(1); - } - } - - ContextsCorpus contexts_corpus; - contexts_corpus.read_contexts(vm["data"].as<string>(), backoff_gen, /*vm.count("filter-singleton-contexts")*/ false); - model.set_backoff(contexts_corpus.backoff_index()); - - if (backoff_gen) - delete backoff_gen; - - // train the sampler - model.sample_corpus(contexts_corpus, vm["samples"].as<int>(), - vm["freq-cutoff-start"].as<int>(), - vm["freq-cutoff-end"].as<int>(), - vm["freq-cutoff-interval"].as<int>(), - vm["max-contexts-per-document"].as<int>(), - vm["temp-start"].as<double>(), vm["temp-end"].as<double>()); - - if (vm.count("document-topics-out")) { - ogzstream documents_out(vm["document-topics-out"].as<string>().c_str()); - - int document_id=0; - map<int,int> all_terms; - for (Corpus::const_iterator corpusIt=contexts_corpus.begin(); - corpusIt != contexts_corpus.end(); ++corpusIt, ++document_id) { - vector<int> unique_terms; - for (Document::const_iterator docIt=corpusIt->begin(); - docIt != corpusIt->end(); ++docIt) { - if (unique_terms.empty() || *docIt != unique_terms.back()) - unique_terms.push_back(*docIt); - // increment this terms frequency - pair<map<int,int>::iterator,bool> insert_result = all_terms.insert(make_pair(*docIt,1)); - if (!insert_result.second) - all_terms[*docIt] = all_terms[*docIt] + 1; - //insert_result.first++; - } - documents_out << contexts_corpus.key(document_id) << '\t'; - documents_out << model.max(document_id).first << " " << corpusIt->size() << " ||| "; - for (std::vector<int>::const_iterator termIt=unique_terms.begin(); - termIt != unique_terms.end(); ++termIt) { - if (termIt != unique_terms.begin()) - documents_out << " ||| "; - vector<std::string> strings = contexts_corpus.context2string(*termIt); - copy(strings.begin(), strings.end(),ostream_iterator<std::string>(documents_out, " ")); - std::pair<int,PYPTopics::F> maxinfo = model.max(document_id, *termIt); - documents_out << "||| C=" << maxinfo.first << " P=" << maxinfo.second; - - } - documents_out <<endl; - } - documents_out.close(); - - if (vm.count("default-topics-out")) { - ofstream default_topics(vm["default-topics-out"].as<string>().c_str()); - default_topics << model.max_topic() <<endl; - for (std::map<int,int>::const_iterator termIt=all_terms.begin(); termIt != all_terms.end(); ++termIt) { - vector<std::string> strings = contexts_corpus.context2string(termIt->first); - default_topics << model.max(-1, termIt->first).first << " ||| " << termIt->second << " ||| "; - copy(strings.begin(), strings.end(),ostream_iterator<std::string>(default_topics, " ")); - default_topics <<endl; - } - } - } - - if (vm.count("topic-words-out")) { - ogzstream topics_out(vm["topic-words-out"].as<string>().c_str()); - model.print_topic_terms(topics_out); - topics_out.close(); - } - - cout <<endl; - - return 0; -} diff --git 
a/gi/pyp-topics/src/train.cc b/gi/pyp-topics/src/train.cc deleted file mode 100644 index db7ca46e..00000000 --- a/gi/pyp-topics/src/train.cc +++ /dev/null @@ -1,135 +0,0 @@ -// STL -#include <iostream> -#include <fstream> - -// Boost -#include <boost/program_options/parsers.hpp> -#include <boost/program_options/variables_map.hpp> -#include <boost/scoped_ptr.hpp> - -// Local -#include "pyp-topics.hh" -#include "corpus.hh" -#include "contexts_corpus.hh" -#include "gzstream.hh" - -static const char *REVISION = "$Rev$"; - -// Namespaces -using namespace boost; -using namespace boost::program_options; -using namespace std; - -int main(int argc, char **argv) -{ - std::cout << "Pitman Yor topic models: Copyright 2010 Phil Blunsom\n"; - std::cout << REVISION << '\n' << std::endl; - - //////////////////////////////////////////////////////////////////////////////////////////// - // Command line processing - variables_map vm; - - // Command line processing - options_description cmdline_specific("Command line specific options"); - cmdline_specific.add_options() - ("help,h", "print help message") - ("config,c", value<string>(), "config file specifying additional command line options") - ; - options_description generic("Allowed options"); - generic.add_options() - ("documents,d", value<string>(), "file containing the documents") - ("topics,t", value<int>()->default_value(50), "number of topics") - ("document-topics-out,o", value<string>(), "file to write the document topics to") - ("topic-words-out,w", value<string>(), "file to write the topic word distribution to") - ("samples,s", value<int>()->default_value(10), "number of sampling passes through the data") - ("test-corpus", value<string>(), "file containing the test data") - ("backoff-paths", value<string>(), "file containing the term backoff paths") - ; - options_description config_options, cmdline_options; - config_options.add(generic); - cmdline_options.add(generic).add(cmdline_specific); - - store(parse_command_line(argc, argv, cmdline_options), vm); - if (vm.count("config") > 0) { - ifstream config(vm["config"].as<string>().c_str()); - store(parse_config_file(config, cmdline_options), vm); - } - notify(vm); - //////////////////////////////////////////////////////////////////////////////////////////// - - if (vm.count("documents") == 0) { - cerr << "Please specify a file containing the documents." 
<< endl; - cout << cmdline_options << "\n"; - return 1; - } - - if (vm.count("help")) { - cout << cmdline_options << "\n"; - return 1; - } - - // seed the random number generator: 0 = automatic, specify value otherwise - unsigned long seed = 0; - PYPTopics model(vm["topics"].as<int>(), false, seed); - - // read the data - Corpus corpus; - corpus.read(vm["documents"].as<string>()); - - // read the backoff dictionary - if (vm.count("backoff-paths")) - model.set_backoff(vm["backoff-paths"].as<string>()); - - // train the sampler - model.sample_corpus(corpus, vm["samples"].as<int>()); - - if (vm.count("document-topics-out")) { - ogzstream documents_out(vm["document-topics-out"].as<string>().c_str()); - //model.print_document_topics(documents_out); - - int document_id=0; - for (Corpus::const_iterator corpusIt=corpus.begin(); - corpusIt != corpus.end(); ++corpusIt, ++document_id) { - std::vector<int> unique_terms; - for (Document::const_iterator docIt=corpusIt->begin(); - docIt != corpusIt->end(); ++docIt) { - if (unique_terms.empty() || *docIt != unique_terms.back()) - unique_terms.push_back(*docIt); - } - documents_out << unique_terms.size(); - for (std::vector<int>::const_iterator termIt=unique_terms.begin(); - termIt != unique_terms.end(); ++termIt) - documents_out << " " << *termIt << ":" << model.max(document_id, *termIt).first; - documents_out << std::endl; - } - documents_out.close(); - } - - if (vm.count("topic-words-out")) { - ogzstream topics_out(vm["topic-words-out"].as<string>().c_str()); - model.print_topic_terms(topics_out); - topics_out.close(); - } - - if (vm.count("test-corpus")) { - TestCorpus test_corpus; - test_corpus.read(vm["test-corpus"].as<string>()); - ogzstream topics_out((vm["test-corpus"].as<string>() + ".topics.gz").c_str()); - - for (TestCorpus::const_iterator corpusIt=test_corpus.begin(); - corpusIt != test_corpus.end(); ++corpusIt) { - int index=0; - for (DocumentTerms::const_iterator instanceIt=corpusIt->begin(); - instanceIt != corpusIt->end(); ++instanceIt, ++index) { - int topic = model.max(instanceIt->doc, instanceIt->term).first; - if (index != 0) topics_out << " "; - topics_out << topic; - } - topics_out << std::endl; - } - topics_out.close(); - } - std::cout << std::endl; - - return 0; -} diff --git a/gi/pyp-topics/src/utility.h b/gi/pyp-topics/src/utility.h deleted file mode 100644 index 405a5b0a..00000000 --- a/gi/pyp-topics/src/utility.h +++ /dev/null @@ -1,962 +0,0 @@ -// utility.h -// -// (c) Mark Johnson, 24th January 2005 -// -// modified 6th May 2002 to ensure write/read consistency, fixed 18th July 2002 -// modified 14th July 2002 to include insert() (generic inserter) -// modified 26th September 2003 to use mapped_type instead of data_type -// 25th August 2004 added istream >> const char* -// 24th January 2005 added insert_newkey() -// -// Defines: -// loop macros foreach, cforeach -// dfind (default find function) -// afind (find function that asserts key exists) -// insert_newkey (inserts a new key into a map) -// insert (generic inserter into standard data structures) -// disjoint (set operation) -// first_lessthan and second_lessthan (compares elements of pairs) -// -// Simplified interfaces to STL routines: -// -// includes (simplified interface) -// set_intersection (simplified interface) -// inserter (simplified interface) -// max_element (simplified interface) -// min_element (simplified interface) -// hash functions for pairs, vectors, lists, slists and maps -// input and output for pairs and vectors -// resource_usage (interface 
improved) - - -#ifndef UTILITY_H -#define UTILITY_H - -#include <algorithm> -// #include <boost/smart_ptr.hpp> // Comment out this line if boost is not used -#include <cassert> -#include <cmath> -#include <cctype> -#include <cstdio> -#include <unordered_map> -#include <unordered_set> -#include <ext/slist> -#include <iostream> -#include <iterator> -#include <list> -#include <map> -#include <set> -#include <string> -#include <utility> -#include <vector> -#include <memory> - -#if (__GNUC__ > 3) || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 1) -#define EXT_NAMESPACE __gnu_cxx -#else -#define EXT_NAMESPACE std -#endif - -namespace ext = EXT_NAMESPACE; - -inline float power(float x, float y) { return powf(x, y); } -inline double power(double x, double y) { return pow(x, y); } -inline long double power(long double x, long double y) { return powl(x, y); } - -typedef unsigned U; -typedef long double F; // slower than double, but underflows less - -/////////////////////////////////////////////////////////////////////////// -// // -// Looping constructs // -// // -/////////////////////////////////////////////////////////////////////////// - -// foreach is a simple loop construct -// -// STORE should be an STL container -// TYPE is the typename of STORE -// VAR will be defined as a local variable of type TYPE::iterator -// -#define foreach(TYPE, VAR, STORE) \ - for (TYPE::iterator VAR = (STORE).begin(); VAR != (STORE).end(); ++VAR) - -// cforeach is just like foreach, except that VAR is a const_iterator -// -// STORE should be an STL container -// TYPE is the typename of STORE -// VAR will be defined as a local variable of type TYPE::const_iterator -// -#define cforeach(TYPE, VAR, STORE) \ - for (TYPE::const_iterator VAR = (STORE).begin(); VAR != (STORE).end(); ++VAR) - - -/////////////////////////////////////////////////////////////////////////// -// // -// Map searching // -// // -// dfind(map, key) returns the key's value in map, or map's default // -// value if no such key exists (the default value is not inserted) // -// // -// afind(map, key) returns a reference to the key's value in map, and // -// asserts that this value exists // -// // -/////////////////////////////////////////////////////////////////////////// - -// dfind(Map, Key) returns the value Map associates with Key, or the -// Map's default value if no such Key exists -// -template <class Map, class Key> -inline typename Map::mapped_type dfind(Map& m, const Key& k) -{ - typename Map::iterator i = m.find(k); - if (i == m.end()) - return typename Map::mapped_type(); - else - return i->second; -} - -template <class Map, class Key> -inline const typename Map::mapped_type dfind(const Map& m, const Key& k) -{ - typename Map::const_iterator i = m.find(k); - if (i == m.end()) - return typename Map::mapped_type(); - else - return i->second; -} - - -// afind(map, key) returns a reference to the value associated -// with key in map. It uses assert to check that the key's value -// is defined. -// -template <class Map, class Key> -inline typename Map::mapped_type& afind(Map& m, const Key& k) -{ - typename Map::iterator i = m.find(k); - assert(i != m.end()); - return i->second; -} - -template <class Map, class Key> -inline const typename Map::mapped_type& afind(const Map& m, const Key& k) -{ - typename Map::const_iterator i = m.find(k); - assert(i != m.end()); - return i->second; -} - -//! insert_newkey(map, key, value) checks that map does not contain -//! key, and binds key to value. 
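To make the contract of the map-searching helpers above concrete, here is a minimal self-contained sketch; the `counts` map is hypothetical and `dfind`/`afind` are re-stated from their documentation rather than taken verbatim from the deleted header:

    // Sketch of the documented dfind/afind behaviour (illustrative only).
    #include <cassert>
    #include <map>
    #include <string>

    template <class Map, class Key>
    typename Map::mapped_type dfind(Map& m, const Key& k) {
      typename Map::iterator i = m.find(k);
      // absent key: return a default-constructed value, insert nothing
      return i == m.end() ? typename Map::mapped_type() : i->second;
    }

    template <class Map, class Key>
    typename Map::mapped_type& afind(Map& m, const Key& k) {
      typename Map::iterator i = m.find(k);
      assert(i != m.end());   // the key must already be present
      return i->second;
    }

    int main() {
      std::map<std::string, int> counts;
      counts["the"] = 3;
      assert(dfind(counts, std::string("the")) == 3);
      assert(dfind(counts, std::string("cat")) == 0);  // default int, no insert
      assert(counts.size() == 1);                      // map is unchanged
      afind(counts, std::string("the")) += 1;          // mutable reference
      return 0;
    }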
- // -template <class Map, class Key, class Value> -inline typename Map::value_type& -insert_newkey(Map& m, const Key& k,const Value& v) -{ - std::pair<typename Map::iterator, bool> itb - = m.insert(typename Map::value_type(k, v)); - assert(itb.second); - return *(itb.first); -} // insert_newkey() - - -/////////////////////////////////////////////////////////////////////////// -// // -// Insert operations // -// // -/////////////////////////////////////////////////////////////////////////// - - -template <typename T> -void insert(std::list<T>& xs, const T& x) { - xs.push_back(x); -} - -template <typename T> -void insert(std::set<T>& xs, const T& x) { - xs.insert(x); -} - -template <typename T> -void insert(std::vector<T>& xs, const T& x) { - xs.push_back(x); -} - - -/////////////////////////////////////////////////////////////////////////// -// // -// Additional versions of standard algorithms // -// // -/////////////////////////////////////////////////////////////////////////// - -template <typename Set1, typename Set2> -inline bool includes(const Set1& set1, const Set2& set2) -{ - return std::includes(set1.begin(), set1.end(), set2.begin(), set2.end()); -} - -template <typename Set1, typename Set2, typename Compare> -inline bool includes(const Set1& set1, const Set2& set2, Compare comp) -{ - return std::includes(set1.begin(), set1.end(), set2.begin(), set2.end(), comp); -} - - -template <typename InputIter1, typename InputIter2> -bool disjoint(InputIter1 first1, InputIter1 last1, - InputIter2 first2, InputIter2 last2) -{ - while (first1 != last1 && first2 != last2) - if (*first1 < *first2) - ++first1; - else if (*first2 < *first1) - ++first2; - else // *first1 == *first2 - return false; - return true; -} - -template <typename InputIter1, typename InputIter2, typename Compare> -bool disjoint(InputIter1 first1, InputIter1 last1, - InputIter2 first2, InputIter2 last2, Compare comp) -{ - while (first1 != last1 && first2 != last2) - if (comp(*first1, *first2)) - ++first1; - else if (comp(*first2, *first1)) - ++first2; - else // *first1 == *first2 - return false; - return true; -} - -template <typename Set1, typename Set2> -inline bool disjoint(const Set1& set1, const Set2& set2) -{ - return disjoint(set1.begin(), set1.end(), set2.begin(), set2.end()); -} - -template <typename Set1, typename Set2, typename Compare> -inline bool disjoint(const Set1& set1, const Set2& set2, Compare comp) -{ - return disjoint(set1.begin(), set1.end(), set2.begin(), set2.end(), comp); -} - - -template <typename Set1, typename Set2, typename OutputIterator> -inline OutputIterator set_intersection(const Set1& set1, const Set2& set2, - OutputIterator result) -{ - return set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(), result); -} - -template <typename Set1, typename Set2, typename OutputIterator, typename Compare> -inline OutputIterator set_intersection(const Set1& set1, const Set2& set2, - OutputIterator result, Compare comp) -{ - return set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(), result, comp); -} - - -template <typename Container> -inline std::insert_iterator<Container> inserter(Container& container) -{ - return std::inserter(container, container.begin()); -} - -// max_element -// -template <class Es> inline typename Es::iterator max_element(Es& es) -{ - return std::max_element(es.begin(), es.end()); -} - -template <class Es> inline typename Es::const_iterator max_element(const Es& es) -{ - return std::max_element(es.begin(), es.end()); -} - -template <class Es, class
BinaryPredicate> -inline typename Es::iterator max_element(Es& es, BinaryPredicate comp) -{ - return std::max_element(es.begin(), es.end(), comp); -} - -template <class Es, class BinaryPredicate> -inline typename Es::const_iterator max_element(const Es& es, BinaryPredicate comp) -{ - return std::max_element(es.begin(), es.end(), comp); -} - -// min_element -// -template <class Es> inline typename Es::iterator min_element(Es& es) -{ - return std::min_element(es.begin(), es.end()); -} - -template <class Es> inline typename Es::const_iterator min_element(const Es& es) -{ - return std::min_element(es.begin(), es.end()); -} - -template <class Es, class BinaryPredicate> -inline typename Es::iterator min_element(Es& es, BinaryPredicate comp) -{ - return std::min_element(es.begin(), es.end(), comp); -} - -template <class Es, class BinaryPredicate> -inline typename Es::const_iterator min_element(const Es& es, BinaryPredicate comp) -{ - return std::min_element(es.begin(), es.end(), comp); -} - -// first_lessthan and second_lessthan -// -struct first_lessthan { - template <typename T1, typename T2> - bool operator() (const T1& e1, const T2& e2) { - return e1.first < e2.first; - } -}; - -struct second_lessthan { - template <typename T1, typename T2> - bool operator() (const T1& e1, const T2& e2) { - return e1.second < e2.second; - } -}; - -// first_greaterthan and second_greaterthan -// -struct first_greaterthan { - template <typename T1, typename T2> - bool operator() (const T1& e1, const T2& e2) { - return e1.first > e2.first; - } -}; - -struct second_greaterthan { - template <typename T1, typename T2> - bool operator() (const T1& e1, const T2& e2) { - return e1.second > e2.second; - } -}; - - -/////////////////////////////////////////////////////////////////////////// -// // -// hash<> specializations // -// // -// These must be in namespace std. They permit the corresponding STL // -// container to be used as a key in an STL hash table. // -// // -/////////////////////////////////////////////////////////////////////////// - -//namespace EXT_NAMESPACE { -namespace std { - /* - // hash function for bool - // - template <> struct hash<bool> - { - size_t operator() (bool b) const - { - return b; - } // operator() - }; // hash<bool>{} - - // hash function for double - // - template <> struct hash<double> - { - size_t operator() (double d) const - { - int exponent; - double fraction = frexp(d, &exponent); - return size_t(exponent) ^ size_t(1000000.0*(fabs(fraction-0.5))); - } // operator() - }; // hash<double>{} - - // hash function for strings - // - template <> struct hash<std::string> - { - size_t operator()(const std::string& s) const - { - typedef std::string::const_iterator CI; - - unsigned long h = 0; - unsigned long g; - CI p = s.begin(); - CI end = s.end(); - - while (p!=end) { - h = (h << 4) + (*p++); - if ((g = h&0xf0000000)) { - h = h ^ (g >> 24); - h = h ^ g; - }} - return size_t(h); - } // operator() - }; // hash<string>{} - -*/ - // hash function for arbitrary pairs - // - template<class T1, class T2> struct hash<std::pair<T1,T2> > { - size_t operator()(const std::pair<T1,T2>& p) const - { - size_t h1 = hash<T1>()(p.first); - size_t h2 = hash<T2>()(p.second); - return h1 ^ (h1 >> 1) ^ h2 ^ (h2 << 1); - } - }; - - - // hash function for vectors - // - template<class T> struct hash<std::vector<T> > - { // This is the fn hashpjw of Aho, Sethi and Ullman, p 436. 
- size_t operator()(const std::vector<T>& s) const - { - typedef typename std::vector<T>::const_iterator CI; - - unsigned long h = 0; - unsigned long g; - CI p = s.begin(); - CI end = s.end(); - - while (p!=end) { - h = (h << 5) + hash<T>()(*p++); - if ((g = h&0xff000000)) { - h = h ^ (g >> 23); - h = h ^ g; - }} - return size_t(h); - } - }; - - // hash function for slists - // - template<class T> struct hash<ext::slist<T> > - { // This is the fn hashpjw of Aho, Sethi and Ullman, p 436. - size_t operator()(const ext::slist<T>& s) const - { - typedef typename ext::slist<T>::const_iterator CI; - - unsigned long h = 0; - unsigned long g; - CI p = s.begin(); - CI end = s.end(); - - while (p!=end) { - h = (h << 7) + hash<T>()(*p++); - if ((g = h&0xff000000)) { - h = h ^ (g >> 23); - h = h ^ g; - }} - return size_t(h); - } - }; - - // hash function for maps - // - template<typename T1, typename T2> struct hash<std::map<T1,T2> > - { - size_t operator()(const std::map<T1,T2>& m) const - { - typedef typename std::map<T1,T2> M; - typedef typename M::const_iterator CI; - - unsigned long h = 0; - unsigned long g; - CI p = m.begin(); - CI end = m.end(); - - while (p != end) { - h = (h << 11) + hash<typename M::value_type>()(*p++); - if ((g = h&0xff000000)) { - h = h ^ (g >> 23); - h = h ^ g; - }} - return size_t(h); - } - }; - -} // namespace EXT_NAMESPACE - - - -/////////////////////////////////////////////////////////////////////////// -// // -// Write/Read code // -// // -// These routines should possess write/read invariance IF their elements // -// also have write-read invariance. Whitespace, '(' and ')' are used as // -// delimiters. // -// // -/////////////////////////////////////////////////////////////////////////// - - -// Define istream >> const char* so that it consumes the characters from the -// istream. Just as in scanf, a space consumes an arbitrary amount of whitespace. 
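With the operator defined just below, `is >> " ( "` skips any leading blanks and then requires a literal parenthesis. The stand-alone sketch that follows restates the same matching logic in a hypothetical `expect` helper, purely for illustration; it is not the header's operator:

    // Approximation of the documented matching semantics: a ' ' in the
    // pattern consumes any run of whitespace; any other character must
    // match the next input character, else failbit is set.
    #include <iostream>
    #include <sstream>

    std::istream& expect(std::istream& is, const char* cp) {
      for (; *cp; ++cp) {
        if (*cp == ' ') {
          is >> std::ws;               // pattern space eats arbitrary whitespace
        } else {
          char c;
          if (!is.get(c)) return is;   // stream already in a failed state
          if (c != *cp) {
            is.unget();                // leave the mismatched character behind
            is.setstate(std::ios::failbit);
            return is;
          }
        }
      }
      return is;
    }

    int main() {
      std::istringstream in("  ( 1   2 )");
      int x, y;
      expect(in, " (");                // skip blanks, match '('
      in >> x >> y;
      expect(in, " )");                // skip blanks, match ')'
      std::cout << (in ? "ok: " : "fail: ") << x << ' ' << y << '\n';
      return 0;
    }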
-// -inline std::istream& operator>> (std::istream& is, const char* cp) -{ - if (*cp == '\0') - return is; - else if (*cp == ' ') { - char c; - if (is.get(c)) { - if (isspace(c)) - return is >> cp; - else { - is.unget(); - return is >> (cp+1); - } - } - else { - is.clear(is.rdstate() & ~std::ios::failbit); // clear failbit - return is >> (cp+1); - } - } - else { - char c; - if (is.get(c)) { - if (c == *cp) - return is >> (cp+1); - else { - is.unget(); - is.setstate(std::ios::failbit); - } - } - return is; - } -} - - -// Write out an auto_ptr object just as you would write out the pointer object -// -template <typename T> -inline std::ostream& operator<<(std::ostream& os, const std::auto_ptr<T>& sp) -{ - return os << sp.get(); -} - - -// Pairs -// -template <class T1, class T2> -std::ostream& operator<< (std::ostream& os, const std::pair<T1,T2>& p) -{ - return os << '(' << p.first << ' ' << p.second << ')'; -} - -template <class T1, class T2> -std::istream& operator>> (std::istream& is, std::pair<T1,T2>& p) -{ - char c; - if (is >> c) { - if (c == '(') { - if (is >> p.first >> p.second >> c && c == ')') - return is; - else - is.setstate(std::ios::badbit); - } - else - is.putback(c); - } - is.setstate(std::ios::failbit); - return is; -} - -// Lists -// -template <class T> -std::ostream& operator<< (std::ostream& os, const std::list<T>& xs) -{ - os << '('; - for (typename std::list<T>::const_iterator xi = xs.begin(); xi != xs.end(); ++xi) { - if (xi != xs.begin()) - os << ' '; - os << *xi; - } - return os << ')'; -} - -template <class T> -std::istream& operator>> (std::istream& is, std::list<T>& xs) -{ - char c; // This code avoids unnecessary copy - if (is >> c) { // read the initial '(' - if (c == '(') { - xs.clear(); // clear the list - do { - xs.push_back(T()); // create a new elt in list - is >> xs.back(); // read element - } - while (is.good()); // read as long as possible - xs.pop_back(); // last read failed; pop last elt - is.clear(is.rdstate() & ~std::ios::failbit); // clear failbit - if (is >> c && c == ')') // read terminating ')' - return is; // successful return - else - is.setstate(std::ios::badbit); // something went wrong, set badbit - } - else // c is not '(' - is.putback(c); // put c back into input - } - is.setstate(std::ios::failbit); // read failed, set failbit - return is; -} - -// Vectors -// -template <class T> -std::ostream& operator<< (std::ostream& os, const std::vector<T>& xs) -{ - os << '('; - for (typename std::vector<T>::const_iterator xi = xs.begin(); xi != xs.end(); ++xi) { - if (xi != xs.begin()) - os << ' '; - os << *xi; - } - return os << ')'; -} - -template <class T> -std::istream& operator>> (std::istream& is, std::vector<T>& xs) -{ - char c; // This code avoids unnecessary copy - if (is >> c) { // read the initial '(' - if (c == '(') { - xs.clear(); // clear the list - do { - xs.push_back(T()); // create a new elt in list - is >> xs.back(); // read element - } - while (is.good()); // read as long as possible - xs.pop_back(); // last read failed; pop last elt - is.clear(is.rdstate() & ~std::ios::failbit); // clear failbit - if (is >> c && c == ')') // read terminating ')' - return is; // successful return - else - is.setstate(std::ios::badbit); // something went wrong, set badbit - } - else // c is not '(' - is.putback(c); // put c back into input - } - is.setstate(std::ios::failbit); // read failed, set failbit - return is; -} - -// Slists -// -template <class T> -std::ostream& operator<< (std::ostream& os, const ext::slist<T>& xs) -{ - os << '('; - for 
(typename ext::slist<T>::const_iterator xi = xs.begin(); xi != xs.end(); ++xi) { - if (xi != xs.begin()) - os << ' '; - os << *xi; - } - return os << ')'; -} - -template <class T> -std::istream& operator>> (std::istream& is, ext::slist<T>& xs) -{ - char c; - if (is >> c) { - if (c == '(') { - xs.clear(); - T e; - if (is >> e) { - xs.push_front(e); - typename ext::slist<T>::iterator xi = xs.begin(); - while (is >> e) - xi = xs.insert_after(xi, e); - is.clear(is.rdstate() & ~std::ios::failbit); - if (is >> c && c == ')') - return is; - else - is.setstate(std::ios::badbit); - } - else { // empty list - is.clear(is.rdstate() & ~std::ios::failbit); - if (is >> c && c == ')') - return is; - else // didn't see closing ')' - is.setstate(std::ios::badbit); - } - } - else // didn't read '(' - is.putback(c); - } - is.setstate(std::ios::failbit); - return is; -} - -// Sets -// -template <class T> -std::ostream& operator<< (std::ostream& os, const std::set<T>& s) -{ - os << '('; - for (typename std::set<T>::const_iterator i = s.begin(); i != s.end(); ++i) { - if (i != s.begin()) - os << ' '; - os << *i; - } - return os << ')'; -} - -template <class T> -std::istream& operator>> (std::istream& is, std::set<T>& s) -{ - char c; - if (is >> c) { - if (c == '(') { - s.clear(); - T e; - while (is >> e) - s.insert(e); - is.clear(is.rdstate() & ~std::ios::failbit); - if (is >> c && c == ')') - return is; - else - is.setstate(std::ios::badbit); - } - else - is.putback(c); - } - is.setstate(std::ios::failbit); - return is; -} - -// Hash_sets -// -template <class T> -std::ostream& operator<< (std::ostream& os, const std::unordered_set<T>& s) -{ - os << '('; - for (typename std::unordered_set<T>::const_iterator i = s.begin(); i != s.end(); ++i) { - if (i != s.begin()) - os << ' '; - os << *i; - } - return os << ')'; -} - -template <class T> -std::istream& operator>> (std::istream& is, std::unordered_set<T>& s) -{ - char c; - if (is >> c) { - if (c == '(') { - s.clear(); - T e; - while (is >> e) - s.insert(e); - is.clear(is.rdstate() & ~std::ios::failbit); - if (is >> c && c == ')') - return is; - else - is.setstate(std::ios::badbit); - } - else - is.putback(c); - } - is.setstate(std::ios::failbit); - return is; -} - - -// Maps -// -template <class Key, class Value> -std::ostream& operator<< (std::ostream& os, const std::map<Key,Value>& m) -{ - typedef std::map<Key,Value> M; - os << '('; - for (typename M::const_iterator it = m.begin(); it != m.end(); ++it) { - if (it != m.begin()) - os << ' '; - os << *it; - } - return os << ")"; -} - -template <class Key, class Value> -std::istream& operator>> (std::istream& is, std::map<Key,Value>& m) -{ - char c; - if (is >> c) { - if (c == '(') { - m.clear(); - std::pair<Key,Value> e; - while (is >> e) - m.insert(e); - is.clear(is.rdstate() & ~std::ios::failbit); - if (is >> c && c == ')') - return is; - else - is.setstate(std::ios::badbit); - } - else - is.putback(c); - } - is.setstate(std::ios::failbit); - return is; -} - -// Hash_maps -// -template <class Key, class Value> -std::ostream& operator<< (std::ostream& os, const std::unordered_map<Key,Value>& m) -{ - typedef std::unordered_map<Key,Value> M; - os << '('; - for (typename M::const_iterator it = m.begin(); it != m.end(); ++it) { - if (it != m.begin()) - os << ' '; - os << *it; - } - return os << ")"; -} - -template <class Key, class Value> -std::istream& operator>> (std::istream& is, std::unordered_map<Key,Value>& m) -{ - char c; - if (is >> c) { - if (c == '(') { - m.clear(); - std::pair<Key,Value> e; - while (is >> 
e) - m.insert(e); - is.clear(is.rdstate() & ~std::ios::failbit); - if (is >> c && c == ')') - return is; - else - is.setstate(std::ios::badbit); - } - else - is.putback(c); - } - is.setstate(std::ios::failbit); - return is; -} - - -/////////////////////////////////////////////////////////////////////////// -// // -// Boost library additions // -// // -/////////////////////////////////////////////////////////////////////////// - -#ifdef BOOST_SHARED_PTR_HPP_INCLUDED - -// enhancements to boost::shared_ptr so it can be used with hash -// -namespace std { - template <typename T> struct equal_to<boost::shared_ptr<T> > - : public binary_function<boost::shared_ptr<T>, boost::shared_ptr<T>, bool> { - bool operator() (const boost::shared_ptr<T>& p1, const boost::shared_ptr<T>& p2) const { - return equal_to<T*>()(p1.get(), p2.get()); - } - }; -} // namespace std - -//namespace EXT_NAMESPACE { -namespace std { - template <typename T> struct hash<boost::shared_ptr<T> > { - size_t operator() (const boost::shared_ptr<T>& a) const { - return hash<T*>()(a.get()); - } - }; -} // namespace ext - -template <typename T> -inline std::ostream& operator<< (std::ostream& os, const boost::shared_ptr<T>& sp) -{ - return os << sp.get(); -} - -#endif // BOOST_SHARED_PTR_HPP_INCLUDED - -struct resource_usage { }; - -#ifndef __i386 -inline std::ostream& operator<< (std::ostream& os, resource_usage r) -{ - return os; -} -#else // Assume we are on a 586 linux -inline std::ostream& operator<< (std::ostream& os, resource_usage r) -{ - FILE* fp = fopen("/proc/self/stat", "r"); - assert(fp); - int utime; - int stime; - unsigned int vsize; - unsigned int rss; - int result = - fscanf(fp, "%*d %*s %*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %d %d %*d %*d %*d %*d" - "%*u %*u %*d %u %u", &utime, &stime, &vsize, &rss); - assert(result == 4); - fclose(fp); - // s << "utime = " << utime << ", stime = " << stime << ", vsize = " << vsize << ", rss = " << rss - ; - // return s << "utime = " << utime << ", vsize = " << vsize; - return os << "utime " << float(utime)/1.0e2 << "s, vsize " - << float(vsize)/1048576.0 << " Mb."; -} -#endif - -//! A default_value_type{} object is used to read an object from a stream, -//! assigning a default value if the read fails. Users should not need to -//! construct such objects, but should use default_value() instead. -// -template <typename object_type, typename default_type> -struct default_value_type { - object_type& object; - const default_type defaultvalue; - default_value_type(object_type& object, const default_type defaultvalue) - : object(object), defaultvalue(defaultvalue) { } -}; - -//! default_value() is used to read an object from a stream, assigning a -//! default value if the read fails. It returns a default_value_type{} -//! object, which does the actual reading. -// -template <typename object_type, typename default_type> -default_value_type<object_type,default_type> -default_value(object_type& object, const default_type defaultvalue=default_type()) { - return default_value_type<object_type,default_type>(object, defaultvalue); -} - -//! This version of operator>>() reads default_value_type{} from an input stream. 
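In practice the `default_value()` helper documented above is used as a stream manipulator for fallible reads. The sketch below restates the machinery compactly so it compiles on its own; the stream contents and variable names are made up, and the real definitions follow in the header:

    // Compact restatement of the default_value idea (illustrative only).
    #include <iostream>
    #include <sstream>

    template <typename T, typename D>
    struct default_value_type {
      T& object;
      const D defaultvalue;
      default_value_type(T& o, const D d) : object(o), defaultvalue(d) {}
    };

    template <typename T, typename D>
    default_value_type<T, D> default_value(T& object, const D d = D()) {
      return default_value_type<T, D>(object, d);
    }

    template <typename T, typename D>
    std::istream& operator>>(std::istream& is, default_value_type<T, D> dv) {
      if (is && !(is >> dv.object)) {
        is.clear(is.rdstate() & ~std::ios::failbit);  // recover from bad read
        dv.object = dv.defaultvalue;                  // fall back to default
      }
      return is;
    }

    int main() {
      std::istringstream in("42 oops");
      int a = 0, b = 0;
      in >> default_value(a, -1) >> default_value(b, -1);
      std::cout << a << ' ' << b << '\n';  // prints "42 -1": second read failed
      return 0;
    }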
- // -template <typename object_type, typename default_type> -std::istream& operator>> (std::istream& is, - default_value_type<object_type, default_type> dv) { - if (is) { - if (is >> dv.object) - ; - else { - is.clear(is.rdstate() & ~std::ios::failbit); // clear failbit - dv.object = dv.defaultvalue; - } - } - return is; -} - -// inline F random1() { return rand()/(RAND_MAX+1.0); } -inline F random1() { return mt_genrand_res53(); } - -#endif // UTILITY_H diff --git a/gi/pyp-topics/src/workers.hh b/gi/pyp-topics/src/workers.hh deleted file mode 100644 index 95b18947..00000000 --- a/gi/pyp-topics/src/workers.hh +++ /dev/null @@ -1,275 +0,0 @@ -/** - Basic thread-pool tools using Boost.Thread. - (Jan Botha, 7/2010) - - --Simple usage-- - Use SimpleWorker. - Example: call a function that returns an int in a new thread: - typedef boost::function<int()> JobType; - JobType job = boost::bind(funcname); - //or boost::bind(&class::funcname, this) for a member function - SimpleWorker<JobType, int> worker(job); - int result = worker.getResult(); //blocks until result is ready - - --Extended usage-- - Use WorkerPool, which uses Queuemt (a synchronized queue) and Worker. - Example: - (same context and typedefs as above) - WorkerPool<JobType, int> pool(num_threads); - JobType job = ... - pool.addJob(job); - ... - pool.get_result(); //blocks until all workers are done, returns the sum of their results. - - Jobs added to a WorkerPool need to be of the same type. A WorkerPool instance should not be reused (e.g. adding jobs) after calling get_result(). -*/ - -#ifndef WORKERS_HH -#define WORKERS_HH - -#include <iostream> -#include <boost/bind.hpp> -#include <boost/function.hpp> -#include <queue> -#include <boost/ptr_container/ptr_vector.hpp> -#include <boost/thread/thread.hpp> -#include <boost/thread/mutex.hpp> -#include <boost/thread/shared_mutex.hpp> -#include <boost/thread/future.hpp> -#include <boost/thread/condition.hpp> - -#include <boost/date_time/posix_time/posix_time_types.hpp> -#include "timing.h" - -/** Implements a synchronized queue*/ -template<typename J> -class Queuemt -{ - -public: - boost::condition_variable_any cond; - const bool& running; - - Queuemt() { } - Queuemt(const bool& running) : running(running), maxsize(0), qsize(0) - { - } - - ~Queuemt() { - } - - J pop() - { - J job; - { - boost::unique_lock<boost::shared_mutex> qlock(q_mutex); - while (running && qsize == 0) - cond.wait(qlock); - - if (qsize > 0) - { - job = q.front(); - q.pop(); - --qsize; - } - } - if (job) - cond.notify_one(); - return job; - - } - - void push(J job) - { - { - boost::unique_lock<boost::shared_mutex> lock(q_mutex); - q.push(job); - ++qsize; - } - if (qsize > maxsize) - maxsize = qsize; - - cond.notify_one(); - } - - int getMaxsize() - { - return maxsize; - } - - int size() - { - return qsize; - } - -private: - boost::shared_mutex q_mutex; - std::queue<J> q; - int maxsize; - volatile int qsize; -}; - - -template<typename J, typename R> -class Worker -{ -typedef boost::packaged_task<R> PackagedTask; -public: - Worker(Queuemt<J>& queue, int id, int num_workers) : - q(queue), tasktime(0.0), id(id), num_workers(num_workers) - { - PackagedTask task(boost::bind(&Worker<J, R>::run, this)); - future = task.get_future(); - boost::thread t(boost::move(task)); - } - - R run() //this is called upon thread creation - { - R wresult = 0; - while (isRunning()) - { - J job = q.pop(); - - if (job) - { - timer.Reset(); - wresult += job(); - tasktime += timer.Elapsed(); - } - } - return wresult; - } - - R getResult() - { - if
(!future.is_ready()) - future.wait(); - assert(future.is_ready()); - return future.get(); - } - - double getTaskTime() - { - return tasktime; - } - -private: - - Queuemt<J>& q; - - boost::unique_future<R> future; - - bool isRunning() - { - return q.running || q.size() > 0; - } - - Timer timer; - double tasktime; - int id; - int num_workers; -}; - -template<typename J, typename R> -class WorkerPool -{ -typedef boost::packaged_task<R> PackagedTask; -typedef Worker<J,R> WJR; -typedef boost::ptr_vector<WJR> WorkerVector; -public: - - WorkerPool(int num_workers) - { - q.reset(new Queuemt<J>(running)); - running = true; - for (int i = 0; i < num_workers; ++i) - workers.push_back( new Worker<J, R>(*q, i, num_workers) ); - } - - ~WorkerPool() - { - } - - R get_result() - { - running = false; - q->cond.notify_all(); - R tmp = 0; - double tasktime = 0.0; - for (typename WorkerVector::iterator it = workers.begin(); it != workers.end(); it++) - { - R res = it->getResult(); - tmp += res; - //std::cerr << "tasktime: " << it->getTaskTime() << std::endl; - tasktime += it->getTaskTime(); - } -// std::cerr << " maxQ = " << q->getMaxsize() << std::endl; - return tmp; - } - - void addJob(J job) - { - q->push(job); - } - -private: - - WorkerVector workers; - - boost::shared_ptr<Queuemt<J> > q; - - bool running; -}; - -/////////////////// -template <typename J, typename R> -class SimpleWorker -{ -typedef boost::packaged_task<R> PackagedTask; -public: - SimpleWorker(J& job) : job(job), tasktime(0.0) - { - PackagedTask task(boost::bind(&SimpleWorker<J, R>::run, this)); - future = task.get_future(); - boost::thread t(boost::move(task)); - } - - R run() //this is called upon thread creation - { - R wresult = 0; - - assert(job); - timer.Reset(); - wresult = job(); - tasktime = timer.Elapsed(); - std::cerr << tasktime << " s" << std::endl; - return wresult; - } - - R getResult() - { - if (!future.is_ready()) - future.wait(); - assert(future.is_ready()); - return future.get(); - } - - double getTaskTime() - { - return tasktime; - } - -private: - - J job; - - boost::unique_future<R> future; - - Timer timer; - double tasktime; - -}; - - - -#endif diff --git a/gi/scripts/buck2utf8.pl b/gi/scripts/buck2utf8.pl deleted file mode 100755 index 1acfae8d..00000000 --- a/gi/scripts/buck2utf8.pl +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/perl -w -use strict; -use utf8; -binmode(STDOUT, ":utf8"); -while(<>) { - chomp; - my @words = split /\s+/; - for my $w (@words) { - $_ = $w; - if ($w =~ /^__NTK__/o) { - s/__NTK__//go; - next if /^$/; - print STDOUT "$_ "; - next; - } -s/tR/\x{0679}/g; # retroflex t -s/dR/\x{0688}/g; # retroflex d -s/rR/\x{0691}/g; # retroflex r -s/p/\x{067E}/g; # peh -s/c/\x{0686}/g; # tcheh -s/g/\x{06AF}/g; # geh (G=ghain) -s/@/\x{06BE}/g; # heh doachashmee -s/h'/\x{06c2}/g; # heh goal + hamza -s/h/\x{06c1}/g; # heh goal -s/J/\x{0698}/g; # zheh (rare, usually persian loan words) -s/k/\x{06A9}/g; # k -s/Y'/\x{06d3}/g; # yeh barree + hamza above (ligature) -s/y/\x{06cc}/g; # same as ya' in arabic -s/Y/\x{06d2}/g; # yeh barree -s/N/\x{06BA}/g; # Ghunna - - s/\'/\x{0621}/g; - s/\|/\x{0622}/g; - s/\>/\x{0623}/g; - s/\&/\x{0624}/g; - s/\</\x{0625}/g; - s/\}/\x{0626}/g; - s/A/\x{0627}/g; - s/b/\x{0628}/g; - s/t/\x{062A}/g; - s/v/\x{062B}/g; - s/j/\x{062C}/g; - s/H/\x{062D}/g; - s/x/\x{062E}/g; - s/d/\x{062F}/g; - s/\*/\x{0630}/g; - s/r/\x{0631}/g; - s/z/\x{0632}/g; - s/s/\x{0633}/g; - s/\$/\x{0634}/g; - s/S/\x{0635}/g; - s/D/\x{0636}/g; - s/T/\x{0637}/g; - s/Z/\x{0638}/g; - s/E/\x{0639}/g; - s/g/\x{063A}/g; - 
s/_/\x{0640}/g; - s/f/\x{0641}/g; - s/q/\x{0642}/g; - s/k/\x{0643}/g; - s/l/\x{0644}/g; - s/m/\x{0645}/g; - s/n/\x{0646}/g; - s/h/\x{0647}/g; - s/w/\x{0648}/g; - s/Y/\x{0649}/g; - s/y/\x{064A}/g; - s/F/\x{064B}/g; - s/N/\x{064C}/g; - s/K/\x{064D}/g; - s/a/\x{064E}/g; - s/u/\x{064F}/g; - s/i/\x{0650}/g; - s/\~/\x{0651}/g; - s/o/\x{0652}/g; - s/\`/\x{0670}/g; - s/\{/\x{0671}/g; - s/P/\x{067E}/g; - s/J/\x{0686}/g; - s/V/\x{06A4}/g; - s/G/\x{06AF}/g; - - -print STDOUT "$_ "; - } - print STDOUT "\n"; -} diff --git a/jam-files/LICENSE_1_0.txt b/jam-files/LICENSE_1_0.txt deleted file mode 100644 index 36b7cd93..00000000 --- a/jam-files/LICENSE_1_0.txt +++ /dev/null @@ -1,23 +0,0 @@ -Boost Software License - Version 1.0 - August 17th, 2003 - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/jam-files/boost-build/boost-build.jam b/jam-files/boost-build/boost-build.jam deleted file mode 100644 index 73db0497..00000000 --- a/jam-files/boost-build/boost-build.jam +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2001, 2002 Dave Abrahams -# Copyright 2002 Rene Rivera -# Copyright 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - - -boost-build kernel ; diff --git a/jam-files/boost-build/bootstrap.jam b/jam-files/boost-build/bootstrap.jam deleted file mode 100644 index af3e8bf5..00000000 --- a/jam-files/boost-build/bootstrap.jam +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2003 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# This file handles initial phase of Boost.Build loading. -# Boost.Jam has already figured out where Boost.Build is -# and loads this file, which is responsible for initialization -# of basic facilities such a module system and loading the -# main Boost.Build module, build-system.jam. -# -# Exact operation of this module is not interesting, it makes -# sense to look at build-system.jam right away. - -# Load the kernel/bootstrap.jam, which does all the work. -.bootstrap-file = $(.bootstrap-file:D)/kernel/bootstrap.jam ; -include $(.bootstrap-file) ;
\ No newline at end of file diff --git a/jam-files/boost-build/build-system.jam b/jam-files/boost-build/build-system.jam deleted file mode 100644 index 9f9c884c..00000000 --- a/jam-files/boost-build/build-system.jam +++ /dev/null @@ -1,1008 +0,0 @@ -# Copyright 2003, 2005, 2007 Dave Abrahams -# Copyright 2006, 2007 Rene Rivera -# Copyright 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This file is part of Boost Build version 2. You can think of it as forming the -# main() routine. It is invoked by the bootstrapping code in bootstrap.jam. - -import build-request ; -import builtin ; -import "class" : new ; -import errors ; -import feature ; -import make ; -import modules ; -import os ; -import path ; -import project ; -import property-set ; -import regex ; -import sequence ; -import targets ; -import toolset ; -import utility ; -import version ; -import virtual-target ; -import generators ; -import configure ; - -################################################################################ -# -# Module global data. -# -################################################################################ - -# Shortcut used in this module for accessing used command-line parameters. -.argv = [ modules.peek : ARGV ] ; - -# Flag indicating we should display additional debugging information related to -# locating and loading Boost Build configuration files. -.debug-config = [ MATCH ^(--debug-configuration)$ : $(.argv) ] ; - -# Legacy option doing too many things, some of which are not even documented. -# Should be phased out. -# * Disables loading site and user configuration files. -# * Disables auto-configuration for toolsets specified explicitly on the -# command-line. -# * Causes --toolset command-line options to be ignored. -# * Prevents the default toolset from being used even if no toolset has been -# configured at all. -.legacy-ignore-config = [ MATCH ^(--ignore-config)$ : $(.argv) ] ; - -# The cleaning is tricky. Say, if user says 'bjam --clean foo' where 'foo' is a -# directory, then we want to clean targets which are in 'foo' as well as those -# in any children Jamfiles under foo but not in any unrelated Jamfiles. To -# achieve this we collect a list of projects under which cleaning is allowed. -.project-targets = ; - -# Virtual targets obtained when building main targets references on the command -# line. When running 'bjam --clean main_target' we want to clean only files -# belonging to that main target so we need to record which targets are produced -# for it. -.results-of-main-targets = ; - -# Was an XML dump requested? -.out-xml = [ MATCH ^--out-xml=(.*)$ : $(.argv) ] ; - -# Default toolset & version to be used in case no other toolset has been used -# explicitly by either the loaded configuration files, the loaded project build -# scripts or an explicit toolset request on the command line. If not specified, -# an arbitrary default will be used based on the current host OS. This value, -# while not strictly necessary, has been added to allow testing Boost-Build's -# default toolset usage functionality. -.default-toolset = ; -.default-toolset-version = ; - - -################################################################################ -# -# Public rules. -# -################################################################################ - -# Returns the property set with the free features from the currently processed -# build request. 
-# -rule command-line-free-features ( ) -{ - return $(.command-line-free-features) ; -} - - -# Returns the location of the build system. The primary use case is building -# Boost where it is sometimes needed to get the location of other components -# (e.g. BoostBook files) and it is convenient to use locations relative to the -# Boost Build path. -# -rule location ( ) -{ - local r = [ modules.binding build-system ] ; - return $(r:P) ; -} - - -# Sets the default toolset & version to be used in case no other toolset has -# been used explicitly by either the loaded configuration files, the loaded -# project build scripts or an explicit toolset request on the command line. For -# more detailed information see the comment related to used global variables. -# -rule set-default-toolset ( toolset : version ? ) -{ - .default-toolset = $(toolset) ; - .default-toolset-version = $(version) ; -} - -rule set-pre-build-hook ( function ) -{ - .pre-build-hook = $(function) ; -} - -rule set-post-build-hook ( function ) -{ - .post-build-hook = $(function) ; -} - -################################################################################ -# -# Local rules. -# -################################################################################ - -# Returns actual Jam targets to be used for executing a clean request. -# -local rule actual-clean-targets ( ) -{ - # Construct a list of projects explicitly detected as targets on this build - # system run. These are the projects under which cleaning is allowed. - for local t in $(targets) - { - if [ class.is-a $(t) : project-target ] - { - .project-targets += [ $(t).project-module ] ; - } - } - - # Construct a list of targets explicitly detected on this build system run - # as a result of building main targets. - local targets-to-clean ; - for local t in $(.results-of-main-targets) - { - # Do not include roots or sources. - targets-to-clean += [ virtual-target.traverse $(t) ] ; - } - targets-to-clean = [ sequence.unique $(targets-to-clean) ] ; - - local to-clean ; - for local t in [ virtual-target.all-targets ] - { - local p = [ $(t).project ] ; - - # Remove only derived targets. - if [ $(t).action ] - { - if $(t) in $(targets-to-clean) || - [ should-clean-project [ $(p).project-module ] ] = true - { - to-clean += $(t) ; - } - } - } - - local to-clean-actual ; - for local t in $(to-clean) - { - to-clean-actual += [ $(t).actualize ] ; - } - return $(to-clean-actual) ; -} - - -# Given a target id, try to find and return the corresponding target. This is -# only invoked when there is no Jamfile in ".". This code somewhat duplicates -# code in project-target.find but we can not reuse that code without a -# project-targets instance. -# -local rule find-target ( target-id ) -{ - local split = [ MATCH (.*)//(.*) : $(target-id) ] ; - - local pm ; - if $(split) - { - pm = [ project.find $(split[1]) : "." ] ; - } - else - { - pm = [ project.find $(target-id) : "." ] ; - } - - local result ; - if $(pm) - { - result = [ project.target $(pm) ] ; - } - - if $(split) - { - result = [ $(result).find $(split[2]) ] ; - } - - return $(result) ; -} - - -# Initializes a new configuration module. -# -local rule initialize-config-module ( module-name : location ? ) -{ - project.initialize $(module-name) : $(location) ; - if USER_MODULE in [ RULENAMES ] - { - USER_MODULE $(module-name) ; - } -} - - -# Helper rule used to load configuration files. Loads the first configuration -# file with the given 'filename' at 'path' into module with name 'module-name'. 
-# Not finding the requested file may or may not be treated as an error depending -# on the must-find parameter. Returns a normalized path to the loaded -# configuration file or nothing if no file was loaded. -# -local rule load-config ( module-name : filename : path + : must-find ? ) -{ - if $(.debug-config) - { - ECHO "notice: Searching" "$(path)" "for" "$(module-name)" - "configuration file" "$(filename)" "." ; - } - local where = [ GLOB $(path) : $(filename) ] ; - if $(where) - { - where = [ NORMALIZE_PATH $(where[1]) ] ; - if $(.debug-config) - { - ECHO "notice: Loading" "$(module-name)" "configuration file" - "$(filename)" "from" $(where) "." ; - } - - # Set source location so that path-constant in config files - # with relative paths work. This is of most importance - # for project-config.jam, but may be used in other - # config files as well. - local attributes = [ project.attributes $(module-name) ] ; - $(attributes).set source-location : $(where:D) : exact ; - modules.load $(module-name) : $(filename) : $(path) ; - project.load-used-projects $(module-name) ; - } - else - { - if $(must-find) - { - errors.user-error "Configuration file" "$(filename)" "not found in" - "$(path)" "." ; - } - if $(.debug-config) - { - ECHO "notice:" "Configuration file" "$(filename)" "not found in" - "$(path)" "." ; - } - } - return $(where) ; -} - - -# Loads all the configuration files used by Boost Build in the following order: -# -# -- test-config -- -# Loaded only if specified on the command-line using the --test-config -# command-line parameter. It is ok for this file not to exist even if specified. -# If this configuration file is loaded, regular site and user configuration -# files will not be. If a relative path is specified, file is searched for in -# the current folder. -# -# -- site-config -- -# Always named site-config.jam. Will only be found if located on the system -# root path (Windows), /etc (non-Windows), user's home folder or the Boost Build -# path, in that order. Not loaded in case the test-config configuration file is -# loaded or either the --ignore-site-config or the --ignore-config command-line -# option is specified. -# -# -- user-config -- -# Named user-config.jam by default or may be named explicitly using the -# --user-config command-line option or the BOOST_BUILD_USER_CONFIG environment -# variable. If named explicitly the file is looked for from the current working -# directory and if the default one is used then it is searched for in the -# user's home directory and the Boost Build path, in that order. Not loaded in -# case either the test-config configuration file is loaded, --ignore-config -# command-line option is specified or an empty file name is explicitly -# specified. If the file name has been given explicitly then the file must -# exist. -# -# Test configurations have been added primarily for use by Boost Build's -# internal unit testing system but may be used freely in other places as well. -# -local rule load-configuration-files -{ - # Flag indicating that site configuration should not be loaded. - local ignore-site-config = - [ MATCH ^(--ignore-site-config)$ : $(.argv) ] ; - - if $(.legacy-ignore-config) && $(.debug-config) - { - ECHO "notice: Regular site and user configuration files will be ignored" ; - ECHO "notice: due to the --ignore-config command-line option." 
; - } - - initialize-config-module test-config ; - local test-config = [ MATCH ^--test-config=(.*)$ : $(.argv) ] ; - local uq = [ MATCH \"(.*)\" : $(test-config) ] ; - if $(uq) - { - test-config = $(uq) ; - } - if $(test-config) - { - local where = - [ load-config test-config : $(test-config:BS) : $(test-config:D) ] ; - if $(where) - { - if $(.debug-config) && ! $(.legacy-ignore-config) - { - ECHO "notice: Regular site and user configuration files will" ; - ECHO "notice: be ignored due to the test configuration being" - "loaded." ; - } - } - else - { - test-config = ; - } - } - - local user-path = [ os.home-directories ] [ os.environ BOOST_BUILD_PATH ] ; - local site-path = /etc $(user-path) ; - if [ os.name ] in NT CYGWIN - { - site-path = [ modules.peek : SystemRoot ] $(user-path) ; - } - - if $(ignore-site-config) && ! $(.legacy-ignore-config) - { - ECHO "notice: Site configuration files will be ignored due to the" ; - ECHO "notice: --ignore-site-config command-line option." ; - } - - initialize-config-module site-config ; - if ! $(test-config) && ! $(ignore-site-config) && ! $(.legacy-ignore-config) - { - load-config site-config : site-config.jam : $(site-path) ; - } - - initialize-config-module user-config ; - if ! $(test-config) && ! $(.legacy-ignore-config) - { - local user-config = [ MATCH ^--user-config=(.*)$ : $(.argv) ] ; - user-config = $(user-config[-1]) ; - user-config ?= [ os.environ BOOST_BUILD_USER_CONFIG ] ; - # Special handling for the case when the OS does not strip the quotes - # around the file name, as is the case when using Cygwin bash. - user-config = [ utility.unquote $(user-config) ] ; - local explicitly-requested = $(user-config) ; - user-config ?= user-config.jam ; - - if $(user-config) - { - if $(explicitly-requested) - { - # Treat explicitly entered user paths as native OS path - # references and, if non-absolute, root them at the current - # working directory. - user-config = [ path.make $(user-config) ] ; - user-config = [ path.root $(user-config) [ path.pwd ] ] ; - user-config = [ path.native $(user-config) ] ; - - if $(.debug-config) - { - ECHO "notice: Loading explicitly specified user" - "configuration file:" ; - ECHO " $(user-config)" ; - } - - load-config user-config : $(user-config:BS) : $(user-config:D) - : must-exist ; - } - else - { - load-config user-config : $(user-config) : $(user-path) ; - } - } - else if $(.debug-config) - { - ECHO "notice: User configuration file loading explicitly disabled." ; - } - } - - # We look for project-config.jam from "." upward. - # I am not sure this is 100% the right decision, we might as well check for - # it only alongside the Jamroot file. However: - # - # - We need to load project-root.jam before Jamroot - # - We probably would need to load project-root.jam even if there's no - # Jamroot - e.g. to implement automake-style out-of-tree builds. - local file = [ path.glob "." : project-config.jam ] ; - if ! $(file) - { - file = [ path.glob-in-parents "." : project-config.jam ] ; - } - if $(file) - { - initialize-config-module project-config : $(file:D) ; - load-config project-config : project-config.jam : $(file:D) ; - } -} - - -# Autoconfigure toolsets based on any instances of --toolset=xx,yy,...zz or -# toolset=xx,yy,...zz in the command line. May return additional properties to -# be processed as if they had been specified by the user.
-# -local rule process-explicit-toolset-requests -{ - local extra-properties ; - - local option-toolsets = [ regex.split-list [ MATCH ^--toolset=(.*)$ : $(.argv) ] : "," ] ; - local feature-toolsets = [ regex.split-list [ MATCH ^toolset=(.*)$ : $(.argv) ] : "," ] ; - - for local t in $(option-toolsets) $(feature-toolsets) - { - # Parse toolset-version/properties. - local (t-v,t,v) = [ MATCH (([^-/]+)-?([^/]+)?)/?.* : $(t) ] ; - local toolset-version = $((t-v,t,v)[1]) ; - local toolset = $((t-v,t,v)[2]) ; - local version = $((t-v,t,v)[3]) ; - - if $(.debug-config) - { - ECHO notice: [cmdline-cfg] Detected command-line request for - $(toolset-version): "toolset=" $(toolset) "version=" - $(version) ; - } - - # If the toolset is not known, configure it now. - local known ; - if $(toolset) in [ feature.values <toolset> ] - { - known = true ; - } - if $(known) && $(version) && ! [ feature.is-subvalue toolset - : $(toolset) : version : $(version) ] - { - known = ; - } - # TODO: we should do 'using $(toolset)' in case no version has been - # specified and there are no versions defined for the given toolset to - # allow the toolset to configure its default version. For this we need - # to know how to detect whether a given toolset has any versions - # defined. An alternative would be to do this whenever version is not - # specified but that would require that toolsets correctly handle the - # case when their default version is configured multiple times which - # should be checked for all existing toolsets first. - - if ! $(known) - { - if $(.debug-config) - { - ECHO "notice: [cmdline-cfg] toolset $(toolset-version) not" - "previously configured; attempting to auto-configure now" ; - } - toolset.using $(toolset) : $(version) ; - } - else - { - if $(.debug-config) - { - ECHO notice: [cmdline-cfg] toolset $(toolset-version) already - configured ; - } - } - - # Make sure we get an appropriate property into the build request in - # case toolset has been specified using the "--toolset=..." command-line - # option form. - if ! $(t) in $(.argv) && ! $(t) in $(feature-toolsets) - { - if $(.debug-config) - { - ECHO notice: [cmdline-cfg] adding toolset=$(t) to the build - request. ; - } - extra-properties += toolset=$(t) ; - } - } - - return $(extra-properties) ; -} - - -# Returns 'true' if the given 'project' is equal to or is a (possibly indirect) -# child to any of the projects requested to be cleaned in this build system run. -# Returns 'false' otherwise. Expects the .project-targets list to have already -# been constructed. -# -local rule should-clean-project ( project ) -{ - if ! $(.should-clean-project.$(project)) - { - local r = false ; - if $(project) in $(.project-targets) - { - r = true ; - } - else - { - local parent = [ project.attribute $(project) parent-module ] ; - if $(parent) && $(parent) != user-config - { - r = [ should-clean-project $(parent) ] ; - } - } - .should-clean-project.$(project) = $(r) ; - } - - return $(.should-clean-project.$(project)) ; -} - - -################################################################################ -# -# main() -# ------ -# -################################################################################ - -{ - if --version in $(.argv) - { - version.print ; - EXIT ; - } - - version.verify-engine-version ; - - load-configuration-files ; - - local extra-properties ; - # Note that this causes --toolset options to be ignored if --ignore-config - # is specified. - if ! 
$(.legacy-ignore-config) - { - extra-properties = [ process-explicit-toolset-requests ] ; - } - - - # We always load the project in "." so that 'use-project' directives have any - # chance of being seen. Otherwise, we would not be able to refer to - # subprojects using target ids. - local current-project ; - if [ project.find "." : "." ] - { - current-project = [ project.target [ project.load "." ] ] ; - } - - - # In case there are no toolsets currently defined, make the build run using - # the default toolset. - if ! $(.legacy-ignore-config) && ! [ feature.values <toolset> ] - { - local default-toolset = $(.default-toolset) ; - local default-toolset-version = ; - if $(default-toolset) - { - default-toolset-version = $(.default-toolset-version) ; - } - else - { - default-toolset = gcc ; - if [ os.name ] = NT - { - default-toolset = msvc ; - } - else if [ os.name ] = MACOSX - { - default-toolset = darwin ; - } - } - - ECHO "warning: No toolsets are configured." ; - ECHO "warning: Configuring default toolset" \"$(default-toolset)\". ; - ECHO "warning: If the default is wrong, your build may not work correctly." ; - ECHO "warning: Use the \"toolset=xxxxx\" option to override our guess." ; - ECHO "warning: For more configuration options, please consult" ; - ECHO "warning: http://boost.org/boost-build2/doc/html/bbv2/advanced/configuration.html" ; - - toolset.using $(default-toolset) : $(default-toolset-version) ; - } - - - # Parse command line for targets and properties. Note that this requires - # that all project files already be loaded. - local build-request = [ build-request.from-command-line $(.argv) - $(extra-properties) ] ; - local target-ids = [ $(build-request).get-at 1 ] ; - local properties = [ $(build-request).get-at 2 ] ; - - - # Expand properties specified on the command line into multiple property - # sets consisting of all legal property combinations. Each expanded property - # set will be used for a single build run. E.g. if multiple toolsets are - # specified then requested targets will be built with each of them. - if $(properties) - { - expanded = [ build-request.expand-no-defaults $(properties) ] ; - local xexpanded ; - for local e in $(expanded) - { - xexpanded += [ property-set.create [ feature.split $(e) ] ] ; - } - expanded = $(xexpanded) ; - } - else - { - expanded = [ property-set.empty ] ; - } - - - # Check that we actually found something to build. - if ! $(current-project) && ! $(target-ids) - { - errors.user-error "error: no Jamfile in current directory found, and no" - "target references specified." ; - EXIT ; - } - - - # Flags indicating that this build system run has been started in order to - # clean existing targets instead of creating new ones. Note that these are - # not the final flag values as they may get changed later on due to some - # special targets being specified on the command line. - local clean ; if "--clean" in $(.argv) { clean = true ; } - local cleanall ; if "--clean-all" in $(.argv) { cleanall = true ; } - - - # List of explicitly requested files to build. Any target references read - # from the command line that are not recognized as one of the targets - # defined in the loaded Jamfiles will be interpreted as an explicitly - # requested file to build. If any such files are explicitly requested then - # only those files and the targets they depend on will be built and they - # will be searched for among targets that would have been built had there - # been no explicitly requested files.
- local explicitly-requested-files - - - # List of Boost Build meta-targets, virtual-targets and actual Jam targets - # constructed in this build system run. - local targets ; - local virtual-targets ; - local actual-targets ; - - - # Process each target specified on the command-line and convert it into - # internal Boost Build target objects. Detect special clean target. If no - # main Boost Build targets were explictly requested use the current project - # as the target. - for local id in $(target-ids) - { - if $(id) = clean - { - clean = true ; - } - else - { - local t ; - if $(current-project) - { - t = [ $(current-project).find $(id) : no-error ] ; - } - else - { - t = [ find-target $(id) ] ; - } - - if ! $(t) - { - ECHO "notice: could not find main target" $(id) ; - ECHO "notice: assuming it is a name of file to create." ; - explicitly-requested-files += $(id) ; - } - else - { - targets += $(t) ; - } - } - } - if ! $(targets) - { - targets += [ project.target [ project.module-name "." ] ] ; - } - - if [ option.get dump-generators : : true ] - { - generators.dump ; - } - - # We wish to put config.log in the build directory corresponding - # to Jamroot, so that the location does not differ depending on - # directory where we do build. The amount of indirection necessary - # here is scary. - local first-project = [ $(targets[0]).project ] ; - local first-project-root-location = [ $(first-project).get project-root ] ; - local first-project-root-module = [ project.load $(first-project-root-location) ] ; - local first-project-root = [ project.target $(first-project-root-module) ] ; - local first-build-build-dir = [ $(first-project-root).build-dir ] ; - configure.set-log-file $(first-build-build-dir)/config.log ; - - # Now that we have a set of targets to build and a set of property sets to - # build the targets with, we can start the main build process by using each - # property set to generate virtual targets from all of our listed targets - # and any of their dependants. - for local p in $(expanded) - { - .command-line-free-features = [ property-set.create [ $(p).free ] ] ; - for local t in $(targets) - { - local g = [ $(t).generate $(p) ] ; - if ! [ class.is-a $(t) : project-target ] - { - .results-of-main-targets += $(g[2-]) ; - } - virtual-targets += $(g[2-]) ; - } - } - - - # Convert collected virtual targets into actual raw Jam targets. - for t in $(virtual-targets) - { - actual-targets += [ $(t).actualize ] ; - } - - - # If XML data output has been requested prepare additional rules and targets - # so we can hook into Jam to collect build data while its building and have - # it trigger the final XML report generation after all the planned targets - # have been built. - if $(.out-xml) - { - # Get a qualified virtual target name. - rule full-target-name ( target ) - { - local name = [ $(target).name ] ; - local project = [ $(target).project ] ; - local project-path = [ $(project).get location ] ; - return $(project-path)//$(name) ; - } - - # Generate an XML file containing build statistics for each constituent. - # - rule out-xml ( xml-file : constituents * ) - { - # Prepare valid XML header and footer with some basic info. 
- local nl = " -" ; - local os = [ modules.peek : OS OSPLAT JAMUNAME ] "" ; - local timestamp = [ modules.peek : JAMDATE ] ; - local cwd = [ PWD ] ; - local command = $(.argv) ; - local bb-version = [ version.boost-build ] ; - .header on $(xml-file) = - "<?xml version=\"1.0\" encoding=\"utf-8\"?>" - "$(nl)<build format=\"1.0\" version=\"$(bb-version)\">" - "$(nl) <os name=\"$(os[1])\" platform=\"$(os[2])\"><![CDATA[$(os[3-]:J= )]]></os>" - "$(nl) <timestamp><![CDATA[$(timestamp)]]></timestamp>" - "$(nl) <directory><![CDATA[$(cwd)]]></directory>" - "$(nl) <command><![CDATA[\"$(command:J=\" \")\"]]></command>" - ; - .footer on $(xml-file) = - "$(nl)</build>" ; - - # Generate the target dependency graph. - .contents on $(xml-file) += - "$(nl) <targets>" ; - for local t in [ virtual-target.all-targets ] - { - local action = [ $(t).action ] ; - if $(action) - # If a target has no action, it has no dependencies. - { - local name = [ full-target-name $(t) ] ; - local sources = [ $(action).sources ] ; - local dependencies ; - for local s in $(sources) - { - dependencies += [ full-target-name $(s) ] ; - } - - local path = [ $(t).path ] ; - local jam-target = [ $(t).actual-name ] ; - - .contents on $(xml-file) += - "$(nl) <target>" - "$(nl) <name><![CDATA[$(name)]]></name>" - "$(nl) <dependencies>" - "$(nl) <dependency><![CDATA[$(dependencies)]]></dependency>" - "$(nl) </dependencies>" - "$(nl) <path><![CDATA[$(path)]]></path>" - "$(nl) <jam-target><![CDATA[$(jam-target)]]></jam-target>" - "$(nl) </target>" - ; - } - } - .contents on $(xml-file) += - "$(nl) </targets>" ; - - # Build $(xml-file) after $(constituents). Do so even if a - # constituent action fails and regenerate the xml on every bjam run. - INCLUDES $(xml-file) : $(constituents) ; - ALWAYS $(xml-file) ; - __ACTION_RULE__ on $(xml-file) = build-system.out-xml.generate-action ; - out-xml.generate $(xml-file) ; - } - - # The actual build actions are here; if we did this work in the actions - # clause we would have to form a valid command line containing the - # result of @(...) below (the name of the XML file). - # - rule out-xml.generate-action ( args * : xml-file - : command status start end user system : output ? ) - { - local contents = - [ on $(xml-file) return $(.header) $(.contents) $(.footer) ] ; - local f = @($(xml-file):E=$(contents)) ; - } - - # Nothing to do here; the *real* actions happen in - # out-xml.generate-action. - actions quietly out-xml.generate { } - - # Define the out-xml file target, which depends on all the targets so - # that it runs the collection after the targets have run. - out-xml $(.out-xml) : $(actual-targets) ; - - # Set up a global __ACTION_RULE__ that records all the available - # statistics about each actual target in a variable "on" the --out-xml - # target. - # - rule out-xml.collect ( xml-file : target : command status start end user - system : output ? ) - { - local nl = " -" ; - # Open the action with some basic info. - .contents on $(xml-file) += - "$(nl) <action status=\"$(status)\" start=\"$(start)\" end=\"$(end)\" user=\"$(user)\" system=\"$(system)\">" ; - - # If we have an action object we can print out more detailed info. - local action = [ on $(target) return $(.action) ] ; - if $(action) - { - local action-name = [ $(action).action-name ] ; - local action-sources = [ $(action).sources ] ; - local action-props = [ $(action).properties ] ; - - # The qualified name of the action which we created the target. 
- .contents on $(xml-file) += - "$(nl) <name><![CDATA[$(action-name)]]></name>" ; - - # The sources that made up the target. - .contents on $(xml-file) += - "$(nl) <sources>" ; - for local source in $(action-sources) - { - local source-actual = [ $(source).actual-name ] ; - .contents on $(xml-file) += - "$(nl) <source><![CDATA[$(source-actual)]]></source>" ; - } - .contents on $(xml-file) += - "$(nl) </sources>" ; - - # The properties that define the conditions under which the - # target was built. - .contents on $(xml-file) += - "$(nl) <properties>" ; - for local prop in [ $(action-props).raw ] - { - local prop-name = [ MATCH ^<(.*)>$ : $(prop:G) ] ; - .contents on $(xml-file) += - "$(nl) <property name=\"$(prop-name)\"><![CDATA[$(prop:G=)]]></property>" ; - } - .contents on $(xml-file) += - "$(nl) </properties>" ; - } - - local locate = [ on $(target) return $(LOCATE) ] ; - locate ?= "" ; - .contents on $(xml-file) += - "$(nl) <jam-target><![CDATA[$(target)]]></jam-target>" - "$(nl) <path><![CDATA[$(target:G=:R=$(locate))]]></path>" - "$(nl) <command><![CDATA[$(command)]]></command>" - "$(nl) <output><![CDATA[$(output)]]></output>" ; - .contents on $(xml-file) += - "$(nl) </action>" ; - } - - # When no __ACTION_RULE__ is set "on" a target, the search falls back to - # the global module. - module - { - __ACTION_RULE__ = build-system.out-xml.collect - [ modules.peek build-system : .out-xml ] ; - } - - IMPORT - build-system : - out-xml.collect - out-xml.generate-action - : : - build-system.out-xml.collect - build-system.out-xml.generate-action - ; - } - - local j = [ option.get jobs ] ; - if $(j) - { - modules.poke : PARALLELISM : $(j) ; - } - - local k = [ option.get keep-going : true : true ] ; - if $(k) in "on" "yes" "true" - { - modules.poke : KEEP_GOING : 1 ; - } - else if $(k) in "off" "no" "false" - { - modules.poke : KEEP_GOING : 0 ; - } - else - { - ECHO "error: Invalid value for the --keep-going option" ; - EXIT ; - } - - # The 'all' pseudo target is not strictly needed expect in the case when we - # use it below but people often assume they always have this target - # available and do not declare it themselves before use which may cause - # build failures with an error message about not being able to build the - # 'all' target. - NOTFILE all ; - - # And now that all the actual raw Jam targets and all the dependencies - # between them have been prepared all that is left is to tell Jam to update - # those targets. - if $(explicitly-requested-files) - { - # Note that this case can not be joined with the regular one when only - # exact Boost Build targets are requested as here we do not build those - # requested targets but only use them to construct the dependency tree - # needed to build the explicitly requested files. - UPDATE $(explicitly-requested-files:G=e) $(.out-xml) ; - } - else if $(cleanall) - { - UPDATE clean-all ; - } - else if $(clean) - { - common.Clean clean : [ actual-clean-targets ] ; - UPDATE clean ; - } - else - { - configure.print-configure-checks-summary ; - - if $(.pre-build-hook) - { - $(.pre-build-hook) ; - } - - DEPENDS all : $(actual-targets) ; - if UPDATE_NOW in [ RULENAMES ] - { - local ok = [ UPDATE_NOW all $(.out-xml) ] ; - if $(.post-build-hook) - { - $(.post-build-hook) $(ok) ; - } - # Prevent automatic update of the 'all' target, now that - # we have explicitly updated what we wanted. 
- UPDATE ; - } - else - { - UPDATE all $(.out-xml) ; - } - } -} diff --git a/jam-files/boost-build/build/__init__.py b/jam-files/boost-build/build/__init__.py deleted file mode 100644 index e69de29b..00000000 --- a/jam-files/boost-build/build/__init__.py +++ /dev/null diff --git a/jam-files/boost-build/build/ac.jam b/jam-files/boost-build/build/ac.jam deleted file mode 100644 index 6768f358..00000000 --- a/jam-files/boost-build/build/ac.jam +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright (c) 2010 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import property-set ; -import path ; -import modules ; -import "class" ; -import errors ; -import configure ; - -rule find-include-path ( variable : properties : header - : provided-path ? ) -{ - # FIXME: document which properties affect this function by - # default. - local target-os = [ $(properties).get <target-os> ] ; - properties = [ property-set.create <target-os>$(toolset) ] ; - if $($(variable)-$(properties)) - { - return $($(variable)-$(properties)) ; - } - else - { - provided-path ?= [ modules.peek : $(variable) ] ; - includes = $(provided-path) ; - includes += [ $(properties).get <include> ] ; - if [ $(properties).get <target-os> ] != windows - { - # FIXME: use sysroot - includes += /usr/include ; - } - - local result ; - while ! $(result) && $(includes) - { - local f = [ path.root $(header) $(includes[1]) ] ; - ECHO "Checking " $(f) ; - if [ path.exists $(f) ] - { - result = $(includes[1]) ; - } - else if $(provided-path) - { - errors.user-error "Could not find header" $(header) - : "in the user-specified directory" $(provided-path) ; - } - includes = $(includes[2-]) ; - } - $(variable)-$(properties) = $(result) ; - return $(result) ; - } -} - -rule find-library ( variable : properties : names + : provided-path ? ) -{ - local target-os = [ $(properties).get <target-os> ] ; - properties = [ property-set.create <target-os>$(toolset) ] ; - if $($(variable)-$(properties)) - { - return $($(variable)-$(properties)) ; - } - else - { - provided-path ?= [ modules.peek : $(variable) ] ; - paths = $(provided-path) ; - paths += [ $(properties).get <library-path> ] ; - if [ $(properties).get <target-os> ] != windows - { - paths += /usr/lib /usr/lib32 /usr/lib64 ; - } - - local result ; - while ! $(result) && $(paths) - { - while ! $(result) && $(names) - { - local f ; - if $(target-os) = windows - { - f = $(paths[1])/$(names[1]).lib ; - if [ path.exists $(f) ] - { - result = $(f) ; - } - } - else - { - # FIXME: check for .a as well, depending on - # the 'link' feature. - f = $(paths[1])/lib$(names[1]).so ; - ECHO "CHECKING $(f) " ; - if [ path.exists $(f) ] - { - result = $(f) ; - } - } - if ! 
$(result) && $(provided-path) - { - errors.user-error "Could not find either of: " $(names) - : "in the user-specified directory" $(provided-path) ; - - } - names = $(names[2-]) ; - } - paths = $(paths[2-]) ; - } - $(variable)-$(properties) = $(result) ; - return $(result) ; - } -} - -class ac-library : basic-target -{ - import errors ; - import indirect ; - import virtual-target ; - import ac ; - import configure ; - - rule __init__ ( name : project : * : * ) - { - basic-target.__init__ $(name) : $(project) : $(sources) - : $(requirements) ; - - reconfigure $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule set-header ( header ) - { - self.header = $(header) ; - } - - rule set-default-names ( names + ) - { - self.default-names = $(names) ; - } - - rule reconfigure ( * : * ) - { - ECHO "XXX" $(1) ; - if ! $(1) - { - # This is 'using xxx ;'. Nothing to configure, really. - } - else - { - for i in 1 2 3 4 5 6 7 8 9 - { - # FIXME: this naming is inconsistent with XXX_INCLUDE/XXX_LIBRARY - if ! ( $($(i)[1]) in root include-path library-path library-name condition ) - { - errors.user-error "Invalid named parameter" $($(i)[1]) ; - } - local name = $($(i)[1]) ; - local value = $($(i)[2-]) ; - if $($(name)) && $($(name)) != $(value) - { - errors.user-error "Attempt to change value of '$(name)'" ; - } - $(name) = $(value) ; - } - - include-path ?= $(root)/include ; - library-path ?= $(root)/lib ; - } - } - - rule construct ( name : sources * : property-set ) - { - # FIXME: log results. - local libnames = $(library-name) ; - if ! $(libnames) && ! $(include-path) && ! $(library-path) - { - libnames = [ modules.peek : $(name:U)_NAME ] ; - # Backward compatibility only. - libnames ?= [ modules.peek : $(name:U)_BINARY ] ; - } - libnames ?= $(self.default-names) ; - - local includes = [ - ac.find-include-path $(name:U)_INCLUDE : $(property-set) : $(self.header) : $(include-path) ] ; - local library = [ ac.find-library $(name:U)_LIBRARY : $(property-set) : $(libnames) : $(library-path) ] ; - if $(includes) && $(library) - { - library = [ virtual-target.from-file $(library) : . : $(self.project) ] ; - configure.log-library-search-result $(name) : "found" ; - return [ property-set.create <include>$(includes) <source>$(library) ] ; - } - else - { - configure.log-library-search-result $(name) : "no found" ; - } - } -} - diff --git a/jam-files/boost-build/build/alias.jam b/jam-files/boost-build/build/alias.jam deleted file mode 100644 index 48019cb9..00000000 --- a/jam-files/boost-build/build/alias.jam +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2003, 2004, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This module defines the 'alias' rule and the associated target class. -# -# Alias is just a main target which returns its source targets without any -# processing. 
For example: -# -# alias bin : hello test_hello ; -# alias lib : helpers xml_parser ; -# -# Another important use of 'alias' is to conveniently group source files: -# -# alias platform-src : win.cpp : <os>NT ; -# alias platform-src : linux.cpp : <os>LINUX ; -# exe main : main.cpp platform-src ; -# -# Lastly, it is possible to create a local alias for some target, with different -# properties: -# -# alias big_lib : : @/external_project/big_lib/<link>static ; -# - -import "class" : new ; -import project ; -import property-set ; -import targets ; - - -class alias-target-class : basic-target -{ - rule __init__ ( name : project : sources * : requirements * - : default-build * : usage-requirements * ) - { - basic-target.__init__ $(name) : $(project) : $(sources) : - $(requirements) : $(default-build) : $(usage-requirements) ; - } - - rule construct ( name : source-targets * : property-set ) - { - return [ property-set.empty ] $(source-targets) ; - } - - rule compute-usage-requirements ( subvariant ) - { - local base = [ basic-target.compute-usage-requirements $(subvariant) ] ; - return [ $(base).add [ $(subvariant).sources-usage-requirements ] ] ; - } -} - - -# Declares the 'alias' target. It will process its sources virtual-targets by -# returning them unaltered as its own constructed virtual-targets. -# -rule alias ( name : sources * : requirements * : default-build * : - usage-requirements * ) -{ - local project = [ project.current ] ; - - targets.main-target-alternative - [ new alias-target-class $(name) : $(project) - : [ targets.main-target-sources $(sources) : $(name) : no-renaming ] - : [ targets.main-target-requirements $(requirements) : $(project) ] - : [ targets.main-target-default-build $(default-build) : $(project) - ] - : [ targets.main-target-usage-requirements $(usage-requirements) : - $(project) ] - ] ; -} - - -IMPORT $(__name__) : alias : : alias ; diff --git a/jam-files/boost-build/build/alias.py b/jam-files/boost-build/build/alias.py deleted file mode 100644 index 575e5360..00000000 --- a/jam-files/boost-build/build/alias.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2003, 2004, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Status: ported (danielw) -# Base revision: 56043 - -# This module defines the 'alias' rule and associated class. -# -# Alias is just a main target which returns its source targets without any -# processing. For example:: -# -# alias bin : hello test_hello ; -# alias lib : helpers xml_parser ; -# -# Another important use of 'alias' is to conveniently group source files:: -# -# alias platform-src : win.cpp : <os>NT ; -# alias platform-src : linux.cpp : <os>LINUX ; -# exe main : main.cpp platform-src ; -# -# Lastly, it's possible to create local alias for some target, with different -# properties:: -# -# alias big_lib : : @/external_project/big_lib/<link>static ; -# - -import targets -import property_set -from b2.manager import get_manager - -from b2.util import metatarget - -class AliasTarget(targets.BasicTarget): - - def __init__(self, *args): - targets.BasicTarget.__init__(self, *args) - - def construct(self, name, source_targets, properties): - return [property_set.empty(), source_targets] - - def compute_usage_requirements(self, subvariant): - base = targets.BasicTarget.compute_usage_requirements(self, subvariant) - # Add source's usage requirement. If we don't do this, "alias" does not - # look like 100% alias. 
- return base.add(subvariant.sources_usage_requirements()) - -@metatarget -def alias(name, sources=[], requirements=[], default_build=[], usage_requirements=[]): - - project = get_manager().projects().current() - targets = get_manager().targets() - - targets.main_target_alternative(AliasTarget( - name, project, - targets.main_target_sources(sources, name, no_renaming=True), - targets.main_target_requirements(requirements or [], project), - targets.main_target_default_build(default_build, project), - targets.main_target_usage_requirements(usage_requirements or [], project))) - -# Declares the 'alias' target. It will build sources, and return them unaltered. -get_manager().projects().add_rule("alias", alias) - diff --git a/jam-files/boost-build/build/build-request.jam b/jam-files/boost-build/build/build-request.jam deleted file mode 100644 index 8a1f7b0e..00000000 --- a/jam-files/boost-build/build/build-request.jam +++ /dev/null @@ -1,322 +0,0 @@ -# Copyright 2002 Dave Abrahams -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import "class" : new ; -import sequence ; -import set ; -import regex ; -import feature ; -import property ; -import container ; -import string ; - - -# Transform property-set by applying f to each component property. -# -local rule apply-to-property-set ( f property-set ) -{ - local properties = [ feature.split $(property-set) ] ; - return [ string.join [ $(f) $(properties) ] : / ] ; -} - - -# Expand the given build request by combining all property-sets which do not -# specify conflicting non-free features. Expects all the project files to -# already be loaded. -# -rule expand-no-defaults ( property-sets * ) -{ - # First make all features and subfeatures explicit. - local expanded-property-sets = [ sequence.transform apply-to-property-set - feature.expand-subfeatures : $(property-sets) ] ; - - # Now combine all of the expanded property-sets - local product = [ x-product $(expanded-property-sets) : $(feature-space) ] ; - - return $(product) ; -} - - -# Implementation of x-product, below. Expects all the project files to already -# be loaded. -# -local rule x-product-aux ( property-sets + ) -{ - local result ; - local p = [ feature.split $(property-sets[1]) ] ; - local f = [ set.difference $(p:G) : [ feature.free-features ] ] ; - local seen ; - # No conflict with things used at a higher level? - if ! [ set.intersection $(f) : $(x-product-used) ] - { - local x-product-seen ; - { - # Do not mix in any conflicting features. - local x-product-used = $(x-product-used) $(f) ; - - if $(property-sets[2]) - { - local rest = [ x-product-aux $(property-sets[2-]) : $(feature-space) ] ; - result = $(property-sets[1])/$(rest) ; - } - - result ?= $(property-sets[1]) ; - } - - # If we did not encounter a conflicting feature lower down, do not - # recurse again. - if ! [ set.intersection $(f) : $(x-product-seen) ] - { - property-sets = ; - } - - seen = $(x-product-seen) ; - } - - if $(property-sets[2]) - { - result += [ x-product-aux $(property-sets[2-]) : $(feature-space) ] ; - } - - # Note that we have seen these features so that higher levels will recurse - # again without them set. - x-product-seen += $(f) $(seen) ; - return $(result) ; -} - - -# Return the cross-product of all elements of property-sets, less any that would -# contain conflicting values for single-valued features. Expects all the project -# files to already be loaded. 
-# -local rule x-product ( property-sets * ) -{ - if $(property-sets).non-empty - { - # Prepare some "scoped globals" that can be used by the implementation - # function, x-product-aux. - local x-product-seen x-product-used ; - return [ x-product-aux $(property-sets) : $(feature-space) ] ; - } - # Otherwise return empty. -} - - -# Returns true if either 'v' or the part of 'v' before the first '-' symbol is -# an implicit value. Expects all the project files to already be loaded. -# -local rule looks-like-implicit-value ( v ) -{ - if [ feature.is-implicit-value $(v) ] - { - return true ; - } - else - { - local split = [ regex.split $(v) - ] ; - if [ feature.is-implicit-value $(split[1]) ] - { - return true ; - } - } -} - - -# Takes the command line tokens (such as taken from the ARGV rule) and -# constructs a build request from them. Returns a vector of two vectors (where -# "vector" means container.jam's "vector"). First is the set of targets -# specified in the command line, and second is the set of requested build -# properties. Expects all the project files to already be loaded. -# -rule from-command-line ( command-line * ) -{ - local targets ; - local properties ; - - command-line = $(command-line[2-]) ; - local skip-next = ; - for local e in $(command-line) - { - if $(skip-next) - { - skip-next = ; - } - else if ! [ MATCH "^(-).*" : $(e) ] - { - # Build request spec either has "=" in it or completely consists of - # implicit feature values. - local fs = feature-space ; - if [ MATCH "(.*=.*)" : $(e) ] - || [ looks-like-implicit-value $(e:D=) : $(feature-space) ] - { - properties += [ convert-command-line-element $(e) : - $(feature-space) ] ; - } - else - { - targets += $(e) ; - } - } - else if [ MATCH "^(-[-ldjfsto])$" : $(e) ] - { - skip-next = true ; - } - } - return [ new vector - [ new vector $(targets) ] - [ new vector $(properties) ] ] ; -} - - -# Converts one element of command line build request specification into internal -# form. Expects all the project files to already be loaded. -# -local rule convert-command-line-element ( e ) -{ - local result ; - local parts = [ regex.split $(e) "/" ] ; - while $(parts) - { - local p = $(parts[1]) ; - local m = [ MATCH "([^=]*)=(.*)" : $(p) ] ; - local lresult ; - local feature ; - local values ; - if $(m) - { - feature = $(m[1]) ; - values = [ regex.split $(m[2]) "," ] ; - lresult = <$(feature)>$(values) ; - } - else - { - lresult = [ regex.split $(p) "," ] ; - } - - if $(feature) && free in [ feature.attributes $(feature) ] - { - # If we have free feature, then the value is everything - # until the end of the command line token. Slashes in - # the following string are not taked to mean separation - # of properties. Commas are also not interpreted specially. - values = $(values:J=,) ; - values = $(values) $(parts[2-]) ; - values = $(values:J=/) ; - lresult = <$(feature)>$(values) ; - parts = ; - } - - if ! [ MATCH (.*-.*) : $(p) ] - { - # property.validate cannot handle subfeatures, so we avoid the check - # here. - for local p in $(lresult) - { - property.validate $(p) : $(feature-space) ; - } - } - - if ! 
$(result) - { - result = $(lresult) ; - } - else - { - result = $(result)/$(lresult) ; - } - - parts = $(parts[2-]) ; - } - - return $(result) ; -} - - -rule __test__ ( ) -{ - import assert ; - import feature ; - - feature.prepare-test build-request-test-temp ; - - import build-request ; - import build-request : expand-no-defaults : build-request.expand-no-defaults ; - import errors : try catch ; - import feature : feature subfeature ; - - feature toolset : gcc msvc borland : implicit ; - subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4 - 3.0 3.0.1 3.0.2 : optional ; - - feature variant : debug release : implicit composite ; - feature inlining : on off ; - feature "include" : : free ; - - feature stdlib : native stlport : implicit ; - - feature runtime-link : dynamic static : symmetric ; - - # Empty build requests should expand to empty. - assert.result - : build-request.expand-no-defaults ; - - assert.result - <toolset>gcc/<toolset-gcc:version>3.0.1/<stdlib>stlport/<variant>debug - <toolset>msvc/<stdlib>stlport/<variant>debug - <toolset>msvc/<variant>debug - : build-request.expand-no-defaults gcc-3.0.1/stlport msvc/stlport msvc debug ; - - assert.result - <toolset>gcc/<toolset-gcc:version>3.0.1/<stdlib>stlport/<variant>debug - <toolset>msvc/<variant>debug - <variant>debug/<toolset>msvc/<stdlib>stlport - : build-request.expand-no-defaults gcc-3.0.1/stlport msvc debug msvc/stlport ; - - assert.result - <toolset>gcc/<toolset-gcc:version>3.0.1/<stdlib>stlport/<variant>debug/<inlining>off - <toolset>gcc/<toolset-gcc:version>3.0.1/<stdlib>stlport/<variant>release/<inlining>off - : build-request.expand-no-defaults gcc-3.0.1/stlport debug release <inlining>off ; - - assert.result - <include>a/b/c/<toolset>gcc/<toolset-gcc:version>3.0.1/<stdlib>stlport/<variant>debug/<include>x/y/z - <include>a/b/c/<toolset>msvc/<stdlib>stlport/<variant>debug/<include>x/y/z - <include>a/b/c/<toolset>msvc/<variant>debug/<include>x/y/z - : build-request.expand-no-defaults <include>a/b/c gcc-3.0.1/stlport msvc/stlport msvc debug <include>x/y/z ; - - local r ; - - r = [ build-request.from-command-line bjam debug runtime-link=dynamic ] ; - assert.equal [ $(r).get-at 1 ] : ; - assert.equal [ $(r).get-at 2 ] : debug <runtime-link>dynamic ; - - try ; - { - build-request.from-command-line bjam gcc/debug runtime-link=dynamic/static ; - } - catch \"static\" is not a value of an implicit feature ; - - r = [ build-request.from-command-line bjam -d2 --debug debug target runtime-link=dynamic ] ; - assert.equal [ $(r).get-at 1 ] : target ; - assert.equal [ $(r).get-at 2 ] : debug <runtime-link>dynamic ; - - r = [ build-request.from-command-line bjam debug runtime-link=dynamic,static ] ; - assert.equal [ $(r).get-at 1 ] : ; - assert.equal [ $(r).get-at 2 ] : debug <runtime-link>dynamic <runtime-link>static ; - - r = [ build-request.from-command-line bjam debug gcc/runtime-link=dynamic,static ] ; - assert.equal [ $(r).get-at 1 ] : ; - assert.equal [ $(r).get-at 2 ] : debug gcc/<runtime-link>dynamic - gcc/<runtime-link>static ; - - r = [ build-request.from-command-line bjam msvc gcc,borland/runtime-link=static ] ; - assert.equal [ $(r).get-at 1 ] : ; - assert.equal [ $(r).get-at 2 ] : msvc gcc/<runtime-link>static - borland/<runtime-link>static ; - - r = [ build-request.from-command-line bjam gcc-3.0 ] ; - assert.equal [ $(r).get-at 1 ] : ; - assert.equal [ $(r).get-at 2 ] : gcc-3.0 ; - - feature.finish-test build-request-test-temp ; -} diff --git a/jam-files/boost-build/build/build_request.py 
b/jam-files/boost-build/build/build_request.py deleted file mode 100644 index cc9f2400..00000000 --- a/jam-files/boost-build/build/build_request.py +++ /dev/null @@ -1,216 +0,0 @@ -# Status: being ported by Vladimir Prus -# TODO: need to re-compare with mainline of .jam -# Base revision: 40480 -# -# (C) Copyright David Abrahams 2002. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -import b2.build.feature -feature = b2.build.feature - -from b2.util.utility import * -import b2.build.property_set as property_set - -def expand_no_defaults (property_sets): - """ Expand the given build request by combining all property_sets which don't - specify conflicting non-free features. - """ - # First make all features and subfeatures explicit - expanded_property_sets = [ps.expand_subfeatures() for ps in property_sets] - - # Now combine all of the expanded property_sets - product = __x_product (expanded_property_sets) - - return [property_set.create(p) for p in product] - - -def __x_product (property_sets): - """ Return the cross-product of all elements of property_sets, less any - that would contain conflicting values for single-valued features. - """ - x_product_seen = set() - return __x_product_aux (property_sets, x_product_seen)[0] - -def __x_product_aux (property_sets, seen_features): - """Returns non-conflicting combinations of property sets. - - property_sets is a list of PropertySet instances. seen_features is a set of Property - instances. - - Returns a tuple of: - - list of lists of Property instances, such that within each list, no two Property instance - have the same feature, and no Property is for feature in seen_features. - - set of features we saw in property_sets - """ - if not property_sets: - return ([], set()) - - properties = property_sets[0].all() - - these_features = set() - for p in property_sets[0].non_free(): - these_features.add(p.feature()) - - # Note: the algorithm as implemented here, as in original Jam code, appears to - # detect conflicts based on features, not properties. For example, if command - # line build request say: - # - # <a>1/<b>1 c<1>/<b>1 - # - # It will decide that those two property sets conflict, because they both specify - # a value for 'b' and will not try building "<a>1 <c1> <b1>", but rather two - # different property sets. This is a topic for future fixing, maybe. 
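The skip-and-retry recursion described in the comment above is easier to see in isolation. Below is a compact model of __x_product_aux under simplifying assumptions: each property set is reduced to a feature-to-value dict, and conflicts are decided purely by feature names, as the comment notes the original also does:

    def x_product_aux(property_sets, seen):
        # Returns (combinations, features_seen). A set whose features
        # collide with an outer choice is skipped; if something deeper
        # collided with the current head, the tail is retried without it.
        if not property_sets:
            return [], set()
        head, tail = property_sets[0], property_sets[1:]
        these = set(head)
        if these & seen:
            inner, inner_seen = x_product_aux(tail, seen)
            return inner, inner_seen | these
        inner, inner_seen = x_product_aux(tail, seen | these)
        if inner:
            result = [{**head, **combo} for combo in inner]
        else:
            result = [dict(head)]
        if inner_seen & these:
            more, _ = x_product_aux(tail, seen)
            result.extend(more)
        return result, inner_seen | these

    # x_product_aux([{"a": "1", "b": "1"}, {"c": "1", "b": "1"}], set())
    # -> ([{"a": "1", "b": "1"}, {"c": "1", "b": "1"}], {"a", "b", "c"})

As in the <a>1/<b>1 example from the comment, both sets pin feature b, so they are never merged; each survives as its own build variation.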
- if these_features & seen_features: - - (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features) - return (inner_result, inner_seen | these_features) - - else: - - result = [] - (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features | these_features) - if inner_result: - for inner in inner_result: - result.append(properties + inner) - else: - result.append(properties) - - if inner_seen & these_features: - # Some of elements in property_sets[1:] conflict with elements of property_sets[0], - # Try again, this time omitting elements of property_sets[0] - (inner_result2, inner_seen2) = __x_product_aux(property_sets[1:], seen_features) - result.extend(inner_result2) - - return (result, inner_seen | these_features) - - - -def looks_like_implicit_value(v): - """Returns true if 'v' is either implicit value, or - the part before the first '-' symbol is implicit value.""" - if feature.is_implicit_value(v): - return 1 - else: - split = v.split("-") - if feature.is_implicit_value(split[0]): - return 1 - - return 0 - -def from_command_line(command_line): - """Takes the command line tokens (such as taken from ARGV rule) - and constructs build request from it. Returns a list of two - lists. First is the set of targets specified in the command line, - and second is the set of requested build properties.""" - - targets = [] - properties = [] - - for e in command_line: - if e[0] != "-": - # Build request spec either has "=" in it, or completely - # consists of implicit feature values. - if e.find("=") != -1 or looks_like_implicit_value(e.split("/")[0]): - properties += convert_command_line_element(e) - else: - targets.append(e) - - return [targets, properties] - -# Converts one element of command line build request specification into -# internal form. -def convert_command_line_element(e): - - result = None - parts = e.split("/") - for p in parts: - m = p.split("=") - if len(m) > 1: - feature = m[0] - values = m[1].split(",") - lresult = [("<%s>%s" % (feature, v)) for v in values] - else: - lresult = p.split(",") - - if p.find('-') == -1: - # FIXME: first port property.validate - # property.validate cannot handle subfeatures, - # so we avoid the check here. 
- #for p in lresult: - # property.validate(p) - pass - - if not result: - result = lresult - else: - result = [e1 + "/" + e2 for e1 in result for e2 in lresult] - - return [property_set.create(b2.build.feature.split(r)) for r in result] - -### -### rule __test__ ( ) -### { -### import assert feature ; -### -### feature.prepare-test build-request-test-temp ; -### -### import build-request ; -### import build-request : expand_no_defaults : build-request.expand_no_defaults ; -### import errors : try catch ; -### import feature : feature subfeature ; -### -### feature toolset : gcc msvc borland : implicit ; -### subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4 -### 3.0 3.0.1 3.0.2 : optional ; -### -### feature variant : debug release : implicit composite ; -### feature inlining : on off ; -### feature "include" : : free ; -### -### feature stdlib : native stlport : implicit ; -### -### feature runtime-link : dynamic static : symmetric ; -### -### -### local r ; -### -### r = [ build-request.from-command-line bjam debug runtime-link=dynamic ] ; -### assert.equal [ $(r).get-at 1 ] : ; -### assert.equal [ $(r).get-at 2 ] : debug <runtime-link>dynamic ; -### -### try ; -### { -### -### build-request.from-command-line bjam gcc/debug runtime-link=dynamic/static ; -### } -### catch \"static\" is not a value of an implicit feature ; -### -### -### r = [ build-request.from-command-line bjam -d2 --debug debug target runtime-link=dynamic ] ; -### assert.equal [ $(r).get-at 1 ] : target ; -### assert.equal [ $(r).get-at 2 ] : debug <runtime-link>dynamic ; -### -### r = [ build-request.from-command-line bjam debug runtime-link=dynamic,static ] ; -### assert.equal [ $(r).get-at 1 ] : ; -### assert.equal [ $(r).get-at 2 ] : debug <runtime-link>dynamic <runtime-link>static ; -### -### r = [ build-request.from-command-line bjam debug gcc/runtime-link=dynamic,static ] ; -### assert.equal [ $(r).get-at 1 ] : ; -### assert.equal [ $(r).get-at 2 ] : debug gcc/<runtime-link>dynamic -### gcc/<runtime-link>static ; -### -### r = [ build-request.from-command-line bjam msvc gcc,borland/runtime-link=static ] ; -### assert.equal [ $(r).get-at 1 ] : ; -### assert.equal [ $(r).get-at 2 ] : msvc gcc/<runtime-link>static -### borland/<runtime-link>static ; -### -### r = [ build-request.from-command-line bjam gcc-3.0 ] ; -### assert.equal [ $(r).get-at 1 ] : ; -### assert.equal [ $(r).get-at 2 ] : gcc-3.0 ; -### -### feature.finish-test build-request-test-temp ; -### } -### -### diff --git a/jam-files/boost-build/build/configure.jam b/jam-files/boost-build/build/configure.jam deleted file mode 100644 index 14c1328a..00000000 --- a/jam-files/boost-build/build/configure.jam +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (c) 2010 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# This module defines function to help with two main tasks: -# -# - Discovering build-time configuration for the purposes of adjusting -# build process. -# - Reporting what is built, and how it is configured. - -import targets ; -import errors ; -import targets ; -import sequence ; -import property ; -import property-set ; -import "class" : new ; -import common ; -import path ; - -rule log-summary ( ) -{ - -} - -.width = 30 ; - -rule set-width ( width ) -{ - .width = $(width) ; -} - -# Declare that the components specified by the parameter exist. 
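Stepping back to the command-line conversion deleted just above: a token such as gcc/runtime-link=dynamic,static is split on '/', each part either names implicit values or expands feature=v1,v2 into one property per value, and the parts then multiply together. A short sketch under stated assumptions (free-feature handling and validation are omitted, and convert_element is an invented name):

    def convert_element(e):
        # '/'-separated parts multiply cross-product style; '=' parts
        # expand to one <feature>value property per comma-separated value.
        result = [""]
        for part in e.split("/"):
            if "=" in part:
                feature, values = part.split("=", 1)
                alts = ["<%s>%s" % (feature, v) for v in values.split(",")]
            else:
                alts = part.split(",")
            result = [(r + "/" + a) if r else a for r in result for a in alts]
        return result

    # convert_element("gcc/runtime-link=dynamic,static")
    # -> ["gcc/<runtime-link>dynamic", "gcc/<runtime-link>static"]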
-rule register-components ( components * ) -{ - .components += $(components) ; -} - -# Declare that the components specified by the parameters will -# be build. -rule components-building ( components * ) -{ - .built-components += $(components) ; -} - -# Report something about component configuration that the -# user should better know. -rule log-component-configuration ( component : message ) -{ - # FIXME: implement per-property-set logs - .component-logs.$(component) += $(message) ; -} - - - -rule log-check-result ( result ) -{ - if ! $(.announced-checks) - { - ECHO "Performing configuration checks\n" ; - .announced-checks = 1 ; - } - - ECHO $(result) ; - #.check-results += $(result) ; -} - -rule log-library-search-result ( library : result ) -{ - local x = [ PAD " - $(library) : $(result)" : $(.width) ] ; - log-check-result "$(x)" ; -} - -rule print-component-configuration ( ) -{ - local c = [ sequence.unique $(.components) ] ; - - ECHO "\nComponent configuration:\n" ; - for c in $(.components) - { - local s ; - if $(c) in $(.built-components) - { - s = "building" ; - } - else - { - s = "not building" ; - } - ECHO [ PAD " - $(c)" : $(.width) ] ": $(s)" ; - for local m in $(.component-logs.$(c)) - { - ECHO " -" $(m) ; - } - } - ECHO ; -} - -rule print-configure-checks-summary ( ) -{ - # FIXME: the problem with that approach is tha - # the user sees checks summary when all checks are - # done, and has no progress reporting while the - # checks are being executed. - if $(.check-results) - { - ECHO "Configuration checks summary\n" ; - - for local r in $(.check-results) - { - ECHO $(r) ; - } - ECHO ; - } -} - -# Attempt to build a metatarget named by 'metatarget-reference' -# in context of 'project' with properties 'ps'. -# Returns non-empty value if build is OK. -rule builds-raw ( metatarget-reference : project : ps : what : retry ? ) -{ - local result ; - - if ! $(retry) && ! $(.$(what)-tested.$(ps)) - { - .$(what)-tested.$(ps) = true ; - - local targets = [ targets.generate-from-reference - $(metatarget-reference) : $(project) : $(ps) ] ; - - local jam-targets ; - for local t in $(targets[2-]) - { - jam-targets += [ $(t).actualize ] ; - } - - if ! UPDATE_NOW in [ RULENAMES ] - { - # Cannot determine. Assume existance. - } - else - { - local x = [ PAD " - $(what)" : $(.width) ] ; - if [ UPDATE_NOW $(jam-targets) : - $(.log-fd) : ignore-minus-n : ignore-minus-q ] - { - .$(what)-supported.$(ps) = yes ; - result = true ; - log-check-result "$(x) : yes" ; - } - else - { - log-check-result "$(x) : no" ; - } - } - return $(result) ; - } - else - { - return $(.$(what)-supported.$(ps)) ; - } -} - -rule builds ( metatarget-reference : properties * : what ? : retry ? ) -{ - what ?= "$(metatarget-reference) builds" ; - - # FIXME: this should not be hardcoded. Other checks might - # want to consider different set of features as relevant. - local toolset = [ property.select <toolset> : $(properties) ] ; - local toolset-version-property = "<toolset-$(toolset:G=):version>" ; - local relevant = [ property.select <target-os> <toolset> $(toolset-version-property) - <address-model> <architecture> - : $(properties) ] ; - local ps = [ property-set.create $(relevant) ] ; - local t = [ targets.current ] ; - local p = [ $(t).project ] ; - - return [ builds-raw $(metatarget-reference) : $(p) : $(ps) : $(what) : $(retry) ] ; -} - - -# Called by Boost.Build startup code to specify name of a file -# that will receive results of configure checks. This -# should never be called by users. 
-rule set-log-file ( log-file ) -{ - path.makedirs [ path.parent $(log-file) ] ; - - .log-fd = [ FILE_OPEN $(log-file) : "w" ] ; -} - -# Frontend rules - -class check-target-builds-worker -{ - import configure ; - import property-set ; - import targets ; - import property ; - - rule __init__ ( target message ? : true-properties * : false-properties * ) - { - self.target = $(target) ; - self.message = $(message) ; - self.true-properties = $(true-properties) ; - self.false-properties = $(false-properties) ; - } - - rule check ( properties * ) - { - local choosen ; - if [ configure.builds $(self.target) : $(properties) : $(self.message) ] - { - choosen = $(self.true-properties) ; - } - else - { - choosen = $(self.false-properties) ; - } - return [ property.evaluate-conditionals-in-context $(choosen) : $(properties) ] ; - } -} - - -rule check-target-builds ( target message ? : true-properties * : false-properties * ) -{ - local instance = [ new check-target-builds-worker $(target) $(message) : $(true-properties) - : $(false-properties) ] ; - return <conditional>@$(instance).check ; -} - -IMPORT $(__name__) : check-target-builds : : check-target-builds ; - - diff --git a/jam-files/boost-build/build/configure.py b/jam-files/boost-build/build/configure.py deleted file mode 100644 index 0426832c..00000000 --- a/jam-files/boost-build/build/configure.py +++ /dev/null @@ -1,164 +0,0 @@ -# Status: ported. -# Base revison: 64488 -# -# Copyright (c) 2010 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# This module defines function to help with two main tasks: -# -# - Discovering build-time configuration for the purposes of adjusting -# build process. -# - Reporting what is built, and how it is configured. 
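Both the Jam and Python versions of configure.builds above share one contract: each (check, property set) pair triggers at most one trial build per run, and the recorded yes/no answer is reused afterwards. A minimal sketch of that caching shape, with try_build standing in for actualizing the metatarget and calling UPDATE_NOW on its Jam targets:

    import functools

    def try_build(what, properties):
        # Stand-in for generating the metatarget's Jam targets and
        # running UPDATE_NOW on them; here every check "succeeds".
        return True

    @functools.lru_cache(maxsize=None)
    def check_builds(what, properties):
        # properties must be hashable (e.g. a tuple of strings) so the
        # (check, property-set) pair can key the result cache.
        ok = try_build(what, properties)
        print((" - %s" % what).rjust(30) + (" : yes" if ok else " : no"))
        return ok

    # check_builds("zlib builds", ("toolset=gcc", "address-model=64"))

The rjust(30) padding follows the .width = 30 report formatting used by the deleted configure module.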
- -import b2.build.property as property -import b2.build.property_set as property_set - -import b2.build.targets - -from b2.manager import get_manager -from b2.util.sequence import unique -from b2.util import bjam_signature, value_to_jam - -import bjam -import os - -__width = 30 - -def set_width(width): - global __width - __width = 30 - -__components = [] -__built_components = [] -__component_logs = {} -__announced_checks = False - -__log_file = None -__log_fd = -1 - -def register_components(components): - """Declare that the components specified by the parameter exist.""" - __components.extend(components) - -def components_building(components): - """Declare that the components specified by the parameters will be build.""" - __built_components.extend(components) - -def log_component_configuration(component, message): - """Report something about component configuration that the user should better know.""" - __component_logs.setdefault(component, []).append(message) - -def log_check_result(result): - global __announced_checks - if not __announced_checks: - print "Performing configuration checks" - __announced_checks = True - - print result - -def log_library_search_result(library, result): - log_check_result((" - %(library)s : %(result)s" % locals()).rjust(width)) - - -def print_component_configuration(): - - print "\nComponent configuration:" - for c in __components: - if c in __built_components: - s = "building" - else: - s = "not building" - message = " - %s)" % c - message = message.rjust(__width) - message += " : " + s - for m in __component_logs.get(c, []): - print " -" + m - print "" - -__builds_cache = {} - -def builds(metatarget_reference, project, ps, what): - # Attempt to build a metatarget named by 'metatarget-reference' - # in context of 'project' with properties 'ps'. - # Returns non-empty value if build is OK. - - result = [] - - existing = __builds_cache.get((what, ps), None) - if existing is None: - - result = False - __builds_cache[(what, ps)] = False - - targets = b2.build.targets.generate_from_reference( - metatarget_reference, project, ps).targets() - jam_targets = [] - for t in targets: - jam_targets.append(t.actualize()) - - x = (" - %s" % what).rjust(__width) - if bjam.call("UPDATE_NOW", jam_targets, str(__log_fd), "ignore-minus-n"): - __builds_cache[(what, ps)] = True - result = True - log_check_result("%s: yes" % x) - else: - log_check_result("%s: no" % x) - - return result - else: - return existing - -def set_log_file(log_file_name): - # Called by Boost.Build startup code to specify name of a file - # that will receive results of configure checks. This - # should never be called by users. - global __log_file, __log_fd - dirname = os.path.dirname(log_file_name) - if not os.path.exists(dirname): - os.makedirs(dirname) - # Make sure to keep the file around, so that it's not - # garbage-collected and closed - __log_file = open(log_file_name, "w") - __log_fd = __log_file.fileno() - -# Frontend rules - -class CheckTargetBuildsWorker: - - def __init__(self, target, true_properties, false_properties): - self.target = target - self.true_properties = property.create_from_strings(true_properties, True) - self.false_properties = property.create_from_strings(false_properties, True) - - def check(self, ps): - - # FIXME: this should not be hardcoded. Other checks might - # want to consider different set of features as relevant. 
- toolset = ps.get('toolset')[0] - toolset_version_property = "<toolset-" + toolset + ":version>" ; - relevant = ps.get_properties('target-os') + \ - ps.get_properties("toolset") + \ - ps.get_properties(toolset_version_property) + \ - ps.get_properties("address-model") + \ - ps.get_properties("architecture") - rps = property_set.create(relevant) - t = get_manager().targets().current() - p = t.project() - if builds(self.target, p, rps, "%s builds" % self.target): - choosen = self.true_properties - else: - choosen = self.false_properties - return property.evaluate_conditionals_in_context(choosen, ps) - -@bjam_signature((["target"], ["true_properties", "*"], ["false_properties", "*"])) -def check_target_builds(target, true_properties, false_properties): - worker = CheckTargetBuildsWorker(target, true_properties, false_properties) - value = value_to_jam(worker.check) - return "<conditional>" + value - -get_manager().projects().add_rule("check-target-builds", check_target_builds) - - diff --git a/jam-files/boost-build/build/engine.py b/jam-files/boost-build/build/engine.py deleted file mode 100644 index be9736e0..00000000 --- a/jam-files/boost-build/build/engine.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright Pedro Ferreira 2005. -# Copyright Vladimir Prus 2007. -# Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -bjam_interface = __import__('bjam') - -import operator -import re - -import b2.build.property_set as property_set -import b2.util - -class BjamAction: - """Class representing bjam action defined from Python.""" - - def __init__(self, action_name, function): - self.action_name = action_name - self.function = function - - def __call__(self, targets, sources, property_set): - if self.function: - self.function(targets, sources, property_set) - - # Bjam actions defined from Python have only the command - # to execute, and no associated jam procedural code. So - # passing 'property_set' to it is not necessary. - bjam_interface.call("set-update-action", self.action_name, - targets, sources, []) - -class BjamNativeAction: - """Class representing bjam action defined by Jam code. - - We still allow to associate a Python callable that will - be called when this action is installed on any target. - """ - - def __init__(self, action_name, function): - self.action_name = action_name - self.function = function - - def __call__(self, targets, sources, property_set): - if self.function: - self.function(targets, sources, property_set) - - p = [] - if property_set: - p = property_set.raw() - - b2.util.set_jam_action(self.action_name, targets, sources, p) - -action_modifiers = {"updated": 0x01, - "together": 0x02, - "ignore": 0x04, - "quietly": 0x08, - "piecemeal": 0x10, - "existing": 0x20} - -class Engine: - """ The abstract interface to a build engine. - - For now, the naming of targets, and special handling of some - target variables like SEARCH and LOCATE make this class coupled - to bjam engine. - """ - def __init__ (self): - self.actions = {} - - def add_dependency (self, targets, sources): - """Adds a dependency from 'targets' to 'sources' - - Both 'targets' and 'sources' can be either list - of target names, or a single target name. 
- """ - if isinstance (targets, str): - targets = [targets] - if isinstance (sources, str): - sources = [sources] - - for target in targets: - for source in sources: - self.do_add_dependency (target, source) - - def set_target_variable (self, targets, variable, value, append=0): - """ Sets a target variable. - - The 'variable' will be available to bjam when it decides - where to generate targets, and will also be available to - updating rule for that 'taret'. - """ - if isinstance (targets, str): - targets = [targets] - - for target in targets: - self.do_set_target_variable (target, variable, value, append) - - def set_update_action (self, action_name, targets, sources, properties=property_set.empty()): - """ Binds a target to the corresponding update action. - If target needs to be updated, the action registered - with action_name will be used. - The 'action_name' must be previously registered by - either 'register_action' or 'register_bjam_action' - method. - """ - assert(isinstance(properties, property_set.PropertySet)) - if isinstance (targets, str): - targets = [targets] - self.do_set_update_action (action_name, targets, sources, properties) - - def register_action (self, action_name, command, bound_list = [], flags = [], - function = None): - """Creates a new build engine action. - - Creates on bjam side an action named 'action_name', with - 'command' as the command to be executed, 'bound_variables' - naming the list of variables bound when the command is executed - and specified flag. - If 'function' is not None, it should be a callable taking three - parameters: - - targets - - sources - - instance of the property_set class - This function will be called by set_update_action, and can - set additional target variables. - """ - if self.actions.has_key(action_name): - raise "Bjam action %s is already defined" % action_name - - assert(isinstance(flags, list)) - - bjam_flags = reduce(operator.or_, - (action_modifiers[flag] for flag in flags), 0) - - bjam_interface.define_action(action_name, command, bound_list, bjam_flags) - - self.actions[action_name] = BjamAction(action_name, function) - - def register_bjam_action (self, action_name, function=None): - """Informs self that 'action_name' is declared in bjam. - - From this point, 'action_name' is a valid argument to the - set_update_action method. The action_name should be callable - in the global module of bjam. - """ - - # We allow duplicate calls to this rule for the same - # action name. This way, jamfile rules that take action names - # can just register them without specially checking if - # action is already registered. 
- if not self.actions.has_key(action_name): - self.actions[action_name] = BjamNativeAction(action_name, function) - - # Overridables - - - def do_set_update_action (self, action_name, targets, sources, property_set): - action = self.actions.get(action_name) - if not action: - raise Exception("No action %s was registered" % action_name) - action(targets, sources, property_set) - - def do_set_target_variable (self, target, variable, value, append): - if append: - bjam_interface.call("set-target-variable", target, variable, value, "true") - else: - bjam_interface.call("set-target-variable", target, variable, value) - - def do_add_dependency (self, target, source): - bjam_interface.call("DEPENDS", target, source) - - diff --git a/jam-files/boost-build/build/errors.py b/jam-files/boost-build/build/errors.py deleted file mode 100644 index d9dceefe..00000000 --- a/jam-files/boost-build/build/errors.py +++ /dev/null @@ -1,127 +0,0 @@ -# Status: being written afresh by Vladimir Prus - -# Copyright 2007 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This file is supposed to implement error reporting for Boost.Build. -# Experience with jam version has shown that printing full backtrace -# on each error is buffling. Further, for errors printed after parsing -- -# during target building, the stacktrace does not even mention what -# target is being built. - -# This module implements explicit contexts -- where other code can -# communicate which projects/targets are being built, and error -# messages will show those contexts. For programming errors, -# Python assertions are to be used. - -import bjam -import traceback -import sys - -def format(message, prefix=""): - parts = str(message).split("\n") - return "\n".join(prefix+p for p in parts) - - -class Context: - - def __init__(self, message, nested=None): - self.message_ = message - self.nested_ = nested - - def report(self, indent=""): - print indent + " -", self.message_ - if self.nested_: - print indent + " declared at:" - for n in self.nested_: - n.report(indent + " ") - -class JamfileContext: - - def __init__(self): - raw = bjam.backtrace() - self.raw_ = raw - - def report(self, indent=""): - for r in self.raw_: - print indent + " - %s:%s" % (r[0], r[1]) - -class ExceptionWithUserContext(Exception): - - def __init__(self, message, context, - original_exception=None, original_tb=None, stack=None): - Exception.__init__(self, message) - self.context_ = context - self.original_exception_ = original_exception - self.original_tb_ = original_tb - self.stack_ = stack - - def report(self): - print "error:", self.args[0] - if self.original_exception_: - print format(str(self.original_exception_), " ") - print - print " error context (most recent first):" - for c in self.context_[::-1]: - c.report() - print - if "--stacktrace" in bjam.variable("ARGV"): - if self.original_tb_: - traceback.print_tb(self.original_tb_) - elif self.stack_: - for l in traceback.format_list(self.stack_): - print l, - else: - print " use the '--stacktrace' option to get Python stacktrace" - print - -def user_error_checkpoint(callable): - def wrapper(self, *args): - errors = self.manager().errors() - try: - return callable(self, *args) - except ExceptionWithUserContext, e: - raise - except Exception, e: - errors.handle_stray_exception(e) - finally: - errors.pop_user_context() - - return wrapper - -class Errors: - - def __init__(self): - self.contexts_ = [] - self._count 
= 0 - - def count(self): - return self._count - - def push_user_context(self, message, nested=None): - self.contexts_.append(Context(message, nested)) - - def pop_user_context(self): - del self.contexts_[-1] - - def push_jamfile_context(self): - self.contexts_.append(JamfileContext()) - - def pop_jamfile_context(self): - del self.contexts_[-1] - - def capture_user_context(self): - return self.contexts_[:] - - def handle_stray_exception(self, e): - raise ExceptionWithUserContext("unexpected exception", self.contexts_[:], - e, sys.exc_info()[2]) - def __call__(self, message): - self._count = self._count + 1 - raise ExceptionWithUserContext(message, self.contexts_[:], - stack=traceback.extract_stack()) - - - - diff --git a/jam-files/boost-build/build/feature.jam b/jam-files/boost-build/build/feature.jam deleted file mode 100644 index 6f54adef..00000000 --- a/jam-files/boost-build/build/feature.jam +++ /dev/null @@ -1,1335 +0,0 @@ -# Copyright 2001, 2002, 2003 Dave Abrahams -# Copyright 2002, 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import assert : * ; -import "class" : * ; -import errors : lol->list ; -import indirect ; -import modules ; -import regex ; -import sequence ; -import set ; -import utility ; - - -local rule setup ( ) -{ - .all-attributes = - implicit - composite - optional - symmetric - free - incidental - path - dependency - propagated - link-incompatible - subfeature - order-sensitive - ; - - .all-features = ; - .all-subfeatures = ; - .all-top-features = ; # non-subfeatures - .all-implicit-values = ; -} -setup ; - - -# Prepare a fresh space to test in by moving all global variable settings into -# the given temporary module and erasing them here. -# -rule prepare-test ( temp-module ) -{ - DELETE_MODULE $(temp-module) ; - - # Transfer globals to temp-module. - for local v in [ VARNAMES feature ] - { - if [ MATCH (\\.) : $(v) ] - { - modules.poke $(temp-module) : $(v) : $($(v)) ; - $(v) = ; - } - } - setup ; -} - - -# Clear out all global variables and recover all variables from the given -# temporary module. -# -rule finish-test ( temp-module ) -{ - # Clear globals. - for local v in [ VARNAMES feature ] - { - if [ MATCH (\\.) : $(v) ] - { - $(v) = ; - } - } - - for local v in [ VARNAMES $(temp-module) ] - { - $(v) = [ modules.peek $(temp-module) : $(v) ] ; - } - DELETE_MODULE $(temp-module) ; -} - - -# Transform features by bracketing any elements which are not already bracketed -# by "<>". -# -local rule grist ( features * ) -{ - local empty = "" ; - return $(empty:G=$(features)) ; -} - - -# Declare a new feature with the given name, values, and attributes. -# -rule feature ( - name # Feature name. - : values * # Allowable values - may be extended later using feature.extend. - : attributes * # Feature attributes (e.g. implicit, free, propagated...). -) -{ - name = [ grist $(name) ] ; - - local error ; - - # Check for any unknown attributes. - if ! 
( $(attributes) in $(.all-attributes) ) - { - error = unknown attributes: - [ set.difference $(attributes) : $(.all-attributes) ] ; - } - else if $(name) in $(.all-features) - { - error = feature already defined: ; - } - else if implicit in $(attributes) && free in $(attributes) - { - error = free features cannot also be implicit ; - } - else if free in $(attributes) && propagated in $(attributes) - { - error = free features cannot be propagated ; - } - else - { - local m = [ MATCH (.*=.*) : $(values) ] ; - if $(m[1]) - { - error = "feature value may not contain '='" ; - } - } - - if $(error) - { - errors.error $(error) - : "in" feature declaration: - : feature [ lol->list $(1) : $(2) : $(3) ] ; - } - - $(name).values ?= ; - $(name).attributes = $(attributes) ; - $(name).subfeatures ?= ; - $(attributes).features += $(name) ; - - .all-features += $(name) ; - if subfeature in $(attributes) - { - .all-subfeatures += $(name) ; - } - else - { - .all-top-features += $(name) ; - } - extend $(name) : $(values) ; -} - - -# Sets the default value of the given feature, overriding any previous default. -# -rule set-default ( feature : value ) -{ - local f = [ grist $(feature) ] ; - local a = $($(f).attributes) ; - local bad-attribute = ; - if free in $(a) - { - bad-attribute = free ; - } - else if optional in $(a) - { - bad-attribute = optional ; - } - if $(bad-attribute) - { - errors.error "$(bad-attribute) property $(f) cannot have a default." ; - } - if ! $(value) in $($(f).values) - { - errors.error "The specified default value, '$(value)' is invalid" - : "allowed values are: " $($(f).values) ; - } - $(f).default = $(value) ; -} - - -# Returns the default property values for the given features. -# -rule defaults ( features * ) -{ - local result ; - for local f in $(features) - { - local gf = $(:E=:G=$(f)) ; - local a = $($(gf).attributes) ; - if ( free in $(a) ) || ( optional in $(a) ) - { - } - else - { - result += $(gf)$($(gf).default) ; - } - } - return $(result) ; -} - - -# Returns true iff all 'names' elements are valid features. -# -rule valid ( names + ) -{ - if $(names) in $(.all-features) - { - return true ; - } -} - - -# Returns the attibutes of the given feature. -# -rule attributes ( feature ) -{ - return $($(:E=:G=$(feature)).attributes) ; -} - - -# Returns the values of the given feature. -# -rule values ( feature ) -{ - return $($(:E=:G=$(feature)).values) ; -} - - -# Returns true iff 'value-string' is a value-string of an implicit feature. -# -rule is-implicit-value ( value-string ) -{ - local v = [ regex.split $(value-string) - ] ; - local failed ; - if ! $(v[1]) in $(.all-implicit-values) - { - failed = true ; - } - else - { - local feature = $($(v[1]).implicit-feature) ; - for local subvalue in $(v[2-]) - { - if ! [ find-implied-subfeature $(feature) $(subvalue) : $(v[1]) ] - { - failed = true ; - } - } - } - - if ! $(failed) - { - return true ; - } -} - - -# Returns the implicit feature associated with the given implicit value. -# -rule implied-feature ( implicit-value ) -{ - local components = [ regex.split $(implicit-value) "-" ] ; - - local feature = $($(components[1]).implicit-feature) ; - if ! $(feature) - { - errors.error \"$(implicit-value)\" is not a value of an implicit feature ; - feature = "" ; # Keep testing happy; it expects a result. - } - return $(feature) ; -} - - -local rule find-implied-subfeature ( feature subvalue : value-string ? ) -{ - # Feature should be of the form <feature-name>. 
- if $(feature) != $(feature:G) - { - errors.error invalid feature $(feature) ; - } - - return $($(feature)$(value-string:E="")<>$(subvalue).subfeature) ; -} - - -# Given a feature and a value of one of its subfeatures, find the name of the -# subfeature. If value-string is supplied, looks for implied subfeatures that -# are specific to that value of feature -# -rule implied-subfeature ( - feature # The main feature name. - subvalue # The value of one of its subfeatures. - : value-string ? # The value of the main feature. -) -{ - local subfeature = [ find-implied-subfeature $(feature) $(subvalue) - : $(value-string) ] ; - if ! $(subfeature) - { - value-string ?= "" ; - errors.error \"$(subvalue)\" is not a known subfeature value of - $(feature)$(value-string) ; - } - return $(subfeature) ; -} - - -# Generate an error if the feature is unknown. -# -local rule validate-feature ( feature ) -{ - if ! $(feature) in $(.all-features) - { - errors.error unknown feature \"$(feature)\" ; - } -} - - -# Given a feature and its value or just a value corresponding to an implicit -# feature, returns a property set consisting of all component subfeatures and -# their values. For example all the following calls: -# -# expand-subfeatures-aux <toolset>gcc-2.95.2-linux-x86 -# expand-subfeatures-aux gcc-2.95.2-linux-x86 -# -# return: -# -# <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86 -# -local rule expand-subfeatures-aux ( - feature ? # Feature name or empty if value corresponds to an - # implicit property. - : value # Feature value. - : dont-validate ? # If set, no value string validation will be done. -) -{ - if $(feature) - { - feature = $(feature) ; - } - - if ! $(feature) - { - feature = [ implied-feature $(value) ] ; - } - else - { - validate-feature $(feature) ; - } - if ! $(dont-validate) - { - validate-value-string $(feature) $(value) ; - } - - local components = [ regex.split $(value) "-" ] ; - - # Get the top-level feature's value. - local value = $(components[1]:G=) ; - - local result = $(components[1]:G=$(feature)) ; - - local subvalues = $(components[2-]) ; - while $(subvalues) - { - local subvalue = $(subvalues[1]) ; # Pop the head off of subvalues. - subvalues = $(subvalues[2-]) ; - - local subfeature = [ find-implied-subfeature $(feature) $(subvalue) : - $(value) ] ; - - # If no subfeature was found reconstitute the value string and use that. - if ! $(subfeature) - { - result = $(components:J=-) ; - result = $(result:G=$(feature)) ; - subvalues = ; # Stop looping. - } - else - { - local f = [ MATCH ^<(.*)>$ : $(feature) ] ; - result += $(subvalue:G=$(f)-$(subfeature)) ; - } - } - - return $(result) ; -} - - -# Make all elements of properties corresponding to implicit features explicit, -# and express all subfeature values as separate properties in their own right. -# For example, all of the following properties -# -# gcc-2.95.2-linux-x86 -# <toolset>gcc-2.95.2-linux-x86 -# -# might expand to -# -# <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86 -# -rule expand-subfeatures ( - properties * # Property set with elements of the form - # <feature>value-string or just value-string in the case - # of implicit features. - : dont-validate ? -) -{ - local result ; - for local p in $(properties) - { - # Don't expand subfeatures in subfeatures - if ! [ MATCH "(:)" : $(p:G) ] - { - result += [ expand-subfeatures-aux $(p:G) : $(p:G=) : $(dont-validate) ] ; - } - else - { - result += $(p) ; - } - } - return $(result) ; -} - - -# Helper for extend, below. 
Handles the feature case. -# -local rule extend-feature ( feature : values * ) -{ - feature = [ grist $(feature) ] ; - validate-feature $(feature) ; - if implicit in $($(feature).attributes) - { - for local v in $(values) - { - if $($(v).implicit-feature) - { - errors.error $(v) is already associated with the \"$($(v).implicit-feature)\" feature ; - } - $(v).implicit-feature = $(feature) ; - } - - .all-implicit-values += $(values) ; - } - if ! $($(feature).values) - { - # This is the first value specified for this feature so make it be the - # default. - $(feature).default = $(values[1]) ; - } - $(feature).values += $(values) ; -} - - -# Checks that value-string is a valid value-string for the given feature. -# -rule validate-value-string ( feature value-string ) -{ - if ! ( - free in $($(feature).attributes) - || ( $(value-string) in $(feature).values ) - ) - { - local values = $(value-string) ; - - if $($(feature).subfeatures) - { - if ! ( $(value-string) in $($(feature).values) ) - && ! ( $(value-string) in $($(feature).subfeatures) ) - { - values = [ regex.split $(value-string) - ] ; - } - } - - if ! ( $(values[1]) in $($(feature).values) ) && - - # An empty value is allowed for optional features. - ( $(values[1]) || ! ( optional in $($(feature).attributes) ) ) - { - errors.error \"$(values[1])\" is not a known value of feature $(feature) - : legal values: \"$($(feature).values)\" ; - } - - for local v in $(values[2-]) - { - # This will validate any subfeature values in value-string. - implied-subfeature $(feature) $(v) : $(values[1]) ; - } - } -} - - -# A helper that computes: -# * name(s) of module-local variable(s) used to record the correspondence -# between subvalue(s) and a subfeature -# * value of that variable when such a subfeature/subvalue has been defined and -# returns a list consisting of the latter followed by the former. -# -local rule subvalue-var ( - feature # Main feature name. - value-string ? # If supplied, specifies a specific value of the main - # feature for which the subfeature values are valid. - : subfeature # Subfeature name. - : subvalues * # Subfeature values. -) -{ - feature = [ grist $(feature) ] ; - validate-feature $(feature) ; - if $(value-string) - { - validate-value-string $(feature) $(value-string) ; - } - - local subfeature-name = [ get-subfeature-name $(subfeature) $(value-string) ] ; - - return $(subfeature-name) - $(feature)$(value-string:E="")<>$(subvalues).subfeature ; -} - - -# Extends the given subfeature with the subvalues. If the optional value-string -# is provided, the subvalues are only valid for the given value of the feature. -# Thus, you could say that <target-platform>mingw is specific to -# <toolset>gcc-2.95.2 as follows: -# -# extend-subfeature toolset gcc-2.95.2 : target-platform : mingw ; -# -rule extend-subfeature ( - feature # The feature whose subfeature is being extended. - - value-string ? # If supplied, specifies a specific value of the main - # feature for which the new subfeature values are valid. - - : subfeature # Subfeature name. - : subvalues * # Additional subfeature values. -) -{ - local subfeature-vars = [ subvalue-var $(feature) $(value-string) - : $(subfeature) : $(subvalues) ] ; - - local f = [ utility.ungrist [ grist $(feature) ] ] ; - extend $(f)-$(subfeature-vars[1]) : $(subvalues) ; - - # Provide a way to get from the given feature or property and subfeature - # value to the subfeature name. 
- $(subfeature-vars[2-]) = $(subfeature-vars[1]) ; -} - - -# Returns true iff the subvalues are valid for the feature. When the optional -# value-string is provided, returns true iff the subvalues are valid for the -# given value of the feature. -# -rule is-subvalue ( feature : value-string ? : subfeature : subvalue ) -{ - local subfeature-vars = [ subvalue-var $(feature) $(value-string) - : $(subfeature) : $(subvalue) ] ; - - if $($(subfeature-vars[2])) = $(subfeature-vars[1]) - { - return true ; - } -} - - -# Can be called three ways: -# -# 1. extend feature : values * -# 2. extend <feature> subfeature : values * -# 3. extend <feature>value-string subfeature : values * -# -# * Form 1 adds the given values to the given feature. -# * Forms 2 and 3 add subfeature values to the given feature. -# * Form 3 adds the subfeature values as specific to the given property -# value-string. -# -rule extend ( feature-or-property subfeature ? : values * ) -{ - local feature ; # If a property was specified this is its feature. - local value-string ; # E.g., the gcc-2.95-2 part of <toolset>gcc-2.95.2. - - # If a property was specified. - if $(feature-or-property:G) && $(feature-or-property:G=) - { - # Extract the feature and value-string, if any. - feature = $(feature-or-property:G) ; - value-string = $(feature-or-property:G=) ; - } - else - { - feature = [ grist $(feature-or-property) ] ; - } - - # Dispatch to the appropriate handler. - if $(subfeature) - { - extend-subfeature $(feature) $(value-string) : $(subfeature) - : $(values) ; - } - else - { - # If no subfeature was specified, we do not expect to see a - # value-string. - if $(value-string) - { - errors.error can only specify a property as the first argument when - extending a subfeature - : usage: - : " extend" feature ":" values... - : " | extend" <feature>value-string subfeature ":" values... - ; - } - - extend-feature $(feature) : $(values) ; - } -} - - -local rule get-subfeature-name ( subfeature value-string ? ) -{ - local prefix = $(value-string): ; - return $(prefix:E="")$(subfeature) ; -} - - -# Declares a subfeature. -# -rule subfeature ( - feature # Root feature that is not a subfeature. - value-string ? # A value-string specifying which feature or subfeature - # values this subfeature is specific to, if any. - : subfeature # The name of the subfeature being declared. - : subvalues * # The allowed values of this subfeature. - : attributes * # The attributes of the subfeature. -) -{ - feature = [ grist $(feature) ] ; - validate-feature $(feature) ; - - # Add grist to the subfeature name if a value-string was supplied. - local subfeature-name = [ get-subfeature-name $(subfeature) $(value-string) ] ; - - if $(subfeature-name) in $($(feature).subfeatures) - { - errors.error \"$(subfeature)\" already declared as a subfeature of \"$(feature)\" - "specific to "$(value-string) ; - } - $(feature).subfeatures += $(subfeature-name) ; - - # First declare the subfeature as a feature in its own right. - local f = [ utility.ungrist $(feature) ] ; - feature $(f)-$(subfeature-name) : $(subvalues) : $(attributes) subfeature ; - - # Now make sure the subfeature values are known. - extend-subfeature $(feature) $(value-string) : $(subfeature) : $(subvalues) ; -} - - -# Set components of the given composite property. -# -rule compose ( composite-property : component-properties * ) -{ - local feature = $(composite-property:G) ; - if ! 
( composite in [ attributes $(feature) ] ) - { - errors.error "$(feature)" is not a composite feature ; - } - - $(composite-property).components ?= ; - if $($(composite-property).components) - { - errors.error components of "$(composite-property)" already set: - $($(composite-property).components) ; - } - - if $(composite-property) in $(component-properties) - { - errors.error composite property "$(composite-property)" cannot have itself as a component ; - } - $(composite-property).components = $(component-properties) ; -} - - -local rule expand-composite ( property ) -{ - return $(property) - [ sequence.transform expand-composite : $($(property).components) ] ; -} - - -# Return all values of the given feature specified by the given property set. -# -rule get-values ( feature : properties * ) -{ - local result ; - - feature = $(:E=:G=$(feature)) ; # Add <> if necessary. - for local p in $(properties) - { - if $(p:G) = $(feature) - { - # Use MATCH instead of :G= to get the value, in order to preserve - # the value intact instead of having bjam treat it as a decomposable - # path. - result += [ MATCH ">(.*)" : $(p) ] ; - } - } - return $(result) ; -} - - -rule free-features ( ) -{ - return $(free.features) ; -} - - -# Expand all composite properties in the set so that all components are -# explicitly expressed. -# -rule expand-composites ( properties * ) -{ - local explicit-features = $(properties:G) ; - local result ; - - # Now expand composite features. - for local p in $(properties) - { - local expanded = [ expand-composite $(p) ] ; - - for local x in $(expanded) - { - if ! $(x) in $(result) - { - local f = $(x:G) ; - - if $(f) in $(free.features) - { - result += $(x) ; - } - else if ! $(x) in $(properties) # x is the result of expansion - { - if ! $(f) in $(explicit-features) # not explicitly-specified - { - if $(f) in $(result:G) - { - errors.error expansions of composite features result - in conflicting values for $(f) - : values: [ get-values $(f) : $(result) ] $(x:G=) - : one contributing composite property was $(p) ; - } - else - { - result += $(x) ; - } - } - } - else if $(f) in $(result:G) - { - errors.error explicitly-specified values of non-free feature - $(f) conflict : - "existing values:" [ get-values $(f) : $(properties) ] : - "value from expanding " $(p) ":" $(x:G=) ; - } - else - { - result += $(x) ; - } - } - } - } - return $(result) ; -} - - -# Return true iff f is an ordinary subfeature of the parent-property's feature, -# or if f is a subfeature of the parent-property's feature specific to the -# parent-property's value. -# -local rule is-subfeature-of ( parent-property f ) -{ - if subfeature in $($(f).attributes) - { - local specific-subfeature = [ MATCH <(.*):(.*)> : $(f) ] ; - if $(specific-subfeature) - { - # The feature has the form <topfeature-topvalue:subfeature>, e.g. - # <toolset-msvc:version>. - local feature-value = [ split-top-feature $(specific-subfeature[1]) - ] ; - if <$(feature-value[1])>$(feature-value[2]) = $(parent-property) - { - return true ; - } - } - else - { - # The feature has the form <topfeature-subfeature>, e.g. - # <toolset-version> - local top-sub = [ split-top-feature [ utility.ungrist $(f) ] ] ; - if $(top-sub[2]) && <$(top-sub[1])> = $(parent-property:G) - { - return true ; - } - } - } -} - - -# As for is-subfeature-of but for subproperties. 
-# -local rule is-subproperty-of ( parent-property p ) -{ - return [ is-subfeature-of $(parent-property) $(p:G) ] ; -} - - -# Given a property, return the subset of features consisting of all ordinary -# subfeatures of the property's feature, and all specific subfeatures of the -# property's feature which are conditional on the property's value. -# -local rule select-subfeatures ( parent-property : features * ) -{ - return [ sequence.filter is-subfeature-of $(parent-property) : $(features) ] ; -} - - -# As for select-subfeatures but for subproperties. -# -local rule select-subproperties ( parent-property : properties * ) -{ - return [ sequence.filter is-subproperty-of $(parent-property) : $(properties) ] ; -} - - -# Given a property set which may consist of composite and implicit properties -# and combined subfeature values, returns an expanded, normalized property set -# with all implicit features expressed explicitly, all subfeature values -# individually expressed, and all components of composite properties expanded. -# Non-free features directly expressed in the input properties cause any values -# of those features due to composite feature expansion to be dropped. If two -# values of a given non-free feature are directly expressed in the input, an -# error is issued. -# -rule expand ( properties * ) -{ - local expanded = [ expand-subfeatures $(properties) ] ; - return [ expand-composites $(expanded) ] ; -} - - -# Helper rule for minimize. Returns true iff property's feature is present in -# the contents of the variable named by feature-set-var. -# -local rule in-features ( feature-set-var property ) -{ - if $(property:G) in $($(feature-set-var)) - { - return true ; - } -} - - -# Helper rule for minimize. Returns the list with the same properties, but with -# all subfeatures moved to the end of the list. -# -local rule move-subfeatures-to-the-end ( properties * ) -{ - local x1 ; - local x2 ; - for local p in $(properties) - { - if subfeature in $($(p:G).attributes) - { - x2 += $(p) ; - } - else - { - x1 += $(p) ; - } - } - return $(x1) $(x2) ; -} - - -# Given an expanded property set, eliminate all redundancy: properties that are -# elements of other (composite) properties in the set will be eliminated. -# Non-symmetric properties equal to default values will be eliminated unless -# they override a value from some composite property. Implicit properties will -# be expressed without feature grist, and sub-property values will be expressed -# as elements joined to the corresponding main property. -# -rule minimize ( properties * ) -{ - # Precondition checking - local implicits = [ set.intersection $(p:G=) : $(p:G) ] ; - if $(implicits) - { - errors.error minimize requires an expanded property set, but - \"$(implicits[1])\" appears to be the value of an un-expanded - implicit feature ; - } - - # Remove properties implied by composite features. - local components = $($(properties).components) ; - local x = [ set.difference $(properties) : $(components) ] ; - - # Handle subfeatures and implicit features. - x = [ move-subfeatures-to-the-end $(x) ] ; - local result ; - while $(x) - { - local p fullp = $(x[1]) ; - local f = $(p:G) ; - local v = $(p:G=) ; - - # Eliminate features in implicit properties. - if implicit in [ attributes $(f) ] - { - p = $(v) ; - } - - # Locate all subproperties of $(x[1]) in the property set. - local subproperties = [ select-subproperties $(fullp) : $(x) ] ; - if $(subproperties) - { - # Reconstitute the joined property name. 
- local sorted = [ sequence.insertion-sort $(subproperties) ] ; - result += $(p)-$(sorted:G="":J=-) ; - - x = [ set.difference $(x[2-]) : $(subproperties) ] ; - } - else - { - # Eliminate properties whose value is equal to feature's default, - # which are not symmetric and which do not contradict values implied - # by composite properties. - - # Since all component properties of composites in the set have been - # eliminated, any remaining property whose feature is the same as a - # component of a composite in the set must have a non-redundant - # value. - if $(fullp) != [ defaults $(f) ] - || symmetric in [ attributes $(f) ] - || $(fullp:G) in $(components:G) - { - result += $(p) ; - } - - x = $(x[2-]) ; - } - } - return $(result) ; -} - - -# Combine all subproperties into their parent properties -# -# Requires: for every subproperty, there is a parent property. All features are -# explicitly expressed. -# -# This rule probably should not be needed, but build-request.expand-no-defaults -# is being abused for unintended purposes and it needs help. -# -rule compress-subproperties ( properties * ) -{ - local all-subs ; - local matched-subs ; - local result ; - - for local p in $(properties) - { - if ! $(p:G) - { - # Expecting fully-gristed properties. - assert.variable-not-empty p:G ; - } - - if ! subfeature in $($(p:G).attributes) - { - local subs = [ sequence.insertion-sort - [ sequence.filter is-subproperty-of $(p) : $(properties) ] ] ; - - matched-subs += $(subs) ; - - local subvalues = -$(subs:G=:J=-) ; - subvalues ?= "" ; - result += $(p)$(subvalues) ; - } - else - { - all-subs += $(p) ; - } - } - assert.result true : set.equal $(all-subs) : $(matched-subs) ; - return $(result) ; -} - - -# Given an ungristed string, finds the longest prefix which is a top-level -# feature name followed by a dash, and return a pair consisting of the parts -# before and after that dash. More interesting than a simple split because -# feature names may contain dashes. -# -local rule split-top-feature ( feature-plus ) -{ - local e = [ regex.split $(feature-plus) - ] ; - local f = $(e[1]) ; - local v ; - while $(e) - { - if <$(f)> in $(.all-top-features) - { - v = $(f) $(e[2-]:J=-) ; - } - e = $(e[2-]) ; - f = $(f)-$(e[1]) ; - } - return $(v) ; -} - - -# Given a set of properties, add default values for features not represented in -# the set. -# -# Note: if there's an ordinary feature F1 and a composite feature F2 which -# includes some value for F1 and both feature have default values then the -# default value of F1 will be added (as opposed to the value in F2). This might -# not be the right idea, e.g. consider: -# -# feature variant : debug ... ; -# <variant>debug : .... <runtime-debugging>on -# feature <runtime-debugging> : off on ; -# -# Here, when adding default for an empty property set, we'll get -# -# <variant>debug <runtime_debugging>off -# -# and that's kind of strange. -# -rule add-defaults ( properties * ) -{ - for local v in $(properties:G=) - { - if $(v) in $(properties) - { - errors.error add-defaults requires explicitly specified features, - but \"$(v)\" appears to be the value of an un-expanded implicit - feature ; - } - } - # We don't add default for elements with ":" inside. This catches: - # 1. Conditional properties --- we don't want <variant>debug:<define>DEBUG - # to be takes as specified value for <variant> - # 2. Free properties with ":" in values. We don't care, since free - # properties don't have defaults. 
- local xproperties = [ MATCH "^([^:]+)$" : $(properties) ] ; - local missing-top = [ set.difference $(.all-top-features) : $(xproperties:G) ] ; - local more = [ defaults $(missing-top) ] ; - properties += $(more) ; - xproperties += $(more) ; - - # Add defaults for subfeatures of features which are present. - for local p in $(xproperties) - { - local s = $($(p:G).subfeatures) ; - local f = [ utility.ungrist $(p:G) ] ; - local missing-subs = [ set.difference <$(f)-$(s)> : $(properties:G) ] ; - properties += [ defaults [ select-subfeatures $(p) : $(missing-subs) ] ] ; - } - - return $(properties) ; -} - - -# Given a property-set of the form -# v1/v2/...vN-1/<fN>vN/<fN+1>vN+1/...<fM>vM -# -# Returns -# v1 v2 ... vN-1 <fN>vN <fN+1>vN+1 ... <fM>vM -# -# Note that vN...vM may contain slashes. This needs to be resilient to the -# substitution of backslashes for slashes, since Jam, unbidden, sometimes swaps -# slash direction on NT. -# -rule split ( property-set ) -{ - local pieces = [ regex.split $(property-set) [\\/] ] ; - local result ; - - for local x in $(pieces) - { - if ( ! $(x:G) ) && $(result[-1]:G) - { - result = $(result[1--2]) $(result[-1])/$(x) ; - } - else - { - result += $(x) ; - } - } - - return $(result) ; -} - - -# Tests of module feature. -# -rule __test__ ( ) -{ - # Use a fresh copy of the feature module. - prepare-test feature-test-temp ; - - import assert ; - import errors : try catch ; - - # These are local rules and so must be explicitly reimported into the - # testing module. - import feature : extend-feature validate-feature select-subfeatures ; - - feature toolset : gcc : implicit ; - feature define : : free ; - feature runtime-link : dynamic static : symmetric ; - feature optimization : on off ; - feature variant : debug release profile : implicit composite symmetric ; - feature stdlib : native stlport ; - feature magic : : free ; - - compose <variant>debug : <define>_DEBUG <optimization>off ; - compose <variant>release : <define>NDEBUG <optimization>on ; - - assert.result dynamic static : values <runtime-link> ; - assert.result dynamic static : values runtime-link ; - - try ; - { - compose <variant>profile : <variant>profile ; - } - catch composite property <variant>profile cannot have itself as a component ; - - extend-feature toolset : msvc metrowerks ; - subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4 3.0 3.0.1 3.0.2 ; - - assert.true is-subvalue toolset : gcc : version : 2.95.3 ; - assert.false is-subvalue toolset : gcc : version : 1.1 ; - - assert.false is-subvalue toolset : msvc : version : 2.95.3 ; - assert.false is-subvalue toolset : : version : yabba ; - - feature yabba ; - subfeature yabba : version : dabba ; - assert.true is-subvalue yabba : : version : dabba ; - - subfeature toolset gcc : platform : linux cygwin : optional ; - - assert.result <toolset-gcc:version> - : select-subfeatures <toolset>gcc - : <toolset-gcc:version> - <toolset-msvc:version> - <toolset-version> - <stdlib> ; - - subfeature stdlib : version : 3 4 : optional ; - - assert.result <stdlib-version> - : select-subfeatures <stdlib>native - : <toolset-gcc:version> - <toolset-msvc:version> - <toolset-version> - <stdlib-version> ; - - assert.result <toolset>gcc <toolset-gcc:version>3.0.1 - : expand-subfeatures <toolset>gcc-3.0.1 ; - - assert.result <toolset>gcc <toolset-gcc:version>3.0.1 <toolset-gcc:platform>linux - : expand-subfeatures <toolset>gcc-3.0.1-linux ; - - assert.result <toolset>gcc <toolset-gcc:version>3.0.1 - : expand <toolset>gcc <toolset-gcc:version>3.0.1 ; - - 
assert.result <define>foo=x-y - : expand-subfeatures <define>foo=x-y ; - - assert.result <toolset>gcc <toolset-gcc:version>3.0.1 - : expand-subfeatures gcc-3.0.1 ; - - assert.result a c e - : get-values <x> : <x>a <y>b <x>c <y>d <x>e ; - - assert.result <toolset>gcc <toolset-gcc:version>3.0.1 - <variant>debug <define>_DEBUG <optimization>on - : expand gcc-3.0.1 debug <optimization>on ; - - assert.result <variant>debug <define>_DEBUG <optimization>on - : expand debug <optimization>on ; - - assert.result <optimization>on <variant>debug <define>_DEBUG - : expand <optimization>on debug ; - - assert.result <runtime-link>dynamic <optimization>on - : defaults <runtime-link> <define> <optimization> ; - - # Make sure defaults is resilient to missing grist. - assert.result <runtime-link>dynamic <optimization>on - : defaults runtime-link define optimization ; - - feature dummy : dummy1 dummy2 ; - subfeature dummy : subdummy : x y z : optional ; - - feature fu : fu1 fu2 : optional ; - subfeature fu : subfu : x y z : optional ; - subfeature fu : subfu2 : q r s ; - - assert.result optional : attributes <fu> ; - assert.result optional : attributes fu ; - - assert.result <runtime-link>static <define>foobar <optimization>on - <toolset>gcc:<define>FOO <toolset>gcc <variant>debug <stdlib>native - <dummy>dummy1 <toolset-gcc:version>2.95.2 - : add-defaults <runtime-link>static <define>foobar <optimization>on - <toolset>gcc:<define>FOO ; - - assert.result <runtime-link>static <define>foobar <optimization>on - <toolset>gcc:<define>FOO <fu>fu1 <toolset>gcc <variant>debug - <stdlib>native <dummy>dummy1 <fu-subfu2>q <toolset-gcc:version>2.95.2 - : add-defaults <runtime-link>static <define>foobar <optimization>on - <toolset>gcc:<define>FOO <fu>fu1 ; - - set-default <runtime-link> : static ; - assert.result <runtime-link>static : defaults <runtime-link> ; - - assert.result gcc-3.0.1 debug <optimization>on - : minimize [ expand gcc-3.0.1 debug <optimization>on <stdlib>native ] ; - - assert.result gcc-3.0.1 debug <runtime-link>dynamic - : minimize - [ expand gcc-3.0.1 debug <optimization>off <runtime-link>dynamic ] ; - - assert.result gcc-3.0.1 debug - : minimize [ expand gcc-3.0.1 debug <optimization>off ] ; - - assert.result debug <optimization>on - : minimize [ expand debug <optimization>on ] ; - - assert.result gcc-3.0 - : minimize <toolset>gcc <toolset-gcc:version>3.0 ; - - assert.result gcc-3.0 - : minimize <toolset-gcc:version>3.0 <toolset>gcc ; - - assert.result <x>y/z <a>b/c <d>e/f - : split <x>y/z/<a>b/c/<d>e/f ; - - assert.result <x>y/z <a>b/c <d>e/f - : split <x>y\\z\\<a>b\\c\\<d>e\\f ; - - assert.result a b c <d>e/f/g <h>i/j/k - : split a/b/c/<d>e/f/g/<h>i/j/k ; - - assert.result a b c <d>e/f/g <h>i/j/k - : split a\\b\\c\\<d>e\\f\\g\\<h>i\\j\\k ; - - # Test error checking. 
- - try ; - { - expand release <optimization>off <optimization>on ; - } - catch explicitly-specified values of non-free feature <optimization> conflict ; - - try ; - { - validate-feature <foobar> ; - } - catch unknown feature ; - - validate-value-string <toolset> gcc ; - validate-value-string <toolset> gcc-3.0.1 ; - - try ; - { - validate-value-string <toolset> digital_mars ; - } - catch \"digital_mars\" is not a known value of <toolset> ; - - try ; - { - feature foobar : : baz ; - } - catch unknown attributes: baz ; - - feature feature1 ; - try ; - { - feature feature1 ; - } - catch feature already defined: ; - - try ; - { - feature feature2 : : free implicit ; - } - catch free features cannot also be implicit ; - - try ; - { - feature feature3 : : free propagated ; - } - catch free features cannot be propagated ; - - try ; - { - implied-feature lackluster ; - } - catch \"lackluster\" is not a value of an implicit feature ; - - try ; - { - implied-subfeature <toolset> 3.0.1 ; - } - catch \"3.0.1\" is not a known subfeature value of <toolset> ; - - try ; - { - implied-subfeature <toolset> not-a-version : gcc ; - } - catch \"not-a-version\" is not a known subfeature value of <toolset>gcc ; - - # Leave a clean copy of the features module behind. - finish-test feature-test-temp ; -} diff --git a/jam-files/boost-build/build/feature.py b/jam-files/boost-build/build/feature.py deleted file mode 100644 index 315a18e9..00000000 --- a/jam-files/boost-build/build/feature.py +++ /dev/null @@ -1,905 +0,0 @@ -# Status: ported, except for unit tests. -# Base revision: 64488 -# -# Copyright 2001, 2002, 2003 Dave Abrahams -# Copyright 2002, 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import re - -from b2.util import utility, bjam_signature -import b2.util.set -from b2.util.utility import add_grist, get_grist, ungrist, replace_grist, to_seq -from b2.exceptions import * - -__re_split_subfeatures = re.compile ('<(.*):(.*)>') -__re_no_hyphen = re.compile ('^([^:]+)$') -__re_slash_or_backslash = re.compile (r'[\\/]') - -class Feature(object): - - # Map from string attribute names to integers bit flags. - # This will be initialized after declaration of the class. - _attribute_name_to_integer = {} - - def __init__(self, name, values, attributes): - self._name = name - self._values = values - self._default = None - self._attributes = 0 - for a in attributes: - self._attributes = self._attributes | Feature._attribute_name_to_integer[a] - self._attributes_string_list = attributes - self._subfeatures = [] - self._parent = None - - def name(self): - return self._name - - def values(self): - return self._values - - def add_values(self, values): - self._values.extend(values) - - def attributes(self): - return self._attributes - - def set_default(self, value): - self._default = value - - def default(self): - return self._default - - # FIXME: remove when we fully move to using classes for features/properties - def attributes_string_list(self): - return self._attributes_string_list - - def subfeatures(self): - return self._subfeatures - - def add_subfeature(self, name): - self._subfeatures.append(name) - - def parent(self): - """For subfeatures, return pair of (parent_feature, value). - - Value may be None if this subfeature is not specific to any - value of the parent feature. 
- """ - return self._parent - - def set_parent(self, feature, value): - self._parent = (feature, value) - - def __str__(self): - return self._name - - -def reset (): - """ Clear the module state. This is mainly for testing purposes. - """ - global __all_attributes, __all_features, __implicit_features, __composite_properties - global __features_with_attributes, __subfeature_from_value, __all_top_features, __free_features - global __all_subfeatures - - # The list with all attribute names. - __all_attributes = [ 'implicit', - 'composite', - 'optional', - 'symmetric', - 'free', - 'incidental', - 'path', - 'dependency', - 'propagated', - 'link-incompatible', - 'subfeature', - 'order-sensitive' - ] - i = 1 - for a in __all_attributes: - setattr(Feature, a.upper(), i) - Feature._attribute_name_to_integer[a] = i - def probe(self, flag=i): - return getattr(self, "_attributes") & flag - setattr(Feature, a.replace("-", "_"), probe) - i = i << 1 - - # A map containing all features. The key is the feature name. - # The value is an instance of Feature class. - __all_features = {} - - # All non-subfeatures. - __all_top_features = [] - - # Maps valus to the corresponding implicit feature - __implicit_features = {} - - # A map containing all composite properties. The key is a Property instance, - # and the value is a list of Property instances - __composite_properties = {} - - __features_with_attributes = {} - for attribute in __all_attributes: - __features_with_attributes [attribute] = [] - - # Maps a value to the corresponding subfeature name. - __subfeature_from_value = {} - - # All free features - __free_features = [] - - __all_subfeatures = [] - -reset () - -def enumerate (): - """ Returns an iterator to the features map. - """ - return __all_features.iteritems () - -def get(name): - """Return the Feature instance for the specified name. - - Throws if no feature by such name exists - """ - return __all_features[name] - -# FIXME: prepare-test/finish-test? - -@bjam_signature((["name"], ["values", "*"], ["attributes", "*"])) -def feature (name, values, attributes = []): - """ Declares a new feature with the given name, values, and attributes. - name: the feature name - values: a sequence of the allowable values - may be extended later with feature.extend - attributes: a sequence of the feature's attributes (e.g. implicit, free, propagated, ...) - """ - __validate_feature_attributes (name, attributes) - - feature = Feature(name, [], attributes) - __all_features[name] = feature - # Temporary measure while we have not fully moved from 'gristed strings' - __all_features["<" + name + ">"] = feature - - for attribute in attributes: - __features_with_attributes [attribute].append (name) - - name = add_grist(name) - - if 'subfeature' in attributes: - __all_subfeatures.append(name) - else: - __all_top_features.append(feature) - - extend (name, values) - - # FIXME: why his is needed. - if 'free' in attributes: - __free_features.append (name) - - return feature - -@bjam_signature((["feature"], ["value"])) -def set_default (feature, value): - """ Sets the default value of the given feature, overriding any previous default. 
- feature: the name of the feature - value: the default value to assign - """ - f = __all_features[feature] - attributes = f.attributes() - bad_attribute = None - - if attributes & Feature.FREE: - bad_attribute = "free" - elif attributes & Feature.OPTIONAL: - bad_attribute = "optional" - - if bad_attribute: - raise InvalidValue ("%s property %s cannot have a default" % (bad_attribute, feature.name())) - - if not value in f.values(): - raise InvalidValue ("The specified default value, '%s' is invalid.\n" % value + "allowed values are: %s" % values) - - f.set_default(value) - -def defaults(features): - """ Returns the default property values for the given features. - """ - # FIXME: should merge feature and property modules. - import property - - result = [] - for f in features: - if not f.free() and not f.optional() and f.default(): - result.append(property.Property(f, f.default())) - - return result - -def valid (names): - """ Returns true iff all elements of names are valid features. - """ - def valid_one (name): return __all_features.has_key (name) - - if isinstance (names, str): - return valid_one (names) - else: - return [ valid_one (name) for name in names ] - -def attributes (feature): - """ Returns the attributes of the given feature. - """ - return __all_features[feature].attributes_string_list() - -def values (feature): - """ Return the values of the given feature. - """ - validate_feature (feature) - return __all_features[feature].values() - -def is_implicit_value (value_string): - """ Returns true iff 'value_string' is a value_string - of an implicit feature. - """ - - if __implicit_features.has_key(value_string): - return __implicit_features[value_string] - - v = value_string.split('-') - - if not __implicit_features.has_key(v[0]): - return False - - feature = __implicit_features[v[0]] - - for subvalue in (v[1:]): - if not __find_implied_subfeature(feature, subvalue, v[0]): - return False - - return True - -def implied_feature (implicit_value): - """ Returns the implicit feature associated with the given implicit value. - """ - components = implicit_value.split('-') - - if not __implicit_features.has_key(components[0]): - raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value) - - return __implicit_features[components[0]] - -def __find_implied_subfeature (feature, subvalue, value_string): - - #if value_string == None: value_string = '' - - if not __subfeature_from_value.has_key(feature) \ - or not __subfeature_from_value[feature].has_key(value_string) \ - or not __subfeature_from_value[feature][value_string].has_key (subvalue): - return None - - return __subfeature_from_value[feature][value_string][subvalue] - -# Given a feature and a value of one of its subfeatures, find the name -# of the subfeature. If value-string is supplied, looks for implied -# subfeatures that are specific to that value of feature -# feature # The main feature name -# subvalue # The value of one of its subfeatures -# value-string # The value of the main feature - -def implied_subfeature (feature, subvalue, value_string): - result = __find_implied_subfeature (feature, subvalue, value_string) - if not result: - raise InvalidValue ("'%s' is not a known subfeature value of '%s%s'" % (subvalue, feature, value_string)) - - return result - -def validate_feature (name): - """ Checks if all name is a valid feature. Otherwise, raises an exception. 
- """ - if not __all_features.has_key(name): - raise InvalidFeature ("'%s' is not a valid feature name" % name) - else: - return __all_features[name] - -def valid (names): - """ Returns true iff all elements of names are valid features. - """ - def valid_one (name): return __all_features.has_key (name) - - if isinstance (names, str): - return valid_one (names) - else: - return [ valid_one (name) for name in names ] - -# Uses Property -def __expand_subfeatures_aux (property, dont_validate = False): - """ Helper for expand_subfeatures. - Given a feature and value, or just a value corresponding to an - implicit feature, returns a property set consisting of all component - subfeatures and their values. For example: - - expand_subfeatures <toolset>gcc-2.95.2-linux-x86 - -> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86 - equivalent to: - expand_subfeatures gcc-2.95.2-linux-x86 - - feature: The name of the feature, or empty if value corresponds to an implicit property - value: The value of the feature. - dont_validate: If True, no validation of value string will be done. - """ - f = property.feature() - v = property.value() - if not dont_validate: - validate_value_string(f, v) - - components = v.split ("-") - - v = components[0] - - import property - - result = [property.Property(f, components[0])] - - subvalues = components[1:] - - while len(subvalues) > 0: - subvalue = subvalues [0] # pop the head off of subvalues - subvalues = subvalues [1:] - - subfeature = __find_implied_subfeature (f, subvalue, v) - - # If no subfeature was found, reconstitute the value string and use that - if not subfeature: - return [property.Property(f, '-'.join(components))] - - result.append(property.Property(subfeature, subvalue)) - - return result - -def expand_subfeatures(properties, dont_validate = False): - """ - Make all elements of properties corresponding to implicit features - explicit, and express all subfeature values as separate properties - in their own right. For example, the property - - gcc-2.95.2-linux-x86 - - might expand to - - <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86 - - properties: A sequence with elements of the form - <feature>value-string or just value-string in the - case of implicit features. - : dont_validate: If True, no validation of value string will be done. - """ - result = [] - for p in properties: - # Don't expand subfeatures in subfeatures - if p.feature().subfeature(): - result.append (p) - else: - result.extend(__expand_subfeatures_aux (p, dont_validate)) - - return result - - - -# rule extend was defined as below: - # Can be called three ways: - # - # 1. extend feature : values * - # 2. extend <feature> subfeature : values * - # 3. extend <feature>value-string subfeature : values * - # - # * Form 1 adds the given values to the given feature - # * Forms 2 and 3 add subfeature values to the given feature - # * Form 3 adds the subfeature values as specific to the given - # property value-string. - # - #rule extend ( feature-or-property subfeature ? : values * ) -# -# Now, the specific rule must be called, depending on the desired operation: -# extend_feature -# extend_subfeature - -def extend (name, values): - """ Adds the given values to the given feature. 
- """ - name = add_grist (name) - __validate_feature (name) - feature = __all_features [name] - - if feature.implicit(): - for v in values: - if __implicit_features.has_key(v): - raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v])) - - __implicit_features[v] = feature - - if len (feature.values()) == 0 and len (values) > 0: - # This is the first value specified for this feature, - # take it as default value - feature.set_default(values[0]) - - feature.add_values(values) - -def validate_value_string (f, value_string): - """ Checks that value-string is a valid value-string for the given feature. - """ - if f.free() or value_string in f.values(): - return - - values = [value_string] - - if f.subfeatures(): - if not value_string in f.values() and \ - not value_string in f.subfeatures(): - values = value_string.split('-') - - # An empty value is allowed for optional features - if not values[0] in f.values() and \ - (values[0] or not f.optional()): - raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], feature, f.values())) - - for v in values [1:]: - # this will validate any subfeature values in value-string - implied_subfeature(f, v, values[0]) - - -""" Extends the given subfeature with the subvalues. If the optional - value-string is provided, the subvalues are only valid for the given - value of the feature. Thus, you could say that - <target-platform>mingw is specifc to <toolset>gcc-2.95.2 as follows: - - extend-subfeature toolset gcc-2.95.2 : target-platform : mingw ; - - feature: The feature whose subfeature is being extended. - - value-string: If supplied, specifies a specific value of the - main feature for which the new subfeature values - are valid. - - subfeature: The name of the subfeature. - - subvalues: The additional values of the subfeature being defined. -""" -def extend_subfeature (feature_name, value_string, subfeature_name, subvalues): - - feature = validate_feature(feature_name) - - if value_string: - validate_value_string(feature, value_string) - - subfeature_name = feature_name + '-' + __get_subfeature_name (subfeature_name, value_string) - - extend(subfeature_name, subvalues) ; - subfeature = __all_features[subfeature_name] - - if value_string == None: value_string = '' - - if not __subfeature_from_value.has_key(feature): - __subfeature_from_value [feature] = {} - - if not __subfeature_from_value[feature].has_key(value_string): - __subfeature_from_value [feature][value_string] = {} - - for subvalue in subvalues: - __subfeature_from_value [feature][value_string][subvalue] = subfeature - -@bjam_signature((["feature_name", "value_string", "?"], ["subfeature"], - ["subvalues", "*"], ["attributes", "*"])) -def subfeature (feature_name, value_string, subfeature, subvalues, attributes = []): - """ Declares a subfeature. - feature_name: Root feature that is not a subfeature. - value_string: An optional value-string specifying which feature or - subfeature values this subfeature is specific to, - if any. - subfeature: The name of the subfeature being declared. - subvalues: The allowed values of this subfeature. - attributes: The attributes of the subfeature. 
- """ - parent_feature = validate_feature (feature_name) - - # Add grist to the subfeature name if a value-string was supplied - subfeature_name = __get_subfeature_name (subfeature, value_string) - - if subfeature_name in __all_features[feature_name].subfeatures(): - message = "'%s' already declared as a subfeature of '%s'" % (subfeature, feature_name) - message += " specific to '%s'" % value_string - raise BaseException (message) - - # First declare the subfeature as a feature in its own right - f = feature (feature_name + '-' + subfeature_name, subvalues, attributes + ['subfeature']) - f.set_parent(parent_feature, value_string) - - parent_feature.add_subfeature(f) - - # Now make sure the subfeature values are known. - extend_subfeature (feature_name, value_string, subfeature, subvalues) - - -@bjam_signature((["composite_property_s"], ["component_properties_s", "*"])) -def compose (composite_property_s, component_properties_s): - """ Sets the components of the given composite property. - - All paremeters are <feature>value strings - """ - import property - - component_properties_s = to_seq (component_properties_s) - composite_property = property.create_from_string(composite_property_s) - f = composite_property.feature() - - if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property): - component_properties = component_properties_s - else: - component_properties = [property.create_from_string(p) for p in component_properties_s] - - if not f.composite(): - raise BaseException ("'%s' is not a composite feature" % f) - - if __composite_properties.has_key(property): - raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property]))) - - if composite_property in component_properties: - raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property) - - __composite_properties[composite_property] = component_properties - - -def expand_composite(property): - result = [ property ] - if __composite_properties.has_key(property): - for p in __composite_properties[property]: - result.extend(expand_composite(p)) - return result - - -def get_values (feature, properties): - """ Returns all values of the given feature specified by the given property set. - """ - result = [] - for p in properties: - if get_grist (p) == feature: - result.append (replace_grist (p, '')) - - return result - -def free_features (): - """ Returns all free features. - """ - return __free_features - -def expand_composites (properties): - """ Expand all composite properties in the set so that all components - are explicitly expressed. 
- """ - explicit_features = set(p.feature() for p in properties) - - result = [] - - # now expand composite features - for p in properties: - expanded = expand_composite(p) - - for x in expanded: - if not x in result: - f = x.feature() - - if f.free(): - result.append (x) - elif not x in properties: # x is the result of expansion - if not f in explicit_features: # not explicitly-specified - if any(r.feature() == f for r in result): - raise FeatureConflict( - "expansions of composite features result in " - "conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" % - (f.name(), [r.value() for r in result if r.feature() == f] + [x.value()], p)) - else: - result.append (x) - elif any(r.feature() == f for r in result): - raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n" - "existing values: '%s'\nvalue from expanding '%s': '%s'" % (f, - [r.value() for r in result if r.feature() == f], p, x.value())) - else: - result.append (x) - - return result - -# Uses Property -def is_subfeature_of (parent_property, f): - """ Return true iff f is an ordinary subfeature of the parent_property's - feature, or if f is a subfeature of the parent_property's feature - specific to the parent_property's value. - """ - if not f.subfeature(): - return False - - p = f.parent() - if not p: - return False - - parent_feature = p[0] - parent_value = p[1] - - if parent_feature != parent_property.feature(): - return False - - if parent_value and parent_value != parent_property.value(): - return False - - return True - -def __is_subproperty_of (parent_property, p): - """ As is_subfeature_of, for subproperties. - """ - return is_subfeature_of (parent_property, p.feature()) - - -# Returns true iff the subvalue is valid for the feature. When the -# optional value-string is provided, returns true iff the subvalues -# are valid for the given value of the feature. -def is_subvalue(feature, value_string, subfeature, subvalue): - - if not value_string: - value_string = '' - - if not __subfeature_from_value.has_key(feature): - return False - - if not __subfeature_from_value[feature].has_key(value_string): - return False - - if not __subfeature_from_value[feature][value_string].has_key(subvalue): - return False - - if __subfeature_from_value[feature][value_string][subvalue]\ - != subfeature: - return False - - return True - -def implied_subfeature (feature, subvalue, value_string): - result = __find_implied_subfeature (feature, subvalue, value_string) - if not result: - raise InvalidValue ("'%s' is not a known subfeature value of '%s%s'" % (subvalue, feature, value_string)) - - return result - - -# Uses Property -def expand (properties): - """ Given a property set which may consist of composite and implicit - properties and combined subfeature values, returns an expanded, - normalized property set with all implicit features expressed - explicitly, all subfeature values individually expressed, and all - components of composite properties expanded. Non-free features - directly expressed in the input properties cause any values of - those features due to composite feature expansion to be dropped. If - two values of a given non-free feature are directly expressed in the - input, an error is issued. - """ - expanded = expand_subfeatures(properties) - return expand_composites (expanded) - -# Accepts list of Property objects -def add_defaults (properties): - """ Given a set of properties, add default values for features not - represented in the set. 
- Note: if there's there's ordinary feature F1 and composite feature - F2, which includes some value for F1, and both feature have default values, - then the default value of F1 will be added, not the value in F2. This might - not be right idea: consider - - feature variant : debug ... ; - <variant>debug : .... <runtime-debugging>on - feature <runtime-debugging> : off on ; - - Here, when adding default for an empty property set, we'll get - - <variant>debug <runtime_debugging>off - - and that's kind of strange. - """ - result = [x for x in properties] - - handled_features = set() - for p in properties: - # We don't add default for conditional properties. We don't want - # <variant>debug:<define>DEBUG to be takes as specified value for <variant> - if not p.condition(): - handled_features.add(p.feature()) - - missing_top = [f for f in __all_top_features if not f in handled_features] - more = defaults(missing_top) - result.extend(more) - for p in more: - handled_features.add(p.feature()) - - # Add defaults for subfeatures of features which are present - for p in result[:]: - s = p.feature().subfeatures() - more = defaults([s for s in p.feature().subfeatures() if not s in handled_features]) - for p in more: - handled_features.add(p.feature()) - result.extend(more) - - return result - -def minimize (properties): - """ Given an expanded property set, eliminate all redundancy: properties - which are elements of other (composite) properties in the set will - be eliminated. Non-symmetric properties equal to default values will be - eliminated, unless the override a value from some composite property. - Implicit properties will be expressed without feature - grist, and sub-property values will be expressed as elements joined - to the corresponding main property. - """ - - # remove properties implied by composite features - components = [] - for property in properties: - if __composite_properties.has_key (property): - components.extend(__composite_properties[property]) - properties = b2.util.set.difference (properties, components) - - # handle subfeatures and implicit features - - # move subfeatures to the end of the list - properties = [p for p in properties if not p.feature().subfeature()] +\ - [p for p in properties if p.feature().subfeature()] - - result = [] - while properties: - p = properties[0] - f = p.feature() - - # locate all subproperties of $(x[1]) in the property set - subproperties = __select_subproperties (p, properties) - - if subproperties: - # reconstitute the joined property name - subproperties.sort () - joined = b2.build.property.Property(p.feature(), p.value() + '-' + '-'.join ([sp.value() for sp in subproperties])) - result.append(joined) - - properties = b2.util.set.difference(properties[1:], subproperties) - - else: - # eliminate properties whose value is equal to feature's - # default and which are not symmetric and which do not - # contradict values implied by composite properties. - - # since all component properties of composites in the set - # have been eliminated, any remaining property whose - # feature is the same as a component of a composite in the - # set must have a non-redundant value. - if p.value() != f.default() or f.symmetric(): - result.append (p) - #\ - #or get_grist (fullp) in get_grist (components): - # FIXME: restore above - - - properties = properties[1:] - - return result - - -def split (properties): - """ Given a property-set of the form - v1/v2/...vN-1/<fN>vN/<fN+1>vN+1/...<fM>vM - - Returns - v1 v2 ... vN-1 <fN>vN <fN+1>vN+1 ... 
<fM>vM - - Note that vN...vM may contain slashes. This is resilient to the - substitution of backslashes for slashes, since Jam, unbidden, - sometimes swaps slash direction on NT. - """ - - def split_one (properties): - pieces = re.split (__re_slash_or_backslash, properties) - result = [] - - for x in pieces: - if not get_grist (x) and len (result) > 0 and get_grist (result [-1]): - result = result [0:-1] + [ result [-1] + '/' + x ] - else: - result.append (x) - - return result - - if isinstance (properties, str): - return split_one (properties) - - result = [] - for p in properties: - result += split_one (p) - return result - - -def compress_subproperties (properties): - """ Combine all subproperties into their parent properties - - Requires: for every subproperty, there is a parent property. All - features are explicitly expressed. - - This rule probably shouldn't be needed, but - build-request.expand-no-defaults is being abused for unintended - purposes and it needs help - """ - result = [] - matched_subs = set() - all_subs = set() - for p in properties: - f = p.feature() - - if not f.subfeature(): - subs = __select_subproperties (p, properties) - if subs: - - matched_subs.update(subs) - - subvalues = '-'.join (sub.value() for sub in subs) - result.append(b2.build.property.Property( - p.feature(), p.value() + '-' + subvalues, - p.condition())) - else: - result.append(p) - - else: - all_subs.add(p) - - # TODO: this variables are used just for debugging. What's the overhead? - assert all_subs == matched_subs - - return result - -###################################################################################### -# Private methods - -def __select_subproperties (parent_property, properties): - return [ x for x in properties if __is_subproperty_of (parent_property, x) ] - -def __get_subfeature_name (subfeature, value_string): - if value_string == None: - prefix = '' - else: - prefix = value_string + ':' - - return prefix + subfeature - - -def __validate_feature_attributes (name, attributes): - for attribute in attributes: - if not attribute in __all_attributes: - raise InvalidAttribute ("unknown attributes: '%s' in feature declaration: '%s'" % (str (b2.util.set.difference (attributes, __all_attributes)), name)) - - if name in __all_features: - raise AlreadyDefined ("feature '%s' already defined" % name) - elif 'implicit' in attributes and 'free' in attributes: - raise InvalidAttribute ("free features cannot also be implicit (in declaration of feature '%s')" % name) - elif 'free' in attributes and 'propagated' in attributes: - raise InvalidAttribute ("free features cannot also be propagated (in declaration of feature '%s')" % name) - - -def __validate_feature (feature): - """ Generates an error if the feature is unknown. - """ - if not __all_features.has_key (feature): - raise BaseException ('unknown feature "%s"' % feature) - - -def __select_subfeatures (parent_property, features): - """ Given a property, return the subset of features consisting of all - ordinary subfeatures of the property's feature, and all specific - subfeatures of the property's feature which are conditional on the - property's value. - """ - return [f for f in features if is_subfeature_of (parent_property, f)] - -# FIXME: copy over tests. diff --git a/jam-files/boost-build/build/generators.jam b/jam-files/boost-build/build/generators.jam deleted file mode 100644 index 1515525f..00000000 --- a/jam-files/boost-build/build/generators.jam +++ /dev/null @@ -1,1408 +0,0 @@ -# Copyright Vladimir Prus 2002. 
-# Copyright Rene Rivera 2006. -# -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -# Manages 'generators' --- objects which can do transformation between different -# target types and contain algorithm for finding transformation from sources to -# targets. -# -# The main entry point to this module is generators.construct rule. It is given -# a list of source targets, desired target type and a set of properties. It -# starts by selecting 'viable generators', which have any chances of producing -# the desired target type with the required properties. Generators are ranked -# and a set of the most specific ones is selected. -# -# The most specific generators have their 'run' methods called, with the -# properties and list of sources. Each one selects a target which can be -# directly consumed, and tries to convert the remaining ones to the types it can -# consume. This is done by recursively calling 'construct' with all consumable -# types. -# -# If the generator has collected all the targets it needs, it creates targets -# corresponding to result, and returns it. When all generators have been run, -# results of one of them are selected and returned as a result. -# -# It is quite possible for 'construct' to return more targets that it was asked -# for. For example, if it were asked to generate a target of type EXE, but the -# only found generator produces both EXE and TDS (file with debug) information. -# The extra target will be returned. -# -# Likewise, when generator tries to convert sources to consumable types, it can -# get more targets that it was asked for. The question is what to do with extra -# targets. Boost.Build attempts to convert them to requested types, and attempts -# that as early as possible. Specifically, this is done after invoking each -# generator. TODO: An example is needed to document the rationale for trying -# extra target conversion at that point. -# -# In order for the system to be able to use a specific generator instance 'when -# needed', the instance needs to be registered with the system using -# generators.register() or one of its related rules. Unregistered generators may -# only be run explicitly and will not be considered by Boost.Build when when -# converting between given target types. - -import "class" : new ; -import errors ; -import property-set ; -import sequence ; -import set ; -import type ; -import utility ; -import virtual-target ; - - -if "--debug-generators" in [ modules.peek : ARGV ] -{ - .debug = true ; -} - - -# Updated cached viable source target type information as needed after a new -# target type gets defined. This is needed because if a target type is a viable -# source target type for some generator then all of the target type's derived -# target types should automatically be considered as viable source target types -# for the same generator as well. Does nothing if a non-derived target type is -# passed to it. -# -rule update-cached-information-with-a-new-type ( type ) -{ - local base-type = [ type.base $(type) ] ; - if $(base-type) - { - for local g in $(.vstg-cached-generators) - { - if $(base-type) in $(.vstg.$(g)) - { - .vstg.$(g) += $(type) ; - } - } - - for local t in $(.vst-cached-types) - { - if $(base-type) in $(.vst.$(t)) - { - .vst.$(t) += $(type) ; - } - } - } -} - - -# Clears cached viable source target type information except for target types -# and generators with all source types listed as viable. 
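Before the invalidation rule below, the update rule just defined can be mirrored in a few lines of plain Python. The dict-shaped caches here are an assumption made for illustration, not the module's actual storage.

    # Minimal mirror of update-cached-information-with-a-new-type:
    # if base_type was viable somewhere, every type derived from it
    # (here just new_type) becomes viable there too.
    def update_caches_with_new_type(new_type, base_type, caches):
        for cache in caches:
            for key, viable in cache.items():
                if base_type in viable:
                    viable.append(new_type)

    vstg = {'gcc.compile': ['CPP']}   # per-generator cache (hypothetical)
    vst = {'OBJ': ['CPP']}            # per-type cache (hypothetical)
    update_caches_with_new_type('CPP2', 'CPP', [vstg, vst])
    print(vstg, vst)  # both lists now also contain 'CPP2'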
Should be called when -# something invalidates those cached values by possibly causing some new source -# types to become viable. -# -local rule invalidate-extendable-viable-source-target-type-cache ( ) -{ - local generators-with-cached-source-types = $(.vstg-cached-generators) ; - .vstg-cached-generators = ; - for local g in $(generators-with-cached-source-types) - { - if $(.vstg.$(g)) = * - { - .vstg-cached-generators += $(g) ; - } - else - { - .vstg.$(g) = ; - } - } - - local types-with-cached-source-types = $(.vst-cached-types) ; - .vst-cached-types = ; - for local t in $(types-with-cached-source-types) - { - if $(.vst.$(t)) = * - { - .vst-cached-types += $(t) ; - } - else - { - .vst.$(t) = ; - } - } -} - - -# Outputs a debug message if generators debugging is on. Each element of -# 'message' is checked to see if it is a class instance. If so, instead of the -# value, the result of 'str' call is output. -# -local rule generators.dout ( message * ) -{ - if $(.debug) - { - ECHO [ sequence.transform utility.str : $(message) ] ; - } -} - - -local rule indent ( ) -{ - return $(.indent:J="") ; -} - - -local rule increase-indent ( ) -{ - .indent += " " ; -} - - -local rule decrease-indent ( ) -{ - .indent = $(.indent[2-]) ; -} - - -# Models a generator. -# -class generator -{ - import generators : indent increase-indent decrease-indent generators.dout ; - import set ; - import utility ; - import feature ; - import errors ; - import sequence ; - import type ; - import virtual-target ; - import "class" : new ; - import property ; - import path ; - - EXPORT class@generator : indent increase-indent decrease-indent - generators.dout ; - - rule __init__ ( - id # Identifies the generator - should be name - # of the rule which sets up the build - # actions. - - composing ? # Whether generator processes each source - # target in turn, converting it to required - # types. Ordinary generators pass all - # sources together to the recursive - # generators.construct-types call. - - : source-types * # Types that this generator can handle. If - # empty, the generator can consume anything. - - : target-types-and-names + # Types the generator will create and, - # optionally, names for created targets. - # Each element should have the form - # type["(" name-pattern ")"], for example, - # obj(%_x). Generated target name will be - # found by replacing % with the name of - # source, provided an explicit name was not - # specified. - - : requirements * - ) - { - self.id = $(id) ; - self.rule-name = $(id) ; - self.composing = $(composing) ; - self.source-types = $(source-types) ; - self.target-types-and-names = $(target-types-and-names) ; - self.requirements = $(requirements) ; - - for local e in $(target-types-and-names) - { - # Create three parallel lists: one with the list of target types, - # and two other with prefixes and postfixes to be added to target - # name. We use parallel lists for prefix and postfix (as opposed to - # mapping), because given target type might occur several times, for - # example "H H(%_symbols)". - local m = [ MATCH ([^\\(]*)(\\((.*)%(.*)\\))? : $(e) ] ; - self.target-types += $(m[1]) ; - self.name-prefix += $(m[3]:E="") ; - self.name-postfix += $(m[4]:E="") ; - } - - # Note that 'transform' here, is the same as 'for_each'. 
- sequence.transform type.validate : $(self.source-types) ; - sequence.transform type.validate : $(self.target-types) ; - } - - ################# End of constructor ################# - - rule id ( ) - { - return $(self.id) ; - } - - # Returns the list of target type the generator accepts. - # - rule source-types ( ) - { - return $(self.source-types) ; - } - - # Returns the list of target types that this generator produces. It is - # assumed to be always the same -- i.e. it can not change depending on some - # provided list of sources. - # - rule target-types ( ) - { - return $(self.target-types) ; - } - - # Returns the required properties for this generator. Properties in returned - # set must be present in build properties if this generator is to be used. - # If result has grist-only element, that build properties must include some - # value of that feature. - # - # XXX: remove this method? - # - rule requirements ( ) - { - return $(self.requirements) ; - } - - rule set-rule-name ( rule-name ) - { - self.rule-name = $(rule-name) ; - } - - rule rule-name ( ) - { - return $(self.rule-name) ; - } - - # Returns a true value if the generator can be run with the specified - # properties. - # - rule match-rank ( property-set-to-match ) - { - # See if generator requirements are satisfied by 'properties'. Treat a - # feature name in requirements (i.e. grist-only element), as matching - # any value of the feature. - local all-requirements = [ requirements ] ; - - local property-requirements feature-requirements ; - for local r in $(all-requirements) - { - if $(r:G=) - { - property-requirements += $(r) ; - } - else - { - feature-requirements += $(r) ; - } - } - - local properties-to-match = [ $(property-set-to-match).raw ] ; - if $(property-requirements) in $(properties-to-match) && - $(feature-requirements) in $(properties-to-match:G) - { - return true ; - } - else - { - return ; - } - } - - # Returns another generator which differs from $(self) in - # - id - # - value to <toolset> feature in properties - # - rule clone ( new-id : new-toolset-properties + ) - { - local g = [ new $(__class__) $(new-id) $(self.composing) : - $(self.source-types) : $(self.target-types-and-names) : - # Note: this does not remove any subfeatures of <toolset> which - # might cause problems. - [ property.change $(self.requirements) : <toolset> ] - $(new-toolset-properties) ] ; - return $(g) ; - } - - # Creates another generator that is the same as $(self), except that if - # 'base' is in target types of $(self), 'type' will in target types of the - # new generator. - # - rule clone-and-change-target-type ( base : type ) - { - local target-types ; - for local t in $(self.target-types-and-names) - { - local m = [ MATCH ([^\\(]*)(\\(.*\\))? : $(t) ] ; - if $(m) = $(base) - { - target-types += $(type)$(m[2]:E="") ; - } - else - { - target-types += $(t) ; - } - } - - local g = [ new $(__class__) $(self.id) $(self.composing) : - $(self.source-types) : $(target-types) : $(self.requirements) ] ; - if $(self.rule-name) - { - $(g).set-rule-name $(self.rule-name) ; - } - return $(g) ; - } - - # Tries to invoke this generator on the given sources. Returns a list of - # generated targets (instances of 'virtual-target') and optionally a set of - # properties to be added to the usage-requirements for all the generated - # targets. Returning nothing from run indicates that the generator was - # unable to create the target. - # - rule run - ( - project # Project for which the targets are generated. - name ? 
# Used when determining the 'name' attribute for all - # generated targets. See the 'generated-targets' method. - : property-set # Desired properties for generated targets. - : sources + # Source targets. - ) - { - generators.dout [ indent ] " ** generator" $(self.id) ; - generators.dout [ indent ] " composing:" $(self.composing) ; - - if ! $(self.composing) && $(sources[2]) && $(self.source-types[2]) - { - errors.error "Unsupported source/source-type combination" ; - } - - # We do not run composing generators if no name is specified. The reason - # is that composing generator combines several targets, which can have - # different names, and it cannot decide which name to give for produced - # target. Therefore, the name must be passed. - # - # This in effect, means that composing generators are runnable only at - # the top-level of a transformation graph, or if their name is passed - # explicitly. Thus, we dissallow composing generators in the middle. For - # example, the transformation CPP -> OBJ -> STATIC_LIB -> RSP -> EXE - # will not be allowed as the OBJ -> STATIC_LIB generator is composing. - if ! $(self.composing) || $(name) - { - run-really $(project) $(name) : $(property-set) : $(sources) ; - } - } - - rule run-really ( project name ? : property-set : sources + ) - { - # Targets that this generator will consume directly. - local consumed = ; - # Targets that can not be consumed and will be returned as-is. - local bypassed = ; - - if $(self.composing) - { - convert-multiple-sources-to-consumable-types $(project) - : $(property-set) : $(sources) : consumed bypassed ; - } - else - { - convert-to-consumable-types $(project) $(name) : $(property-set) - : $(sources) : : consumed bypassed ; - } - - local result ; - if $(consumed) - { - result = [ construct-result $(consumed) : $(project) $(name) : - $(property-set) ] ; - } - - if $(result) - { - generators.dout [ indent ] " SUCCESS: " $(result) ; - } - else - { - generators.dout [ indent ] " FAILURE" ; - } - generators.dout ; - return $(result) ; - } - - # Constructs the dependency graph to be returned by this generator. - # - rule construct-result - ( - consumed + # Already prepared list of consumable targets. - # Composing generators may receive multiple sources - # all of which will have types matching those in - # $(self.source-types). Non-composing generators with - # multiple $(self.source-types) will receive exactly - # len $(self.source-types) sources with types matching - # those in $(self.source-types). And non-composing - # generators with only a single source type may - # receive multiple sources with all of them of the - # type listed in $(self.source-types). - : project name ? - : property-set # Properties to be used for all actions created here. - ) - { - local result ; - # If this is 1->1 transformation, apply it to all consumed targets in - # order. - if ! $(self.source-types[2]) && ! $(self.composing) - { - for local r in $(consumed) - { - result += [ generated-targets $(r) : $(property-set) : - $(project) $(name) ] ; - } - } - else if $(consumed) - { - result += [ generated-targets $(consumed) : $(property-set) : - $(project) $(name) ] ; - } - return $(result) ; - } - - # Determine target name from fullname (maybe including path components) - # Place optional prefix and postfix around basename - # - rule determine-target-name ( fullname : prefix ? : postfix ? ) - { - # See if we need to add directory to the target name. 
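The gating enforced by the 'run' rule above can be sketched in Python; the function and its return convention are invented for illustration. The determine-target-name rule body continues below.

    # Sketch of the naming rule for composing generators: they merge
    # several sources, so a target name must be supplied explicitly.
    def run(composing, name, sources):
        if composing and not name:
            return []   # refused: composing mid-graph has no usable name
        return ["build '%s' from %s" % (name or sources[0], sources)]

    print(run(True, None, ['a.obj', 'b.obj']))   # []
    print(run(True, 'app', ['a.obj', 'b.obj']))  # one pseudo-target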
- local dir = $(fullname:D) ; - local name = $(fullname:B) ; - - name = $(prefix:E=)$(name) ; - name = $(name)$(postfix:E=) ; - - if $(dir) && - # Never append '..' to target path. - ! [ MATCH .*(\\.\\.).* : $(dir) ] - && - ! [ path.is-rooted $(dir) ] - { - # Relative path is always relative to the source - # directory. Retain it, so that users can have files - # with the same in two different subdirectories. - name = $(dir)/$(name) ; - } - return $(name) ; - } - - # Determine the name of the produced target from the names of the sources. - # - rule determine-output-name ( sources + ) - { - # The simple case if when a name of source has single dot. Then, we take - # the part before dot. Several dots can be caused by: - # - using source file like a.host.cpp, or - # - a type whose suffix has a dot. Say, we can type 'host_cpp' with - # extension 'host.cpp'. - # In the first case, we want to take the part up to the last dot. In the - # second case -- not sure, but for now take the part up to the last dot - # too. - name = [ utility.basename [ $(sources[1]).name ] ] ; - - for local s in $(sources[2]) - { - local n2 = [ utility.basename [ $(s).name ] ] ; - if $(n2) != $(name) - { - errors.error "$(self.id): source targets have different names: cannot determine target name" ; - } - } - name = [ determine-target-name [ $(sources[1]).name ] ] ; - return $(name) ; - } - - # Constructs targets that are created after consuming 'sources'. The result - # will be the list of virtual-target, which has the same length as the - # 'target-types' attribute and with corresponding types. - # - # When 'name' is empty, all source targets must have the same 'name' - # attribute value, which will be used instead of the 'name' argument. - # - # The 'name' attribute value for each generated target will be equal to - # the 'name' parameter if there is no name pattern for this type. Otherwise, - # the '%' symbol in the name pattern will be replaced with the 'name' - # parameter to obtain the 'name' attribute. - # - # For example, if targets types are T1 and T2 (with name pattern "%_x"), - # suffixes for T1 and T2 are .t1 and .t2, and source is foo.z, then created - # files would be "foo.t1" and "foo_x.t2". The 'name' attribute actually - # determines the basename of a file. - # - # Note that this pattern mechanism has nothing to do with implicit patterns - # in make. It is a way to produce a target whose name is different than the - # name of its source. - # - rule generated-targets ( sources + : property-set : project name ? ) - { - if ! $(name) - { - name = [ determine-output-name $(sources) ] ; - } - - # Assign an action for each target. - local action = [ action-class ] ; - local a = [ class.new $(action) $(sources) : $(self.rule-name) : - $(property-set) ] ; - - # Create generated target for each target type. - local targets ; - local pre = $(self.name-prefix) ; - local post = $(self.name-postfix) ; - for local t in $(self.target-types) - { - local generated-name = $(pre[1])$(name:BS)$(post[1]) ; - generated-name = $(generated-name:R=$(name:D)) ; - pre = $(pre[2-]) ; - post = $(post[2-]) ; - - targets += [ class.new file-target $(generated-name) : $(t) : - $(project) : $(a) ] ; - } - - return [ sequence.transform virtual-target.register : $(targets) ] ; - } - - # Attempts to convert 'sources' to targets of types that this generator can - # handle. The intention is to produce the set of targets that can be used - # when the generator is run. - # - rule convert-to-consumable-types - ( - project name ? 
- : property-set - : sources + - : only-one ? # Convert 'source' to only one of the source types. If - # there is more that one possibility, report an error. - : consumed-var # Name of the variable which receives all targets which - # can be consumed. - bypassed-var # Name of the variable which receives all targets which - # can not be consumed. - ) - { - # We are likely to be passed 'consumed' and 'bypassed' var names. Use - # '_' to avoid name conflicts. - local _consumed ; - local _bypassed ; - local missing-types ; - - if $(sources[2]) - { - # Do not know how to handle several sources yet. Just try to pass - # the request to other generator. - missing-types = $(self.source-types) ; - } - else - { - consume-directly $(sources) : _consumed : missing-types ; - } - - # No need to search for transformation if some source type has consumed - # source and no more source types are needed. - if $(only-one) && $(_consumed) - { - missing-types = ; - } - - # TODO: we should check that only one source type if create of - # 'only-one' is true. - # TODO: consider if consumed/bypassed separation should be done by - # 'construct-types'. - - if $(missing-types) - { - local transformed = [ generators.construct-types $(project) $(name) - : $(missing-types) : $(property-set) : $(sources) ] ; - - # Add targets of right type to 'consumed'. Add others to 'bypassed'. - # The 'generators.construct' rule has done its best to convert - # everything to the required type. There is no need to rerun it on - # targets of different types. - - # NOTE: ignoring usage requirements. - for local t in $(transformed[2-]) - { - if [ $(t).type ] in $(missing-types) - { - _consumed += $(t) ; - } - else - { - _bypassed += $(t) ; - } - } - } - - _consumed = [ sequence.unique $(_consumed) ] ; - _bypassed = [ sequence.unique $(_bypassed) ] ; - - # Remove elements of '_bypassed' that are in '_consumed'. - - # Suppose the target type of current generator, X is produced from X_1 - # and X_2, which are produced from Y by one generator. When creating X_1 - # from Y, X_2 will be added to 'bypassed'. Likewise, when creating X_2 - # from Y, X_1 will be added to 'bypassed', but they are also in - # 'consumed'. We have to remove them from bypassed, so that generators - # up the call stack do not try to convert them. - - # In this particular case, X_1 instance in 'consumed' and X_1 instance - # in 'bypassed' will be the same: because they have the same source and - # action name, and 'virtual-target.register' will not allow two - # different instances. Therefore, it is OK to use 'set.difference'. - - _bypassed = [ set.difference $(_bypassed) : $(_consumed) ] ; - - $(consumed-var) += $(_consumed) ; - $(bypassed-var) += $(_bypassed) ; - } - - # Converts several files to consumable types. Called for composing - # generators only. - # - rule convert-multiple-sources-to-consumable-types ( project : property-set : - sources * : consumed-var bypassed-var ) - { - # We process each source one-by-one, trying to convert it to a usable - # type. - for local source in $(sources) - { - local _c ; - local _b ; - # TODO: need to check for failure on each source. - convert-to-consumable-types $(project) : $(property-set) : $(source) - : true : _c _b ; - if ! 
$(_c) - { - generators.dout [ indent ] " failed to convert " $(source) ; - } - $(consumed-var) += $(_c) ; - $(bypassed-var) += $(_b) ; - } - } - - rule consume-directly ( source : consumed-var : missing-types-var ) - { - local real-source-type = [ $(source).type ] ; - - # If there are no source types, we can consume anything. - local source-types = $(self.source-types) ; - source-types ?= $(real-source-type) ; - - for local st in $(source-types) - { - # The 'source' if of the right type already. - if $(real-source-type) = $(st) || [ type.is-derived - $(real-source-type) $(st) ] - { - $(consumed-var) += $(source) ; - } - else - { - $(missing-types-var) += $(st) ; - } - } - } - - # Returns the class to be used to actions. Default implementation returns - # "action". - # - rule action-class ( ) - { - return "action" ; - } -} - - -# Registers a new generator instance 'g'. -# -rule register ( g ) -{ - .all-generators += $(g) ; - - # A generator can produce several targets of the same type. We want unique - # occurrence of that generator in .generators.$(t) in that case, otherwise, - # it will be tried twice and we will get a false ambiguity. - for local t in [ sequence.unique [ $(g).target-types ] ] - { - .generators.$(t) += $(g) ; - } - - # Update the set of generators for toolset. - - # TODO: should we check that generator with this id is not already - # registered. For example, the fop.jam module intentionally declared two - # generators with the same id, so such check will break it. - local id = [ $(g).id ] ; - - # Some generators have multiple periods in their name, so a simple $(id:S=) - # will not generate the right toolset name. E.g. if id = gcc.compile.c++, - # then .generators-for-toolset.$(id:S=) will append to - # .generators-for-toolset.gcc.compile, which is a separate value from - # .generators-for-toolset.gcc. Correcting this makes generator inheritance - # work properly. See also inherit-generators in the toolset module. - local base = $(id) ; - while $(base:S) - { - base = $(base:B) ; - } - .generators-for-toolset.$(base) += $(g) ; - - - # After adding a new generator that can construct new target types, we need - # to clear the related cached viable source target type information for - # constructing a specific target type or using a specific generator. Cached - # viable source target type lists affected by this are those containing any - # of the target types constructed by the new generator or any of their base - # target types. - # - # A more advanced alternative to clearing that cached viable source target - # type information would be to expand it with additional source types or - # even better - mark it as needing to be expanded on next use. - # - # Also see the http://thread.gmane.org/gmane.comp.lib.boost.build/19077 - # mailing list thread for an even more advanced idea of how we could convert - # Boost Build's Jamfile processing, target selection and generator selection - # into separate steps which would prevent these caches from ever being - # invalidated. - # - # For now we just clear all the cached viable source target type information - # that does not simply state 'all types' and may implement a more detailed - # algorithm later on if it becomes needed. - - invalidate-extendable-viable-source-target-type-cache ; -} - - -# Creates a new non-composing 'generator' class instance and registers it. -# Returns the created instance. 
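The suffix-stripping loop in 'register' above maps a generator id like gcc.compile.c++ to its toolset name. A Python mirror, using os.path.splitext as a stand-in for Jam's :S and :B modifiers:

    import os

    # Keep removing the last dotted component until none remains.
    def toolset_of(generator_id):
        base = generator_id
        while os.path.splitext(base)[1]:
            base = os.path.splitext(base)[0]
        return base

    print(toolset_of('gcc.compile.c++'))  # gcc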
Rationale: the instance is returned so that it -# is possible to first register a generator and then call its 'run' method, -# bypassing the whole generator selection process. -# -rule register-standard ( id : source-types * : target-types + : requirements * ) -{ - local g = [ new generator $(id) : $(source-types) : $(target-types) : - $(requirements) ] ; - register $(g) ; - return $(g) ; -} - - -# Creates a new composing 'generator' class instance and registers it. -# -rule register-composing ( id : source-types * : target-types + : requirements * - ) -{ - local g = [ new generator $(id) true : $(source-types) : $(target-types) : - $(requirements) ] ; - register $(g) ; - return $(g) ; -} - - -# Returns all generators belonging to the given 'toolset', i.e. whose ids are -# '$(toolset).<something>'. -# -rule generators-for-toolset ( toolset ) -{ - return $(.generators-for-toolset.$(toolset)) ; -} - - -# Make generator 'overrider-id' be preferred to 'overridee-id'. If, when -# searching for generators that could produce a target of a certain type, both -# those generators are among viable generators, the overridden generator is -# immediately discarded. -# -# The overridden generators are discarded immediately after computing the list -# of viable generators but before running any of them. -# -rule override ( overrider-id : overridee-id ) -{ - .override.$(overrider-id) += $(overridee-id) ; -} - - -# Returns a list of source type which can possibly be converted to 'target-type' -# by some chain of generator invocation. -# -# More formally, takes all generators for 'target-type' and returns a union of -# source types for those generators and result of calling itself recursively on -# source types. -# -# Returns '*' in case any type should be considered a viable source type for the -# given type. -# -local rule viable-source-types-real ( target-type ) -{ - local result ; - - # 't0' is the initial list of target types we need to process to get a list - # of their viable source target types. New target types will not be added to - # this list. - local t0 = [ type.all-bases $(target-type) ] ; - - # 't' is the list of target types which have not yet been processed to get a - # list of their viable source target types. This list will get expanded as - # we locate more target types to process. - local t = $(t0) ; - - while $(t) - { - # Find all generators for the current type. Unlike - # 'find-viable-generators' we do not care about the property-set. - local generators = $(.generators.$(t[1])) ; - t = $(t[2-]) ; - - while $(generators) - { - local g = $(generators[1]) ; - generators = $(generators[2-]) ; - - if ! [ $(g).source-types ] - { - # Empty source types -- everything can be accepted. - result = * ; - # This will terminate this loop. - generators = ; - # This will terminate the outer loop. - t = ; - } - - for local source-type in [ $(g).source-types ] - { - if ! $(source-type) in $(result) - { - # If a generator accepts a 'source-type' it will also - # happily accept any type derived from it. - for local n in [ type.all-derived $(source-type) ] - { - if ! $(n) in $(result) - { - # Here there is no point in adding target types to - # the list of types to process in case they are or - # have already been on that list. We optimize this - # check by realizing that we only need to avoid the - # original target type's base types. 
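The search that viable-source-types-real performs amounts to a worklist closure over types. A minimal sketch, assuming a hypothetical GENERATORS table (target type to a list of source-type lists, one per generator) and ignoring derived-type expansion for brevity:

    GENERATORS = {'EXE': [['OBJ']], 'OBJ': [['CPP']]}

    def viable_source_types(target_type):
        result, todo = set(), [target_type]
        while todo:
            t = todo.pop()
            for source_types in GENERATORS.get(t, []):
                if not source_types:
                    return '*'   # a generator that accepts anything
                for s in source_types:
                    if s not in result:
                        result.add(s)
                        todo.append(s)
        return result

    print(viable_source_types('EXE'))  # {'OBJ', 'CPP'}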
Other target - # types that are or have been on the list of target - # types to process have been added to the 'result' - # list as well and have thus already been eliminated - # by the previous if. - if ! $(n) in $(t0) - { - t += $(n) ; - } - result += $(n) ; - } - } - } - } - } - } - - return $(result) ; -} - - -# Helper rule, caches the result of 'viable-source-types-real'. -# -rule viable-source-types ( target-type ) -{ - local key = .vst.$(target-type) ; - if ! $($(key)) - { - .vst-cached-types += $(target-type) ; - local v = [ viable-source-types-real $(target-type) ] ; - if ! $(v) - { - v = none ; - } - $(key) = $(v) ; - } - - if $($(key)) != none - { - return $($(key)) ; - } -} - - -# Returns the list of source types, which, when passed to 'run' method of -# 'generator', has some change of being eventually used (probably after -# conversion by other generators). -# -# Returns '*' in case any type should be considered a viable source type for the -# given generator. -# -rule viable-source-types-for-generator-real ( generator ) -{ - local source-types = [ $(generator).source-types ] ; - if ! $(source-types) - { - # If generator does not specify any source types, it might be a special - # generator like builtin.lib-generator which just relays to other - # generators. Return '*' to indicate that any source type is possibly - # OK, since we do not know for sure. - return * ; - } - else - { - local result ; - while $(source-types) - { - local s = $(source-types[1]) ; - source-types = $(source-types[2-]) ; - local viable-sources = [ generators.viable-source-types $(s) ] ; - if $(viable-sources) = * - { - result = * ; - source-types = ; # Terminate the loop. - } - else - { - result += [ type.all-derived $(s) ] $(viable-sources) ; - } - } - return [ sequence.unique $(result) ] ; - } -} - - -# Helper rule, caches the result of 'viable-source-types-for-generator'. -# -local rule viable-source-types-for-generator ( generator ) -{ - local key = .vstg.$(generator) ; - if ! $($(key)) - { - .vstg-cached-generators += $(generator) ; - local v = [ viable-source-types-for-generator-real $(generator) ] ; - if ! $(v) - { - v = none ; - } - $(key) = $(v) ; - } - - if $($(key)) != none - { - return $($(key)) ; - } -} - - -# Returns usage requirements + list of created targets. -# -local rule try-one-generator-really ( project name ? : generator : target-type - : property-set : sources * ) -{ - local targets = - [ $(generator).run $(project) $(name) : $(property-set) : $(sources) ] ; - - local usage-requirements ; - local success ; - - generators.dout [ indent ] returned $(targets) ; - - if $(targets) - { - success = true ; - - if [ class.is-a $(targets[1]) : property-set ] - { - usage-requirements = $(targets[1]) ; - targets = $(targets[2-]) ; - } - else - { - usage-requirements = [ property-set.empty ] ; - } - } - - generators.dout [ indent ] " generator" [ $(generator).id ] " spawned " ; - generators.dout [ indent ] " " $(targets) ; - if $(usage-requirements) - { - generators.dout [ indent ] " with usage requirements:" $(x) ; - } - - if $(success) - { - return $(usage-requirements) $(targets) ; - } -} - - -# Checks if generator invocation can be pruned, because it is guaranteed to -# fail. If so, quickly returns an empty list. Otherwise, calls -# try-one-generator-really. -# -local rule try-one-generator ( project name ? 
: generator : target-type - : property-set : sources * ) -{ - local source-types ; - for local s in $(sources) - { - source-types += [ $(s).type ] ; - } - local viable-source-types = [ viable-source-types-for-generator $(generator) - ] ; - - if $(source-types) && $(viable-source-types) != * && - ! [ set.intersection $(source-types) : $(viable-source-types) ] - { - local id = [ $(generator).id ] ; - generators.dout [ indent ] " ** generator '$(id)' pruned" ; - #generators.dout [ indent ] "source-types" '$(source-types)' ; - #generators.dout [ indent ] "viable-source-types" '$(viable-source-types)' ; - } - else - { - return [ try-one-generator-really $(project) $(name) : $(generator) : - $(target-type) : $(property-set) : $(sources) ] ; - } -} - - -rule construct-types ( project name ? : target-types + : property-set - : sources + ) -{ - local result ; - local matched-types ; - local usage-requirements = [ property-set.empty ] ; - for local t in $(target-types) - { - local r = [ construct $(project) $(name) : $(t) : $(property-set) : - $(sources) ] ; - if $(r) - { - usage-requirements = [ $(usage-requirements).add $(r[1]) ] ; - result += $(r[2-]) ; - matched-types += $(t) ; - } - } - # TODO: have to introduce parameter controlling if several types can be - # matched and add appropriate checks. - - # TODO: need to review the documentation for 'construct' to see if it should - # return $(source) even if nothing can be done with it. Currents docs seem - # to imply that, contrary to the behaviour. - if $(result) - { - return $(usage-requirements) $(result) ; - } - else - { - return $(usage-requirements) $(sources) ; - } -} - - -# Ensures all 'targets' have their type. If this is not so, exists with error. -# -local rule ensure-type ( targets * ) -{ - for local t in $(targets) - { - if ! [ $(t).type ] - { - errors.error "target" [ $(t).str ] "has no type" ; - } - } -} - - -# Returns generators which can be used to construct target of specified type -# with specified properties. Uses the following algorithm: -# - iterates over requested target-type and all its bases (in the order returned -# by type.all-bases). -# - for each type find all generators that generate that type and whose -# requirements are satisfied by properties. -# - if the set of generators is not empty, returns that set. -# -# Note: this algorithm explicitly ignores generators for base classes if there -# is at least one generator for the requested target-type. -# -local rule find-viable-generators-aux ( target-type : property-set ) -{ - # Select generators that can create the required target type. - local viable-generators = ; - local generator-rank = ; - - import type ; - local t = [ type.all-bases $(target-type) ] ; - - generators.dout [ indent ] find-viable-generators target-type= $(target-type) - property-set= [ $(property-set).as-path ] ; - - # Get the list of generators for the requested type. If no generator is - # registered, try base type, and so on. - local generators ; - while $(t[1]) - { - generators.dout [ indent ] "trying type" $(t[1]) ; - if $(.generators.$(t[1])) - { - generators.dout [ indent ] "there are generators for this type" ; - generators = $(.generators.$(t[1])) ; - - if $(t[1]) != $(target-type) - { - # We are here because there were no generators found for - # target-type but there are some generators for its base type. - # We will try to use them, but they will produce targets of - # base type, not of 'target-type'. So, we clone the generators - # and modify the list of target types. 
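The pruning test in try-one-generator above reduces to an intersection check between the actual source types and the generator's viable source types. A sketch with invented names:

    # Skip a generator outright when none of the source types could
    # ever be consumed, directly or via conversion.
    def worth_trying(source_types, viable_for_generator):
        if viable_for_generator == '*':
            return True
        return bool(set(source_types) & set(viable_for_generator))

    print(worth_trying(['CPP'], ['C', 'CPP']))  # True
    print(worth_trying(['ASM'], ['C', 'CPP']))  # False -- pruned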
- local generators2 ; - for local g in $(generators) - { - # generators.register adds a generator to the list of - # generators for toolsets, which is a bit strange, but - # should work. That list is only used when inheriting a - # toolset, which should have been done before running - # generators. - generators2 += [ $(g).clone-and-change-target-type $(t[1]) : - $(target-type) ] ; - generators.register $(generators2[-1]) ; - } - generators = $(generators2) ; - } - t = ; - } - t = $(t[2-]) ; - } - - for local g in $(generators) - { - generators.dout [ indent ] "trying generator" [ $(g).id ] "(" [ $(g).source-types ] -> [ $(g).target-types ] ")" ; - - local m = [ $(g).match-rank $(property-set) ] ; - if $(m) - { - generators.dout [ indent ] " is viable" ; - viable-generators += $(g) ; - } - } - - return $(viable-generators) ; -} - - -rule find-viable-generators ( target-type : property-set ) -{ - local key = $(target-type).$(property-set) ; - local l = $(.fv.$(key)) ; - if ! $(l) - { - l = [ find-viable-generators-aux $(target-type) : $(property-set) ] ; - if ! $(l) - { - l = none ; - } - .fv.$(key) = $(l) ; - } - - if $(l) = none - { - l = ; - } - - local viable-generators ; - for local g in $(l) - { - # Avoid trying the same generator twice on different levels. - if ! $(g) in $(.active-generators) - { - viable-generators += $(g) ; - } - else - { - generators.dout [ indent ] " generator " [ $(g).id ] "is active, discaring" ; - } - } - - # Generators which override 'all'. - local all-overrides ; - # Generators which are overriden. - local overriden-ids ; - for local g in $(viable-generators) - { - local id = [ $(g).id ] ; - local this-overrides = $(.override.$(id)) ; - overriden-ids += $(this-overrides) ; - if all in $(this-overrides) - { - all-overrides += $(g) ; - } - } - if $(all-overrides) - { - viable-generators = $(all-overrides) ; - } - local result ; - for local g in $(viable-generators) - { - if ! [ $(g).id ] in $(overriden-ids) - { - result += $(g) ; - } - } - - return $(result) ; -} - - -.construct-stack = ; - - -# Attempts to construct a target by finding viable generators, running them and -# selecting the dependency graph. -# -local rule construct-really ( project name ? : target-type : property-set : - sources * ) -{ - viable-generators = [ find-viable-generators $(target-type) : - $(property-set) ] ; - - generators.dout [ indent ] "*** " [ sequence.length $(viable-generators) ] - " viable generators" ; - - local result ; - local generators-that-succeeded ; - for local g in $(viable-generators) - { - # This variable will be restored on exit from this scope. - local .active-generators = $(g) $(.active-generators) ; - - local r = [ try-one-generator $(project) $(name) : $(g) : $(target-type) - : $(property-set) : $(sources) ] ; - - if $(r) - { - generators-that-succeeded += $(g) ; - if $(result) - { - ECHO "Error: ambiguity found when searching for best transformation" ; - ECHO "Trying to produce type '$(target-type)' from: " ; - for local s in $(sources) - { - ECHO " - " [ $(s).str ] ; - } - ECHO "Generators that succeeded:" ; - for local g in $(generators-that-succeeded) - { - ECHO " - " [ $(g).id ] ; - } - ECHO "First generator produced: " ; - for local t in $(result[2-]) - { - ECHO " - " [ $(t).str ] ; - } - ECHO "Second generator produced: " ; - for local t in $(r[2-]) - { - ECHO " - " [ $(t).str ] ; - } - EXIT ; - } - else - { - result = $(r) ; - } - } - } - - return $(result) ; -} - - -# Attempts to create a target of 'target-type' with 'properties' from 'sources'. 
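The ambiguity check in construct-really can be stated compactly in Python; try_one below is a stand-in parameter for try-one-generator, introduced only to keep the sketch self-contained:

    def construct_really(viable_generators, try_one):
        # First success wins; a second success means the transformation
        # graph is ambiguous, which is a hard error.
        result, succeeded = None, []
        for g in viable_generators:
            r = try_one(g)
            if r:
                succeeded.append(g)
                if result is not None:
                    raise RuntimeError(
                        "ambiguity: %s all succeeded" % succeeded)
                result = r
        return result

    print(construct_really(['g1', 'g2'], lambda g: g == 'g1' and ['exe']))
    # ['exe']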
-# The 'sources' are treated as a collection of *possible* ingridients, i.e. -# there is no obligation to consume them all. -# -# Returns a list of targets. When this invocation is first instance of -# 'construct' in stack, returns only targets of requested 'target-type', -# otherwise, returns also unused sources and additionally generated targets. -# -# If 'top-level' is set, does not suppress generators that are already -# used in the stack. This may be useful in cases where a generator -# has to build a metatargets -- for example a target corresponding to -# built tool. -# -rule construct ( project name ? : target-type : property-set * : sources * : top-level ? ) -{ - local saved-stack ; - if $(top-level) - { - saved-active = $(.active-generators) ; - .active-generators = ; - } - - if (.construct-stack) - { - ensure-type $(sources) ; - } - - .construct-stack += 1 ; - - increase-indent ; - - if $(.debug) - { - generators.dout [ indent ] "*** construct" $(target-type) ; - - for local s in $(sources) - { - generators.dout [ indent ] " from" $(s) ; - } - generators.dout [ indent ] " properties:" [ $(property-set).raw ] ; - } - - local result = [ construct-really $(project) $(name) : $(target-type) : - $(property-set) : $(sources) ] ; - - decrease-indent ; - - .construct-stack = $(.construct-stack[2-]) ; - - if $(top-level) - { - .active-generators = $(saved-active) ; - } - - return $(result) ; -} - -# Given 'result', obtained from some generator or generators.construct, adds -# 'raw-properties' as usage requirements to it. If result already contains usage -# requirements -- that is the first element of result of an instance of the -# property-set class, the existing usage requirements and 'raw-properties' are -# combined. -# -rule add-usage-requirements ( result * : raw-properties * ) -{ - if $(result) - { - if [ class.is-a $(result[1]) : property-set ] - { - return [ $(result[1]).add-raw $(raw-properties) ] $(result[2-]) ; - } - else - { - return [ property-set.create $(raw-properties) ] $(result) ; - } - } -} - -rule dump ( ) -{ - for local g in $(.all-generators) - { - ECHO [ $(g).id ] ":" [ $(g).source-types ] -> [ $(g).target-types ] ; - } -} - diff --git a/jam-files/boost-build/build/generators.py b/jam-files/boost-build/build/generators.py deleted file mode 100644 index 2c59f7ca..00000000 --- a/jam-files/boost-build/build/generators.py +++ /dev/null @@ -1,1089 +0,0 @@ -# Status: being ported by Vladimir Prus -# Base revision: 48649 -# TODO: replace the logging with dout - -# Copyright Vladimir Prus 2002. -# Copyright Rene Rivera 2006. -# -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -# Manages 'generators' --- objects which can do transformation between different -# target types and contain algorithm for finding transformation from sources -# to targets. -# -# The main entry point to this module is generators.construct rule. It is given -# a list of source targets, desired target type and a set of properties. -# It starts by selecting 'viable generators', which have any chances of producing -# the desired target type with the required properties. Generators are ranked and -# a set of most specific ones is selected. -# -# The most specific generators have their 'run' methods called, with the properties -# and list of sources. Each one selects target which can be directly consumed, and -# tries to convert the remaining ones to the types it can consume. 
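The save/restore of the active-generator list around top-level calls to 'construct' can be sketched as follows; the names are invented, and the real rule also maintains a construct stack and debug indentation:

    active_generators = ['gcc.link']   # pretend one generator is running

    def construct(target_type, top_level=False):
        global active_generators
        saved = active_generators
        if top_level:
            active_generators = []   # e.g. building a tool used by the build
        try:
            return "%s: %d generator(s) suppressed" % (
                target_type, len(active_generators))
        finally:
            active_generators = saved

    print(construct('EXE'))                  # EXE: 1 generator(s) suppressed
    print(construct('EXE', top_level=True))  # EXE: 0 generator(s) suppressed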
This is done
-# by recursively calling 'construct' with all consumable types.
-#
-# If the generator has collected all the targets it needs, it creates targets
-# corresponding to the result, and returns them. When all generators have been
-# run, the results of one of them are selected and returned as the result.
-#
-# It is quite possible that 'construct' returns more targets than it was asked
-# for. For example, it may be asked for a target of type EXE, but the only
-# generator found produces both EXE and TDS (file with debug information).
-# The extra target will be returned.
-#
-# Likewise, when a generator tries to convert sources to consumable types, it
-# can get more targets than it was asked for. The question is what to do with
-# extra targets. Boost.Build attempts to convert them to requested types, and
-# attempts that as early as possible. Specifically, this is done after invoking
-# each generator. (Later I'll document the rationale for trying extra target
-# conversion at that point.)
-#
-# That early conversion is not always desirable. Suppose a generator got a
-# source of type Y and must consume one target of type X_1 and one target of
-# type X_2. When converting Y to X_1, an extra target of type Y_2 is created.
-# We should not try to convert it to type X_1, because if we do so, the
-# generator will get two targets of type X_1 and will be at a loss as to which
-# one to use. Because of that, the 'construct' rule has a parameter telling
-# whether multiple targets can be returned. If the parameter is false,
-# conversion of extra targets is not performed.
-
-
-import re
-import cStringIO
-import os.path
-
-from virtual_target import Subvariant
-import virtual_target, type, property_set, property
-from b2.util.logger import *
-from b2.util.utility import *
-from b2.util import set
-from b2.util.sequence import unique
-import b2.util.sequence as sequence
-from b2.manager import get_manager
-import b2.build.type
-
-def reset ():
- """ Clear the module state. This is mainly for testing purposes.
- """
- global __generators, __type_to_generators, __generators_for_toolset, __construct_stack
- global __overrides, __active_generators
- global __viable_generators_cache, __viable_source_types_cache
- global __vstg_cached_generators, __vst_cached_types
-
- __generators = {}
- __type_to_generators = {}
- __generators_for_toolset = {}
- __overrides = {}
-
- # TODO: can these be global?
- __construct_stack = []
- __viable_generators_cache = {}
- __viable_source_types_cache = {}
- __active_generators = []
-
- __vstg_cached_generators = []
- __vst_cached_types = []
-
-reset ()
-
-_re_separate_types_prefix_and_postfix = re.compile ('([^\\(]*)(\\((.*)%(.*)\\))?')
-_re_match_type = re.compile('([^\\(]*)(\\(.*\\))?')
-
-
-__debug = None
-__indent = ""
-
-def debug():
- global __debug
- if __debug is None:
- __debug = "--debug-generators" in bjam.variable("ARGV")
- return __debug
-
-def increase_indent():
- global __indent
- __indent += " "
-
-def decrease_indent():
- global __indent
- __indent = __indent[0:-4]
-
-
-# Updates cached viable source target type information as needed after a new
-# derived target type gets added. This is needed because if a target type is a
-# viable source target type for some generator then all of the target type's
-# derived target types are automatically viable as source target types for the
-# same generator. Does nothing if a non-derived target type is passed to it.
-# -def update_cached_information_with_a_new_type(type): - - base_type = b2.build.type.base(type) - - if base_type: - for g in __vstg_cached_generators: - if base_type in __viable_source_types_cache.get(g, []): - __viable_source_types_cache[g].append(type) - - for t in __vst_cached_types: - if base_type in __viable_source_types_cache.get(t, []): - __viable_source_types_cache[t].append(type) - -# Clears cached viable source target type information except for target types -# and generators with all source types listed as viable. Should be called when -# something invalidates those cached values by possibly causing some new source -# types to become viable. -# -def invalidate_extendable_viable_source_target_type_cache(): - - global __vstg_cached_generators - generators_with_cached_source_types = __vstg_cached_generators - __vstg_cached_generators = [] - - for g in generators_with_cached_source_types: - if __viable_source_types_cache.has_key(g): - if __viable_source_types_cache[g] == ["*"]: - __vstg_cached_generators.append(g) - else: - del __viable_source_types_cache[g] - - global __vst_cached_types - types_with_cached_sources_types = __vst_cached_types - __vst_cached_types = [] - for t in types_with_cached_sources_types: - if __viable_source_types_cache.has_key(t): - if __viable_source_types_cache[t] == ["*"]: - __vst_cached_types.append(t) - else: - del __viable_source_types_cache[t] - -def dout(message): - if debug(): - print __indent + message - -class Generator: - """ Creates a generator. - manager: the build manager. - id: identifies the generator - - rule: the rule which sets up build actions. - - composing: whether generator processes each source target in - turn, converting it to required types. - Ordinary generators pass all sources together to - recusrive generators.construct_types call. - - source_types (optional): types that this generator can handle - - target_types_and_names: types the generator will create and, optionally, names for - created targets. Each element should have the form - type["(" name-pattern ")"] - for example, obj(%_x). Name of generated target will be found - by replacing % with the name of source, provided explicit name - was not specified. - - requirements (optional) - - NOTE: all subclasses must have a similar signature for clone to work! - """ - def __init__ (self, id, composing, source_types, target_types_and_names, requirements = []): - assert(not isinstance(source_types, str)) - assert(not isinstance(target_types_and_names, str)) - self.id_ = id - self.composing_ = composing - self.source_types_ = source_types - self.target_types_and_names_ = target_types_and_names - self.requirements_ = requirements - - self.target_types_ = [] - self.name_prefix_ = [] - self.name_postfix_ = [] - - for e in target_types_and_names: - # Create three parallel lists: one with the list of target types, - # and two other with prefixes and postfixes to be added to target - # name. We use parallel lists for prefix and postfix (as opposed - # to mapping), because given target type might occur several times, - # for example "H H(%_symbols)". 
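The parallel-list parsing described above relies on _re_separate_types_prefix_and_postfix, defined earlier in this module. A quick demonstration of the groups it produces:

    import re

    # Group 1 is the target type; groups 3 and 4 are the name prefix
    # and postfix around the '%' placeholder.
    pattern = re.compile(r'([^\(]*)(\((.*)%(.*)\))?')

    m = pattern.match('obj(%_x)')
    print(m.group(1), repr(m.group(3)), repr(m.group(4)))
    # obj '' '_x'  -> source 'foo' yields a target named 'foo_x'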
- m = _re_separate_types_prefix_and_postfix.match (e) - - if not m: - raise BaseException ("Invalid type and name '%s' in declaration of type '%s'" % (e, id)) - - target_type = m.group (1) - if not target_type: target_type = '' - prefix = m.group (3) - if not prefix: prefix = '' - postfix = m.group (4) - if not postfix: postfix = '' - - self.target_types_.append (target_type) - self.name_prefix_.append (prefix) - self.name_postfix_.append (postfix) - - for x in self.source_types_: - type.validate (x) - - for x in self.target_types_: - type.validate (x) - - def clone (self, new_id, new_toolset_properties): - """ Returns another generator which differers from $(self) in - - id - - value to <toolset> feature in properties - """ - return self.__class__ (new_id, - self.composing_, - self.source_types_, - self.target_types_and_names_, - # Note: this does not remove any subfeatures of <toolset> - # which might cause problems - property.change (self.requirements_, '<toolset>') + new_toolset_properties) - - def clone_and_change_target_type(self, base, type): - """Creates another generator that is the same as $(self), except that - if 'base' is in target types of $(self), 'type' will in target types - of the new generator.""" - target_types = [] - for t in self.target_types_and_names_: - m = _re_match_type.match(t) - assert m - - if m.group(1) == base: - if m.group(2): - target_types.append(type + m.group(2)) - else: - target_types.append(type) - else: - target_types.append(t) - - return self.__class__(self.id_, self.composing_, - self.source_types_, - target_types, - self.requirements_) - - - def id(self): - return self.id_ - - def source_types (self): - """ Returns the list of target type the generator accepts. - """ - return self.source_types_ - - def target_types (self): - """ Returns the list of target types that this generator produces. - It is assumed to be always the same -- i.e. it cannot change depending - list of sources. - """ - return self.target_types_ - - def requirements (self): - """ Returns the required properties for this generator. Properties - in returned set must be present in build properties if this - generator is to be used. If result has grist-only element, - that build properties must include some value of that feature. - """ - return self.requirements_ - - def match_rank (self, ps): - """ Returns true if the generator can be run with the specified - properties. - """ - # See if generator's requirements are satisfied by - # 'properties'. Treat a feature name in requirements - # (i.e. grist-only element), as matching any value of the - # feature. - all_requirements = self.requirements () - - property_requirements = [] - feature_requirements = [] - # This uses strings because genenator requirements allow - # the '<feature>' syntax without value and regular validation - # is not happy about that. - for r in all_requirements: - if get_value (r): - property_requirements.append (r) - - else: - feature_requirements.append (r) - - return all(ps.get(get_grist(s)) == [get_value(s)] for s in property_requirements) \ - and all(ps.get(get_grist(s)) for s in feature_requirements) - - def run (self, project, name, prop_set, sources): - """ Tries to invoke this generator on the given sources. Returns a - list of generated targets (instances of 'virtual-target'). - - project: Project for which the targets are generated. - - name: Determines the name of 'name' attribute for - all generated targets. See 'generated_targets' method. - - prop_set: Desired properties for generated targets. 
- - sources: Source targets. - """ - - if project.manager ().logger ().on (): - project.manager ().logger ().log (__name__, " generator '%s'" % self.id_) - project.manager ().logger ().log (__name__, " composing: '%s'" % self.composing_) - - if not self.composing_ and len (sources) > 1 and len (self.source_types_) > 1: - raise BaseException ("Unsupported source/source_type combination") - - # We don't run composing generators if no name is specified. The reason - # is that composing generator combines several targets, which can have - # different names, and it cannot decide which name to give for produced - # target. Therefore, the name must be passed. - # - # This in effect, means that composing generators are runnable only - # at top-level of transofrmation graph, or if name is passed explicitly. - # Thus, we dissallow composing generators in the middle. For example, the - # transofrmation CPP -> OBJ -> STATIC_LIB -> RSP -> EXE won't be allowed - # (the OBJ -> STATIC_LIB generator is composing) - if not self.composing_ or name: - return self.run_really (project, name, prop_set, sources) - else: - return [] - - def run_really (self, project, name, prop_set, sources): - - # consumed: Targets that this generator will consume directly. - # bypassed: Targets that can't be consumed and will be returned as-is. - - if self.composing_: - (consumed, bypassed) = self.convert_multiple_sources_to_consumable_types (project, prop_set, sources) - else: - (consumed, bypassed) = self.convert_to_consumable_types (project, name, prop_set, sources) - - result = [] - if consumed: - result = self.construct_result (consumed, project, name, prop_set) - result.extend (bypassed) - - if result: - if project.manager ().logger ().on (): - project.manager ().logger ().log (__name__, " SUCCESS: ", result) - - else: - project.manager ().logger ().log (__name__, " FAILURE") - - return result - - def construct_result (self, consumed, project, name, prop_set): - """ Constructs the dependency graph that will be returned by this - generator. - consumed: Already prepared list of consumable targets - If generator requires several source files will contain - exactly len $(self.source_types_) targets with matching types - Otherwise, might contain several targets with the type of - self.source_types_ [0] - project: - name: - prop_set: Properties to be used for all actions create here - """ - result = [] - # If this is 1->1 transformation, apply it to all consumed targets in order. - if len (self.source_types_) < 2 and not self.composing_: - - for r in consumed: - result.extend (self.generated_targets ([r], prop_set, project, name)) - - else: - - if consumed: - result.extend (self.generated_targets (consumed, prop_set, project, name)) - - return result - - def determine_target_name(self, fullname): - # Determine target name from fullname (maybe including path components) - # Place optional prefix and postfix around basename - - dir = os.path.dirname(fullname) - name = os.path.basename(fullname) - - if dir and not ".." in dir and not os.path.isabs(dir): - # Relative path is always relative to the source - # directory. Retain it, so that users can have files - # with the same in two different subdirectories. - name = dir + "/" + name - - return name - - def determine_output_name(self, sources): - """Determine the name of the produced target from the - names of the sources.""" - - # The simple case if when a name - # of source has single dot. Then, we take the part before - # dot. 
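For that simple case, the computation is just os.path.splitext on the first source's name; the discussion of multi-dot names continues below.

    import os.path

    # Strip the last extension from the source name.
    print(os.path.splitext('a.host.cpp')[0])  # a.host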
Several dots can be caused by: - # - Using source file like a.host.cpp - # - A type which suffix has a dot. Say, we can - # type 'host_cpp' with extension 'host.cpp'. - # In the first case, we want to take the part till the last - # dot. In the second case -- no sure, but for now take - # the part till the last dot too. - name = os.path.splitext(sources[0].name())[0] - - for s in sources[1:]: - n2 = os.path.splitext(s.name()) - if n2 != name: - get_manager().errors()( - "%s: source targets have different names: cannot determine target name" - % (self.id_)) - - # Names of sources might include directory. We should strip it. - return self.determine_target_name(sources[0].name()) - - - def generated_targets (self, sources, prop_set, project, name): - """ Constructs targets that are created after consuming 'sources'. - The result will be the list of virtual-target, which the same length - as 'target_types' attribute and with corresponding types. - - When 'name' is empty, all source targets must have the same value of - the 'name' attribute, which will be used instead of the 'name' argument. - - The value of 'name' attribute for each generated target will be equal to - the 'name' parameter if there's no name pattern for this type. Otherwise, - the '%' symbol in the name pattern will be replaced with the 'name' parameter - to obtain the 'name' attribute. - - For example, if targets types are T1 and T2(with name pattern "%_x"), suffixes - for T1 and T2 are .t1 and t2, and source if foo.z, then created files would - be "foo.t1" and "foo_x.t2". The 'name' attribute actually determined the - basename of a file. - - Note that this pattern mechanism has nothing to do with implicit patterns - in make. It's a way to produce target which name is different for name of - source. - """ - if not name: - name = self.determine_output_name(sources) - - # Assign an action for each target - action = self.action_class() - a = action(project.manager(), sources, self.id_, prop_set) - - # Create generated target for each target type. - targets = [] - pre = self.name_prefix_ - post = self.name_postfix_ - for t in self.target_types_: - basename = os.path.basename(name) - idx = basename.find(".") - if idx != -1: - basename = basename[:idx] - generated_name = pre[0] + basename + post[0] - generated_name = os.path.join(os.path.dirname(name), generated_name) - pre = pre[1:] - post = post[1:] - - targets.append(virtual_target.FileTarget(generated_name, t, project, a)) - - return [ project.manager().virtual_targets().register(t) for t in targets ] - - def convert_to_consumable_types (self, project, name, prop_set, sources, only_one=False): - """ Attempts to convert 'source' to the types that this generator can - handle. The intention is to produce the set of targets can should be - used when generator is run. - only_one: convert 'source' to only one of source types - if there's more that one possibility, report an - error. - - Returns a pair: - consumed: all targets that can be consumed. - bypassed: all targets that cannot be consumed. - """ - consumed = [] - bypassed = [] - missing_types = [] - - if len (sources) > 1: - # Don't know how to handle several sources yet. Just try - # to pass the request to other generator - missing_types = self.source_types_ - - else: - (c, m) = self.consume_directly (sources [0]) - consumed += c - missing_types += m - - # No need to search for transformation if - # some source type has consumed source and - # no more source types are needed. 
- if only_one and consumed: - missing_types = [] - - #TODO: we should check that only one source type - #if create of 'only_one' is true. - # TODO: consider if consuned/bypassed separation should - # be done by 'construct_types'. - - if missing_types: - transformed = construct_types (project, name, missing_types, prop_set, sources) - - # Add targets of right type to 'consumed'. Add others to - # 'bypassed'. The 'generators.construct' rule has done - # its best to convert everything to the required type. - # There's no need to rerun it on targets of different types. - - # NOTE: ignoring usage requirements - for t in transformed[1]: - if t.type() in missing_types: - consumed.append(t) - - else: - bypassed.append(t) - - consumed = unique(consumed) - bypassed = unique(bypassed) - - # remove elements of 'bypassed' that are in 'consumed' - - # Suppose the target type of current generator, X is produced from - # X_1 and X_2, which are produced from Y by one generator. - # When creating X_1 from Y, X_2 will be added to 'bypassed' - # Likewise, when creating X_2 from Y, X_1 will be added to 'bypassed' - # But they are also in 'consumed'. We have to remove them from - # bypassed, so that generators up the call stack don't try to convert - # them. - - # In this particular case, X_1 instance in 'consumed' and X_1 instance - # in 'bypassed' will be the same: because they have the same source and - # action name, and 'virtual-target.register' won't allow two different - # instances. Therefore, it's OK to use 'set.difference'. - - bypassed = set.difference(bypassed, consumed) - - return (consumed, bypassed) - - - def convert_multiple_sources_to_consumable_types (self, project, prop_set, sources): - """ Converts several files to consumable types. - """ - consumed = [] - bypassed = [] - - # We process each source one-by-one, trying to convert it to - # a usable type. - for s in sources: - # TODO: need to check for failure on each source. - (c, b) = self.convert_to_consumable_types (project, None, prop_set, [s], True) - if not c: - project.manager ().logger ().log (__name__, " failed to convert ", s) - - consumed.extend (c) - bypassed.extend (b) - - return (consumed, bypassed) - - def consume_directly (self, source): - real_source_type = source.type () - - # If there are no source types, we can consume anything - source_types = self.source_types() - if not source_types: - source_types = [real_source_type] - - consumed = [] - missing_types = [] - for st in source_types: - # The 'source' if of right type already) - if real_source_type == st or type.is_derived (real_source_type, st): - consumed.append (source) - - else: - missing_types.append (st) - - return (consumed, missing_types) - - def action_class (self): - """ Returns the class to be used to actions. Default implementation - returns "action". - """ - return virtual_target.Action - - -def find (id): - """ Finds the generator with id. Returns None if not found. - """ - return __generators.get (id, None) - -def register (g): - """ Registers new generator instance 'g'. - """ - id = g.id() - - __generators [id] = g - - # A generator can produce several targets of the - # same type. We want unique occurence of that generator - # in .generators.$(t) in that case, otherwise, it will - # be tried twice and we'll get false ambiguity. - for t in sequence.unique(g.target_types()): - __type_to_generators.setdefault(t, []).append(g) - - # Update the set of generators for toolset - - # TODO: should we check that generator with this id - # is not already registered. 
For example, the fop.jam - # module intentionally declared two generators with the - # same id, so such check will break it. - - # Some generators have multiple periods in their name, so the - # normal $(id:S=) won't generate the right toolset name. - # e.g. if id = gcc.compile.c++, then - # .generators-for-toolset.$(id:S=) will append to - # .generators-for-toolset.gcc.compile, which is a separate - # value from .generators-for-toolset.gcc. Correcting this - # makes generator inheritance work properly. - # See also inherit-generators in module toolset - base = id.split ('.', 100) [0] - - __generators_for_toolset.setdefault(base, []).append(g) - - # After adding a new generator that can construct new target types, we need - # to clear the related cached viable source target type information for - # constructing a specific target type or using a specific generator. Cached - # viable source target type lists affected by this are those containing any - # of the target types constructed by the new generator or any of their base - # target types. - # - # A more advanced alternative to clearing that cached viable source target - # type information would be to expand it with additional source types or - # even better - mark it as needing to be expanded on next use. - # - # For now we just clear all the cached viable source target type information - # that does not simply state 'all types' and may implement a more detailed - # algorithm later on if it becomes needed. - - invalidate_extendable_viable_source_target_type_cache() - - -def register_standard (id, source_types, target_types, requirements = []): - """ Creates new instance of the 'generator' class and registers it. - Returns the creates instance. - Rationale: the instance is returned so that it's possible to first register - a generator and then call 'run' method on that generator, bypassing all - generator selection. - """ - g = Generator (id, False, source_types, target_types, requirements) - register (g) - return g - -def register_composing (id, source_types, target_types, requirements = []): - g = Generator (id, True, source_types, target_types, requirements) - register (g) - return g - -def generators_for_toolset (toolset): - """ Returns all generators which belong to 'toolset'. - """ - return __generators_for_toolset.get(toolset, []) - -def override (overrider_id, overridee_id): - """Make generator 'overrider-id' be preferred to - 'overridee-id'. If, when searching for generators - that could produce a target of certain type, - both those generators are amoung viable generators, - the overridden generator is immediately discarded. - - The overridden generators are discarded immediately - after computing the list of viable generators, before - running any of them.""" - - __overrides.get(overrider_id, []).append(overridee_id) - -def __viable_source_types_real (target_type): - """ Returns a list of source type which can possibly be converted - to 'target_type' by some chain of generator invocation. - - More formally, takes all generators for 'target_type' and - returns union of source types for those generators and result - of calling itself recusrively on source types. - """ - generators = [] - - # 't0' is the initial list of target types we need to process to get a list - # of their viable source target types. New target types will not be added to - # this list. - t0 = type.all_bases (target_type) - - - # 't' is the list of target types which have not yet been processed to get a - # list of their viable source target types. 
This list will get expanded as - # we locate more target types to process. - t = t0 - - result = [] - while t: - # Find all generators for current type. - # Unlike 'find_viable_generators' we don't care about prop_set. - generators = __type_to_generators.get (t [0], []) - t = t[1:] - - for g in generators: - if not g.source_types(): - # Empty source types -- everything can be accepted - result = "*" - # This will terminate outer loop. - t = None - break - - for source_type in g.source_types (): - if not source_type in result: - # If generator accepts 'source_type' it - # will happily accept any type derived from it - all = type.all_derived (source_type) - for n in all: - if not n in result: - - # Here there is no point in adding target types to - # the list of types to process in case they are or - # have already been on that list. We optimize this - # check by realizing that we only need to avoid the - # original target type's base types. Other target - # types that are or have been on the list of target - # types to process have been added to the 'result' - # list as well and have thus already been eliminated - # by the previous if. - if not n in t0: - t.append (n) - result.append (n) - - return result - - -def viable_source_types (target_type): - """ Helper rule, caches the result of '__viable_source_types_real'. - """ - if not __viable_source_types_cache.has_key(target_type): - __vst_cached_types.append(target_type) - __viable_source_types_cache [target_type] = __viable_source_types_real (target_type) - return __viable_source_types_cache [target_type] - -def viable_source_types_for_generator_real (generator): - """ Returns the list of source types, which, when passed to 'run' - method of 'generator', has some change of being eventually used - (probably after conversion by other generators) - """ - source_types = generator.source_types () - - if not source_types: - # If generator does not specify any source types, - # it might be special generator like builtin.lib-generator - # which just relays to other generators. Return '*' to - # indicate that any source type is possibly OK, since we don't - # know for sure. - return ['*'] - - else: - result = [] - for s in source_types: - viable_sources = viable_source_types(s) - if viable_sources == "*": - result = ["*"] - break - else: - result.extend(type.all_derived(s) + viable_sources) - return unique(result) - -def viable_source_types_for_generator (generator): - """ Caches the result of 'viable_source_types_for_generator'. - """ - if not __viable_source_types_cache.has_key(generator): - __vstg_cached_generators.append(generator) - __viable_source_types_cache[generator] = viable_source_types_for_generator_real (generator) - - return __viable_source_types_cache[generator] - -def try_one_generator_really (project, name, generator, target_type, properties, sources): - """ Returns usage requirements + list of created targets. 
- """ - targets = generator.run (project, name, properties, sources) - - usage_requirements = [] - success = False - - dout("returned " + str(targets)) - - if targets: - success = True; - - if isinstance (targets[0], property_set.PropertySet): - usage_requirements = targets [0] - targets = targets [1] - - else: - usage_requirements = property_set.empty () - - dout( " generator" + generator.id() + " spawned ") - # generators.dout [ indent ] " " $(targets) ; -# if $(usage-requirements) -# { -# generators.dout [ indent ] " with usage requirements:" $(x) ; -# } - - if success: - return (usage_requirements, targets) - else: - return None - -def try_one_generator (project, name, generator, target_type, properties, sources): - """ Checks if generator invocation can be pruned, because it's guaranteed - to fail. If so, quickly returns empty list. Otherwise, calls - try_one_generator_really. - """ - source_types = [] - - for s in sources: - source_types.append (s.type ()) - - viable_source_types = viable_source_types_for_generator (generator) - - if source_types and viable_source_types != ['*'] and\ - not set.intersection (source_types, viable_source_types): - if project.manager ().logger ().on (): - id = generator.id () - project.manager ().logger ().log (__name__, "generator '%s' pruned" % id) - project.manager ().logger ().log (__name__, "source_types" '%s' % source_types) - project.manager ().logger ().log (__name__, "viable_source_types '%s'" % viable_source_types) - - return [] - - else: - return try_one_generator_really (project, name, generator, target_type, properties, sources) - - -def construct_types (project, name, target_types, prop_set, sources): - - result = [] - usage_requirements = property_set.empty() - - for t in target_types: - r = construct (project, name, t, prop_set, sources) - - if r: - (ur, targets) = r - usage_requirements = usage_requirements.add(ur) - result.extend(targets) - - # TODO: have to introduce parameter controlling if - # several types can be matched and add appropriate - # checks - - # TODO: need to review the documentation for - # 'construct' to see if it should return $(source) even - # if nothing can be done with it. Currents docs seem to - # imply that, contrary to the behaviour. - if result: - return (usage_requirements, result) - - else: - return (usage_requirements, sources) - -def __ensure_type (targets): - """ Ensures all 'targets' have types. If this is not so, exists with - error. - """ - for t in targets: - if not t.type (): - get_manager().errors()("target '%s' has no type" % str (t)) - -def find_viable_generators_aux (target_type, prop_set): - """ Returns generators which can be used to construct target of specified type - with specified properties. Uses the following algorithm: - - iterates over requested target_type and all it's bases (in the order returned bt - type.all-bases. - - for each type find all generators that generate that type and which requirements - are satisfied by properties. - - if the set of generators is not empty, returns that set. - - Note: this algorithm explicitly ignores generators for base classes if there's - at least one generator for requested target_type. - """ - # Select generators that can create the required target type. - viable_generators = [] - initial_generators = [] - - import type - - # Try all-type generators first. Assume they have - # quite specific requirements. 
- all_bases = type.all_bases(target_type) - - for t in all_bases: - - initial_generators = __type_to_generators.get(t, []) - - if initial_generators: - dout("there are generators for this type") - if t != target_type: - # We're here, when no generators for target-type are found, - # but there are some generators for a base type. - # We'll try to use them, but they will produce targets of - # base type, not of 'target-type'. So, we clone the generators - # and modify the list of target types. - generators2 = [] - for g in initial_generators[:]: - # generators.register adds generator to the list of generators - # for toolsets, which is a bit strange, but should work. - # That list is only used when inheriting toolset, which - # should have being done before generators are run. - ng = g.clone_and_change_target_type(t, target_type) - generators2.append(ng) - register(ng) - - initial_generators = generators2 - break - - for g in initial_generators: - dout("trying generator " + g.id() - + "(" + str(g.source_types()) + "->" + str(g.target_types()) + ")") - - m = g.match_rank(prop_set) - if m: - dout(" is viable") - viable_generators.append(g) - - return viable_generators - -def find_viable_generators (target_type, prop_set): - key = target_type + '.' + str (prop_set) - - l = __viable_generators_cache.get (key, None) - if not l: - l = [] - - if not l: - l = find_viable_generators_aux (target_type, prop_set) - - __viable_generators_cache [key] = l - - viable_generators = [] - for g in l: - # Avoid trying the same generator twice on different levels. - # TODO: is this really used? - if not g in __active_generators: - viable_generators.append (g) - else: - dout(" generator %s is active, discarding" % g.id()) - - # Generators which override 'all'. - all_overrides = [] - - # Generators which are overriden - overriden_ids = [] - - for g in viable_generators: - id = g.id () - - this_overrides = __overrides.get (id, []) - - if this_overrides: - overriden_ids.extend (this_overrides) - if 'all' in this_overrides: - all_overrides.append (g) - - if all_overrides: - viable_generators = all_overrides - - result = [] - for g in viable_generators: - if not g.id () in overriden_ids: - result.append (g) - - - return result - -def __construct_really (project, name, target_type, prop_set, sources): - """ Attempts to construct target by finding viable generators, running them - and selecting the dependency graph. 
- """ - viable_generators = find_viable_generators (target_type, prop_set) - - result = [] - - project.manager ().logger ().log (__name__, "*** %d viable generators" % len (viable_generators)) - - generators_that_succeeded = [] - - for g in viable_generators: - __active_generators.append(g) - r = try_one_generator (project, name, g, target_type, prop_set, sources) - del __active_generators[-1] - - if r: - generators_that_succeeded.append(g) - if result: - output = cStringIO.StringIO() - print >>output, "ambiguity found when searching for best transformation" - print >>output, "Trying to produce type '%s' from: " % (target_type) - for s in sources: - print >>output, " - " + s.str() - print >>output, "Generators that succeeded:" - for g in generators_that_succeeded: - print >>output, " - " + g.id() - print >>output, "First generator produced: " - for t in result[1:]: - print >>output, " - " + str(t) - print >>output, "Second generator produced:" - for t in r[1:]: - print >>output, " - " + str(t) - get_manager().errors()(output.getvalue()) - else: - result = r; - - return result; - - -def construct (project, name, target_type, prop_set, sources, top_level=False): - """ Attempts to create target of 'target-type' with 'properties' - from 'sources'. The 'sources' are treated as a collection of - *possible* ingridients -- i.e. it is not required to consume - them all. If 'multiple' is true, the rule is allowed to return - several targets of 'target-type'. - - Returns a list of target. When this invocation is first instance of - 'construct' in stack, returns only targets of requested 'target-type', - otherwise, returns also unused sources and additionally generated - targets. - - If 'top-level' is set, does not suppress generators that are already - used in the stack. This may be useful in cases where a generator - has to build a metatarget -- for example a target corresponding to - built tool. - """ - - global __active_generators - if top_level: - saved_active = __active_generators - __active_generators = [] - - global __construct_stack - if not __construct_stack: - __ensure_type (sources) - - __construct_stack.append (1) - - if project.manager().logger().on(): - increase_indent () - - dout( "*** construct " + target_type) - - for s in sources: - dout(" from " + str(s)) - - project.manager().logger().log (__name__, " properties: ", prop_set.raw ()) - - result = __construct_really(project, name, target_type, prop_set, sources) - - project.manager().logger().decrease_indent() - - __construct_stack = __construct_stack [1:] - - if top_level: - __active_generators = saved_active - - return result - diff --git a/jam-files/boost-build/build/modifiers.jam b/jam-files/boost-build/build/modifiers.jam deleted file mode 100644 index 6b009343..00000000 --- a/jam-files/boost-build/build/modifiers.jam +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright 2003 Rene Rivera -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Modifiers are generalized generators that mutate targets in specific ways. -# This structure allows for grouping a variety of functionality in an -# orthogonal way to the functionality in toolsets, and without specifying -# more target variations. In turn the modifiers can be used as building -# blocks to implement simple requests, like the <version> feature. 
- -import modules ; -import feature ; -import errors ; -import type ; -import "class" : new ; -import generators ; -import property ; -import virtual-target ; -import numbers ; -import sequence ; -import symlink ; -import property-set ; - -# Base generator for creating targets that are modifications of existing -# targets. -# -class modifier : generator -{ - rule __init__ ( - id - composing ? - : source-types * - : target-types-and-names + - : requirements * - ) - { - generator.__init__ $(id) $(composing) - : $(source-types) - : $(target-types-and-names) - : $(requirements) ; - - self.targets-in-progress = ; - } - - # Wraps the generation of the target to call before and after rules to - # affect the real target. - # - rule run ( project name ? : property-set : sources + ) - { - local result ; - local current-target = $(project)^$(name) ; - if ! $(current-target) in $(self.targets-in-progress) - { - # Before modifications... - local project_ = - [ modify-project-before - $(project) $(name) : $(property-set) : $(sources) ] ; - local name_ = - [ modify-name-before - $(project) $(name) : $(property-set) : $(sources) ] ; - local property-set_ = - [ modify-properties-before - $(project) $(name) : $(property-set) : $(sources) ] ; - local sources_ = - [ modify-sources-before - $(project) $(name) : $(property-set) : $(sources) ] ; - project = $(project_) ; - name = $(name_) ; - property-set = $(property-set_) ; - sources = $(sources_) ; - - # Generate the real target... - local target-type-p = - [ property.select <main-target-type> : [ $(property-set).raw ] ] ; - self.targets-in-progress += $(current-target) ; - result = - [ generators.construct $(project) $(name) - : $(target-type-p:G=) - : $(property-set) - : $(sources) ] ; - self.targets-in-progress = $(self.targets-in-progress[1--2]) ; - - # After modifications... - result = - [ modify-target-after $(result) - : $(project) $(name) - : $(property-set) - : $(sources) ] ; - } - return $(result) ; - } - - rule modify-project-before ( project name ? : property-set : sources + ) - { - return $(project) ; - } - - rule modify-name-before ( project name ? : property-set : sources + ) - { - return $(name) ; - } - - rule modify-properties-before ( project name ? : property-set : sources + ) - { - return $(property-set) ; - } - - rule modify-sources-before ( project name ? : property-set : sources + ) - { - return $(sources) ; - } - - rule modify-target-after ( target : project name ? : property-set : sources + ) - { - return $(target) ; - } - - # Utility, clones a file-target with optional changes to the name, type and - # project of the target. - # NOTE: This functionality should be moved, and generalized, to - # virtual-targets. - # - rule clone-file-target ( target : new-name ? : new-type ? : new-project ? ) - { - # Need a MUTCH better way to clone a target... 
- new-name ?= [ $(target).name ] ; - new-type ?= [ $(target).type ] ; - new-project ?= [ $(target).project ] ; - local result = [ new file-target $(new-name) : $(new-type) : $(new-project) ] ; - - if [ $(target).dependencies ] { $(result).depends [ $(target).dependencies ] ; } - $(result).root [ $(target).root ] ; - $(result).set-usage-requirements [ $(target).usage-requirements ] ; - - local action = [ $(target).action ] ; - local action-class = [ modules.peek $(action) : __class__ ] ; - - local ps = [ $(action).properties ] ; - local cloned-action = [ new $(action-class) $(result) : - [ $(action).sources ] : [ $(action).action-name ] : $(ps) ] ; - $(result).action $(cloned-action) ; - - return $(result) ; - } -} - - -# A modifier that changes the name of a target, after it's generated, given a -# regular expression to split the name, and a set of token to insert between the -# split tokens of the name. This also exposes the target for other uses with a -# symlink to the original name (optionally). -# -class name-modifier : modifier -{ - rule __init__ ( ) - { - # Apply ourselves to EXE targets, for now. - modifier.__init__ name.modifier : : EXE LIB : <name-modify>yes ; - } - - # Modifies the name, by cloning the target with the new name. - # - rule modify-target-after ( target : project name ? : property-set : sources + ) - { - local result = $(target) ; - - local name-mod-p = [ property.select <name-modifier> : [ $(property-set).raw ] ] ; - if $(name-mod-p) - { - local new-name = [ modify-name [ $(target).name ] : $(name-mod-p:G=) ] ; - if $(new-name) != [ $(target).name ] - { - result = [ clone-file-target $(target) : $(new-name) ] ; - } - local expose-original-as-symlink = [ MATCH "<symlink>(.*)" : $(name-mod-p) ] ; - if $(expose-original-as-symlink) - { - local symlink-t = [ new symlink-targets $(project) : $(name) : [ $(result).name ] ] ; - result = [ $(symlink-t).construct $(result) - : [ property-set.create [ $(property-set).raw ] <symlink-location>build-relative ] ] ; - } - } - - return $(result) ; - } - - # Do the transformation of the name. - # - rule modify-name ( name : modifier-spec + ) - { - local match = [ MATCH "<match>(.*)" : $(modifier-spec) ] ; - local name-parts = [ MATCH $(match) : $(name) ] ; - local insertions = [ sequence.insertion-sort [ MATCH "(<[0123456789]+>.*)" : $(modifier-spec) ] ] ; - local new-name-parts ; - local insert-position = 1 ; - while $(insertions) - { - local insertion = [ MATCH "<$(insert-position)>(.*)" : $(insertions[1]) ] ; - if $(insertion) - { - new-name-parts += $(insertion) ; - insertions = $(insertions[2-]) ; - } - new-name-parts += $(name-parts[1]) ; - name-parts = $(name-parts[2-]) ; - insert-position = [ numbers.increment $(insert-position) ] ; - } - new-name-parts += $(name-parts) ; - return [ sequence.join $(new-name-parts) ] ; - } - - rule optional-properties ( ) - { - return <name-modify>yes ; - } -} -feature.feature name-modifier : : free ; -feature.feature name-modify : no yes : incidental optional ; -generators.register [ new name-modifier ] ; - -# Translates <version> property to a set of modification properties -# that are applied by the name-modifier, and symlink-modifier. 
-# -rule version-to-modifier ( property : properties * ) -{ - return - <name-modify>yes - <name-modifier><match>"^([^.]*)(.*)" <name-modifier><2>.$(property:G=) - <name-modifier><symlink>yes - ; -} -feature.action <version> : version-to-modifier ; diff --git a/jam-files/boost-build/build/project.ann.py b/jam-files/boost-build/build/project.ann.py deleted file mode 100644 index 349f5495..00000000 --- a/jam-files/boost-build/build/project.ann.py +++ /dev/null @@ -1,996 +0,0 @@ -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 1) # Status: being ported by Vladimir Prus -ddc17f01 (vladimir_prus 2007-10-26 14:57:56 +0000 2) # Base revision: 40480 -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 3) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 4) # Copyright 2002, 2003 Dave Abrahams -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 5) # Copyright 2002, 2005, 2006 Rene Rivera -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 6) # Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 7) # Distributed under the Boost Software License, Version 1.0. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 8) # (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 9) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 10) # Implements project representation and loading. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 11) # Each project is represented by -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 12) # - a module where all the Jamfile content live. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 13) # - an instance of 'project-attributes' class. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 14) # (given module name, can be obtained by 'attributes' rule) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 15) # - an instance of 'project-target' class (from targets.jam) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 16) # (given a module name, can be obtained by 'target' rule) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 17) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 18) # Typically, projects are created as result of loading Jamfile, which is -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 19) # do by rules 'load' and 'initialize', below. First, module for Jamfile -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 20) # is loaded and new project-attributes instance is created. Some rules -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 21) # necessary for project are added to the module (see 'project-rules' module) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 22) # at the bottom of this file. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 23) # Default project attributes are set (inheriting attributes of parent project, if -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 24) # it exists). After that, Jamfile is read. It can declare its own attributes, -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 25) # via 'project' rule, which will be combined with already set attributes. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 26) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 27) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 28) # The 'project' rule can also declare project id, which will be associated with -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 29) # the project module. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 30) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 31) # There can also be 'standalone' projects. They are created by calling 'initialize' -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 32) # on arbitrary module, and not specifying location. After the call, the module can -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 33) # call 'project' rule, declare main target and behave as regular projects. However, -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 34) # since it's not associated with any location, it's better declare only prebuilt -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 35) # targets. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 36) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 37) # The list of all loaded Jamfile is stored in variable .project-locations. It's possible -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 38) # to obtain module name for a location using 'module-name' rule. The standalone projects -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 39) # are not recorded, the only way to use them is by project id. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 40) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 41) import b2.util.path -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 42) from b2.build import property_set, property -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 43) from b2.build.errors import ExceptionWithUserContext -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 44) import b2.build.targets -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 45) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 46) import bjam -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 47) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 48) import re -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 49) import sys -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 50) import os -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 51) import string -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 52) import imp -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 53) import traceback -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 54) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 55) class ProjectRegistry: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 56) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 57) def __init__(self, manager, global_build_dir): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 58) self.manager = manager -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 59) self.global_build_dir = None -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 60) self.project_rules_ = ProjectRules(self) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 61) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 62) # The target corresponding to the project being loaded now -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 63) self.current_project = None -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 64) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 65) # The set of names of loaded project modules -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 66) self.jamfile_modules = {} -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 67) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 68) # Mapping from location to module name -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 69) self.location2module = {} -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 70) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 71) # Mapping from project id to project module 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 72) self.id2module = {} -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 73) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 74) # Map from Jamfile directory to parent Jamfile/Jamroot -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 75) # location. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 76) self.dir2parent_jamfile = {} -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 77) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 78) # Map from directory to the name of Jamfile in -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 79) # that directory (or None). -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 80) self.dir2jamfile = {} -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 81) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 82) # Map from project module to attributes object. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 83) self.module2attributes = {} -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 84) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 85) # Map from project module to target for the project -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 86) self.module2target = {} -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 87) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 88) # Map from names to Python modules, for modules loaded -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 89) # via 'using' and 'import' rules in Jamfiles. -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 90) self.loaded_tool_modules_ = {} -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 91) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 92) # Map from project target to the list of -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 93) # (id,location) pairs corresponding to all 'use-project' -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 94) # invocations. -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 95) # TODO: should not have a global map, keep this -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 96) # in ProjectTarget. -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 97) self.used_projects = {} -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 98) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 99) self.saved_current_project = [] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 100) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 101) self.JAMROOT = self.manager.getenv("JAMROOT"); -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 102) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 103) # Note the use of character groups, as opposed to listing -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 104) # 'Jamroot' and 'jamroot'. With the latter, we'd get duplicate -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 105) # matches on windows and would have to eliminate duplicates. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 106) if not self.JAMROOT: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 107) self.JAMROOT = ["project-root.jam", "[Jj]amroot", "[Jj]amroot.jam"] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 108) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 109) # Default patterns to search for the Jamfiles to use for build -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 110) # declarations. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 111) self.JAMFILE = self.manager.getenv("JAMFILE") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 112) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 113) if not self.JAMFILE: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 114) self.JAMFILE = ["[Bb]uild.jam", "[Jj]amfile.v2", "[Jj]amfile", -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 115) "[Jj]amfile.jam"] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 116) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 117) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 118) def load (self, jamfile_location): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 119) """Loads jamfile at the given location. After loading, project global -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 120) file and jamfile needed by the loaded one will be loaded recursively. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 121) If the jamfile at that location is loaded already, does nothing. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 122) Returns the project module for the Jamfile.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 123) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 124) absolute = os.path.join(os.getcwd(), jamfile_location) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 125) absolute = os.path.normpath(absolute) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 126) jamfile_location = b2.util.path.relpath(os.getcwd(), absolute) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 127) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 128) if "--debug-loading" in self.manager.argv(): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 129) print "Loading Jamfile at '%s'" % jamfile_location -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 130) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 131) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 132) mname = self.module_name(jamfile_location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 133) # If Jamfile is already loaded, don't try again. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 134) if not mname in self.jamfile_modules: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 135) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 136) self.load_jamfile(jamfile_location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 137) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 138) # We want to make sure that child project are loaded only -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 139) # after parent projects. In particular, because parent projects -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 140) # define attributes whch are inherited by children, and we don't -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 141) # want children to be loaded before parents has defined everything. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 142) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 143) # While "build-project" and "use-project" can potentially refer -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 144) # to child projects from parent projects, we don't immediately -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 145) # load child projects when seing those attributes. Instead, -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 146) # we record the minimal information that will be used only later. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 147) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 148) self.load_used_projects(mname) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 149) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 150) return mname -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 151) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 152) def load_used_projects(self, module_name): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 153) # local used = [ modules.peek $(module-name) : .used-projects ] ; -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 154) used = self.used_projects[module_name] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 155) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 156) location = self.attribute(module_name, "location") -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 157) for u in used: -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 158) id = u[0] -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 159) where = u[1] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 160) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 161) self.use(id, os.path.join(location, where)) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 162) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 163) def load_parent(self, location): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 164) """Loads parent of Jamfile at 'location'. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 165) Issues an error if nothing is found.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 166) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 167) found = b2.util.path.glob_in_parents( -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 168) location, self.JAMROOT + self.JAMFILE) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 169) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 170) if not found: -1674e2d9 (jhunold 2008-08-08 19:52:05 +0000 171) print "error: Could not find parent for project at '%s'" % location -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 172) print "error: Did not find Jamfile or project-root.jam in any parent directory." -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 173) sys.exit(1) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 174) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 175) return self.load(os.path.dirname(found[0])) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 176) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 177) def act_as_jamfile(self, module, location): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 178) """Makes the specified 'module' act as if it were a regularly loaded Jamfile -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 179) at 'location'. If Jamfile is already located for that location, it's an -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 180) error.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 181) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 182) if self.module_name(location) in self.jamfile_modules: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 183) self.manager.errors()( -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 184) "Jamfile was already loaded for '%s'" % location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 185) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 186) # Set up non-default mapping from location to module. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 187) self.location2module[location] = module -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 188) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 189) # Add the location to the list of project locations -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 190) # so that we don't try to load Jamfile in future -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 191) self.jamfile_modules.append(location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 192) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 193) self.initialize(module, location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 194) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 195) def find(self, name, current_location): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 196) """Given 'name' which can be project-id or plain directory name, -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 197) return project module corresponding to that id or directory. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 198) Returns nothing of project is not found.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 199) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 200) project_module = None -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 201) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 202) # Try interpreting name as project id. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 203) if name[0] == '/': -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 204) project_module = self.id2module.get(name) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 205) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 206) if not project_module: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 207) location = os.path.join(current_location, name) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 208) # If no project is registered for the given location, try to -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 209) # load it. First see if we have Jamfile. If not we might have project -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 210) # root, willing to act as Jamfile. In that case, project-root -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 211) # must be placed in the directory referred by id. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 212) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 213) project_module = self.module_name(location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 214) if not project_module in self.jamfile_modules and \ -49c03622 (jhunold 2008-07-23 09:57:41 +0000 215) b2.util.path.glob([location], self.JAMROOT + self.JAMFILE): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 216) project_module = self.load(location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 217) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 218) return project_module -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 219) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 220) def module_name(self, jamfile_location): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 221) """Returns the name of module corresponding to 'jamfile-location'. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 222) If no module corresponds to location yet, associates default -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 223) module name with that location.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 224) module = self.location2module.get(jamfile_location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 225) if not module: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 226) # Root the path, so that locations are always umbiguious. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 227) # Without this, we can't decide if '../../exe/program1' and '.' -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 228) # are the same paths, or not. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 229) jamfile_location = os.path.realpath( -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 230) os.path.join(os.getcwd(), jamfile_location)) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 231) module = "Jamfile<%s>" % jamfile_location -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 232) self.location2module[jamfile_location] = module -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 233) return module -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 234) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 235) def find_jamfile (self, dir, parent_root=0, no_errors=0): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 236) """Find the Jamfile at the given location. This returns the -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 237) exact names of all the Jamfiles in the given directory. The optional -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 238) parent-root argument causes this to search not the given directory -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 239) but the ones above it up to the directory given in it.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 240) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 241) # Glob for all the possible Jamfiles according to the match pattern. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 242) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 243) jamfile_glob = None -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 244) if parent_root: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 245) parent = self.dir2parent_jamfile.get(dir) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 246) if not parent: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 247) parent = b2.util.path.glob_in_parents(dir, -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 248) self.JAMFILE) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 249) self.dir2parent_jamfile[dir] = parent -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 250) jamfile_glob = parent -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 251) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 252) jamfile = self.dir2jamfile.get(dir) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 253) if not jamfile: -49c03622 (jhunold 2008-07-23 09:57:41 +0000 254) jamfile = b2.util.path.glob([dir], self.JAMFILE) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 255) self.dir2jamfile[dir] = jamfile -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 256) jamfile_glob = jamfile -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 257) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 258) if len(jamfile_glob): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 259) # Multiple Jamfiles found in the same place. Warn about this. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 260) # And ensure we use only one of them. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 261) # As a temporary convenience measure, if there's Jamfile.v2 amount -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 262) # found files, suppress the warning and use it. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 263) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 264) pattern = "(.*[Jj]amfile\\.v2)|(.*[Bb]uild\\.jam)" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 265) v2_jamfiles = [x for x in jamfile_glob if re.match(pattern, x)] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 266) if len(v2_jamfiles) == 1: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 267) jamfile_glob = v2_jamfiles -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 268) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 269) print """warning: Found multiple Jamfiles at '%s'! -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 270) Loading the first one: '%s'.""" % (dir, jamfile_glob[0]) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 271) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 272) # Could not find it, error. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 273) if not no_errors and not jamfile_glob: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 274) self.manager.errors()( -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 275) """Unable to load Jamfile. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 276) Could not find a Jamfile in directory '%s' -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 277) Attempted to find it with pattern '%s'. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 278) Please consult the documentation at 'http://boost.org/b2.'.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 279) % (dir, string.join(self.JAMFILE))) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 280) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 281) return jamfile_glob[0] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 282) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 283) def load_jamfile(self, dir): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 284) """Load a Jamfile at the given directory. Returns nothing. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 285) Will attempt to load the file as indicated by the JAMFILE patterns. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 286) Effect of calling this rule twice with the same 'dir' is underfined.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 287) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 288) # See if the Jamfile is where it should be. -49c03622 (jhunold 2008-07-23 09:57:41 +0000 289) jamfile_to_load = b2.util.path.glob([dir], self.JAMROOT) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 290) if not jamfile_to_load: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 291) jamfile_to_load = self.find_jamfile(dir) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 292) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 293) jamfile_to_load = jamfile_to_load[0] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 294) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 295) # The module of the jamfile. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 296) dir = os.path.realpath(os.path.dirname(jamfile_to_load)) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 297) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 298) jamfile_module = self.module_name (dir) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 299) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 300) # Initialize the jamfile module before loading. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 301) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 302) self.initialize(jamfile_module, dir, os.path.basename(jamfile_to_load)) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 303) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 304) saved_project = self.current_project -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 305) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 306) self.used_projects[jamfile_module] = [] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 307) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 308) # Now load the Jamfile in it's own context. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 309) # Initialization might have load parent Jamfiles, which might have -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 310) # loaded the current Jamfile with use-project. Do a final check to make -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 311) # sure it's not loaded already. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 312) if not jamfile_module in self.jamfile_modules: -49c03622 (jhunold 2008-07-23 09:57:41 +0000 313) self.jamfile_modules[jamfile_module] = True -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 314) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 315) # FIXME: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 316) # mark-as-user $(jamfile-module) ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 317) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 318) bjam.call("load", jamfile_module, jamfile_to_load) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 319) basename = os.path.basename(jamfile_to_load) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 320) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 321) # Now do some checks -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 322) if self.current_project != saved_project: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 323) self.manager.errors()( -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 324) """The value of the .current-project variable -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 325) has magically changed after loading a Jamfile. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 326) This means some of the targets might be defined a the wrong project. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 327) after loading %s -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 328) expected value %s -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 329) actual value %s""" % (jamfile_module, saved_project, self.current_project)) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 330) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 331) if self.global_build_dir: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 332) id = self.attribute(jamfile_module, "id") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 333) project_root = self.attribute(jamfile_module, "project-root") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 334) location = self.attribute(jamfile_module, "location") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 335) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 336) if location and project_root == dir: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 337) # This is Jamroot -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 338) if not id: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 339) # FIXME: go via errors module, so that contexts are -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 340) # shown? -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 341) print "warning: the --build-dir option was specified" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 342) print "warning: but Jamroot at '%s'" % dir -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 343) print "warning: specified no project id" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 344) print "warning: the --build-dir option will be ignored" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 345) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 346) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 347) def load_standalone(self, jamfile_module, file): -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 348) """Loads 'file' as a standalone project that has no location -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 349) associated with it. This is mostly useful for user-config.jam, -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 350) which should be able to define targets; although it has -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 351) some location in the filesystem, we don't want any build to -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 352) happen in the user's HOME, for example. -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 353) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 354) The caller is required to never call this method twice on -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 355) the same file.
-092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 356) """ -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 357) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 358) self.initialize(jamfile_module) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 359) self.used_projects[jamfile_module] = [] -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 360) bjam.call("load", jamfile_module, file) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 361) self.load_used_projects(jamfile_module) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 362) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 363) def is_jamroot(self, basename): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 364) match = [ pat for pat in self.JAMROOT if re.match(pat, basename)] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 365) if match: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 366) return 1 -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 367) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 368) return 0 -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 369) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 370) def initialize(self, module_name, location=None, basename=None): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 371) """Initialize the module for a project. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 372) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 373) module-name is the name of the project module. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 374) location is the location (directory) of the project to initialize. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 375) If not specified, a standalone project will be initialized -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 376) """ -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 377) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 378) if "--debug-loading" in self.manager.argv(): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 379) print "Initializing project '%s'" % module_name -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 380) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 381) # TODO: need to consider if standalone projects can do anything but defining -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 382) # prebuilt targets. If so, we need to give more sensible "location", so that -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 383) # source paths are correct.
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 384) if not location: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 385) location = "" -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 386) else: -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 387) location = b2.util.path.relpath(os.getcwd(), location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 388) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 389) attributes = ProjectAttributes(self.manager, location, module_name) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 390) self.module2attributes[module_name] = attributes -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 391) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 392) if location: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 393) attributes.set("source-location", location, exact=1) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 394) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 395) attributes.set("source-location", "", exact=1) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 396) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 397) attributes.set("requirements", property_set.empty(), exact=True) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 398) attributes.set("usage-requirements", property_set.empty(), exact=True) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 399) attributes.set("default-build", [], exact=True) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 400) attributes.set("projects-to-build", [], exact=True) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 401) attributes.set("project-root", None, exact=True) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 402) attributes.set("build-dir", None, exact=True) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 403) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 404) self.project_rules_.init_project(module_name) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 405) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 406) jamroot = False -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 407) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 408) parent_module = None; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 409) if module_name == "site-config": -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 410) # No parent -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 411) pass -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 412) elif module_name == "user-config": -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 413) parent_module = "site-config" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 414) elif location and not self.is_jamroot(basename): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 415) # We search for parent/project-root only if jamfile was specified -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 416) # --- i.e -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 417) # if the project is not standalone. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 418) parent_module = self.load_parent(location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 419) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 420) # It's either jamroot, or standalone project. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 421) # If it's jamroot, inherit from user-config. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 422) if location: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 423) parent_module = "user-config" ; -49c03622 (jhunold 2008-07-23 09:57:41 +0000 424) jamroot = True ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 425) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 426) if parent_module: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 427) self.inherit_attributes(module_name, parent_module) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 428) attributes.set("parent-module", parent_module, exact=1) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 429) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 430) if jamroot: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 431) attributes.set("project-root", location, exact=1) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 432) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 433) parent = None -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 434) if parent_module: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 435) parent = self.target(parent_module) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 436) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 437) if not self.module2target.has_key(module_name): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 438) target = b2.build.targets.ProjectTarget(self.manager, -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 439) module_name, module_name, parent, -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 440) self.attribute(module_name,"requirements"), -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 441) # FIXME: why we need to pass this? It's not -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 442) # passed in jam code. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 443) self.attribute(module_name, "default-build")) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 444) self.module2target[module_name] = target -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 445) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 446) self.current_project = self.target(module_name) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 447) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 448) def inherit_attributes(self, project_module, parent_module): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 449) """Make 'project-module' inherit attributes of project -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 450) root and parent module.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 451) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 452) attributes = self.module2attributes[project_module] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 453) pattributes = self.module2attributes[parent_module] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 454) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 455) # Parent module might be locationless user-config. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 456) # FIXME: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 457) #if [ modules.binding $(parent-module) ] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 458) #{ -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 459) # $(attributes).set parent : [ path.parent -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 460) # [ path.make [ modules.binding $(parent-module) ] ] ] ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 461) # } -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 462) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 463) attributes.set("project-root", pattributes.get("project-root"), exact=True) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 464) attributes.set("default-build", pattributes.get("default-build"), exact=True) -49c03622 (jhunold 2008-07-23 09:57:41 +0000 465) attributes.set("requirements", pattributes.get("requirements"), exact=True) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 466) attributes.set("usage-requirements", -cde6f09a (vladimir_prus 2007-10-19 23:12:33 +0000 467) pattributes.get("usage-requirements"), exact=1) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 468) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 469) parent_build_dir = pattributes.get("build-dir") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 470) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 471) if parent_build_dir: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 472) # Have to compute relative path from parent dir to our dir -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 473) # Convert both paths to absolute, since we cannot -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 474) # find relative path from ".." to "." -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 475) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 476) location = attributes.get("location") -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 477) parent_location = pattributes.get("location") -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 478) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 479) our_dir = os.path.join(os.getcwd(), location) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 480) parent_dir = os.path.join(os.getcwd(), parent_location) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 481) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 482) build_dir = os.path.join(parent_build_dir, -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 483) b2.util.path.relpath(parent_dir, -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 484) our_dir)) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 485) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 486) def register_id(self, id, module): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 487) """Associate the given id with the given project module.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 488) self.id2module[id] = module -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 489) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 490) def current(self): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 491) """Returns the project which is currently being loaded.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 492) return self.current_project -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 493) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 494) def push_current(self, project): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 495) """Temporarily changes the current project to 'project'.
Should -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 496) be followed by 'pop-current'.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 497) self.saved_current_project.append(self.current_project) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 498) self.current_project = project -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 499) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 500) def pop_current(self): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 501) self.current_project = self.saved_current_project[-1] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 502) del self.saved_current_project[-1] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 503) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 504) def attributes(self, project): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 505) """Returns the project-attribute instance for the -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 506) specified jamfile module.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 507) return self.module2attributes[project] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 508) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 509) def attribute(self, project, attribute): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 510) """Returns the value of the specified attribute in the -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 511) specified jamfile module.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 512) return self.module2attributes[project].get(attribute) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 513) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 514) def target(self, project_module): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 515) """Returns the project target corresponding to the 'project-module'.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 516) if not self.module2target[project_module]: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 517) self.module2target[project_module] = \ -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 518) ProjectTarget(project_module, project_module, -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 519) self.attribute(project_module, "requirements")) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 520) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 521) return self.module2target[project_module] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 522) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 523) def use(self, id, location): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 524) # Use/load a project. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 525) saved_project = self.current_project -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 526) project_module = self.load(location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 527) declared_id = self.attribute(project_module, "id") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 528) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 529) if not declared_id or declared_id != id: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 530) # The project at 'location' either has no id or -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 531) # that id is not equal to the 'id' parameter.
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 532) if self.id2module[id] and self.id2module[id] != project_module: -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 533) self.manager.errors()( -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 534) """Attempt to redeclare already existing project id '%s'""" % id) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 535) self.id2module[id] = project_module -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 536) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 537) self.current_project = saved_project -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 538) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 539) def add_rule(self, name, callable): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 540) """Makes rule 'name' available to all subsequently loaded Jamfiles. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 541) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 542) Calling that rule will relay to 'callable'.""" -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 543) self.project_rules_.add_rule(name, callable) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 544) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 545) def project_rules(self): -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 546) return self.project_rules_ -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 547) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 548) def glob_internal(self, project, wildcards, excludes, rule_name): -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 549) location = project.get("source-location") -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 550) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 551) result = [] -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 552) callable = b2.util.path.__dict__[rule_name] -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 553) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 554) paths = callable(location, wildcards, excludes) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 555) has_dir = 0 -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 556) for w in wildcards: -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 557) if os.path.dirname(w): -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 558) has_dir = 1 -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 559) break -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 560) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 561) if has_dir or rule_name != "glob": -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 562) # The paths we've found are relative to current directory, -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 563) # but the names specified in sources list are assumed to -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 564) # be relative to source directory of the corresponding -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 565) # project. So, just make the name absolute. -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 566) result = [os.path.join(os.getcwd(), p) for p in paths] -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 567) else: -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 568) # There was no directory in the wildcards, so the files are all -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 569) # in the source directory of the project. Just drop the -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 570) # directory, instead of making paths absolute.
-2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 571) result = [os.path.basename(p) for p in paths] -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 572) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 573) return result -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 574) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 575) def load_module(self, name, extra_path=None): -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 576) """Classic Boost.Build 'modules' are in fact global variables. -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 577) Therefore, try to find an already loaded Python module called 'name' in sys.modules. -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 578) If the module is not loaded, find it in the Boost.Build search -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 579) path and load it. The new module is not entered in sys.modules. -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 580) The motivation here is to have a disjoint namespace of modules -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 581) loaded via 'import/using' in Jamfile, and ordinary Python -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 582) modules. We don't want 'using foo' in Jamfile to load ordinary -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 583) Python module 'foo', which is not going to work. And we -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 584) also don't want 'import foo' in a regular Python module to -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 585) accidentally grab a module named foo that is internal to -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 586) Boost.Build and intended to provide an interface to Jamfiles.""" -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 587) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 588) existing = self.loaded_tool_modules_.get(name) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 589) if existing: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 590) return existing -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 591) -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 592) modules = sys.modules -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 593) for class_name in modules: -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 594) if name in class_name: -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 595) module = modules[class_name] -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 596) self.loaded_tool_modules_[name] = module -53b0faa2 (jhunold 2008-08-10 18:25:50 +0000 597) return module -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 598) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 599) path = extra_path -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 600) if not path: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 601) path = [] -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 602) path.extend(self.manager.b2.path()) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 603) location = None -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 604) for p in path: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 605) l = os.path.join(p, name + ".py") -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 606) if os.path.exists(l): -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 607) location = l -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 608) break -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 609) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 610) if not location: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 611) self.manager.errors()("Cannot find module '%s'" % name) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000
612) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 613) mname = "__build_build_temporary__" -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 614) file = open(location) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 615) try: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 616) # TODO: this means we'll never make use of .pyc module, -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 617) # which might be a problem, or not. -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 618) module = imp.load_module(mname, file, os.path.basename(location), -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 619) (".py", "r", imp.PY_SOURCE)) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 620) del sys.modules[mname] -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 621) self.loaded_tool_modules_[name] = module -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 622) return module -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 623) finally: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 624) file.close() -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 625) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 626) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 627) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 628) # FIXME: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 629) # Defines a Boost.Build extension project. Such extensions usually -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 630) # contain library targets and features that can be used by many people. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 631) # Even though extensions are really projects, they can be initialize as -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 632) # a module would be with the "using" (project.project-rules.using) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 633) # mechanism. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 634) #rule extension ( id : options * : * ) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 635) #{ -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 636) # # The caller is a standalone module for the extension. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 637) # local mod = [ CALLER_MODULE ] ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 638) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 639) # # We need to do the rest within the extension module. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 640) # module $(mod) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 641) # { -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 642) # import path ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 643) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 644) # # Find the root project. 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 645) # local root-project = [ project.current ] ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 646) # root-project = [ $(root-project).project-module ] ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 647) # while -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 648) # [ project.attribute $(root-project) parent-module ] && -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 649) # [ project.attribute $(root-project) parent-module ] != user-config -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 650) # { -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 651) # root-project = [ project.attribute $(root-project) parent-module ] ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 652) # } -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 653) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 654) # # Create the project data, and bring in the project rules -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 655) # # into the module. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 656) # project.initialize $(__name__) : -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 657) # [ path.join [ project.attribute $(root-project) location ] ext $(1:L) ] ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 658) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 659) # # Create the project itself, i.e. the attributes. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 660) # # All extensions are created in the "/ext" project space. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 661) # project /ext/$(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 662) # local attributes = [ project.attributes $(__name__) ] ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 663) # -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 664) # # Inherit from the root project of whomever is defining us. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 665) # project.inherit-attributes $(__name__) : $(root-project) ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 666) # $(attributes).set parent-module : $(root-project) : exact ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 667) # } -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 668) #} -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 669) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 670) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 671) class ProjectAttributes: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 672) """Class keeping all the attributes of a project. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 673) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 674) The standard attributes are 'id', "location", "project-root", "parent" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 675) "requirements", "default-build", "source-location" and "projects-to-build". 
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 676) """ -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 677) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 678) def __init__(self, manager, location, project_module): -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 679) self.manager = manager -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 680) self.location = location -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 681) self.project_module = project_module -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 682) self.attributes = {} -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 683) self.usage_requirements = None -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 684) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 685) def set(self, attribute, specification, exact): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 686) """Set the named attribute from the specification given by the user. -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 687) The value actually set may be different.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 688) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 689) if exact: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 690) self.__dict__[attribute] = specification -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 691) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 692) elif attribute == "requirements": -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 693) self.requirements = property_set.refine_from_user_input( -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 694) self.requirements, specification, -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 695) self.project_module, self.location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 696) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 697) elif attribute == "usage-requirements": -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 698) unconditional = [] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 699) for p in specification: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 700) split = property.split_conditional(p) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 701) if split: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 702) unconditional.append(split[1]) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 703) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 704) unconditional.append(p) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 705) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 706) non_free = property.remove("free", unconditional) -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 707) if non_free: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 708) pass -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 709) # FIXME: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 710) #errors.error "usage-requirements" $(specification) "have non-free properties" $(non-free) ; -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 711) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 712) t = property.translate_paths(specification, self.location) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 713) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 714) existing = self.__dict__.get("usage-requirements") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 715) if existing: -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 716) new = property_set.create(existing.raw() + t) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 717) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 718) new = property_set.create(t) -f049766b 
(vladimir_prus 2007-10-10 09:31:06 +0000 719) self.__dict__["usage-requirements"] = new -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 720) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 721) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 722) elif attribute == "default-build": -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 723) self.__dict__["default-build"] = property_set.create(specification) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 724) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 725) elif attribute == "source-location": -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 726) source_location = [] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 727) for path in specification: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 728) source_location.append(os.path.join(self.location, path)) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 729) self.__dict__["source-location"] = source_location -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 730) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 731) elif attribute == "build-dir": -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 732) self.__dict__["build-dir"] = os.path.join(self.location, specification) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 733) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 734) elif not attribute in ["id", "default-build", "location", -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 735) "source-location", "parent", -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 736) "projects-to-build", "project-root"]: -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 737) self.manager.errors()( -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 738) """Invalid project attribute '%s' specified -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 739) for project at '%s'""" % (attribute, self.location)) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 740) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 741) self.__dict__[attribute] = specification -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 742) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 743) def get(self, attribute): -cde6f09a (vladimir_prus 2007-10-19 23:12:33 +0000 744) return self.__dict__[attribute] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 745) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 746) def dump(self): -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 747) """Prints the project attributes.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 748) id = self.get("id") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 749) if not id: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 750) id = "(none)" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 751) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 752) id = id[0] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 753) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 754) parent = self.get("parent") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 755) if not parent: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 756) parent = "(none)" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 757) else: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 758) parent = parent[0] -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 759) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 760) print "'%s'" % id -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 761) print "Parent project: %s" % parent -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 762) print "Requirements: %s" % self.get("requirements") -f049766b
(vladimir_prus 2007-10-10 09:31:06 +0000 763) print "Default build: %s" % string.join(self.get("default-build")) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 764) print "Source location: %s" % string.join(self.get("source-location")) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 765) print "Projects to build: %s" % string.join(sorted(self.get("projects-to-build"))) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 766) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 767) class ProjectRules: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 768) """Class keeping all rules that are made available to Jamfile.""" -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 769) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 770) def __init__(self, registry): -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 771) self.registry = registry -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 772) self.manager_ = registry.manager -38d984eb (vladimir_prus 2007-10-13 17:52:25 +0000 773) self.rules = {} -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 774) self.local_names = [x for x in self.__class__.__dict__ -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 775) if x not in ["__init__", "init_project", "add_rule", -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 776) "error_reporting_wrapper", "add_rule_for_type"]] -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 777) self.all_names_ = [x for x in self.local_names] -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 778) -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 779) def add_rule_for_type(self, type): -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 780) rule_name = type.lower(); -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 781) -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 782) def xpto (name, sources, requirements = [], default_build = None, usage_requirements = []): -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 783) return self.manager_.targets().create_typed_target( -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 784) type, self.registry.current(), name[0], sources, -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 785) requirements, default_build, usage_requirements) -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 786) -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 787) self.add_rule(type.lower(), xpto) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 788) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 789) def add_rule(self, name, callable): -38d984eb (vladimir_prus 2007-10-13 17:52:25 +0000 790) self.rules[name] = callable -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 791) self.all_names_.append(name) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 792) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 793) def all_names(self): -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 794) return self.all_names_ -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 795) -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 796) def call_and_report_errors(self, callable, *args): -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 797) result = None -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 798) try: -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 799) self.manager_.errors().push_jamfile_context() -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 800) result = callable(*args) -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 801) except ExceptionWithUserContext, e: -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 802) e.report() -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000
803) except Exception, e: -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 804) try: -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 805) self.manager_.errors().handle_stray_exception (e) -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 806) except ExceptionWithUserContext, e: -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 807) e.report() -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 808) finally: -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 809) self.manager_.errors().pop_jamfile_context() -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 810) -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 811) return result -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 812) -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 813) def make_wrapper(self, callable): -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 814) """Given a free-standing function 'callable', return a new -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 815) callable that will call 'callable' and report all exceptions, -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 816) using 'call_and_report_errors'.""" -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 817) def wrapper(*args): -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 818) self.call_and_report_errors(callable, *args) -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 819) return wrapper -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 820) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 821) def init_project(self, project_module): -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 822) -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 823) for n in self.local_names: -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 824) # Using 'getattr' here gives us a bound method, -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 825) # while using self.__dict__[r] would give an unbound one.
-f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 826) v = getattr(self, n) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 827) if callable(v): -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 828) if n == "import_": -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 829) n = "import" -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 830) else: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 831) n = string.replace(n, "_", "-") -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 832) -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 833) bjam.import_rule(project_module, n, -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 834) self.make_wrapper(v)) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 835) -38d984eb (vladimir_prus 2007-10-13 17:52:25 +0000 836) for n in self.rules: -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 837) bjam.import_rule(project_module, n, -0317671e (vladimir_prus 2007-10-28 14:02:06 +0000 838) self.make_wrapper(self.rules[n])) -38d984eb (vladimir_prus 2007-10-13 17:52:25 +0000 839) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 840) def project(self, *args): -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 841) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 842) jamfile_module = self.registry.current().project_module() -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 843) attributes = self.registry.attributes(jamfile_module) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 844) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 845) id = None -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 846) if args and args[0]: -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 847) id = args[0][0] -092119e3 (vladimir_prus 2007-10-16 05:45:31 +0000 848) args = args[1:] -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 849) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 850) if id: -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 851) if id[0] != '/': -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 852) id = '/' + id -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 853) self.registry.register_id (id, jamfile_module) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 854) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 855) explicit_build_dir = None -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 856) for a in args: -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 857) if a: -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 858) attributes.set(a[0], a[1:], exact=0) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 859) if a[0] == "build-dir": -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 860) explicit_build_dir = a[1] -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 861) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 862) # If '--build-dir' is specified, change the build dir for the project. -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 863) if self.registry.global_build_dir: -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 864) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 865) location = attributes.get("location") -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 866) # Project with empty location is 'standalone' project, like -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 867) # user-config, or qt. It has no build dir. -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 868) # If we try to set build dir for user-config, we'll then -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 869) # try to inherit it, with either weird, or wrong consequences. 
-0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 870) if location and location == attributes.get("project-root"): -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 871) # This is Jamroot. -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 872) if id: -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 873) if explicit_build_dir and os.path.isabs(explicit_build_dir): -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 874) self.registry.manager.errors()( -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 875) """Absolute directory specified via 'build-dir' project attribute. -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 876) Don't know how to combine that with the --build-dir option.""") -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 877) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 878) rid = id -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 879) if rid[0] == '/': -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 880) rid = rid[1:] -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 881) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 882) p = os.path.join(self.registry.global_build_dir, -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 883) rid, explicit_build_dir) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 884) attributes.set("build-dir", p, exact=1) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 885) elif explicit_build_dir: -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 886) self.registry.manager.errors()( -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 887) """When --build-dir is specified, the 'build-project' -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 888) attribute is allowed only for top-level 'project' invocations""") -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 889) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 890) def constant(self, name, value): -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 891) """Declare and set a project global constant. -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 892) Project global constants are normal variables but should -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 893) not be changed. They are applied to every child Jamfile.""" -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 894) m = "Jamfile</home/ghost/Work/Boost/boost-svn/tools/build/v2_python/python/tests/bjam/make>" -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 895) self.registry.current().add_constant(name[0], value) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 896) -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 897) def path_constant(self, name, value): -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 898) """Declare and set a project global constant, whose value is a path. The -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 899) path is adjusted to be relative to the invocation directory. The given -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 900) value path is taken to be either absolute, or relative to this project -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 901) root.""" -0ed8e16d (vladimir_prus 2007-10-13 21:34:05 +0000 902) self.registry.current().add_constant(name[0], value, path=1) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 903) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 904) def use_project(self, id, where): -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 905) # See comment in 'load' for explanation why we record the -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 906) # parameters as opposed to loading the project now.
-2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 907) m = self.registry.current().project_module(); -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 908) self.registry.used_projects[m].append((id, where)) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 909) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 910) def build_project(self, dir): -1674e2d9 (jhunold 2008-08-08 19:52:05 +0000 911) assert(isinstance(dir, list)) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 912) jamfile_module = self.registry.current().project_module() -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 913) attributes = self.registry.attributes(jamfile_module) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 914) now = attributes.get("projects-to-build") -1674e2d9 (jhunold 2008-08-08 19:52:05 +0000 915) attributes.set("projects-to-build", now + dir, exact=True) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 916) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 917) def explicit(self, target_names): -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 918) t = self.registry.current() -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 919) for n in target_names: -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 920) t.mark_target_as_explicit(n) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 921) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 922) def glob(self, wildcards, excludes=None): -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 923) return self.registry.glob_internal(self.registry.current(), -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 924) wildcards, excludes, "glob") -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 925) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 926) def glob_tree(self, wildcards, excludes=None): -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 927) bad = 0 -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 928) for p in wildcards: -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 929) if os.path.dirname(p): -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 930) bad = 1 -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 931) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 932) if excludes: -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 933) for p in excludes: -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 934) if os.path.dirname(p): -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 935) bad = 1 -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 936) -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 937) if bad: -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 938) self.registry.manager().errors()( -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 939) "The patterns to 'glob-tree' may not include directory") -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 940) return self.registry.glob_internal(self.registry.current(), -2a36874b (vladimir_prus 2007-10-14 07:20:55 +0000 941) wildcards, excludes, "glob_tree") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 942) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 943) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 944) def using(self, toolset, *args): -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 945) # The module referred by 'using' can be placed in -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 946) # the same directory as Jamfile, and the user -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 947) # will expect the module to be found even though -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 948) # the directory is not in BOOST_BUILD_PATH. 
-f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 949) # So temporarily change the search path. -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 950) jamfile_module = self.registry.current().project_module() -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 951) attributes = self.registry.attributes(jamfile_module) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 952) location = attributes.get("location") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 953) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 954) m = self.registry.load_module(toolset[0], [location]) -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 955) if not m.__dict__.has_key("init"): -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 956) self.registry.manager.errors()( -7da7f9c1 (vladimir_prus 2008-05-18 04:29:53 +0000 957) "Tool module '%s' does not define the 'init' method" % toolset[0]) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 958) m.init(*args) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 959) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 960) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 961) def import_(self, name, names_to_import=None, local_names=None): -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 962) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 963) name = name[0] -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 964) jamfile_module = self.registry.current().project_module() -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 965) attributes = self.registry.attributes(jamfile_module) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 966) location = attributes.get("location") -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 967) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 968) m = self.registry.load_module(name, [location]) -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 969) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 970) for f in m.__dict__: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 971) v = m.__dict__[f] -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 972) if callable(v): -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 973) bjam.import_rule(jamfile_module, name + "." + f, v) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 974) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 975) if names_to_import: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 976) if not local_names: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 977) local_names = names_to_import -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 978) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 979) if len(names_to_import) != len(local_names): -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 980) self.registry.manager.errors()( -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 981) """The number of names to import and local names do not match.""") -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 982) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 983) for n, l in zip(names_to_import, local_names): -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 984) bjam.import_rule(jamfile_module, l, m.__dict__[n]) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 985) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 986) def conditional(self, condition, requirements): -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 987) """Calculates conditional requirements for multiple requirements -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 988) at once.
This is a shorthand to be reduce duplication and to -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 989) keep an inline declarative syntax. For example: -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 990) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 991) lib x : x.cpp : [ conditional <toolset>gcc <variant>debug : -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 992) <define>DEBUG_EXCEPTION <define>DEBUG_TRACE ] ; -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 993) """ -f049766b (vladimir_prus 2007-10-10 09:31:06 +0000 994) -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 995) c = string.join(condition, ",") -f2aef897 (vladimir_prus 2007-10-14 09:19:52 +0000 996) return [c + ":" + r for r in requirements] diff --git a/jam-files/boost-build/build/project.jam b/jam-files/boost-build/build/project.jam deleted file mode 100644 index c9967613..00000000 --- a/jam-files/boost-build/build/project.jam +++ /dev/null @@ -1,1110 +0,0 @@ -# Copyright 2002, 2003 Dave Abrahams -# Copyright 2002, 2005, 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Implements project representation and loading. Each project is represented by: -# - a module where all the Jamfile content live. -# - an instance of 'project-attributes' class. -# (given a module name, can be obtained using the 'attributes' rule) -# - an instance of 'project-target' class (from targets.jam) -# (given a module name, can be obtained using the 'target' rule) -# -# Typically, projects are created as result of loading a Jamfile, which is done -# by rules 'load' and 'initialize', below. First, module for Jamfile is loaded -# and new project-attributes instance is created. Some rules necessary for -# project are added to the module (see 'project-rules' module) at the bottom of -# this file. Default project attributes are set (inheriting attributes of parent -# project, if it exists). After that the Jamfile is read. It can declare its own -# attributes using the 'project' rule which will be combined with any already -# set attributes. -# -# The 'project' rule can also declare a project id which will be associated with -# the project module. -# -# There can also be 'standalone' projects. They are created by calling -# 'initialize' on an arbitrary module and not specifying their location. After -# the call, the module can call the 'project' rule, declare main targets and -# behave as a regular project except that, since it is not associated with any -# location, it should not declare targets that are not prebuilt. -# -# The list of all loaded Jamfile is stored in the .project-locations variable. -# It is possible to obtain a module name for a location using the 'module-name' -# rule. Standalone projects are not recorded and can only be referenced using -# their project id. - -import "class" : new ; -import errors ; -import modules ; -import path ; -import print ; -import property-set ; -import sequence ; - - -# Loads the Jamfile at the given location. After loading, project global file -# and Jamfiles needed by the requested one will be loaded recursively. If the -# Jamfile at that location is loaded already, does nothing. Returns the project -# module for the Jamfile. 
-# -rule load ( jamfile-location ) -{ - if --debug-loading in [ modules.peek : ARGV ] - { - ECHO "Loading Jamfile at" '$(jamfile-location)' ; - } - - local module-name = [ module-name $(jamfile-location) ] ; - # If Jamfile is already loaded, don't try again. - if ! $(module-name) in $(.jamfile-modules) - { - load-jamfile $(jamfile-location) : $(module-name) ; - - # We want to make sure that child project are loaded only after parent - # projects. In particular, because parent projects define attributes - # which are inherited by children, and we don't want children to be - # loaded before parent has defined everything. - # - # While "build-project" and "use-project" can potentially refer to child - # projects from parent projects, we don't immediately load child - # projects when seeing those attributes. Instead, we record the minimal - # information to be used only later. - load-used-projects $(module-name) ; - } - return $(module-name) ; -} - - -rule load-used-projects ( module-name ) -{ - local used = [ modules.peek $(module-name) : .used-projects ] ; - local location = [ attribute $(module-name) location ] ; - import project ; - while $(used) - { - local id = $(used[1]) ; - local where = $(used[2]) ; - - project.use $(id) : [ path.root [ path.make $(where) ] $(location) ] ; - used = $(used[3-]) ; - } -} - - -# Note the use of character groups, as opposed to listing 'Jamroot' and -# 'jamroot'. With the latter, we would get duplicate matches on Windows and -# would have to eliminate duplicates. -JAMROOT ?= [ modules.peek : JAMROOT ] ; -JAMROOT ?= project-root.jam [Jj]amroot [Jj]amroot.jam ; - - -# Loads parent of Jamfile at 'location'. Issues an error if nothing is found. -# -rule load-parent ( location ) -{ - local found = [ path.glob-in-parents $(location) : $(JAMROOT) $(JAMFILE) ] ; - - if ! $(found) - { - ECHO error: Could not find parent for project at '$(location)' ; - EXIT error: Did not find Jamfile.jam or Jamroot.jam in any parent - directory. ; - } - - return [ load $(found[1]:D) ] ; -} - - -# Makes the specified 'module' act as if it were a regularly loaded Jamfile at -# 'location'. Reports an error if a Jamfile has already been loaded for that -# location. -# -rule act-as-jamfile ( module : location ) -{ - if [ module-name $(location) ] in $(.jamfile-modules) - { - errors.error "Jamfile was already loaded for '$(location)'" ; - } - # Set up non-default mapping from location to module. - .module.$(location) = $(module) ; - - # Add the location to the list of project locations so that we don't try to - # reload the same Jamfile in the future. - .jamfile-modules += [ module-name $(location) ] ; - - initialize $(module) : $(location) ; -} - - -# Returns the project module corresponding to the given project-id or plain -# directory name. Returns nothing if such a project can not be found. -# -rule find ( name : current-location ) -{ - local project-module ; - - # Try interpreting name as project id. - if [ path.is-rooted $(name) ] - { - project-module = $($(name).jamfile-module) ; - } - - if ! $(project-module) - { - local location = [ path.root [ path.make $(name) ] $(current-location) ] - ; - - # If no project is registered for the given location, try to load it. - # First see if we have a Jamfile. If not, then see if we might have a - # project root willing to act as a Jamfile. In that case, project root - # must be placed in the directory referred by id. - - project-module = [ module-name $(location) ] ; - if ! 
$(project-module) in $(.jamfile-modules) - { - if [ path.glob $(location) : $(JAMROOT) $(JAMFILE) ] - { - project-module = [ load $(location) ] ; - } - else - { - project-module = ; - } - } - } - - return $(project-module) ; -} - - -# Returns the name of the module corresponding to 'jamfile-location'. If no -# module corresponds to that location yet, associates the default module name -# with that location. -# -rule module-name ( jamfile-location ) -{ - if ! $(.module.$(jamfile-location)) - { - # Root the path, so that locations are always unambiguous. Without this, - # we can't decide if '../../exe/program1' and '.' are the same paths. - jamfile-location = [ path.root $(jamfile-location) [ path.pwd ] ] ; - .module.$(jamfile-location) = Jamfile<$(jamfile-location)> ; - } - return $(.module.$(jamfile-location)) ; -} - - -# Default patterns to search for the Jamfiles to use for build declarations. -# -JAMFILE = [ modules.peek : JAMFILE ] ; -JAMFILE ?= [Bb]uild.jam [Jj]amfile.v2 [Jj]amfile [Jj]amfile.jam ; - - -# Find the Jamfile at the given location. This returns the exact names of all -# the Jamfiles in the given directory. The optional parent-root argument causes -# this to search not the given directory but the ones above it up to the -# directory given in it. -# -rule find-jamfile ( - dir # The directory(s) to look for a Jamfile. - parent-root ? # Optional flag indicating to search for the parent Jamfile. - : no-errors ? - ) -{ - # Glob for all the possible Jamfiles according to the match pattern. - # - local jamfile-glob = ; - if $(parent-root) - { - if ! $(.parent-jamfile.$(dir)) - { - .parent-jamfile.$(dir) = [ path.glob-in-parents $(dir) : $(JAMFILE) - ] ; - } - jamfile-glob = $(.parent-jamfile.$(dir)) ; - } - else - { - if ! $(.jamfile.$(dir)) - { - .jamfile.$(dir) = [ path.glob $(dir) : $(JAMFILE) ] ; - } - jamfile-glob = $(.jamfile.$(dir)) ; - - } - - local jamfile-to-load = $(jamfile-glob) ; - # Multiple Jamfiles found in the same place. Warn about this and ensure we - # use only one of them. As a temporary convenience measure, if there is - # Jamfile.v2 among found files, suppress the warning and use it. - # - if $(jamfile-to-load[2-]) - { - local v2-jamfiles = [ MATCH (.*[Jj]amfile\\.v2)|(.*[Bb]uild\\.jam) : $(jamfile-to-load) ] ; - - if $(v2-jamfiles) && ! $(v2-jamfiles[2]) - { - jamfile-to-load = $(v2-jamfiles) ; - } - else - { - local jamfile = [ path.basename $(jamfile-to-load[1]) ] ; - ECHO "warning: Found multiple Jamfiles at '"$(dir)"'!" - "Loading the first one: '$(jamfile)'." ; - } - - jamfile-to-load = $(jamfile-to-load[1]) ; - } - - # Could not find it, error. - # - if ! $(no-errors) && ! $(jamfile-to-load) - { - errors.error Unable to load Jamfile. - : Could not find a Jamfile in directory '$(dir)'. - : Attempted to find it with pattern '"$(JAMFILE:J=" ")"'. - : Please consult the documentation at 'http://www.boost.org'. ; - } - - return $(jamfile-to-load) ; -} - - -# Load a Jamfile at the given directory. Returns nothing. Will attempt to load -# the file as indicated by the JAMFILE patterns. Effect of calling this rule -# twice with the same 'dir' is undefined. -# -local rule load-jamfile ( - dir # The directory of the project Jamfile. - : jamfile-module - ) -{ - # See if the Jamfile is where it should be. - # - local jamfile-to-load = [ path.glob $(dir) : $(JAMROOT) ] ; - if ! 
$(jamfile-to-load) - { - jamfile-to-load = [ find-jamfile $(dir) ] ; - } - - if $(jamfile-to-load[2]) - { - errors.error "Multiple Jamfiles found at '$(dir)'" - : "Filenames are: " $(jamfile-to-load:D=) ; - } - - # Now load the Jamfile in it's own context. - # The call to 'initialize' may load parent Jamfile, which might have - # 'use-project' statement that causes a second attempt to load the - # same project we're loading now. Checking inside .jamfile-modules - # prevents that second attempt from messing up. - if ! $(jamfile-module) in $(.jamfile-modules) - { - .jamfile-modules += $(jamfile-module) ; - - # Initialize the Jamfile module before loading. - # - initialize $(jamfile-module) : [ path.parent $(jamfile-to-load) ] - : $(jamfile-to-load:BS) ; - - local saved-project = $(.current-project) ; - - mark-as-user $(jamfile-module) ; - modules.load $(jamfile-module) : [ path.native $(jamfile-to-load) ] : . ; - if [ MATCH ($(JAMROOT)) : $(jamfile-to-load:BS) ] - { - jamfile = [ find-jamfile $(dir) : no-errors ] ; - if $(jamfile) - { - load-aux $(jamfile-module) : [ path.native $(jamfile) ] ; - } - } - - # Now do some checks. - if $(.current-project) != $(saved-project) - { - errors.error "The value of the .current-project variable has magically" - : "changed after loading a Jamfile. This means some of the targets" - : "might be defined in the wrong project." - : "after loading" $(jamfile-module) - : "expected value" $(saved-project) - : "actual value" $(.current-project) ; - } - - if $(.global-build-dir) - { - local id = [ attribute $(jamfile-module) id ] ; - local project-root = [ attribute $(jamfile-module) project-root ] ; - local location = [ attribute $(jamfile-module) location ] ; - - if $(location) && $(project-root) = $(dir) - { - # This is Jamroot. - if ! $(id) - { - ECHO "warning: the --build-dir option was specified" ; - ECHO "warning: but Jamroot at '$(dir)'" ; - ECHO "warning: specified no project id" ; - ECHO "warning: the --build-dir option will be ignored" ; - } - } - } - } -} - - -rule mark-as-user ( module-name ) -{ - if USER_MODULE in [ RULENAMES ] - { - USER_MODULE $(module-name) ; - } -} - - -rule load-aux ( module-name : file ) -{ - mark-as-user $(module-name) ; - - module $(module-name) - { - include $(2) ; - local rules = [ RULENAMES $(1) ] ; - IMPORT $(1) : $(rules) : $(1) : $(1).$(rules) ; - } -} - - -.global-build-dir = [ MATCH --build-dir=(.*) : [ modules.peek : ARGV ] ] ; -if $(.global-build-dir) -{ - # If the option is specified several times, take the last value. - .global-build-dir = [ path.make $(.global-build-dir[-1]) ] ; -} - - -# Initialize the module for a project. -# -rule initialize ( - module-name # The name of the project module. - : location ? # The location (directory) of the project to initialize. If - # not specified, a standalone project will be initialized. - : basename ? - ) -{ - if --debug-loading in [ modules.peek : ARGV ] - { - ECHO "Initializing project '$(module-name)'" ; - } - - # TODO: need to consider if standalone projects can do anything but define - # prebuilt targets. If so, we need to give it a more sensible "location", so - # that source paths are correct. - location ?= "" ; - # Create the module for the Jamfile first. - module $(module-name) - { - } - $(module-name).attributes = [ new project-attributes $(location) - $(module-name) ] ; - local attributes = $($(module-name).attributes) ; - - if $(location) - { - $(attributes).set source-location : [ path.make $(location) ] : exact ; - } - else if ! 
$(module-name) in test-config site-config user-config project-config - { - # This is a standalone project with known location. Set source location - # so that it can declare targets. This is intended so that you can put - # a .jam file in your sources and use it via 'using'. Standard modules - # (in 'tools' subdir) may not assume source dir is set. - local s = [ modules.binding $(module-name) ] ; - if ! $(s) - { - errors.error "Could not determine project location $(module-name)" ; - } - $(attributes).set source-location : $(s:D) : exact ; - } - - $(attributes).set requirements : [ property-set.empty ] : exact ; - $(attributes).set usage-requirements : [ property-set.empty ] : exact ; - - # Import rules common to all project modules from project-rules module, - # defined at the end of this file. - local rules = [ RULENAMES project-rules ] ; - IMPORT project-rules : $(rules) : $(module-name) : $(rules) ; - - local jamroot ; - - local parent-module ; - if $(module-name) = test-config - { - # No parent. - } - else if $(module-name) = site-config - { - parent-module = test-config ; - } - else if $(module-name) = user-config - { - parent-module = site-config ; - } - else if $(module-name) = project-config - { - parent-module = user-config ; - } - else - { - # We search for parent/project-root only if Jamfile was specified, i.e. - # if the project is not standalone. - if $(location) && ! [ MATCH ($(JAMROOT)) : $(basename) ] - { - parent-module = [ load-parent $(location) ] ; - } - else - { - # It's either jamroot or standalone project. If it's jamroot, - # inherit from user-config. - if $(location) - { - # If project-config module exist, inherit from it. - if $(project-config.attributes) - { - parent-module = project-config ; - } - else - { - parent-module = user-config ; - } - jamroot = true ; - } - } - } - - if $(parent-module) - { - inherit-attributes $(module-name) : $(parent-module) ; - $(attributes).set parent-module : $(parent-module) : exact ; - } - - if $(jamroot) - { - $(attributes).set project-root : $(location) : exact ; - } - - local parent ; - if $(parent-module) - { - parent = [ target $(parent-module) ] ; - } - - if ! $(.target.$(module-name)) - { - .target.$(module-name) = [ new project-target $(module-name) - : $(module-name) $(parent) - : [ attribute $(module-name) requirements ] ] ; - - if --debug-loading in [ modules.peek : ARGV ] - { - ECHO "Assigned project target" $(.target.$(module-name)) - "to '$(module-name)'" ; - } - } - - .current-project = [ target $(module-name) ] ; -} - - -# Make 'project-module' inherit attributes of project root and parent module. -# -rule inherit-attributes ( project-module : parent-module ) -{ - local attributes = $($(project-module).attributes) ; - local pattributes = [ attributes $(parent-module) ] ; - # Parent module might be locationless configuration module. - if [ modules.binding $(parent-module) ] - { - $(attributes).set parent : [ path.parent - [ path.make [ modules.binding $(parent-module) ] ] ] ; - } - local v = [ $(pattributes).get project-root ] ; - $(attributes).set project-root : $(v) : exact ; - $(attributes).set default-build - : [ $(pattributes).get default-build ] ; - $(attributes).set requirements - : [ $(pattributes).get requirements ] : exact ; - $(attributes).set usage-requirements - : [ $(pattributes).get usage-requirements ] : exact ; - - local parent-build-dir = [ $(pattributes).get build-dir ] ; - if $(parent-build-dir) - { - # Have to compute relative path from parent dir to our dir. 
Convert both - # paths to absolute, since we cannot find relative path from ".." to - # ".". - - local location = [ attribute $(project-module) location ] ; - local parent-location = [ attribute $(parent-module) location ] ; - - local pwd = [ path.pwd ] ; - local parent-dir = [ path.root $(parent-location) $(pwd) ] ; - local our-dir = [ path.root $(location) $(pwd) ] ; - $(attributes).set build-dir : [ path.join $(parent-build-dir) - [ path.relative $(our-dir) $(parent-dir) ] ] : exact ; - } -} - - -# Associate the given id with the given project module. -# -rule register-id ( id : module ) -{ - $(id).jamfile-module = $(module) ; -} - - -# Class keeping all the attributes of a project. -# -# The standard attributes are "id", "location", "project-root", "parent" -# "requirements", "default-build", "source-location" and "projects-to-build". -# -class project-attributes -{ - import property ; - import property-set ; - import errors ; - import path ; - import print ; - import sequence ; - import project ; - - rule __init__ ( location project-module ) - { - self.location = $(location) ; - self.project-module = $(project-module) ; - } - - # Set the named attribute from the specification given by the user. The - # value actually set may be different. - # - rule set ( attribute : specification * - : exact ? # Sets value from 'specification' without any processing. - ) - { - if $(exact) - { - self.$(attribute) = $(specification) ; - } - else if $(attribute) = "requirements" - { - local result = [ property-set.refine-from-user-input - $(self.requirements) : $(specification) - : $(self.project-module) : $(self.location) ] ; - - if $(result[1]) = "@error" - { - errors.error Requirements for project at '$(self.location)' - conflict with parent's. : Explanation: $(result[2-]) ; - } - else - { - self.requirements = $(result) ; - } - } - else if $(attribute) = "usage-requirements" - { - local unconditional ; - for local p in $(specification) - { - local split = [ property.split-conditional $(p) ] ; - split ?= nothing $(p) ; - unconditional += $(split[2]) ; - } - - local non-free = [ property.remove free : $(unconditional) ] ; - if $(non-free) - { - errors.error usage-requirements $(specification) have non-free - properties $(non-free) ; - } - local t = [ property.translate-paths $(specification) - : $(self.location) ] ; - if $(self.usage-requirements) - { - self.usage-requirements = [ property-set.create - [ $(self.usage-requirements).raw ] $(t) ] ; - } - else - { - self.usage-requirements = [ property-set.create $(t) ] ; - } - } - else if $(attribute) = "default-build" - { - self.default-build = [ property.make $(specification) ] ; - } - else if $(attribute) = "source-location" - { - self.source-location = ; - for local src-path in $(specification) - { - self.source-location += [ path.root [ path.make $(src-path) ] - $(self.location) ] ; - } - } - else if $(attribute) = "build-dir" - { - self.build-dir = [ path.root - [ path.make $(specification) ] $(self.location) ] ; - } - else if $(attribute) = "id" - { - id = [ path.root $(specification) / ] ; - project.register-id $(id) : $(self.project-module) ; - self.id = $(id) ; - } - else if ! $(attribute) in "default-build" "location" "parent" - "projects-to-build" "project-root" "source-location" - { - errors.error Invalid project attribute '$(attribute)' specified for - project at '$(self.location)' ; - } - else - { - self.$(attribute) = $(specification) ; - } - } - - # Returns the value of the given attribute. 
- # - rule get ( attribute ) - { - return $(self.$(attribute)) ; - } - - # Prints the project attributes. - # - rule print ( ) - { - local id = $(self.id) ; id ?= (none) ; - local parent = $(self.parent) ; parent ?= (none) ; - print.section "'"$(id)"'" ; - print.list-start ; - print.list-item "Parent project:" $(parent) ; - print.list-item "Requirements:" [ $(self.requirements).raw ] ; - print.list-item "Default build:" $(self.default-build) ; - print.list-item "Source location:" $(self.source-location) ; - print.list-item "Projects to build:" - [ sequence.insertion-sort $(self.projects-to-build) ] ; - print.list-end ; - } -} - - -# Returns the project which is currently being loaded. -# -rule current ( ) -{ - return $(.current-project) ; -} - - -# Temporarily changes the current project to 'project'. Should be followed by -# 'pop-current'. -# -rule push-current ( project ) -{ - .saved-current-project += $(.current-project) ; - .current-project = $(project) ; -} - - -rule pop-current ( ) -{ - .current-project = $(.saved-current-project[-1]) ; - .saved-current-project = $(.saved-current-project[1--2]) ; -} - - -# Returns the project-attribute instance for the specified Jamfile module. -# -rule attributes ( project ) -{ - return $($(project).attributes) ; -} - - -# Returns the value of the specified attribute in the specified Jamfile module. -# -rule attribute ( project attribute ) -{ - return [ $($(project).attributes).get $(attribute) ] ; -} - - -# Returns the project target corresponding to the 'project-module'. -# -rule target ( project-module ) -{ - if ! $(.target.$(project-module)) - { - .target.$(project-module) = [ new project-target $(project-module) - : $(project-module) - : [ attribute $(project-module) requirements ] ] ; - } - return $(.target.$(project-module)) ; -} - - -# Use/load a project. -# -rule use ( id : location ) -{ - local saved-project = $(.current-project) ; - local project-module = [ project.load $(location) ] ; - local declared-id = [ project.attribute $(project-module) id ] ; - - if ! $(declared-id) || $(declared-id) != $(id) - { - # The project at 'location' either has no id or that id is not equal to - # the 'id' parameter. - if $($(id).jamfile-module) && ( $($(id).jamfile-module) != - $(project-module) ) - { - errors.user-error Attempt to redeclare already existing project id - '$(id)' - location '$(location)' ; - } - $(id).jamfile-module = $(project-module) ; - } - .current-project = $(saved-project) ; -} - - -# Defines a Boost.Build extension project. Such extensions usually contain -# library targets and features that can be used by many people. Even though -# extensions are really projects, they can be initialized as a module would be -# with the "using" (project.project-rules.using) mechanism. -# -rule extension ( id : options * : * ) -{ - # The caller is a standalone module for the extension. - local mod = [ CALLER_MODULE ] ; - - # We need to do the rest within the extension module. - module $(mod) - { - import path ; - - # Find the root project. - local root-project = [ project.current ] ; - root-project = [ $(root-project).project-module ] ; - while - [ project.attribute $(root-project) parent-module ] && - [ project.attribute $(root-project) parent-module ] != user-config - { - root-project = [ project.attribute $(root-project) parent-module ] ; - } - - # Create the project data, and bring in the project rules into the - # module. 
- project.initialize $(__name__) : [ path.join [ project.attribute - $(root-project) location ] ext $(1:L) ] ; - - # Create the project itself, i.e. the attributes. All extensions are - # created in the "/ext" project space. - project /ext/$(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : - $(9) ; - local attributes = [ project.attributes $(__name__) ] ; - - # Inherit from the root project of whomever is defining us. - project.inherit-attributes $(__name__) : $(root-project) ; - $(attributes).set parent-module : $(root-project) : exact ; - } -} - - -rule glob-internal ( project : wildcards + : excludes * : rule-name ) -{ - local location = [ $(project).get source-location ] ; - - local result ; - local paths = [ path.$(rule-name) $(location) : - [ sequence.transform path.make : $(wildcards) ] : - [ sequence.transform path.make : $(excludes) ] ] ; - if $(wildcards:D) || $(rule-name) != glob - { - # The paths we have found are relative to the current directory, but the - # names specified in the sources list are assumed to be relative to the - # source directory of the corresponding project. So, just make the names - # absolute. - for local p in $(paths) - { - # If the path is below source location, use relative path. - # Otherwise, use full path just to avoid any ambiguities. - local rel = [ path.relative $(p) $(location) : no-error ] ; - if $(rel) = not-a-child - { - result += [ path.root $(p) [ path.pwd ] ] ; - } - else - { - result += $(rel) ; - } - } - } - else - { - # There were no wildcards in the directory path, so the files are all in - # the source directory of the project. Just drop the directory, instead - # of making paths absolute. - result = $(paths:D="") ; - } - - return $(result) ; -} - - -# This module defines rules common to all projects. -# -module project-rules -{ - rule using ( toolset-module : * ) - { - import toolset ; - import modules ; - import project ; - - # Temporarily change the search path so the module referred to by - # 'using' can be placed in the same directory as Jamfile. User will - # expect the module to be found even though the directory is not in - # BOOST_BUILD_PATH. - local x = [ modules.peek : BOOST_BUILD_PATH ] ; - local caller = [ CALLER_MODULE ] ; - local caller-location = [ modules.binding $(caller) ] ; - modules.poke : BOOST_BUILD_PATH : $(caller-location:D) $(x) ; - toolset.using $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - modules.poke : BOOST_BUILD_PATH : $(x) ; - - # The above might have clobbered .current-project. Restore the correct - # value. - modules.poke project : .current-project - : [ project.target $(caller) ] ; - } - - import modules ; - - rule import ( * : * : * ) - { - modules.import project ; - - local caller = [ CALLER_MODULE ] ; - local saved = [ modules.peek project : .current-project ] ; - module $(caller) - { - modules.import $(1) : $(2) : $(3) ; - } - modules.poke project : .current-project : $(saved) ; - } - - rule project ( id ? : options * : * ) - { - import errors ; - import path ; - import project ; - - local caller = [ CALLER_MODULE ] ; - local attributes = [ project.attributes $(caller) ] ; - if $(id) - { - $(attributes).set id : $(id) ; - } - - local explicit-build-dir ; - - for n in 2 3 4 5 6 7 8 9 - { - local option = $($(n)) ; - if $(option) - { - $(attributes).set $(option[1]) : $(option[2-]) ; - } - if $(option[1]) = "build-dir" - { - explicit-build-dir = [ path.make $(option[2-]) ] ; - } - } - - # If '--build-dir' is specified, change the build dir for the project. 
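To make the --build-dir handling below concrete, here is a minimal Python sketch of how the pieces combine for a Jamroot (the helper name combined_build_dir is hypothetical; the authoritative logic is the Jam code that follows):

    import os

    def combined_build_dir(global_build_dir, project_id, explicit_build_dir=""):
        # An absolute 'build-dir' attribute cannot be combined with --build-dir.
        if os.path.isabs(explicit_build_dir):
            raise ValueError("absolute 'build-dir' cannot be combined with --build-dir")
        rid = project_id.lstrip("/")          # strip the leading slash from the id
        return os.path.join(global_build_dir, rid, explicit_build_dir)

    # combined_build_dir("/tmp/build", "/myproject", "bin") == "/tmp/build/myproject/bin"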
- local global-build-dir = - [ modules.peek project : .global-build-dir ] ; - - if $(global-build-dir) - { - local location = [ $(attributes).get location ] ; - # Project with an empty location is a 'standalone' project such as - # user-config or qt. It has no build dir. If we try to set build dir - # for user-config, we shall then try to inherit it, with either - # weird or wrong consequences. - if $(location) && $(location) = [ $(attributes).get project-root ] - { - # Re-read the project id, since it might have been changed in - # the project's attributes. - id = [ $(attributes).get id ] ; - # This is Jamroot. - if $(id) - { - if $(explicit-build-dir) && - [ path.is-rooted $(explicit-build-dir) ] - { - errors.user-error Absolute directory specified via - 'build-dir' project attribute : Do not know how to - combine that with the --build-dir option. ; - } - # Strip the leading slash from id. - local rid = [ MATCH /(.*) : $(id) ] ; - local p = [ path.join - $(global-build-dir) $(rid) $(explicit-build-dir) ] ; - - $(attributes).set build-dir : $(p) : exact ; - } - } - else - { - # Not Jamroot. - if $(explicit-build-dir) - { - errors.user-error When --build-dir is specified, the - 'build-dir' project : attribute is allowed only for - top-level 'project' invocations ; - } - } - } - } - - # Declare and set a project global constant. Project global constants are - # normal variables but should not be changed. They are applied to every - # child Jamfile. - # - rule constant ( - name # Variable name of the constant. - : value + # Value of the constant. - ) - { - import project ; - local caller = [ CALLER_MODULE ] ; - local p = [ project.target $(caller) ] ; - $(p).add-constant $(name) : $(value) ; - } - - # Declare and set a project global constant, whose value is a path. The path - # is adjusted to be relative to the invocation directory. The given value - # path is taken to be either absolute, or relative to this project root. - # - rule path-constant ( - name # Variable name of the constant. - : value + # Value of the constant. - ) - { - import project ; - local caller = [ CALLER_MODULE ] ; - local p = [ project.target $(caller) ] ; - $(p).add-constant $(name) : $(value) : path ; - } - - rule use-project ( id : where ) - { - import modules ; - # See comment in 'load' for explanation. - local caller = [ CALLER_MODULE ] ; - modules.poke $(caller) : .used-projects : - [ modules.peek $(caller) : .used-projects ] - $(id) $(where) ; - } - - rule build-project ( dir ) - { - import project ; - local caller = [ CALLER_MODULE ] ; - local attributes = [ project.attributes $(caller) ] ; - - local now = [ $(attributes).get projects-to-build ] ; - $(attributes).set projects-to-build : $(now) $(dir) ; - } - - rule explicit ( target-names * ) - { - import project ; - # If 'explicit' is used in a helper rule defined in Jamroot and - # inherited by children, then most of the time we want 'explicit' to - # operate on the Jamfile where the helper rule is invoked. 
- local t = [ project.current ] ; - for local n in $(target-names) - { - $(t).mark-target-as-explicit $(n) ; - } - } - - rule always ( target-names * ) - { - import project ; - local t = [ project.current ] ; - for local n in $(target-names) - { - $(t).mark-target-as-always $(n) ; - } - } - - rule glob ( wildcards + : excludes * ) - { - import project ; - return [ project.glob-internal [ project.current ] : $(wildcards) : - $(excludes) : glob ] ; - } - - rule glob-tree ( wildcards + : excludes * ) - { - import project ; - - if $(wildcards:D) || $(excludes:D) - { - errors.user-error The patterns to 'glob-tree' may not include - directory ; - } - return [ project.glob-internal [ project.current ] : $(wildcards) : - $(excludes) : glob-tree ] ; - } - - # Calculates conditional requirements for multiple requirements at once. - # This is a shorthand to reduce duplication and to keep an inline - # declarative syntax. For example: - # - # lib x : x.cpp : [ conditional <toolset>gcc <variant>debug : - # <define>DEBUG_EXCEPTION <define>DEBUG_TRACE ] ; - # - rule conditional ( condition + : requirements * ) - { - local condition = $(condition:J=,) ; - if [ MATCH (:) : $(condition) ] - { - return $(condition)$(requirements) ; - } - else - { - return $(condition):$(requirements) ; - } - } - - rule option ( name : value ) - { - if $(__name__) != site-config && $(__name__) != user-config && $(__name__) != project-config - { - import errors ; - errors.error "The 'option' rule may be used only in site-config or user-config" ; - } - import option ; - option.set $(name) : $(value) ; - } -} diff --git a/jam-files/boost-build/build/project.py b/jam-files/boost-build/build/project.py deleted file mode 100644 index 1e1e16fa..00000000 --- a/jam-files/boost-build/build/project.py +++ /dev/null @@ -1,1120 +0,0 @@ -# Status: ported. -# Base revision: 64488 - -# Copyright 2002, 2003 Dave Abrahams -# Copyright 2002, 2005, 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Implements project representation and loading. -# Each project is represented by -# - a module where all the Jamfile content live. -# - an instance of 'project-attributes' class. -# (given module name, can be obtained by 'attributes' rule) -# - an instance of 'project-target' class (from targets.jam) -# (given a module name, can be obtained by 'target' rule) -# -# Typically, projects are created as result of loading Jamfile, which is -# do by rules 'load' and 'initialize', below. First, module for Jamfile -# is loaded and new project-attributes instance is created. Some rules -# necessary for project are added to the module (see 'project-rules' module) -# at the bottom of this file. -# Default project attributes are set (inheriting attributes of parent project, if -# it exists). After that, Jamfile is read. It can declare its own attributes, -# via 'project' rule, which will be combined with already set attributes. -# -# -# The 'project' rule can also declare project id, which will be associated with -# the project module. -# -# There can also be 'standalone' projects. They are created by calling 'initialize' -# on arbitrary module, and not specifying location. After the call, the module can -# call 'project' rule, declare main target and behave as regular projects. However, -# since it's not associated with any location, it's better declare only prebuilt -# targets. 
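Schematically, the registry described in this header keeps three parallel maps (a sketch only; the real fields are defined on ProjectRegistry below):

    module2attributes = {}  # Jamfile module name -> ProjectAttributes instance
    module2target = {}      # Jamfile module name -> ProjectTarget instance
    id2module = {}          # project id such as "/boost" -> Jamfile module name

    def register_id(id, module):
        # Associating an id with a project is a plain map insertion.
        id2module[id] = module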
-# -# The list of all loaded Jamfile is stored in variable .project-locations. It's possible -# to obtain module name for a location using 'module-name' rule. The standalone projects -# are not recorded, the only way to use them is by project id. - -import b2.util.path -from b2.build import property_set, property -from b2.build.errors import ExceptionWithUserContext -import b2.build.targets - -import bjam - -import re -import sys -import os -import string -import imp -import traceback -import b2.util.option as option - -from b2.util import record_jam_to_value_mapping, qualify_jam_action - -class ProjectRegistry: - - def __init__(self, manager, global_build_dir): - self.manager = manager - self.global_build_dir = global_build_dir - self.project_rules_ = ProjectRules(self) - - # The target corresponding to the project being loaded now - self.current_project = None - - # The set of names of loaded project modules - self.jamfile_modules = {} - - # Mapping from location to module name - self.location2module = {} - - # Mapping from project id to project module - self.id2module = {} - - # Map from Jamfile directory to parent Jamfile/Jamroot - # location. - self.dir2parent_jamfile = {} - - # Map from directory to the name of Jamfile in - # that directory (or None). - self.dir2jamfile = {} - - # Map from project module to attributes object. - self.module2attributes = {} - - # Map from project module to target for the project - self.module2target = {} - - # Map from names to Python modules, for modules loaded - # via 'using' and 'import' rules in Jamfiles. - self.loaded_tool_modules_ = {} - - self.loaded_tool_module_path_ = {} - - # Map from project target to the list of - # (id,location) pairs corresponding to all 'use-project' - # invocations. - # TODO: should not have a global map, keep this - # in ProjectTarget. - self.used_projects = {} - - self.saved_current_project = [] - - self.JAMROOT = self.manager.getenv("JAMROOT"); - - # Note the use of character groups, as opposed to listing - # 'Jamroot' and 'jamroot'. With the latter, we'd get duplicate - # matches on windows and would have to eliminate duplicates. - if not self.JAMROOT: - self.JAMROOT = ["project-root.jam", "[Jj]amroot", "[Jj]amroot.jam"] - - # Default patterns to search for the Jamfiles to use for build - # declarations. - self.JAMFILE = self.manager.getenv("JAMFILE") - - if not self.JAMFILE: - self.JAMFILE = ["[Bb]uild.jam", "[Jj]amfile.v2", "[Jj]amfile", - "[Jj]amfile.jam"] - - - def load (self, jamfile_location): - """Loads jamfile at the given location. After loading, project global - file and jamfile needed by the loaded one will be loaded recursively. - If the jamfile at that location is loaded already, does nothing. - Returns the project module for the Jamfile.""" - - absolute = os.path.join(os.getcwd(), jamfile_location) - absolute = os.path.normpath(absolute) - jamfile_location = b2.util.path.relpath(os.getcwd(), absolute) - - if "--debug-loading" in self.manager.argv(): - print "Loading Jamfile at '%s'" % jamfile_location - - - mname = self.module_name(jamfile_location) - # If Jamfile is already loaded, don't try again. - if not mname in self.jamfile_modules: - - self.load_jamfile(jamfile_location, mname) - - # We want to make sure that child project are loaded only - # after parent projects. In particular, because parent projects - # define attributes whch are inherited by children, and we don't - # want children to be loaded before parents has defined everything. 
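The deferred loading described here can be sketched as follows (simplified from load_used_projects below; the names follow the Python port but the helpers themselves are illustrative):

    import os

    used_projects = {}  # Jamfile module name -> list of (id, where) pairs

    def record_use_project(module, id, where):
        # Seen while the parent Jamfile runs: record only, load nothing yet.
        used_projects.setdefault(module, []).append((id, where))

    def load_used_projects(module, location, use):
        # Runs only after the parent has finished defining its attributes.
        for id, where in used_projects[module]:
            use(id, os.path.join(location, where))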
- # - # While "build-project" and "use-project" can potentially refer - # to child projects from parent projects, we don't immediately - # load child projects when seing those attributes. Instead, - # we record the minimal information that will be used only later. - - self.load_used_projects(mname) - - return mname - - def load_used_projects(self, module_name): - # local used = [ modules.peek $(module-name) : .used-projects ] ; - used = self.used_projects[module_name] - - location = self.attribute(module_name, "location") - for u in used: - id = u[0] - where = u[1] - - self.use(id, os.path.join(location, where)) - - def load_parent(self, location): - """Loads parent of Jamfile at 'location'. - Issues an error if nothing is found.""" - - found = b2.util.path.glob_in_parents( - location, self.JAMROOT + self.JAMFILE) - - if not found: - print "error: Could not find parent for project at '%s'" % location - print "error: Did not find Jamfile or project-root.jam in any parent directory." - sys.exit(1) - - return self.load(os.path.dirname(found[0])) - - def act_as_jamfile(self, module, location): - """Makes the specified 'module' act as if it were a regularly loaded Jamfile - at 'location'. If Jamfile is already located for that location, it's an - error.""" - - if self.module_name(location) in self.jamfile_modules: - self.manager.errors()( - "Jamfile was already loaded for '%s'" % location) - - # Set up non-default mapping from location to module. - self.location2module[location] = module - - # Add the location to the list of project locations - # so that we don't try to load Jamfile in future - self.jamfile_modules.append(location) - - self.initialize(module, location) - - def find(self, name, current_location): - """Given 'name' which can be project-id or plain directory name, - return project module corresponding to that id or directory. - Returns nothing of project is not found.""" - - project_module = None - - # Try interpreting name as project id. - if name[0] == '/': - project_module = self.id2module.get(name) - - if not project_module: - location = os.path.join(current_location, name) - # If no project is registered for the given location, try to - # load it. First see if we have Jamfile. If not we might have project - # root, willing to act as Jamfile. In that case, project-root - # must be placed in the directory referred by id. - - project_module = self.module_name(location) - if not project_module in self.jamfile_modules: - if b2.util.path.glob([location], self.JAMROOT + self.JAMFILE): - project_module = self.load(location) - else: - project_module = None - - return project_module - - def module_name(self, jamfile_location): - """Returns the name of module corresponding to 'jamfile-location'. - If no module corresponds to location yet, associates default - module name with that location.""" - module = self.location2module.get(jamfile_location) - if not module: - # Root the path, so that locations are always umbiguious. - # Without this, we can't decide if '../../exe/program1' and '.' - # are the same paths, or not. - jamfile_location = os.path.realpath( - os.path.join(os.getcwd(), jamfile_location)) - module = "Jamfile<%s>" % jamfile_location - self.location2module[jamfile_location] = module - return module - - def find_jamfile (self, dir, parent_root=0, no_errors=0): - """Find the Jamfile at the given location. This returns the - exact names of all the Jamfiles in the given directory. 
The optional - parent-root argument causes this to search not the given directory - but the ones above it up to the directory given in it.""" - - # Glob for all the possible Jamfiles according to the match pattern. - # - jamfile_glob = None - if parent_root: - parent = self.dir2parent_jamfile.get(dir) - if not parent: - parent = b2.util.path.glob_in_parents(dir, - self.JAMFILE) - self.dir2parent_jamfile[dir] = parent - jamfile_glob = parent - else: - jamfile = self.dir2jamfile.get(dir) - if not jamfile: - jamfile = b2.util.path.glob([dir], self.JAMFILE) - self.dir2jamfile[dir] = jamfile - jamfile_glob = jamfile - - if len(jamfile_glob) > 1: - # Multiple Jamfiles found in the same place. Warn about this. - # And ensure we use only one of them. - # As a temporary convenience measure, if there's Jamfile.v2 amount - # found files, suppress the warning and use it. - # - pattern = "(.*[Jj]amfile\\.v2)|(.*[Bb]uild\\.jam)" - v2_jamfiles = [x for x in jamfile_glob if re.match(pattern, x)] - if len(v2_jamfiles) == 1: - jamfile_glob = v2_jamfiles - else: - print """warning: Found multiple Jamfiles at '%s'!""" % (dir) - for j in jamfile_glob: - print " -", j - print "Loading the first one" - - # Could not find it, error. - if not no_errors and not jamfile_glob: - self.manager.errors()( - """Unable to load Jamfile. -Could not find a Jamfile in directory '%s' -Attempted to find it with pattern '%s'. -Please consult the documentation at 'http://boost.org/boost-build2'.""" - % (dir, string.join(self.JAMFILE))) - - if jamfile_glob: - return jamfile_glob[0] - - def load_jamfile(self, dir, jamfile_module): - """Load a Jamfile at the given directory. Returns nothing. - Will attempt to load the file as indicated by the JAMFILE patterns. - Effect of calling this rule twice with the same 'dir' is underfined.""" - - # See if the Jamfile is where it should be. - is_jamroot = False - jamfile_to_load = b2.util.path.glob([dir], self.JAMROOT) - if not jamfile_to_load: - jamfile_to_load = self.find_jamfile(dir) - else: - if len(jamfile_to_load) > 1: - get_manager().errors()("Multiple Jamfiles found at '%s'\n" +\ - "Filenames are: %s" - % (dir, [os.path.basename(j) for j in jamfile_to_load])) - - is_jamroot = True - jamfile_to_load = jamfile_to_load[0] - - dir = os.path.dirname(jamfile_to_load) - if not dir: - dir = "." - - self.used_projects[jamfile_module] = [] - - # Now load the Jamfile in it's own context. - # The call to 'initialize' may load parent Jamfile, which might have - # 'use-project' statement that causes a second attempt to load the - # same project we're loading now. Checking inside .jamfile-modules - # prevents that second attempt from messing up. - if not jamfile_module in self.jamfile_modules: - self.jamfile_modules[jamfile_module] = True - - # Initialize the jamfile module before loading. - # - self.initialize(jamfile_module, dir, os.path.basename(jamfile_to_load)) - - saved_project = self.current_project - - bjam.call("load", jamfile_module, jamfile_to_load) - basename = os.path.basename(jamfile_to_load) - - if is_jamroot: - jamfile = self.find_jamfile(dir, no_errors=True) - if jamfile: - bjam.call("load", jamfile_module, jamfile) - - # Now do some checks - if self.current_project != saved_project: - self.manager.errors()( -"""The value of the .current-project variable -has magically changed after loading a Jamfile. -This means some of the targets might be defined a the wrong project. 
-after loading %s -expected value %s -actual value %s""" % (jamfile_module, saved_project, self.current_project)) - - if self.global_build_dir: - id = self.attributeDefault(jamfile_module, "id", None) - project_root = self.attribute(jamfile_module, "project-root") - location = self.attribute(jamfile_module, "location") - - if location and project_root == dir: - # This is Jamroot - if not id: - # FIXME: go via errors module, so that contexts are - # shown? - print "warning: the --build-dir option was specified" - print "warning: but Jamroot at '%s'" % dir - print "warning: specified no project id" - print "warning: the --build-dir option will be ignored" - - - def load_standalone(self, jamfile_module, file): - """Loads 'file' as standalone project that has no location - associated with it. This is mostly useful for user-config.jam, - which should be able to define targets, but although it has - some location in filesystem, we don't want any build to - happen in user's HOME, for example. - - The caller is required to never call this method twice on - the same file. - """ - - self.used_projects[jamfile_module] = [] - bjam.call("load", jamfile_module, file) - self.load_used_projects(jamfile_module) - - def is_jamroot(self, basename): - match = [ pat for pat in self.JAMROOT if re.match(pat, basename)] - if match: - return 1 - else: - return 0 - - def initialize(self, module_name, location=None, basename=None): - """Initialize the module for a project. - - module-name is the name of the project module. - location is the location (directory) of the project to initialize. - If not specified, stanalone project will be initialized - """ - - if "--debug-loading" in self.manager.argv(): - print "Initializing project '%s'" % module_name - - # TODO: need to consider if standalone projects can do anything but defining - # prebuilt targets. If so, we need to give more sensible "location", so that - # source paths are correct. - if not location: - location = "" - - attributes = ProjectAttributes(self.manager, location, module_name) - self.module2attributes[module_name] = attributes - - python_standalone = False - if location: - attributes.set("source-location", [location], exact=1) - elif not module_name in ["test-config", "site-config", "user-config", "project-config"]: - # This is a standalone project with known location. Set source location - # so that it can declare targets. This is intended so that you can put - # a .jam file in your sources and use it via 'using'. Standard modules - # (in 'tools' subdir) may not assume source dir is set. 
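The parent-module selection that follows walks a fixed chain for the configuration modules; schematically (the helper name is hypothetical):

    def parent_config_module(module_name):
        # test-config has no parent; ordinary Jamfiles instead inherit from
        # their parent project or, for a Jamroot, from user-config (or from
        # project-config when that module exists).
        chain = {"site-config": "test-config",
                 "user-config": "site-config",
                 "project-config": "user-config"}
        return chain.get(module_name)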
- module = sys.modules[module_name] - attributes.set("source-location", self.loaded_tool_module_path_[module_name], exact=1) - python_standalone = True - - attributes.set("requirements", property_set.empty(), exact=True) - attributes.set("usage-requirements", property_set.empty(), exact=True) - attributes.set("default-build", property_set.empty(), exact=True) - attributes.set("projects-to-build", [], exact=True) - attributes.set("project-root", None, exact=True) - attributes.set("build-dir", None, exact=True) - - self.project_rules_.init_project(module_name, python_standalone) - - jamroot = False - - parent_module = None; - if module_name == "test-config": - # No parent - pass - elif module_name == "site-config": - parent_module = "test-config" - elif module_name == "user-config": - parent_module = "site-config" - elif module_name == "project-config": - parent_module = "user-config" - elif location and not self.is_jamroot(basename): - # We search for parent/project-root only if jamfile was specified - # --- i.e - # if the project is not standalone. - parent_module = self.load_parent(location) - else: - # It's either jamroot, or standalone project. - # If it's jamroot, inherit from user-config. - if location: - # If project-config module exist, inherit from it. - if self.module2attributes.has_key("project-config"): - parent_module = "project-config" - else: - parent_module = "user-config" ; - - jamroot = True ; - - if parent_module: - self.inherit_attributes(module_name, parent_module) - attributes.set("parent-module", parent_module, exact=1) - - if jamroot: - attributes.set("project-root", location, exact=1) - - parent = None - if parent_module: - parent = self.target(parent_module) - - if not self.module2target.has_key(module_name): - target = b2.build.targets.ProjectTarget(self.manager, - module_name, module_name, parent, - self.attribute(module_name,"requirements"), - # FIXME: why we need to pass this? It's not - # passed in jam code. - self.attribute(module_name, "default-build")) - self.module2target[module_name] = target - - self.current_project = self.target(module_name) - - def inherit_attributes(self, project_module, parent_module): - """Make 'project-module' inherit attributes of project - root and parent module.""" - - attributes = self.module2attributes[project_module] - pattributes = self.module2attributes[parent_module] - - # Parent module might be locationless user-config. - # FIXME: - #if [ modules.binding $(parent-module) ] - #{ - # $(attributes).set parent : [ path.parent - # [ path.make [ modules.binding $(parent-module) ] ] ] ; - # } - - attributes.set("project-root", pattributes.get("project-root"), exact=True) - attributes.set("default-build", pattributes.get("default-build"), exact=True) - attributes.set("requirements", pattributes.get("requirements"), exact=True) - attributes.set("usage-requirements", - pattributes.get("usage-requirements"), exact=1) - - parent_build_dir = pattributes.get("build-dir") - - if parent_build_dir: - # Have to compute relative path from parent dir to our dir - # Convert both paths to absolute, since we cannot - # find relative path from ".." to "." 
-
-            location = attributes.get("location")
-            parent_location = pattributes.get("location")
-
-            our_dir = os.path.join(os.getcwd(), location)
-            parent_dir = os.path.join(os.getcwd(), parent_location)
-
-            build_dir = os.path.join(parent_build_dir,
-                                     os.path.relpath(our_dir, parent_dir))
-            attributes.set("build-dir", build_dir, exact=True)
-
-    def register_id(self, id, module):
-        """Associate the given id with the given project module."""
-        self.id2module[id] = module
-
-    def current(self):
-        """Returns the project which is currently being loaded."""
-        return self.current_project
-
-    def set_current(self, c):
-        self.current_project = c
-
-    def push_current(self, project):
-        """Temporarily changes the current project to 'project'. Should
-        be followed by 'pop-current'."""
-        self.saved_current_project.append(self.current_project)
-        self.current_project = project
-
-    def pop_current(self):
-        self.current_project = self.saved_current_project[-1]
-        del self.saved_current_project[-1]
-
-    def attributes(self, project):
-        """Returns the project-attribute instance for the
-        specified jamfile module."""
-        return self.module2attributes[project]
-
-    def attribute(self, project, attribute):
-        """Returns the value of the specified attribute in the
-        specified jamfile module."""
-        try:
-            return self.module2attributes[project].get(attribute)
-        except KeyError:
-            raise BaseException("No attribute '%s' for project %s" % (attribute, project))
-
-    def attributeDefault(self, project, attribute, default):
-        """Returns the value of the specified attribute in the
-        specified jamfile module, or 'default' if it is unset."""
-        return self.module2attributes[project].getDefault(attribute, default)
-
-    def target(self, project_module):
-        """Returns the project target corresponding to the 'project-module'."""
-        if not self.module2target.has_key(project_module):
-            self.module2target[project_module] = \
-                b2.build.targets.ProjectTarget(project_module, project_module,
-                    self.attribute(project_module, "requirements"))
-
-        return self.module2target[project_module]
-
-    def use(self, id, location):
-        # Use/load a project.
-        saved_project = self.current_project
-        project_module = self.load(location)
-        declared_id = self.attributeDefault(project_module, "id", "")
-
-        if not declared_id or declared_id != id:
-            # The project at 'location' either has no id or
-            # that id is not equal to the 'id' parameter.
-            if self.id2module.has_key(id) and self.id2module[id] != project_module:
-                self.manager.errors()(
-"""Attempt to redeclare already existing project id '%s' at location '%s'""" % (id, location))
-            self.id2module[id] = project_module
-
-        self.current_project = saved_project
-
-    def add_rule(self, name, callable):
-        """Makes rule 'name' available to all subsequently loaded Jamfiles.
-
-        Calling that rule will relay to 'callable'."""
-        self.project_rules_.add_rule(name, callable)
-
-    def project_rules(self):
-        return self.project_rules_
-
-    def glob_internal(self, project, wildcards, excludes, rule_name):
-        location = project.get("source-location")[0]
-
-        result = []
-        callable = b2.util.path.__dict__[rule_name]
-
-        paths = callable([location], wildcards, excludes)
-        has_dir = 0
-        for w in wildcards:
-            if os.path.dirname(w):
-                has_dir = 1
-                break
-
-        if has_dir or rule_name != "glob":
-            result = []
-            # The paths we have found are relative to the current directory,
-            # but the names specified in the sources list are assumed to be
-            # relative to the source directory of the corresponding
-            # project. Either translate them or make them absolute.
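The loop that follows implements the comment above; as a standalone sketch (using a path-component test for '..', where the original code tests for the substring):

    import os

    def relativize(p, location):
        rel = os.path.relpath(p, location)
        # Below the source location: keep the relative form. Otherwise use
        # an absolute path to avoid any ambiguity.
        if ".." in rel.split(os.sep):
            return os.path.abspath(p)
        return rel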
- - for p in paths: - rel = os.path.relpath(p, location) - # If the path is below source location, use relative path. - if not ".." in rel: - result.append(rel) - else: - # Otherwise, use full path just to avoid any ambiguities. - result.append(os.path.abspath(p)) - - else: - # There were not directory in wildcard, so the files are all - # in the source directory of the project. Just drop the - # directory, instead of making paths absolute. - result = [os.path.basename(p) for p in paths] - - return result - - def load_module(self, name, extra_path=None): - """Load a Python module that should be useable from Jamfiles. - - There are generally two types of modules Jamfiles might want to - use: - - Core Boost.Build. Those are imported using plain names, e.g. - 'toolset', so this function checks if we have module named - b2.package.module already. - - Python modules in the same directory as Jamfile. We don't - want to even temporary add Jamfile's directory to sys.path, - since then we might get naming conflicts between standard - Python modules and those. - """ - - # See if we loaded module of this name already - existing = self.loaded_tool_modules_.get(name) - if existing: - return existing - - # See if we have a module b2.whatever.<name>, where <name> - # is what is passed to this function - modules = sys.modules - for class_name in modules: - parts = class_name.split('.') - if name is class_name or parts[0] == "b2" \ - and parts[-1] == name.replace("-", "_"): - module = modules[class_name] - self.loaded_tool_modules_[name] = module - return module - - # Lookup a module in BOOST_BUILD_PATH - path = extra_path - if not path: - path = [] - path.extend(self.manager.boost_build_path()) - location = None - for p in path: - l = os.path.join(p, name + ".py") - if os.path.exists(l): - location = l - break - - if not location: - self.manager.errors()("Cannot find module '%s'" % name) - - mname = name + "__for_jamfile" - file = open(location) - try: - # TODO: this means we'll never make use of .pyc module, - # which might be a problem, or not. - self.loaded_tool_module_path_[mname] = location - module = imp.load_module(mname, file, os.path.basename(location), - (".py", "r", imp.PY_SOURCE)) - self.loaded_tool_modules_[name] = module - return module - finally: - file.close() - - - -# FIXME: -# Defines a Boost.Build extension project. Such extensions usually -# contain library targets and features that can be used by many people. -# Even though extensions are really projects, they can be initialize as -# a module would be with the "using" (project.project-rules.using) -# mechanism. -#rule extension ( id : options * : * ) -#{ -# # The caller is a standalone module for the extension. -# local mod = [ CALLER_MODULE ] ; -# -# # We need to do the rest within the extension module. -# module $(mod) -# { -# import path ; -# -# # Find the root project. -# local root-project = [ project.current ] ; -# root-project = [ $(root-project).project-module ] ; -# while -# [ project.attribute $(root-project) parent-module ] && -# [ project.attribute $(root-project) parent-module ] != user-config -# { -# root-project = [ project.attribute $(root-project) parent-module ] ; -# } -# -# # Create the project data, and bring in the project rules -# # into the module. -# project.initialize $(__name__) : -# [ path.join [ project.attribute $(root-project) location ] ext $(1:L) ] ; -# -# # Create the project itself, i.e. the attributes. -# # All extensions are created in the "/ext" project space. 
-# project /ext/$(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; -# local attributes = [ project.attributes $(__name__) ] ; -# -# # Inherit from the root project of whomever is defining us. -# project.inherit-attributes $(__name__) : $(root-project) ; -# $(attributes).set parent-module : $(root-project) : exact ; -# } -#} - - -class ProjectAttributes: - """Class keeping all the attributes of a project. - - The standard attributes are "id", "location", "project-root", "parent", - "requirements", "default-build", "source-location" and "projects-to-build". - """ - - def __init__(self, manager, location, project_module): - self.manager = manager - self.location = location - self.project_module = project_module - self.attributes = {} - self.usage_requirements = None - - def set(self, attribute, specification, exact=False): - """Set the named attribute from the specification given by the user. - The value actually set may be different.""" - - if exact: - self.__dict__[attribute] = specification - - elif attribute == "requirements": - self.requirements = property_set.refine_from_user_input( - self.requirements, specification, - self.project_module, self.location) - - elif attribute == "usage-requirements": - unconditional = [] - for p in specification: - split = property.split_conditional(p) - if split: - unconditional.append(split[1]) - else: - unconditional.append(p) - - non_free = property.remove("free", unconditional) - if non_free: - get_manager().errors()("usage-requirements %s have non-free properties %s" \ - % (specification, non_free)) - - t = property.translate_paths( - property.create_from_strings(specification, allow_condition=True), - self.location) - - existing = self.__dict__.get("usage-requirements") - if existing: - new = property_set.create(existing.all() + t) - else: - new = property_set.create(t) - self.__dict__["usage-requirements"] = new - - - elif attribute == "default-build": - self.__dict__["default-build"] = property_set.create(specification) - - elif attribute == "source-location": - source_location = [] - for path in specification: - source_location.append(os.path.join(self.location, path)) - self.__dict__["source-location"] = source_location - - elif attribute == "build-dir": - self.__dict__["build-dir"] = os.path.join(self.location, specification[0]) - - elif attribute == "id": - id = specification[0] - if id[0] != '/': - id = "/" + id - self.manager.projects().register_id(id, self.project_module) - self.__dict__["id"] = id - - elif not attribute in ["default-build", "location", - "source-location", "parent", - "projects-to-build", "project-root"]: - self.manager.errors()( -"""Invalid project attribute '%s' specified -for project at '%s'""" % (attribute, self.location)) - else: - self.__dict__[attribute] = specification - - def get(self, attribute): - return self.__dict__[attribute] - - def getDefault(self, attribute, default): - return self.__dict__.get(attribute, default) - - def dump(self): - """Prints the project attributes.""" - id = self.getDefault("id", "(none)") - parent = self.getDefault("parent", "(none)") - - print "'%s'" % id - print "Parent project: %s" % parent - print "Requirements: %s" % self.get("requirements") - print "Default build: %s" % string.join(self.get("default-build")) - print "Source location: %s" % string.join(self.get("source-location")) - print "Projects to build: %s" % string.join(sorted(self.get("projects-to-build"))) - -class ProjectRules: -
"""Class keeping all rules that are made available to Jamfile.""" - - def __init__(self, registry): - self.registry = registry - self.manager_ = registry.manager - self.rules = {} - self.local_names = [x for x in self.__class__.__dict__ - if x not in ["__init__", "init_project", "add_rule", - "error_reporting_wrapper", "add_rule_for_type", "reverse"]] - self.all_names_ = [x for x in self.local_names] - - def _import_rule(self, bjam_module, name, callable): - if hasattr(callable, "bjam_signature"): - bjam.import_rule(bjam_module, name, self.make_wrapper(callable), callable.bjam_signature) - else: - bjam.import_rule(bjam_module, name, self.make_wrapper(callable)) - - - def add_rule_for_type(self, type): - rule_name = type.lower().replace("_", "-") - - def xpto (name, sources = [], requirements = [], default_build = [], usage_requirements = []): - return self.manager_.targets().create_typed_target( - type, self.registry.current(), name[0], sources, - requirements, default_build, usage_requirements) - - self.add_rule(rule_name, xpto) - - def add_rule(self, name, callable): - self.rules[name] = callable - self.all_names_.append(name) - - # Add new rule at global bjam scope. This might not be ideal, - # added because if a jamroot does 'import foo' where foo calls - # add_rule, we need to import new rule to jamroot scope, and - # I'm lazy to do this now. - self._import_rule("", name, callable) - - def all_names(self): - return self.all_names_ - - def call_and_report_errors(self, callable, *args, **kw): - result = None - try: - self.manager_.errors().push_jamfile_context() - result = callable(*args, **kw) - except ExceptionWithUserContext, e: - e.report() - except Exception, e: - try: - self.manager_.errors().handle_stray_exception (e) - except ExceptionWithUserContext, e: - e.report() - finally: - self.manager_.errors().pop_jamfile_context() - - return result - - def make_wrapper(self, callable): - """Given a free-standing function 'callable', return a new - callable that will call 'callable' and report all exceptins, - using 'call_and_report_errors'.""" - def wrapper(*args, **kw): - return self.call_and_report_errors(callable, *args, **kw) - return wrapper - - def init_project(self, project_module, python_standalone=False): - - if python_standalone: - m = sys.modules[project_module] - - for n in self.local_names: - if n != "import_": - setattr(m, n, getattr(self, n)) - - for n in self.rules: - setattr(m, n, self.rules[n]) - - return - - for n in self.local_names: - # Using 'getattr' here gives us a bound method, - # while using self.__dict__[r] would give unbound one. - v = getattr(self, n) - if callable(v): - if n == "import_": - n = "import" - else: - n = string.replace(n, "_", "-") - - self._import_rule(project_module, n, v) - - for n in self.rules: - self._import_rule(project_module, n, self.rules[n]) - - def project(self, *args): - - jamfile_module = self.registry.current().project_module() - attributes = self.registry.attributes(jamfile_module) - - id = None - if args and args[0]: - id = args[0][0] - args = args[1:] - - if id: - attributes.set('id', [id]) - - explicit_build_dir = None - for a in args: - if a: - attributes.set(a[0], a[1:], exact=0) - if a[0] == "build-dir": - explicit_build_dir = a[1] - - # If '--build-dir' is specified, change the build dir for the project. - if self.registry.global_build_dir: - - location = attributes.get("location") - # Project with empty location is 'standalone' project, like - # user-config, or qt. It has no build dir. 
- # If we tried to set a build dir for user-config, we would then - # try to inherit it, with either weird or wrong consequences. - if location and location == attributes.get("project-root"): - # Re-read the project id, since it might have been changed in - # the project's attributes. - id = attributes.get('id') - - # This is Jamroot. - if id: - if explicit_build_dir and os.path.isabs(explicit_build_dir): - self.registry.manager.errors()( -"""Absolute directory specified via 'build-dir' project attribute. -Don't know how to combine that with the --build-dir option.""") - - rid = id - if rid[0] == '/': - rid = rid[1:] - - p = os.path.join(self.registry.global_build_dir, rid) - if explicit_build_dir: - p = os.path.join(p, explicit_build_dir) - attributes.set("build-dir", p, exact=1) - elif explicit_build_dir: - self.registry.manager.errors()( -"""When --build-dir is specified, the 'build-dir' -attribute is allowed only for top-level 'project' invocations""") - - def constant(self, name, value): - """Declare and set a project global constant. - Project global constants are normal variables but should - not be changed. They are applied to every child Jamfile.""" - self.registry.current().add_constant(name[0], value) - - def path_constant(self, name, value): - """Declare and set a project global constant, whose value is a path. The - path is adjusted to be relative to the invocation directory. The given - value path is taken to be either absolute, or relative to this project - root.""" - if len(value) > 1: - self.registry.manager.errors()("path constant should have one element") - self.registry.current().add_constant(name[0], value[0], path=1) - - def use_project(self, id, where): - # See the comment in 'load' for an explanation of why we record the - # parameters as opposed to loading the project now. - m = self.registry.current().project_module() - self.registry.used_projects[m].append((id[0], where[0])) - - def build_project(self, dir): - assert(isinstance(dir, list)) - jamfile_module = self.registry.current().project_module() - attributes = self.registry.attributes(jamfile_module) - now = attributes.get("projects-to-build") - attributes.set("projects-to-build", now + dir, exact=True) - - def explicit(self, target_names): - self.registry.current().mark_targets_as_explicit(target_names) - - def always(self, target_names): - self.registry.current().mark_targets_as_always(target_names) - - def glob(self, wildcards, excludes=None): - return self.registry.glob_internal(self.registry.current(), - wildcards, excludes, "glob") - - def glob_tree(self, wildcards, excludes=None): - bad = 0 - for p in wildcards: - if os.path.dirname(p): - bad = 1 - - if excludes: - for p in excludes: - if os.path.dirname(p): - bad = 1 - - if bad: - self.registry.manager.errors()( -"The patterns to 'glob-tree' may not include directories") - return self.registry.glob_internal(self.registry.current(), - wildcards, excludes, "glob_tree") - - - def using(self, toolset, *args): - # The module referred to by 'using' can be placed in - # the same directory as the Jamfile, and the user - # will expect the module to be found even though - # the directory is not in BOOST_BUILD_PATH. - # So temporarily change the search path.
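- # For example (hypothetical layout): a Jamfile in /proj with a sibling - # mytool.py can say 'using mytool ;', and load_module below will find - # mytool.py through the [location] entry even though /proj is not in - # BOOST_BUILD_PATH.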
- current = self.registry.current() - location = current.get('location') - - m = self.registry.load_module(toolset[0], [location]) - if not m.__dict__.has_key("init"): - self.registry.manager.errors()( - "Tool module '%s' does not define the 'init' method" % toolset[0]) - m.init(*args) - - # The above might have clobbered .current-project. Restore the correct - # value. - self.registry.set_current(current) - - def import_(self, name, names_to_import=None, local_names=None): - - name = name[0] - py_name = name - if py_name == "os": - py_name = "os_j" - jamfile_module = self.registry.current().project_module() - attributes = self.registry.attributes(jamfile_module) - location = attributes.get("location") - - saved = self.registry.current() - - m = self.registry.load_module(py_name, [location]) - - for f in m.__dict__: - v = m.__dict__[f] - f = f.replace("_", "-") - if callable(v): - qn = name + "." + f - self._import_rule(jamfile_module, qn, v) - record_jam_to_value_mapping(qualify_jam_action(qn, jamfile_module), v) - - - if names_to_import: - if not local_names: - local_names = names_to_import - - if len(names_to_import) != len(local_names): - self.registry.manager.errors()( -"""The number of names to import and local names do not match.""") - - for n, l in zip(names_to_import, local_names): - self._import_rule(jamfile_module, l, m.__dict__[n]) - - self.registry.set_current(saved) - - def conditional(self, condition, requirements): - """Calculates conditional requirements for multiple requirements - at once. This is a shorthand to reduce duplication and to - keep an inline declarative syntax. For example: - - lib x : x.cpp : [ conditional <toolset>gcc <variant>debug : - <define>DEBUG_EXCEPTION <define>DEBUG_TRACE ] ; - """ - - c = string.join(condition, ",") - if c.find(":") != -1: - return [c + r for r in requirements] - else: - return [c + ":" + r for r in requirements] - - def option(self, name, value): - name = name[0] - if not name in ["site-config", "user-config", "project-config"]: - get_manager().errors()("The 'option' rule may be used only in site-config, user-config or project-config") - - option.set(name, value[0]) diff --git a/jam-files/boost-build/build/property-set.jam b/jam-files/boost-build/build/property-set.jam deleted file mode 100644 index 70fd90cd..00000000 --- a/jam-files/boost-build/build/property-set.jam +++ /dev/null @@ -1,481 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import "class" : new ; -import feature ; -import path ; -import project ; -import property ; -import sequence ; -import set ; -import option ; - -# Class for storing a set of properties. -# -# There is a 1<->1 correspondence between identity and value. No two instances -# of the class are equal. To maintain this property, the 'property-set.create' -# rule should be used to create new instances. Instances are immutable. -# -# Each property is classified with regard to its effect on build results. -# Incidental properties have no effect on build results, from Boost.Build's -# point of view. Others are either free, or non-free and we refer to non-free -# ones as 'base'. Each property belongs to exactly one of those categories. -# -# It is possible to get a list of properties belonging to each category as -# well as a list of properties with a specific attribute.
-# -# Several operations, like and refine and as-path are provided. They all use -# caching whenever possible. -# -class property-set -{ - import errors ; - import feature ; - import path ; - import property ; - import property-set ; - import set ; - - rule __init__ ( raw-properties * ) - { - self.raw = $(raw-properties) ; - - for local p in $(raw-properties) - { - if ! $(p:G) - { - errors.error "Invalid property: '$(p)'" ; - } - - local att = [ feature.attributes $(p:G) ] ; - # A feature can be both incidental and free, in which case we add it - # to incidental. - if incidental in $(att) - { - self.incidental += $(p) ; - } - else if free in $(att) - { - self.free += $(p) ; - } - else - { - self.base += $(p) ; - } - - if dependency in $(att) - { - self.dependency += $(p) ; - } - else - { - self.non-dependency += $(p) ; - } - - if [ MATCH (:) : $(p:G=) ] - { - self.conditional += $(p) ; - } - else - { - self.non-conditional += $(p) ; - } - - if propagated in $(att) - { - self.propagated += $(p) ; - } - if link-incompatible in $(att) - { - self.link-incompatible += $(p) ; - } - } - } - - # Returns Jam list of stored properties. - # - rule raw ( ) - { - return $(self.raw) ; - } - - rule str ( ) - { - return "[" $(self.raw) "]" ; - } - - # Returns properties that are neither incidental nor free. - # - rule base ( ) - { - return $(self.base) ; - } - - # Returns free properties which are not incidental. - # - rule free ( ) - { - return $(self.free) ; - } - - # Returns dependency properties. - # - rule dependency ( ) - { - return $(self.dependency) ; - } - - rule non-dependency ( ) - { - return $(self.non-dependency) ; - } - - rule conditional ( ) - { - return $(self.conditional) ; - } - - rule non-conditional ( ) - { - return $(self.non-conditional) ; - } - - # Returns incidental properties. - # - rule incidental ( ) - { - return $(self.incidental) ; - } - - rule refine ( ps ) - { - if ! $(self.refined.$(ps)) - { - local r = [ property.refine $(self.raw) : [ $(ps).raw ] ] ; - if $(r[1]) != "@error" - { - self.refined.$(ps) = [ property-set.create $(r) ] ; - } - else - { - self.refined.$(ps) = $(r) ; - } - } - return $(self.refined.$(ps)) ; - } - - rule expand ( ) - { - if ! $(self.expanded) - { - self.expanded = [ property-set.create [ feature.expand $(self.raw) ] ] ; - } - return $(self.expanded) ; - } - - rule expand-composites ( ) - { - if ! $(self.composites) - { - self.composites = [ property-set.create - [ feature.expand-composites $(self.raw) ] ] ; - } - return $(self.composites) ; - } - - rule evaluate-conditionals ( context ? ) - { - context ?= $(__name__) ; - if ! $(self.evaluated.$(context)) - { - self.evaluated.$(context) = [ property-set.create - [ property.evaluate-conditionals-in-context $(self.raw) : [ $(context).raw ] ] ] ; - } - return $(self.evaluated.$(context)) ; - } - - rule propagated ( ) - { - if ! $(self.propagated-ps) - { - self.propagated-ps = [ property-set.create $(self.propagated) ] ; - } - return $(self.propagated-ps) ; - } - - rule link-incompatible ( ) - { - if ! $(self.link-incompatible-ps) - { - self.link-incompatible-ps = - [ property-set.create $(self.link-incompatible) ] ; - } - return $(self.link-incompatible-ps) ; - } - - rule run-actions ( ) - { - if ! $(self.run) - { - self.run = [ property-set.create [ feature.run-actions $(self.raw) ] ] ; - } - return $(self.run) ; - } - - rule add-defaults ( ) - { - if ! 
$(self.defaults) - { - self.defaults = [ property-set.create - [ feature.add-defaults $(self.raw) ] ] ; - } - return $(self.defaults) ; - } - - rule as-path ( ) - { - if ! $(self.as-path) - { - self.as-path = [ property.as-path $(self.base) ] ; - } - return $(self.as-path) ; - } - - # Computes the path to be used for a target with the given properties. - # Returns a list of - # - the computed path - # - if the path is relative to the build directory, a value of 'true'. - # - rule target-path ( ) - { - if ! $(self.target-path) - { - # The <location> feature can be used to explicitly change the - # location of generated targets. - local l = [ get <location> ] ; - if $(l) - { - self.target-path = $(l) ; - } - else - { - local p = [ as-path ] ; - p = [ property-set.hash-maybe $(p) ] ; - - # A real ugly hack. Boost regression test system requires - # specific target paths, and it seems that changing it to handle - # other directory layout is really hard. For that reason, we - # teach V2 to do the things regression system requires. The - # value of '<location-prefix>' is prepended to the path. - local prefix = [ get <location-prefix> ] ; - if $(prefix) - { - self.target-path = [ path.join $(prefix) $(p) ] ; - } - else - { - self.target-path = $(p) ; - } - if ! $(self.target-path) - { - self.target-path = . ; - } - # The path is relative to build dir. - self.target-path += true ; - } - } - return $(self.target-path) ; - } - - rule add ( ps ) - { - if ! $(self.added.$(ps)) - { - self.added.$(ps) = [ property-set.create $(self.raw) [ $(ps).raw ] ] ; - } - return $(self.added.$(ps)) ; - } - - rule add-raw ( properties * ) - { - return [ add [ property-set.create $(properties) ] ] ; - } - - rule link-incompatible-with ( ps ) - { - if ! $(.li.$(ps)) - { - local li1 = [ $(__name__).link-incompatible ] ; - local li2 = [ $(ps).link-incompatible ] ; - if [ set.equal $(li1) : $(li2) ] - { - .li.$(ps) = false ; - } - else - { - .li.$(ps) = true ; - } - } - if $(.li.$(ps)) = true - { - return true ; - } - else - { - return ; - } - } - - # Returns all values of 'feature'. - # - rule get ( feature ) - { - if ! $(self.map-built) - { - # For each feature, create a member var and assign all values to it. - # Since all regular member vars start with 'self', there will be no - # conflicts between names. - self.map-built = true ; - for local v in $(self.raw) - { - $(v:G) += $(v:G=) ; - } - } - return $($(feature)) ; - } -} - - -# Creates a new 'property-set' instance for the given raw properties or returns -# an already existing ones. -# -rule create ( raw-properties * ) -{ - raw-properties = [ sequence.unique - [ sequence.insertion-sort $(raw-properties) ] ] ; - - local key = $(raw-properties:J=-:E=) ; - - if ! $(.ps.$(key)) - { - .ps.$(key) = [ new property-set $(raw-properties) ] ; - } - return $(.ps.$(key)) ; -} -NATIVE_RULE property-set : create ; - - -# Creates a new 'property-set' instance after checking that all properties are -# valid and converting incidental properties into gristed form. -# -rule create-with-validation ( raw-properties * ) -{ - property.validate $(raw-properties) ; - return [ create [ property.make $(raw-properties) ] ] ; -} - - -# Creates a property-set from the input given by the user, in the context of -# 'jamfile-module' at 'location'. 
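-# For instance (hypothetical values): a user-supplied <include>extra for a -# project at /proj is rooted by property.translate-paths into -# <include>/proj/extra, so the property keeps its meaning regardless of the -# directory the build is invoked from.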
-# -rule create-from-user-input ( raw-properties * : jamfile-module location ) -{ - local specification = [ property.translate-paths $(raw-properties) - : $(location) ] ; - specification = [ property.translate-indirect $(specification) - : $(jamfile-module) ] ; - local project-id = [ project.attribute $(jamfile-module) id ] ; - project-id ?= [ path.root $(location) [ path.pwd ] ] ; - specification = [ property.translate-dependencies - $(specification) : $(project-id) : $(location) ] ; - specification = - [ property.expand-subfeatures-in-conditions $(specification) ] ; - specification = [ property.make $(specification) ] ; - return [ property-set.create $(specification) ] ; -} - - -# Refines requirements with requirements provided by the user. Specially handles -# "-<property>value" syntax in specification to remove given requirements. -# - parent-requirements -- property-set object with requirements to refine. -# - specification -- string list of requirements provided by the user. -# - project-module -- module to which context indirect features will be -# bound. -# - location -- path to which path features are relative. -# -rule refine-from-user-input ( parent-requirements : specification * : - project-module : location ) -{ - if ! $(specification) - { - return $(parent-requirements) ; - } - else - { - local add-requirements ; - local remove-requirements ; - - for local r in $(specification) - { - local m = [ MATCH "^-(.*)" : $(r) ] ; - if $(m) - { - remove-requirements += $(m) ; - } - else - { - add-requirements += $(r) ; - } - } - - if $(remove-requirements) - { - # Need to create a property set, so that path features and indirect - # features are translated just like they are in project - # requirements. - local ps = [ property-set.create-from-user-input - $(remove-requirements) : $(project-module) $(location) ] ; - - parent-requirements = [ property-set.create - [ set.difference [ $(parent-requirements).raw ] - : [ $(ps).raw ] ] ] ; - specification = $(add-requirements) ; - } - - local requirements = [ property-set.create-from-user-input - $(specification) : $(project-module) $(location) ] ; - - return [ $(parent-requirements).refine $(requirements) ] ; - } -} - - -# Returns a property-set with an empty set of properties. -# -rule empty ( ) -{ - if ! $(.empty) - { - .empty = [ create ] ; - } - return $(.empty) ; -} - -if [ option.get hash : : yes ] = yes -{ - rule hash-maybe ( path ? ) - { - path ?= "" ; - return [ MD5 $(path) ] ; - } -} -else -{ - rule hash-maybe ( path ? ) - { - return $(path) ; - } -} - diff --git a/jam-files/boost-build/build/property.jam b/jam-files/boost-build/build/property.jam deleted file mode 100644 index a2ad5226..00000000 --- a/jam-files/boost-build/build/property.jam +++ /dev/null @@ -1,788 +0,0 @@ -# Copyright 2001, 2002, 2003 Dave Abrahams -# Copyright 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import errors ; -import feature ; -import indirect ; -import path ; -import regex ; -import string ; -import sequence ; -import set ; -import utility ; - - -# Refines 'properties' by overriding any non-free and non-conditional properties -# for which a different value is specified in 'requirements'. Returns the -# resulting list of properties. 
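-# A short worked example, mirroring the tests at the end of this file: -# refining <toolset>gcc <rtti>off with <define>FOO yields -# <toolset>gcc <rtti>off <define>FOO (free properties are merged in), while -# refining <toolset>gcc <optimization>off with <optimization>on yields -# <toolset>gcc <optimization>on (the requirement overrides the base value).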
-# -rule refine ( properties * : requirements * ) -{ - local result ; - local error ; - - # All the 'requirements' elements should be present in the result. Record - # them so that we can handle 'properties'. - for local r in $(requirements) - { - # Do not consider conditional requirements. - if ! [ MATCH (:) : $(r:G=) ] - { - # Note: cannot use a local variable here, so use an ugly name. - __require__$(r:G) = $(r:G=) ; - } - } - - for local p in $(properties) - { - if [ MATCH (:) : $(p:G=) ] - { - # Do not modify conditional properties. - result += $(p) ; - } - else if free in [ feature.attributes $(p:G) ] - { - # Do not modify free properties. - result += $(p) ; - } - else - { - local required-value = $(__require__$(p:G)) ; - if $(required-value) - { - if $(p:G=) != $(required-value) - { - result += $(p:G)$(required-value) ; - } - else - { - result += $(p) ; - } - } - else - { - result += $(p) ; - } - } - } - - # Unset our ugly map. - for local r in $(requirements) - { - __require__$(r:G) = ; - } - - if $(error) - { - return $(error) ; - } - else - { - return [ sequence.unique $(result) $(requirements) ] ; - } -} - - -# Removes all conditional properties whose conditions are not met. For those -# with met conditions, removes the condition. Properties in conditions are -# looked up in 'context'. -# -rule evaluate-conditionals-in-context ( properties * : context * ) -{ - local base ; - local conditionals ; - for local p in $(properties) - { - if [ MATCH (:<) : $(p) ] - { - conditionals += $(p) ; - } - else - { - base += $(p) ; - } - } - - local result = $(base) ; - for local p in $(conditionals) - { - # Separate condition and property. - local s = [ MATCH (.*):(<.*) : $(p) ] ; - # Split condition into individual properties. - local condition = [ regex.split $(s[1]) "," ] ; - # Evaluate condition. - if ! [ MATCH (!).* : $(condition:G=) ] - { - # Only positive checks - if $(condition) in $(context) - { - result += $(s[2]) ; - } - } - else - { - # Have negative checks - local fail ; - while $(condition) - { - local c = $(condition[1]) ; - local m = [ MATCH !(.*) : $(c) ] ; - if $(m) - { - local p = $(m:G=$(c:G)) ; - if $(p) in $(context) - { - fail = true ; - c = ; - } - } - else - { - if ! $(c) in $(context) - { - fail = true ; - c = ; - } - } - condition = $(condition[2-]) ; - } - if ! $(fail) - { - result += $(s[2]) ; - } - } - } - return $(result) ; -} - - -rule expand-subfeatures-in-conditions ( properties * ) -{ - local result ; - for local p in $(properties) - { - local s = [ MATCH (.*):(<.*) : $(p) ] ; - if ! $(s) - { - result += $(p) ; - } - else - { - local condition = $(s[1]) ; - local value = $(s[2]) ; - # Condition might include several elements. - condition = [ regex.split $(condition) "," ] ; - local e ; - for local c in $(condition) - { - # It is common for a condition to include a toolset or - # subfeatures that have not been defined. In that case we want - # the condition to simply 'never be satisfied' and validation - # would only produce a spurious error so we prevent it by - # passing 'true' as the second parameter. - e += [ feature.expand-subfeatures $(c) : true ] ; - } - if $(e) = $(condition) - { - # (todo) - # This is just an optimization and possibly a premature one at - # that. - # (todo) (12.07.2008.) (Jurko) - result += $(p) ; - } - else - { - result += $(e:J=,):$(value) ; - } - } - } - return $(result) ; -} - - -# Helper for as-path, below. Orders properties with the implicit ones first, and -# within the two sections in alphabetical order of feature name. 
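-# For example, the tests at the end of this file expect the implicit values -# 'gcc' and 'debug' to order before explicit properties such as -# <optimization>off, which is how as-path arrives at paths like -# gcc/debug/rtti-off.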
-# -local rule path-order ( x y ) -{ - if $(y:G) && ! $(x:G) - { - return true ; - } - else if $(x:G) && ! $(y:G) - { - return ; - } - else - { - if ! $(x:G) - { - x = [ feature.expand-subfeatures $(x) ] ; - y = [ feature.expand-subfeatures $(y) ] ; - } - - if $(x[1]) < $(y[1]) - { - return true ; - } - } -} - - -local rule abbreviate-dashed ( string ) -{ - local r ; - for local part in [ regex.split $(string) - ] - { - r += [ string.abbreviate $(part) ] ; - } - return $(r:J=-) ; -} - - -local rule identity ( string ) -{ - return $(string) ; -} - - -if --abbreviate-paths in [ modules.peek : ARGV ] -{ - .abbrev = abbreviate-dashed ; -} -else -{ - .abbrev = identity ; -} - - -# Returns a path representing the given expanded property set. -# -rule as-path ( properties * ) -{ - local entry = .result.$(properties:J=-) ; - - if ! $($(entry)) - { - # Trim redundancy. - properties = [ feature.minimize $(properties) ] ; - - # Sort according to path-order. - properties = [ sequence.insertion-sort $(properties) : path-order ] ; - - local components ; - for local p in $(properties) - { - if $(p:G) - { - local f = [ utility.ungrist $(p:G) ] ; - p = $(f)-$(p:G=) ; - } - components += [ $(.abbrev) $(p) ] ; - } - - $(entry) = $(components:J=/) ; - } - - return $($(entry)) ; -} - - -# Exit with error if property is not valid. -# -local rule validate1 ( property ) -{ - local msg ; - if $(property:G) - { - local feature = $(property:G) ; - local value = $(property:G=) ; - - if ! [ feature.valid $(feature) ] - { - # Ungrist for better error messages. - feature = [ utility.ungrist $(property:G) ] ; - msg = "unknown feature '$(feature)'" ; - } - else if $(value) && ! free in [ feature.attributes $(feature) ] - { - feature.validate-value-string $(feature) $(value) ; - } - else if ! ( $(value) || ( optional in [ feature.attributes $(feature) ] ) ) - { - # Ungrist for better error messages. - feature = [ utility.ungrist $(property:G) ] ; - msg = "No value specified for feature '$(feature)'" ; - } - } - else - { - local feature = [ feature.implied-feature $(property) ] ; - feature.validate-value-string $(feature) $(property) ; - } - if $(msg) - { - errors.error "Invalid property "'$(property:J=" ")'": "$(msg:J=" "). ; - } -} - - -rule validate ( properties * ) -{ - for local p in $(properties) - { - validate1 $(p) ; - } -} - - -rule validate-property-sets ( property-sets * ) -{ - for local s in $(property-sets) - { - validate [ feature.split $(s) ] ; - } -} - - -# Expands any implicit property values in the given property 'specification' so -# they explicitly state their feature. -# -rule make ( specification * ) -{ - local result ; - for local e in $(specification) - { - if $(e:G) - { - result += $(e) ; - } - else if [ feature.is-implicit-value $(e) ] - { - local feature = [ feature.implied-feature $(e) ] ; - result += $(feature)$(e) ; - } - else - { - errors.error "'$(e)' is not a valid property specification" ; - } - } - return $(result) ; -} - - -# Returns a property set containing all the elements in 'properties' that do not -# have their attributes listed in 'attributes'. -# -rule remove ( attributes + : properties * ) -{ - local result ; - for local e in $(properties) - { - if ! [ set.intersection $(attributes) : [ feature.attributes $(e:G) ] ] - { - result += $(e) ; - } - } - return $(result) ; -} - - -# Returns a property set containing all the elements in 'properties' that have -# their attributes listed in 'attributes'. 
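-# E.g. (hypothetical call): [ take free : <define>FOO <rtti>on ] would keep -# only <define>FOO, assuming <define> is declared as a free feature and -# <rtti> is not.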
-# -rule take ( attributes + : properties * ) -{ - local result ; - for local e in $(properties) - { - if [ set.intersection $(attributes) : [ feature.attributes $(e:G) ] ] - { - result += $(e) ; - } - } - return $(result) ; -} - - -# Selects properties corresponding to any of the given features. -# -rule select ( features * : properties * ) -{ - local result ; - - # Add any missing angle brackets. - local empty = "" ; - features = $(empty:G=$(features)) ; - - for local p in $(properties) - { - if $(p:G) in $(features) - { - result += $(p) ; - } - } - return $(result) ; -} - - -# Returns a modified version of properties with all values of the given feature -# replaced by the given value. If 'value' is empty the feature will be removed. -# -rule change ( properties * : feature value ? ) -{ - local result ; - for local p in $(properties) - { - if $(p:G) = $(feature) - { - result += $(value:G=$(feature)) ; - } - else - { - result += $(p) ; - } - } - return $(result) ; -} - - -# If 'property' is a conditional property, returns the condition and the -# property. E.g. <variant>debug,<toolset>gcc:<inlining>full will become -# <variant>debug,<toolset>gcc <inlining>full. Otherwise, returns an empty -# string. -# -rule split-conditional ( property ) -{ - local m = [ MATCH "(.+):<(.+)" : $(property) ] ; - if $(m) - { - return $(m[1]) <$(m[2]) ; - } -} - - -# Interpret all path properties in 'properties' as relative to 'path'. The -# property values are assumed to be in system-specific form, and will be -# translated into normalized form. -# -rule translate-paths ( properties * : path ) -{ - local result ; - for local p in $(properties) - { - local split = [ split-conditional $(p) ] ; - local condition = "" ; - if $(split) - { - condition = $(split[1]): ; - p = $(split[2]) ; - } - - if path in [ feature.attributes $(p:G) ] - { - local values = [ regex.split $(p:TG=) "&&" ] ; - local t ; - for local v in $(values) - { - t += [ path.root [ path.make $(v) ] $(path) ] ; - } - t = $(t:J="&&") ; - result += $(condition)$(t:TG=$(p:G)) ; - } - else - { - result += $(condition)$(p) ; - } - } - return $(result) ; -} - - -# Assumes that all feature values that start with '@' are names of rules, used -# in 'context-module'. Such rules can be either local to the module or global. -# Converts such values into 'indirect-rule' format (see indirect.jam), so they -# can be called from other modules. Does nothing for such values that are -# already in the 'indirect-rule' format. -# -rule translate-indirect ( specification * : context-module ) -{ - local result ; - for local p in $(specification) - { - local m = [ MATCH ^@(.+) : $(p:G=) ] ; - if $(m) - { - local v ; - if [ MATCH "^([^%]*)%([^%]+)$" : $(m) ] - { - # Rule is already in the 'indirect-rule' format. - v = $(m) ; - } - else - { - if ! [ MATCH ".*([.]).*" : $(m) ] - { - # This is an unqualified rule name. The user might want to - # set flags on this rule name and toolset.flag - # auto-qualifies it. Need to do the same here so flag - # setting works. We can arrange for toolset.flag to *not* - # auto-qualify the argument but then two rules defined in - # two Jamfiles would conflict. - m = $(context-module).$(m) ; - } - v = [ indirect.make $(m) : $(context-module) ] ; - } - - v = @$(v) ; - result += $(v:G=$(p:G)) ; - } - else - { - result += $(p) ; - } - } - return $(result) ; -} - - -# Binds all dependency properties in a list relative to the given project. 
-# Targets with absolute paths will be left unchanged and targets which have a -# project specified will have the path to the project interpreted relative to -# the specified location. -# -rule translate-dependencies ( specification * : project-id : location ) -{ - local result ; - for local p in $(specification) - { - local split = [ split-conditional $(p) ] ; - local condition = "" ; - if $(split) - { - condition = $(split[1]): ; - p = $(split[2]) ; - } - if dependency in [ feature.attributes $(p:G) ] - { - local split-target = [ regex.match (.*)//(.*) : $(p:G=) ] ; - if $(split-target) - { - local rooted = [ path.root [ path.make $(split-target[1]) ] - [ path.root $(location) [ path.pwd ] ] ] ; - result += $(condition)$(p:G)$(rooted)//$(split-target[2]) ; - } - else if [ path.is-rooted $(p:G=) ] - { - result += $(condition)$(p) ; - } - else - { - result += $(condition)$(p:G)$(project-id)//$(p:G=) ; - } - } - else - { - result += $(condition)$(p) ; - } - } - return $(result) ; -} - - -# Class maintaining a property set -> string mapping. -# -class property-map -{ - import errors ; - import numbers ; - import sequence ; - - rule __init__ ( ) - { - self.next-flag = 1 ; - } - - # Associate 'value' with 'properties'. - # - rule insert ( properties + : value ) - { - self.all-flags += $(self.next-flag) ; - self.properties.$(self.next-flag) = $(properties) ; - self.value.$(self.next-flag) = $(value) ; - - self.next-flag = [ numbers.increment $(self.next-flag) ] ; - } - - # Returns the value associated with 'properties' or any subset of it. If - # more than one subset has a value assigned to it, returns the value for the - # longest subset, if it is unique. - # - rule find ( properties + ) - { - return [ find-replace $(properties) ] ; - } - - # Returns the value associated with 'properties'. If 'value' parameter is - # given, replaces the found value. - # - rule find-replace ( properties + : value ? ) - { - # First find all matches. - local matches ; - local match-ranks ; - for local i in $(self.all-flags) - { - if $(self.properties.$(i)) in $(properties) - { - matches += $(i) ; - match-ranks += [ sequence.length $(self.properties.$(i)) ] ; - } - } - local best = [ sequence.select-highest-ranked $(matches) - : $(match-ranks) ] ; - if $(best[2]) - { - errors.error "Ambiguous key $(properties:J= :E=)" ; - } - local original = $(self.value.$(best)) ; - if $(value) - { - self.value.$(best) = $(value) ; - } - return $(original) ; - } -} - - -rule __test__ ( ) -{ - import assert ; - import "class" : new ; - import errors : try catch ; - import feature ; - - # Local rules must be explicitly re-imported. 
- import property : path-order abbreviate-dashed ; - - feature.prepare-test property-test-temp ; - - feature.feature toolset : gcc : implicit symmetric ; - feature.subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4 3.0 3.0.1 - 3.0.2 : optional ; - feature.feature define : : free ; - feature.feature runtime-link : dynamic static : symmetric link-incompatible ; - feature.feature optimization : on off ; - feature.feature variant : debug release : implicit composite symmetric ; - feature.feature rtti : on off : link-incompatible ; - - feature.compose <variant>debug : <define>_DEBUG <optimization>off ; - feature.compose <variant>release : <define>NDEBUG <optimization>on ; - - validate <toolset>gcc <toolset>gcc-3.0.1 : $(test-space) ; - - assert.true path-order $(test-space) debug <define>foo ; - assert.false path-order $(test-space) <define>foo debug ; - assert.true path-order $(test-space) gcc debug ; - assert.false path-order $(test-space) debug gcc ; - assert.true path-order $(test-space) <optimization>on <rtti>on ; - assert.false path-order $(test-space) <rtti>on <optimization>on ; - - assert.result-set-equal <toolset>gcc <rtti>off <define>FOO - : refine <toolset>gcc <rtti>off - : <define>FOO - : $(test-space) ; - - assert.result-set-equal <toolset>gcc <optimization>on - : refine <toolset>gcc <optimization>off - : <optimization>on - : $(test-space) ; - - assert.result-set-equal <toolset>gcc <rtti>off - : refine <toolset>gcc : <rtti>off : $(test-space) ; - - assert.result-set-equal <toolset>gcc <rtti>off <rtti>off:<define>FOO - : refine <toolset>gcc : <rtti>off <rtti>off:<define>FOO - : $(test-space) ; - - assert.result-set-equal <toolset>gcc:<define>foo <toolset>gcc:<define>bar - : refine <toolset>gcc:<define>foo : <toolset>gcc:<define>bar - : $(test-space) ; - - assert.result <define>MY_RELEASE - : evaluate-conditionals-in-context - <variant>release,<rtti>off:<define>MY_RELEASE - : <toolset>gcc <variant>release <rtti>off ; - - assert.result debug - : as-path <optimization>off <variant>debug - : $(test-space) ; - - assert.result gcc/debug/rtti-off - : as-path <toolset>gcc <optimization>off <rtti>off <variant>debug - : $(test-space) ; - - assert.result optmz-off : abbreviate-dashed optimization-off ; - assert.result rntm-lnk-sttc : abbreviate-dashed runtime-link-static ; - - try ; - validate <feature>value : $(test-space) ; - catch "Invalid property '<feature>value': unknown feature 'feature'." ; - - try ; - validate <rtti>default : $(test-space) ; - catch \"default\" is not a known value of feature <rtti> ; - - validate <define>WHATEVER : $(test-space) ; - - try ; - validate <rtti> : $(test-space) ; - catch "Invalid property '<rtti>': No value specified for feature 'rtti'." 
; - - try ; - validate value : $(test-space) ; - catch "value" is not a value of an implicit feature ; - - assert.result-set-equal <rtti>on - : remove free implicit : <toolset>gcc <define>foo <rtti>on : $(test-space) ; - - assert.result-set-equal <include>a - : select include : <include>a <toolset>gcc ; - - assert.result-set-equal <include>a - : select include bar : <include>a <toolset>gcc ; - - assert.result-set-equal <include>a <toolset>gcc - : select include <bar> <toolset> : <include>a <toolset>gcc ; - - assert.result-set-equal <toolset>kylix <include>a - : change <toolset>gcc <include>a : <toolset> kylix ; - - pm = [ new property-map ] ; - $(pm).insert <toolset>gcc : o ; - $(pm).insert <toolset>gcc <os>NT : obj ; - $(pm).insert <toolset>gcc <os>CYGWIN : obj ; - - assert.equal o : [ $(pm).find <toolset>gcc ] ; - - assert.equal obj : [ $(pm).find <toolset>gcc <os>NT ] ; - - try ; - $(pm).find <toolset>gcc <os>NT <os>CYGWIN ; - catch "Ambiguous key <toolset>gcc <os>NT <os>CYGWIN" ; - - # Test ordinary properties. - assert.result : split-conditional <toolset>gcc ; - - # Test properties with ":". - assert.result : split-conditional <define>FOO=A::B ; - - # Test conditional feature. - assert.result-set-equal <toolset>gcc,<toolset-gcc:version>3.0 <define>FOO - : split-conditional <toolset>gcc,<toolset-gcc:version>3.0:<define>FOO ; - - feature.finish-test property-test-temp ; -} diff --git a/jam-files/boost-build/build/property.py b/jam-files/boost-build/build/property.py deleted file mode 100644 index c4b13dbc..00000000 --- a/jam-files/boost-build/build/property.py +++ /dev/null @@ -1,593 +0,0 @@ -# Status: ported, except for tests and --abbreviate-paths. -# Base revision: 64070 -# -# Copyright 2001, 2002, 2003 Dave Abrahams -# Copyright 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import re -from b2.util.utility import * -from b2.build import feature -from b2.util import sequence, qualify_jam_action -import b2.util.set -from b2.manager import get_manager - -__re_two_ampersands = re.compile ('&&') -__re_comma = re.compile (',') -__re_split_condition = re.compile ('(.*):(<.*)') -__re_split_conditional = re.compile (r'(.+):<(.+)') -__re_colon = re.compile (':') -__re_has_condition = re.compile (r':<') -__re_separate_condition_and_property = re.compile (r'(.*):(<.*)') - -class Property(object): - - __slots__ = ('_feature', '_value', '_condition') - - def __init__(self, f, value, condition = []): - if type(f) == type(""): - f = feature.get(f) - # At present, single property has a single value. 
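- # The asserts below enforce this: list values are rejected outright, and - # a ':' may appear in a value only for free features.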
- assert type(value) != type([]) - assert(f.free() or value.find(':') == -1) - self._feature = f - self._value = value - self._condition = condition - - def feature(self): - return self._feature - - def value(self): - return self._value - - def condition(self): - return self._condition - - def to_raw(self): - result = "<" + self._feature.name() + ">" + str(self._value) - if self._condition: - result = ",".join(str(p) for p in self._condition) + ':' + result - return result - - def __str__(self): - return self.to_raw() - - def __hash__(self): - # FIXME: consider if this class should be value-is-identity one - return hash((self._feature, self._value, tuple(self._condition))) - - def __cmp__(self, other): - return cmp((self._feature, self._value, self._condition), - (other._feature, other._value, other._condition)) - - -def create_from_string(s, allow_condition=False): - - condition = [] - import types - if not isinstance(s, types.StringType): - print type(s) - if __re_has_condition.search(s): - - if not allow_condition: - raise BaseException("Conditional property is not allowed in this context") - - m = __re_separate_condition_and_property.match(s) - condition = m.group(1) - s = m.group(2) - - # FIXME: break dependency cycle - from b2.manager import get_manager - - feature_name = get_grist(s) - if not feature_name: - if feature.is_implicit_value(s): - f = feature.implied_feature(s) - value = s - else: - raise get_manager().errors()("Invalid property '%s' -- unknown feature" % s) - else: - f = feature.get(feature_name) - - value = get_value(s) - if not value: - get_manager().errors()("Invalid property '%s' -- no value specified" % s) - - - if condition: - condition = [create_from_string(x) for x in condition.split(',')] - - return Property(f, value, condition) - -def create_from_strings(string_list, allow_condition=False): - - return [create_from_string(s, allow_condition) for s in string_list] - -def reset (): - """ Clear the module state. This is mainly for testing purposes. - """ - global __results - - # A cache of results from as_path - __results = {} - -reset () - - -def path_order (x, y): - """ Helper for as_path, below. Orders properties with the implicit ones - first, and within the two sections in alphabetical order of feature - name. - """ - if x == y: - return 0 - - xg = get_grist (x) - yg = get_grist (y) - - if yg and not xg: - return -1 - - elif xg and not yg: - return 1 - - else: - if not xg: - x = feature.expand_subfeatures([x]) - y = feature.expand_subfeatures([y]) - - if x < y: - return -1 - elif x > y: - return 1 - else: - return 0 - -def identify(string): - return string - -# Uses Property -def refine (properties, requirements): - """ Refines 'properties' by overriding any non-free properties - for which a different value is specified in 'requirements'. - Conditional requirements are just added without modification. - Returns the resulting list of properties. - """ - # The result has no duplicates, so we store it in a set - result = set() - - # Records all requirements. - required = {} - - # All the elements of requirements should be present in the result - # Record them so that we can handle 'properties'. - for r in requirements: - # Don't consider conditional requirements. 
- if not r.condition(): - required[r.feature()] = r - - for p in properties: - # Skip conditional properties. - if p.condition(): - result.add(p) - # No processing for free properties. - elif p.feature().free(): - result.add(p) - else: - if required.has_key(p.feature()): - result.add(required[p.feature()]) - else: - result.add(p) - - return sequence.unique(list(result) + requirements) - -def translate_paths (properties, path): - """ Interpret all path properties in 'properties' as relative to 'path'. - The property values are assumed to be in system-specific form, and - will be translated into normalized form. - """ - result = [] - - for p in properties: - - if p.feature().path(): - values = __re_two_ampersands.split(p.value()) - - new_value = "&&".join(os.path.join(path, v) for v in values) - - if new_value != p.value(): - result.append(Property(p.feature(), new_value, p.condition())) - else: - result.append(p) - - else: - result.append (p) - - return result - -def translate_indirect(properties, context_module): - """Assumes that all feature values that start with '@' are - names of rules, used in 'context-module'. Such rules can be - either local to the module or global. Qualifies local rules - with the name of the module.""" - result = [] - for p in properties: - if p.value()[0] == '@': - q = qualify_jam_action(p.value()[1:], context_module) - get_manager().engine().register_bjam_action(q) - result.append(Property(p.feature(), '@' + q, p.condition())) - else: - result.append(p) - - return result - -def validate (properties): - """ Exit with error if any of the properties is not valid. - 'properties' may be a single property or a sequence of properties. - """ - - if isinstance (properties, str): - __validate1 (properties) - else: - for p in properties: - __validate1 (p) - -def expand_subfeatures_in_conditions (properties): - - result = [] - for p in properties: - - if not p.condition(): - result.append(p) - else: - expanded = [] - for c in p.condition(): - - if c.feature().name().startswith("toolset") or c.feature().name() == "os": - # It is common that a condition includes a toolset which - # was never defined, or mentions subfeatures which - # were never defined. In that case, validation would - # only produce a spurious error, so don't validate. - expanded.extend(feature.expand_subfeatures ([c], True)) - else: - expanded.extend(feature.expand_subfeatures([c])) - - result.append(Property(p.feature(), p.value(), expanded)) - - return result - -# FIXME: this should go -def split_conditional (property): - """ If 'property' is a conditional property, returns the - condition and the property, e.g. - <variant>debug,<toolset>gcc:<inlining>full will become - <variant>debug,<toolset>gcc <inlining>full. - Otherwise, returns None. - """ - m = __re_split_conditional.match (property) - - if m: - return (m.group (1), '<' + m.group (2)) - - return None - - -def select (features, properties): - """ Selects properties which correspond to any of the given features. - """ - result = [] - - # Add any missing angle brackets. - features = add_grist (features) - - return [p for p in properties if get_grist(p) in features] - -def validate_property_sets (sets): - for s in sets: - validate(s.all()) - -def evaluate_conditionals_in_context (properties, context): - """ Removes all conditional properties whose conditions are not met. - For those with met conditions, removes the condition.
Properties - in conditions are looked up in 'context'. - """ - base = [] - conditional = [] - - for p in properties: - if p.condition(): - conditional.append (p) - else: - base.append (p) - - result = base[:] - for p in conditional: - - # Evaluate the condition. - # FIXME: probably inefficient - if all(x in context for x in p.condition()): - result.append(Property(p.feature(), p.value())) - - return result - - -def change (properties, feature, value = None): - """ Returns a modified version of properties with all values of the - given feature replaced by the given value. - If 'value' is None the feature will be removed. - """ - result = [] - - feature = add_grist (feature) - - for p in properties: - if get_grist (p) == feature: - if value: - result.append (replace_grist (value, feature)) - - else: - result.append (p) - - return result - - -################################################################ -# Private functions - -def __validate1 (property): - """ Exit with error if property is not valid. - """ - if not property.feature().free(): - feature.validate_value_string (property.feature(), property.value()) - - -################################################################### -# Still to port. -# Original lines are prefixed with "# " -# -# -# import utility : ungrist ; -# import sequence : unique ; -# import errors : error ; -# import feature ; -# import regex ; -# import sequence ; -# import set ; -# import path ; -# import assert ; -# -# - - -# rule validate-property-sets ( property-sets * ) -# { -# for local s in $(property-sets) -# { -# validate [ feature.split $(s) ] ; -# } -# } -# - -def remove(attributes, properties): - """Returns a property set which includes all the elements - in 'properties' that do not have attributes listed in 'attributes'.""" - - result = [] - for e in properties: - attributes_new = feature.attributes(get_grist(e)) - has_common_features = 0 - for a in attributes_new: - if a in attributes: - has_common_features = 1 - break - - if not has_common_features: - result.append(e) - - return result - - -def take(attributes, properties): - """Returns a property set which includes all - properties in 'properties' that have any of 'attributes'.""" - result = [] - for e in properties: - if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))): - result.append(e) - return result - -def translate_dependencies(properties, project_id, location): - - result = [] - for p in properties: - - if not p.feature().dependency(): - result.append(p) - else: - v = p.value() - m = re.match("(.*)//(.*)", v) - if m: - rooted = m.group(1) - if rooted[0] == '/': - # Either a project id or an absolute path; do nothing. - pass - else: - rooted = os.path.join(os.getcwd(), location, rooted) - - result.append(Property(p.feature(), rooted + "//" + m.group(2), p.condition())) - - elif os.path.isabs(v): - result.append(p) - else: - result.append(Property(p.feature(), project_id + "//" + v, p.condition())) - - return result - - -class PropertyMap: - """ Class which maintains a property set -> string mapping. - """ - def __init__ (self): - self.__properties = [] - self.__values = [] - - def insert (self, properties, value): - """ Associate 'value' with 'properties'. - """ - self.__properties.append(properties) - self.__values.append(value) - - def find (self, properties): - """ Return the value associated with 'properties' - or any subset of it. If more than one - subset has a value assigned to it, return the - value for the longest subset, if it is unique.
- """ - return self.find_replace (properties) - - def find_replace(self, properties, value=None): - matches = [] - match_ranks = [] - - for i in range(0, len(self.__properties)): - p = self.__properties[i] - - if b2.util.set.contains (p, properties): - matches.append (i) - match_ranks.append(len(p)) - - best = sequence.select_highest_ranked (matches, match_ranks) - - if not best: - return None - - if len (best) > 1: - raise NoBestMatchingAlternative () - - best = best [0] - - original = self.__values[best] - - if value: - self.__values[best] = value - - return original - -# local rule __test__ ( ) -# { -# import errors : try catch ; -# import feature ; -# import feature : feature subfeature compose ; -# -# # local rules must be explicitly re-imported -# import property : path-order ; -# -# feature.prepare-test property-test-temp ; -# -# feature toolset : gcc : implicit symmetric ; -# subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4 -# 3.0 3.0.1 3.0.2 : optional ; -# feature define : : free ; -# feature runtime-link : dynamic static : symmetric link-incompatible ; -# feature optimization : on off ; -# feature variant : debug release : implicit composite symmetric ; -# feature rtti : on off : link-incompatible ; -# -# compose <variant>debug : <define>_DEBUG <optimization>off ; -# compose <variant>release : <define>NDEBUG <optimization>on ; -# -# import assert ; -# import "class" : new ; -# -# validate <toolset>gcc <toolset>gcc-3.0.1 : $(test-space) ; -# -# assert.result <toolset>gcc <rtti>off <define>FOO -# : refine <toolset>gcc <rtti>off -# : <define>FOO -# : $(test-space) -# ; -# -# assert.result <toolset>gcc <optimization>on -# : refine <toolset>gcc <optimization>off -# : <optimization>on -# : $(test-space) -# ; -# -# assert.result <toolset>gcc <rtti>off -# : refine <toolset>gcc : <rtti>off : $(test-space) -# ; -# -# assert.result <toolset>gcc <rtti>off <rtti>off:<define>FOO -# : refine <toolset>gcc : <rtti>off <rtti>off:<define>FOO -# : $(test-space) -# ; -# -# assert.result <toolset>gcc:<define>foo <toolset>gcc:<define>bar -# : refine <toolset>gcc:<define>foo : <toolset>gcc:<define>bar -# : $(test-space) -# ; -# -# assert.result <define>MY_RELEASE -# : evaluate-conditionals-in-context -# <variant>release,<rtti>off:<define>MY_RELEASE -# : <toolset>gcc <variant>release <rtti>off -# -# ; -# -# try ; -# validate <feature>value : $(test-space) ; -# catch "Invalid property '<feature>value': unknown feature 'feature'." ; -# -# try ; -# validate <rtti>default : $(test-space) ; -# catch \"default\" is not a known value of feature <rtti> ; -# -# validate <define>WHATEVER : $(test-space) ; -# -# try ; -# validate <rtti> : $(test-space) ; -# catch "Invalid property '<rtti>': No value specified for feature 'rtti'." 
; -# -# try ; -# validate value : $(test-space) ; -# catch "value" is not a value of an implicit feature ; -# -# -# assert.result <rtti>on -# : remove free implicit : <toolset>gcc <define>foo <rtti>on : $(test-space) ; -# -# assert.result <include>a -# : select include : <include>a <toolset>gcc ; -# -# assert.result <include>a -# : select include bar : <include>a <toolset>gcc ; -# -# assert.result <include>a <toolset>gcc -# : select include <bar> <toolset> : <include>a <toolset>gcc ; -# -# assert.result <toolset>kylix <include>a -# : change <toolset>gcc <include>a : <toolset> kylix ; -# -# # Test ordinary properties -# assert.result -# : split-conditional <toolset>gcc -# ; -# -# # Test properties with ":" -# assert.result -# : split-conditional <define>FOO=A::B -# ; -# -# # Test conditional feature -# assert.result <toolset>gcc,<toolset-gcc:version>3.0 <define>FOO -# : split-conditional <toolset>gcc,<toolset-gcc:version>3.0:<define>FOO -# ; -# -# feature.finish-test property-test-temp ; -# } -# - diff --git a/jam-files/boost-build/build/property_set.py b/jam-files/boost-build/build/property_set.py deleted file mode 100644 index f12eb90c..00000000 --- a/jam-files/boost-build/build/property_set.py +++ /dev/null @@ -1,449 +0,0 @@ -# Status: ported. -# Base revision: 40480 - -# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -from b2.util.utility import * -import property, feature, string -import b2.build.feature -from b2.exceptions import * -from b2.util.sequence import unique -from b2.util.set import difference -from b2.util import cached - -from b2.manager import get_manager - - -def reset (): - """ Clear the module state. This is mainly for testing purposes. - """ - global __cache - - # A cache of property sets - # TODO: use a map of weak refs? - __cache = {} - -reset () - - -def create (raw_properties = []): - """ Creates a new 'PropertySet' instance for the given raw properties, - or returns an already existing one. - """ - # FIXME: propagate to callers. - if len(raw_properties) > 0 and isinstance(raw_properties[0], property.Property): - x = raw_properties - else: - x = [property.create_from_string(ps) for ps in raw_properties] - x.sort() - x = unique (x) - - # FIXME: can we do better, e.g. by directly computing - # has value of the list? - key = tuple(x) - - if not __cache.has_key (key): - __cache [key] = PropertySet(x) - - return __cache [key] - -def create_with_validation (raw_properties): - """ Creates new 'PropertySet' instances after checking - that all properties are valid and converting incidental - properties into gristed form. - """ - properties = [property.create_from_string(s) for s in raw_properties] - property.validate(properties) - - return create(properties) - -def empty (): - """ Returns PropertySet with empty set of properties. 
- """ - return create () - -def create_from_user_input(raw_properties, jamfile_module, location): - """Creates a property-set from the input given by the user, in the - context of 'jamfile-module' at 'location'""" - - properties = property.create_from_strings(raw_properties, True) - properties = property.translate_paths(properties, location) - properties = property.translate_indirect(properties, jamfile_module) - - project_id = get_manager().projects().attributeDefault(jamfile_module, 'id', None) - if not project_id: - project_id = os.path.abspath(location) - properties = property.translate_dependencies(properties, project_id, location) - properties = property.expand_subfeatures_in_conditions(properties) - return create(properties) - - -def refine_from_user_input(parent_requirements, specification, jamfile_module, - location): - """Refines requirements with requirements provided by the user. - Specially handles "-<property>value" syntax in specification - to remove given requirements. - - parent-requirements -- property-set object with requirements - to refine - - specification -- string list of requirements provided by the use - - project-module -- the module to which context indirect features - will be bound. - - location -- the path to which path features are relative.""" - - - if not specification: - return parent_requirements - - - add_requirements = [] - remove_requirements = [] - - for r in specification: - if r[0] == '-': - remove_requirements.append(r[1:]) - else: - add_requirements.append(r) - - if remove_requirements: - # Need to create property set, so that path features - # and indirect features are translated just like they - # are in project requirements. - ps = create_from_user_input(remove_requirements, - jamfile_module, location) - - parent_requirements = create(difference(parent_requirements.all(), - ps.all())) - specification = add_requirements - - requirements = create_from_user_input(specification, - jamfile_module, location) - - return parent_requirements.refine(requirements) - -class PropertySet: - """ Class for storing a set of properties. - - there's 1<->1 correspondence between identity and value. No - two instances of the class are equal. To maintain this property, - the 'PropertySet.create' rule should be used to create new instances. - Instances are immutable. - - - each property is classified with regard to it's effect on build - results. Incidental properties have no effect on build results, from - Boost.Build point of view. Others are either free, or non-free, which we - call 'base'. Each property belong to exactly one of those categories and - it's possible to get list of properties in each category. - - In addition, it's possible to get list of properties with specific - attribute. - - - several operations, like and refine and as_path are provided. They all use - caching whenever possible. - """ - def __init__ (self, properties = []): - - - raw_properties = [] - for p in properties: - raw_properties.append(p.to_raw()) - - self.all_ = properties - self.all_raw_ = raw_properties - self.all_set_ = set(properties) - - self.incidental_ = [] - self.free_ = [] - self.base_ = [] - self.dependency_ = [] - self.non_dependency_ = [] - self.conditional_ = [] - self.non_conditional_ = [] - self.propagated_ = [] - self.link_incompatible = [] - - # A cache of refined properties. - self.refined_ = {} - - # A cache of property sets created by adding properties to this one. - self.added_ = {} - - # Cache for the default properties. 
- self.defaults_ = None - - # Cache for the expanded properties. - self.expanded_ = None - - # Cache for the expanded composite properties - self.composites_ = None - - # Cache for property set with expanded subfeatures - self.subfeatures_ = None - - # Cache for the property set containing propagated properties. - self.propagated_ps_ = None - - # A map of features to its values. - self.feature_map_ = None - - # A tuple (target path, is relative to build directory) - self.target_path_ = None - - self.as_path_ = None - - # A cache for already evaluated sets. - self.evaluated_ = {} - - for p in raw_properties: - if not get_grist (p): - raise BaseException ("Invalid property: '%s'" % p) - - att = feature.attributes (get_grist (p)) - - if 'propagated' in att: - self.propagated_.append (p) - - if 'link_incompatible' in att: - self.link_incompatible.append (p) - - for p in properties: - - # A feature can be both incidental and free, - # in which case we add it to incidental. - if p.feature().incidental(): - self.incidental_.append(p) - elif p.feature().free(): - self.free_.append(p) - else: - self.base_.append(p) - - if p.condition(): - self.conditional_.append(p) - else: - self.non_conditional_.append(p) - - if p.feature().dependency(): - self.dependency_.append (p) - else: - self.non_dependency_.append (p) - - - def all(self): - return self.all_ - - def raw (self): - """ Returns the list of stored properties. - """ - return self.all_raw_ - - def __str__(self): - return ' '.join(str(p) for p in self.all_) - - def base (self): - """ Returns properties that are neither incidental nor free. - """ - return self.base_ - - def free (self): - """ Returns free properties which are not dependency properties. - """ - return self.free_ - - def non_free(self): - return self.base_ + self.incidental_ - - def dependency (self): - """ Returns dependency properties. - """ - return self.dependency_ - - def non_dependency (self): - """ Returns properties that are not dependencies. - """ - return self.non_dependency_ - - def conditional (self): - """ Returns conditional properties. - """ - return self.conditional_ - - def non_conditional (self): - """ Returns properties that are not conditional. - """ - return self.non_conditional_ - - def incidental (self): - """ Returns incidental properties. - """ - return self.incidental_ - - def refine (self, requirements): - """ Refines this set's properties using the requirements passed as an argument. 
- """ - assert isinstance(requirements, PropertySet) - if not self.refined_.has_key (requirements): - r = property.refine(self.all_, requirements.all_) - - self.refined_[requirements] = create(r) - - return self.refined_[requirements] - - def expand (self): - if not self.expanded_: - expanded = feature.expand(self.all_) - self.expanded_ = create(expanded) - return self.expanded_ - - def expand_subfeatures(self): - if not self.subfeatures_: - self.subfeatures_ = create(feature.expand_subfeatures(self.all_)) - return self.subfeatures_ - - def evaluate_conditionals(self, context=None): - if not context: - context = self - - if not self.evaluated_.has_key(context): - # FIXME: figure why the call messes up first parameter - self.evaluated_[context] = create( - property.evaluate_conditionals_in_context(self.all(), context)) - - return self.evaluated_[context] - - def propagated (self): - if not self.propagated_ps_: - self.propagated_ps_ = create (self.propagated_) - return self.propagated_ps_ - - def add_defaults (self): - # FIXME: this caching is invalidated when new features - # are declare inside non-root Jamfiles. - if not self.defaults_: - expanded = feature.add_defaults(self.all_) - self.defaults_ = create(expanded) - return self.defaults_ - - def as_path (self): - if not self.as_path_: - - def path_order (p1, p2): - - i1 = p1.feature().implicit() - i2 = p2.feature().implicit() - - if i1 != i2: - return i2 - i1 - else: - return cmp(p1.feature().name(), p2.feature().name()) - - # trim redundancy - properties = feature.minimize(self.base_) - - # sort according to path_order - properties.sort (path_order) - - components = [] - for p in properties: - if p.feature().implicit(): - components.append(p.value()) - else: - components.append(p.feature().name() + "-" + p.value()) - - self.as_path_ = '/'.join (components) - - return self.as_path_ - - def target_path (self): - """ Computes the target path that should be used for - target with these properties. - Returns a tuple of - - the computed path - - if the path is relative to build directory, a value of - 'true'. - """ - if not self.target_path_: - # The <location> feature can be used to explicitly - # change the location of generated targets - l = self.get ('<location>') - if l: - computed = l[0] - is_relative = False - - else: - p = self.as_path () - - # Really, an ugly hack. Boost regression test system requires - # specific target paths, and it seems that changing it to handle - # other directory layout is really hard. For that reason, - # we teach V2 to do the things regression system requires. - # The value o '<location-prefix>' is predended to the path. - prefix = self.get ('<location-prefix>') - - if prefix: - if len (prefix) > 1: - raise AlreadyDefined ("Two <location-prefix> properties specified: '%s'" % prefix) - - computed = os.path.join(prefix[0], p) - - else: - computed = p - - if not computed: - computed = "." - - is_relative = True - - self.target_path_ = (computed, is_relative) - - return self.target_path_ - - def add (self, ps): - """ Creates a new property set containing the properties in this one, - plus the ones of the property set passed as argument. - """ - if not self.added_.has_key(ps): - self.added_[ps] = create(self.all_ + ps.all()) - return self.added_[ps] - - def add_raw (self, properties): - """ Creates a new property set containing the properties in this one, - plus the ones passed as argument. - """ - return self.add (create (properties)) - - - def get (self, feature): - """ Returns all values of 'feature'. 
- """ - if type(feature) == type([]): - feature = feature[0] - if not isinstance(feature, b2.build.feature.Feature): - feature = b2.build.feature.get(feature) - - if not self.feature_map_: - self.feature_map_ = {} - - for v in self.all_: - if not self.feature_map_.has_key(v.feature()): - self.feature_map_[v.feature()] = [] - self.feature_map_[v.feature()].append(v.value()) - - return self.feature_map_.get(feature, []) - - @cached - def get_properties(self, feature): - """Returns all contained properties associated with 'feature'""" - - if not isinstance(feature, b2.build.feature.Feature): - feature = b2.build.feature.get(feature) - - result = [] - for p in self.all_: - if p.feature() == feature: - result.append(p) - return result - - def __contains__(self, item): - return item in self.all_set_ - diff --git a/jam-files/boost-build/build/readme.txt b/jam-files/boost-build/build/readme.txt deleted file mode 100644 index c3dddd8d..00000000 --- a/jam-files/boost-build/build/readme.txt +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2001, 2002 Dave Abrahams -Copyright 2002 Vladimir Prus -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -Development code for new build system. To run unit tests for jam code, execute: - - bjam --debug --build-system=test - -Comprehensive tests require Python. See ../test/readme.txt - - - diff --git a/jam-files/boost-build/build/scanner.jam b/jam-files/boost-build/build/scanner.jam deleted file mode 100644 index d6042ea2..00000000 --- a/jam-files/boost-build/build/scanner.jam +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2002, 2003, 2004, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Implements scanners: objects that compute implicit dependencies for -# files, such as includes in C++. -# -# Scanner has a regular expression used to find dependencies, some -# data needed to interpret those dependencies (for example, include -# paths), and a code which actually established needed relationship -# between actual jam targets. -# -# Scanner objects are created by actions, when they try to actualize -# virtual targets, passed to 'virtual-target.actualize' method and are -# then associated with actual targets. It is possible to use -# several scanners for a virtual-target. For example, a single source -# might be used by to compile actions, with different include paths. -# In this case, two different actual targets will be created, each -# having scanner of its own. -# -# Typically, scanners are created from target type and action's -# properties, using the rule 'get' in this module. Directly creating -# scanners is not recommended, because it might create many equvivalent -# but different instances, and lead in unneeded duplication of -# actual targets. However, actions can also create scanners in a special -# way, instead of relying on just target type. - -import "class" : new ; -import property virtual-target property-set ; -import errors : error ; - -# Base scanner class. -class scanner -{ - rule __init__ ( ) - { - } - - # Returns a pattern to use for scanning - rule pattern ( ) - { - error "method must be overriden" ; - } - - # Establish necessary relationship between targets, - # given actual target beeing scanned, and a list of - # pattern matches in that file. 
- rule process ( target : matches * ) - { - error "method must be overriden" ; - } -} - -# Registers a new generator class, specifying a set of -# properties relevant to this scanner. Ctor for that class -# should have one parameter: list of properties. -rule register ( scanner-class : relevant-properties * ) -{ - .registered += $(scanner-class) ; - .relevant-properties.$(scanner-class) = $(relevant-properties) ; -} - -# Common scanner class, which can be used when there's only one -# kind of includes (unlike C, where "" and <> includes have different -# search paths). -class common-scanner : scanner -{ - import scanner ; - rule __init__ ( includes * ) - { - scanner.__init__ ; - self.includes = $(includes) ; - } - - rule process ( target : matches * : binding ) - { - local target_path = [ NORMALIZE_PATH $(binding:D) ] ; - - NOCARE $(matches) ; - INCLUDES $(target) : $(matches) ; - SEARCH on $(matches) = $(target_path) $(self.includes:G=) ; - ISFILE $(matches) ; - - scanner.propagate $(__name__) : $(matches) : $(target) ; - } -} - - -# Returns an instance of previously registered scanner, -# with the specified properties. -rule get ( scanner-class : property-set ) -{ - if ! $(scanner-class) in $(.registered) - { - error "attempt to get unregisted scanner" ; - } - - local r = $(.rv-cache.$(property-set)) ; - if ! $(r) - { - r = [ property-set.create - [ property.select $(.relevant-properties.$(scanner-class)) : - [ $(property-set).raw ] ] ] ; - .rv-cache.$(property-set) = $(r) ; - } - - if ! $(scanner.$(scanner-class).$(r:J=-)) - { - scanner.$(scanner-class).$(r:J=-) = [ new $(scanner-class) [ $(r).raw ] ] ; - } - return $(scanner.$(scanner-class).$(r:J=-)) ; -} - - -# Installs the specified scanner on actual target 'target'. -rule install ( scanner : target - vtarget # virtual target from which 'target' was actualized -) -{ - HDRSCAN on $(target) = [ $(scanner).pattern ] ; - SCANNER on $(target) = $(scanner) ; - HDRRULE on $(target) = scanner.hdrrule ; - - # scanner reflects difference in properties affecting - # binding of 'target', which will be known when processing - # includes for it, will give information on how to - # interpret quoted includes. - HDRGRIST on $(target) = $(scanner) ; -} - -# Propagate scanner setting from 'including-target' to 'targets'. -rule propagate ( scanner : targets * : including-target ) -{ - HDRSCAN on $(targets) = [ on $(including-target) return $(HDRSCAN) ] ; - SCANNER on $(targets) = $(scanner) ; - HDRRULE on $(targets) = scanner.hdrrule ; - HDRGRIST on $(targets) = [ on $(including-target) return $(HDRGRIST) ] ; -} - - -rule hdrrule ( target : matches * : binding ) -{ - local scanner = [ on $(target) return $(SCANNER) ] ; - $(scanner).process $(target) : $(matches) : $(binding) ; -} -# hdrrule must be available at global scope so that it can be invoked -# by header scanning -IMPORT scanner : hdrrule : : scanner.hdrrule ; - - - - diff --git a/jam-files/boost-build/build/scanner.py b/jam-files/boost-build/build/scanner.py deleted file mode 100644 index 19f1431d..00000000 --- a/jam-files/boost-build/build/scanner.py +++ /dev/null @@ -1,158 +0,0 @@ -# Status: ported. -# Base revision: 45462 -# -# Copyright 2003 Dave Abrahams -# Copyright 2002, 2003, 2004, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Implements scanners: objects that compute implicit dependencies for -# files, such as includes in C++. 
-#
-# A scanner has a regular expression used to find dependencies, some data
-# needed to interpret those dependencies (for example, include paths), and
-# code which actually establishes the needed relationships between actual
-# jam targets.
-#
-# Scanner objects are created by actions when they try to actualize virtual
-# targets, are passed to the 'virtual-target.actualize' method, and are then
-# associated with actual targets. It is possible to use several scanners for
-# one virtual target. For example, a single source might be used by two
-# compile actions with different include paths. In this case, two different
-# actual targets will be created, each having a scanner of its own.
-#
-# Typically, scanners are created from the target type and action properties,
-# using the rule 'get' in this module. Directly creating scanners is not
-# recommended, because it might create many equivalent but different
-# instances, leading to needless duplication of actual targets. However,
-# actions can also create scanners in a special way, instead of relying on
-# just the target type.
-
-import property
-import bjam
-import os
-from b2.exceptions import *
-from b2.manager import get_manager
-
-def reset ():
-    """ Clear the module state. This is mainly for testing purposes.
-    """
-    global __scanners, __rv_cache, __scanner_cache
-
-    # Maps registered scanner classes to relevant properties.
-    __scanners = {}
-
-    # A cache of scanners. The key is: class_name.properties_tag, where
-    # properties_tag is the concatenation of all relevant properties,
-    # separated by '-'.
-    __scanner_cache = {}
-
-reset ()
-
-
-def register(scanner_class, relevant_properties):
-    """ Registers a new generator class, specifying a set of properties
-        relevant to this scanner. The constructor for that class should
-        have one parameter: a list of properties.
-    """
-    __scanners[str(scanner_class)] = relevant_properties
-
-def registered(scanner_class):
-    """ Returns true iff a scanner of that class is registered.
-    """
-    return __scanners.has_key(str(scanner_class))
-
-def get(scanner_class, properties):
-    """ Returns an instance of a previously registered scanner with the
-        specified properties.
-    """
-    scanner_name = str(scanner_class)
-
-    if not registered(scanner_name):
-        raise BaseException ("attempt to get an unregistered scanner: %s" % scanner_name)
-
-    relevant_properties = __scanners[scanner_name]
-    r = property.select(relevant_properties, properties)
-
-    scanner_id = scanner_name + '.' + '-'.join(r)
-
-    if not __scanner_cache.has_key(scanner_id):
-        __scanner_cache[scanner_id] = scanner_class(r)
-
-    return __scanner_cache[scanner_id]
-
-class Scanner:
-    """ Base scanner class.
-    """
-    def __init__ (self):
-        pass
-
-    def pattern (self):
-        """ Returns a pattern to use for scanning.
-        """
-        raise BaseException ("method must be overridden")
-
-    def process (self, target, matches):
-        """ Establish the necessary relationships between targets, given
            the actual target being scanned and a list of pattern matches
            in that file.
-        """
-        raise BaseException ("method must be overridden")
-
-
-# Common scanner class, which can be used when there's only one kind of
-# include (unlike C, where "" and <> includes have different search paths).
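Both the jam and Python versions expose the same two-method protocol: `pattern()` supplies the regular expression that jam's header scanning applies to each file, and `process()` turns the matches into dependency edges. A toy, self-contained illustration of that protocol (the class and regex here are mine, not part of b2); the real `CommonScanner` port follows below:

    import re

    class IncludeScanner(object):
        """Toy scanner: find quoted #include dependencies in C/C++ text."""
        REGEX = re.compile(r'^\s*#\s*include\s*"([^"]+)"', re.M)

        def __init__(self, include_paths):
            self.include_paths = include_paths  # where dependencies would be searched

        def pattern(self):
            return self.REGEX

        def process(self, text):
            # In b2 the matches become jam targets with SEARCH paths set;
            # here we simply return the discovered dependency names.
            return self.REGEX.findall(text)

    s = IncludeScanner(["include"])
    print(s.process('#include "util.h"\n#include <vector>\n'))  # ['util.h']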
-class CommonScanner(Scanner): - - def __init__ (self, includes): - Scanner.__init__(self) - self.includes = includes - - def process(self, target, matches, binding): - - target_path = os.path.normpath(os.path.dirname(binding[0])) - bjam.call("mark-included", target, matches) - - get_manager().engine().set_target_variable(matches, "SEARCH", - [target_path] + self.includes) - get_manager().scanners().propagate(self, matches) - -class ScannerRegistry: - - def __init__ (self, manager): - self.manager_ = manager - self.count_ = 0 - self.exported_scanners_ = {} - - def install (self, scanner, target, vtarget): - """ Installs the specified scanner on actual target 'target'. - vtarget: virtual target from which 'target' was actualized. - """ - engine = self.manager_.engine() - engine.set_target_variable(target, "HDRSCAN", scanner.pattern()) - if not self.exported_scanners_.has_key(scanner): - exported_name = "scanner_" + str(self.count_) - self.count_ = self.count_ + 1 - self.exported_scanners_[scanner] = exported_name - bjam.import_rule("", exported_name, scanner.process) - else: - exported_name = self.exported_scanners_[scanner] - - engine.set_target_variable(target, "HDRRULE", exported_name) - - # scanner reflects difference in properties affecting - # binding of 'target', which will be known when processing - # includes for it, will give information on how to - # interpret quoted includes. - engine.set_target_variable(target, "HDRGRIST", str(id(scanner))) - pass - - def propagate(self, scanner, targets): - engine = self.manager_.engine() - engine.set_target_variable(targets, "HDRSCAN", scanner.pattern()) - engine.set_target_variable(targets, "HDRRULE", - self.exported_scanners_[scanner]) - engine.set_target_variable(targets, "HDRGRIST", str(id(scanner))) - diff --git a/jam-files/boost-build/build/targets.jam b/jam-files/boost-build/build/targets.jam deleted file mode 100644 index a70532ce..00000000 --- a/jam-files/boost-build/build/targets.jam +++ /dev/null @@ -1,1659 +0,0 @@ -# Copyright Vladimir Prus 2002. -# Copyright Rene Rivera 2006. -# -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -# Supports 'abstract' targets, which are targets explicitly defined in a -# Jamfile. -# -# Abstract targets are represented by classes derived from 'abstract-target' -# class. The first abstract target is 'project-target', which is created for -# each Jamfile, and can be obtained by the 'target' rule in the Jamfile's module -# (see project.jam). -# -# Project targets keep a list of 'main-target' instances. A main target is what -# the user explicitly defines in a Jamfile. It is possible to have several -# definitions for a main target, for example to have different lists of sources -# for different platforms. So, main targets keep a list of alternatives. -# -# Each alternative is an instance of 'abstract-target'. When a main target -# subvariant is defined by some rule, that rule will decide what class to use, -# create an instance of that class and add it to the list of alternatives for -# the main target. -# -# Rules supplied by the build system will use only targets derived from -# 'basic-target' class, which will provide some default behaviour. There will be -# different classes derived from it such as 'make-target', created by the 'make' -# rule, and 'typed-target', created by rules such as 'exe' and 'lib'. 
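The diagram that follows compresses a lot; as plain code, the hierarchy it describes looks roughly like this (a Python skeleton of my own with bodies elided, not the actual port):

    class AbstractTarget(object):
        """Anything declared in a Jamfile: has a name and an owning project."""
        def __init__(self, name, project):
            self.name, self.project = name, project
        def generate(self, property_set):
            raise NotImplementedError   # 'generate(properties) = 0' in the diagram

    class MainTarget(AbstractTarget):
        """A user-visible target; collects one alternative per declaration."""
        def __init__(self, name, project):
            AbstractTarget.__init__(self, name, project)
            self.alternatives = []
        def add_alternative(self, target):
            self.alternatives.append(target)

    class ProjectTarget(AbstractTarget):
        """One per Jamfile; owns the main targets declared in it."""
        def __init__(self, name, project):
            AbstractTarget.__init__(self, name, project)
            self.main_targets = {}      # name -> MainTarget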
- -# -# +------------------------+ -# |abstract-target | -# +========================+ -# |name | -# |project | -# | | -# |generate(properties) = 0| -# +-----------+------------+ -# | -# ^ -# / \ -# +-+-+ -# | -# | -# +------------------------+------+------------------------------+ -# | | | -# | | | -# +----------+-----------+ +------+------+ +------+-------+ -# | project-target | | main-target | | basic-target | -# +======================+ 1 * +=============+ alternatives +==============+ -# | generate(properties) |o-----------+ generate |<>------------->| generate | -# | main-target | +-------------+ | construct = 0| -# +----------------------+ +--------------+ -# | -# ^ -# / \ -# +-+-+ -# | -# | -# ...--+----------------+------------------+----------------+---+ -# | | | | -# | | | | -# ... ---+-----+ +------+-------+ +------+------+ +--------+-----+ -# | | typed-target | | make-target | | stage-target | -# . +==============+ +=============+ +==============+ -# . | construct | | construct | | construct | -# +--------------+ +-------------+ +--------------+ - -import assert ; -import "class" : new ; -import errors ; -import feature ; -import indirect ; -import path ; -import property ; -import property-set ; -import sequence ; -import set ; -import toolset ; -import build-request ; - - -# Base class for all abstract targets. -# -class abstract-target -{ - import project ; - import assert ; - import "class" ; - import errors ; - - rule __init__ ( name # Name of the target in Jamfile. - : project-target # The project target to which this one belongs. - ) - { - # Note: it might seem that we don't need either name or project at all. - # However, there are places where we really need it. One example is - # error messages which should name problematic targets. Another is - # setting correct paths for sources and generated files. - - self.name = $(name) ; - self.project = $(project-target) ; - self.location = [ errors.nearest-user-location ] ; - } - - # Returns the name of this target. - rule name ( ) - { - return $(self.name) ; - } - - # Returns the project for this target. - rule project ( ) - { - return $(self.project) ; - } - - # Return the location where the target was declared. - rule location ( ) - { - return $(self.location) ; - } - - # Returns a user-readable name for this target. - rule full-name ( ) - { - local location = [ $(self.project).get location ] ; - return $(location)/$(self.name) ; - } - - # Generates virtual targets for this abstract target using the specified - # properties, unless a different value of some feature is required by the - # target. - # On success, returns: - # - a property-set with the usage requirements to be applied to dependants - # - a list of produced virtual targets, which may be empty. - # If 'property-set' is empty, performs the default build of this target, in - # a way specific to the derived class. - # - rule generate ( property-set ) - { - errors.error "method should be defined in derived classes" ; - } - - rule rename ( new-name ) - { - self.name = $(new-name) ; - } -} - - -if --debug-building in [ modules.peek : ARGV ] -{ - modules.poke : .debug-building : true ; -} - - -rule indent ( ) -{ - return $(.indent:J="") ; -} - - -rule increase-indent ( ) -{ - .indent += " " ; -} - - -rule decrease-indent ( ) -{ - .indent = $(.indent[2-]) ; -} - - -# Project target class (derived from 'abstract-target'). -# -# This class has the following responsibilities: -# - Maintaining a list of main targets in this project and building them. 
-# -# Main targets are constructed in two stages: -# - When Jamfile is read, a number of calls to 'add-alternative' is made. At -# that time, alternatives can also be renamed to account for inline targets. -# - The first time 'main-target' or 'has-main-target' rule is called, all -# alternatives are enumerated and main targets are created. -# -class project-target : abstract-target -{ - import project ; - import targets ; - import path ; - import print ; - import property-set ; - import set ; - import sequence ; - import "class" : new ; - import errors ; - - rule __init__ ( name : project-module parent-project ? - : requirements * : default-build * ) - { - abstract-target.__init__ $(name) : $(__name__) ; - - self.project-module = $(project-module) ; - self.location = [ project.attribute $(project-module) location ] ; - self.requirements = $(requirements) ; - self.default-build = $(default-build) ; - - if $(parent-project) - { - inherit $(parent-project) ; - } - } - - # This is needed only by the 'make' rule. Need to find the way to make - # 'make' work without this method. - # - rule project-module ( ) - { - return $(self.project-module) ; - } - - rule get ( attribute ) - { - return [ project.attribute $(self.project-module) $(attribute) ] ; - } - - rule build-dir ( ) - { - if ! $(self.build-dir) - { - self.build-dir = [ get build-dir ] ; - if ! $(self.build-dir) - { - self.build-dir = [ path.join [ $(self.project).get location ] - bin ] ; - } - } - return $(self.build-dir) ; - } - - # Generates all possible targets contained in this project. - # - rule generate ( property-set * ) - { - if [ modules.peek : .debug-building ] - { - ECHO [ targets.indent ] "building project" [ name ] " ('$(__name__)') with" [ $(property-set).raw ] ; - targets.increase-indent ; - } - - local usage-requirements = [ property-set.empty ] ; - local targets ; - - for local t in [ targets-to-build ] - { - local g = [ $(t).generate $(property-set) ] ; - usage-requirements = [ $(usage-requirements).add $(g[1]) ] ; - targets += $(g[2-]) ; - } - targets.decrease-indent ; - return $(usage-requirements) [ sequence.unique $(targets) ] ; - } - - # Computes and returns a list of abstract-target instances which must be - # built when this project is built. - # - rule targets-to-build ( ) - { - local result ; - - if ! $(self.built-main-targets) - { - build-main-targets ; - } - - # Collect all main targets here, except for "explicit" ones. - for local t in $(self.main-targets) - { - if ! [ $(t).name ] in $(self.explicit-targets) - { - result += $(t) ; - } - } - - # Collect all projects referenced via "projects-to-build" attribute. - local self-location = [ get location ] ; - for local pn in [ get projects-to-build ] - { - result += [ find $(pn)/ ] ; - } - - return $(result) ; - } - - # Add 'target' to the list of targets in this project that should be build - # only by explicit request - # - rule mark-target-as-explicit ( target-name * ) - { - # Record the name of the target, not instance, since this rule is called - # before main target instances are created. - self.explicit-targets += $(target-name) ; - } - - rule mark-target-as-always ( target-name * ) - { - # Record the name of the target, not instance, since this rule is called - # before main target instances are created. - self.always-targets += $(target-name) ; - } - - # Add new target alternative - # - rule add-alternative ( target-instance ) - { - if $(self.built-main-targets) - { - errors.error add-alternative called when main targets are already - created. 
: in project [ full-name ] ; - } - self.alternatives += $(target-instance) ; - } - - # Returns a 'main-target' class instance corresponding to 'name'. - # - rule main-target ( name ) - { - if ! $(self.built-main-targets) - { - build-main-targets ; - } - return $(self.main-target.$(name)) ; - } - - # Returns whether a main target with the specified name exists. - # - rule has-main-target ( name ) - { - if ! $(self.built-main-targets) - { - build-main-targets ; - } - - if $(self.main-target.$(name)) - { - return true ; - } - } - - # Worker function for the find rule not implementing any caching and simply - # returning nothing in case the target can not be found. - # - rule find-really ( id ) - { - local result ; - local current-location = [ get location ] ; - - local split = [ MATCH (.*)//(.*) : $(id) ] ; - local project-part = $(split[1]) ; - local target-part = $(split[2]) ; - - local extra-error-message ; - if $(project-part) - { - # There is an explicitly specified project part in id. Looks up the - # project and passes the request to it. - local pm = [ project.find $(project-part) : $(current-location) ] ; - if $(pm) - { - project-target = [ project.target $(pm) ] ; - result = [ $(project-target).find $(target-part) : no-error ] ; - } - else - { - # TODO: This extra error message will not get displayed most - # likely due to some buggy refactoring. Refactor the code so the - # message gets diplayed again. - extra-error-message = error: could not find project - '$(project-part)' ; - } - } - else - { - # Interpret target-name as name of main target. Need to do this - # before checking for file. Consider the following scenario with a - # toolset not modifying its executable's names, e.g. gcc on - # Unix-like platforms: - # - # exe test : test.cpp ; - # install s : test : <location>. ; - # - # After the first build we would have a target named 'test' in the - # Jamfile and a file named 'test' on the disk. We need the target to - # override the file. - result = [ main-target $(id) ] ; - - # Interpret id as an existing file reference. - if ! $(result) - { - result = [ new file-reference [ path.make $(id) ] : - $(self.project) ] ; - if ! [ $(result).exists ] - { - result = ; - } - } - - # Interpret id as project-id. - if ! $(result) - { - local project-module = [ project.find $(id) : - $(current-location) ] ; - if $(project-module) - { - result = [ project.target $(project-module) ] ; - } - } - } - - return $(result) ; - } - - # Find and return the target with the specified id, treated relative to - # self. Id may specify either a target or a file name with the target taking - # priority. May report an error or return nothing if the target is not found - # depending on the 'no-error' parameter. - # - rule find ( id : no-error ? ) - { - local v = $(.id.$(id)) ; - if ! $(v) - { - v = [ find-really $(id) ] ; - if ! $(v) - { - v = none ; - } - .id.$(id) = $(v) ; - } - - if $(v) != none - { - return $(v) ; - } - else - { - if ! $(no-error) - { - local current-location = [ get location ] ; - ECHO "error: Unable to find file or target named" ; - ECHO "error: '$(id)'" ; - ECHO "error: referred from project at" ; - ECHO "error: '$(current-location)'" ; - ECHO $(extra-error-message) ; - EXIT ; - } - } - } - - rule build-main-targets ( ) - { - self.built-main-targets = true ; - for local a in $(self.alternatives) - { - local name = [ $(a).name ] ; - local target = $(self.main-target.$(name)) ; - if ! 
$(target) - { - local t = [ new main-target $(name) : $(self.project) ] ; - self.main-target.$(name) = $(t) ; - self.main-targets += $(t) ; - target = $(self.main-target.$(name)) ; - } - - if $(name) in $(self.always-targets) - { - $(a).always ; - } - - $(target).add-alternative $(a) ; - } - } - - # Accessor, add a constant. - # - rule add-constant ( - name # Variable name of the constant. - : value + # Value of the constant. - : type ? # Optional type of value. - ) - { - switch $(type) - { - case path : - local r ; - for local v in $(value) - { - local l = $(self.location) ; - if ! $(l) - { - # Project corresponding to config files do not have - # 'location' attribute, but do have source location. - # It might be more reasonable to make every project have - # a location and use some other approach to prevent buildable - # targets in config files, but that's for later. - l = [ get source-location ] ; - } - v = [ path.root [ path.make $(v) ] $(l) ] ; - # Now make the value absolute path. - v = [ path.root $(v) [ path.pwd ] ] ; - # Constants should be in platform-native form. - v = [ path.native $(v) ] ; - r += $(v) ; - } - value = $(r) ; - } - if ! $(name) in $(self.constants) - { - self.constants += $(name) ; - } - self.constant.$(name) = $(value) ; - # Inject the constant in the scope of the Jamroot module. - modules.poke $(self.project-module) : $(name) : $(value) ; - } - - rule inherit ( parent ) - { - for local c in [ modules.peek $(parent) : self.constants ] - { - # No need to pass the type. Path constants were converted to - # absolute paths already by parent. - add-constant $(c) - : [ modules.peek $(parent) : self.constant.$(c) ] ; - } - - # Import rules from parent. - local this-module = [ project-module ] ; - local parent-module = [ $(parent).project-module ] ; - # Do not import rules coming from 'project-rules' as they must be - # imported localized. - local user-rules = [ set.difference - [ RULENAMES $(parent-module) ] : - [ RULENAMES project-rules ] ] ; - IMPORT $(parent-module) : $(user-rules) : $(this-module) : $(user-rules) ; - EXPORT $(this-module) : $(user-rules) ; - } -} - - -# Helper rules to detect cycles in main target references. -# -local rule start-building ( main-target-instance ) -{ - if $(main-target-instance) in $(.targets-being-built) - { - local names ; - for local t in $(.targets-being-built) $(main-target-instance) - { - names += [ $(t).full-name ] ; - } - - errors.error "Recursion in main target references" - : "the following target are being built currently:" - : $(names) ; - } - .targets-being-built += $(main-target-instance) ; -} - - -local rule end-building ( main-target-instance ) -{ - .targets-being-built = $(.targets-being-built[1--2]) ; -} - - -# A named top-level target in Jamfile. 
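`start-building` and `end-building` above guard `main-target.generate` with an explicit stack, so that mutually referencing main targets fail with a readable message instead of recursing forever. The same guard in Python (a sketch, not the ported b2 code):

    _building = []  # stack of main targets currently being generated

    def start_building(target):
        if target in _building:
            cycle = " -> ".join(_building + [target])
            raise RuntimeError("Recursion in main target references: " + cycle)
        _building.append(target)

    def end_building(target):
        assert _building[-1] == target
        _building.pop()

The `main-target` class below calls this pair around every `generate`.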
-# -class main-target : abstract-target -{ - import assert ; - import errors ; - import feature ; - import print ; - import property-set ; - import sequence ; - import targets : start-building end-building ; - - rule __init__ ( name : project ) - { - abstract-target.__init__ $(name) : $(project) ; - } - - # Add a new alternative for this target - rule add-alternative ( target ) - { - local d = [ $(target).default-build ] ; - if $(self.alternatives) && ( $(self.default-build) != $(d) ) - { - errors.error "default build must be identical in all alternatives" - : "main target is" [ full-name ] - : "with" [ $(d).raw ] - : "differing from previous default build" [ $(self.default-build).raw ] ; - } - else - { - self.default-build = $(d) ; - } - self.alternatives += $(target) ; - } - - # Returns the best viable alternative for this property-set. See the - # documentation for selection rules. - # - local rule select-alternatives ( property-set debug ? ) - { - # When selecting alternatives we have to consider defaults, for example: - # lib l : l.cpp : <variant>debug ; - # lib l : l_opt.cpp : <variant>release ; - # won't work unless we add default value <variant>debug. - property-set = [ $(p).add-defaults ] ; - - # The algorithm: we keep the current best viable alternative. When we've - # got a new best viable alternative, we compare it with the current one. - - local best ; - local best-properties ; - - if $(self.alternatives[2-]) - { - local bad ; - local worklist = $(self.alternatives) ; - while $(worklist) && ! $(bad) - { - local v = $(worklist[1]) ; - local properties = [ $(v).match $(property-set) $(debug) ] ; - - if $(properties) != no-match - { - if ! $(best) - { - best = $(v) ; - best-properties = $(properties) ; - } - else - { - if $(properties) = $(best-properties) - { - bad = true ; - } - else if $(properties) in $(best-properties) - { - # Do nothing, this alternative is worse - } - else if $(best-properties) in $(properties) - { - best = $(v) ; - best-properties = $(properties) ; - } - else - { - bad = true ; - } - } - } - worklist = $(worklist[2-]) ; - } - if ! $(bad) - { - return $(best) ; - } - } - else - { - return $(self.alternatives) ; - } - } - - rule apply-default-build ( property-set ) - { - return [ targets.apply-default-build $(property-set) - : $(self.default-build) ] ; - } - - # Select an alternative for this main target, by finding all alternatives - # which requirements are satisfied by 'properties' and picking the one with - # the longest requirements set. Returns the result of calling 'generate' on - # that alternative. - # - rule generate ( property-set ) - { - start-building $(__name__) ; - - # We want composite properties in build request act as if all the - # properties it expands too are explicitly specified. - property-set = [ $(property-set).expand ] ; - - local all-property-sets = [ apply-default-build $(property-set) ] ; - local usage-requirements = [ property-set.empty ] ; - local result ; - for local p in $(all-property-sets) - { - local r = [ generate-really $(p) ] ; - if $(r) - { - usage-requirements = [ $(usage-requirements).add $(r[1]) ] ; - result += $(r[2-]) ; - } - } - end-building $(__name__) ; - return $(usage-requirements) [ sequence.unique $(result) ] ; - } - - # Generates the main target with the given property set and returns a list - # which first element is property-set object containing usage-requirements - # of generated target and with generated virtual target in other elements. - # It is possible that no targets are generated. 
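`select-alternatives` above implements "most specific match wins": an alternative is viable when its required properties are all present in the (default-augmented) build request; among viable alternatives the one with the largest requirement set is chosen; and two maximal candidates that tie, or that neither contain each other, are an ambiguity. With property sets as plain Python sets (a sketch with hypothetical names):

    def select_alternative(alternatives, request):
        # alternatives: (target, required_properties) pairs; request: set of
        # properties in the build request, defaults already added.
        best, best_props = None, None
        for target, required in alternatives:
            if not required <= request:
                continue                                # not viable
            if best is None or best_props < required:
                best, best_props = target, required     # strictly more specific
            elif not required < best_props:
                raise ValueError("no best alternative: ambiguous match")
        return best

    alts = [("generic", set()), ("for-gcc", {"<toolset>gcc"})]
    print(select_alternative(alts, {"<toolset>gcc", "<variant>debug"}))  # for-gcc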
- # - local rule generate-really ( property-set ) - { - local best-alternatives = [ select-alternatives $(property-set) ] ; - if ! $(best-alternatives) - { - ECHO "error: No best alternative for" [ full-name ] ; - select-alternatives $(property-set) debug ; - return [ property-set.empty ] ; - } - else - { - # Now return virtual targets for the only alternative. - return [ $(best-alternatives).generate $(property-set) ] ; - } - } - - rule rename ( new-name ) - { - abstract-target.rename $(new-name) ; - for local a in $(self.alternatives) - { - $(a).rename $(new-name) ; - } - } -} - - -# Abstract target refering to a source file. This is an artificial entity -# allowing sources to a target to be represented using a list of abstract target -# instances. -# -class file-reference : abstract-target -{ - import virtual-target ; - import property-set ; - import path ; - - rule __init__ ( file : project ) - { - abstract-target.__init__ $(file) : $(project) ; - } - - rule generate ( properties ) - { - return [ property-set.empty ] [ virtual-target.from-file $(self.name) : - [ location ] : $(self.project) ] ; - } - - # Returns true if the referred file really exists. - rule exists ( ) - { - location ; - return $(self.file-path) ; - } - - # Returns the location of target. Needed by 'testing.jam'. - rule location ( ) - { - if ! $(self.file-location) - { - local source-location = [ $(self.project).get source-location ] ; - for local src-dir in $(source-location) - { - if ! $(self.file-location) - { - local location = [ path.root $(self.name) $(src-dir) ] ; - if [ CHECK_IF_FILE [ path.native $(location) ] ] - { - self.file-location = $(src-dir) ; - self.file-path = $(location) ; - } - } - } - } - return $(self.file-location) ; - } -} - - -# Given a target-reference, made in context of 'project', returns the -# abstract-target instance that is referred to, as well as properties explicitly -# specified for this reference. -# -rule resolve-reference ( target-reference : project ) -{ - # Separate target name from properties override. - local split = [ MATCH "^([^<]*)(/(<.*))?$" : $(target-reference) ] ; - local id = $(split[1]) ; - local sproperties = ; - if $(split[3]) - { - sproperties = [ property.make [ feature.split $(split[3]) ] ] ; - sproperties = [ feature.expand-composites $(sproperties) ] ; - } - - # Find the target. - local target = [ $(project).find $(id) ] ; - - return $(target) [ property-set.create $(sproperties) ] ; -} - - -# Attempts to generate the target given by target reference, which can refer -# both to a main target or to a file. Returns a list consisting of -# - usage requirements -# - generated virtual targets, if any -# -rule generate-from-reference ( - target-reference # Target reference. - : project # Project where the reference is made. - : property-set # Properties of the main target that makes the reference. -) -{ - local r = [ resolve-reference $(target-reference) : $(project) ] ; - local target = $(r[1]) ; - local sproperties = $(r[2]) ; - - # Take properties which should be propagated and refine them with - # source-specific requirements. - local propagated = [ $(property-set).propagated ] ; - local rproperties = [ $(propagated).refine $(sproperties) ] ; - if $(rproperties[1]) = "@error" - { - errors.error - "When building" [ full-name ] " with properties " $(properties) : - "Invalid properties specified for " $(source) ":" - $(rproperties[2-]) ; - } - return [ $(target).generate $(rproperties) ] ; -} - -rule apply-default-build ( property-set : default-build ) -{ - # 1. 
First, see what properties from default-build are already present - # in property-set. - - local raw = [ $(property-set).raw ] ; - local specified-features = $(raw:G) ; - - local defaults-to-apply ; - for local d in [ $(default-build).raw ] - { - if ! $(d:G) in $(specified-features) - { - defaults-to-apply += $(d) ; - } - } - - # 2. If there are any defaults to be applied, form a new build request. - # Pass it through to 'expand-no-defaults' since default-build might - # contain "release debug" resulting in two property-sets. - local result ; - if $(defaults-to-apply) - { - properties = [ - build-request.expand-no-defaults - - # We have to compress subproperties here to prevent property - # lists like: - # - # <toolset>msvc <toolset-msvc:version>7.1 <threading>multi - # - # from being expanded into: - # - # <toolset-msvc:version>7.1/<threading>multi - # <toolset>msvc/<toolset-msvc:version>7.1/<threading>multi - # - # due to a cross-product property combination. That may be an - # indication that build-request.expand-no-defaults is the wrong - # rule to use here. - [ feature.compress-subproperties $(raw) ] - $(defaults-to-apply) - ] ; - - if $(properties) - { - for local p in $(properties) - { - result += [ property-set.create - [ feature.expand [ feature.split $(p) ] ] ] ; - } - } - else - { - result = [ property-set.empty ] ; - } - } - else - { - result = $(property-set) ; - } - return $(result) ; -} - - -# Given a build request and requirements, return properties common to dependency -# build request and target requirements. -# -# TODO: Document exactly what 'common properties' are, whether they should -# include default property values, whether they should contain any conditional -# properties or should those be already processed, etc. See whether there are -# any differences between use cases with empty and non-empty build-request as -# well as with requirements containing and those not containing any non-free -# features. -# -rule common-properties ( build-request requirements ) -{ - # For optimization, we add free requirements directly, without using a - # complex algorithm. This gives the complex algorithm a better chance of - # caching results. - local free = [ $(requirements).free ] ; - local non-free = [ property-set.create [ $(requirements).base ] - [ $(requirements).incidental ] ] ; - - local key = .rp.$(build-request)-$(non-free) ; - if ! $($(key)) - { - $(key) = [ common-properties2 $(build-request) $(non-free) ] ; - } - result = [ $($(key)).add-raw $(free) ] ; -} - - -# Given a 'context' -- a set of already present properties, and 'requirements', -# decide which extra properties should be applied to 'context'. For conditional -# requirements, this means evaluating the condition. For indirect conditional -# requirements, this means calling a rule. Ordinary requirements are always -# applied. -# -# Handles the situation where evaluating one conditional requirement affects -# conditions of another conditional requirements, such as: -# <toolset>gcc:<variant>release <variant>release:<define>RELEASE -# -# If 'what' is 'refined' returns context refined with new requirements. If -# 'what' is 'added' returns just the requirements to be applied. -# -rule evaluate-requirements ( requirements : context : what ) -{ - # Apply non-conditional requirements. It is possible that further - # conditional requirement change a value set by non-conditional - # requirements. 
For example: - # - # exe a : a.cpp : <threading>single <toolset>foo:<threading>multi ; - # - # I am not sure if this should be an error, or not, especially given that - # - # <threading>single - # - # might come from project's requirements. - - local unconditional = [ feature.expand [ $(requirements).non-conditional ] ] ; - - local raw = [ $(context).raw ] ; - raw = [ property.refine $(raw) : $(unconditional) ] ; - - # We have collected properties that surely must be present in common - # properties. We now try to figure out what other properties should be added - # in order to satisfy rules (4)-(6) from the docs. - - local conditionals = [ $(requirements).conditional ] ; - # The 'count' variable has one element for each conditional feature and for - # each occurrence of '<indirect-conditional>' feature. It is used as a loop - # counter: for each iteration of the loop before we remove one element and - # the property set should stabilize before we are done. It is assumed that - # #conditionals iterations should be enough for properties to propagate - # along conditions in any direction. - local count = $(conditionals) - [ $(requirements).get <conditional> ] - and-once-more ; - - local added-requirements ; - - local current = $(raw) ; - - # It is assumed that ordinary conditional requirements can not add - # <conditional> properties (a.k.a. indirect conditional properties), and - # that rules referred to by <conditional> properties can not add new - # <conditional> properties. So the list of indirect conditionals does not - # change. - local indirect = [ $(requirements).get <conditional> ] ; - indirect = [ MATCH ^@(.*) : $(indirect) ] ; - - local ok ; - while $(count) - { - # Evaluate conditionals in context of current properties. - local e = [ property.evaluate-conditionals-in-context $(conditionals) - : $(current) ] ; - - # Evaluate indirect conditionals. - for local i in $(indirect) - { - e += [ indirect.call $(i) $(current) ] ; - } - - if $(e) = $(added-requirements) - { - # If we got the same result, we have found the final properties. - count = ; - ok = true ; - } - else - { - # Oops, conditional evaluation results have changed. Also 'current' - # contains leftovers from a previous evaluation. Recompute 'current' - # using initial properties and conditional requirements. - added-requirements = $(e) ; - current = [ property.refine $(raw) : [ feature.expand $(e) ] ] ; - } - count = $(count[2-]) ; - } - if ! $(ok) - { - errors.error "Can not evaluate conditional properties " $(conditionals) ; - } - - if $(what) = added - { - return [ property-set.create $(unconditional) $(added-requirements) ] ; - } - else if $(what) = refined - { - return [ property-set.create $(current) ] ; - } - else - { - errors.error "Invalid value of the 'what' parameter." ; - } -} - - -rule common-properties2 ( build-request requirements ) -{ - # This guarantees that default properties are present in the result, unless - # they are overriden by some requirement. FIXME: There is possibility that - # we have added <foo>bar, which is composite and expands to <foo2>bar2, but - # default value of <foo2> is not bar2, in which case it is not clear what to - # do. - # - build-request = [ $(build-request).add-defaults ] ; - # Features added by 'add-default' can be composite and expand to features - # without default values -- so they are not added yet. It could be clearer/ - # /faster to expand only newly added properties but that is not critical. 
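The loop in `evaluate-requirements` above is a bounded fixed-point iteration: the conditionals are re-evaluated against the current property set until the set of triggered properties stops changing, and the `count` list caps the number of passes at one per conditional plus one. The shape of that loop, with a condition reduced to a (guard set, property) pair (a sketch under those simplifying assumptions; it omits the refine step of the real code):

    def evaluate_conditionals(base, conditionals):
        # base: set of unconditional properties;
        # conditionals: list of (guard_set, property) rules.
        added = set()
        for _ in range(len(conditionals) + 1):   # enough passes to stabilize
            current = base | added
            new = set(p for guard, p in conditionals if guard <= current)
            if new == added:
                return current                   # fixed point reached
            added = new
        raise RuntimeError("can not evaluate conditional properties")

    rules = [({"<variant>release"}, "<define>NDEBUG")]
    print(sorted(evaluate_conditionals({"<toolset>gcc", "<variant>release"}, rules)))
    # -> ['<define>NDEBUG', '<toolset>gcc', '<variant>release']

`common-properties2` continues below, expanding defaults on the build request before handing it to this evaluation.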
- build-request = [ $(build-request).expand ] ; - - return [ evaluate-requirements $(requirements) : $(build-request) : - refined ] ; -} - -rule push-target ( target ) -{ - .targets = $(target) $(.targets) ; -} - -rule pop-target ( ) -{ - .targets = $(.targets[2-]) ; -} - -# Return the metatarget that is currently being generated. -rule current ( ) -{ - return $(.targets[1]) ; -} - - -# Implements the most standard way of constructing main target alternative from -# sources. Allows sources to be either file or other main target and handles -# generation of those dependency targets. -# -class basic-target : abstract-target -{ - import build-request ; - import build-system ; - import "class" : new ; - import errors ; - import feature ; - import property ; - import property-set ; - import sequence ; - import set ; - import targets ; - import virtual-target ; - - rule __init__ ( name : project : sources * : requirements * - : default-build * : usage-requirements * ) - { - abstract-target.__init__ $(name) : $(project) ; - - self.sources = $(sources) ; - if ! $(requirements) { - requirements = [ property-set.empty ] ; - } - self.requirements = $(requirements) ; - if ! $(default-build) - { - default-build = [ property-set.empty ] ; - } - self.default-build = $(default-build) ; - if ! $(usage-requirements) - { - usage-requirements = [ property-set.empty ] ; - } - self.usage-requirements = $(usage-requirements) ; - - if $(sources:G) - { - errors.user-error properties found in the 'sources' parameter for - [ full-name ] ; - } - } - - rule always ( ) - { - self.always = 1 ; - } - - # Returns the list of abstract-targets which are used as sources. The extra - # properties specified for sources are not represented. The only user for - # this rule at the moment is the "--dump-tests" feature of the test system. - # - rule sources ( ) - { - if ! $(self.source-targets) - { - for local s in $(self.sources) - { - self.source-targets += - [ targets.resolve-reference $(s) : $(self.project) ] ; - } - } - return $(self.source-targets) ; - } - - rule requirements ( ) - { - return $(self.requirements) ; - } - - rule default-build ( ) - { - return $(self.default-build) ; - } - - # Returns the alternative condition for this alternative, if the condition - # is satisfied by 'property-set'. - # - rule match ( property-set debug ? ) - { - # The condition is composed of all base non-conditional properties. It - # is not clear if we should expand 'self.requirements' or not. For one - # thing, it would be nice to be able to put - # <toolset>msvc-6.0 - # in requirements. On the other hand, if we have <variant>release as a - # condition it does not make sense to require <optimization>full to be - # in the build request just to select this variant. - local bcondition = [ $(self.requirements).base ] ; - local ccondition = [ $(self.requirements).conditional ] ; - local condition = [ set.difference $(bcondition) : $(ccondition) ] ; - if $(debug) - { - ECHO " next alternative: required properties:" $(condition:E=(empty)) ; - } - - if $(condition) in [ $(property-set).raw ] - { - if $(debug) - { - ECHO " matched" ; - } - return $(condition) ; - } - else - { - if $(debug) - { - ECHO " not matched" ; - } - return no-match ; - } - } - - # Takes a target reference, which might be either target id or a dependency - # property, and generates that target using 'property-set' as build request. - # - # The results are added to the variable called 'result-var'. 
Usage - # requirements are added to the variable called 'usage-requirements-var'. - # - rule generate-dependencies ( dependencies * : property-set - : result-var usage-requirements-var ) - { - for local dependency in $(dependencies) - { - local grist = $(dependency:G) ; - local id = $(dependency:G=) ; - - local result = [ targets.generate-from-reference $(id) : - $(self.project) : $(property-set) ] ; - - $(result-var) += $(result[2-]:G=$(grist)) ; - $(usage-requirements-var) += [ $(result[1]).raw ] ; - } - } - - # Determines final build properties, generates sources, and calls - # 'construct'. This method should not be overridden. - # - rule generate ( property-set ) - { - if [ modules.peek : .debug-building ] - { - ECHO ; - local fn = [ full-name ] ; - ECHO [ targets.indent ] "Building target '$(fn)'" ; - targets.increase-indent ; - ECHO [ targets.indent ] "Build request: " $(property-set) [ $(property-set).raw ] ; - local cf = [ build-system.command-line-free-features ] ; - ECHO [ targets.indent ] "Command line free features: " [ $(cf).raw ] ; - ECHO [ targets.indent ] "Target requirements: " [ $(self.requirements).raw ] ; - } - targets.push-target $(__name__) ; - - if ! $(self.generated.$(property-set)) - { - # Apply free features from the command line. If user said - # define=FOO - # he most likely wants this define to be set for all compiles. - property-set = [ $(property-set).refine - [ build-system.command-line-free-features ] ] ; - local rproperties = [ targets.common-properties $(property-set) - $(self.requirements) ] ; - - if [ modules.peek : .debug-building ] - { - ECHO ; - ECHO [ targets.indent ] "Common properties: " [ $(rproperties).raw ] ; - } - - if ( $(rproperties[1]) != "@error" ) && ( [ $(rproperties).get - <build> ] != no ) - { - local source-targets ; - local properties = [ $(rproperties).non-dependency ] ; - local usage-requirements ; - - generate-dependencies [ $(rproperties).dependency ] : - $(rproperties) : properties usage-requirements ; - - generate-dependencies $(self.sources) : $(rproperties) : - source-targets usage-requirements ; - - if [ modules.peek : .debug-building ] - { - ECHO ; - ECHO [ targets.indent ] "Usage requirements for" - $(self.name)": " $(usage-requirements) ; - } - - rproperties = [ property-set.create $(properties) - $(usage-requirements) ] ; - usage-requirements = [ property-set.create $(usage-requirements) ] ; - - if [ modules.peek : .debug-building ] - { - ECHO [ targets.indent ] "Build properties: " - [ $(rproperties).raw ] ; - } - - local extra = [ $(rproperties).get <source> ] ; - source-targets += $(extra:G=) ; - # We might get duplicate sources, for example if we link to two - # libraries having the same <library> usage requirement. - # Use stable sort, since for some targets the order is - # important. E.g. RUN_PY target need python source to come - # first. 
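The comment above is exactly why the deduplication on the next line uses `sequence.unique ... : stable`: a stable unique keeps the first occurrence of every source and preserves relative order, so for example the Python source stays first for RUN_PY targets. The Python equivalent (sketch):

    def stable_unique(items):
        """Drop duplicates while keeping first-seen order."""
        seen, result = set(), []
        for x in items:
            if x not in seen:
                seen.add(x)
                result.append(x)
        return result

    print(stable_unique(["python", "libA", "libB", "libA"]))
    # -> ['python', 'libA', 'libB']   (order of first use preserved)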
- source-targets = [ sequence.unique $(source-targets) : stable ] ; - - local result = [ construct $(self.name) : $(source-targets) : - $(rproperties) ] ; - - if $(result) - { - local gur = $(result[1]) ; - result = $(result[2-]) ; - - if $(self.always) - { - for local t in $(result) - { - $(t).always ; - } - } - - local s = [ create-subvariant $(result) - : [ virtual-target.recent-targets ] - : $(property-set) : $(source-targets) - : $(rproperties) : $(usage-requirements) ] ; - virtual-target.clear-recent-targets ; - - local ur = [ compute-usage-requirements $(s) ] ; - ur = [ $(ur).add $(gur) ] ; - $(s).set-usage-requirements $(ur) ; - if [ modules.peek : .debug-building ] - { - ECHO [ targets.indent ] "Usage requirements from" - $(self.name)": " [ $(ur).raw ] ; - } - - self.generated.$(property-set) = $(ur) $(result) ; - } - } - else - { - if $(rproperties[1]) = "@error" - { - ECHO [ targets.indent ] "Skipping build of:" [ full-name ] - "cannot compute common properties" ; - } - else if [ $(rproperties).get <build> ] = no - { - # If we just see <build>no, we cannot produce any reasonable - # diagnostics. The code that adds this property is expected - # to explain why a target is not built, for example using - # the configure.log-component-configuration function. - } - else - { - ECHO [ targets.indent ] "Skipping build of: " [ full-name ] - " unknown reason" ; - } - - # We are here either because there has been an error computing - # properties or there is <build>no in properties. In the latter - # case we do not want any diagnostic. In the former case, we - # need diagnostics. FIXME - - # If this target fails to build, add <build>no to properties to - # cause any parent target to fail to build. Except that it - # - does not work now, since we check for <build>no only in - # common properties, but not in properties that came from - # dependencies - # - it is not clear if that is a good idea anyway. The alias - # target, for example, should not fail to build if a - # dependency fails. - self.generated.$(property-set) = [ property-set.create <build>no ] ; - } - } - else - { - if [ modules.peek : .debug-building ] - { - ECHO [ targets.indent ] "Already built" ; - local ur = $(self.generated.$(property-set)) ; - ur = $(ur[0]) ; - targets.increase-indent ; - ECHO [ targets.indent ] "Usage requirements from" - $(self.name)": " [ $(ur).raw ] ; - targets.decrease-indent ; - } - } - - targets.pop-target ; - targets.decrease-indent ; - return $(self.generated.$(property-set)) ; - } - - # Given the set of generated targets, and refined build properties, - # determines and sets appropriate usage requirements on those targets. - # - rule compute-usage-requirements ( subvariant ) - { - local rproperties = [ $(subvariant).build-properties ] ; - xusage-requirements = [ targets.evaluate-requirements - $(self.usage-requirements) : $(rproperties) : added ] ; - - # We generate all dependency properties and add them, as well as their - # usage requirements, to the result. - local extra ; - generate-dependencies [ $(xusage-requirements).dependency ] : - $(rproperties) : extra extra ; - - local result = [ property-set.create - [ $(xusage-requirements).non-dependency ] $(extra) ] ; - - # Propagate usage requirements we got from sources, except for the - # <pch-header> and <pch-file> features. - # - # That feature specifies which pch file to use, and should apply only to - # direct dependents. Consider: - # - # pch pch1 : ... - # lib lib1 : ..... 
pch1 ; - # pch pch2 : - # lib lib2 : pch2 lib1 ; - # - # Here, lib2 should not get <pch-header> property from pch1. - # - # Essentially, when those two features are in usage requirements, they - # are propagated only to direct dependents. We might need a more general - # mechanism, but for now, only those two features are special. - # - # TODO - Actually there are more possible candidates like for instance - # when listing static library X as a source for another static library. - # Then static library X will be added as a <source> property to the - # second library's usage requirements but those requirements should last - # only up to the first executable or shared library that actually links - # to it. - local raw = [ $(subvariant).sources-usage-requirements ] ; - raw = [ $(raw).raw ] ; - raw = [ property.change $(raw) : <pch-header> ] ; - raw = [ property.change $(raw) : <pch-file> ] ; - return [ $(result).add [ property-set.create $(raw) ] ] ; - } - - # Creates new subvariant instances for 'targets'. - # 'root-targets' - virtual targets to be returned to dependants - # 'all-targets' - virtual targets created while building this main target - # 'build-request' - property-set instance with requested build properties - # - local rule create-subvariant ( root-targets * : all-targets * : - build-request : sources * : rproperties : usage-requirements ) - { - for local e in $(root-targets) - { - $(e).root true ; - } - - # Process all virtual targets that will be created if this main target - # is created. - local s = [ new subvariant $(__name__) : $(build-request) : $(sources) : - $(rproperties) : $(usage-requirements) : $(all-targets) ] ; - for local v in $(all-targets) - { - if ! [ $(v).creating-subvariant ] - { - $(v).creating-subvariant $(s) ; - } - } - return $(s) ; - } - - # Constructs virtual targets for this abstract target and the dependency - # graph. Returns a usage-requirements property-set and a list of virtual - # targets. Should be overriden in derived classes. - # - rule construct ( name : source-targets * : properties * ) - { - errors.error "method should be defined in derived classes" ; - } -} - - -class typed-target : basic-target -{ - import generators ; - - rule __init__ ( name : project : type : sources * : requirements * : - default-build * : usage-requirements * ) - { - basic-target.__init__ $(name) : $(project) : $(sources) : - $(requirements) : $(default-build) : $(usage-requirements) ; - - self.type = $(type) ; - } - - rule type ( ) - { - return $(self.type) ; - } - - rule construct ( name : source-targets * : property-set ) - { - local r = [ generators.construct $(self.project) $(name:S=) : $(self.type) - : [ property-set.create [ $(property-set).raw ] - <main-target-type>$(self.type) ] - : $(source-targets) : true ] ; - if ! $(r) - { - ECHO "warn: Unable to construct" [ full-name ] ; - - # Are there any top-level generators for this type/property set. - if ! [ generators.find-viable-generators $(self.type) - : $(property-set) ] - { - ECHO "error: no generators were found for type '$(self.type)'" ; - ECHO "error: and the requested properties" ; - ECHO "error: make sure you've configured the needed tools" ; - ECHO "See http://boost.org/boost-build2/doc/html/bbv2/advanced/configuration.html" ; - ECHO "To debug this problem, try the --debug-generators option." ; - EXIT ; - } - } - return $(r) ; - } -} - - -# Return the list of sources to use, if main target rule is invoked with -# 'sources'. 
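To make the <pch-header>/<pch-file> rule described above concrete, here is a rough Python sketch of the filtering step (property representation simplified to plain strings; this is not the actual Boost.Build code):

    def propagate_source_usage(usage_requirements):
        # Pass along usage requirements collected from sources, but strip
        # <pch-header>/<pch-file>: those must reach direct dependents only
        # (lib2 : pch2 lib1 must not inherit pch1's header).
        local_only = ("<pch-header>", "<pch-file>")
        return [p for p in usage_requirements if not p.startswith(local_only)]

    ur = ["<pch-header>pch1.h", "<define>USE_LIB1", "<pch-file>pch1.pch"]
    print(propagate_source_usage(ur))   # ['<define>USE_LIB1']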
If there are any objects in 'sources', they are treated as main -# target instances, and the name of such targets are adjusted to be -# '<name_of_this_target>__<name_of_source_target>'. Such renaming is disabled if -# a non-empty value is passed as the 'no-renaming' parameter. -# -rule main-target-sources ( sources * : main-target-name : no-renaming ? ) -{ - local result ; - for local t in $(sources) - { - if [ class.is-instance $(t) ] - { - local name = [ $(t).name ] ; - if ! $(no-renaming) - { - name = $(main-target-name)__$(name) ; - $(t).rename $(name) ; - } - # Inline targets are not built by default. - local p = [ $(t).project ] ; - $(p).mark-target-as-explicit $(name) ; - result += $(name) ; - } - else - { - result += $(t) ; - } - } - return $(result) ; -} - - -# Returns the requirements to use when declaring a main target, obtained by -# translating all specified property paths and refining project requirements -# with the ones specified for the target. -# -rule main-target-requirements ( - specification * # Properties explicitly specified for the main target. - : project # Project where the main target is to be declared. -) -{ - specification += [ toolset.requirements ] ; - - local requirements = [ property-set.refine-from-user-input - [ $(project).get requirements ] : $(specification) : - [ $(project).project-module ] : [ $(project).get location ] ] ; - if $(requirements[1]) = "@error" - { - errors.error "Conflicting requirements for target:" $(requirements) ; - } - return $(requirements) ; -} - - -# Returns the usage requirements to use when declaring a main target, which are -# obtained by translating all specified property paths and adding project's -# usage requirements. -# -rule main-target-usage-requirements ( - specification * # Use-properties explicitly specified for a main target. - : project # Project where the main target is to be declared. -) -{ - local project-usage-requirements = [ $(project).get usage-requirements ] ; - - # We do not use 'refine-from-user-input' because: - # - I am not sure if removing parent's usage requirements makes sense - # - refining usage requirements is not needed, since usage requirements are - # always free. - local usage-requirements = [ property-set.create-from-user-input - $(specification) - : [ $(project).project-module ] [ $(project).get location ] ] ; - - return [ $(project-usage-requirements).add $(usage-requirements) ] ; -} - - -# Return the default build value to use when declaring a main target, which is -# obtained by using the specified value if not empty and parent's default build -# attribute otherwise. -# -rule main-target-default-build ( - specification * # Default build explicitly specified for a main target. - : project # Project where the main target is to be declared. -) -{ - local result ; - if $(specification) - { - result = $(specification) ; - } - else - { - result = [ $(project).get default-build ] ; - } - return [ property-set.create-with-validation $(result) ] ; -} - - -# Registers the specified target as a main target alternative and returns it. -# -rule main-target-alternative ( target ) -{ - local ptarget = [ $(target).project ] ; - $(ptarget).add-alternative $(target) ; - return $(target) ; -} - -# Creates a new metargets with the specified properties, using 'klass' as -# the class. The 'name', 'sources', -# 'requirements', 'default-build' and 'usage-requirements' are assumed to be in -# the form specified by the user in Jamfile corresponding to 'project'. 
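The renaming convention used by main-target-sources can be sketched in Python as follows (a dict stands in for an inline target instance; all names are illustrative only):

    def adopt_inline_sources(main_target_name, sources, no_renaming=False):
        # Inline targets are renamed to <main>__<source> and marked
        # explicit, so they are never built on their own; plain string
        # sources pass through untouched.
        result, explicit = [], []
        for s in sources:
            if isinstance(s, dict):                  # inline target stand-in
                name = s["name"]
                if not no_renaming:
                    name = main_target_name + "__" + name
                    s["name"] = name
                explicit.append(name)
                result.append(name)
            else:
                result.append(s)
        return result, explicit

    print(adopt_inline_sources("app", ["main.cpp", {"name": "gen_hdr"}]))
    # (['main.cpp', 'app__gen_hdr'], ['app__gen_hdr'])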
-# -rule create-metatarget ( klass : project : name : sources * : requirements * : - default-build * : usage-requirements * ) -{ - return [ - targets.main-target-alternative - [ new $(klass) $(name) : $(project) - : [ targets.main-target-sources $(sources) : $(name) ] - : [ targets.main-target-requirements $(requirements) : $(project) ] - : [ targets.main-target-default-build $(default-build) : $(project) ] - : [ targets.main-target-usage-requirements $(usage-requirements) : $(project) ] - ] ] ; -} - -# Creates a typed-target with the specified properties. The 'name', 'sources', -# 'requirements', 'default-build' and 'usage-requirements' are assumed to be in -# the form specified by the user in Jamfile corresponding to 'project'. -# -rule create-typed-target ( type : project : name : sources * : requirements * : - default-build * : usage-requirements * ) -{ - return [ - targets.main-target-alternative - [ new typed-target $(name) : $(project) : $(type) - : [ targets.main-target-sources $(sources) : $(name) ] - : [ targets.main-target-requirements $(requirements) : $(project) ] - : [ targets.main-target-default-build $(default-build) : $(project) ] - : [ targets.main-target-usage-requirements $(usage-requirements) : $(project) ] - ] ] ; -} diff --git a/jam-files/boost-build/build/targets.py b/jam-files/boost-build/build/targets.py deleted file mode 100644 index a35612ce..00000000 --- a/jam-files/boost-build/build/targets.py +++ /dev/null @@ -1,1401 +0,0 @@ -# Status: ported. -# Base revision: 64488 - -# Copyright Vladimir Prus 2002-2007. -# Copyright Rene Rivera 2006. -# -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -# Supports 'abstract' targets, which are targets explicitly defined in Jamfile. -# -# Abstract targets are represented by classes derived from 'AbstractTarget' class. -# The first abstract target is 'project_target', which is created for each -# Jamfile, and can be obtained by the 'target' rule in the Jamfile's module. -# (see project.jam). -# -# Project targets keep a list of 'MainTarget' instances. -# A main target is what the user explicitly defines in a Jamfile. It is -# possible to have several definitions for a main target, for example to have -# different lists of sources for different platforms. So, main targets -# keep a list of alternatives. -# -# Each alternative is an instance of 'AbstractTarget'. When a main target -# subvariant is defined by some rule, that rule will decide what class to -# use, create an instance of that class and add it to the list of alternatives -# for the main target. -# -# Rules supplied by the build system will use only targets derived -# from 'BasicTarget' class, which will provide some default behaviour. -# There will be two classes derived from it, 'make-target', created by the -# 'make' rule, and 'TypedTarget', created by rules such as 'exe' and 'dll'. 
- -# -# +------------------------+ -# |AbstractTarget | -# +========================+ -# |name | -# |project | -# | | -# |generate(properties) = 0| -# +-----------+------------+ -# | -# ^ -# / \ -# +-+-+ -# | -# | -# +------------------------+------+------------------------------+ -# | | | -# | | | -# +----------+-----------+ +------+------+ +------+-------+ -# | project_target | | MainTarget | | BasicTarget | -# +======================+ 1 * +=============+ alternatives +==============+ -# | generate(properties) |o-----------+ generate |<>------------->| generate | -# | main-target | +-------------+ | construct = 0| -# +----------------------+ +--------------+ -# | -# ^ -# / \ -# +-+-+ -# | -# | -# ...--+----------------+------------------+----------------+---+ -# | | | | -# | | | | -# ... ---+-----+ +------+-------+ +------+------+ +--------+-----+ -# | | TypedTarget | | make-target | | stage-target | -# . +==============+ +=============+ +==============+ -# . | construct | | construct | | construct | -# +--------------+ +-------------+ +--------------+ - -import re -import os.path -import sys - -from b2.manager import get_manager - -from b2.util.utility import * -import property, project, virtual_target, property_set, feature, generators, toolset -from virtual_target import Subvariant -from b2.exceptions import * -from b2.util.sequence import unique -from b2.util import path, bjam_signature -from b2.build.errors import user_error_checkpoint - -import b2.build.build_request as build_request - -import b2.util.set -_re_separate_target_from_properties = re.compile (r'^([^<]*)(/(<.*))?$') - -class TargetRegistry: - - def __init__ (self): - # All targets that are currently being built. - # Only the key is id (target), the value is the actual object. - self.targets_being_built_ = {} - - # Current indent for debugging messages - self.indent_ = "" - - self.debug_building_ = "--debug-building" in bjam.variable("ARGV") - - self.targets_ = [] - - def main_target_alternative (self, target): - """ Registers the specified target as a main target alternatives. - Returns 'target'. - """ - target.project ().add_alternative (target) - return target - - def main_target_sources (self, sources, main_target_name, no_renaming=0): - """Return the list of sources to use, if main target rule is invoked - with 'sources'. If there are any objects in 'sources', they are treated - as main target instances, and the name of such targets are adjusted to - be '<name_of_this_target>__<name_of_source_target>'. Such renaming - is disabled is non-empty value is passed for 'no-renaming' parameter.""" - result = [] - - for t in sources: - - t = b2.util.jam_to_value_maybe(t) - - if isinstance (t, AbstractTarget): - name = t.name () - - if not no_renaming: - name = main_target_name + '__' + name - t.rename (name) - - # Inline targets are not built by default. 
-                p = t.project()
-                p.mark_targets_as_explicit([name])
-                result.append(name)
-
-            else:
-                result.append (t)
-
-        return result
-
-
-    def main_target_requirements(self, specification, project):
-        """Returns the requirements to use when declaring a main target,
-         which are obtained by
-         - translating all specified property paths, and
-         - refining project requirements with the ones specified for the target
-
-         'specification' are the properties explicitly specified for a
-          main target
-         'project' is the project where the main target is to be declared."""
-
-        specification.extend(toolset.requirements())
-
-        requirements = property_set.refine_from_user_input(
-            project.get("requirements"), specification,
-            project.project_module(), project.get("location"))
-
-        return requirements
-
-    def main_target_usage_requirements (self, specification, project):
-        """ Returns the usage requirements to use when declaring a main target,
-         which are obtained by
-         - translating all specified property paths, and
-         - adding the project's usage requirements
-         specification:  Use-properties explicitly specified for a main target
-         project:        Project where the main target is to be declared
-        """
-        project_usage_requirements = project.get ('usage-requirements')
-
-        # We don't use 'refine-from-user-input' because I'm not sure if:
-        # - removing of parent's usage requirements makes sense
-        # - refining of usage requirements is not needed, since usage requirements
-        #   are always free.
-        usage_requirements = property_set.create_from_user_input(
-            specification, project.project_module(), project.get("location"))
-
-        return project_usage_requirements.add (usage_requirements)
-
-    def main_target_default_build (self, specification, project):
-        """ Return the default build value to use when declaring a main target,
-         which is obtained by using the specified value if not empty and the
-         parent's default build attribute otherwise.
-         specification:  Default build explicitly specified for a main target
-         project:        Project where the main target is to be declared
-        """
-        if specification:
-            return property_set.create_with_validation(specification)
-        else:
-            return project.get ('default-build')
-
-    def start_building (self, main_target_instance):
-        """ Helper rule to detect cycles in main target references.
-        """
-        if self.targets_being_built_.has_key(id(main_target_instance)):
-            names = []
-            for t in self.targets_being_built_.values() + [main_target_instance]:
-                names.append (t.full_name())
-
-            get_manager().errors()("Recursion in main target references\n")
-
-        self.targets_being_built_[id(main_target_instance)] = main_target_instance
-
-    def end_building (self, main_target_instance):
-        assert (self.targets_being_built_.has_key (id (main_target_instance)))
-        del self.targets_being_built_ [id (main_target_instance)]
-
-    def create_typed_target (self, type, project, name, sources, requirements, default_build, usage_requirements):
-        """ Creates a TypedTarget with the specified properties.
-            The 'name', 'sources', 'requirements', 'default_build' and
-            'usage_requirements' are assumed to be in the form specified
-            by the user in the Jamfile corresponding to 'project'.
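As an aside, start_building/end_building above amount to a re-entrancy guard. A self-contained sketch of the idea (a stack of names replaces the id()-keyed dict used in the real code):

    class CycleGuard:
        def __init__(self):
            self.building = []                   # targets currently being generated

        def start(self, target):
            if target in self.building:
                chain = " -> ".join(self.building + [target])
                raise RuntimeError("Recursion in main target references: " + chain)
            self.building.append(target)

        def end(self, target):
            assert self.building and self.building[-1] == target
            self.building.pop()

    guard = CycleGuard()
    guard.start("app"); guard.start("lib")       # nested builds are fine
    guard.end("lib"); guard.end("app")
    guard.start("a")
    try:
        guard.start("a")                         # a -> a: reference cycle
    except RuntimeError as e:
        print(e)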
- """ - return self.main_target_alternative (TypedTarget (name, project, type, - self.main_target_sources (sources, name), - self.main_target_requirements (requirements, project), - self.main_target_default_build (default_build, project), - self.main_target_usage_requirements (usage_requirements, project))) - - def increase_indent(self): - self.indent_ += " " - - def decrease_indent(self): - self.indent_ = self.indent_[0:-4] - - def logging(self): - return self.debug_building_ - - def log(self, message): - if self.debug_building_: - print self.indent_ + message - - def push_target(self, target): - self.targets_.append(target) - - def pop_target(self): - self.targets_ = self.targets_[:-1] - - def current(self): - return self.targets_[0] - - -class GenerateResult: - - def __init__ (self, ur=None, targets=None): - if not targets: - targets = [] - - self.__usage_requirements = ur - self.__targets = targets - assert all(isinstance(t, virtual_target.VirtualTarget) for t in targets) - - if not self.__usage_requirements: - self.__usage_requirements = property_set.empty () - - def usage_requirements (self): - return self.__usage_requirements - - def targets (self): - return self.__targets - - def extend (self, other): - assert (isinstance (other, GenerateResult)) - - self.__usage_requirements = self.__usage_requirements.add (other.usage_requirements ()) - self.__targets.extend (other.targets ()) - -class AbstractTarget: - """ Base class for all abstract targets. - """ - def __init__ (self, name, project, manager = None): - """ manager: the Manager object - name: name of the target - project: the project target to which this one belongs - manager:the manager object. If none, uses project.manager () - """ - assert (isinstance (project, ProjectTarget)) - # Note: it might seem that we don't need either name or project at all. - # However, there are places where we really need it. One example is error - # messages which should name problematic targets. Another is setting correct - # paths for sources and generated files. - - # Why allow manager to be specified? Because otherwise project target could not derive - # from this class. - if manager: - self.manager_ = manager - else: - self.manager_ = project.manager () - - self.name_ = name - self.project_ = project - - def manager (self): - return self.manager_ - - def name (self): - """ Returns the name of this target. - """ - return self.name_ - - def project (self): - """ Returns the project for this target. - """ - return self.project_ - - def location (self): - """ Return the location where the target was declared. - """ - return self.location_ - - def full_name (self): - """ Returns a user-readable name for this target. - """ - location = self.project ().get ('location') - return location + '/' + self.name_ - - def generate (self, property_set): - """ Takes a property set. Generates virtual targets for this abstract - target, using the specified properties, unless a different value of some - feature is required by the target. - On success, returns a GenerateResult instance with: - - a property_set with the usage requirements to be - applied to dependents - - a list of produced virtual targets, which may be - empty. - If 'property_set' is empty, performs default build of this - target, in a way specific to derived class. 
-        """
-        raise BaseException ("method should be defined in derived classes")
-
-    def rename (self, new_name):
-        self.name_ = new_name
-
-class ProjectTarget (AbstractTarget):
-    """ Project target class (derived from 'AbstractTarget')
-
-        This class has these responsibilities:
-        - maintaining a list of main targets in this project and
-          building them
-
-        Main targets are constructed in two stages:
-        - When a Jamfile is read, a number of calls to 'add_alternative' is made.
-          At that time, alternatives can also be renamed to account for inline
-          targets.
-        - The first time the 'main-target' or 'has-main-target' rule is called,
-          all alternatives are enumerated and main targets are created.
-    """
-    def __init__ (self, manager, name, project_module, parent_project, requirements, default_build):
-        AbstractTarget.__init__ (self, name, self, manager)
-
-        self.project_module_ = project_module
-        self.location_ = manager.projects().attribute (project_module, 'location')
-        self.requirements_ = requirements
-        self.default_build_ = default_build
-
-        self.build_dir_ = None
-
-        # A cache of IDs
-        self.ids_cache_ = {}
-
-        # True if main targets have already been built.
-        self.built_main_targets_ = False
-
-        # A list of the registered alternatives for this project.
-        self.alternatives_ = []
-
-        # A map from main target name to the target corresponding
-        # to it.
-        self.main_target_ = {}
-
-        # Targets marked as explicit.
-        self.explicit_targets_ = set()
-
-        # Targets marked as always.
-        self.always_targets_ = set()
-
-        # The constants defined for this project.
-        self.constants_ = {}
-
-        # Whether targets for all main targets are already created.
-        self.built_main_targets_ = 0
-
-        if parent_project:
-            self.inherit (parent_project)
-
-
-    # TODO: This is needed only by the 'make' rule. Need to find the
-    # way to make 'make' work without this method.
-    def project_module (self):
-        return self.project_module_
-
-    def get (self, attribute):
-        return self.manager().projects().attribute(
-            self.project_module_, attribute)
-
-    def build_dir (self):
-        if not self.build_dir_:
-            self.build_dir_ = self.get ('build-dir')
-            if not self.build_dir_:
-                self.build_dir_ = os.path.join(self.project_.get ('location'), 'bin')
-
-        return self.build_dir_
-
-    def generate (self, ps):
-        """ Generates all possible targets contained in this project.
-        """
-        self.manager_.targets().log(
-            "Building project '%s' with '%s'" % (self.name (), str(ps)))
-        self.manager_.targets().increase_indent ()
-
-        result = GenerateResult ()
-
-        for t in self.targets_to_build ():
-            g = t.generate (ps)
-            result.extend (g)
-
-        self.manager_.targets().decrease_indent ()
-        return result
-
-    def targets_to_build (self):
-        """ Computes and returns a list of AbstractTarget instances which
-            must be built when this project is built.
-        """
-        result = []
-
-        if not self.built_main_targets_:
-            self.build_main_targets ()
-
-        # Collect all main targets here, except for "explicit" ones.
-        for n, t in self.main_target_.iteritems ():
-            if not t.name () in self.explicit_targets_:
-                result.append (t)
-
-        # Collect all projects referenced via the "projects-to-build" attribute.
-        self_location = self.get ('location')
-        for pn in self.get ('projects-to-build'):
-            result.append (self.find(pn + "/"))
-
-        return result
-
-    def mark_targets_as_explicit (self, target_names):
-        """Add 'target_names' to the list of targets in this project
-        that should be built only by explicit request."""
-
-        # Record the name of the target, not the instance, since this
-        # rule is called before main target instances are created.
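The two-stage construction described in the docstring above can be condensed into a small sketch (registry details omitted; all names hypothetical):

    class ProjectSketch:
        def __init__(self):
            self.alternatives = []               # collected while reading the Jamfile
            self.explicit = set()
            self.main_targets = None             # built lazily, on first query

        def add_alternative(self, name, payload):
            if self.main_targets is not None:
                raise RuntimeError("add-alternative after main targets were built")
            self.alternatives.append((name, payload))

        def targets_to_build(self):
            if self.main_targets is None:        # stage two: group alternatives
                self.main_targets = {}
                for name, payload in self.alternatives:
                    self.main_targets.setdefault(name, []).append(payload)
            # Explicit targets are built only on request.
            return [n for n in self.main_targets if n not in self.explicit]

    p = ProjectSketch()
    p.add_alternative("app", "debug sources")
    p.add_alternative("app", "release sources")  # second alternative, same target
    p.add_alternative("gen", "helper tool")
    p.explicit.add("gen")
    print(p.targets_to_build())                  # ['app']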
- self.explicit_targets_.update(target_names) - - def mark_targets_as_always(self, target_names): - self.always_targets_.update(target_names) - - def add_alternative (self, target_instance): - """ Add new target alternative. - """ - if self.built_main_targets_: - raise IllegalOperation ("add-alternative called when main targets are already created for project '%s'" % self.full_name ()) - - self.alternatives_.append (target_instance) - - def main_target (self, name): - if not self.built_main_targets_: - self.build_main_targets() - - return self.main_target_[name] - - def has_main_target (self, name): - """Tells if a main target with the specified name exists.""" - if not self.built_main_targets_: - self.build_main_targets() - - return self.main_target_.has_key(name) - - def create_main_target (self, name): - """ Returns a 'MainTarget' class instance corresponding to the 'name'. - """ - if not self.built_main_targets_: - self.build_main_targets () - - return self.main_targets_.get (name, None) - - - def find_really(self, id): - """ Find and return the target with the specified id, treated - relative to self. - """ - result = None - current_location = self.get ('location') - - __re_split_project_target = re.compile (r'(.*)//(.*)') - split = __re_split_project_target.match (id) - - project_part = None - target_part = None - - if split: - project_part = split.group (1) - target_part = split.group (2) - - project_registry = self.project_.manager ().projects () - - extra_error_message = '' - if project_part: - # There's explicit project part in id. Looks up the - # project and pass the request to it. - pm = project_registry.find (project_part, current_location) - - if pm: - project_target = project_registry.target (pm) - result = project_target.find (target_part, no_error=1) - - else: - extra_error_message = "error: could not find project '$(project_part)'" - - else: - # Interpret target-name as name of main target - # Need to do this before checking for file. Consider this: - # - # exe test : test.cpp ; - # install s : test : <location>. ; - # - # After first build we'll have target 'test' in Jamfile and file - # 'test' on the disk. We need target to override the file. - - result = None - if self.has_main_target(id): - result = self.main_target(id) - - if not result: - result = FileReference (self.manager_, id, self.project_) - if not result.exists (): - # File actually does not exist. - # Reset 'target' so that an error is issued. - result = None - - - if not result: - # Interpret id as project-id - project_module = project_registry.find (id, current_location) - if project_module: - result = project_registry.target (project_module) - - return result - - def find (self, id, no_error = False): - v = self.ids_cache_.get (id, None) - - if not v: - v = self.find_really (id) - self.ids_cache_ [id] = v - - if v or no_error: - return v - - raise BaseException ("Unable to find file or target named '%s'\nreferred from project at '%s'" % (id, self.get ('location'))) - - - def build_main_targets (self): - self.built_main_targets_ = True - - for a in self.alternatives_: - name = a.name () - if not self.main_target_.has_key (name): - t = MainTarget (name, self.project_) - self.main_target_ [name] = t - - if name in self.always_targets_: - a.always() - - self.main_target_ [name].add_alternative (a) - - def add_constant(self, name, value, path=0): - """Adds a new constant for this project. - - The constant will be available for use in Jamfile - module for this project. 
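find_really's lookup order above is subtle enough to deserve a compressed restatement. A sketch under simplified assumptions (sets stand in for the project registry and the filesystem; the final project-id fallback is left to the caller):

    import re

    _SPLIT_PROJECT_TARGET = re.compile(r"(.*)//(.*)")

    def resolve_id(target_id, main_targets, files_on_disk):
        m = _SPLIT_PROJECT_TARGET.match(target_id)
        if m:                                    # explicit project part: delegate
            return ("project-target", m.group(1), m.group(2))
        if target_id in main_targets:            # a main target shadows a file:
            return ("main-target", target_id)    # 'exe test' beats the file 'test'
        if target_id in files_on_disk:
            return ("file", target_id)
        return None                              # caller tries it as a project id

    print(resolve_id("test", {"test"}, {"test"}))  # ('main-target', 'test')
    print(resolve_id("util//lib", set(), set()))   # ('project-target', 'util', 'lib')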
If 'path' is true, - the constant will be interpreted relatively - to the location of project. - """ - - if path: - l = self.location_ - if not l: - # Project corresponding to config files do not have - # 'location' attribute, but do have source location. - # It might be more reasonable to make every project have - # a location and use some other approach to prevent buildable - # targets in config files, but that's for later. - l = get('source-location') - - value = os.path.join(l, value) - # Now make the value absolute path - value = os.path.join(os.getcwd(), value) - - self.constants_[name] = value - bjam.call("set-variable", self.project_module(), name, value) - - def inherit(self, parent_project): - for c in parent_project.constants_: - # No need to pass the type. Path constants were converted to - # absolute paths already by parent. - self.add_constant(c, parent_project.constants_[c]) - - # Import rules from parent - this_module = self.project_module() - parent_module = parent_project.project_module() - - rules = bjam.call("RULENAMES", parent_module) - if not rules: - rules = [] - user_rules = [x for x in rules - if x not in self.manager().projects().project_rules().all_names()] - if user_rules: - bjam.call("import-rules-from-parent", parent_module, this_module, user_rules) - -class MainTarget (AbstractTarget): - """ A named top-level target in Jamfile. - """ - def __init__ (self, name, project): - AbstractTarget.__init__ (self, name, project) - self.alternatives_ = [] - self.default_build_ = property_set.empty () - - def add_alternative (self, target): - """ Add a new alternative for this target. - """ - d = target.default_build () - - if self.alternatives_ and self.default_build_ != d: - get_manager().errors()("default build must be identical in all alternatives\n" - "main target is '%s'\n" - "with '%s'\n" - "differing from previous default build: '%s'" % (self.full_name (), d.raw (), self.default_build_.raw ())) - - else: - self.default_build_ = d - - self.alternatives_.append (target) - - def __select_alternatives (self, property_set, debug): - """ Returns the best viable alternative for this property_set - See the documentation for selection rules. - # TODO: shouldn't this be 'alternative' (singular)? - """ - # When selecting alternatives we have to consider defaults, - # for example: - # lib l : l.cpp : <variant>debug ; - # lib l : l_opt.cpp : <variant>release ; - # won't work unless we add default value <variant>debug. - property_set = property_set.add_defaults () - - # The algorithm: we keep the current best viable alternative. - # When we've got new best viable alternative, we compare it - # with the current one. 
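Returning to add_constant above: its path handling reduces to two joins. A minimal sketch of just that step (the example project location is hypothetical):

    import os

    def resolve_path_constant(project_location, value):
        # A path constant is first interpreted relative to the project
        # location, then made absolute against the current directory.
        value = os.path.join(project_location, value)
        return os.path.join(os.getcwd(), value)

    print(resolve_path_constant("src/app", "data"))
    # e.g. /home/user/build/src/app/data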
- best = None - best_properties = None - - if len (self.alternatives_) == 0: - return None - - if len (self.alternatives_) == 1: - return self.alternatives_ [0] - - if debug: - print "Property set for selection:", property_set - - for v in self.alternatives_: - properties = v.match (property_set, debug) - - if properties is not None: - if not best: - best = v - best_properties = properties - - else: - if b2.util.set.equal (properties, best_properties): - return None - - elif b2.util.set.contains (properties, best_properties): - # Do nothing, this alternative is worse - pass - - elif b2.util.set.contains (best_properties, properties): - best = v - best_properties = properties - - else: - return None - - return best - - def apply_default_build (self, property_set): - return apply_default_build(property_set, self.default_build_) - - def generate (self, ps): - """ Select an alternative for this main target, by finding all alternatives - which requirements are satisfied by 'properties' and picking the one with - longest requirements set. - Returns the result of calling 'generate' on that alternative. - """ - self.manager_.targets ().start_building (self) - - # We want composite properties in build request act as if - # all the properties it expands too are explicitly specified. - ps = ps.expand () - - all_property_sets = self.apply_default_build (ps) - - result = GenerateResult () - - for p in all_property_sets: - result.extend (self.__generate_really (p)) - - self.manager_.targets ().end_building (self) - - return result - - def __generate_really (self, prop_set): - """ Generates the main target with the given property set - and returns a list which first element is property_set object - containing usage_requirements of generated target and with - generated virtual target in other elements. It's possible - that no targets are generated. - """ - best_alternative = self.__select_alternatives (prop_set, debug=0) - - if not best_alternative: - # FIXME: revive. - # self.__select_alternatives(prop_set, debug=1) - self.manager_.errors()( - "No best alternative for '%s'.\n" - % (self.full_name(),)) - - result = best_alternative.generate (prop_set) - - # Now return virtual targets for the only alternative - return result - - def rename(self, new_name): - AbstractTarget.rename(self, new_name) - for a in self.alternatives_: - a.rename(new_name) - -class FileReference (AbstractTarget): - """ Abstract target which refers to a source file. - This is artificial creature; it's usefull so that sources to - a target can be represented as list of abstract target instances. - """ - def __init__ (self, manager, file, project): - AbstractTarget.__init__ (self, file, project) - self.file_location_ = None - - def generate (self, properties): - return GenerateResult (None, [ - self.manager_.virtual_targets ().from_file ( - self.name_, self.location(), self.project_) ]) - - def exists (self): - """ Returns true if the referred file really exists. - """ - if self.location (): - return True - else: - return False - - def location (self): - # Returns the location of target. 
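__select_alternatives above implements a "most specific condition wins" rule. A sketch with frozensets as property sets, where the subset operators model the containment checks in the real code:

    def select_alternative(alternatives, build_properties):
        best, best_props = None, None
        for alt, condition in alternatives:
            if not condition <= build_properties:    # condition not satisfied
                continue
            if best is None or best_props < condition:
                best, best_props = alt, condition    # strictly more specific
            elif not condition < best_props:
                return None                          # equal or incomparable: ambiguous
        return best

    alts = [("generic", frozenset()),
            ("release", frozenset(["<variant>release"]))]
    print(select_alternative(alts, frozenset(["<variant>release", "<toolset>gcc"])))
    # 'release': its condition strictly contains the generic one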
Needed by 'testing.jam' - if not self.file_location_: - source_location = self.project_.get('source-location') - - for src_dir in source_location: - location = os.path.join(src_dir, self.name()) - if os.path.isfile(location): - self.file_location_ = src_dir - self.file_path = location - break - - return self.file_location_ - -def resolve_reference(target_reference, project): - """ Given a target_reference, made in context of 'project', - returns the AbstractTarget instance that is referred to, as well - as properties explicitly specified for this reference. - """ - # Separate target name from properties override - split = _re_separate_target_from_properties.match (target_reference) - if not split: - raise BaseException ("Invalid reference: '%s'" % target_reference) - - id = split.group (1) - - sproperties = [] - - if split.group (3): - sproperties = property.create_from_strings(feature.split(split.group(3))) - sproperties = feature.expand_composites(sproperties) - - # Find the target - target = project.find (id) - - return (target, property_set.create(sproperties)) - -def generate_from_reference(target_reference, project, property_set): - """ Attempts to generate the target given by target reference, which - can refer both to a main target or to a file. - Returns a list consisting of - - usage requirements - - generated virtual targets, if any - target_reference: Target reference - project: Project where the reference is made - property_set: Properties of the main target that makes the reference - """ - target, sproperties = resolve_reference(target_reference, project) - - # Take properties which should be propagated and refine them - # with source-specific requirements. - propagated = property_set.propagated() - rproperties = propagated.refine(sproperties) - - return target.generate(rproperties) - - - -class BasicTarget (AbstractTarget): - """ Implements the most standard way of constructing main target - alternative from sources. Allows sources to be either file or - other main target and handles generation of those dependency - targets. - """ - def __init__ (self, name, project, sources, requirements = None, default_build = None, usage_requirements = None): - AbstractTarget.__init__ (self, name, project) - - for s in sources: - if get_grist (s): - raise InvalidSource ("property '%s' found in the 'sources' parameter for '%s'" % (s, name)) - - self.sources_ = sources - - if not requirements: requirements = property_set.empty () - self.requirements_ = requirements - - if not default_build: default_build = property_set.empty () - self.default_build_ = default_build - - if not usage_requirements: usage_requirements = property_set.empty () - self.usage_requirements_ = usage_requirements - - # A cache for resolved references - self.source_targets_ = None - - # A cache for generated targets - self.generated_ = {} - - # A cache for build requests - self.request_cache = {} - - # Result of 'capture_user_context' has everything. For example, if this - # target is declare as result of loading Jamfile which was loaded when - # building target B which was requested from A, then we'll have A, B and - # Jamroot location in context. We only care about Jamroot location, most - # of the times. - self.user_context_ = self.manager_.errors().capture_user_context()[-1:] - - self.always_ = False - - def always(self): - self.always_ = True - - def sources (self): - """ Returns the list of AbstractTargets which are used as sources. - The extra properties specified for sources are not represented. 
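resolve_reference's first step, splitting 'lib1/<link>static' into an id and property overrides, is compact enough to demonstrate directly with the same regular expression:

    import re

    _REF = re.compile(r'^([^<]*)(/(<.*))?$')

    def split_reference(reference):
        m = _REF.match(reference)
        if not m:
            raise ValueError("Invalid reference: %r" % reference)
        overrides = m.group(3).split("/") if m.group(3) else []
        return m.group(1), overrides

    print(split_reference("lib1/<link>static"))   # ('lib1', ['<link>static'])
    print(split_reference("lib1"))                # ('lib1', [])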
-            The only use of this rule at the moment is the '--dump-tests'
-            feature of the test system.
-        """
-        if self.source_targets_ == None:
-            self.source_targets_ = []
-            for s in self.sources_:
-                self.source_targets_.append(resolve_reference(s, self.project_)[0])
-
-        return self.source_targets_
-
-    def requirements (self):
-        return self.requirements_
-
-    def default_build (self):
-        return self.default_build_
-
-    def common_properties (self, build_request, requirements):
-        """ Given a build request and requirements, return the properties
-            common to the dependency build request and target build
-            properties.
-        """
-        # For optimization, we add free unconditional requirements directly,
-        # without using the complex algorithm.
-        # This gives the complex algorithm a better chance of caching results.
-        # The exact effect of this "optimization" is no longer clear.
-        free_unconditional = []
-        other = []
-        for p in requirements.all():
-            if p.feature().free() and not p.condition() and p.feature().name() != 'conditional':
-                free_unconditional.append(p)
-            else:
-                other.append(p)
-        other = property_set.create(other)
-
-        key = (build_request, other)
-        if not self.request_cache.has_key(key):
-            self.request_cache[key] = self.__common_properties2 (build_request, other)
-
-        return self.request_cache[key].add_raw(free_unconditional)
-
-    # Given 'context' -- a set of already present properties, and 'requirements',
-    # decide which extra properties should be applied to 'context'.
-    # For conditional requirements, this means evaluating the condition. For
-    # indirect conditional requirements, this means calling a rule. Ordinary
-    # requirements are always applied.
-    #
-    # Handles the situation where evaluating one conditional requirement affects
-    # the condition of another conditional requirement, for example:
-    #
-    #     <toolset>gcc:<variant>release <variant>release:<define>RELEASE
-    #
-    # If 'what' is 'refined', returns the context refined with new requirements.
-    # If 'what' is 'added', returns just the requirements that must be applied.
-    def evaluate_requirements(self, requirements, context, what):
-        # Apply non-conditional requirements.
-        # It's possible that further conditional requirements change
-        # a value set by non-conditional requirements. For example:
-        #
-        #    exe a : a.cpp : <threading>single <toolset>foo:<threading>multi ;
-        #
-        # I'm not sure if this should be an error, or not, especially given that
-        #
-        #    <threading>single
-        #
-        # might come from the project's requirements.
-        unconditional = feature.expand(requirements.non_conditional())
-
-        context = context.refine(property_set.create(unconditional))
-
-        # We've collected properties that surely must be present in common
-        # properties. We now try to figure out what other properties
-        # should be added in order to satisfy rules (4)-(6) from the docs.
-
-        conditionals = property_set.create(requirements.conditional())
-
-        # It is assumed that #conditionals iterations
-        # should be enough for properties to propagate along conditions in any
-        # direction.
-        max_iterations = len(conditionals.all()) +\
-                         len(requirements.get("<conditional>")) + 1
-
-        added_requirements = []
-        current = context
-
-        # It's assumed that ordinary conditional requirements can't add
-        # <indirect-conditional> properties, and that rules referred to
-        # by <indirect-conditional> properties can't add new
-        # <indirect-conditional> properties. So the list of indirect conditionals
-        # does not change.
- indirect = requirements.get("<conditional>") - - ok = 0 - for i in range(0, max_iterations): - - e = conditionals.evaluate_conditionals(current).all()[:] - - # Evaluate indirect conditionals. - for i in indirect: - i = b2.util.jam_to_value_maybe(i) - if callable(i): - # This is Python callable, yeah. - e.extend(i(current)) - else: - # Name of bjam function. Because bjam is unable to handle - # list of Property, pass list of strings. - br = b2.util.call_jam_function(i[1:], [str(p) for p in current.all()]) - if br: - e.extend(property.create_from_strings(br)) - - if e == added_requirements: - # If we got the same result, we've found final properties. - ok = 1 - break - else: - # Oops, results of evaluation of conditionals has changed. - # Also 'current' contains leftover from previous evaluation. - # Recompute 'current' using initial properties and conditional - # requirements. - added_requirements = e - current = context.refine(property_set.create(feature.expand(e))) - - if not ok: - self.manager().errors()("Can't evaluate conditional properties " - + str(conditionals)) - - - if what == "added": - return property_set.create(unconditional + added_requirements) - elif what == "refined": - return current - else: - self.manager().errors("Invalid value of the 'what' parameter") - - def __common_properties2(self, build_request, requirements): - # This guarantees that default properties are present - # in result, unless they are overrided by some requirement. - # TODO: There is possibility that we've added <foo>bar, which is composite - # and expands to <foo2>bar2, but default value of <foo2> is not bar2, - # in which case it's not clear what to do. - # - build_request = build_request.add_defaults() - # Featured added by 'add-default' can be composite and expand - # to features without default values -- so they are not added yet. - # It could be clearer/faster to expand only newly added properties - # but that's not critical. - build_request = build_request.expand() - - return self.evaluate_requirements(requirements, build_request, - "refined") - - def match (self, property_set, debug): - """ Returns the alternative condition for this alternative, if - the condition is satisfied by 'property_set'. - """ - # The condition is composed of all base non-conditional properties. - # It's not clear if we should expand 'self.requirements_' or not. - # For one thing, it would be nice to be able to put - # <toolset>msvc-6.0 - # in requirements. - # On the other hand, if we have <variant>release in condition it - # does not make sense to require <optimization>full to be in - # build request just to select this variant. 
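The loop in evaluate_requirements above is a fixed-point computation: keep re-evaluating conditionals until the added set stops changing, because one conditional's result (<variant>release) can enable another (<variant>release:<define>RELEASE). A stripped-down sketch, with conditions as sets of properties and indirect conditionals omitted:

    def evaluate_conditionals(conditional_requirements, context):
        added = []
        current = set(context)
        # One extra pass over #conditionals is enough for values to
        # propagate along conditions in any direction.
        for _ in range(len(conditional_requirements) + 1):
            new = [value for condition, value in conditional_requirements
                   if condition <= current]
            if new == added:
                return current                       # fixed point reached
            added = new
            current = set(context) | set(added)      # restart from the context
        raise RuntimeError("Can't evaluate conditional properties")

    reqs = [(set(["<toolset>gcc"]), "<variant>release"),
            (set(["<variant>release"]), "<define>RELEASE")]
    print(sorted(evaluate_conditionals(reqs, ["<toolset>gcc"])))
    # ['<define>RELEASE', '<toolset>gcc', '<variant>release']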
- bcondition = self.requirements_.base () - ccondition = self.requirements_.conditional () - condition = b2.util.set.difference (bcondition, ccondition) - - if debug: - print " next alternative: required properties:", [str(p) for p in condition] - - if b2.util.set.contains (condition, property_set.all()): - - if debug: - print " matched" - - return condition - - else: - return None - - - def generate_dependency_targets (self, target_ids, property_set): - targets = [] - usage_requirements = [] - for id in target_ids: - - result = generate_from_reference(id, self.project_, property_set) - targets += result.targets() - usage_requirements += result.usage_requirements().all() - - return (targets, usage_requirements) - - def generate_dependency_properties(self, properties, ps): - """ Takes a target reference, which might be either target id - or a dependency property, and generates that target using - 'property_set' as build request. - - Returns a tuple (result, usage_requirements). - """ - result_properties = [] - usage_requirements = [] - for p in properties: - - result = generate_from_reference(p.value(), self.project_, ps) - - for t in result.targets(): - result_properties.append(property.Property(p.feature(), t)) - - usage_requirements += result.usage_requirements().all() - - return (result_properties, usage_requirements) - - - - - @user_error_checkpoint - def generate (self, ps): - """ Determines final build properties, generates sources, - and calls 'construct'. This method should not be - overridden. - """ - self.manager_.errors().push_user_context( - "Generating target " + self.full_name(), self.user_context_) - - if self.manager().targets().logging(): - self.manager().targets().log( - "Building target '%s'" % self.name_) - self.manager().targets().increase_indent () - self.manager().targets().log( - "Build request: '%s'" % str (ps.raw ())) - cf = self.manager().command_line_free_features() - self.manager().targets().log( - "Command line free features: '%s'" % str (cf.raw ())) - self.manager().targets().log( - "Target requirements: %s'" % str (self.requirements().raw ())) - - self.manager().targets().push_target(self) - - if not self.generated_.has_key(ps): - - # Apply free features form the command line. If user - # said - # define=FOO - # he most likely want this define to be set for all compiles. - ps = ps.refine(self.manager().command_line_free_features()) - rproperties = self.common_properties (ps, self.requirements_) - - self.manager().targets().log( - "Common properties are '%s'" % str (rproperties)) - - if rproperties.get("<build>") != ["no"]: - - result = GenerateResult () - - properties = rproperties.non_dependency () - - (p, u) = self.generate_dependency_properties (rproperties.dependency (), rproperties) - properties += p - assert all(isinstance(p, property.Property) for p in properties) - usage_requirements = u - - (source_targets, u) = self.generate_dependency_targets (self.sources_, rproperties) - usage_requirements += u - - self.manager_.targets().log( - "Usage requirements for '%s' are '%s'" % (self.name_, usage_requirements)) - - # FIXME: - - rproperties = property_set.create(properties + usage_requirements) - usage_requirements = property_set.create (usage_requirements) - - self.manager_.targets().log( - "Build properties: '%s'" % str(rproperties)) - - source_targets += rproperties.get('<source>') - - # We might get duplicate sources, for example if - # we link to two library which have the same <library> in - # usage requirements. 
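Stepping back: the self.generated_ cache in the generate method above is plain memoization keyed by the build request, with failure cached as <build>no so dependents get a stable answer. A condensed sketch of that contract (construct is a stand-in callable):

    class MemoizedTarget:
        def __init__(self, construct):
            self.construct = construct           # property set -> result, or None
            self.generated_ = {}

        def generate(self, property_set):
            key = frozenset(property_set)
            if key not in self.generated_:
                result = self.construct(property_set)
                if result is None:               # failed: poison the cache
                    result = frozenset(["<build>no"])
                self.generated_[key] = result
            return self.generated_[key]          # "Already built" afterwards

    target = MemoizedTarget(lambda ps: sorted(ps))
    print(target.generate(["<variant>release"]))  # computed once
    print(target.generate(["<variant>release"]))  # replayed from the cache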
- # Use stable sort, since for some targets the order is - # important. E.g. RUN_PY target need python source to come - # first. - source_targets = unique(source_targets, stable=True) - - # FIXME: figure why this call messes up source_targets in-place - result = self.construct (self.name_, source_targets[:], rproperties) - - if result: - assert len(result) == 2 - gur = result [0] - result = result [1] - - if self.always_: - for t in result: - t.always() - - s = self.create_subvariant ( - result, - self.manager().virtual_targets().recent_targets(), ps, - source_targets, rproperties, usage_requirements) - self.manager().virtual_targets().clear_recent_targets() - - ur = self.compute_usage_requirements (s) - ur = ur.add (gur) - s.set_usage_requirements (ur) - - self.manager_.targets().log ( - "Usage requirements from '%s' are '%s'" % - (self.name(), str(rproperties))) - - self.generated_[ps] = GenerateResult (ur, result) - else: - self.generated_[ps] = GenerateResult (property_set.empty(), []) - else: - # If we just see <build>no, we cannot produce any reasonable - # diagnostics. The code that adds this property is expected - # to explain why a target is not built, for example using - # the configure.log-component-configuration function. - - # If this target fails to build, add <build>no to properties - # to cause any parent target to fail to build. Except that it - # - does not work now, since we check for <build>no only in - # common properties, but not in properties that came from - # dependencies - # - it's not clear if that's a good idea anyway. The alias - # target, for example, should not fail to build if a dependency - # fails. - self.generated_[ps] = GenerateResult( - property_set.create(["<build>no"]), []) - else: - self.manager().targets().log ("Already built") - - self.manager().targets().pop_target() - self.manager().targets().decrease_indent() - - return self.generated_[ps] - - def compute_usage_requirements (self, subvariant): - """ Given the set of generated targets, and refined build - properties, determines and sets appripriate usage requirements - on those targets. - """ - rproperties = subvariant.build_properties () - xusage_requirements =self.evaluate_requirements( - self.usage_requirements_, rproperties, "added") - - # We generate all dependency properties and add them, - # as well as their usage requirements, to result. - (r1, r2) = self.generate_dependency_properties(xusage_requirements.dependency (), rproperties) - extra = r1 + r2 - - result = property_set.create (xusage_requirements.non_dependency () + extra) - - # Propagate usage requirements we've got from sources, except - # for the <pch-header> and <pch-file> features. - # - # That feature specifies which pch file to use, and should apply - # only to direct dependents. Consider: - # - # pch pch1 : ... - # lib lib1 : ..... pch1 ; - # pch pch2 : - # lib lib2 : pch2 lib1 ; - # - # Here, lib2 should not get <pch-header> property from pch1. - # - # Essentially, when those two features are in usage requirements, - # they are propagated only to direct dependents. We might need - # a more general mechanism, but for now, only those two - # features are special. 
- raw = subvariant.sources_usage_requirements().raw() - raw = property.change(raw, "<pch-header>", None); - raw = property.change(raw, "<pch-file>", None); - result = result.add(property_set.create(raw)) - - return result - - def create_subvariant (self, root_targets, all_targets, - build_request, sources, - rproperties, usage_requirements): - """Creates a new subvariant-dg instances for 'targets' - - 'root-targets' the virtual targets will be returned to dependents - - 'all-targets' all virtual - targets created while building this main target - - 'build-request' is property-set instance with - requested build properties""" - - for e in root_targets: - e.root (True) - - s = Subvariant (self, build_request, sources, - rproperties, usage_requirements, all_targets) - - for v in all_targets: - if not v.creating_subvariant(): - v.creating_subvariant(s) - - return s - - def construct (self, name, source_targets, properties): - """ Constructs the virtual targets for this abstract targets and - the dependecy graph. Returns a tuple consisting of the properties and the list of virtual targets. - Should be overrided in derived classes. - """ - raise BaseException ("method should be defined in derived classes") - - -class TypedTarget (BasicTarget): - import generators - - def __init__ (self, name, project, type, sources, requirements, default_build, usage_requirements): - BasicTarget.__init__ (self, name, project, sources, requirements, default_build, usage_requirements) - self.type_ = type - - def __jam_repr__(self): - return b2.util.value_to_jam(self) - - def type (self): - return self.type_ - - def construct (self, name, source_targets, prop_set): - - r = generators.construct (self.project_, name, self.type_, - prop_set.add_raw(['<main-target-type>' + self.type_]), - source_targets, True) - - if not r: - print "warning: Unable to construct '%s'" % self.full_name () - - # Are there any top-level generators for this type/property set. - if not generators.find_viable_generators (self.type_, prop_set): - print "error: no generators were found for type '" + self.type_ + "'" - print "error: and the requested properties" - print "error: make sure you've configured the needed tools" - print "See http://boost.org/boost-build2/doc/html/bbv2/advanced/configuration.html" - - print "To debug this problem, try the --debug-generators option." - sys.exit(1) - - return r - -def apply_default_build(property_set, default_build): - # 1. First, see what properties from default_build - # are already present in property_set. - - specified_features = set(p.feature() for p in property_set.all()) - - defaults_to_apply = [] - for d in default_build.all(): - if not d.feature() in specified_features: - defaults_to_apply.append(d) - - # 2. If there's any defaults to be applied, form the new - # build request. Pass it throw 'expand-no-defaults', since - # default_build might contain "release debug", which will - # result in two property_sets. - result = [] - if defaults_to_apply: - - # We have to compress subproperties here to prevent - # property lists like: - # - # <toolset>msvc <toolset-msvc:version>7.1 <threading>multi - # - # from being expanded into: - # - # <toolset-msvc:version>7.1/<threading>multi - # <toolset>msvc/<toolset-msvc:version>7.1/<threading>multi - # - # due to cross-product property combination. That may - # be an indication that - # build_request.expand-no-defaults is the wrong rule - # to use here. 
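apply_default_build's core decision, applying a default only when the request is silent about that feature, fits in a few lines. A sketch with (feature, value) pairs; the cross-product expansion through expand-no-defaults is deliberately left out:

    def apply_defaults(requested, default_build):
        specified = set(feature for feature, _ in requested)
        extra = [(f, v) for f, v in default_build if f not in specified]
        return requested + extra

    request = [("<toolset>", "gcc")]
    defaults = [("<variant>", "debug"), ("<toolset>", "msvc")]
    print(apply_defaults(request, defaults))
    # [('<toolset>', 'gcc'), ('<variant>', 'debug')] -- the <toolset>
    # default is ignored because the request already sets that feature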
- compressed = feature.compress_subproperties(property_set.all()) - - result = build_request.expand_no_defaults( - b2.build.property_set.create([p]) for p in (compressed + defaults_to_apply)) - - else: - result.append (property_set) - - return result - - -def create_typed_metatarget(name, type, sources, requirements, default_build, usage_requirements): - - from b2.manager import get_manager - t = get_manager().targets() - - project = get_manager().projects().current() - - return t.main_target_alternative( - TypedTarget(name, project, type, - t.main_target_sources(sources, name), - t.main_target_requirements(requirements, project), - t.main_target_default_build(default_build, project), - t.main_target_usage_requirements(usage_requirements, project))) - - -def create_metatarget(klass, name, sources, requirements=[], default_build=[], usage_requirements=[]): - from b2.manager import get_manager - t = get_manager().targets() - - project = get_manager().projects().current() - - return t.main_target_alternative( - klass(name, project, - t.main_target_sources(sources, name), - t.main_target_requirements(requirements, project), - t.main_target_default_build(default_build, project), - t.main_target_usage_requirements(usage_requirements, project))) - -def metatarget_function_for_class(class_): - - @bjam_signature((["name"], ["sources", "*"], ["requirements", "*"], - ["default_build", "*"], ["usage_requirements", "*"])) - def create_metatarget(name, sources, requirements = [], default_build = None, usage_requirements = []): - - from b2.manager import get_manager - t = get_manager().targets() - - project = get_manager().projects().current() - - return t.main_target_alternative( - class_(name, project, - t.main_target_sources(sources, name), - t.main_target_requirements(requirements, project), - t.main_target_default_build(default_build, project), - t.main_target_usage_requirements(usage_requirements, project))) - - return create_metatarget diff --git a/jam-files/boost-build/build/toolset.jam b/jam-files/boost-build/build/toolset.jam deleted file mode 100644 index f2036d99..00000000 --- a/jam-files/boost-build/build/toolset.jam +++ /dev/null @@ -1,502 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2005 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Support for toolset definition. - -import errors ; -import feature ; -import generators ; -import numbers ; -import path ; -import property ; -import regex ; -import sequence ; -import set ; - - -.flag-no = 1 ; - -.ignore-requirements = ; - -# This is used only for testing, to make sure we do not get random extra -# elements in paths. -if --ignore-toolset-requirements in [ modules.peek : ARGV ] -{ - .ignore-requirements = 1 ; -} - - -# Initializes an additional toolset-like module. First load the 'toolset-module' -# and then calls its 'init' rule with trailing arguments. -# -rule using ( toolset-module : * ) -{ - import $(toolset-module) ; - $(toolset-module).init $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; -} - - -# Expands subfeatures in each property sets, e.g. '<toolset>gcc-3.2' will be -# converted to '<toolset>gcc/<toolset-version>3.2'. 
-# -local rule normalize-condition ( property-sets * ) -{ - local result ; - for local p in $(property-sets) - { - local split = [ feature.split $(p) ] ; - local expanded = [ feature.expand-subfeatures [ feature.split $(p) ] ] ; - result += $(expanded:J=/) ; - } - return $(result) ; -} - - -# Specifies if the 'flags' rule should check that the invoking module is the -# same as the module we are setting the flag for. 'v' can be either 'checked' or -# 'unchecked'. Subsequent call to 'pop-checking-for-flags-module' will restore -# the setting that was in effect before calling this rule. -# -rule push-checking-for-flags-module ( v ) -{ - .flags-module-checking = $(v) $(.flags-module-checking) ; -} - -rule pop-checking-for-flags-module ( ) -{ - .flags-module-checking = $(.flags-module-checking[2-]) ; -} - - -# Specifies the flags (variables) that must be set on targets under certain -# conditions, described by arguments. -# -rule flags ( - rule-or-module # If contains a dot, should be a rule name. The flags will - # be applied when that rule is used to set up build - # actions. - # - # If does not contain dot, should be a module name. The - # flag will be applied for all rules in that module. If - # module for rule is different from the calling module, an - # error is issued. - - variable-name # Variable that should be set on target. - condition * : # A condition when this flag should be applied. Should be a - # set of property sets. If one of those property sets is - # contained in the build properties, the flag will be used. - # Implied values are not allowed: "<toolset>gcc" should be - # used, not just "gcc". Subfeatures, like in - # "<toolset>gcc-3.2" are allowed. If left empty, the flag - # will be used unconditionally. - # - # Propery sets may use value-less properties ('<a>' vs. - # '<a>value') to match absent properties. This allows to - # separately match: - # - # <architecture>/<address-model>64 - # <architecture>ia64/<address-model> - # - # Where both features are optional. Without this syntax - # we would be forced to define "default" values. - - values * : # The value to add to variable. If <feature> is specified, - # then the value of 'feature' will be added. - unchecked ? # If value 'unchecked' is passed, will not test that flags - # are set for the calling module. - : hack-hack ? # For - # flags rule OPTIONS <cxx-abi> : -model ansi - # Treat <cxx-abi> as condition - # FIXME: ugly hack. -) -{ - local caller = [ CALLER_MODULE ] ; - if ! [ MATCH ".*([.]).*" : $(rule-or-module) ] - && [ MATCH "(Jamfile<.*)" : $(caller) ] - { - # Unqualified rule name, used inside Jamfile. Most likely used with - # 'make' or 'notfile' rules. This prevents setting flags on the entire - # Jamfile module (this will be considered as rule), but who cares? - # Probably, 'flags' rule should be split into 'flags' and - # 'flags-on-module'. - rule-or-module = $(caller).$(rule-or-module) ; - } - else - { - local module_ = [ MATCH "([^.]*).*" : $(rule-or-module) ] ; - if $(unchecked) != unchecked - && $(.flags-module-checking[1]) != unchecked - && $(module_) != $(caller) - { - errors.error "Module $(caller) attempted to set flags for module $(module_)" ; - } - } - - if $(condition) && ! $(condition:G=) && ! $(hack-hack) - { - # We have condition in the form '<feature>', that is, without value. 
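The subfeature expansion performed by normalize-condition at the top of this hunk looks roughly like this in Python; the subfeature table is a hypothetical stand-in for feature.expand-subfeatures:

    def normalize_condition(prop, subfeature_of):
        # '<toolset>gcc-3.2' becomes '<toolset>gcc/<toolset-version>3.2'.
        feature, value = prop[1:].split(">", 1)
        if "-" in value and feature in subfeature_of:
            main, sub = value.split("-", 1)
            return "<%s>%s/<%s>%s" % (feature, main, subfeature_of[feature], sub)
        return prop

    print(normalize_condition("<toolset>gcc-3.2", {"toolset": "toolset-version"}))
    # <toolset>gcc/<toolset-version>3.2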
- # That is an older syntax: - # flags gcc.link RPATH <dll-path> ; - # for compatibility, convert it to - # flags gcc.link RPATH : <dll-path> ; - values = $(condition) ; - condition = ; - } - - if $(condition) - { - property.validate-property-sets $(condition) ; - condition = [ normalize-condition $(condition) ] ; - } - - add-flag $(rule-or-module) : $(variable-name) : $(condition) : $(values) ; -} - - -# Adds a new flag setting with the specified values. Does no checking. -# -local rule add-flag ( rule-or-module : variable-name : condition * : values * ) -{ - .$(rule-or-module).flags += $(.flag-no) ; - - # Store all flags for a module. - local module_ = [ MATCH "([^.]*).*" : $(rule-or-module) ] ; - .module-flags.$(module_) += $(.flag-no) ; - # Store flag-no -> rule-or-module mapping. - .rule-or-module.$(.flag-no) = $(rule-or-module) ; - - .$(rule-or-module).variable.$(.flag-no) += $(variable-name) ; - .$(rule-or-module).values.$(.flag-no) += $(values) ; - .$(rule-or-module).condition.$(.flag-no) += $(condition) ; - - .flag-no = [ numbers.increment $(.flag-no) ] ; -} - - -# Returns the first element of 'property-sets' which is a subset of -# 'properties' or an empty list if no such element exists. -# -rule find-property-subset ( property-sets * : properties * ) -{ - # Cut property values off. - local prop-keys = $(properties:G) ; - - local result ; - for local s in $(property-sets) - { - if ! $(result) - { - # Handle value-less properties like '<architecture>' (compare with - # '<architecture>x86'). - - local set = [ feature.split $(s) ] ; - - # Find the set of features that - # - have no property specified in required property set - # - are omitted in the build property set. - local default-props ; - for local i in $(set) - { - # If $(i) is a value-less property it should match default value - # of an optional property. See the first line in the example - # below: - # - # property set properties result - # <a> <b>foo <b>foo match - # <a> <b>foo <a>foo <b>foo no match - # <a>foo <b>foo <b>foo no match - # <a>foo <b>foo <a>foo <b>foo match - if ! ( $(i:G=) || ( $(i:G) in $(prop-keys) ) ) - { - default-props += $(i) ; - } - } - - if $(set) in $(properties) $(default-props) - { - result = $(s) ; - } - } - } - return $(result) ; -} - - -# Returns a value to be added to some flag for some target based on the flag's -# value definition and the given target's property set. -# -rule handle-flag-value ( value * : properties * ) -{ - local result ; - if $(value:G) - { - local matches = [ property.select $(value) : $(properties) ] ; - for local p in $(matches) - { - local att = [ feature.attributes $(p:G) ] ; - if dependency in $(att) - { - # The value of a dependency feature is a target and needs to be - # actualized. - result += [ $(p:G=).actualize ] ; - } - else if path in $(att) || free in $(att) - { - local values ; - # Treat features with && in the value specially -- each - # &&-separated element is considered a separate value. This is - # needed to handle searched libraries or include paths, which - # may need to be in a specific order. - if ! 
[ MATCH (&&) : $(p:G=) ] - { - values = $(p:G=) ; - } - else - { - values = [ regex.split $(p:G=) "&&" ] ; - } - if path in $(att) - { - result += [ sequence.transform path.native : $(values) ] ; - } - else - { - result += $(values) ; - } - } - else - { - result += $(p:G=) ; - } - } - } - else - { - result += $(value) ; - } - return $(result) ; -} - - -# Given a rule name and a property set, returns a list of interleaved variables -# names and values which must be set on targets for that rule/property-set -# combination. -# -rule set-target-variables-aux ( rule-or-module : property-set ) -{ - local result ; - properties = [ $(property-set).raw ] ; - for local f in $(.$(rule-or-module).flags) - { - local variable = $(.$(rule-or-module).variable.$(f)) ; - local condition = $(.$(rule-or-module).condition.$(f)) ; - local values = $(.$(rule-or-module).values.$(f)) ; - - if ! $(condition) || - [ find-property-subset $(condition) : $(properties) ] - { - local processed ; - for local v in $(values) - { - # The value might be <feature-name> so needs special treatment. - processed += [ handle-flag-value $(v) : $(properties) ] ; - } - for local r in $(processed) - { - result += $(variable) $(r) ; - } - } - } - - # Strip away last dot separated part and recurse. - local next = [ MATCH ^(.+)\\.([^\\.])* : $(rule-or-module) ] ; - if $(next) - { - result += [ set-target-variables-aux $(next[1]) : $(property-set) ] ; - } - return $(result) ; -} - - -rule set-target-variables ( rule-or-module targets + : property-set ) -{ - properties = [ $(property-set).raw ] ; - local key = $(rule-or-module).$(property-set) ; - local settings = $(.stv.$(key)) ; - if ! $(settings) - { - settings = [ set-target-variables-aux $(rule-or-module) : - $(property-set) ] ; - - if ! $(settings) - { - settings = none ; - } - .stv.$(key) = $(settings) ; - } - - if $(settings) != none - { - local var-name = ; - for local name-or-value in $(settings) - { - if $(var-name) - { - $(var-name) on $(targets) += $(name-or-value) ; - var-name = ; - } - else - { - var-name = $(name-or-value) ; - } - } - } -} - - -# Make toolset 'toolset', defined in a module of the same name, inherit from -# 'base'. -# 1. The 'init' rule from 'base' is imported into 'toolset' with full name. -# Another 'init' is called, which forwards to the base one. -# 2. All generators from 'base' are cloned. The ids are adjusted and <toolset> -# property in requires is adjusted too. -# 3. All flags are inherited. -# 4. All rules are imported. -# -rule inherit ( toolset : base ) -{ - import $(base) ; - inherit-generators $(toolset) : $(base) ; - inherit-flags $(toolset) : $(base) ; - inherit-rules $(toolset) : $(base) ; -} - - -rule inherit-generators ( toolset properties * : base : generators-to-ignore * ) -{ - properties ?= <toolset>$(toolset) ; - local base-generators = [ generators.generators-for-toolset $(base) ] ; - for local g in $(base-generators) - { - local id = [ $(g).id ] ; - - if ! $(id) in $(generators-to-ignore) - { - # Some generator names have multiple periods in their name, so - # $(id:B=$(toolset)) does not generate the right new-id name. E.g. - # if id = gcc.compile.c++ then $(id:B=darwin) = darwin.c++, which is - # not what we want. Manually parse the base and suffix. If there is - # a better way to do this, I would love to see it. See also the - # register() rule in the generators module. 
- local base = $(id) ; - local suffix = "" ; - while $(base:S) - { - suffix = $(base:S)$(suffix) ; - base = $(base:B) ; - } - local new-id = $(toolset)$(suffix) ; - - generators.register [ $(g).clone $(new-id) : $(properties) ] ; - } - } -} - - -# Brings all flag definitions from the 'base' toolset into the 'toolset' -# toolset. Flag definitions whose conditions make use of properties in -# 'prohibited-properties' are ignored. Do not confuse property and feature, for -# example <debug-symbols>on and <debug-symbols>off, so blocking one of them does -# not block the other one. -# -# The flag conditions are not altered at all, so if a condition includes a name, -# or version of a base toolset, it will not ever match the inheriting toolset. -# When such flag settings must be inherited, define a rule in base toolset -# module and call it as needed. -# -rule inherit-flags ( toolset : base : prohibited-properties * : prohibited-vars * ) -{ - for local f in $(.module-flags.$(base)) - { - local rule-or-module = $(.rule-or-module.$(f)) ; - if ( [ set.difference - $(.$(rule-or-module).condition.$(f)) : - $(prohibited-properties) ] - || ! $(.$(rule-or-module).condition.$(f)) - ) && ( ! $(.$(rule-or-module).variable.$(f)) in $(prohibited-vars) ) - { - local rule_ = [ MATCH "[^.]*\.(.*)" : $(rule-or-module) ] ; - local new-rule-or-module ; - if $(rule_) - { - new-rule-or-module = $(toolset).$(rule_) ; - } - else - { - new-rule-or-module = $(toolset) ; - } - - add-flag - $(new-rule-or-module) - : $(.$(rule-or-module).variable.$(f)) - : $(.$(rule-or-module).condition.$(f)) - : $(.$(rule-or-module).values.$(f)) ; - } - } -} - - -rule inherit-rules ( toolset : base : localize ? ) -{ - # It appears that "action" creates a local rule. - local base-generators = [ generators.generators-for-toolset $(base) ] ; - local rules ; - for local g in $(base-generators) - { - rules += [ MATCH "[^.]*\.(.*)" : [ $(g).rule-name ] ] ; - } - rules = [ sequence.unique $(rules) ] ; - IMPORT $(base) : $(rules) : $(toolset) : $(rules) : $(localize) ; - IMPORT $(toolset) : $(rules) : : $(toolset).$(rules) ; -} - - -# Return the list of global 'toolset requirements'. Those requirements will be -# automatically added to the requirements of any main target. -# -rule requirements ( ) -{ - return $(.requirements) ; -} - - -# Adds elements to the list of global 'toolset requirements'. The requirements -# will be automatically added to the requirements for all main targets, as if -# they were specified literally. For best results, all requirements added should -# be conditional or indirect conditional. -# -rule add-requirements ( requirements * ) -{ - if ! 
$(.ignore-requirements)
-    {
-        .requirements += $(requirements) ;
-    }
-}
-
-
-rule __test__ ( )
-{
-    import assert ;
-    local p = <b>0 <c>1 <d>2 <e>3 <f>4 ;
-    assert.result <c>1/<d>2/<e>3 : find-property-subset <c>1/<d>2/<e>3 <a>0/<b>0/<c>1 <d>2/<e>5 <a>9 : $(p) ;
-    assert.result : find-property-subset <a>0/<b>0/<c>9/<d>9/<e>5 <a>9 : $(p) ;
-
-    local p-set = <a>/<b> <a>0/<b> <a>/<b>1 <a>0/<b>1 ;
-    assert.result <a>/<b> : find-property-subset $(p-set) : ;
-    assert.result <a>0/<b> : find-property-subset $(p-set) : <a>0 <c>2 ;
-    assert.result <a>/<b>1 : find-property-subset $(p-set) : <b>1 <c>2 ;
-    assert.result <a>0/<b>1 : find-property-subset $(p-set) : <a>0 <b>1 ;
-}
diff --git a/jam-files/boost-build/build/toolset.py b/jam-files/boost-build/build/toolset.py
deleted file mode 100644
index b4267987..00000000
--- a/jam-files/boost-build/build/toolset.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# Status: being ported by Vladimir Prus
-# Base revision: 40958
-#
-# Copyright 2003 Dave Abrahams
-# Copyright 2005 Rene Rivera
-# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-""" Support for toolset definition.
-"""
-
-import feature, property, generators, property_set
-import b2.util.set
-from b2.util import cached, qualify_jam_action
-from b2.util.utility import *
-from b2.util import bjam_signature
-from b2.manager import get_manager
-
-__re_split_last_segment = re.compile (r'^(.+)\.([^\.])*')
-__re_two_ampersands = re.compile ('(&&)')
-__re_first_segment = re.compile ('([^.]*).*')
-__re_first_group = re.compile (r'[^.]*\.(.*)')
-
-# A single toolset flag: a mechanism to set a value. Specifies that when
-# certain properties are in the build property set, certain values should
-# be appended to some variable.
-#
-# A flag applies to a specific action in a specific module. The list of all
-# flags for a module is stored, and each flag further contains the name of
-# the rule it applies to, if any.
-class Flag:
-
-    def __init__(self, variable_name, values, condition, rule = None):
-        self.variable_name = variable_name
-        self.values = values
-        self.condition = condition
-        self.rule = rule
-
-    def __str__(self):
-        return("Flag(" + str(self.variable_name) + ", " + str(self.values) +\
-               ", " + str(self.condition) + ", " + str(self.rule) + ")")
-
-def reset ():
-    """ Clear the module state. This is mainly for testing purposes.
-    """
-    global __module_flags, __flags, __stv
-
-    # Mapping from module name to a list of all flags that apply
-    # to either that module directly, or to any rule in that module.
-    # Each element of the list is a Flag instance.
-    # So, for a module named xxx this might contain flags for 'xxx',
-    # for 'xxx.compile', for 'xxx.compile.c++', etc.
-    __module_flags = {}
-
-    # Mapping from specific rule or module name to a list of Flag instances
-    # that apply to that name.
-    # Say, it might contain flags for 'xxx.compile.c++'. If there are
-    # entries for module name 'xxx', they are flags for 'xxx' itself,
-    # not including any rules in that module.
-    __flags = {}
-
-    # A cache for variable settings. The key is generated from the rule name and the properties.
-    __stv = {}
-
-reset ()
-
-# FIXME: --ignore-toolset-requirements
-# FIXME: using
-
-# FIXME push-checking-for-flags-module ....
-# FIXME: investigate existing uses of 'hack-hack' parameter
-# in jam code.
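The flag machinery described above is easier to see end-to-end in isolation. The following is a minimal, self-contained sketch of the idea, not the b2 implementation: conditional flag records are resolved against a set of build properties, walking from the most specific rule name up to its module the way set-target-variables does. All names here (SimpleFlag, add_flag, variable_settings, the 'feature=value' string encoding) are illustrative assumptions.

    from collections import namedtuple

    # variable: the name to set on the target; values: what to append;
    # conditions: property sets, any one of which must be a subset of the
    # build properties for the flag to fire (empty means unconditional).
    SimpleFlag = namedtuple('SimpleFlag', 'variable values conditions')

    _flags = {}  # rule-or-module name -> list of SimpleFlag

    def add_flag(rule_or_module, variable, values, conditions=()):
        _flags.setdefault(rule_or_module, []).append(
            SimpleFlag(variable, list(values),
                       [frozenset(c) for c in conditions]))

    def variable_settings(rule_or_module, properties):
        # Walk from the most specific name ('gcc.compile.c++') up to the
        # module ('gcc'), collecting (variable, value) pairs -- the same
        # traversal set-target-variables-aux performs.
        properties = frozenset(properties)
        result = []
        name = rule_or_module
        while name:
            for f in _flags.get(name, []):
                if not f.conditions or any(c <= properties for c in f.conditions):
                    result.extend((f.variable, v) for v in f.values)
            name = name.rpartition('.')[0]  # strip the last dot-separated part
        return result

    add_flag('gcc.compile', 'OPTIONS', ['-O3'],
             conditions=[{'variant=release'}])
    add_flag('gcc', 'OPTIONS', ['-fPIC'])
    print(variable_settings('gcc.compile.c++',
                            ['variant=release', 'link=shared']))
    # -> [('OPTIONS', '-O3'), ('OPTIONS', '-fPIC')]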
-@bjam_signature((["rule_or_module", "variable_name", "condition", "*"],
-                 ["values", "*"]))
-def flags(rule_or_module, variable_name, condition, values = []):
-    """ Specifies the flags (variables) that must be set on targets under certain
-        conditions, described by arguments.
-        rule_or_module:  If contains dot, should be a rule name. The flags will
-                         be applied when that rule is used to set up build
-                         actions.
-
-                         If does not contain dot, should be a module name. The
-                         flags will be applied for all rules in that module. If
-                         module for rule is different from the calling module,
-                         an error is issued.
-
-        variable_name:   Variable that should be set on target.
-
-        condition:       A condition when this flag should be applied. Should
-                         be a set of property sets. If one of those property
-                         sets is contained in the build properties, the flag
-                         will be used. Implied values are not allowed:
-                         "<toolset>gcc" should be used, not just "gcc".
-                         Subfeatures, like in "<toolset>gcc-3.2" are allowed.
-                         If left empty, the flag will always be used.
-
-                         Property sets may use value-less properties ('<a>' vs.
-                         '<a>value') to match absent properties. This makes it
-                         possible to match separately
-
-                             <architecture>/<address-model>64
-                             <architecture>ia64/<address-model>
-
-                         where both features are optional. Without this syntax
-                         we would be forced to define "default" values.
-
-        values:          The value to add to variable. If <feature> is
-                         specified, then the value of 'feature' will be added.
-    """
-    caller = bjam.caller()[:-1]
-    if not '.' in rule_or_module and caller.startswith("Jamfile"):
-        # Unqualified rule name, used inside Jamfile. Most likely used with
-        # 'make' or 'notfile' rules. This prevents setting flags on the entire
-        # Jamfile module (this will be considered as rule), but who cares?
-        # Probably, 'flags' rule should be split into 'flags' and
-        # 'flags-on-module'.
-        rule_or_module = qualify_jam_action(rule_or_module, caller)
-    else:
-        # FIXME: revive checking that we don't set flags for a different
-        # module unintentionally
-        pass
-
-    if condition and not replace_grist (condition, ''):
-        # We have condition in the form '<feature>', that is, without
-        # value. That's a previous syntax:
-        #
-        #    flags gcc.link RPATH <dll-path> ;
-        # for compatibility, convert it to
-        #    flags gcc.link RPATH : <dll-path> ;
-        values = [ condition ]
-        condition = None
-
-    if condition:
-        transformed = []
-        for c in condition:
-            # FIXME: 'split' might be a too raw tool here.
-            pl = [property.create_from_string(s) for s in c.split('/')]
-            pl = feature.expand_subfeatures(pl);
-            transformed.append(property_set.create(pl))
-        condition = transformed
-
-        property.validate_property_sets(condition)
-
-    __add_flag (rule_or_module, variable_name, condition, values)
-
-def set_target_variables (manager, rule_or_module, targets, ps):
-    """
-    """
-    settings = __set_target_variables_aux(manager, rule_or_module, ps)
-
-    if settings:
-        for s in settings:
-            for target in targets:
-                manager.engine ().set_target_variable (target, s [0], s[1], True)
-
-def find_satisfied_condition(conditions, ps):
-    """Returns the first element of 'conditions' which is a subset of the
    property set 'ps', or None if no such element exists."""
-
-    features = set(p.feature() for p in ps.all())
-
-    for condition in conditions:
-
-        found_all = True
-        for i in condition.all():
-
-            found = False
-            if i.value():
-                found = i.value() in ps.get(i.feature())
-            else:
-                # Handle value-less properties like '<architecture>' (compare with
-                # '<architecture>x86').
- # If $(i) is a value-less property it should match default - # value of an optional property. See the first line in the - # example below: - # - # property set properties result - # <a> <b>foo <b>foo match - # <a> <b>foo <a>foo <b>foo no match - # <a>foo <b>foo <b>foo no match - # <a>foo <b>foo <a>foo <b>foo match - found = not i.feature() in features - - found_all = found_all and found - - if found_all: - return condition - - return None - - -def register (toolset): - """ Registers a new toolset. - """ - feature.extend('toolset', [toolset]) - -def inherit_generators (toolset, properties, base, generators_to_ignore = []): - if not properties: - properties = [replace_grist (toolset, '<toolset>')] - - base_generators = generators.generators_for_toolset(base) - - for g in base_generators: - id = g.id() - - if not id in generators_to_ignore: - # Some generator names have multiple periods in their name, so - # $(id:B=$(toolset)) doesn't generate the right new_id name. - # e.g. if id = gcc.compile.c++, $(id:B=darwin) = darwin.c++, - # which is not what we want. Manually parse the base and suffix - # (if there's a better way to do this, I'd love to see it.) - # See also register in module generators. - (base, suffix) = split_action_id(id) - - new_id = toolset + '.' + suffix - - generators.register(g.clone(new_id, properties)) - -def inherit_flags(toolset, base, prohibited_properties = []): - """Brings all flag definitions from the 'base' toolset into the 'toolset' - toolset. Flag definitions whose conditions make use of properties in - 'prohibited-properties' are ignored. Don't confuse property and feature, for - example <debug-symbols>on and <debug-symbols>off, so blocking one of them does - not block the other one. - - The flag conditions are not altered at all, so if a condition includes a name, - or version of a base toolset, it won't ever match the inheriting toolset. When - such flag settings must be inherited, define a rule in base toolset module and - call it as needed.""" - for f in __module_flags.get(base, []): - - if not f.condition or b2.util.set.difference(f.condition, prohibited_properties): - match = __re_first_group.match(f.rule) - rule_ = None - if match: - rule_ = match.group(1) - - new_rule_or_module = '' - - if rule_: - new_rule_or_module = toolset + '.' + rule_ - else: - new_rule_or_module = toolset - - __add_flag (new_rule_or_module, f.variable_name, f.condition, f.values) - -def inherit_rules (toolset, base): - pass - # FIXME: do something about this. -# base_generators = generators.generators_for_toolset (base) - -# import action - -# ids = [] -# for g in base_generators: -# (old_toolset, id) = split_action_id (g.id ()) -# ids.append (id) ; - -# new_actions = [] - -# engine = get_manager().engine() - # FIXME: do this! -# for action in engine.action.values(): -# pass -# (old_toolset, id) = split_action_id(action.action_name) -# -# if old_toolset == base: -# new_actions.append ((id, value [0], value [1])) -# -# for a in new_actions: -# action.register (toolset + '.' + a [0], a [1], a [2]) - - # TODO: how to deal with this? 
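The value-less matching rules documented above are compact enough to restate as a runnable sketch. This is a hypothetical, simplified stand-in (single-valued properties as a dict rather than the real property-set objects) that reproduces the truth table from the comments:

    # condition: dict feature -> value, or None meaning "feature must be
    # absent"; properties: dict feature -> value.
    def matches(condition, properties):
        for feature, value in condition.items():
            if value is None:
                if feature in properties:    # value-less: feature must be absent
                    return False
            elif properties.get(feature) != value:
                return False
        return True

    # The truth table from the comments above:
    assert matches({'a': None, 'b': 'foo'}, {'b': 'foo'})                   # match
    assert not matches({'a': None, 'b': 'foo'}, {'a': 'foo', 'b': 'foo'})   # no match
    assert not matches({'a': 'foo', 'b': 'foo'}, {'b': 'foo'})              # no match
    assert matches({'a': 'foo', 'b': 'foo'}, {'a': 'foo', 'b': 'foo'})      # match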
-# IMPORT $(base) : $(rules) : $(toolset) : $(rules) : localized ; -# # Import the rules to the global scope -# IMPORT $(toolset) : $(rules) : : $(toolset).$(rules) ; -# } -# - -###################################################################################### -# Private functions - -@cached -def __set_target_variables_aux (manager, rule_or_module, ps): - """ Given a rule name and a property set, returns a list of tuples of - variables names and values, which must be set on targets for that - rule/properties combination. - """ - result = [] - - for f in __flags.get(rule_or_module, []): - - if not f.condition or find_satisfied_condition (f.condition, ps): - processed = [] - for v in f.values: - # The value might be <feature-name> so needs special - # treatment. - processed += __handle_flag_value (manager, v, ps) - - for r in processed: - result.append ((f.variable_name, r)) - - # strip away last dot separated part and recurse. - next = __re_split_last_segment.match(rule_or_module) - - if next: - result.extend(__set_target_variables_aux( - manager, next.group(1), ps)) - - return result - -def __handle_flag_value (manager, value, ps): - result = [] - - if get_grist (value): - f = feature.get(value) - values = ps.get(f) - - for value in values: - - if f.dependency(): - # the value of a dependency feature is a target - # and must be actualized - result.append(value.actualize()) - - elif f.path() or f.free(): - - # Treat features with && in the value - # specially -- each &&-separated element is considered - # separate value. This is needed to handle searched - # libraries, which must be in specific order. - if not __re_two_ampersands.search(value): - result.append(value) - - else: - result.extend(value.split ('&&')) - else: - result.append (ungristed) - else: - result.append (value) - - return result - -def __add_flag (rule_or_module, variable_name, condition, values): - """ Adds a new flag setting with the specified values. - Does no checking. - """ - f = Flag(variable_name, values, condition, rule_or_module) - - # Grab the name of the module - m = __re_first_segment.match (rule_or_module) - assert m - module = m.group(1) - - __module_flags.setdefault(m, []).append(f) - __flags.setdefault(rule_or_module, []).append(f) - -__requirements = [] - -def requirements(): - """Return the list of global 'toolset requirements'. - Those requirements will be automatically added to the requirements of any main target.""" - return __requirements - -def add_requirements(requirements): - """Adds elements to the list of global 'toolset requirements'. The requirements - will be automatically added to the requirements for all main targets, as if - they were specified literally. For best results, all requirements added should - be conditional or indirect conditional.""" - - #if ! $(.ignore-requirements) - #{ - print "XXXX", requirements - __requirements.extend(requirements) - #} - -# Make toolset 'toolset', defined in a module of the same name, -# inherit from 'base' -# 1. The 'init' rule from 'base' is imported into 'toolset' with full -# name. Another 'init' is called, which forwards to the base one. -# 2. All generators from 'base' are cloned. The ids are adjusted and -# <toolset> property in requires is adjusted too -# 3. All flags are inherited -# 4. All rules are imported. 
-def inherit(toolset, base):
-    get_manager().projects().load_module(base, []);
-
-    inherit_generators(toolset, [], base)
-    inherit_flags(toolset, base)
-    inherit_rules(toolset, base)
diff --git a/jam-files/boost-build/build/type.jam b/jam-files/boost-build/build/type.jam
deleted file mode 100644
index 1a7a5782..00000000
--- a/jam-files/boost-build/build/type.jam
+++ /dev/null
@@ -1,425 +0,0 @@
-# Copyright 2002, 2003 Dave Abrahams
-# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-# Deals with target type declaration and defines target class which supports
-# typed targets.
-
-import "class" : new ;
-import errors ;
-import feature ;
-import generators : * ;
-import project ;
-import property ;
-import scanner ;
-import os ;
-
-# The following import would create a circular dependency:
-# project -> project-root -> builtin -> type -> targets -> project
-# import targets ;
-
-# The feature is optional so it would never get added implicitly. It is used
-# only for internal purposes and in all cases we want to use it explicitly.
-feature.feature target-type : : composite optional ;
-
-feature.feature main-target-type : : optional incidental ;
-feature.feature base-target-type : : composite optional free ;
-
-
-# Registers a target type, possibly derived from a 'base-type'. Providing a list
-# of 'suffixes' here is a shortcut for separately calling the register-suffixes
-# rule with the given suffixes and the set-generated-target-suffix rule with the
-# first given suffix.
-#
-rule register ( type : suffixes * : base-type ? )
-{
-    # Type names cannot contain hyphens, because when used as feature-values
-    # they would be interpreted as composite features which need to be
-    # decomposed.
-    switch $(type)
-    {
-        case *-* : errors.error "type name \"$(type)\" contains a hyphen" ;
-    }
-
-    if $(type) in $(.types)
-    {
-        errors.error "Type $(type) is already registered." ;
-    }
-    else
-    {
-        .types += $(type) ;
-        .base.$(type) = $(base-type) ;
-        .derived.$(base-type) += $(type) ;
-
-        if $(suffixes)-is-not-empty
-        {
-            # Specify mapping from suffixes to type.
-            register-suffixes $(suffixes) : $(type) ;
-            # By default generated targets of 'type' will use the first of
-            # 'suffixes'. This may be overridden.
-            set-generated-target-suffix $(type) : : $(suffixes[1]) ;
-        }
-
-        feature.extend target-type : $(type) ;
-        feature.extend main-target-type : $(type) ;
-        feature.extend base-target-type : $(type) ;
-
-        feature.compose <target-type>$(type) : $(base-type:G=<base-target-type>) ;
-        feature.compose <base-target-type>$(type) : <base-target-type>$(base-type) ;
-
-        # We used to declare the main target rule only when a 'main' parameter
-        # has been specified. However, it is hard to decide that a type will
-        # *never* need a main target rule and so from time to time we needed to
-        # make yet another type 'main'. So now a main target rule is defined for
-        # each type.
-        main-rule-name = [ type-to-rule-name $(type) ] ;
-        .main-target-type.$(main-rule-name) = $(type) ;
-        IMPORT $(__name__) : main-target-rule : : $(main-rule-name) ;
-
-        # Adding a new derived type affects generator selection so we need to
-        # make the generator selection module update any of its cached
-        # information related to a new derived type being defined.
- generators.update-cached-information-with-a-new-type $(type) ; - } -} - - -# Given a type, returns the name of the main target rule which creates targets -# of that type. -# -rule type-to-rule-name ( type ) -{ - # Lowercase everything. Convert underscores to dashes. - import regex ; - local n = [ regex.split $(type:L) "_" ] ; - return $(n:J=-) ; -} - - -# Given a main target rule name, returns the type for which it creates targets. -# -rule type-from-rule-name ( rule-name ) -{ - return $(.main-target-type.$(rule-name)) ; -} - - -# Specifies that files with suffix from 'suffixes' be recognized as targets of -# type 'type'. Issues an error if a different type is already specified for any -# of the suffixes. -# -rule register-suffixes ( suffixes + : type ) -{ - for local s in $(suffixes) - { - if ! $(.type.$(s)) - { - .type.$(s) = $(type) ; - } - else if $(.type.$(s)) != $(type) - { - errors.error Attempting to specify multiple types for suffix - \"$(s)\" : "Old type $(.type.$(s)), New type $(type)" ; - } - } -} - - -# Returns true iff type has been registered. -# -rule registered ( type ) -{ - if $(type) in $(.types) - { - return true ; - } -} - - -# Issues an error if 'type' is unknown. -# -rule validate ( type ) -{ - if ! [ registered $(type) ] - { - errors.error "Unknown target type $(type)" ; - } -} - - -# Sets a scanner class that will be used for this 'type'. -# -rule set-scanner ( type : scanner ) -{ - validate $(type) ; - .scanner.$(type) = $(scanner) ; -} - - -# Returns a scanner instance appropriate to 'type' and 'properties'. -# -rule get-scanner ( type : property-set ) -{ - if $(.scanner.$(type)) - { - return [ scanner.get $(.scanner.$(type)) : $(property-set) ] ; - } -} - - -# Returns a base type for the given type or nothing in case the given type is -# not derived. -# -rule base ( type ) -{ - return $(.base.$(type)) ; -} - - -# Returns the given type and all of its base types in order of their distance -# from type. -# -rule all-bases ( type ) -{ - local result = $(type) ; - while $(type) - { - type = [ base $(type) ] ; - result += $(type) ; - } - return $(result) ; -} - - -# Returns the given type and all of its derived types in order of their distance -# from type. -# -rule all-derived ( type ) -{ - local result = $(type) ; - for local d in $(.derived.$(type)) - { - result += [ all-derived $(d) ] ; - } - return $(result) ; -} - - -# Returns true if 'type' is equal to 'base' or has 'base' as its direct or -# indirect base. -# -rule is-derived ( type base ) -{ - if $(base) in [ all-bases $(type) ] - { - return true ; - } -} - -# Returns true if 'type' is either derived from or is equal to 'base'. -# -# TODO: It might be that is-derived and is-subtype were meant to be different -# rules - one returning true for type = base and one not, but as currently -# implemented they are actually the same. Clean this up. -# -rule is-subtype ( type base ) -{ - return [ is-derived $(type) $(base) ] ; -} - - -# Store suffixes for generated targets. -.suffixes = [ new property-map ] ; - -# Store prefixes for generated targets (e.g. "lib" for library). -.prefixes = [ new property-map ] ; - - -# Sets a file suffix to be used when generating a target of 'type' with the -# specified properties. Can be called with no properties if no suffix has -# already been specified for the 'type'. The 'suffix' parameter can be an empty -# string ("") to indicate that no suffix should be used. -# -# Note that this does not cause files with 'suffix' to be automatically -# recognized as being of 'type'. 
Two different types can use the same suffix for -# their generated files but only one type can be auto-detected for a file with -# that suffix. User should explicitly specify which one using the -# register-suffixes rule. -# -rule set-generated-target-suffix ( type : properties * : suffix ) -{ - set-generated-target-ps suffix : $(type) : $(properties) : $(suffix) ; -} - - -# Change the suffix previously registered for this type/properties combination. -# If suffix is not yet specified, sets it. -# -rule change-generated-target-suffix ( type : properties * : suffix ) -{ - change-generated-target-ps suffix : $(type) : $(properties) : $(suffix) ; -} - - -# Returns the suffix used when generating a file of 'type' with the given -# properties. -# -rule generated-target-suffix ( type : property-set ) -{ - return [ generated-target-ps suffix : $(type) : $(property-set) ] ; -} - - -# Sets a target prefix that should be used when generating targets of 'type' -# with the specified properties. Can be called with empty properties if no -# prefix for 'type' has been specified yet. -# -# The 'prefix' parameter can be empty string ("") to indicate that no prefix -# should be used. -# -# Usage example: library names use the "lib" prefix on unix. -# -rule set-generated-target-prefix ( type : properties * : prefix ) -{ - set-generated-target-ps prefix : $(type) : $(properties) : $(prefix) ; -} - - -# Change the prefix previously registered for this type/properties combination. -# If prefix is not yet specified, sets it. -# -rule change-generated-target-prefix ( type : properties * : prefix ) -{ - change-generated-target-ps prefix : $(type) : $(properties) : $(prefix) ; -} - - -rule generated-target-prefix ( type : property-set ) -{ - return [ generated-target-ps prefix : $(type) : $(property-set) ] ; -} - - -# Common rules for prefix/suffix provisioning follow. - -local rule set-generated-target-ps ( ps : type : properties * : psval ) -{ - properties = <target-type>$(type) $(properties) ; - $(.$(ps)es).insert $(properties) : $(psval) ; -} - - -local rule change-generated-target-ps ( ps : type : properties * : psval ) -{ - properties = <target-type>$(type) $(properties) ; - local prev = [ $(.$(ps)es).find-replace $(properties) : $(psval) ] ; - if ! $(prev) - { - set-generated-target-ps $(ps) : $(type) : $(properties) : $(psval) ; - } -} - - -# Returns either prefix or suffix (as indicated by 'ps') that should be used -# when generating a target of 'type' with the specified properties. Parameter -# 'ps' can be either "prefix" or "suffix". If no prefix/suffix is specified for -# 'type', returns prefix/suffix for base type, if any. -# -local rule generated-target-ps-real ( ps : type : properties * ) -{ - local result ; - local found ; - while $(type) && ! $(found) - { - result = [ $(.$(ps)es).find <target-type>$(type) $(properties) ] ; - # If the prefix/suffix is explicitly set to an empty string, we consider - # prefix/suffix to be found. If we were not to compare with "", there - # would be no way to specify an empty prefix/suffix. - if $(result)-is-not-empty - { - found = true ; - } - type = $(.base.$(type)) ; - } - if $(result) = "" - { - result = ; - } - return $(result) ; -} - - -local rule generated-target-ps ( ps : type : property-set ) -{ - local key = .$(ps).$(type).$(property-set) ; - local v = $($(key)) ; - if ! $(v) - { - v = [ generated-target-ps-real $(ps) : $(type) : [ $(property-set).raw ] - ] ; - if ! 
$(v) - { - v = none ; - } - $(key) = $(v) ; - } - - if $(v) != none - { - return $(v) ; - } -} - - -# Returns file type given its name. If there are several dots in filename, tries -# each suffix. E.g. for name of "file.so.1.2" suffixes "2", "1", and "so" will -# be tried. -# -rule type ( filename ) -{ - if [ os.name ] in NT CYGWIN - { - filename = $(filename:L) ; - } - local type ; - while ! $(type) && $(filename:S) - { - local suffix = $(filename:S) ; - type = $(.type$(suffix)) ; - filename = $(filename:S=) ; - } - return $(type) ; -} - - -# Rule used to construct all main targets. Note that this rule gets imported -# into the global namespace under different alias names and the exact target -# type to construct is selected based on the alias used to actually invoke this -# rule. -# -rule main-target-rule ( name : sources * : requirements * : default-build * : - usage-requirements * ) -{ - # First discover the required target type based on the exact alias used to - # invoke this rule. - local bt = [ BACKTRACE 1 ] ; - local rulename = $(bt[4]) ; - local target-type = [ type-from-rule-name $(rulename) ] ; - - # This is a circular module dependency and so must be imported here. - import targets ; - - return [ targets.create-typed-target $(target-type) : [ project.current ] : - $(name) : $(sources) : $(requirements) : $(default-build) : - $(usage-requirements) ] ; -} - - -rule __test__ ( ) -{ - import assert ; - - # TODO: Add tests for all the is-derived, is-base & related type relation - # checking rules. -} diff --git a/jam-files/boost-build/build/type.py b/jam-files/boost-build/build/type.py deleted file mode 100644 index ddb7ba09..00000000 --- a/jam-files/boost-build/build/type.py +++ /dev/null @@ -1,313 +0,0 @@ -# Status: ported. -# Base revision: 45462. - -# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - - - -import re -import os -import os.path -from b2.util.utility import replace_grist, os_name -from b2.exceptions import * -from b2.build import feature, property, scanner -from b2.util import bjam_signature - - -__re_hyphen = re.compile ('-') - -def __register_features (): - """ Register features need by this module. - """ - # The feature is optional so that it is never implicitly added. - # It's used only for internal purposes, and in all cases we - # want to explicitly use it. - feature.feature ('target-type', [], ['composite', 'optional']) - feature.feature ('main-target-type', [], ['optional', 'incidental']) - feature.feature ('base-target-type', [], ['composite', 'optional', 'free']) - -def reset (): - """ Clear the module state. This is mainly for testing purposes. - Note that this must be called _after_ resetting the module 'feature'. - """ - global __prefixes_suffixes, __suffixes_to_types, __types, __rule_names_to_types, __target_suffixes_cache - - __register_features () - - # Stores suffixes for generated targets. 
-    __prefixes_suffixes = [property.PropertyMap(), property.PropertyMap()]
-
-    # Maps suffixes to types
-    __suffixes_to_types = {}
-
-    # A map with all the registered types, indexed by the type name
-    # Each entry is a dictionary with following values:
-    # 'base': the name of base type or None if type has no base
-    # 'derived': a list of names of type which derive from this one
-    # 'scanner': the scanner class registered for this type, if any
-    __types = {}
-
-    # Caches suffixes for targets with certain properties.
-    __target_suffixes_cache = {}
-
-reset ()
-
-@bjam_signature((["type"], ["suffixes", "*"], ["base_type", "?"]))
-def register (type, suffixes = [], base_type = None):
-    """ Registers a target type, possibly derived from a 'base-type'.
-        If 'suffixes' are provided, they list all the suffixes that mean a file is of 'type'.
-        Also, the first element gives the suffix to be used when constructing an object of
-        'type'.
-        type: a string
-        suffixes: None or a sequence of strings
-        base_type: None or a string
-    """
-    # Type names cannot contain hyphens, because when used as
-    # feature-values they will be interpreted as composite features
-    # which need to be decomposed.
-    if __re_hyphen.search (type):
-        raise BaseException ('type name "%s" contains a hyphen' % type)
-
-    if __types.has_key (type):
-        raise BaseException ('Type "%s" is already registered.' % type)
-
-    entry = {}
-    entry ['base'] = base_type
-    entry ['derived'] = []
-    entry ['scanner'] = None
-    __types [type] = entry
-
-    if base_type:
-        __types [base_type]['derived'].append (type)
-
-    if len (suffixes) > 0:
-        # Generated targets of 'type' will use the first of 'suffixes'
-        # (this may be overridden)
-        set_generated_target_suffix (type, [], suffixes [0])
-
-        # Specify mapping from suffixes to type
-        register_suffixes (suffixes, type)
-
-    feature.extend('target-type', [type])
-    feature.extend('main-target-type', [type])
-    feature.extend('base-target-type', [type])
-
-    if base_type:
-        feature.compose ('<target-type>' + type, replace_grist (base_type, '<base-target-type>'))
-        feature.compose ('<base-target-type>' + type, '<base-target-type>' + base_type)
-
-    import b2.build.generators as generators
-    # Adding a new derived type affects generator selection so we need to
-    # make the generator selection module update any of its cached
-    # information related to a new derived type being defined.
-    generators.update_cached_information_with_a_new_type(type)
-
-    # FIXME: resolving recursive dependency.
-    from b2.manager import get_manager
-    get_manager().projects().project_rules().add_rule_for_type(type)
-
-# FIXME: quick hack.
-def type_from_rule_name(rule_name):
-    return rule_name.upper().replace("-", "_")
-
-
-def register_suffixes (suffixes, type):
-    """ Specifies that targets with suffix from 'suffixes' have the type 'type'.
        If a different type is already specified for any of the suffixes, issues an error.
-    """
-    for s in suffixes:
-        if __suffixes_to_types.has_key (s):
-            old_type = __suffixes_to_types [s]
-            if old_type != type:
-                raise BaseException ('Attempting to specify type for suffix "%s"\nOld type: "%s", New type "%s"' % (s, old_type, type))
-        else:
-            __suffixes_to_types [s] = type
-
-def registered (type):
-    """ Returns true iff type has been registered.
-    """
-    return __types.has_key (type)
-
-def validate (type):
-    """ Issues an error if 'type' is unknown.
- """ - if not registered (type): - raise BaseException ("Unknown target type '%s'" % type) - -def set_scanner (type, scanner): - """ Sets a scanner class that will be used for this 'type'. - """ - validate (type) - __types [type]['scanner'] = scanner - -def get_scanner (type, prop_set): - """ Returns a scanner instance appropriate to 'type' and 'property_set'. - """ - if registered (type): - scanner_type = __types [type]['scanner'] - if scanner_type: - return scanner.get (scanner_type, prop_set.raw ()) - pass - - return None - -def base(type): - """Returns a base type for the given type or nothing in case the given type is - not derived.""" - - return __types[type]['base'] - -def all_bases (type): - """ Returns type and all of its bases, in the order of their distance from type. - """ - result = [] - while type: - result.append (type) - type = __types [type]['base'] - - return result - -def all_derived (type): - """ Returns type and all classes that derive from it, in the order of their distance from type. - """ - result = [type] - for d in __types [type]['derived']: - result.extend (all_derived (d)) - - return result - -def is_derived (type, base): - """ Returns true if 'type' is 'base' or has 'base' as its direct or indirect base. - """ - # TODO: this isn't very efficient, especially for bases close to type - if base in all_bases (type): - return True - else: - return False - -def is_subtype (type, base): - """ Same as is_derived. Should be removed. - """ - # TODO: remove this method - return is_derived (type, base) - -@bjam_signature((["type"], ["properties", "*"], ["suffix"])) -def set_generated_target_suffix (type, properties, suffix): - """ Sets a target suffix that should be used when generating target - of 'type' with the specified properties. Can be called with - empty properties if no suffix for 'type' was specified yet. - This does not automatically specify that files 'suffix' have - 'type' --- two different types can use the same suffix for - generating, but only one type should be auto-detected for - a file with that suffix. User should explicitly specify which - one. - - The 'suffix' parameter can be empty string ("") to indicate that - no suffix should be used. - """ - set_generated_target_ps(1, type, properties, suffix) - - - -def change_generated_target_suffix (type, properties, suffix): - """ Change the suffix previously registered for this type/properties - combination. If suffix is not yet specified, sets it. - """ - change_generated_target_ps(1, type, properties, suffix) - -def generated_target_suffix(type, properties): - return generated_target_ps(1, type, properties) - -# Sets a target prefix that should be used when generating targets of 'type' -# with the specified properties. Can be called with empty properties if no -# prefix for 'type' has been specified yet. -# -# The 'prefix' parameter can be empty string ("") to indicate that no prefix -# should be used. -# -# Usage example: library names use the "lib" prefix on unix. -@bjam_signature((["type"], ["properties", "*"], ["suffix"])) -def set_generated_target_prefix(type, properties, prefix): - set_generated_target_ps(0, type, properties, prefix) - -# Change the prefix previously registered for this type/properties combination. -# If prefix is not yet specified, sets it. 
-def change_generated_target_prefix(type, properties, prefix):
-    change_generated_target_ps(0, type, properties, prefix)
-
-def generated_target_prefix(type, properties):
-    return generated_target_ps(0, type, properties)
-
-def set_generated_target_ps(is_suffix, type, properties, val):
-    properties.append ('<target-type>' + type)
-    __prefixes_suffixes[is_suffix].insert (properties, val)
-
-def change_generated_target_ps(is_suffix, type, properties, val):
-    properties.append ('<target-type>' + type)
-    prev = __prefixes_suffixes[is_suffix].find_replace(properties, val)
-    if not prev:
-        set_generated_target_ps(is_suffix, type, properties, val)
-
-# Returns either prefix or suffix (as indicated by 'is_suffix') that should be used
-# when generating a target of 'type' with the specified properties.
-# If no prefix/suffix is specified for 'type', returns prefix/suffix for
-# base type, if any.
-def generated_target_ps_real(is_suffix, type, properties):
-
-    result = ''
-    found = False
-    while type and not found:
-        result = __prefixes_suffixes[is_suffix].find (['<target-type>' + type] + properties)
-
-        # Note that if the string is empty (""), but not null, we consider
-        # suffix found. Setting prefix or suffix to empty string is fine.
-        if result is not None:
-            found = True
-
-        type = __types [type]['base']
-
-    if not result:
-        result = ''
-    return result
-
-def generated_target_ps(is_suffix, type, prop_set):
-    """ Returns the suffix that should be used when generating a target of 'type',
        with the specified properties. If no suffix was specified for 'type',
        returns the suffix for the base type, if any.
-    """
-    key = (is_suffix, type, prop_set)
-    v = __target_suffixes_cache.get(key, None)
-
-    if not v:
-        v = generated_target_ps_real(is_suffix, type, prop_set.raw())
-        __target_suffixes_cache [key] = v
-
-    return v
-
-def type(filename):
-    """ Returns file type given its name. If there are several dots in filename,
        tries each suffix. E.g. for name of "file.so.1.2" suffixes "2", "1", and
        "so" will be tried.
-    """
-    while 1:
-        filename, suffix = os.path.splitext (filename)
-        if not suffix: return None
-        suffix = suffix[1:]
-
-        if __suffixes_to_types.has_key(suffix):
-            return __suffixes_to_types[suffix]
-
-# NOTE: moved from tools/types/register
-def register_type (type, suffixes, base_type = None, os = []):
-    """ Register the given type on the specified OSes, or on remaining OSes
        if os is not specified. This rule is injected into each of the type
        modules for the sake of convenience.
-    """
-    if registered (type):
-        return
-
-    if not os or os_name () in os:
-        register (type, suffixes, base_type)
diff --git a/jam-files/boost-build/build/version.jam b/jam-files/boost-build/build/version.jam
deleted file mode 100644
index 7626ddda..00000000
--- a/jam-files/boost-build/build/version.jam
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright 2002, 2003, 2004, 2006 Vladimir Prus
-# Copyright 2008 Jurko Gospodnetic
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-import errors ;
-import numbers ;
-
-major = "2011" ;
-minor = "04" ;
-
-rule boost-build ( )
-{
-    return "$(major).$(minor)-svn" ;
-}
-
-rule print ( )
-{
-    if [ verify-engine-version ]
-    {
-        ECHO "Boost.Build" [ boost-build ] ;
-    }
-}
-
-rule verify-engine-version ( )
-{
-    local v = [ modules.peek : JAM_VERSION ] ;
-
-    if $(v[1]) != $(major) || $(v[2]) != $(minor)
-    {
-        local argv = [ modules.peek : ARGV ] ;
-        local e = $(argv[1]) ;
-        local l = [ modules.binding version ] ;
-        l = $(l:D) ;
-        l = $(l:D) ;
-        ECHO "warning: mismatched versions of Boost.Build engine and core" ;
-        ECHO "warning: Boost.Build engine ($(e)) is $(v:J=.)" ;
-        ECHO "warning: Boost.Build core (at $(l)) is" [ boost-build ] ;
-    }
-    else
-    {
-        return true ;
-    }
-}
-
-
-
-# Utility rule for testing whether all elements in a sequence are equal to 0.
-#
-local rule is-all-zeroes ( sequence * )
-{
-    local result = "true" ;
-    for local e in $(sequence)
-    {
-        if $(e) != "0"
-        {
-            result = "" ;
-        }
-    }
-    return $(result) ;
-}
-
-
-# Returns "true" if the first version is less than the second one.
-#
-rule version-less ( lhs + : rhs + )
-{
-    numbers.check $(lhs) ;
-    numbers.check $(rhs) ;
-
-    local done ;
-    local result ;
-
-    while ! $(done) && $(lhs) && $(rhs)
-    {
-        if [ numbers.less $(lhs[1]) $(rhs[1]) ]
-        {
-            done = "true" ;
-            result = "true" ;
-        }
-        else if [ numbers.less $(rhs[1]) $(lhs[1]) ]
-        {
-            done = "true" ;
-        }
-        else
-        {
-            lhs = $(lhs[2-]) ;
-            rhs = $(rhs[2-]) ;
-        }
-    }
-    if ( ! $(done) && ! $(lhs) && ! [ is-all-zeroes $(rhs) ] )
-    {
-        result = "true" ;
-    }
-
-    return $(result) ;
-}
-
-
-# Returns "true" if the current JAM version is at least the given
-# version.
-#
-rule check-jam-version ( version + )
-{
-    local version-tag = $(version:J=.) ;
-    if ! $(version-tag)
-    {
-        errors.error Invalid version specifier: : $(version:E="(undefined)") ;
-    }
-
-    if ! $(.jam-version-check.$(version-tag))-is-not-empty
-    {
-        local jam-version = [ modules.peek : JAM_VERSION ] ;
-        if ! $(jam-version)
-        {
-            errors.error "Unable to deduce Boost Jam version. Your Boost Jam"
-                "installation is most likely terribly outdated."
; - } - .jam-version-check.$(version-tag) = "true" ; - if [ version-less [ modules.peek : JAM_VERSION ] : $(version) ] - { - .jam-version-check.$(version-tag) = "" ; - } - } - return $(.jam-version-check.$(version-tag)) ; -} - - -rule __test__ ( ) -{ - import assert ; - - local jam-version = [ modules.peek : JAM_VERSION ] ; - local future-version = $(jam-version) ; - future-version += "1" ; - - assert.true check-jam-version $(jam-version) ; - assert.false check-jam-version $(future-version) ; - - assert.true version-less 0 : 1 ; - assert.false version-less 0 : 0 ; - assert.true version-less 1 : 2 ; - assert.false version-less 1 : 1 ; - assert.false version-less 2 : 1 ; - assert.true version-less 3 1 20 : 3 4 10 ; - assert.false version-less 3 1 10 : 3 1 10 ; - assert.false version-less 3 4 10 : 3 1 20 ; - assert.true version-less 3 1 20 5 1 : 3 4 10 ; - assert.false version-less 3 1 10 5 1 : 3 1 10 ; - assert.false version-less 3 4 10 5 1 : 3 1 20 ; - assert.true version-less 3 1 20 : 3 4 10 5 1 ; - assert.true version-less 3 1 10 : 3 1 10 5 1 ; - assert.false version-less 3 4 10 : 3 1 20 5 1 ; - assert.false version-less 3 1 10 : 3 1 10 0 0 ; - assert.false version-less 3 1 10 0 0 : 3 1 10 ; - assert.false version-less 3 1 10 0 : 3 1 10 0 0 ; - assert.false version-less 3 1 10 0 : 03 1 10 0 0 ; - assert.false version-less 03 1 10 0 : 3 1 10 0 0 ; - - # TODO: Add tests for invalid input data being sent to version-less. -} diff --git a/jam-files/boost-build/build/virtual-target.jam b/jam-files/boost-build/build/virtual-target.jam deleted file mode 100644 index 2e8446bc..00000000 --- a/jam-files/boost-build/build/virtual-target.jam +++ /dev/null @@ -1,1317 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2005, 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Implements virtual targets, which correspond to actual files created during a -# build, but are not yet targets in Jam sense. They are needed, for example, -# when searching for possible transformation sequences, when it is not yet known -# whether a particular target should be created at all. 
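The core idea of virtual-target.jam, as introduced in the header above, is that targets are first built as cheap descriptions that only become real engine targets on demand, and may be dropped if a transformation sequence is not selected. A rough Python model of that pattern (hypothetical names, not the b2 implementation) with memoized actualization, mirroring the `self.made.$(name)` guard used below:

    class VirtualTarget:
        def __init__(self, name, dependencies=()):
            self.name = name
            self.dependencies = list(dependencies)
            self._actualized = None

        def actualize(self, engine):
            if self._actualized is None:       # only on the first invocation
                for dep in self.dependencies:
                    dep.actualize(engine)
                engine.declare_target(self.name,
                                      [d.name for d in self.dependencies])
                self._actualized = self.name
            return self._actualized

    class Engine:
        """Stand-in for the Jam engine: just records declared targets."""
        def __init__(self):
            self.targets = {}
        def declare_target(self, name, deps):
            self.targets[name] = deps

    engine = Engine()
    obj = VirtualTarget('main.o', [VirtualTarget('main.cpp')])
    exe = VirtualTarget('app', [obj])
    exe.actualize(engine)
    obj.actualize(engine)   # memoized: no duplicate set-up
    print(engine.targets)
    # {'main.cpp': [], 'main.o': ['main.cpp'], 'app': ['main.o']}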
- -import "class" : new ; -import errors ; -import path ; -import sequence ; -import set ; -import type ; -import utility ; - - -# +--------------------------+ -# | virtual-target | -# +==========================+ -# | actualize | -# +--------------------------+ -# | actualize-action() = 0 | -# | actualize-location() = 0 | -# +----------------+---------+ -# | -# ^ -# / \ -# +-+-+ -# | -# +---------------------+ +-------+--------------+ -# | action | | abstract-file-target | -# +=====================| * +======================+ -# | action-name | +--+ action | -# | properties | | +----------------------+ -# +---------------------+--+ | actualize-action() | -# | actualize() |0..1 +-----------+----------+ -# | path() | | -# | adjust-properties() | sources | -# | actualize-sources() | targets | -# +------+--------------+ ^ -# | / \ -# ^ +-+-+ -# / \ | -# +-+-+ +-------------+-------------+ -# | | | -# | +------+---------------+ +--------+-------------+ -# | | file-target | | searched-lib-target | -# | +======================+ +======================+ -# | | actualize-location() | | actualize-location() | -# | +----------------------+ +----------------------+ -# | -# +-+------------------------------+ -# | | -# +----+----------------+ +---------+-----------+ -# | compile-action | | link-action | -# +=====================+ +=====================+ -# | adjust-properties() | | adjust-properties() | -# +---------------------+ | actualize-sources() | -# +---------------------+ -# -# The 'compile-action' and 'link-action' classes are not defined here but in -# builtin.jam modules. They are shown in the diagram to give the big picture. - - -# Models a potential target. It can be converted into a Jam target and used in -# building, if needed. However, it can be also dropped, which allows us to -# search for different transformations and select only one. -# -class virtual-target -{ - import scanner ; - import sequence ; - import utility ; - import virtual-target ; - - rule __init__ ( - name # Target/project name. - : project # Project to which this target belongs. - ) - { - self.name = $(name) ; - self.project = $(project) ; - self.dependencies = ; - } - - # Name of this target. - # - rule name ( ) - { - return $(self.name) ; - } - - # Project of this target. - # - rule project ( ) - { - return $(self.project) ; - } - - # Adds additional 'virtual-target' instances this one depends on. - # - rule depends ( d + ) - { - self.dependencies = [ sequence.merge $(self.dependencies) : - [ sequence.insertion-sort $(d) ] ] ; - } - - rule dependencies ( ) - { - return $(self.dependencies) ; - } - - rule always ( ) - { - .always = 1 ; - } - - # Generates all the actual targets and sets up build actions for this - # target. - # - # If 'scanner' is specified, creates an additional target with the same - # location as the actual target, which will depend on the actual target and - # be associated with a 'scanner'. That additional target is returned. See - # the docs (#dependency_scanning) for rationale. Target must correspond to a - # file if 'scanner' is specified. - # - # If scanner is not specified then the actual target is returned. - # - rule actualize ( scanner ? ) - { - local actual-name = [ actualize-no-scanner ] ; - - if $(.always) - { - ALWAYS $(actual-name) ; - } - - if ! $(scanner) - { - return $(actual-name) ; - } - else - { - # Add the scanner instance to the grist for name. 
-            local g = [ sequence.join
-                [ utility.ungrist $(actual-name:G) ] $(scanner) : - ] ;
-            local name = $(actual-name:G=$(g)) ;
-
-            if ! $(self.made.$(name))
-            {
-                self.made.$(name) = true ;
-
-                DEPENDS $(name) : $(actual-name) ;
-
-                actualize-location $(name) ;
-
-                scanner.install $(scanner) : $(name) $(__name__) ;
-            }
-            return $(name) ;
-        }
-    }
-
-# private: (overridables)
-
-    # Sets up build actions for 'target'. Should call appropriate rules and set
-    # target variables.
-    #
-    rule actualize-action ( target )
-    {
-        errors.error "method should be defined in derived classes" ;
-    }
-
-    # Sets up variables on 'target' which specify its location.
-    #
-    rule actualize-location ( target )
-    {
-        errors.error "method should be defined in derived classes" ;
-    }
-
-    # If the target is a generated one, returns the path where it will be
-    # generated. Otherwise, returns an empty list.
-    #
-    rule path ( )
-    {
-        errors.error "method should be defined in derived classes" ;
-    }
-
-    # Returns the actual target name to be used in case when no scanner is
-    # involved.
-    #
-    rule actual-name ( )
-    {
-        errors.error "method should be defined in derived classes" ;
-    }
-
-# implementation
-    rule actualize-no-scanner ( )
-    {
-        # In fact, we just need to merge virtual-target with
-        # abstract-file-target as the latter is the only class derived from the
-        # former. But that has been left for later.
-
-        errors.error "method should be defined in derived classes" ;
-    }
-}
-
-
-# Target corresponding to a file. The exact mapping for file is not yet
-# specified in this class. (TODO: Actually, the class name could be better...)
-#
-# May be a source file (when no action is specified) or a derived file
-# (otherwise).
-#
-# The target's grist is a concatenation of its project's location, action
-# properties (for derived targets) and, optionally, value identifying the main
-# target.
-#
-class abstract-file-target : virtual-target
-{
-    import project ;
-    import regex ;
-    import sequence ;
-    import path ;
-    import type ;
-    import property-set ;
-    import indirect ;
-
-    rule __init__ (
-        name     # Target's name.
-        exact ?  # If non-empty, the name is exactly the name the created file
-                 # should have. Otherwise, the '__init__' method will add a
-                 # suffix obtained from 'type' by calling
-                 # 'type.generated-target-suffix'.
-        : type ?  # Target's type.
-        : project
-        : action ?
-    )
-    {
-        virtual-target.__init__ $(name) : $(project) ;
-
-        self.type = $(type) ;
-        self.action = $(action) ;
-        if $(action)
-        {
-            $(action).add-targets $(__name__) ;
-
-            if $(self.type) && ! $(exact)
-            {
-                _adjust-name $(name) ;
-            }
-        }
-    }
-
-    rule type ( )
-    {
-        return $(self.type) ;
-    }
-
-    # Sets the path. When generating the target name, it will override any path
-    # computation from properties.
-    #
-    rule set-path ( path )
-    {
-        self.path = [ path.native $(path) ] ;
-    }
-
-    # Returns the currently set action.
-    #
-    rule action ( )
-    {
-        return $(self.action) ;
-    }
-
-    # Sets/gets the 'root' flag. Target is root if it directly corresponds to
-    # some variant of a main target.
-    #
-    rule root ( set ? )
-    {
-        if $(set)
-        {
-            self.root = true ;
-        }
-        return $(self.root) ;
-    }
-
-    # Gets or sets the subvariant which created this target. Subvariant is set
-    # when target is brought into existence and is never changed after that. In
-    # particular, if a target is shared between multiple subvariants, only the
-    # first is stored.
-    #
-    rule creating-subvariant ( s ?  # If specified, specifies the value to set,
                                    # which should be a 'subvariant' class
                                    # instance.
- ) - { - if $(s) && ! $(self.creating-subvariant) - { - self.creating-subvariant = $(s) ; - } - return $(self.creating-subvariant) ; - } - - rule actualize-action ( target ) - { - if $(self.action) - { - $(self.action).actualize ; - } - } - - # Return a human-readable representation of this target. If this target has - # an action, that is: - # - # { <action-name>-<self.name>.<self.type> <action-sources>... } - # - # otherwise, it is: - # - # { <self.name>.<self.type> } - # - rule str ( ) - { - local action = [ action ] ; - local name-dot-type = [ sequence.join $(self.name) "." $(self.type) ] ; - - if $(action) - { - local sources = [ $(action).sources ] ; - local action-name = [ $(action).action-name ] ; - - local ss ; - for local s in $(sources) - { - ss += [ $(s).str ] ; - } - - return "{" $(action-name)-$(name-dot-type) $(ss) "}" ; - } - else - { - return "{" $(name-dot-type) "}" ; - } - } - - rule less ( a ) - { - if [ str ] < [ $(a).str ] - { - return true ; - } - } - - rule equal ( a ) - { - if [ str ] = [ $(a).str ] - { - return true ; - } - } - -# private: - rule actual-name ( ) - { - if ! $(self.actual-name) - { - local grist = [ grist ] ; - local basename = [ path.native $(self.name) ] ; - self.actual-name = <$(grist)>$(basename) ; - } - return $(self.actual-name) ; - } - - # Helper to 'actual-name', above. Computes a unique prefix used to - # distinguish this target from other targets with the same name creating - # different files. - # - rule grist ( ) - { - # Depending on target, there may be different approaches to generating - # unique prefixes. We generate prefixes in the form: - # <one letter approach code> <the actual prefix> - local path = [ path ] ; - if $(path) - { - # The target will be generated to a known path. Just use the path - # for identification, since path is as unique as it can get. - return p$(path) ; - } - else - { - # File is either source, which will be searched for, or is not a - # file at all. Use the location of project for distinguishing. - local project-location = [ $(self.project).get location ] ; - local location-grist = [ sequence.join [ regex.split - $(project-location) "/" ] : "!" ] ; - - if $(self.action) - { - local ps = [ $(self.action).properties ] ; - local property-grist = [ $(ps).as-path ] ; - # 'property-grist' can be empty when 'ps' is an empty property - # set. - if $(property-grist) - { - location-grist = $(location-grist)/$(property-grist) ; - } - } - - return l$(location-grist) ; - } - } - - # Given the target name specified in constructor, returns the name which - # should be really used, by looking at the <tag> properties. Tag properties - # need to be specified as <tag>@rule-name. This makes Boost Build call the - # specified rule with the target name, type and properties to get the new - # name. If no <tag> property is specified or the rule specified by <tag> - # returns nothing, returns the result of calling - # virtual-target.add-prefix-and-suffix. - # - rule _adjust-name ( specified-name ) - { - local ps ; - if $(self.action) - { - ps = [ $(self.action).properties ] ; - } - else - { - ps = [ property-set.empty ] ; - } - - # We add ourselves to the properties so that any tag rule can get more - # direct information about the target than just that available through - # the properties. This is useful in implementing name changes based on - # the sources of the target. For example to make unique names of object - # files based on the source file. 
--grafik - ps = [ property-set.create [ $(ps).raw ] <target>$(__name__) ] ; - - local tag = [ $(ps).get <tag> ] ; - - if $(tag) - { - local rule-name = [ MATCH ^@(.*) : $(tag) ] ; - if $(rule-name) - { - if $(tag[2]) - { - errors.error "<tag>@rulename is present but is not the only" - "<tag> feature" ; - } - - self.name = [ indirect.call $(rule-name) $(specified-name) - : $(self.type) : $(ps) ] ; - } - else - { - errors.error - "The value of the <tag> feature must be '@rule-name'" ; - } - } - - # If there is no tag or the tag rule returned nothing. - if ! $(tag) || ! $(self.name) - { - self.name = [ virtual-target.add-prefix-and-suffix $(specified-name) - : $(self.type) : $(ps) ] ; - } - } - - rule actualize-no-scanner ( ) - { - local name = [ actual-name ] ; - - # Do anything only on the first invocation. - if ! $(self.made.$(name)) - { - self.made.$(name) = true ; - - if $(self.action) - { - # For non-derived target, we do not care if there are several - # virtual targets that refer to the same name. One case when - # this is unavoidable is when the file name is main.cpp and two - # targets have types CPP (for compiling) and MOCCABLE_CPP (for - # conversion to H via Qt tools). - virtual-target.register-actual-name $(name) : $(__name__) ; - } - - for local i in $(self.dependencies) - { - DEPENDS $(name) : [ $(i).actualize ] ; - } - - actualize-location $(name) ; - actualize-action $(name) ; - } - return $(name) ; - } -} - - -# Appends the suffix appropriate to 'type/property-set' combination to the -# specified name and returns the result. -# -rule add-prefix-and-suffix ( specified-name : type ? : property-set ) -{ - local suffix = [ type.generated-target-suffix $(type) : $(property-set) ] ; - - # Handle suffixes for which no leading dot is desired. Those are specified - # by enclosing them in <...>. Needed by python so it can create "_d.so" - # extensions, for example. - if $(suffix:G) - { - suffix = [ utility.ungrist $(suffix) ] ; - } - else - { - suffix = .$(suffix) ; - } - - local prefix = [ type.generated-target-prefix $(type) : $(property-set) ] ; - - if [ MATCH ^($(prefix)) : $(specified-name) ] - { - prefix = ; - } - return $(prefix:E="")$(specified-name)$(suffix:E="") ; -} - - -# File targets with explicitly known location. -# -# The file path is determined as -# * Value passed to the 'set-path' method, if any. -# * For derived files, project's build dir, joined with components that -# describe action properties. If free properties are not equal to the -# project's reference properties an element with the name of the main -# target is added. -# * For source files, project's source dir. -# -# The file suffix is determined as: -# * The value passed to the 'suffix' method, if any. -# * The suffix corresponding to the target's type. -# -class file-target : abstract-file-target -{ - import "class" : new ; - import common ; - import errors ; - - rule __init__ ( - name exact ? - : type ? # Optional type for this target. - : project - : action ? - : path ? - ) - { - abstract-file-target.__init__ $(name) $(exact) : $(type) : $(project) : - $(action) ; - - self.path = $(path) ; - } - - rule clone-with-different-type ( new-type ) - { - return [ new file-target $(self.name) exact : $(new-type) : - $(self.project) : $(self.action) : $(self.path) ] ; - } - - rule actualize-location ( target ) - { - if $(self.action) - { - # This is a derived file. - local path = [ path ] ; - LOCATE on $(target) = $(path) ; - - # Make sure the path exists. 
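# A few worked illustrations of the add-prefix-and-suffix rule above, with
# hypothetical types and platform-dependent prefixes/suffixes:
#
#   add-prefix-and-suffix main : EXE : <ps>         ->  main.exe
#   add-prefix-and-suffix boost_regex : LIB : <ps>  ->  libboost_regex.a
#   add-prefix-and-suffix foo : PYTHON_EXT : <ps>   ->  foo_d.so
#       (suffix registered in angle brackets as "<_d.so>", so no dot is added)
#   add-prefix-and-suffix libfoo : LIB : <ps>       ->  libfoo.a
#       (the MATCH check drops the prefix when the name already carries it)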
- DEPENDS $(target) : $(path) ; - common.MkDir $(path) ; - - # It is possible that the target name includes a directory too, for - # example when installing headers. Create that directory. - if $(target:D) - { - local d = $(target:D) ; - d = $(d:R=$(path)) ; - DEPENDS $(target) : $(d) ; - common.MkDir $(d) ; - } - - # For a real file target, we create a fake target depending on the - # real target. This allows us to run - # - # bjam hello.o - # - # without trying to guess the name of the real target. Note that the - # target has no directory name and uses a special <e> grist. - # - # First, that means that "bjam hello.o" will build all known hello.o - # targets. Second, the <e> grist makes sure this target will not be - # confused with other targets, for example, if we have subdir 'test' - # with target 'test' in it that includes a 'test.o' file, then the - # target for directory will be just 'test' the target for test.o - # will be <ptest/bin/gcc/debug>test.o and the target we create below - # will be <e>test.o - DEPENDS $(target:G=e) : $(target) ; - # Allow bjam <path-to-file>/<file> to work. This will not catch all - # possible ways to refer to the path (relative/absolute, extra ".", - # various "..", but should help in obvious cases. - DEPENDS $(target:G=e:R=$(path)) : $(target) ; - } - else - { - SEARCH on $(target) = [ path.native $(self.path) ] ; - } - } - - # Returns the directory for this target. - # - rule path ( ) - { - if ! $(self.path) - { - if $(self.action) - { - local p = [ $(self.action).properties ] ; - local path,relative-to-build-dir = [ $(p).target-path ] ; - local path = $(path,relative-to-build-dir[1]) ; - local relative-to-build-dir = $(path,relative-to-build-dir[2]) ; - - if $(relative-to-build-dir) - { - path = [ path.join [ $(self.project).build-dir ] $(path) ] ; - } - - self.path = [ path.native $(path) ] ; - } - } - return $(self.path) ; - } -} - - -class notfile-target : abstract-file-target -{ - rule __init__ ( name : project : action ? ) - { - abstract-file-target.__init__ $(name) : : $(project) : $(action) ; - } - - # Returns nothing to indicate that the target's path is not known. - # - rule path ( ) - { - return ; - } - - rule actualize-location ( target ) - { - NOTFILE $(target) ; - ALWAYS $(target) ; - # TEMPORARY $(target) ; - NOUPDATE $(target) ; - } -} - - -# Class representing an action. Both 'targets' and 'sources' should list -# instances of 'virtual-target'. Action name should name a rule with this -# prototype: -# rule action-name ( targets + : sources * : properties * ) -# Targets and sources are passed as actual Jam targets. The rule may not -# establish additional dependency relationships. -# -class action -{ - import "class" ; - import errors ; - import type ; - import toolset ; - import property-set ; - import indirect ; - import path ; - import set : difference ; - - rule __init__ ( sources * : action-name + : property-set ? ) - { - self.sources = $(sources) ; - - self.action-name = [ indirect.make-qualified $(action-name) ] ; - - if ! $(property-set) - { - property-set = [ property-set.empty ] ; - } - - if ! 
[ class.is-instance $(property-set) ] - { - errors.error "Property set instance required" ; - } - - self.properties = $(property-set) ; - } - - rule add-targets ( targets * ) - { - self.targets += $(targets) ; - } - - rule replace-targets ( old-targets * : new-targets * ) - { - self.targets = [ set.difference $(self.targets) : $(old-targets) ] ; - self.targets += $(new-targets) ; - } - - rule targets ( ) - { - return $(self.targets) ; - } - - rule sources ( ) - { - return $(self.sources) ; - } - - rule action-name ( ) - { - return $(self.action-name) ; - } - - rule properties ( ) - { - return $(self.properties) ; - } - - # Generates actual build instructions. - # - rule actualize ( ) - { - if ! $(self.actualized) - { - self.actualized = true ; - - local ps = [ properties ] ; - local properties = [ adjust-properties $(ps) ] ; - - local actual-targets ; - for local i in [ targets ] - { - actual-targets += [ $(i).actualize ] ; - } - - actualize-sources [ sources ] : $(properties) ; - - DEPENDS $(actual-targets) : $(self.actual-sources) - $(self.dependency-only-sources) ; - - # This works around a bug with -j and actions that - # produce multiple target, where: - # - dependency on the first output is found, and - # the action is started - # - dependency on the second output is found, and - # bjam noticed that command is already running - # - instead of waiting for the command, dependents - # of the second targets are immediately updated. - if $(actual-targets[2]) - { - INCLUDES $(actual-targets) : $(actual-targets) ; - } - - # Action name can include additional argument to rule, which should - # not be passed to 'set-target-variables' - toolset.set-target-variables - [ indirect.get-rule $(self.action-name[1]) ] $(actual-targets) - : $(properties) ; - - # Reflect ourselves in a variable for the target. This allows - # looking up additional info for the action given the raw target. - # For example to debug or output action information from action - # rules. - .action on $(actual-targets) = $(__name__) ; - - indirect.call $(self.action-name) $(actual-targets) - : $(self.actual-sources) : [ $(properties).raw ] ; - - # Since we set up the creating action here, we set up the action for - # cleaning up as well. - common.Clean clean-all : $(actual-targets) ; - } - } - - # Helper for 'actualize-sources'. For each passed source, actualizes it with - # the appropriate scanner. Returns the actualized virtual targets. - # - rule actualize-source-type ( sources * : property-set ) - { - local result = ; - for local i in $(sources) - { - local scanner ; - if [ $(i).type ] - { - scanner = [ type.get-scanner [ $(i).type ] : $(property-set) ] ; - } - result += [ $(i).actualize $(scanner) ] ; - } - return $(result) ; - } - - # Creates actual Jam targets for sources. Initializes the following member - # variables: - # 'self.actual-sources' -- sources passed to the updating action. - # 'self.dependency-only-sources' -- sources marked as dependencies, but - # are not used otherwise. - # - # New values will be *appended* to the variables. They may be non-empty if - # caller wants it. - # - rule actualize-sources ( sources * : property-set ) - { - local dependencies = [ $(self.properties).get <dependency> ] ; - - self.dependency-only-sources += - [ actualize-source-type $(dependencies) : $(property-set) ] ; - self.actual-sources += - [ actualize-source-type $(sources) : $(property-set) ] ; - - # This is used to help bjam find dependencies in generated headers and - # other main targets, e.g. 
in: - # - # make a.h : ....... ; - # exe hello : hello.cpp : <implicit-dependency>a.h ; - # - # For bjam to find the dependency the generated target must be - # actualized (i.e. have its Jam target constructed). In the above case, - # if we are building just hello ("bjam hello"), 'a.h' will not be - # actualized unless we do it here. - local implicit = [ $(self.properties).get <implicit-dependency> ] ; - for local i in $(implicit) - { - $(i:G=).actualize ; - } - } - - # Determines real properties when trying to build with 'properties'. This is - # the last chance to fix properties, for example to adjust includes to get - # generated headers correctly. Default implementation simply returns its - # argument. - # - rule adjust-properties ( property-set ) - { - return $(property-set) ; - } -} - - -# Action class which does nothing --- it produces the targets with specific -# properties out of nowhere. It is needed to distinguish virtual targets with -# different properties that are known to exist and have no actions which create -# them. -# -class null-action : action -{ - rule __init__ ( property-set ? ) - { - action.__init__ : .no-action : $(property-set) ; - } - - rule actualize ( ) - { - if ! $(self.actualized) - { - self.actualized = true ; - for local i in [ targets ] - { - $(i).actualize ; - } - } - } -} - - -# Class which acts exactly like 'action', except that its sources are not -# scanned for dependencies. -# -class non-scanning-action : action -{ - rule __init__ ( sources * : action-name + : property-set ? ) - { - action.__init__ $(sources) : $(action-name) : $(property-set) ; - } - - rule actualize-source-type ( sources * : property-set ) - { - local result ; - for local i in $(sources) - { - result += [ $(i).actualize ] ; - } - return $(result) ; - } -} - - -# Creates a virtual target with an appropriate name and type from 'file'. If a -# target with that name in that project already exists, returns that already -# created target. -# -# FIXME: a more correct way would be to compute the path to the file, based on -# name and source location for the project, and use that path to determine if -# the target has already been created. This logic should be shared with how we -# usually find targets identified by a specific target id. It should also be -# updated to work correctly when the file is specified using both relative and -# absolute paths. -# -# TODO: passing a project with all virtual targets is starting to be annoying. -# -rule from-file ( file : file-loc : project ) -{ - import type ; # Had to do this here to break a circular dependency. - - # Check whether we already created a target corresponding to this file. - local path = [ path.root [ path.root $(file) $(file-loc) ] [ path.pwd ] ] ; - - if $(.files.$(path)) - { - return $(.files.$(path)) ; - } - else - { - local name = [ path.make $(file) ] ; - local type = [ type.type $(file) ] ; - local result ; - - result = [ new file-target $(file) : $(type) : $(project) : : - $(file-loc) ] ; - - .files.$(path) = $(result) ; - return $(result) ; - } -} - - -# Registers a new virtual target. Checks if there is already a registered target -# with the same name, type, project and subvariant properties as well as the -# same sources and equal action. If such target is found it is returned and a -# new 'target' is not registered. Otherwise, 'target' is registered and -# returned. 
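# An illustration with hypothetical values: two candidate targets for
# hello.o built into bin/gcc/debug share the signature
# "bin/gcc/debug-hello.o". The cached target is reused only when both are
# action-less, or when their actions agree on the action name, the sources,
# and the base, free and non-incidental dependency properties compared below.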
-# -rule register ( target ) -{ - local signature = [ sequence.join - [ $(target).path ] [ $(target).name ] : - ] ; - - local result ; - for local t in $(.cache.$(signature)) - { - local a1 = [ $(t).action ] ; - local a2 = [ $(target).action ] ; - - if ! $(result) - { - if ! $(a1) && ! $(a2) - { - result = $(t) ; - } - else - { - if $(a1) && $(a2) && - ( [ $(a1).action-name ] = [ $(a2).action-name ] ) && - ( [ $(a1).sources ] = [ $(a2).sources ] ) - { - local ps1 = [ $(a1).properties ] ; - local ps2 = [ $(a2).properties ] ; - local p1 = [ $(ps1).base ] [ $(ps1).free ] [ set.difference - [ $(ps1).dependency ] : [ $(ps1).incidental ] ] ; - local p2 = [ $(ps2).base ] [ $(ps2).free ] [ set.difference - [ $(ps2).dependency ] : [ $(ps2).incidental ] ] ; - if $(p1) = $(p2) - { - result = $(t) ; - } - } - } - } - } - - if ! $(result) - { - .cache.$(signature) += $(target) ; - result = $(target) ; - } - - .recent-targets += $(result) ; - .all-targets += $(result) ; - - return $(result) ; -} - - -# Each target returned by 'register' is added to the .recent-targets list, -# returned by this function. This allows us to find all virtual targets created -# when building a specific main target, even those constructed only as -# intermediate targets. -# -rule recent-targets ( ) -{ - return $(.recent-targets) ; -} - - -rule clear-recent-targets ( ) -{ - .recent-targets = ; -} - - -# Returns all virtual targets ever created. -# -rule all-targets ( ) -{ - return $(.all-targets) ; -} - - -# Returns all targets from 'targets' with types equal to 'type' or derived from -# it. -# -rule select-by-type ( type : targets * ) -{ - local result ; - for local t in $(targets) - { - if [ type.is-subtype [ $(t).type ] $(type) ] - { - result += $(t) ; - } - } - return $(result) ; -} - - -rule register-actual-name ( actual-name : virtual-target ) -{ - if $(.actual.$(actual-name)) - { - local cs1 = [ $(.actual.$(actual-name)).creating-subvariant ] ; - local cs2 = [ $(virtual-target).creating-subvariant ] ; - local cmt1 = [ $(cs1).main-target ] ; - local cmt2 = [ $(cs2).main-target ] ; - - local action1 = [ $(.actual.$(actual-name)).action ] ; - local action2 = [ $(virtual-target).action ] ; - local properties-added ; - local properties-removed ; - if $(action1) && $(action2) - { - local p1 = [ $(action1).properties ] ; - p1 = [ $(p1).raw ] ; - local p2 = [ $(action2).properties ] ; - p2 = [ $(p2).raw ] ; - properties-removed = [ set.difference $(p1) : $(p2) ] ; - properties-removed ?= "none" ; - properties-added = [ set.difference $(p2) : $(p1) ] ; - properties-added ?= "none" ; - } - errors.error "Duplicate name of actual target:" $(actual-name) - : "previous virtual target" [ $(.actual.$(actual-name)).str ] - : "created from" [ $(cmt1).full-name ] - : "another virtual target" [ $(virtual-target).str ] - : "created from" [ $(cmt2).full-name ] - : "added properties:" $(properties-added) - : "removed properties:" $(properties-removed) ; - } - else - { - .actual.$(actual-name) = $(virtual-target) ; - } -} - - -# Traverses the dependency graph of 'target' and return all targets that will be -# created before this one is created. If the root of some dependency graph is -# found during traversal, it is either included or not, depending on the -# 'include-roots' value. In either case traversal stops at root targets, i.e. -# root target sources are not traversed. -# -rule traverse ( target : include-roots ? : include-sources ? 
) -{ - local result ; - if [ $(target).action ] - { - local action = [ $(target).action ] ; - # This includes the 'target' as well. - result += [ $(action).targets ] ; - - for local t in [ $(action).sources ] - { - if ! [ $(t).root ] - { - result += [ traverse $(t) : $(include-roots) : $(include-sources) ] ; - } - else if $(include-roots) - { - result += $(t) ; - } - } - } - else if $(include-sources) - { - result = $(target) ; - } - return $(result) ; -} - - -# Takes an 'action' instance and creates a new instance of it and all targets -# produced by the action. The rule-name and properties are set to -# 'new-rule-name' and 'new-properties', if those are specified. Returns the -# cloned action. -# -rule clone-action ( action : new-project : new-action-name ? : new-properties ? ) -{ - if ! $(new-action-name) - { - new-action-name = [ $(action).action-name ] ; - } - if ! $(new-properties) - { - new-properties = [ $(action).properties ] ; - } - - local action-class = [ modules.peek $(action) : __class__ ] ; - local cloned-action = [ class.new $(action-class) - [ $(action).sources ] : $(new-action-name) : $(new-properties) ] ; - - local cloned-targets ; - for local target in [ $(action).targets ] - { - local n = [ $(target).name ] ; - # Do not modify produced target names. - local cloned-target = [ class.new file-target $(n) exact : - [ $(target).type ] : $(new-project) : $(cloned-action) ] ; - local d = [ $(target).dependencies ] ; - if $(d) - { - $(cloned-target).depends $(d) ; - } - $(cloned-target).root [ $(target).root ] ; - $(cloned-target).creating-subvariant [ $(target).creating-subvariant ] ; - - cloned-targets += $(cloned-target) ; - } - - return $(cloned-action) ; -} - - -class subvariant -{ - import sequence ; - import type ; - - rule __init__ ( main-target # The instance of main-target class. - : property-set # Properties requested for this target. - : sources * - : build-properties # Actually used properties. - : sources-usage-requirements # Properties propagated from sources. - : created-targets * ) # Top-level created targets. - { - self.main-target = $(main-target) ; - self.properties = $(property-set) ; - self.sources = $(sources) ; - self.build-properties = $(build-properties) ; - self.sources-usage-requirements = $(sources-usage-requirements) ; - self.created-targets = $(created-targets) ; - - # Pre-compose a list of other dependency graphs this one depends on. - local deps = [ $(build-properties).get <implicit-dependency> ] ; - for local d in $(deps) - { - self.other-dg += [ $(d:G=).creating-subvariant ] ; - } - - self.other-dg = [ sequence.unique $(self.other-dg) ] ; - } - - rule main-target ( ) - { - return $(self.main-target) ; - } - - rule created-targets ( ) - { - return $(self.created-targets) ; - } - - rule requested-properties ( ) - { - return $(self.properties) ; - } - - rule build-properties ( ) - { - return $(self.build-properties) ; - } - - rule sources-usage-requirements ( ) - { - return $(self.sources-usage-requirements) ; - } - - rule set-usage-requirements ( usage-requirements ) - { - self.usage-requirements = $(usage-requirements) ; - } - - rule usage-requirements ( ) - { - return $(self.usage-requirements) ; - } - - # Returns all targets referenced by this subvariant, either directly or - # indirectly, and either as sources, or as dependency properties. Targets - # referred to using the dependency property are returned as properties, not - # targets. - # - rule all-referenced-targets ( theset ) - { - # Find directly referenced targets. 
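# A usage sketch for the traverse rule above (hypothetical target $(t);
# any non-empty second argument enables include-roots):
#
#   local graph = [ virtual-target.traverse $(t) : true ] ;
#
# which returns every target built before $(t), stopping at dependency
# graph roots but keeping the roots themselves in the result.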
- local deps = [ $(self.build-properties).dependency ] ; - local all-targets = $(self.sources) $(deps) ; - - # Find other subvariants. - local r ; - for local t in $(all-targets) - { - if ! [ $(theset).contains $(t) ] - { - $(theset).add $(t) ; - r += [ $(t:G=).creating-subvariant ] ; - } - } - r = [ sequence.unique $(r) ] ; - for local s in $(r) - { - if $(s) != $(__name__) - { - $(s).all-referenced-targets $(theset) ; - } - } - } - - # Returns the properties specifying implicit include paths to generated - # headers. This traverses all targets in this subvariant and subvariants - # referred by <implcit-dependecy> properties. For all targets of type - # 'target-type' (or for all targets, if 'target-type' is not specified), the - # result will contain <$(feature)>path-to-that-target. - # - rule implicit-includes ( feature : target-type ? ) - { - local key = ii$(feature)-$(target-type:E="") ; - if ! $($(key))-is-not-empty - { - local target-paths = [ all-target-directories $(target-type) ] ; - target-paths = [ sequence.unique $(target-paths) ] ; - local result = $(target-paths:G=$(feature)) ; - if ! $(result) - { - result = "" ; - } - $(key) = $(result) ; - } - if $($(key)) = "" - { - return ; - } - else - { - return $($(key)) ; - } - } - - rule all-target-directories ( target-type ? ) - { - if ! $(self.target-directories) - { - compute-target-directories $(target-type) ; - } - return $(self.target-directories) ; - } - - rule compute-target-directories ( target-type ? ) - { - local result ; - for local t in $(self.created-targets) - { - # Skip targets of the wrong type. - if ! $(target-type) || - [ type.is-derived [ $(t).type ] $(target-type) ] - { - result = [ sequence.merge $(result) : [ $(t).path ] ] ; - } - } - for local d in $(self.other-dg) - { - result += [ $(d).all-target-directories $(target-type) ] ; - } - self.target-directories = $(result) ; - } -} diff --git a/jam-files/boost-build/build/virtual_target.py b/jam-files/boost-build/build/virtual_target.py deleted file mode 100644 index 51dff037..00000000 --- a/jam-files/boost-build/build/virtual_target.py +++ /dev/null @@ -1,1118 +0,0 @@ -# Status: ported. -# Base revision: 64488. -# -# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -# Implements virtual targets, which correspond to actual files created during -# build, but are not yet targets in Jam sense. They are needed, for example, -# when searching for possible transormation sequences, when it's not known -# if particular target should be created at all. 
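# A lifecycle sketch with hypothetical names, assuming `manager` is the
# b2.manager.Manager instance and `project` a loaded project object:
#
#   vt = manager.virtual_targets()               # the VirtualTargetRegistry
#   t = vt.from_file('hello.cpp', '.', project)  # FileTarget for a source
#   t = vt.register(t)                           # dedup against the cache
#   jam_target = t.actualize()                   # low-level bjam target name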
-# -# -# +--------------------------+ -# | VirtualTarget | -# +==========================+ -# | actualize | -# +--------------------------+ -# | actualize_action() = 0 | -# | actualize_location() = 0 | -# +----------------+---------+ -# | -# ^ -# / \ -# +-+-+ -# | -# +---------------------+ +-------+--------------+ -# | Action | | AbstractFileTarget | -# +=====================| * +======================+ -# | action_name | +--+ action | -# | properties | | +----------------------+ -# +---------------------+--+ | actualize_action() | -# | actualize() |0..1 +-----------+----------+ -# | path() | | -# | adjust_properties() | sources | -# | actualize_sources() | targets | -# +------+--------------+ ^ -# | / \ -# ^ +-+-+ -# / \ | -# +-+-+ +-------------+-------------+ -# | | | -# | +------+---------------+ +--------+-------------+ -# | | FileTarget | | SearchedLibTarget | -# | +======================+ +======================+ -# | | actualize-location() | | actualize-location() | -# | +----------------------+ +----------------------+ -# | -# +-+------------------------------+ -# | | -# +----+----------------+ +---------+-----------+ -# | CompileAction | | LinkAction | -# +=====================+ +=====================+ -# | adjust_properties() | | adjust_properties() | -# +---------------------+ | actualize_sources() | -# +---------------------+ -# -# The 'CompileAction' and 'LinkAction' classes are defined not here, -# but in builtin.jam modules. They are shown in the diagram to give -# the big picture. - -import bjam - -import re -import os.path -import string -import types - -from b2.util import path, utility, set -from b2.util.utility import add_grist, get_grist, ungrist, replace_grist, get_value -from b2.util.sequence import unique -from b2.tools import common -from b2.exceptions import * -import b2.build.type -import b2.build.property_set as property_set - -import b2.build.property as property - -from b2.manager import get_manager -from b2.util import bjam_signature - -__re_starts_with_at = re.compile ('^@(.*)') - -class VirtualTargetRegistry: - def __init__ (self, manager): - self.manager_ = manager - - # A cache for FileTargets - self.files_ = {} - - # A cache for targets. - self.cache_ = {} - - # A map of actual names to virtual targets. - # Used to make sure we don't associate same - # actual target to two virtual targets. - self.actual_ = {} - - self.recent_targets_ = [] - - # All targets ever registed - self.all_targets_ = [] - - self.next_id_ = 0 - - def register (self, target): - """ Registers a new virtual target. Checks if there's already registered target, with the same - name, type, project and subvariant properties, and also with the same sources - and equal action. If such target is found it is retured and 'target' is not registered. - Otherwise, 'target' is registered and returned. - """ - if target.path(): - signature = target.path() + "-" + target.name() - else: - signature = "-" + target.name() - - result = None - if not self.cache_.has_key (signature): - self.cache_ [signature] = [] - - for t in self.cache_ [signature]: - a1 = t.action () - a2 = target.action () - - # TODO: why are we checking for not result? 
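# (The `not result` guard keeps the first matching cache entry: once a
# reusable target has been found, later candidates under the same
# signature are not reconsidered.)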
- if not result:
- if not a1 and not a2:
- result = t
- else:
- if a1 and a2 and a1.action_name () == a2.action_name () and a1.sources () == a2.sources ():
- ps1 = a1.properties ()
- ps2 = a2.properties ()
- p1 = ps1.base () + ps1.free () +\
- b2.util.set.difference(ps1.dependency(), ps1.incidental())
- p2 = ps2.base () + ps2.free () +\
- b2.util.set.difference(ps2.dependency(), ps2.incidental())
- if p1 == p2:
- result = t
-
- if not result:
- self.cache_ [signature].append (target)
- result = target
-
- # TODO: Don't append if we found pre-existing target?
- self.recent_targets_.append(result)
- self.all_targets_.append(result)
-
- return result
-
- def from_file (self, file, file_location, project):
- """ Creates a virtual target with an appropriate name and type from 'file'.
- If a target with that name in that project has already been created,
- returns that existing target.
- TODO: a more correct way would be to compute the path to the file, based on
- the name and source location of the project, and use that path to determine
- if the target has already been created.
- TODO: passing a project with all virtual targets is starting to be annoying.
- """
- # Check if we've already created a target corresponding to this file.
- path = os.path.join(os.getcwd(), file_location, file)
- path = os.path.normpath(path)
-
- if self.files_.has_key (path):
- return self.files_ [path]
-
- file_type = b2.build.type.type (file)
-
- result = FileTarget (file, file_type, project,
- None, file_location)
- self.files_ [path] = result
-
- return result
-
- def recent_targets(self):
- """Each target returned by 'register' is added to the 'recent-targets'
- list, returned by this function. This allows us to find all virtual
- targets created when building a given main target, even those
- constructed only as intermediate targets."""
-
- return self.recent_targets_
-
- def clear_recent_targets(self):
- self.recent_targets_ = []
-
- def all_targets(self):
- # Returns all virtual targets ever created
- return self.all_targets_
-
- # Returns all targets from 'targets' with types
- # equal to 'type' or derived from it.
- def select_by_type(self, type, targets):
- return [t for t in targets if b2.build.type.is_subtype(t.type(), type)]
-
- def register_actual_name (self, actual_name, virtual_target):
- if self.actual_.has_key (actual_name):
- cs1 = self.actual_ [actual_name].creating_subvariant ()
- cs2 = virtual_target.creating_subvariant ()
- cmt1 = cs1.main_target ()
- cmt2 = cs2.main_target ()
-
- action1 = self.actual_ [actual_name].action ()
- action2 = virtual_target.action ()
-
- properties_added = []
- properties_removed = []
- if action1 and action2:
- p1 = action1.properties ()
- p1 = p1.raw ()
- p2 = action2.properties ()
- p2 = p2.raw ()
-
- properties_removed = set.difference (p1, p2)
- if not properties_removed: properties_removed = "none"
-
- properties_added = set.difference (p2, p1)
- if not properties_added: properties_added = "none"
-
- # FIXME: Revive printing of real location.
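# (An illustration with hypothetical paths: this fires when two virtual
# targets map to the same actual name, e.g. both would write
# bin/gcc/debug/hello.o, while differing in sources or in real build
# properties; the report combines the property differences computed above.)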
- get_manager().errors()( - "Duplicate name of actual target: '%s'\n" - "previous virtual target '%s'\n" - "created from '%s'\n" - "another virtual target '%s'\n" - "created from '%s'\n" - "added properties: '%s'\n" - "removed properties: '%s'\n" - % (actual_name, - self.actual_ [actual_name], "loc", #cmt1.location (), - virtual_target, - "loc", #cmt2.location (), - properties_added, properties_removed)) - - else: - self.actual_ [actual_name] = virtual_target - - - def add_suffix (self, specified_name, file_type, prop_set): - """ Appends the suffix appropriate to 'type/property_set' combination - to the specified name and returns the result. - """ - suffix = b2.build.type.generated_target_suffix (file_type, prop_set) - - if suffix: - return specified_name + '.' + suffix - - else: - return specified_name - -class VirtualTarget: - """ Potential target. It can be converted into jam target and used in - building, if needed. However, it can be also dropped, which allows - to search for different transformation and select only one. - name: name of this target. - project: project to which this target belongs. - """ - def __init__ (self, name, project): - self.name_ = name - self.project_ = project - self.dependencies_ = [] - self.always_ = False - - # Caches if dapendencies for scanners have already been set. - self.made_ = {} - - def manager(self): - return self.project_.manager() - - def virtual_targets(self): - return self.manager().virtual_targets() - - def name (self): - """ Name of this target. - """ - return self.name_ - - def project (self): - """ Project of this target. - """ - return self.project_ - - def depends (self, d): - """ Adds additional instances of 'VirtualTarget' that this - one depends on. - """ - self.dependencies_ = unique (self.dependencies_ + d).sort () - - def dependencies (self): - return self.dependencies_ - - def always(self): - self.always_ = True - - def actualize (self, scanner = None): - """ Generates all the actual targets and sets up build actions for - this target. - - If 'scanner' is specified, creates an additional target - with the same location as actual target, which will depend on the - actual target and be associated with 'scanner'. That additional - target is returned. See the docs (#dependency_scanning) for rationale. - Target must correspond to a file if 'scanner' is specified. - - If scanner is not specified, then actual target is returned. - """ - actual_name = self.actualize_no_scanner () - - if self.always_: - bjam.call("ALWAYS", actual_name) - - if not scanner: - return actual_name - - else: - # Add the scanner instance to the grist for name. - g = '-'.join ([ungrist(get_grist(actual_name)), str(id(scanner))]) - - name = replace_grist (actual_name, '<' + g + '>') - - if not self.made_.has_key (name): - self.made_ [name] = True - - self.project_.manager ().engine ().add_dependency (name, actual_name) - - self.actualize_location (name) - - self.project_.manager ().scanners ().install (scanner, name, str (self)) - - return name - -# private: (overridables) - - def actualize_action (self, target): - """ Sets up build actions for 'target'. Should call appropriate rules - and set target variables. - """ - raise BaseException ("method should be defined in derived classes") - - def actualize_location (self, target): - """ Sets up variables on 'target' which specify its location. 
- """
- raise BaseException ("method should be defined in derived classes")
-
- def path (self):
- """ If the target is a generated one, returns the path where it will be
- generated. Otherwise, returns an empty list.
- """
- raise BaseException ("method should be defined in derived classes")
-
- def actual_name (self):
- """ Returns the actual target name to be used
- (for the case where no scanner is involved).
- """
- raise BaseException ("method should be defined in derived classes")
-
-
-class AbstractFileTarget (VirtualTarget):
- """ Target which corresponds to a file. The exact mapping for the file
- is not yet specified in this class. (TODO: Actually, the class name
- could be better...)
-
- May be a source file (when no action is specified), or a
- derived file (otherwise).
-
- The target's grist is a concatenation of the project's location,
- the action's properties (for derived files) and, optionally, a
- value identifying the main target.
-
- exact: If non-empty, the name is exactly the name the
- created file should have. Otherwise, the '__init__'
- method will add a suffix obtained from 'type' by
- calling 'type.generated-target-suffix'.
-
- type: optional type of this target.
- """
- def __init__ (self, name, type, project, action = None, exact=False):
- VirtualTarget.__init__ (self, name, project)
-
- self.type_ = type
-
- self.action_ = action
- self.exact_ = exact
-
- if action:
- action.add_targets ([self])
-
- if self.type_ and not exact:
- self.__adjust_name (name)
-
-
- self.actual_name_ = None
- self.path_ = None
- self.intermediate_ = False
- self.creating_subvariant_ = None
-
- # True if this is a root target.
- self.root_ = False
-
- def type (self):
- return self.type_
-
- def set_path (self, path):
- """ Sets the path. When generating the target name, it will override any
- path computation from properties.
- """
- self.path_ = path
-
- def action (self):
- """ Returns the action.
- """
- return self.action_
-
- def root (self, set = None):
- """ Sets/gets the 'root' flag. Target is root if it directly corresponds
- to some variant of a main target.
- """
- if set:
- self.root_ = True
- return self.root_
-
- def creating_subvariant (self, s = None):
- """ Gets or sets the subvariant which created this target. The subvariant
- is set when the target is brought into existence, and is never changed
- after that. In particular, if a target is shared by subvariants, only
- the first is stored.
- s: If specified, the value to set, which should be an
- instance of the 'Subvariant' class.
- """
- if s and not self.creating_subvariant_:
- self.creating_subvariant_ = s
-
- return self.creating_subvariant_
-
- def actualize_action (self, target):
- if self.action_:
- self.action_.actualize ()
-
- # Return a human-readable representation of this target
- #
- # If this target has an action, that's:
- #
- # { <action-name>-<self.name>.<self.type> <action-sources>... }
- #
- # otherwise, it's:
- #
- # { <self.name>.<self.type> }
- #
- def str(self):
- a = self.action()
-
- name_dot_type = self.name_ + "." + self.type_
-
- if a:
- action_name = a.action_name()
- ss = [ s.str() for s in a.sources()]
-
- return "{ %s-%s %s}" % (action_name, name_dot_type, str(ss))
- else:
- return "{ " + name_dot_type + " }"
-
-# private:
-
- def actual_name (self):
- if not self.actual_name_:
- self.actual_name_ = '<' + self.grist() + '>' + self.name_
-
- return self.actual_name_
-
- def grist (self):
- """Helper to 'actual_name', above. Computes a unique prefix used to
- distinguish this target from other targets with the same name which
- create different files.
- """
- # Depending on target, there may be different approaches to generating
- # unique prefixes. We'll generate prefixes in the form
- # <one letter approach code> <the actual prefix>
- path = self.path ()
-
- if path:
- # The target will be generated to a known path. Just use the path
- # for identification, since path is as unique as it can get.
- return 'p' + path
-
- else:
- # File is either source, which will be searched for, or is not a file at
- # all. Use the location of project for distinguishing.
- project_location = self.project_.get ('location')
- path_components = b2.util.path.split(project_location)
- location_grist = '!'.join (path_components)
-
- if self.action_:
- ps = self.action_.properties ()
- property_grist = ps.as_path ()
- # 'property_grist' can be empty when 'ps' is an empty
- # property set.
- if property_grist:
- location_grist = location_grist + '/' + property_grist
-
- return 'l' + location_grist
-
- def __adjust_name(self, specified_name):
- """Given the target name specified in the constructor, returns the
- name which should really be used, by looking at the <tag> properties.
- The tag properties come in two flavours:
- - <tag>value,
- - <tag>@rule-name
- In the first case, the value is just added to the name.
- In the second case, the specified rule is called with the specified
- name, target type and properties, and should return the new name.
- If no <tag> property is specified, or the rule specified by
- <tag> returns nothing, returns the result of calling
- virtual-target.add-suffix."""
-
- if self.action_:
- ps = self.action_.properties()
- else:
- ps = property_set.empty()
-
- # FIXME: I'm not sure how this is used, need to check with
- # Rene to figure out how to implement
- #~ We add ourselves to the properties so that any tag rule can get
- #~ more direct information about the target than just that available
- #~ through the properties. This is useful in implementing
- #~ name changes based on the sources of the target. For example to
- #~ make unique names of object files based on the source file.
- #~ --grafik
- #ps = property_set.create(ps.raw() + ["<target>%s" % "XXXX"])
- #ps = [ property-set.create [ $(ps).raw ] <target>$(__name__) ] ;
-
- tag = ps.get("<tag>")
-
- if tag:
-
- if len(tag) > 1:
- get_manager().errors()(
- """<tag>@rulename is present but is not the only <tag> feature""")
-
- tag = tag[0]
- if callable(tag):
- self.name_ = tag(specified_name, self.type_, ps)
- else:
- if not tag[0] == '@':
- get_manager().errors()("""The value of the <tag> feature must be '@rule-name'""")
-
- exported_ps = b2.util.value_to_jam(ps, methods=True)
- self.name_ = b2.util.call_jam_function(
- tag[1:], specified_name, self.type_, exported_ps)
- if self.name_:
- self.name_ = self.name_[0]
-
- # If there's no tag or the tag rule returned nothing.
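# A sketch of a callable <tag>, as accepted by the callable(tag) branch
# above (hypothetical function; it gets the specified name, the target
# type and the property set, and may return None to use the default naming):
#
#   def tag_with_variant(name, type, prop_set):
#       variant = prop_set.get('<variant>')
#       if variant:
#           return name + '-' + variant[0]   # e.g. hello -> hello-debug
#       return None                          # fall back to add_prefix_and_suffix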
- if not tag or not self.name_: - self.name_ = add_prefix_and_suffix(specified_name, self.type_, ps) - - def actualize_no_scanner(self): - name = self.actual_name() - - # Do anything only on the first invocation - if not self.made_: - self.made_[name] = True - - if self.action_: - # For non-derived target, we don't care if there - # are several virtual targets that refer to the same name. - # One case when this is unavoidable is when file name is - # main.cpp and two targets have types CPP (for compiling) - # and MOCCABLE_CPP (for convertion to H via Qt tools). - self.virtual_targets().register_actual_name(name, self) - - for i in self.dependencies_: - self.manager_.engine().add_dependency(name, i.actualize()) - - self.actualize_location(name) - self.actualize_action(name) - - return name - -@bjam_signature((["specified_name"], ["type"], ["property_set"])) -def add_prefix_and_suffix(specified_name, type, property_set): - """Appends the suffix appropriate to 'type/property-set' combination - to the specified name and returns the result.""" - - property_set = b2.util.jam_to_value_maybe(property_set) - - suffix = "" - if type: - suffix = b2.build.type.generated_target_suffix(type, property_set) - - # Handle suffixes for which no leading dot is desired. Those are - # specified by enclosing them in <...>. Needed by python so it - # can create "_d.so" extensions, for example. - if get_grist(suffix): - suffix = ungrist(suffix) - elif suffix: - suffix = "." + suffix - - prefix = "" - if type: - prefix = b2.build.type.generated_target_prefix(type, property_set) - - if specified_name.startswith(prefix): - prefix = "" - - if not prefix: - prefix = "" - if not suffix: - suffix = "" - return prefix + specified_name + suffix - - -class FileTarget (AbstractFileTarget): - """ File target with explicitly known location. - - The file path is determined as - - value passed to the 'set_path' method, if any - - for derived files, project's build dir, joined with components - that describe action's properties. If the free properties - are not equal to the project's reference properties - an element with name of main target is added. - - for source files, project's source dir - - The file suffix is - - the value passed to the 'suffix' method, if any, or - - the suffix which correspond to the target's type. - """ - def __init__ (self, name, type, project, action = None, path=None, exact=False): - AbstractFileTarget.__init__ (self, name, type, project, action, exact) - - self.path_ = path - - def __str__(self): - if self.type_: - return self.name_ + "." + self.type_ - else: - return self.name_ - - def clone_with_different_type(self, new_type): - return FileTarget(self.name_, new_type, self.project_, - self.action_, self.path_, exact=True) - - def actualize_location (self, target): - engine = self.project_.manager_.engine () - - if self.action_: - # This is a derived file. - path = self.path () - engine.set_target_variable (target, 'LOCATE', path) - - # Make sure the path exists. - engine.add_dependency (target, path) - common.mkdir(engine, path) - - # It's possible that the target name includes a directory - # too, for example when installing headers. Create that - # directory. - d = os.path.dirname(get_value(target)) - if d: - d = os.path.join(path, d) - engine.add_dependency(target, d) - common.mkdir(engine, d) - - # For real file target, we create a fake target that - # depends on the real target. This allows to run - # - # bjam hello.o - # - # without trying to guess the name of the real target. 
- # Note the that target has no directory name, and a special - # grist <e>. - # - # First, that means that "bjam hello.o" will build all - # known hello.o targets. - # Second, the <e> grist makes sure this target won't be confused - # with other targets, for example, if we have subdir 'test' - # with target 'test' in it that includes 'test.o' file, - # then the target for directory will be just 'test' the target - # for test.o will be <ptest/bin/gcc/debug>test.o and the target - # we create below will be <e>test.o - engine.add_dependency("<e>%s" % get_value(target), target) - - # Allow bjam <path-to-file>/<file> to work. This won't catch all - # possible ways to refer to the path (relative/absolute, extra ".", - # various "..", but should help in obvious cases. - engine.add_dependency("<e>%s" % (os.path.join(path, get_value(target))), target) - - else: - # This is a source file. - engine.set_target_variable (target, 'SEARCH', self.project_.get ('source-location')) - - - def path (self): - """ Returns the directory for this target. - """ - if not self.path_: - if self.action_: - p = self.action_.properties () - (target_path, relative_to_build_dir) = p.target_path () - - if relative_to_build_dir: - # Indicates that the path is relative to - # build dir. - target_path = os.path.join (self.project_.build_dir (), target_path) - - # Store the computed path, so that it's not recomputed - # any more - self.path_ = target_path - - return self.path_ - - -class NotFileTarget(AbstractFileTarget): - - def __init__(self, name, project, action): - AbstractFileTarget.__init__(self, name, None, project, action) - - def path(self): - """Returns nothing, to indicate that target path is not known.""" - return None - - def actualize_location(self, target): - bjam.call("NOTFILE", target) - bjam.call("ALWAYS", target) - bjam.call("NOUPDATE", target) - - -class Action: - """ Class which represents an action. - Both 'targets' and 'sources' should list instances of 'VirtualTarget'. - Action name should name a rule with this prototype - rule action_name ( targets + : sources * : properties * ) - Targets and sources are passed as actual jam targets. The rule may - not establish dependency relationship, but should do everything else. - """ - def __init__ (self, manager, sources, action_name, prop_set): - assert(isinstance(prop_set, property_set.PropertySet)) - assert type(sources) == types.ListType - self.sources_ = sources - self.action_name_ = action_name - if not prop_set: - prop_set = property_set.empty() - self.properties_ = prop_set - if not all(isinstance(v, VirtualTarget) for v in prop_set.get('implicit-dependency')): - import pdb - pdb.set_trace() - - self.manager_ = manager - self.engine_ = self.manager_.engine () - self.targets_ = [] - - # Indicates whether this has been actualized or not. - self.actualized_ = False - - self.dependency_only_sources_ = [] - self.actual_sources_ = [] - - - def add_targets (self, targets): - self.targets_ += targets - - - def replace_targets (old_targets, new_targets): - self.targets_ = [t for t in targets if not t in old_targets] + new_targets - - def targets (self): - return self.targets_ - - def sources (self): - return self.sources_ - - def action_name (self): - return self.action_name_ - - def properties (self): - return self.properties_ - - def actualize (self): - """ Generates actual build instructions. 
- """ - if self.actualized_: - return - - self.actualized_ = True - - ps = self.properties () - properties = self.adjust_properties (ps) - - - actual_targets = [] - - for i in self.targets (): - actual_targets.append (i.actualize ()) - - self.actualize_sources (self.sources (), properties) - - self.engine_.add_dependency (actual_targets, self.actual_sources_ + self.dependency_only_sources_) - - # This works around a bug with -j and actions that - # produce multiple target, where: - # - dependency on the first output is found, and - # the action is started - # - dependency on the second output is found, and - # bjam noticed that command is already running - # - instead of waiting for the command, dependents - # of the second targets are immediately updated. - if len(actual_targets) > 1: - bjam.call("INCLUDES", actual_targets, actual_targets) - - # FIXME: check the comment below. Was self.action_name_ [1] - # Action name can include additional argument to rule, which should not - # be passed to 'set-target-variables' - # FIXME: breaking circular dependency - import toolset - toolset.set_target_variables (self.manager_, self.action_name_, actual_targets, properties) - - engine = self.manager_.engine () - - # FIXME: this is supposed to help --out-xml option, but we don't - # implement that now, and anyway, we should handle it in Python, - # not but putting variables on bjam-level targets. - bjam.call("set-target-variable", actual_targets, ".action", repr(self)) - - self.manager_.engine ().set_update_action (self.action_name_, actual_targets, self.actual_sources_, - properties) - - # Since we set up creating action here, we also set up - # action for cleaning up - self.manager_.engine ().set_update_action ('common.Clean', 'clean-all', - actual_targets) - - return actual_targets - - def actualize_source_type (self, sources, prop_set): - """ Helper for 'actualize_sources'. - For each passed source, actualizes it with the appropriate scanner. - Returns the actualized virtual targets. - """ - result = [] - for i in sources: - scanner = None - -# FIXME: what's this? -# if isinstance (i, str): -# i = self.manager_.get_object (i) - - if i.type (): - scanner = b2.build.type.get_scanner (i.type (), prop_set) - - r = i.actualize (scanner) - result.append (r) - - return result - - def actualize_sources (self, sources, prop_set): - """ Creates actual jam targets for sources. Initializes two member - variables: - 'self.actual_sources_' -- sources which are passed to updating action - 'self.dependency_only_sources_' -- sources which are made dependencies, but - are not used otherwise. - - New values will be *appended* to the variables. They may be non-empty, - if caller wants it. - """ - dependencies = self.properties_.get ('<dependency>') - - self.dependency_only_sources_ += self.actualize_source_type (dependencies, prop_set) - self.actual_sources_ += self.actualize_source_type (sources, prop_set) - - # This is used to help bjam find dependencies in generated headers - # in other main targets. - # Say: - # - # make a.h : ....... ; - # exe hello : hello.cpp : <implicit-dependency>a.h ; - # - # However, for bjam to find the dependency the generated target must - # be actualized (i.e. have the jam target). In the above case, - # if we're building just hello ("bjam hello"), 'a.h' won't be - # actualized unless we do it here. 
- implicit = self.properties_.get("<implicit-dependency>") - - for i in implicit: - i.actualize() - - def adjust_properties (self, prop_set): - """ Determines real properties when trying building with 'properties'. - This is last chance to fix properties, for example to adjust includes - to get generated headers correctly. Default implementation returns - its argument. - """ - return prop_set - - -class NullAction (Action): - """ Action class which does nothing --- it produces the targets with - specific properties out of nowhere. It's needed to distinguish virtual - targets with different properties that are known to exist, and have no - actions which create them. - """ - def __init__ (self, manager, prop_set): - Action.__init__ (self, manager, [], None, prop_set) - - def actualize (self): - if not self.actualized_: - self.actualized_ = True - - for i in self.targets (): - i.actualize () - -class NonScanningAction(Action): - """Class which acts exactly like 'action', except that the sources - are not scanned for dependencies.""" - - def __init__(self, sources, action_name, property_set): - #FIXME: should the manager parameter of Action.__init__ - #be removed? -- Steven Watanabe - Action.__init__(self, b2.manager.get_manager(), sources, action_name, property_set) - - def actualize_source_type(self, sources, property_set): - - result = [] - for s in sources: - result.append(s.actualize()) - return result - -def traverse (target, include_roots = False, include_sources = False): - """ Traverses the dependency graph of 'target' and return all targets that will - be created before this one is created. If root of some dependency graph is - found during traversal, it's either included or not, dependencing of the - value of 'include_roots'. In either case, sources of root are not traversed. - """ - result = [] - - if target.action (): - action = target.action () - - # This includes 'target' as well - result += action.targets () - - for t in action.sources (): - - # FIXME: - # TODO: see comment in Manager.register_object () - #if not isinstance (t, VirtualTarget): - # t = target.project_.manager_.get_object (t) - - if not t.root (): - result += traverse (t, include_roots, include_sources) - - elif include_roots: - result.append (t) - - elif include_sources: - result.append (target) - - return result - -def clone_action (action, new_project, new_action_name, new_properties): - """Takes an 'action' instances and creates new instance of it - and all produced target. The rule-name and properties are set - to 'new-rule-name' and 'new-properties', if those are specified. - Returns the cloned action.""" - - if not new_action_name: - new_action_name = action.action_name() - - if not new_properties: - new_properties = action.properties() - - cloned_action = action.__class__(action.manager_, action.sources(), new_action_name, - new_properties) - - cloned_targets = [] - for target in action.targets(): - - n = target.name() - # Don't modify the name of the produced targets. 
Strip the directory f - cloned_target = FileTarget(n, target.type(), new_project, - cloned_action, exact=True) - - d = target.dependencies() - if d: - cloned_target.depends(d) - cloned_target.root(target.root()) - cloned_target.creating_subvariant(target.creating_subvariant()) - - cloned_targets.append(cloned_target) - - return cloned_action - -class Subvariant: - - def __init__ (self, main_target, prop_set, sources, build_properties, sources_usage_requirements, created_targets): - """ - main_target: The instance of MainTarget class - prop_set: Properties requested for this target - sources: - build_properties: Actually used properties - sources_usage_requirements: Properties propagated from sources - created_targets: Top-level created targets - """ - self.main_target_ = main_target - self.properties_ = prop_set - self.sources_ = sources - self.build_properties_ = build_properties - self.sources_usage_requirements_ = sources_usage_requirements - self.created_targets_ = created_targets - - self.usage_requirements_ = None - - # Pre-compose the list of other dependency graphs, on which this one - # depends - deps = build_properties.get('<implicit-dependency>') - - self.other_dg_ = [] - for d in deps: - self.other_dg_.append(d.creating_subvariant ()) - - self.other_dg_ = unique (self.other_dg_) - - self.implicit_includes_cache_ = {} - self.target_directories_ = None - - def main_target (self): - return self.main_target_ - - def created_targets (self): - return self.created_targets_ - - def requested_properties (self): - return self.properties_ - - def build_properties (self): - return self.build_properties_ - - def sources_usage_requirements (self): - return self.sources_usage_requirements_ - - def set_usage_requirements (self, usage_requirements): - self.usage_requirements_ = usage_requirements - - def usage_requirements (self): - return self.usage_requirements_ - - def all_referenced_targets(self, result): - """Returns all targets referenced by this subvariant, - either directly or indirectly, and either as sources, - or as dependency properties. Targets referred with - dependency property are returned a properties, not targets.""" - - # Find directly referenced targets. - deps = self.build_properties().dependency() - all_targets = self.sources_ + deps - - # Find other subvariants. - r = [] - for e in all_targets: - if not e in result: - result.add(e) - if isinstance(e, property.Property): - t = e.value() - else: - t = e - - # FIXME: how can this be? - cs = t.creating_subvariant() - if cs: - r.append(cs) - r = unique(r) - for s in r: - if s != self: - s.all_referenced_targets(result) - - - def implicit_includes (self, feature, target_type): - """ Returns the properties which specify implicit include paths to - generated headers. This traverses all targets in this subvariant, - and subvariants referred by <implcit-dependecy>properties. - For all targets which are of type 'target-type' (or for all targets, - if 'target_type' is not specified), the result will contain - <$(feature)>path-to-that-target. 
diff --git a/jam-files/boost-build/kernel/boost-build.jam b/jam-files/boost-build/kernel/boost-build.jam deleted file mode 100644 index 377f6ec0..00000000 --- a/jam-files/boost-build/kernel/boost-build.jam +++ /dev/null @@ -1,5 +0,0 @@
-# Copyright 2003 Dave Abrahams
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-boost-build . ;
diff --git a/jam-files/boost-build/kernel/bootstrap.jam b/jam-files/boost-build/kernel/bootstrap.jam deleted file mode 100644 index 89048af9..00000000 --- a/jam-files/boost-build/kernel/bootstrap.jam +++ /dev/null @@ -1,263 +0,0 @@
-# Copyright 2003 Dave Abrahams
-# Copyright 2003, 2005, 2006 Rene Rivera
-# Copyright 2003, 2005, 2006 Vladimir Prus
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-# First of all, check the jam version
-
-if $(JAM_VERSION:J="") < 030112
-{
-    ECHO "error: Boost.Jam version 3.1.12 or later required" ;
-    EXIT ;
-}
-
-local required-rules = GLOB-RECURSIVELY HAS_NATIVE_RULE ;
-
-for local r in $(required-rules)
-{
-    if ! $(r) in [ RULENAMES ]
-    {
-        ECHO "error: builtin rule '$(r)' is not present" ;
-        ECHO "error: your version of bjam is likely out of date" ;
-        ECHO "error: please get a fresh version from SVN." ;
-        EXIT ;
-    }
-}
-
-local native =
-    regex transform 2
-    ;
-while $(native)
-{
-    if ! [ HAS_NATIVE_RULE $(native[1]) :
-           $(native[2]) :
-           $(native[3]) ]
-    {
-        ECHO "error: missing native rule '$(native[1]).$(native[2])'" ;
-        ECHO "error: or interface version of that rule is too low" ;
-        ECHO "error: your version of bjam is likely out of date" ;
-        ECHO "error: please get a fresh version from SVN." ;
-        EXIT ;
-    }
-    native = $(native[4-]) ;
-}
-
-# Check that the builtin .ENVIRON module is present. We do not have a
-# builtin to check that a module is present, so we assume that the PATH
-# environment variable is always set and verify that the .ENVIRON module
-# has a non-empty value for that variable.
-module .ENVIRON
-{
-    local p = $(PATH) $(Path) $(path) ;
-    if ! $(p)
-    {
-        ECHO "error: no builtin module .ENVIRON is found" ;
-        ECHO "error: your version of bjam is likely out of date" ;
-        ECHO "error: please get a fresh version from SVN." ;
-        EXIT ;
-    }
-}
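Every probe in this bootstrap file follows the same pattern: test a proxy for the capability, then bail out with an upgrade hint if it is missing. A loose Python analogue of the pattern (illustrative only; it assumes JAM_VERSION holds zero-padded components, so the joined string compares correctly against "030112", i.e. 3.1.12):

    import os
    import sys

    def require(ok, what):
        """Probe-and-bail, mirroring the ECHO ... EXIT blocks above."""
        if not ok:
            sys.exit("error: %s\n"
                     "error: your version of bjam is likely out of date" % what)

    # Hypothetical version components; the join makes one comparable string.
    jam_version = "".join(["03", "01", "12"])
    require(jam_version >= "030112", "Boost.Jam version 3.1.12 or later required")

    # The .ENVIRON probe assumes PATH is always set in a real environment.
    require(bool(os.environ.get("PATH")), "builtin module .ENVIRON")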
-# Check that @() functionality is present. As with modules, we do not have a
-# way to test for this directly. Instead we check that $(TMPNAME)
-# functionality is present, which was added at roughly the same time (more
-# precisely, it was added just before).
-{
-    if ! $(TMPNAME)
-    {
-        ECHO "error: no @() functionality found" ;
-        ECHO "error: your version of bjam is likely out of date" ;
-        ECHO "error: please get a fresh version from SVN." ;
-        EXIT ;
-    }
-}
-
-# Make sure that the \n escape is available.
-if "\n" = "n"
-{
-    if $(OS) = CYGWIN
-    {
-        ECHO "warning: escape sequences are not supported" ;
-        ECHO "warning: this will cause major misbehaviour on cygwin" ;
-        ECHO "warning: your version of bjam is likely out of date" ;
-        ECHO "warning: please get a fresh version from SVN." ;
-    }
-}
-
-# Bootstrap the module system. Then bring the import rule into the global module.
-#
-SEARCH on <module@>modules.jam = $(.bootstrap-file:D) ;
-module modules { include <module@>modules.jam ; }
-IMPORT modules : import : : import ;
-
-{
-    # Add module subdirectories to the BOOST_BUILD_PATH, which allows
-    # us to make an incremental refactoring step by moving modules to
-    # the appropriate subdirectories, thereby achieving some physical
-    # separation of different layers without changing all of our code
-    # to specify subdirectories in import statements or use an extra
-    # level of qualification on imported names.
-
-    local subdirs =
-        kernel   # only the most-intrinsic modules: modules, errors
-        util     # low-level substrate: string/number handling, etc.
-        build    # essential elements of the build system architecture
-        tools    # toolsets for handling specific build jobs and targets.
-        contrib  # user contributed (unreviewed) modules
-        .        # build-system.jam lives here
-        ;
-    local whereami = [ NORMALIZE_PATH $(.bootstrap-file:DT) ] ;
-    BOOST_BUILD_PATH += $(whereami:D)/$(subdirs) ;
-
-    modules.poke .ENVIRON : BOOST_BUILD_PATH : $(BOOST_BUILD_PATH) ;
-
-    modules.poke : EXTRA_PYTHONPATH : $(whereami) ;
-}
-
-# Reload the modules, to clean up things. The modules module can tolerate
-# being included twice.
-#
-import modules ;
-
-# Process option plugins first to allow them to prevent loading
-# the rest of the build system.
-#
-import option ;
-local dont-build = [ option.process ] ;
-
-# Should we skip building, i.e. loading the build system, according
-# to the options processed?
-#
-if ! $(dont-build)
-{
-    if ! --python in $(ARGV)
-    {
-        # Allow users to override the build system file from the
-        # command-line (mostly for testing)
-        local build-system = [ MATCH --build-system=(.*) : $(ARGV) ] ;
-        build-system ?= build-system ;
-
-        # Use the last element in case of multiple command-line options
-        import $(build-system[-1]) ;
-    }
-    else
-    {
-        ECHO "Boost.Build V2 Python port (experimental)" ;
-
-        # Define the additional interface that is exposed to Python code. Python
-        # code will also have access to select bjam builtins in the 'bjam'
-        # module, but some things are easier to define outside C.
-        module python_interface
-        {
-            rule load ( module-name : location )
-            {
-                USER_MODULE $(module-name) ;
-                # Make all rules in the loaded module available in
-                # the global namespace, so that we don't have
-                # to bother specifying the "right" module when calling
-                # from Python.
-                module $(module-name)
-                {
-                    __name__ = $(1) ;
-                    include $(2) ;
-                    local rules = [ RULENAMES $(1) ] ;
-                    IMPORT $(1) : $(rules) : $(1) : $(1).$(rules) ;
-                }
-            }
-
-            rule peek ( module-name ?
: variables + ) - { - module $(<) - { - return $($(>)) ; - } - } - - rule set-variable ( module-name : name : value * ) - { - module $(<) - { - $(>) = $(3) ; - } - } - - rule set-top-level-targets ( targets * ) - { - DEPENDS all : $(targets) ; - } - - rule call-in-module ( m : rulename : * ) - { - module $(m) - { - return [ $(2) $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ] ; - } - } - - - rule set-update-action ( action : targets * : sources * : properties * ) - { - $(action) $(targets) : $(sources) : $(properties) ; - } - - rule set-update-action-in-module ( m : action : targets * : sources * : properties * ) - { - module $(m) - { - $(2) $(3) : $(4) : $(5) ; - } - } - - rule set-target-variable ( targets + : variable : value * : append ? ) - { - if $(append) - { - $(variable) on $(targets) += $(value) ; - } - else - { - $(variable) on $(targets) = $(value) ; - } - } - - rule get-target-variable ( targets + : variable ) - { - return [ on $(targets) return $($(variable)) ] ; - } - - rule import-rules-from-parent ( parent-module : this-module : user-rules * ) - { - IMPORT $(parent-module) : $(user-rules) : $(this-module) : $(user-rules) ; - EXPORT $(this-module) : $(user-rules) ; - } - - rule mark-included ( targets * : includes * ) { - NOCARE $(includes) ; - INCLUDES $(targets) : $(includes) ; - ISFILE $(includes) ; - } - } - - PYTHON_IMPORT_RULE bootstrap : bootstrap : PyBB : bootstrap ; - modules.poke PyBB : root : [ NORMALIZE_PATH $(.bootstrap-file:DT)/.. ] ; - - module PyBB - { - local ok = [ bootstrap $(root) ] ; - if ! $(ok) - { - EXIT ; - } - } - - - #PYTHON_IMPORT_RULE boost.build.build_system : main : PyBB : main ; - - #module PyBB - #{ - # main ; - #} - - } -} diff --git a/jam-files/boost-build/kernel/bootstrap.py b/jam-files/boost-build/kernel/bootstrap.py deleted file mode 100644 index 2e8dd37b..00000000 --- a/jam-files/boost-build/kernel/bootstrap.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2009 Vladimir Prus -# -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import imp -import sys - -def bootstrap(root_path): - """Performs python-side bootstrapping of Boost.Build/Python. - - This function arranges for 'b2.whatever' package names to work, while also - allowing to put python files alongside corresponding jam modules. - """ - - m = imp.new_module("b2") - # Note that: - # 1. If __path__ is not list of strings, nothing will work - # 2. root_path is already list of strings. - m.__path__ = root_path - sys.modules["b2"] = m - - import b2.build_system - return b2.build_system.main() - diff --git a/jam-files/boost-build/kernel/class.jam b/jam-files/boost-build/kernel/class.jam deleted file mode 100644 index b8e55af3..00000000 --- a/jam-files/boost-build/kernel/class.jam +++ /dev/null @@ -1,420 +0,0 @@ -# Copyright 2001, 2002, 2003 Dave Abrahams -# Copyright 2002, 2005 Rene Rivera -# Copyright 2002, 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Polymorphic class system built on top of core Jam facilities. 
-# -# Classes are defined by 'class' keywords:: -# -# class myclass -# { -# rule __init__ ( arg1 ) # constructor -# { -# self.attribute = $(arg1) ; -# } -# -# rule method1 ( ) # method -# { -# return [ method2 ] ; -# } -# -# rule method2 ( ) # method -# { -# return $(self.attribute) ; -# } -# } -# -# The __init__ rule is the constructor, and sets member variables. -# -# New instances are created by invoking [ new <class> <args...> ]: -# -# local x = [ new myclass foo ] ; # x is a new myclass object -# assert.result foo : [ $(x).method1 ] ; # $(x).method1 returns "foo" -# -# Derived class are created by mentioning base classes in the declaration:: -# -# class derived : myclass -# { -# rule __init__ ( arg ) -# { -# myclass.__init__ $(arg) ; # call base __init__ -# -# } -# -# rule method2 ( ) # method override -# { -# return $(self.attribute)XXX ; -# } -# } -# -# All methods operate virtually, replacing behavior in the base classes. For -# example:: -# -# local y = [ new derived foo ] ; # y is a new derived object -# assert.result fooXXX : [ $(y).method1 ] ; # $(y).method1 returns "foo" -# -# Each class instance is its own core Jam module. All instance attributes and -# methods are accessible without additional qualification from within the class -# instance. All rules imported in class declaration, or visible in base classses -# are also visible. Base methods are available in qualified form: -# base-name.method-name. By convention, attribute names are prefixed with -# "self.". - -import modules ; -import numbers ; - - -rule xinit ( instance : class ) -{ - module $(instance) - { - __class__ = $(2) ; - __name__ = $(1) ; - } -} - - -rule new ( class args * : * ) -{ - .next-instance ?= 1 ; - local id = object($(class))@$(.next-instance) ; - - xinit $(id) : $(class) ; - - INSTANCE $(id) : class@$(class) ; - IMPORT_MODULE $(id) ; - $(id).__init__ $(args) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - - # Bump the next unique object name. - .next-instance = [ numbers.increment $(.next-instance) ] ; - - # Return the name of the new instance. - return $(id) ; -} - - -rule bases ( class ) -{ - module class@$(class) - { - return $(__bases__) ; - } -} - - -rule is-derived ( class : bases + ) -{ - local stack = $(class) ; - local visited found ; - while ! $(found) && $(stack) - { - local top = $(stack[1]) ; - stack = $(stack[2-]) ; - if ! ( $(top) in $(visited) ) - { - visited += $(top) ; - stack += [ bases $(top) ] ; - - if $(bases) in $(visited) - { - found = true ; - } - } - } - return $(found) ; -} - - -# Returns true if the 'value' is a class instance. -# -rule is-instance ( value ) -{ - return [ MATCH "^(object\\()[^@]+\\)@.*" : $(value) ] ; -} - - -# Check if the given value is of the given type. -# -rule is-a ( - instance # The value to check. - : type # The type to test for. -) -{ - if [ is-instance $(instance) ] - { - return [ class.is-derived [ modules.peek $(instance) : __class__ ] : $(type) ] ; - } -} - - -local rule typecheck ( x ) -{ - local class-name = [ MATCH "^\\[(.*)\\]$" : [ BACKTRACE 1 ] ] ; - if ! [ is-a $(x) : $(class-name) ] - { - return "Expected an instance of "$(class-name)" but got \""$(x)"\" for argument" ; - } -} - - -rule __test__ ( ) -{ - import assert ; - import "class" : new ; - - # This will be the construction function for a class called 'myclass'. - # - class myclass - { - import assert ; - - rule __init__ ( x_ * : y_ * ) - { - # Set some instance variables. 
- x = $(x_) ; - y = $(y_) ; - foo += 10 ; - } - - rule set-x ( newx * ) - { - x = $(newx) ; - } - - rule get-x ( ) - { - return $(x) ; - } - - rule set-y ( newy * ) - { - y = $(newy) ; - } - - rule get-y ( ) - { - return $(y) ; - } - - rule f ( ) - { - return [ g $(x) ] ; - } - - rule g ( args * ) - { - if $(x) in $(y) - { - return $(x) ; - } - else if $(y) in $(x) - { - return $(y) ; - } - else - { - return ; - } - } - - rule get-class ( ) - { - return $(__class__) ; - } - - rule get-instance ( ) - { - return $(__name__) ; - } - - rule invariant ( ) - { - assert.equal 1 : 1 ; - } - - rule get-foo ( ) - { - return $(foo) ; - } - } -# class myclass ; - - class derived1 : myclass - { - rule __init__ ( z_ ) - { - myclass.__init__ $(z_) : X ; - z = $(z_) ; - } - - # Override g. - # - rule g ( args * ) - { - return derived1.g ; - } - - rule h ( ) - { - return derived1.h ; - } - - rule get-z ( ) - { - return $(z) ; - } - - # Check that 'assert.equal' visible in base class is visible here. - # - rule invariant2 ( ) - { - assert.equal 2 : 2 ; - } - - # Check that 'assert.variable-not-empty' visible in base class is - # visible here. - # - rule invariant3 ( ) - { - local v = 10 ; - assert.variable-not-empty v ; - } - } -# class derived1 : myclass ; - - class derived2 : myclass - { - rule __init__ ( ) - { - myclass.__init__ 1 : 2 ; - } - - # Override g. - # - rule g ( args * ) - { - return derived2.g ; - } - - # Test the ability to call base class functions with qualification. - # - rule get-x ( ) - { - return [ myclass.get-x ] ; - } - } -# class derived2 : myclass ; - - class derived2a : derived2 - { - rule __init__ - { - derived2.__init__ ; - } - } -# class derived2a : derived2 ; - - local rule expect_derived2 ( [derived2] x ) { } - - local a = [ new myclass 3 4 5 : 4 5 ] ; - local b = [ new derived1 4 ] ; - local b2 = [ new derived1 4 ] ; - local c = [ new derived2 ] ; - local d = [ new derived2 ] ; - local e = [ new derived2a ] ; - - expect_derived2 $(d) ; - expect_derived2 $(e) ; - - # Argument checking is set up to call exit(1) directly on failure, and we - # can not hijack that with try, so we should better not do this test by - # default. We could fix this by having errors look up and invoke the EXIT - # rule instead; EXIT can be hijacked (;-) - if --fail-typecheck in [ modules.peek : ARGV ] - { - try ; - { - expect_derived2 $(a) ; - } - catch - "Expected an instance of derived2 but got" instead - ; - } - - #try ; - #{ - # new bad_subclass ; - #} - #catch - # bad_subclass.bad_subclass failed to call base class constructor myclass.__init__ - # ; - - #try ; - #{ - # class bad_subclass ; - #} - #catch bad_subclass has already been declared ; - - assert.result 3 4 5 : $(a).get-x ; - assert.result 4 5 : $(a).get-y ; - assert.result 4 : $(b).get-x ; - assert.result X : $(b).get-y ; - assert.result 4 : $(b).get-z ; - assert.result 1 : $(c).get-x ; - assert.result 2 : $(c).get-y ; - assert.result 4 5 : $(a).f ; - assert.result derived1.g : $(b).f ; - assert.result derived2.g : $(c).f ; - assert.result derived2.g : $(d).f ; - - assert.result 10 : $(b).get-foo ; - - $(a).invariant ; - $(b).invariant2 ; - $(b).invariant3 ; - - # Check that the __class__ attribute is getting properly set. 
- assert.result myclass : $(a).get-class ; - assert.result derived1 : $(b).get-class ; - assert.result $(a) : $(a).get-instance ; - - $(a).set-x a.x ; - $(b).set-x b.x ; - $(c).set-x c.x ; - $(d).set-x d.x ; - assert.result a.x : $(a).get-x ; - assert.result b.x : $(b).get-x ; - assert.result c.x : $(c).get-x ; - assert.result d.x : $(d).get-x ; - - class derived3 : derived1 derived2 - { - rule __init__ ( ) - { - } - } - - assert.result : bases myclass ; - assert.result myclass : bases derived1 ; - assert.result myclass : bases derived2 ; - assert.result derived1 derived2 : bases derived3 ; - - assert.true is-derived derived1 : myclass ; - assert.true is-derived derived2 : myclass ; - assert.true is-derived derived3 : derived1 ; - assert.true is-derived derived3 : derived2 ; - assert.true is-derived derived3 : derived1 derived2 myclass ; - assert.true is-derived derived3 : myclass ; - - assert.false is-derived myclass : derived1 ; - - assert.true is-instance $(a) ; - assert.false is-instance bar ; - - assert.true is-a $(a) : myclass ; - assert.true is-a $(c) : derived2 ; - assert.true is-a $(d) : myclass ; - assert.false is-a literal : myclass ; -} diff --git a/jam-files/boost-build/kernel/errors.jam b/jam-files/boost-build/kernel/errors.jam deleted file mode 100644 index 63b11e86..00000000 --- a/jam-files/boost-build/kernel/errors.jam +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2004 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Print a stack backtrace leading to this rule's caller. Each argument -# represents a line of output to be printed after the first line of the -# backtrace. -# -rule backtrace ( skip-frames prefix messages * : * ) -{ - local frame-skips = 5 9 13 17 21 25 29 33 37 41 45 49 53 57 61 65 69 73 77 81 ; - local drop-elements = $(frame-skips[$(skip-frames)]) ; - if ! ( $(skip-frames) in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ) - { - ECHO "warning: backtrace doesn't support skipping $(skip-frames) frames;" - "using 1 instead." ; - drop-elements = 5 ; - } - - local args = $(.args) ; - if $(.user-modules-only) - { - local bt = [ nearest-user-location ] ; - ECHO "$(prefix) at $(bt) " ; - for local n in $(args) - { - if $($(n))-is-not-empty - { - ECHO $(prefix) $($(n)) ; - } - } - } - else - { - # Get the whole backtrace, then drop the initial quadruples - # corresponding to the frames that must be skipped. - local bt = [ BACKTRACE ] ; - bt = $(bt[$(drop-elements)-]) ; - - while $(bt) - { - local m = [ MATCH ^(.+)\\.$ : $(bt[3]) ] ; - ECHO $(bt[1]):$(bt[2]): "in" $(bt[4]) "from module" $(m) ; - - # The first time through, print each argument on a separate line. - for local n in $(args) - { - if $($(n))-is-not-empty - { - ECHO $(prefix) $($(n)) ; - } - } - args = ; # Kill args so that this never happens again. - - # Move on to the next quadruple. - bt = $(bt[5-]) ; - } - } -} - -.args ?= messages 2 3 4 5 6 7 8 9 ; -.disabled ?= ; -.last-error-$(.args) ?= ; - - -# try-catch -- -# -# This is not really an exception-handling mechanism, but it does allow us to -# perform some error-checking on our error-checking. Errors are suppressed after -# a try, and the first one is recorded. Use catch to check that the error -# message matched expectations. - -# Begin looking for error messages. 
-# -rule try ( ) -{ - .disabled += true ; - .last-error-$(.args) = ; -} - - -# Stop looking for error messages; generate an error if an argument of messages -# is not found in the corresponding argument in the error call. -# -rule catch ( messages * : * ) -{ - .disabled = $(.disabled[2-]) ; # Pop the stack. - - import sequence ; - - if ! $(.last-error-$(.args))-is-not-empty - { - error-skip-frames 3 expected an error, but none occurred ; - } - else - { - for local n in $(.args) - { - if ! $($(n)) in $(.last-error-$(n)) - { - local v = [ sequence.join $($(n)) : " " ] ; - v ?= "" ; - local joined = [ sequence.join $(.last-error-$(n)) : " " ] ; - - .last-error-$(.args) = ; - error-skip-frames 3 expected \"$(v)\" in argument $(n) of error - : got \"$(joined)\" instead ; - } - } - } -} - - -rule error-skip-frames ( skip-frames messages * : * ) -{ - if ! $(.disabled) - { - backtrace $(skip-frames) error: $(messages) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - EXIT ; - } - else if ! $(.last-error-$(.args)) - { - for local n in $(.args) - { - # Add an extra empty string so that we always have - # something in the event of an error - .last-error-$(n) = $($(n)) "" ; - } - } -} - -if --no-error-backtrace in [ modules.peek : ARGV ] -{ - .no-error-backtrace = true ; -} - - -# Print an error message with a stack backtrace and exit. -# -rule error ( messages * : * ) -{ - if $(.no-error-backtrace) - { - # Print each argument on a separate line. - for local n in $(.args) - { - if $($(n))-is-not-empty - { - if ! $(first-printed) - { - ECHO error: $($(n)) ; - first-printed = true ; - } - else - { - ECHO $($(n)) ; - } - } - } - EXIT ; - } - else - { - error-skip-frames 3 $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } -} - - -# Same as 'error', but the generated backtrace will include only user files. -# -rule user-error ( messages * : * ) -{ - .user-modules-only = 1 ; - error-skip-frames 3 $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; -} - - -# Print a warning message with a stack backtrace and exit. -# -rule warning -{ - backtrace 2 warning: $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; -} - - -# Convert an arbitrary argument list into a list with ":" separators and quoted -# elements representing the same information. This is mostly useful for -# formatting descriptions of arguments with which a rule was called when -# reporting an error. -# -rule lol->list ( * ) -{ - local result ; - local remaining = 1 2 3 4 5 6 7 8 9 ; - while $($(remaining)) - { - local n = $(remaining[1]) ; - remaining = $(remaining[2-]) ; - - if $(n) != 1 - { - result += ":" ; - } - result += \"$($(n))\" ; - } - return $(result) ; -} - - -# Return the file:line for the nearest entry in backtrace which correspond to a -# user module. -# -rule nearest-user-location ( ) -{ - local bt = [ BACKTRACE ] ; - - local result ; - while $(bt) && ! $(result) - { - local m = [ MATCH ^(.+)\\.$ : $(bt[3]) ] ; - local user-modules = ([Jj]amroot(.jam|.v2|)|([Jj]amfile(.jam|.v2|)|user-config.jam|site-config.jam|project-root.jam) ; - - if [ MATCH $(user-modules) : $(bt[1]:D=) ] - { - result = $(bt[1]):$(bt[2]) ; - } - bt = $(bt[5-]) ; - } - return $(result) ; -} - - -# If optimized rule is available in Jam, use it. -if NEAREST_USER_LOCATION in [ RULENAMES ] -{ - rule nearest-user-location ( ) - { - local r = [ NEAREST_USER_LOCATION ] ; - return $(r[1]):$(r[2]) ; - } -} - - -rule __test__ ( ) -{ - # Show that we can correctly catch an expected error. 
- try ; - { - error an error occurred : somewhere ; - } - catch an error occurred : somewhere ; - - # Show that unexpected errors generate real errors. - try ; - { - try ; - { - error an error occurred : somewhere ; - } - catch an error occurred : nowhere ; - } - catch expected \"nowhere\" in argument 2 ; - - # Show that not catching an error where one was expected is an error. - try ; - { - try ; - { - } - catch ; - } - catch expected an error, but none occurred ; -} diff --git a/jam-files/boost-build/kernel/modules.jam b/jam-files/boost-build/kernel/modules.jam deleted file mode 100644 index 1f75354f..00000000 --- a/jam-files/boost-build/kernel/modules.jam +++ /dev/null @@ -1,354 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2003, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Essentially an include guard; ensures that no module is loaded multiple times. -.loaded ?= ; - -# A list of modules currently being loaded for error reporting of circular -# dependencies. -.loading ?= ; - -# A list of modules needing to be tested using their __test__ rule. -.untested ?= ; - -# A list of modules which have been tested using their __test__ rule. -.tested ?= ; - - -# Runs internal Boost Build unit tests for the specified module. The module's -# __test__ rule is executed in its own module to eliminate any inadvertent -# effects of testing module dependencies (such as assert) on the module itself. -# -local rule run-module-test ( m ) -{ - local tested-modules = [ modules.peek modules : .tested ] ; - - if ( ! $(m) in $(tested-modules) ) # Avoid recursive test invocations. - && ( ( --debug in $(argv) ) || ( --debug-module=$(m) in $(argv) ) ) - { - modules.poke modules : .tested : $(tested-modules) $(m) ; - - if ! ( __test__ in [ RULENAMES $(m) ] ) - { - local argv = [ peek : ARGV ] ; - if ! ( --quiet in $(argv) ) && ( --debug-tests in $(argv) ) - { - ECHO warning: no __test__ rule defined in module $(m) ; - } - } - else - { - if ! ( --quiet in $(argv) ) - { - ECHO testing module $(m)... ; - } - - local test-module = __test-$(m)__ ; - IMPORT $(m) : [ RULENAMES $(m) ] : $(test-module) : [ RULENAMES $(m) ] ; - IMPORT $(m) : __test__ : $(test-module) : __test__ : LOCALIZE ; - module $(test-module) - { - __test__ ; - } - } - } -} - - -# Return the binding of the given module. -# -rule binding ( module ) -{ - return $($(module).__binding__) ; -} - - -# Sets the module-local value of a variable. This is the most reliable way to -# set a module-local variable in a different module; it eliminates issues of -# name shadowing due to dynamic scoping. -# -rule poke ( module-name ? : variables + : value * ) -{ - module $(<) - { - $(>) = $(3) ; - } -} - - -# Returns the module-local value of a variable. This is the most reliable way to -# examine a module-local variable in a different module; it eliminates issues of -# name shadowing due to dynamic scoping. -# -rule peek ( module-name ? : variables + ) -{ - module $(<) - { - return $($(>)) ; - } -} - - -# Call the given rule locally in the given module. Use this for rules accepting -# rule names as arguments, so that the passed rule may be invoked in the context -# of the rule's caller (for example, if the rule accesses module globals or is a -# local rule). Note that rules called this way may accept at most 8 parameters. -# -rule call-in ( module-name ? 
: rule-name args * : * ) -{ - module $(module-name) - { - return [ $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ] ; - } -} - - -# Given a possibly qualified rule name and arguments, remove any initial module -# qualification from the rule and invoke it in that module. If there is no -# module qualification, the rule is invoked in the global module. Note that -# rules called this way may accept at most 8 parameters. -# -rule call-locally ( qualified-rule-name args * : * ) -{ - local module-rule = [ MATCH (.*)\\.(.*) : $(qualified-rule-name) ] ; - local rule-name = $(module-rule[2]) ; - rule-name ?= $(qualified-rule-name) ; - # We pass only 8 parameters here since Boost Jam allows at most 9 rule - # parameter positions and the call-in rule already uses up the initial - # position for the module name. - return [ call-in $(module-rule[1]) : $(rule-name) $(args) : $(2) : $(3) : - $(4) : $(5) : $(6) : $(7) : $(8) ] ; -} - - -# Load the indicated module if it is not already loaded. -# -rule load ( - module-name # Name of module to load. Rules will be defined in this - # module. - : filename ? # (partial) path to file; Defaults to $(module-name).jam. - : search * # Directories in which to search for filename. Defaults to - # $(BOOST_BUILD_PATH). -) -{ - # Avoid loading modules twice. - if ! ( $(module-name) in $(.loaded) ) - { - filename ?= $(module-name).jam ; - - # Mark the module loaded so we do not try to load it recursively. - .loaded += $(module-name) ; - - # Suppress tests if any module loads are already in progress. - local suppress-test = $(.loading[1]) ; - - # Push this module on the loading stack. - .loading += $(module-name) ; - - # Remember that it is untested. - .untested += $(module-name) ; - - # Insert the new module's __name__ and __file__ globals. - poke $(module-name) : __name__ : $(module-name) ; - poke $(module-name) : __file__ : $(filename) ; - - module $(module-name) - { - # Add some grist so that the module will have a unique target name. - local module-target = $(__file__:G=module@) ; - - local search = $(3) ; - search ?= [ modules.peek : BOOST_BUILD_PATH ] ; - SEARCH on $(module-target) = $(search) ; - BINDRULE on $(module-target) = modules.record-binding ; - - include $(module-target) ; - - # Allow the module to see its own names with full qualification. - local rules = [ RULENAMES $(__name__) ] ; - IMPORT $(__name__) : $(rules) : $(__name__) : $(__name__).$(rules) ; - } - - if $(module-name) != modules && ! [ binding $(module-name) ] - { - import errors ; - errors.error "Could not find module" $(module-name) in $(search) ; - } - - # Pop the loading stack. Must happen before testing or we will run into - # a circular loading dependency. - .loading = $(.loading[1--2]) ; - - # Run any pending tests if this is an outer load. - if ! $(suppress-test) - { - local argv = [ peek : ARGV ] ; - for local m in $(.untested) - { - run-module-test $(m) ; - } - .untested = ; - } - } - else if $(module-name) in $(.loading) - { - import errors ; - errors.error loading \"$(module-name)\" - : circular module loading dependency: - : $(.loading)" ->" $(module-name) ; - } -} - - -# This helper is used by load (above) to record the binding (path) of each -# loaded module. -# -rule record-binding ( module-target : binding ) -{ - $(.loading[-1]).__binding__ = $(binding) ; -} - - -# Transform each path in the list, with all backslashes converted to forward -# slashes and all detectable redundancy removed. 
Something like this is probably -# needed in path.jam, but I am not sure of that, I do not understand it, and I -# am not ready to move all of path.jam into the kernel. -# -local rule normalize-raw-paths ( paths * ) -{ - local result ; - for p in $(paths:T) - { - result += [ NORMALIZE_PATH $(p) ] ; - } - return $(result) ; -} - - -.cwd = [ PWD ] ; - - -# Load the indicated module and import rule names into the current module. Any -# members of rules-opt will be available without qualification in the caller's -# module. Any members of rename-opt will be taken as the names of the rules in -# the caller's module, in place of the names they have in the imported module. -# If rules-opt = '*', all rules from the indicated module are imported into the -# caller's module. If rename-opt is supplied, it must have the same number of -# elements as rules-opt. -# -rule import ( module-names + : rules-opt * : rename-opt * ) -{ - if ( $(rules-opt) = * || ! $(rules-opt) ) && $(rename-opt) - { - import errors ; - errors.error "Rule aliasing is only available for explicit imports." ; - } - - if $(module-names[2]) && ( $(rules-opt) || $(rename-opt) ) - { - import errors ; - errors.error "When loading multiple modules, no specific rules or" - "renaming is allowed" ; - } - - local caller = [ CALLER_MODULE ] ; - - # Import each specified module - for local m in $(module-names) - { - if ! $(m) in $(.loaded) - { - # If the importing module isn't already in the BOOST_BUILD_PATH, - # prepend it to the path. We don't want to invert the search order - # of modules that are already there. - - local caller-location ; - if $(caller) - { - caller-location = [ binding $(caller) ] ; - caller-location = $(caller-location:D) ; - caller-location = [ normalize-raw-paths $(caller-location:R=$(.cwd)) ] ; - } - - local search = [ peek : BOOST_BUILD_PATH ] ; - search = [ normalize-raw-paths $(search:R=$(.cwd)) ] ; - - if $(caller-location) && ! $(caller-location) in $(search) - { - search = $(caller-location) $(search) ; - } - - load $(m) : : $(search) ; - } - - IMPORT_MODULE $(m) : $(caller) ; - - if $(rules-opt) - { - local source-names ; - if $(rules-opt) = * - { - local all-rules = [ RULENAMES $(m) ] ; - source-names = $(all-rules) ; - } - else - { - source-names = $(rules-opt) ; - } - local target-names = $(rename-opt) ; - target-names ?= $(source-names) ; - IMPORT $(m) : $(source-names) : $(caller) : $(target-names) ; - } - } -} - - -# Define exported copies in $(target-module) of all rules exported from -# $(source-module). Also make them available in the global module with -# qualification, so that it is just as though the rules were defined originally -# in $(target-module). -# -rule clone-rules ( source-module target-module ) -{ - local rules = [ RULENAMES $(source-module) ] ; - - IMPORT $(source-module) : $(rules) : $(target-module) : $(rules) : LOCALIZE ; - EXPORT $(target-module) : $(rules) ; - IMPORT $(target-module) : $(rules) : : $(target-module).$(rules) ; -} - - -# These rules need to be available in all modules to implement module loading -# itself and other fundamental operations. 
-local globalize = peek poke record-binding ; -IMPORT modules : $(globalize) : : modules.$(globalize) ; - - -rule __test__ ( ) -{ - import assert ; - import modules : normalize-raw-paths ; - - module modules.__test__ - { - foo = bar ; - } - - assert.result bar : peek modules.__test__ : foo ; - - poke modules.__test__ : foo : bar baz ; - assert.result bar baz : peek modules.__test__ : foo ; - - assert.result c:/foo/bar : normalize-raw-paths c:/x/../foo/./xx/yy/../../bar ; - assert.result . : normalize-raw-paths . ; - assert.result .. : normalize-raw-paths .. ; - assert.result ../.. : normalize-raw-paths ../.. ; - assert.result .. : normalize-raw-paths ./.. ; - assert.result / / : normalize-raw-paths / \\ ; - assert.result a : normalize-raw-paths a ; - assert.result a : normalize-raw-paths a/ ; - assert.result /a : normalize-raw-paths /a/ ; - assert.result / : normalize-raw-paths /a/.. ; -} diff --git a/jam-files/boost-build/options/help.jam b/jam-files/boost-build/options/help.jam deleted file mode 100644 index b507e1ed..00000000 --- a/jam-files/boost-build/options/help.jam +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2003, 2006 Rene Rivera -# Copyright 2003, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This module is the plug-in handler for the --help and --help-.* -# command-line options -import modules ; -import assert ; -import doc : do-scan set-option set-output set-output-file print-help-usage print-help-top ; -import sequence ; -import set ; -import project ; -import print ; -import os ; -import version ; -import path ; - -# List of possible modules, but which really aren't. -# -.not-modules = - boost-build bootstrap site-config test user-config - -tools allyourbase boost-base features python stlport testing unit-tests ; - -# The help system options are parsed here and handed off to the doc -# module to translate into documentation requests and actions. The -# understood options are: -# -# --help-disable-<option> -# --help-doc-options -# --help-enable-<option> -# --help-internal -# --help-options -# --help-usage -# --help-output <type> -# --help-output-file <file> -# --help [<module-or-class>] -# -rule process ( - command # The option. - : values * # The values, starting after the "=". - ) -{ - assert.result --help : MATCH ^(--help).* : $(command) ; - local did-help = ; - switch $(command) - { - case --help-internal : - local path-to-modules = [ modules.peek : BOOST_BUILD_PATH ] ; - path-to-modules ?= . 
; - local possible-modules = [ GLOB $(path-to-modules) : *\\.jam ] ; - local not-modules = [ GLOB $(path-to-modules) : *$(.not-modules)\\.jam ] ; - local modules-to-list = - [ sequence.insertion-sort - [ set.difference $(possible-modules:D=:S=) : $(not-modules:D=:S=) ] ] ; - local modules-to-scan ; - for local m in $(modules-to-list) - { - local module-files = [ GLOB $(path-to-modules) : $(m)\\.jam ] ; - modules-to-scan += $(module-files[1]) ; - } - do-scan $(modules-to-scan) : print-help-all ; - did-help = true ; - - case --help-enable-* : - local option = [ MATCH --help-enable-(.*) : $(command) ] ; option = $(option:L) ; - set-option $(option) : enabled ; - did-help = true ; - - case --help-disable-* : - local option = [ MATCH --help-disable-(.*) : $(command) ] ; option = $(option:L) ; - set-option $(option) ; - did-help = true ; - - case --help-output : - set-output $(values[1]) ; - did-help = true ; - - case --help-output-file : - set-output-file $(values[1]) ; - did-help = true ; - - case --help-doc-options : - local doc-module-spec = [ split-symbol doc ] ; - do-scan $(doc-module-spec[1]) : print-help-options ; - did-help = true ; - - case --help-options : - print-help-usage ; - did-help = true ; - - case --help : - local spec = $(values[1]) ; - if $(spec) - { - local spec-parts = [ split-symbol $(spec) ] ; - if $(spec-parts) - { - if $(spec-parts[2]) - { - do-scan $(spec-parts[1]) : print-help-classes $(spec-parts[2]) ; - do-scan $(spec-parts[1]) : print-help-rules $(spec-parts[2]) ; - do-scan $(spec-parts[1]) : print-help-variables $(spec-parts[2]) ; - } - else - { - do-scan $(spec-parts[1]) : print-help-module ; - } - } - else - { - EXIT "Unrecognized help option '"$(command)" "$(spec)"'." ; - } - } - else - { - version.print ; - ECHO ; - # First print documentation from the current Jamfile, if any. - # FIXME: Generally, this duplication of project.jam logic is bad. - local names = [ modules.peek project : JAMROOT ] - [ modules.peek project : JAMFILE ] ; - local project-file = [ path.glob . : $(names) ] ; - if ! $(project-file) - { - project-file = [ path.glob-in-parents . : $(names) ] ; - } - - for local p in $(project-file) - { - do-scan $(p) : print-help-project $(p) ; - } - - # Next any user-config help. - local user-path = [ os.home-directories ] [ os.environ BOOST_BUILD_PATH ] ; - local user-config = [ GLOB $(user-path) : user-config.jam ] ; - if $(user-config) - { - do-scan $(user-config[1]) : print-help-config user $(user-config[1]) ; - } - - # Next any site-config help. - local site-config = [ GLOB $(user-path) : site-config.jam ] ; - if $(site-config) - { - do-scan $(site-config[1]) : print-help-config site $(site-config[1]) ; - } - - # Then the overall help. - print-help-top ; - } - did-help = true ; - } - if $(did-help) - { - UPDATE all ; - NOCARE all ; - } - return $(did-help) ; -} - -# Split a reference to a symbol into module and symbol parts. -# -local rule split-symbol ( - symbol # The symbol to split. - ) -{ - local path-to-modules = [ modules.peek : BOOST_BUILD_PATH ] ; - path-to-modules ?= . ; - local module-name = $(symbol) ; - local symbol-name = ; - local result = ; - while ! $(result) - { - local module-path = [ GLOB $(path-to-modules) : $(module-name)\\.jam ] ; - if $(module-path) - { - # The 'module-name' in fact refers to module. Return the full - # module path and a symbol within it. If 'symbol' passed to this - # rule is already module, 'symbol-name' will be empty. Otherwise, - # it's initialized on the previous loop iteration. 
- # In case there are several modules by this name, - # use the first one. - result = $(module-path[1]) $(symbol-name) ; - } - else - { - if ! $(module-name:S) - { - result = - ; - } - else - { - local next-symbol-part = [ MATCH ^.(.*) : $(module-name:S) ] ; - if $(symbol-name) - { - symbol-name = $(next-symbol-part).$(symbol-name) ; - } - else - { - symbol-name = $(next-symbol-part) ; - } - module-name = $(module-name:B) ; - } - } - } - if $(result) != - - { - return $(result) ; - } -} diff --git a/jam-files/boost-build/site-config.jam b/jam-files/boost-build/site-config.jam deleted file mode 100644 index ad22d674..00000000 --- a/jam-files/boost-build/site-config.jam +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright 2002, 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - diff --git a/jam-files/boost-build/tools/__init__.py b/jam-files/boost-build/tools/__init__.py deleted file mode 100644 index e69de29b..00000000 --- a/jam-files/boost-build/tools/__init__.py +++ /dev/null diff --git a/jam-files/boost-build/tools/acc.jam b/jam-files/boost-build/tools/acc.jam deleted file mode 100644 index f04c9dc8..00000000 --- a/jam-files/boost-build/tools/acc.jam +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright Vladimir Prus 2004. -# Copyright Toon Knapen 2004. -# Copyright Boris Gubenko 2007. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt -# or copy at http://www.boost.org/LICENSE_1_0.txt) - -# -# Boost.Build V2 toolset for the HP aC++ compiler. -# - -import toolset : flags ; -import feature ; -import generators ; -import common ; - -feature.extend toolset : acc ; -toolset.inherit acc : unix ; -generators.override builtin.lib-generator : acc.prebuilt ; -generators.override acc.searched-lib-generator : searched-lib-generator ; - -# Configures the acc toolset. -rule init ( version ? : user-provided-command * : options * ) -{ - local condition = [ common.check-init-parameters acc - : version $(version) ] ; - - local command = [ common.get-invocation-command acc : aCC - : $(user-provided-command) ] ; - - common.handle-options acc : $(condition) : $(command) : $(options) ; -} - - -# Declare generators -generators.register-c-compiler acc.compile.c : C : OBJ : <toolset>acc ; -generators.register-c-compiler acc.compile.c++ : CPP : OBJ : <toolset>acc ; - -# Declare flags. -flags acc CFLAGS <optimization>off : ; -flags acc CFLAGS <optimization>speed : -O3 ; -flags acc CFLAGS <optimization>space : -O2 ; - -flags acc CFLAGS <inlining>off : +d ; -flags acc CFLAGS <inlining>on : ; -flags acc CFLAGS <inlining>full : ; - -flags acc C++FLAGS <exception-handling>off : ; -flags acc C++FLAGS <exception-handling>on : ; - -flags acc C++FLAGS <rtti>off : ; -flags acc C++FLAGS <rtti>on : ; - -# We want the full path to the sources in the debug symbols because otherwise -# the debugger won't find the sources when we use boost.build. -flags acc CFLAGS <debug-symbols>on : -g ; -flags acc LINKFLAGS <debug-symbols>on : -g ; -flags acc LINKFLAGS <debug-symbols>off : -s ; - -# V2 does not have <shared-linkable>, not sure what this meant in V1. 
-# flags acc CFLAGS <shared-linkable>true : +Z ; - -flags acc CFLAGS <profiling>on : -pg ; -flags acc LINKFLAGS <profiling>on : -pg ; - -flags acc CFLAGS <address-model>64 : +DD64 ; -flags acc LINKFLAGS <address-model>64 : +DD64 ; - -# It is unknown if there's separate option for rpath used only -# at link time, similar to -rpath-link in GNU. We'll use -L. -flags acc RPATH_LINK : <xdll-path> ; - -flags acc CFLAGS <cflags> ; -flags acc C++FLAGS <cxxflags> ; -flags acc DEFINES <define> ; -flags acc UNDEFS <undef> ; -flags acc HDRS <include> ; -flags acc STDHDRS <sysinclude> ; -flags acc LINKFLAGS <linkflags> ; -flags acc ARFLAGS <arflags> ; - -flags acc LIBPATH <library-path> ; -flags acc NEEDLIBS <library-file> ; -flags acc FINDLIBS <find-shared-library> ; -flags acc FINDLIBS <find-static-library> ; - -# Select the compiler name according to the threading model. -flags acc CFLAGS <threading>multi : -mt ; -flags acc LINKFLAGS <threading>multi : -mt ; - -flags acc.compile.c++ TEMPLATE_DEPTH <c++-template-depth> ; - - -actions acc.link bind NEEDLIBS -{ - $(CONFIG_COMMAND) -AA $(LINKFLAGS) -o "$(<[1])" -L"$(RPATH_LINK)" -L$(LIBPATH) -L$(STDLIBPATH) "$(>)" "$(NEEDLIBS)" "$(NEEDLIBS)" -l$(FINDLIBS) $(OPTIONS) -} - -SPACE = " " ; -actions acc.link.dll bind NEEDLIBS -{ - $(CONFIG_COMMAND) -AA -b $(LINKFLAGS) -o "$(<[1])" -L"$(RPATH_LINK)" -Wl,+h$(<[-1]:D=) -L$(LIBPATH) -L$(STDLIBPATH) "$(>)" "$(NEEDLIBS)" "$(NEEDLIBS)" -l$(FINDLIBS) $(OPTIONS) -} - -actions acc.compile.c -{ - cc -c -I$(BOOST_ROOT) -U$(UNDEFS) -D$(DEFINES) $(CFLAGS) -I"$(HDRS)" -I"$(STDHDRS)" -o "$(<)" "$(>)" $(OPTIONS) -} - -actions acc.compile.c++ -{ - $(CONFIG_COMMAND) -AA -c -Wc,--pending_instantiations=$(TEMPLATE_DEPTH) -I$(BOOST_ROOT) -U$(UNDEFS) -D$(DEFINES) $(CFLAGS) $(C++FLAGS) -I"$(HDRS)" -I"$(STDHDRS)" -o "$(<)" "$(>)" $(OPTIONS) -} - -actions updated together piecemeal acc.archive -{ - ar ru$(ARFLAGS:E="") "$(<)" "$(>)" -} diff --git a/jam-files/boost-build/tools/auto-index.jam b/jam-files/boost-build/tools/auto-index.jam deleted file mode 100644 index ebbf344e..00000000 --- a/jam-files/boost-build/tools/auto-index.jam +++ /dev/null @@ -1,212 +0,0 @@ - -import feature ; -import generators ; -import "class" ; -import toolset ; -import targets ; -import "class" : new ; -import project ; - -feature.feature auto-index : off "on" ; -feature.feature auto-index-internal : off "on" ; -feature.feature auto-index-verbose : off "on" ; -feature.feature auto-index-no-duplicates : off "on" ; -feature.feature auto-index-script : : free ; -feature.feature auto-index-prefix : : free ; -feature.feature auto-index-type : : free ; -feature.feature auto-index-section-names : "on" off ; - -toolset.flags auto-index.auto-index FLAGS <auto-index-internal>on : --internal-index ; -toolset.flags auto-index.auto-index SCRIPT <auto-index-script> ; -toolset.flags auto-index.auto-index PREFIX <auto-index-prefix> ; -toolset.flags auto-index.auto-index INDEX_TYPE <auto-index-type> ; -toolset.flags auto-index.auto-index FLAGS <auto-index-verbose>on : --verbose ; -toolset.flags auto-index.auto-index FLAGS <auto-index-no-duplicates>on : --no-duplicates ; -toolset.flags auto-index.auto-index FLAGS <auto-index-section-names>off : --no-section-names ; - -# <auto-index-binary> shell command to run AutoIndex -# <auto-index-binary-dependencies> targets to build AutoIndex from sources. 
-feature.feature <auto-index-binary> : : free ; -feature.feature <auto-index-binary-dependencies> : : free dependency ; - -class auto-index-generator : generator -{ - import common modules path targets build-system ; - rule run ( project name ? : property-set : sources * ) - { - # AutoIndex invocation command and dependencies. - local auto-index-binary = [ modules.peek auto-index : .command ] ; - local auto-index-binary-dependencies ; - - if $(auto-index-binary) - { - # Use user-supplied command. - auto-index-binary = [ common.get-invocation-command auto-index : auto-index : $(auto-index-binary) ] ; - } - else - { - # Search for AutoIndex sources in sensible places, like - # $(BOOST_ROOT)/tools/auto_index - # $(BOOST_BUILD_PATH)/../../auto_index - - # And build auto-index executable from sources. - - local boost-root = [ modules.peek : BOOST_ROOT ] ; - local boost-build-path = [ build-system.location ] ; - local boost-build-path2 = [ modules.peek : BOOST_BUILD_PATH ] ; - - local auto-index-dir ; - - if $(boost-root) - { - auto-index-dir += [ path.join $(boost-root) tools ] ; - } - - if $(boost-build-path) - { - auto-index-dir += $(boost-build-path)/../.. ; - } - if $(boost-build-path2) - { - auto-index-dir += $(boost-build-path2)/.. ; - } - - #ECHO $(auto-index-dir) ; - auto-index-dir = [ path.glob $(auto-index-dir) : auto_index ] ; - #ECHO $(auto-index-dir) ; - - # If the AutoIndex source directory was found, mark its main target - # as a dependency for the current project. Otherwise, try to find - # 'auto-index' in user's PATH - if $(auto-index-dir) - { - auto-index-dir = [ path.make $(auto-index-dir[1]) ] ; - auto-index-dir = $(auto-index-dir)/build ; - - #ECHO $(auto-index-dir) ; - - # Get the main-target in AutoIndex directory. - local auto-index-main-target = [ targets.resolve-reference $(auto-index-dir) : $(project) ] ; - - #ECHO $(auto-index-main-target) ; - - # The first element are actual targets, the second are - # properties found in target-id. We do not care about these - # since we have passed the id ourselves. - auto-index-main-target = - [ $(auto-index-main-target[1]).main-target auto_index ] ; - - #ECHO $(auto-index-main-target) ; - - auto-index-binary-dependencies = - [ $(auto-index-main-target).generate [ $(property-set).propagated ] ] ; - - # Ignore usage-requirements returned as first element. - auto-index-binary-dependencies = $(auto-index-binary-dependencies[2-]) ; - - # Some toolsets generate extra targets (e.g. RSP). We must mark - # all targets as dependencies for the project, but we will only - # use the EXE target for auto-index-to-boostbook translation. - for local target in $(auto-index-binary-dependencies) - { - if [ $(target).type ] = EXE - { - auto-index-binary = - [ path.native - [ path.join - [ $(target).path ] - [ $(target).name ] - ] - ] ; - } - } - } - else - { - ECHO "AutoIndex warning: The path to the auto-index executable was" ; - ECHO " not provided. Additionally, couldn't find AutoIndex" ; - ECHO " sources searching in" ; - ECHO " * BOOST_ROOT/tools/auto-index" ; - ECHO " * BOOST_BUILD_PATH/../../auto-index" ; - ECHO " Will now try to find a precompiled executable by searching" ; - ECHO " the PATH for 'auto-index'." 
; - ECHO " To disable this warning in the future, or to completely" ; - ECHO " avoid compilation of auto-index, you can explicitly set the" ; - ECHO " path to a auto-index executable command in user-config.jam" ; - ECHO " or site-config.jam with the call" ; - ECHO " using auto-index : /path/to/auto-index ;" ; - - # As a last resort, search for 'auto-index' command in path. Note - # that even if the 'auto-index' command is not found, - # get-invocation-command will still return 'auto-index' and might - # generate an error while generating the virtual-target. - - auto-index-binary = [ common.get-invocation-command auto-index : auto-index ] ; - } - } - - # Add $(auto-index-binary-dependencies) as a dependency of the current - # project and set it as the <auto-index-binary> feature for the - # auto-index-to-boostbook rule, below. - property-set = [ $(property-set).add-raw - <dependency>$(auto-index-binary-dependencies) - <auto-index-binary>$(auto-index-binary) - <auto-index-binary-dependencies>$(auto-index-binary-dependencies) - ] ; - - #ECHO "binary = " $(auto-index-binary) ; - #ECHO "dependencies = " $(auto-index-binary-dependencies) ; - - if [ $(property-set).get <auto-index> ] = "on" - { - return [ generator.run $(project) $(name) : $(property-set) : $(sources) ] ; - } - else - { - return [ generators.construct $(project) $(name) : DOCBOOK : $(property-set) - : $(sources) ] ; - } - } -} - -# Initialization of toolset. -# -# Parameters: -# command ? -> path to AutoIndex executable. -# -# When command is not supplied toolset will search for AutoIndex directory and -# compile the executable from source. If that fails we still search the path for -# 'auto_index'. -# -rule init ( - command ? # path to the AutoIndex executable. - ) -{ - if ! $(.initialized) - { - .initialized = true ; - .command = $(command) ; - } -} - -toolset.flags auto-index.auto-index AI-COMMAND <auto-index-binary> ; -toolset.flags auto-index.auto-index AI-DEPENDENCIES <auto-index-binary-dependencies> ; - -generators.register [ class.new auto-index-generator auto-index.auto-index : DOCBOOK : DOCBOOK(%.auto_index) ] ; -generators.override auto-index.auto-index : boostbook.boostbook-to-docbook ; - -rule auto-index ( target : source : properties * ) -{ - # Signal dependency of auto-index sources on <auto-index-binary-dependencies> - # upon invocation of auto-index-to-boostbook. - #ECHO "AI-COMMAND= " $(AI-COMMAND) ; - DEPENDS $(target) : [ on $(target) return $(AI-DEPENDENCIES) ] ; - #DEPENDS $(target) : [ on $(target) return $(SCRIPT) ] ; -} - -actions auto-index -{ - $(AI-COMMAND) $(FLAGS) "--prefix="$(PREFIX) "--script="$(SCRIPT) "--index-type="$(INDEX_TYPE) "--in="$(>) "--out="$(<) -} - - diff --git a/jam-files/boost-build/tools/bison.jam b/jam-files/boost-build/tools/bison.jam deleted file mode 100644 index 0689d4bd..00000000 --- a/jam-files/boost-build/tools/bison.jam +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. 
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import generators ; -import feature ; -import type ; -import property ; - -feature.feature bison.prefix : : free ; -type.register Y : y ; -type.register YY : yy ; -generators.register-standard bison.bison : Y : C H ; -generators.register-standard bison.bison : YY : CPP HPP ; - -rule init ( ) -{ -} - -rule bison ( dst dst_header : src : properties * ) -{ - local r = [ property.select bison.prefix : $(properties) ] ; - if $(r) - { - PREFIX_OPT on $(<) = -p $(r:G=) ; - } -} - -actions bison -{ - bison $(PREFIX_OPT) -d -o $(<[1]) $(>) -} diff --git a/jam-files/boost-build/tools/boostbook-config.jam b/jam-files/boost-build/tools/boostbook-config.jam deleted file mode 100644 index 6e3f3ddc..00000000 --- a/jam-files/boost-build/tools/boostbook-config.jam +++ /dev/null @@ -1,13 +0,0 @@ -#~ Copyright 2005 Rene Rivera. -#~ Distributed under the Boost Software License, Version 1.0. -#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Automatic configuration for BoostBook tools. To use, just import this module. -# -# This module is deprecated. -# using boostbook ; -# with no arguments now suffices. - -import toolset : using ; - -using boostbook ; diff --git a/jam-files/boost-build/tools/boostbook.jam b/jam-files/boost-build/tools/boostbook.jam deleted file mode 100644 index 3a5964c6..00000000 --- a/jam-files/boost-build/tools/boostbook.jam +++ /dev/null @@ -1,727 +0,0 @@ -# Copyright 2003, 2004, 2005 Dave Abrahams -# Copyright 2003, 2004, 2005 Douglas Gregor -# Copyright 2005, 2006, 2007 Rene Rivera -# Copyright 2003, 2004, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This module defines rules to handle generation of documentation -# from BoostBook sources. -# -# The type of output is controlled by the <format> feature which can -# have the following values:: -# -# * html: Generates html documention. This is the default. -# * xhtml: Generates xhtml documentation -# * htmlhelp: Generates html help output. -# * onehtml: Generates a single html page. -# * man: Generates man pages. -# * pdf: Generates pdf documentation. -# * ps: Generates postscript output. -# * docbook: Generates docbook XML. -# * fo: Generates XSL formating objects. -# * tests: Extracts test cases from the boostbook XML. -# -# format is an implicit feature, so typing pdf on the command -# line (for example) is a short-cut for format=pdf. - -import "class" : new ; -import common ; -import errors ; -import targets ; -import feature ; -import generators ; -import print ; -import property ; -import project ; -import property-set ; -import regex ; -import scanner ; -import sequence ; -import make ; -import os ; -import type ; -import modules path project ; -import build-system ; - -import xsltproc : xslt xslt-dir ; - -# Make this module into a project. 
-project.initialize $(__name__) ; -project boostbook ; - - -feature.feature format : html xhtml htmlhelp onehtml man pdf ps docbook fo tests - : incidental implicit composite propagated ; - -type.register DTDXML : dtdxml ; -type.register XML : xml ; -type.register BOOSTBOOK : boostbook : XML ; -type.register DOCBOOK : docbook : XML ; -type.register FO : fo : XML ; -type.register PDF : pdf ; -type.register PS : ps ; -type.register XSLT : xsl : XML ; -type.register HTMLDIR ; -type.register XHTMLDIR ; -type.register HTMLHELP ; -type.register MANPAGES ; -type.register TESTS : tests ; -# Artificial target type, used to require invocation of top-level -# BoostBook generator. -type.register BOOSTBOOK_MAIN ; - - -# Initialize BoostBook support. -rule init ( - docbook-xsl-dir ? # The DocBook XSL stylesheet directory. If not - # provided, we use DOCBOOK_XSL_DIR from the environment - # (if available) or look in standard locations. - # Otherwise, we let the XML processor load the - # stylesheets remotely. - - : docbook-dtd-dir ? # The DocBook DTD directory. If not provided, we use - # DOCBOOK_DTD_DIR From the environment (if available) or - # look in standard locations. Otherwise, we let the XML - # processor load the DTD remotely. - - : boostbook-dir ? # The BoostBook directory with the DTD and XSL subdirs. -) -{ - - if ! $(.initialized) - { - .initialized = true ; - - check-boostbook-dir $(boostbook-dir) ; - find-tools $(docbook-xsl-dir) : $(docbook-dtd-dir) : $(boostbook-dir) ; - - # Register generators only if we've were called via "using boostbook ; " - generators.register-standard boostbook.dtdxml-to-boostbook : DTDXML : XML ; - generators.register-standard boostbook.boostbook-to-docbook : XML : DOCBOOK ; - generators.register-standard boostbook.boostbook-to-tests : XML : TESTS ; - generators.register-standard boostbook.docbook-to-onehtml : DOCBOOK : HTML ; - generators.register-standard boostbook.docbook-to-htmldir : DOCBOOK : HTMLDIR ; - generators.register-standard boostbook.docbook-to-xhtmldir : DOCBOOK : XHTMLDIR ; - generators.register-standard boostbook.docbook-to-htmlhelp : DOCBOOK : HTMLHELP ; - generators.register-standard boostbook.docbook-to-manpages : DOCBOOK : MANPAGES ; - generators.register-standard boostbook.docbook-to-fo : DOCBOOK : FO ; - - # The same about Jamfile main target rules. - IMPORT $(__name__) : boostbook : : boostbook ; - } - else - { - if $(docbook-xsl-dir) - { - modify-config ; - .docbook-xsl-dir = [ path.make $(docbook-xsl-dir) ] ; - check-docbook-xsl-dir ; - } - if $(docbook-dtd-dir) - { - modify-config ; - .docbook-dtd-dir = [ path.make $(docbook-dtd-dir) ] ; - check-docbook-dtd-dir ; - } - if $(boostbook-dir) - { - modify-config ; - check-boostbook-dir $(boostbook-dir) ; - local boostbook-xsl-dir = [ path.glob $(boostbook-dir) : xsl ] ; - local boostbook-dtd-dir = [ path.glob $(boostbook-dir) : dtd ] ; - .boostbook-xsl-dir = $(boostbook-xsl-dir[1]) ; - .boostbook-dtd-dir = $(boostbook-dtd-dir[1]) ; - check-boostbook-xsl-dir ; - check-boostbook-dtd-dir ; - } - } -} - -rule lock-config ( ) -{ - if ! $(.initialized) - { - errors.user-error "BoostBook has not been configured." ; - } - if ! $(.config-locked) - { - .config-locked = true ; - } -} - -rule modify-config ( ) -{ - if $(.config-locked) - { - errors.user-error "BoostBook configuration cannot be changed after it has been used." 
; - } -} - -rule find-boost-in-registry ( keys * ) -{ - local boost-root = ; - for local R in $(keys) - { - local installed-boost = [ W32_GETREG - "HKEY_LOCAL_MACHINE\\SOFTWARE\\$(R)" - : "InstallRoot" ] ; - if $(installed-boost) - { - boost-root += [ path.make $(installed-boost) ] ; - } - } - return $(boost-root) ; -} - -rule check-docbook-xsl-dir ( ) -{ - if $(.docbook-xsl-dir) - { - if ! [ path.glob $(.docbook-xsl-dir) : common/common.xsl ] - { - errors.user-error "BoostBook: could not find docbook XSL stylesheets in:" [ path.native $(.docbook-xsl-dir) ] ; - } - else - { - if --debug-configuration in [ modules.peek : ARGV ] - { - ECHO "notice: BoostBook: found docbook XSL stylesheets in:" [ path.native $(.docbook-xsl-dir) ] ; - } - } - } -} - -rule check-docbook-dtd-dir ( ) -{ - if $(.docbook-dtd-dir) - { - if ! [ path.glob $(.docbook-dtd-dir) : docbookx.dtd ] - { - errors.user-error "error: BoostBook: could not find docbook DTD in:" [ path.native $(.docbook-dtd-dir) ] ; - } - else - { - if --debug-configuration in [ modules.peek : ARGV ] - { - ECHO "notice: BoostBook: found docbook DTD in:" [ path.native $(.docbook-dtd-dir) ] ; - } - } - } -} - -rule check-boostbook-xsl-dir ( ) -{ - if ! $(.boostbook-xsl-dir) - { - errors.user-error "error: BoostBook: could not find boostbook XSL stylesheets." ; - } - else if ! [ path.glob $(.boostbook-xsl-dir) : docbook.xsl ] - { - errors.user-error "error: BoostBook: could not find docbook XSL stylesheets in:" [ path.native $(.boostbook-xsl-dir) ] ; - } - else - { - if --debug-configuration in [ modules.peek : ARGV ] - { - ECHO "notice: BoostBook: found boostbook XSL stylesheets in:" [ path.native $(.boostbook-xsl-dir) ] ; - } - } -} - -rule check-boostbook-dtd-dir ( ) -{ - if ! $(.boostbook-dtd-dir) - { - errors.user-error "error: BoostBook: could not find boostbook DTD." ; - } - else if ! [ path.glob $(.boostbook-dtd-dir) : boostbook.dtd ] - { - errors.user-error "error: BoostBook: could not find boostbook DTD in:" [ path.native $(.boostbook-dtd-dir) ] ; - } - else - { - if --debug-configuration in [ modules.peek : ARGV ] - { - ECHO "notice: BoostBook: found boostbook DTD in:" [ path.native $(.boostbook-dtd-dir) ] ; - } - } -} - -rule check-boostbook-dir ( boostbook-dir ? ) -{ - if $(boostbook-dir) && ! [ path.glob $(boostbook-dir) : xsl ] - { - errors.user-error "error: BoostBook: could not find boostbook in:" [ path.native $(boostbook-dir) ] ; - } -} - -rule find-tools ( docbook-xsl-dir ? : docbook-dtd-dir ? : boostbook-dir ? ) -{ - docbook-xsl-dir ?= [ modules.peek : DOCBOOK_XSL_DIR ] ; - docbook-dtd-dir ?= [ modules.peek : DOCBOOK_DTD_DIR ] ; - boostbook-dir ?= [ modules.peek : BOOSTBOOK_DIR ] ; - - # Look for the boostbook stylesheets relative to BOOST_ROOT - # and Boost.Build. - local boost-build-root = [ path.make [ build-system.location ] ] ; - local boostbook-search-dirs = [ path.join $(boost-build-root) .. .. ] ; - - local boost-root = [ modules.peek : BOOST_ROOT ] ; - if $(boost-root) - { - boostbook-search-dirs += [ path.join [ path.make $(boost-root) ] tools ] ; - } - boostbook-dir ?= [ path.glob $(boostbook-search-dirs) : boostbook* ] ; - - # Try to find the tools in platform specific locations - if [ os.name ] = NT - { - # If installed by the Boost installer. 
- local boost-root = ; - - local boost-installer-versions = snapshot cvs 1.33.0 ; - local boost-consulting-installer-versions = 1.33.1 1.34.0 1.34.1 ; - local boostpro-installer-versions = - 1.35.0 1.36.0 1.37.0 1.38.0 1.39.0 1.40.0 1.41.0 1.42.0 - 1.43.0 1.44.0 1.45.0 1.46.0 1.47.0 1.48.0 1.49.0 1.50.0 ; - - local old-installer-root = [ find-boost-in-registry Boost.org\\$(boost-installer-versions) ] ; - - # Make sure that the most recent version is searched for first - boost-root += [ sequence.reverse - [ find-boost-in-registry - Boost-Consulting.com\\$(boost-consulting-installer-versions) - boostpro.com\\$(boostpro-installer-versions) ] ] ; - - # Plausible locations. - local root = [ PWD ] ; - while $(root) != $(root:D) { root = $(root:D) ; } - root = [ path.make $(root) ] ; - local search-dirs = ; - local docbook-search-dirs = ; - for local p in $(boost-root) { - search-dirs += [ path.join $(p) tools ] ; - } - for local p in $(old-installer-root) - { - search-dirs += [ path.join $(p) share ] ; - docbook-search-dirs += [ path.join $(p) share ] ; - } - search-dirs += [ path.join $(root) Boost tools ] ; - search-dirs += [ path.join $(root) Boost share ] ; - docbook-search-dirs += [ path.join $(root) Boost share ] ; - - docbook-xsl-dir ?= [ path.glob $(docbook-search-dirs) : docbook-xsl* ] ; - docbook-dtd-dir ?= [ path.glob $(docbook-search-dirs) : docbook-xml* ] ; - boostbook-dir ?= [ path.glob $(search-dirs) : boostbook* ] ; - } - else - { - # Plausible locations. - - local share = /usr/local/share /usr/share /opt/share /opt/local/share ; - local dtd-versions = 4.2 ; - - docbook-xsl-dir ?= [ path.glob $(share) : docbook-xsl* ] ; - docbook-xsl-dir ?= [ path.glob $(share)/sgml/docbook : xsl-stylesheets ] ; - docbook-xsl-dir ?= [ path.glob $(share)/xsl : docbook* ] ; - - docbook-dtd-dir ?= [ path.glob $(share) : docbook-xml* ] ; - docbook-dtd-dir ?= [ path.glob $(share)/sgml/docbook : xml-dtd-$(dtd-versions)* ] ; - docbook-dtd-dir ?= [ path.glob $(share)/xml/docbook : $(dtd-versions) ] ; - - boostbook-dir ?= [ path.glob $(share) : boostbook* ] ; - - # Ubuntu Linux - docbook-xsl-dir ?= [ path.glob /usr/share/xml/docbook/stylesheet : nwalsh ] ; - docbook-dtd-dir ?= [ path.glob /usr/share/xml/docbook/schema/dtd : $(dtd-versions) ] ; - } - - if $(docbook-xsl-dir) - { - .docbook-xsl-dir = [ path.make $(docbook-xsl-dir[1]) ] ; - } - if $(docbook-dtd-dir) - { - .docbook-dtd-dir = [ path.make $(docbook-dtd-dir[1]) ] ; - } - - if --debug-configuration in [ modules.peek : ARGV ] - { - ECHO "notice: Boost.Book: searching XSL/DTD in" ; - ECHO "notice:" [ sequence.transform path.native : $(boostbook-dir) ] ; - } - local boostbook-xsl-dir ; - for local dir in $(boostbook-dir) { - boostbook-xsl-dir += [ path.glob $(dir) : xsl ] ; - } - local boostbook-dtd-dir ; - for local dir in $(boostbook-dir) { - boostbook-dtd-dir += [ path.glob $(dir) : dtd ] ; - } - .boostbook-xsl-dir = $(boostbook-xsl-dir[1]) ; - .boostbook-dtd-dir = $(boostbook-dtd-dir[1]) ; - - check-docbook-xsl-dir ; - check-docbook-dtd-dir ; - check-boostbook-xsl-dir ; - check-boostbook-dtd-dir ; -} - -rule xsl-dir -{ - lock-config ; - return $(.boostbook-xsl-dir) ; -} - -rule dtd-dir -{ - lock-config ; - return $(.boostbook-dtd-dir) ; -} - -rule docbook-xsl-dir -{ - lock-config ; - return $(.docbook-xsl-dir) ; -} - -rule docbook-dtd-dir -{ - lock-config ; - return $(.docbook-dtd-dir) ; -} - -rule dtdxml-to-boostbook ( target : source : properties * ) -{ - lock-config ; - xslt $(target) : $(source) "$(.boostbook-xsl-dir)/dtd/dtd2boostbook.xsl" - 
: $(properties) ; -} - -rule boostbook-to-docbook ( target : source : properties * ) -{ - lock-config ; - local stylesheet = [ path.native $(.boostbook-xsl-dir)/docbook.xsl ] ; - xslt $(target) : $(source) $(stylesheet) : $(properties) ; -} - -rule docbook-to-onehtml ( target : source : properties * ) -{ - lock-config ; - local stylesheet = [ path.native $(.boostbook-xsl-dir)/html-single.xsl ] ; - xslt $(target) : $(source) $(stylesheet) : $(properties) ; -} - -rule docbook-to-htmldir ( target : source : properties * ) -{ - lock-config ; - local stylesheet = [ path.native $(.boostbook-xsl-dir)/html.xsl ] ; - xslt-dir $(target) : $(source) $(stylesheet) : $(properties) : html ; -} - -rule docbook-to-xhtmldir ( target : source : properties * ) -{ - lock-config ; - local stylesheet = [ path.native $(.boostbook-xsl-dir)/xhtml.xsl ] ; - xslt-dir $(target) : $(source) $(stylesheet) : $(properties) : xhtml ; -} - -rule docbook-to-htmlhelp ( target : source : properties * ) -{ - lock-config ; - local stylesheet = [ path.native $(.boostbook-xsl-dir)/html-help.xsl ] ; - xslt-dir $(target) : $(source) $(stylesheet) : $(properties) : htmlhelp ; -} - -rule docbook-to-manpages ( target : source : properties * ) -{ - lock-config ; - local stylesheet = [ path.native $(.boostbook-xsl-dir)/manpages.xsl ] ; - xslt-dir $(target) : $(source) $(stylesheet) : $(properties) : man ; -} - -rule docbook-to-fo ( target : source : properties * ) -{ - lock-config ; - local stylesheet = [ path.native $(.boostbook-xsl-dir)/fo.xsl ] ; - xslt $(target) : $(source) $(stylesheet) : $(properties) ; -} - -rule format-catalog-path ( path ) -{ - local result = $(path) ; - if [ xsltproc.is-cygwin ] - { - if [ os.name ] = NT - { - drive = [ MATCH ^/(.):(.*)$ : $(path) ] ; - result = /cygdrive/$(drive[1])$(drive[2]) ; - } - } - else - { - if [ os.name ] = CYGWIN - { - local native-path = [ path.native $(path) ] ; - result = [ path.make $(native-path:W) ] ; - } - } - return [ regex.replace $(result) " " "%20" ] ; -} - -rule generate-xml-catalog ( target : sources * : properties * ) -{ - print.output $(target) ; - - # BoostBook DTD catalog entry - local boostbook-dtd-dir = [ boostbook.dtd-dir ] ; - if $(boostbook-dtd-dir) - { - boostbook-dtd-dir = [ format-catalog-path $(boostbook-dtd-dir) ] ; - } - - print.text - "<?xml version=\"1.0\"?>" - "<!DOCTYPE catalog " - " PUBLIC \"-//OASIS/DTD Entity Resolution XML Catalog V1.0//EN\"" - " \"http://www.oasis-open.org/committees/entity/release/1.0/catalog.dtd\">" - "<catalog xmlns=\"urn:oasis:names:tc:entity:xmlns:xml:catalog\">" - " <rewriteURI uriStartString=\"http://www.boost.org/tools/boostbook/dtd/\" rewritePrefix=\"file://$(boostbook-dtd-dir)/\"/>" - : true ; - - local docbook-xsl-dir = [ boostbook.docbook-xsl-dir ] ; - if ! $(docbook-xsl-dir) - { - ECHO "BoostBook warning: no DocBook XSL directory specified." ; - ECHO " If you have the DocBook XSL stylesheets installed, please " ; - ECHO " set DOCBOOK_XSL_DIR to the stylesheet directory on either " ; - ECHO " the command line (via -sDOCBOOK_XSL_DIR=...) or in a " ; - ECHO " Boost.Jam configuration file. The DocBook XSL stylesheets " ; - ECHO " are available here: http://docbook.sourceforge.net/ " ; - ECHO " Stylesheets will be downloaded on-the-fly (very slow!) 
" ; - } - else - { - docbook-xsl-dir = [ format-catalog-path $(docbook-xsl-dir) ] ; - print.text " <rewriteURI uriStartString=\"http://docbook.sourceforge.net/release/xsl/current/\" rewritePrefix=\"file://$(docbook-xsl-dir)/\"/>" ; - } - - local docbook-dtd-dir = [ boostbook.docbook-dtd-dir ] ; - if ! $(docbook-dtd-dir) - { - ECHO "BoostBook warning: no DocBook DTD directory specified." ; - ECHO " If you have the DocBook DTD installed, please set " ; - ECHO " DOCBOOK_DTD_DIR to the DTD directory on either " ; - ECHO " the command line (via -sDOCBOOK_DTD_DIR=...) or in a " ; - ECHO " Boost.Jam configuration file. The DocBook DTD is available " ; - ECHO " here: http://www.oasis-open.org/docbook/xml/4.2/index.shtml" ; - ECHO " The DTD will be downloaded on-the-fly (very slow!) " ; - } - else - { - docbook-dtd-dir = [ format-catalog-path $(docbook-dtd-dir) ] ; - print.text " <rewriteURI uriStartString=\"http://www.oasis-open.org/docbook/xml/4.2/\" rewritePrefix=\"file://$(docbook-dtd-dir)/\"/>" ; - } - - print.text "</catalog>" ; -} - -rule xml-catalog ( ) -{ - if ! $(.xml-catalog) - { - # The target is created as part of the root project. But ideally - # it would be created as part of the boostbook project. This is not - # current possible as such global projects don't inherit things like - # the build directory. - - # Find the root project. - local root-project = [ project.current ] ; - root-project = [ $(root-project).project-module ] ; - while - [ project.attribute $(root-project) parent-module ] && - [ project.attribute $(root-project) parent-module ] != user-config && - [ project.attribute $(root-project) parent-module ] != project-config - { - root-project = [ project.attribute $(root-project) parent-module ] ; - } - .xml-catalog = [ new file-target boostbook_catalog - : XML - : [ project.target $(root-project) ] - : [ new action : boostbook.generate-xml-catalog ] - : - ] ; - .xml-catalog-file = [ $(.xml-catalog).path ] [ $(.xml-catalog).name ] ; - .xml-catalog-file = $(.xml-catalog-file:J=/) ; - } - return $(.xml-catalog) $(.xml-catalog-file) ; -} - -class boostbook-generator : generator -{ - import feature ; - import virtual-target ; - import generators ; - import boostbook ; - - - rule __init__ ( * : * ) - { - generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule run ( project name ? : property-set : sources * ) - { - # Generate the catalog, but only once... - local global-catalog = [ boostbook.xml-catalog ] ; - local catalog = $(global-catalog[1]) ; - local catalog-file = $(global-catalog[2]) ; - local targets ; - - # Add the catalog to the property set - property-set = [ $(property-set).add-raw <catalog>$(catalog-file) ] ; - - local type = none ; - local manifest ; - local format = [ $(property-set).get <format> ] ; - switch $(format) - { - case html : - { - type = HTMLDIR ; - manifest = HTML.manifest ; - } - case xhtml : - { - type = XHTMLDIR ; - manifest = HTML.manifest ; - } - case htmlhelp : - { - type = HTMLHELP ; - manifest = HTML.manifest ; - } - - case onehtml : type = HTML ; - - case man : - { - type = MANPAGES ; - manifest = man.manifest ; - } - - case docbook : type = DOCBOOK ; - case fo : type = FO ; - case pdf : type = PDF ; - case ps : type = PS ; - case tests : type = TESTS ; - } - - if $(manifest) - { - # Create DOCBOOK file from BOOSTBOOK sources. 
- local base-target = [ generators.construct $(project) - : DOCBOOK : $(property-set) : $(sources) ] ; - base-target = $(base-target[2]) ; - $(base-target).depends $(catalog) ; - - # Generate HTML/PDF/PS from DOCBOOK. - local target = [ generators.construct $(project) $(name)_$(manifest) - : $(type) - : [ $(property-set).add-raw - <xsl:param>manifest=$(name)_$(manifest) ] - : $(base-target) ] ; - local name = [ $(property-set).get <name> ] ; - name ?= $(format) ; - $(target[2]).set-path $(name) ; - $(target[2]).depends $(catalog) ; - - targets += $(target[2]) ; - } - else { - local target = [ generators.construct $(project) - : $(type) : $(property-set) : $(sources) ] ; - - if ! $(target) - { - errors.error "Cannot build documentation type '$(format)'" ; - } - else - { - $(target[2]).depends $(catalog) ; - targets += $(target[2]) ; - } - } - - return $(targets) ; - } -} - -generators.register [ new boostbook-generator boostbook.main : : BOOSTBOOK_MAIN ] ; - -# Creates a boostbook target. -rule boostbook ( target-name : sources * : requirements * : default-build * ) -{ - local project = [ project.current ] ; - - targets.main-target-alternative - [ new typed-target $(target-name) : $(project) : BOOSTBOOK_MAIN - : [ targets.main-target-sources $(sources) : $(target-name) ] - : [ targets.main-target-requirements $(requirements) : $(project) ] - : [ targets.main-target-default-build $(default-build) : $(project) ] - ] ; -} - -############################################################################# -# Dependency scanners -############################################################################# -# XInclude scanner. Mostly stolen from c-scanner :) -# Note that this assumes an "xi" prefix for XIncludes. This isn't always the -# case for XML documents, but we'll assume it's true for anything we encounter. -class xinclude-scanner : scanner -{ - import virtual-target ; - import path ; - import scanner ; - - rule __init__ ( includes * ) - { - scanner.__init__ ; - self.includes = $(includes) ; - } - - rule pattern ( ) - { - return "xi:include[ ]*href=\"([^\"]*)\"" ; - } - - rule process ( target : matches * : binding ) - { - local target_path = [ NORMALIZE_PATH $(binding:D) ] ; - - NOCARE $(matches) ; - INCLUDES $(target) : $(matches) ; - SEARCH on $(matches) = $(target_path) $(self.includes:G=) ; - - scanner.propagate $(__name__) : $(matches) : $(target) ; - } -} - -scanner.register xinclude-scanner : xsl:path ; -type.set-scanner XML : xinclude-scanner ; - -rule boostbook-to-tests ( target : source : properties * ) -{ - lock-config ; - local boost_root = [ modules.peek : BOOST_ROOT ] ; - local native-path = - [ path.native [ path.join $(.boostbook-xsl-dir) testing Jamfile ] ] ; - local stylesheet = $(native-path:S=.xsl) ; - xslt $(target) : $(source) $(stylesheet) - : $(properties) <xsl:param>boost.root=$(boost_root) - ; -} - - diff --git a/jam-files/boost-build/tools/borland.jam b/jam-files/boost-build/tools/borland.jam deleted file mode 100644 index 6e43ca93..00000000 --- a/jam-files/boost-build/tools/borland.jam +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright 2005 Dave Abrahams -# Copyright 2003 Rene Rivera -# Copyright 2003, 2004, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. 
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Support for the Borland's command line compiler - -import property ; -import generators ; -import os ; -import toolset : flags ; -import feature : get-values ; -import type ; -import common ; - -feature.extend toolset : borland ; - -rule init ( version ? : command * : options * ) -{ - local condition = [ common.check-init-parameters borland : - version $(version) ] ; - - local command = [ common.get-invocation-command borland : bcc32.exe - : $(command) ] ; - - common.handle-options borland : $(condition) : $(command) : $(options) ; - - if $(command) - { - command = [ common.get-absolute-tool-path $(command[-1]) ] ; - } - root = $(command:D) ; - - flags borland.compile STDHDRS $(condition) : $(root)/include/ ; - flags borland.link STDLIBPATH $(condition) : $(root)/lib ; - flags borland.link RUN_PATH $(condition) : $(root)/bin ; - flags borland .root $(condition) : $(root)/bin/ ; -} - - -# A borland-specific target type -type.register BORLAND.TDS : tds ; - -# Declare generators - -generators.register-linker borland.link : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB : EXE : <toolset>borland ; -generators.register-linker borland.link.dll : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB : SHARED_LIB IMPORT_LIB : <toolset>borland ; - -generators.register-archiver borland.archive : OBJ : STATIC_LIB : <toolset>borland ; -generators.register-c-compiler borland.compile.c++ : CPP : OBJ : <toolset>borland ; -generators.register-c-compiler borland.compile.c : C : OBJ : <toolset>borland ; -generators.register-standard borland.asm : ASM : OBJ : <toolset>borland ; - -# Declare flags - -flags borland.compile OPTIONS <debug-symbols>on : -v ; -flags borland.link OPTIONS <debug-symbols>on : -v ; - -flags borland.compile OPTIONS <optimization>off : -Od ; -flags borland.compile OPTIONS <optimization>speed : -O2 ; -flags borland.compile OPTIONS <optimization>space : -O1 ; - -if $(.BORLAND_HAS_FIXED_INLINING_BUGS) -{ - flags borland CFLAGS <inlining>off : -vi- ; - flags borland CFLAGS <inlining>on : -vi -w-inl ; - flags borland CFLAGS <inlining>full : -vi -w-inl ; -} -else -{ - flags borland CFLAGS : -vi- ; -} - -flags borland.compile OPTIONS <warnings>off : -w- ; -flags borland.compile OPTIONS <warnings>all : -w ; -flags borland.compile OPTIONS <warnings-as-errors>on : -w! ; - - -# Deal with various runtime configs... - -# This should be not for DLL -flags borland OPTIONS <user-interface>console : -tWC ; - -# -tWR sets -tW as well, so we turn it off here and then turn it -# on again later if we need it: -flags borland OPTIONS <runtime-link>shared : -tWR -tWC ; -flags borland OPTIONS <user-interface>gui : -tW ; - -flags borland OPTIONS <main-target-type>LIB/<link>shared : -tWD ; -# Hmm.. not sure what's going on here. -flags borland OPTIONS : -WM- ; -flags borland OPTIONS <threading>multi : -tWM ; - - - -flags borland.compile OPTIONS <cxxflags> ; -flags borland.compile DEFINES <define> ; -flags borland.compile INCLUDES <include> ; - -flags borland NEED_IMPLIB <main-target-type>LIB/<link>shared : "" ; - -# -# for C++ compiles the following options are turned on by default: -# -# -j5 stops after 5 errors -# -g255 allow an unlimited number of warnings -# -q no banner -# -c compile to object -# -P C++ code regardless of file extention -# -a8 8 byte alignment, this option is on in the IDE by default -# and effects binary compatibility. 
-# - -# -U$(UNDEFS) -D$(DEFINES) $(CFLAGS) $(C++FLAGS) -I"$(HDRS)" -I"$(STDHDRS)" -o"$(<)" "$(>)" - - -actions compile.c++ -{ - "$(CONFIG_COMMAND)" -j5 -g255 -q -c -P -a8 -Vx- -Ve- -b- $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -I"$(STDHDRS)" -o"$(<)" "$(>)" -} - -# For C, we don't pass -P flag -actions compile.c -{ - "$(CONFIG_COMMAND)" -j5 -g255 -q -c -a8 -Vx- -Ve- -b- $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -I"$(STDHDRS)" -o"$(<)" "$(>)" -} - - -# Declare flags and action for linking -toolset.flags borland.link OPTIONS <debug-symbols>on : -v ; -toolset.flags borland.link LIBRARY_PATH <library-path> ; -toolset.flags borland.link FINDLIBS_ST <find-static-library> ; -toolset.flags borland.link FINDLIBS_SA <find-shared-library> ; -toolset.flags borland.link LIBRARIES <library-file> ; - -flags borland.link OPTIONS <linkflags> ; -flags borland.link OPTIONS <link>shared : -tWD ; - -flags borland.link LIBRARY_PATH_OPTION <toolset>borland : -L : unchecked ; -flags borland.link LIBRARY_OPTION <toolset>borland : "" : unchecked ; - - - -# bcc32 needs to have ilink32 in the path in order to invoke it, so explicitly -# specifying $(BCC_TOOL_PATH)bcc32 doesn't help. You need to add -# $(BCC_TOOL_PATH) to the path -# The NEED_IMPLIB variable controls whether we need to invoke implib. - -flags borland.archive AROPTIONS <archiveflags> ; - -# Declare action for archives. We don't use response file -# since it's hard to get "+-" there. -# The /P256 increases 'page' size -- with too low -# values tlib fails when building large applications. -# CONSIDER: don't know what 'together' is for... -actions updated together piecemeal archive -{ - $(.set-path)$(.root:W)$(.old-path) - tlib $(AROPTIONS) /P256 /u /a /C "$(<:W)" +-"$(>:W)" -} - - -if [ os.name ] = CYGWIN -{ - .set-path = "cmd /S /C set \"PATH=" ; - .old-path = ";%PATH%\" \"&&\"" ; - - - # Couldn't get TLIB to stop being confused about pathnames - # containing dashes (it seemed to treat them as option separators - # when passed through from bash), so we explicitly write the - # command into a .bat file and execute that. TLIB is also finicky - # about pathname style! Forward slashes, too, are treated as - # options. - actions updated together piecemeal archive - { - chdir $(<:D) - echo +-$(>:BS) > $(<:BS).rsp - $(.set-path)$(.root)$(.old-path) "tlib.exe" $(AROPTIONS) /P256 /C $(<:BS) @$(<:BS).rsp && $(RM) $(<:BS).rsp - } -} -else if [ os.name ] = NT -{ - .set-path = "set \"PATH=" ; - .old-path = ";%PATH%\" - " ; -} -else -{ - .set-path = "PATH=\"" ; - .old-path = "\":$PATH - export PATH - " ; -} - -RM = [ common.rm-command ] ; - -nl = " -" ; - -actions link -{ - $(.set-path)$(.root:W)$(.old-path) "$(CONFIG_COMMAND)" -v -q $(OPTIONS) -L"$(LIBRARY_PATH:W)" -L"$(STDLIBPATH:W)" -e"$(<[1]:W)" @"@($(<[1]:W).rsp:E=$(nl)"$(>)" $(nl)$(LIBRARIES) $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST:S=.lib)" $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA:S=.lib)")" -} - - -actions link.dll bind LIBRARIES RSP -{ - $(.set-path)$(.root:W)$(.old-path) "$(CONFIG_COMMAND)" -v -q $(OPTIONS) -L"$(LIBRARY_PATH:W)" -L"$(STDLIBPATH:W)" -e"$(<[1]:W)" @"@($(<[1]:W).rsp:E=$(nl)"$(>)" $(nl)$(LIBRARIES) $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST:S=.lib)" $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA:S=.lib)")" && "$(.root)implib" "$(<[2]:W)" "$(<[1]:W)" -} - -# It seems impossible to specify output file with directory when compiling -# asm files using bcc32, so use tasm32 directly. 
-# /ml makes all symbol names case-sensitive -actions asm -{ - $(.set-path)$(.root:W)$(.old-path) tasm32.exe /ml "$(>)" "$(<)" -} - diff --git a/jam-files/boost-build/tools/builtin.jam b/jam-files/boost-build/tools/builtin.jam deleted file mode 100644 index 148e7308..00000000 --- a/jam-files/boost-build/tools/builtin.jam +++ /dev/null @@ -1,960 +0,0 @@ -# Copyright 2002, 2003, 2004, 2005 Dave Abrahams -# Copyright 2002, 2005, 2006, 2007, 2010 Rene Rivera -# Copyright 2006 Juergen Hunold -# Copyright 2005 Toon Knapen -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Defines standard features and rules. - -import alias ; -import "class" : new ; -import errors ; -import feature ; -import generators ; -import numbers ; -import os ; -import path ; -import print ; -import project ; -import property ; -import regex ; -import scanner ; -import sequence ; -import stage ; -import symlink ; -import toolset ; -import type ; -import targets ; -import types/register ; -import utility ; -import virtual-target ; -import message ; -import convert ; - -# FIXME: the following generate module import is not needed here but removing it -# too hastly will break using code (e.g. the main Boost library Jamroot file) -# that forgot to import the generate module before calling the generate rule. -import generate ; - - -.os-names = aix bsd cygwin darwin freebsd hpux iphone linux netbsd - openbsd osf qnx qnxnto sgi solaris unix unixware windows - elf # Not actually an OS -- used for targeting bare metal where - # object format is ELF. This catches both -elf and -eabi gcc - # targets and well as other compilers targeting ELF. It is not - # clear how often do we need to key of ELF specifically as opposed - # to other bare metal targets, but let's stick with gcc naming. - ; - -# Feature used to determine which OS we're on. New <target-os> and <host-os> -# features should be used instead. -local os = [ modules.peek : OS ] ; -feature.feature os : $(os) : propagated link-incompatible ; - - -# Translates from bjam current OS to the os tags used in host-os and target-os, -# i.e. returns the running host-os. -# -local rule default-host-os ( ) -{ - local host-os ; - if [ os.name ] in $(.os-names:U) - { - host-os = [ os.name ] ; - } - else - { - switch [ os.name ] - { - case NT : host-os = windows ; - case AS400 : host-os = unix ; - case MINGW : host-os = windows ; - case BSDI : host-os = bsd ; - case COHERENT : host-os = unix ; - case DRAGONFLYBSD : host-os = bsd ; - case IRIX : host-os = sgi ; - case MACOSX : host-os = darwin ; - case KFREEBSD : host-os = freebsd ; - case LINUX : host-os = linux ; - case SUNOS : - ECHO "SunOS is not a supported operating system." ; - ECHO "We believe last version of SunOS was released in 1992, " ; - ECHO "so if you get this message, something is very wrong with configuration logic. " ; - ECHO "Please report this as a bug. " ; - EXIT ; - case * : host-os = unix ; - } - } - return $(host-os:L) ; -} - - -# The two OS features define a known set of abstract OS names. The host-os is -# the OS under which bjam is running. Even though this should really be a fixed -# property we need to list all the values to prevent unknown value errors. Both -# set the default value to the current OS to account for the default use case of -# building on the target OS. 
-feature.feature host-os : $(.os-names) ; -feature.set-default host-os : [ default-host-os ] ; - -feature.feature target-os : $(.os-names) : propagated link-incompatible ; -feature.set-default target-os : [ default-host-os ] ; - - -feature.feature toolset : : implicit propagated symmetric ; -feature.feature stdlib : native : propagated composite ; -feature.feature link : shared static : propagated ; -feature.feature runtime-link : shared static : propagated ; -feature.feature runtime-debugging : on off : propagated ; -feature.feature optimization : off speed space none : propagated ; -feature.feature profiling : off on : propagated ; -feature.feature inlining : off on full : propagated ; -feature.feature threading : single multi : propagated ; -feature.feature rtti : on off : propagated ; -feature.feature exception-handling : on off : propagated ; - -# Whether there is support for asynchronous EH (e.g. catching SEGVs). -feature.feature asynch-exceptions : off on : propagated ; - -# Whether all extern "C" functions are considered nothrow by default. -feature.feature extern-c-nothrow : off on : propagated ; - -feature.feature debug-symbols : on off none : propagated ; -# Controls whether the binary should be stripped -- that is have -# everything not necessary to running removed. This option should -# not be very often needed. Also, this feature will show up in -# target paths of everything, not just binaries. Should fix that -# when impelementing feature relevance. -feature.feature strip : off on : propagated ; -feature.feature define : : free ; -feature.feature undef : : free ; -feature.feature "include" : : free path ; #order-sensitive ; -feature.feature cflags : : free ; -feature.feature cxxflags : : free ; -feature.feature fflags : : free ; -feature.feature asmflags : : free ; -feature.feature linkflags : : free ; -feature.feature archiveflags : : free ; -feature.feature version : : free ; - -# Generic, i.e. non-language specific, flags for tools. -feature.feature flags : : free ; -feature.feature location-prefix : : free ; - - -# The following features are incidental since they have no effect on built -# products. Not making them incidental will result in problems in corner cases, -# e.g.: -# -# unit-test a : a.cpp : <use>b ; -# lib b : a.cpp b ; -# -# Here, if <use> is not incidental, we would decide we have two targets for -# a.obj with different properties and complain about it. -# -# Note that making a feature incidental does not mean it is ignored. It may be -# ignored when creating a virtual target, but the rest of build process will use -# them. -feature.feature use : : free dependency incidental ; -feature.feature dependency : : free dependency incidental ; -feature.feature implicit-dependency : : free dependency incidental ; - -feature.feature warnings : - on # Enable default/"reasonable" warning level for the tool. - all # Enable all possible warnings issued by the tool. - off # Disable all warnings issued by the tool. - : incidental propagated ; - -feature.feature warnings-as-errors : - off # Do not fail the compilation if there are warnings. - on # Fail the compilation if there are warnings. - : incidental propagated ; - -# Feature that allows us to configure the maximal template instantiation depth -# level allowed by a C++ compiler. Applies only to C++ toolsets whose compilers -# actually support this configuration setting. 
-# -# Note that Boost Build currently does not allow defining features that take any -# positive integral value as a parameter, which is what we need here, so we just -# define some of the values here and leave it up to the user to extend this set -# as he needs using the feature.extend rule. -# -# TODO: This should be upgraded as soon as Boost Build adds support for custom -# validated feature values or at least features allowing any positive integral -# value. See related Boost Build related trac ticket #194. -# -feature.feature c++-template-depth - : - [ numbers.range 64 1024 : 64 ] - [ numbers.range 20 1000 : 10 ] - # Maximum template instantiation depth guaranteed for ANSI/ISO C++ - # conforming programs. - 17 - : - incidental optional propagated ; - -feature.feature source : : free dependency incidental ; -feature.feature library : : free dependency incidental ; -feature.feature file : : free dependency incidental ; -feature.feature find-shared-library : : free ; #order-sensitive ; -feature.feature find-static-library : : free ; #order-sensitive ; -feature.feature library-path : : free path ; #order-sensitive ; - -# Internal feature. -feature.feature library-file : : free dependency ; - -feature.feature name : : free ; -feature.feature tag : : free ; -feature.feature search : : free path ; #order-sensitive ; -feature.feature location : : free path ; -feature.feature dll-path : : free path ; -feature.feature hardcode-dll-paths : true false : incidental ; - - -# An internal feature that holds the paths of all dependency shared libraries. -# On Windows, it is needed so that we can add all those paths to PATH when -# running applications. On Linux, it is needed to add proper -rpath-link command -# line options. -feature.feature xdll-path : : free path ; - -# Provides means to specify def-file for windows DLLs. -feature.feature def-file : : free dependency ; - -feature.feature suppress-import-lib : false true : incidental ; - -# Internal feature used to store the name of a bjam action to call when building -# a target. -feature.feature action : : free ; - -# This feature is used to allow specific generators to run. For example, QT -# tools can only be invoked when QT library is used. In that case, <allow>qt -# will be in usage requirement of the library. -feature.feature allow : : free ; - -# The addressing model to generate code for. Currently a limited set only -# specifying the bit size of pointers. -feature.feature address-model : 16 32 64 32_64 : propagated optional ; - -# Type of CPU architecture to compile for. -feature.feature architecture : - # x86 and x86-64 - x86 - - # ia64 - ia64 - - # Sparc - sparc - - # RS/6000 & PowerPC - power - - # MIPS/SGI - mips1 mips2 mips3 mips4 mips32 mips32r2 mips64 - - # HP/PA-RISC - parisc - - # Advanced RISC Machines - arm - - # Combined architectures for platforms/toolsets that support building for - # multiple architectures at once. "combined" would be the default multi-arch - # for the toolset. - combined - combined-x86-power - - : propagated optional ; - -# The specific instruction set in an architecture to compile. 
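The c++-template-depth feature declared a little further up has to enumerate its legal values ahead of time, since Boost.Build features cannot accept an arbitrary positive integer. A minimal Python sketch of that enumeration, assuming numbers.range is inclusive of both endpoints and steps by its third argument:

    # Values pre-generated for c++-template-depth: multiples of 64 up to
    # 1024, multiples of 10 from 20 to 1000, and the ANSI/ISO minimum 17.
    values = sorted(set(range(64, 1025, 64)) |
                    set(range(20, 1001, 10)) |
                    {17})
    print(values[:4])  # [17, 20, 30, 40]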
-feature.feature instruction-set : - # x86 and x86-64 - native i386 i486 i586 i686 pentium pentium-mmx pentiumpro pentium2 pentium3 - pentium3m pentium-m pentium4 pentium4m prescott nocona core2 conroe conroe-xe - conroe-l allendale mermon mermon-xe kentsfield kentsfield-xe penryn wolfdale - yorksfield nehalem k6 k6-2 k6-3 athlon athlon-tbird athlon-4 athlon-xp - athlon-mp k8 opteron athlon64 athlon-fx winchip-c6 winchip2 c3 c3-2 - - # ia64 - itanium itanium1 merced itanium2 mckinley - - # Sparc - v7 cypress v8 supersparc sparclite hypersparc sparclite86x f930 f934 - sparclet tsc701 v9 ultrasparc ultrasparc3 - - # RS/6000 & PowerPC - 401 403 405 405fp 440 440fp 505 601 602 603 603e 604 604e 620 630 740 7400 - 7450 750 801 821 823 860 970 8540 power-common ec603e g3 g4 g5 power power2 - power3 power4 power5 powerpc powerpc64 rios rios1 rsc rios2 rs64a - - # MIPS - 4kc 4kp 5kc 20kc m4k r2000 r3000 r3900 r4000 r4100 r4300 r4400 r4600 r4650 - r6000 r8000 rm7000 rm9000 orion sb1 vr4100 vr4111 vr4120 vr4130 vr4300 - vr5000 vr5400 vr5500 - - # HP/PA-RISC - 700 7100 7100lc 7200 7300 8000 - - # Advanced RISC Machines - armv2 armv2a armv3 armv3m armv4 armv4t armv5 armv5t armv5te armv6 armv6j iwmmxt ep9312 - - : propagated optional ; - -# Used to select a specific variant of C++ ABI if the compiler supports several. -feature.feature c++abi : : propagated optional ; - -feature.feature conditional : : incidental free ; - -# The value of 'no' prevents building of a target. -feature.feature build : yes no : optional ; - -# Windows-specific features - -feature.feature user-interface : console gui wince native auto ; - -feature.feature variant : : implicit composite propagated symmetric ; - - -# Declares a new variant. -# -# First determines explicit properties for this variant, by refining parents' -# explicit properties with the passed explicit properties. The result is -# remembered and will be used if this variant is used as parent. -# -# Second, determines the full property set for this variant by adding to the -# explicit properties default values for all missing non-symmetric properties. -# -# Lastly, makes appropriate value of 'variant' property expand to the full -# property set. -# -rule variant ( name # Name of the variant - : parents-or-properties * # Specifies parent variants, if - # 'explicit-properties' are given, and - # explicit-properties or parents otherwise. - : explicit-properties * # Explicit properties. - ) -{ - local parents ; - if ! $(explicit-properties) - { - if $(parents-or-properties[1]:G) - { - explicit-properties = $(parents-or-properties) ; - } - else - { - parents = $(parents-or-properties) ; - } - } - else - { - parents = $(parents-or-properties) ; - } - - # The problem is that we have to check for conflicts between base variants. - if $(parents[2]) - { - errors.error "multiple base variants are not yet supported" ; - } - - local inherited ; - # Add explicitly specified properties for parents. - for local p in $(parents) - { - # TODO: This check may be made stricter. - if ! [ feature.is-implicit-value $(p) ] - { - errors.error "Invalid base variant" $(p) ; - } - - inherited += $(.explicit-properties.$(p)) ; - } - property.validate $(explicit-properties) ; - explicit-properties = [ property.refine $(inherited) - : $(explicit-properties) ] ; - - # Record explicitly specified properties for this variant. We do this after - # inheriting parents' properties so they affect other variants derived from - # this one. 
- .explicit-properties.$(name) = $(explicit-properties) ; - - feature.extend variant : $(name) ; - feature.compose <variant>$(name) : $(explicit-properties) ; -} -IMPORT $(__name__) : variant : : variant ; - - -variant debug : <optimization>off <debug-symbols>on <inlining>off - <runtime-debugging>on ; -variant release : <optimization>speed <debug-symbols>off <inlining>full - <runtime-debugging>off <define>NDEBUG ; -variant profile : release : <profiling>on <debug-symbols>on ; - - -class searched-lib-target : abstract-file-target -{ - rule __init__ ( name - : project - : shared ? - : search * - : action - ) - { - abstract-file-target.__init__ $(name) : SEARCHED_LIB : $(project) - : $(action) : ; - - self.shared = $(shared) ; - self.search = $(search) ; - } - - rule shared ( ) - { - return $(self.shared) ; - } - - rule search ( ) - { - return $(self.search) ; - } - - rule actualize-location ( target ) - { - NOTFILE $(target) ; - } - - rule path ( ) - { - } -} - - -# The generator class for libraries (target type LIB). Depending on properties -# it will request building of the appropriate specific library type -- -# -- SHARED_LIB, STATIC_LIB or SHARED_LIB. -# -class lib-generator : generator -{ - rule __init__ ( * : * ) - { - generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule run ( project name ? : property-set : sources * ) - { - # The lib generator is composing, and can be only invoked with an - # explicit name. This check is present in generator.run (and so in - # builtin.linking-generator) but duplicated here to avoid doing extra - # work. - if $(name) - { - local properties = [ $(property-set).raw ] ; - # Determine the needed target type. - local actual-type ; - # <source>files can be generated by <conditional>@rule feature - # in which case we do not consider it a SEARCHED_LIB type. - if ! <source> in $(properties:G) && - ( <search> in $(properties:G) || <name> in $(properties:G) ) - { - actual-type = SEARCHED_LIB ; - } - else if <file> in $(properties:G) - { - actual-type = LIB ; - } - else if <link>shared in $(properties) - { - actual-type = SHARED_LIB ; - } - else - { - actual-type = STATIC_LIB ; - } - property-set = [ $(property-set).add-raw <main-target-type>LIB ] ; - # Construct the target. - return [ generators.construct $(project) $(name) : $(actual-type) - : $(property-set) : $(sources) ] ; - } - } - - rule viable-source-types ( ) - { - return * ; - } -} - - -generators.register [ new lib-generator builtin.lib-generator : : LIB ] ; - - -# The implementation of the 'lib' rule. Beyond standard syntax that rule allows -# simplified: "lib a b c ;". -# -rule lib ( names + : sources * : requirements * : default-build * : - usage-requirements * ) -{ - if $(names[2]) - { - if <name> in $(requirements:G) - { - errors.user-error "When several names are given to the 'lib' rule" : - "it is not allowed to specify the <name> feature." ; - } - if $(sources) - { - errors.user-error "When several names are given to the 'lib' rule" : - "it is not allowed to specify sources." ; - } - } - - # This is a circular module dependency so it must be imported here. - import targets ; - - local project = [ project.current ] ; - local result ; - - for local name in $(names) - { - local r = $(requirements) ; - # Support " lib a ; " and " lib a b c ; " syntax. - if ! $(sources) && ! <name> in $(requirements:G) - && ! 
<file> in $(requirements:G) - { - r += <name>$(name) ; - } - result += [ targets.main-target-alternative - [ new typed-target $(name) : $(project) : LIB - : [ targets.main-target-sources $(sources) : $(name) ] - : [ targets.main-target-requirements $(r) : $(project) ] - : [ targets.main-target-default-build $(default-build) : $(project) ] - : [ targets.main-target-usage-requirements $(usage-requirements) : $(project) ] - ] ] ; - } - return $(result) ; -} -IMPORT $(__name__) : lib : : lib ; - - -class searched-lib-generator : generator -{ - import property-set ; - - rule __init__ ( ) - { - # The requirements cause the generators to be tried *only* when we're - # building a lib target with a 'search' feature. This seems ugly --- all - # we want is to make sure searched-lib-generator is not invoked deep - # inside transformation search to produce intermediate targets. - generator.__init__ searched-lib-generator : : SEARCHED_LIB ; - } - - rule run ( project name ? : property-set : sources * ) - { - if $(name) - { - # If 'name' is empty, it means we have not been called to build a - # top-level target. In this case, we just fail immediately, because - # searched-lib-generator cannot be used to produce intermediate - # targets. - - local properties = [ $(property-set).raw ] ; - local shared ; - if <link>shared in $(properties) - { - shared = true ; - } - - local search = [ feature.get-values <search> : $(properties) ] ; - - local a = [ new null-action $(property-set) ] ; - local lib-name = [ feature.get-values <name> : $(properties) ] ; - lib-name ?= $(name) ; - local t = [ new searched-lib-target $(lib-name) : $(project) - : $(shared) : $(search) : $(a) ] ; - # We return sources for a simple reason. If there is - # lib png : z : <name>png ; - # the 'z' target should be returned, so that apps linking to 'png' - # will link to 'z', too. - return [ property-set.create <xdll-path>$(search) ] - [ virtual-target.register $(t) ] $(sources) ; - } - } -} - -generators.register [ new searched-lib-generator ] ; - - -class prebuilt-lib-generator : generator -{ - rule __init__ ( * : * ) - { - generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule run ( project name ? : property-set : sources * ) - { - local f = [ $(property-set).get <file> ] ; - return $(f) $(sources) ; - } -} - -generators.register - [ new prebuilt-lib-generator builtin.prebuilt : : LIB : <file> ] ; - -generators.override builtin.prebuilt : builtin.lib-generator ; - -class preprocessed-target-class : basic-target -{ - import generators ; - rule construct ( name : sources * : property-set ) - { - local result = [ generators.construct [ project ] - $(name) : PREPROCESSED_CPP : $(property-set) : $(sources) ] ; - if ! $(result) - { - result = [ generators.construct [ project ] - $(name) : PREPROCESSED_C : $(property-set) : $(sources) ] ; - } - if ! $(result) - { - local s ; - for x in $(sources) - { - s += [ $(x).name ] ; - } - local p = [ project ] ; - errors.user-error - "In project" [ $(p).name ] : - "Could not construct preprocessed file \"$(name)\" from $(s:J=, )." 
; - } - return $(result) ; - } -} - -rule preprocessed ( name : sources * : requirements * : default-build * : - usage-requirements * ) -{ - local project = [ project.current ] ; - return [ targets.main-target-alternative - [ new preprocessed-target-class $(name) : $(project) - : [ targets.main-target-sources $(sources) : $(name) ] - : [ targets.main-target-requirements $(r) : $(project) ] - : [ targets.main-target-default-build $(default-build) : $(project) ] - : [ targets.main-target-usage-requirements $(usage-requirements) : $(project) ] - ] ] ; -} - -IMPORT $(__name__) : preprocessed : : preprocessed ; - -class compile-action : action -{ - import sequence ; - - rule __init__ ( targets * : sources * : action-name : properties * ) - { - action.__init__ $(targets) : $(sources) : $(action-name) : $(properties) ; - } - - # For all virtual targets for the same dependency graph as self, i.e. which - # belong to the same main target, add their directories to the include path. - # - rule adjust-properties ( property-set ) - { - local s = [ $(self.targets[1]).creating-subvariant ] ; - return [ $(property-set).add-raw - [ $(s).implicit-includes "include" : H ] ] ; - } -} - - -# Declare a special compiler generator. The only thing it does is changing the -# type used to represent 'action' in the constructed dependency graph to -# 'compile-action'. That class in turn adds additional include paths to handle -# cases when a source file includes headers which are generated themselves. -# -class C-compiling-generator : generator -{ - rule __init__ ( id : source-types + : target-types + : requirements * - : optional-properties * ) - { - generator.__init__ $(id) : $(source-types) : $(target-types) : - $(requirements) : $(optional-properties) ; - } - - rule action-class ( ) - { - return compile-action ; - } -} - - -rule register-c-compiler ( id : source-types + : target-types + : requirements * - : optional-properties * ) -{ - generators.register [ new C-compiling-generator $(id) : $(source-types) : - $(target-types) : $(requirements) : $(optional-properties) ] ; -} - -# FIXME: this is ugly, should find a better way (we would like client code to -# register all generators as "generators.some-rule" instead of -# "some-module.some-rule".) -# -IMPORT $(__name__) : register-c-compiler : : generators.register-c-compiler ; - - -# The generator class for handling EXE and SHARED_LIB creation. -# -class linking-generator : generator -{ - import path ; - import project ; - import property-set ; - import type ; - - rule __init__ ( id - composing ? : # The generator will be composing if a non-empty - # string is passed or the parameter is not given. To - # make the generator non-composing, pass an empty - # string (""). - source-types + : - target-types + : - requirements * ) - { - composing ?= true ; - generator.__init__ $(id) $(composing) : $(source-types) - : $(target-types) : $(requirements) ; - } - - rule run ( project name ? : property-set : sources + ) - { - sources += [ $(property-set).get <library> ] ; - - # Add <library-path> properties for all searched libraries. - local extra ; - for local s in $(sources) - { - if [ $(s).type ] = SEARCHED_LIB - { - local search = [ $(s).search ] ; - extra += <library-path>$(search) ; - } - } - - # It is possible that sources include shared libraries that did not came - # from 'lib' targets, e.g. .so files specified as sources. 
In this case - # we have to add extra dll-path properties and propagate extra xdll-path - # properties so that application linking to us will get xdll-path to - # those libraries. - local extra-xdll-paths ; - for local s in $(sources) - { - if [ type.is-derived [ $(s).type ] SHARED_LIB ] && ! [ $(s).action ] - { - # Unfortunately, we do not have a good way to find the path to a - # file, so use this nasty approach. - # - # TODO: This needs to be done better. One thing that is really - # broken with this is that it does not work correctly with - # projects having multiple source locations. - local p = [ $(s).project ] ; - local location = [ path.root [ $(s).name ] - [ $(p).get source-location ] ] ; - extra-xdll-paths += [ path.parent $(location) ] ; - } - } - - # Hardcode DLL paths only when linking executables. - # Pros: do not need to relink libraries when installing. - # Cons: "standalone" libraries (plugins, python extensions) can not - # hardcode paths to dependent libraries. - if [ $(property-set).get <hardcode-dll-paths> ] = true - && [ type.is-derived $(self.target-types[1]) EXE ] - { - local xdll-path = [ $(property-set).get <xdll-path> ] ; - extra += <dll-path>$(xdll-path) <dll-path>$(extra-xdll-paths) ; - } - - if $(extra) - { - property-set = [ $(property-set).add-raw $(extra) ] ; - } - - local result = [ generator.run $(project) $(name) : $(property-set) - : $(sources) ] ; - - local ur ; - if $(result) - { - ur = [ extra-usage-requirements $(result) : $(property-set) ] ; - ur = [ $(ur).add - [ property-set.create <xdll-path>$(extra-xdll-paths) ] ] ; - } - return $(ur) $(result) ; - } - - rule extra-usage-requirements ( created-targets * : property-set ) - { - local result = [ property-set.empty ] ; - local extra ; - - # Add appropricate <xdll-path> usage requirements. - local raw = [ $(property-set).raw ] ; - if <link>shared in $(raw) - { - local paths ; - local pwd = [ path.pwd ] ; - for local t in $(created-targets) - { - if [ type.is-derived [ $(t).type ] SHARED_LIB ] - { - paths += [ path.root [ path.make [ $(t).path ] ] $(pwd) ] ; - } - } - extra += $(paths:G=<xdll-path>) ; - } - - # We need to pass <xdll-path> features that we've got from sources, - # because if a shared library is built, exe using it needs to know paths - # to other shared libraries this one depends on in order to be able to - # find them all at runtime. - - # Just pass all features in property-set, it is theorically possible - # that we will propagate <xdll-path> features explicitly specified by - # the user, but then the user is to blaim for using an internal feature. - local values = [ $(property-set).get <xdll-path> ] ; - extra += $(values:G=<xdll-path>) ; - - if $(extra) - { - result = [ property-set.create $(extra) ] ; - } - return $(result) ; - } - - rule generated-targets ( sources + : property-set : project name ? ) - { - local sources2 ; # Sources to pass to inherited rule. - local properties2 ; # Properties to pass to inherited rule. - local libraries ; # Library sources. - - # Searched libraries are not passed as arguments to the linker but via - # some option. So, we pass them to the action using a property. 
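A self-contained Python sketch (hypothetical names, not Boost.Build code) of the partitioning the jam code below performs: searched-lib sources are split into shared and static name lists, each joined with "&&" into a single property value handed to the link action:

    from collections import namedtuple

    Source = namedtuple('Source', 'name type shared')

    def partition(sources):
        fsa, fst, rest = [], [], []  # shared libs, static libs, real sources
        for s in sources:
            if s.type == 'SEARCHED_LIB':
                (fsa if s.shared else fst).append(s.name)
            else:
                rest.append(s)
        # mirrors jam's $(list:J=&&); jam simply omits the property
        # altogether when the list is empty
        props = ['<find-shared-library>' + '&&'.join(fsa),
                 '<find-static-library>' + '&&'.join(fst)]
        return rest, props

    demo = [Source('z', 'SEARCHED_LIB', True),
            Source('m', 'SEARCHED_LIB', False),
            Source('a.o', 'OBJ', False)]
    print(partition(demo)[1])
    # ['<find-shared-library>z', '<find-static-library>m']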
- properties2 = [ $(property-set).raw ] ; - local fsa ; - local fst ; - for local s in $(sources) - { - if [ type.is-derived [ $(s).type ] SEARCHED_LIB ] - { - local name = [ $(s).name ] ; - if [ $(s).shared ] - { - fsa += $(name) ; - } - else - { - fst += $(name) ; - } - } - else - { - sources2 += $(s) ; - } - } - properties2 += <find-shared-library>$(fsa:J=&&) - <find-static-library>$(fst:J=&&) ; - - return [ generator.generated-targets $(sources2) - : [ property-set.create $(properties2) ] : $(project) $(name) ] ; - } -} - - -rule register-linker ( id composing ? : source-types + : target-types + - : requirements * ) -{ - generators.register [ new linking-generator $(id) $(composing) - : $(source-types) : $(target-types) : $(requirements) ] ; -} - - -# The generator class for handling STATIC_LIB creation. -# -class archive-generator : generator -{ - import property-set ; - - rule __init__ ( id composing ? : source-types + : target-types + - : requirements * ) - { - composing ?= true ; - generator.__init__ $(id) $(composing) : $(source-types) - : $(target-types) : $(requirements) ; - } - - rule run ( project name ? : property-set : sources + ) - { - sources += [ $(property-set).get <library> ] ; - - local result = [ generator.run $(project) $(name) : $(property-set) - : $(sources) ] ; - - # For static linking, if we get a library in source, we can not directly - # link to it so we need to cause our dependencies to link to that - # library. There are two approaches: - # - adding the library to the list of returned targets. - # - using the <library> usage requirements. - # The problem with the first is: - # - # lib a1 : : <file>liba1.a ; - # lib a2 : a2.cpp a1 : <link>static ; - # install dist : a2 ; - # - # here we will try to install 'a1', even though it is not necessary in - # the general case. With the second approach, even indirect dependants - # will link to the library, but it should not cause any harm. So, return - # all LIB sources together with created targets, so that dependants link - # to them. - local usage-requirements ; - if [ $(property-set).get <link> ] = static - { - for local t in $(sources) - { - if [ type.is-derived [ $(t).type ] LIB ] - { - usage-requirements += <library>$(t) ; - } - } - } - - usage-requirements = [ property-set.create $(usage-requirements) ] ; - - return $(usage-requirements) $(result) ; - } -} - - -rule register-archiver ( id composing ? : source-types + : target-types + - : requirements * ) -{ - generators.register [ new archive-generator $(id) $(composing) - : $(source-types) : $(target-types) : $(requirements) ] ; -} - - -# Generator that accepts everything and produces nothing. Useful as a general -# fallback for toolset-specific actions like PCH generation. -# -class dummy-generator : generator -{ - import property-set ; - - rule run ( project name ? : property-set : sources + ) - { - return [ property-set.empty ] ; - } -} - -IMPORT $(__name__) : register-linker register-archiver - : : generators.register-linker generators.register-archiver ; diff --git a/jam-files/boost-build/tools/builtin.py b/jam-files/boost-build/tools/builtin.py deleted file mode 100644 index 31a7bffe..00000000 --- a/jam-files/boost-build/tools/builtin.py +++ /dev/null @@ -1,718 +0,0 @@ -# Status: minor updates by Steven Watanabe to make gcc work -# -# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. 
This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -""" Defines standard features and rules. -""" - -import b2.build.targets as targets - -import sys -from b2.build import feature, property, virtual_target, generators, type, property_set, scanner -from b2.util.utility import * -from b2.util import path, regex, bjam_signature -import b2.tools.types -from b2.manager import get_manager - - -# Records explicit properties for a variant. -# The key is the variant name. -__variant_explicit_properties = {} - -def reset (): - """ Clear the module state. This is mainly for testing purposes. - """ - global __variant_explicit_properties - - __variant_explicit_properties = {} - -@bjam_signature((["name"], ["parents_or_properties", "*"], ["explicit_properties", "*"])) -def variant (name, parents_or_properties, explicit_properties = []): - """ Declares a new variant. - First determines explicit properties for this variant, by - refining parents' explicit properties with the passed explicit - properties. The result is remembered and will be used if - this variant is used as parent. - - Second, determines the full property set for this variant by - adding to the explicit properties default values for all properties - which neither present nor are symmetric. - - Lastly, makes appropriate value of 'variant' property expand - to the full property set. - name: Name of the variant - parents_or_properties: Specifies parent variants, if - 'explicit_properties' are given, - and explicit_properties otherwise. - explicit_properties: Explicit properties. - """ - parents = [] - if not explicit_properties: - explicit_properties = parents_or_properties - else: - parents = parents_or_properties - - inherited = property_set.empty() - if parents: - - # If we allow multiple parents, we'd have to to check for conflicts - # between base variants, and there was no demand for so to bother. - if len (parents) > 1: - raise BaseException ("Multiple base variants are not yet supported") - - p = parents[0] - # TODO: the check may be stricter - if not feature.is_implicit_value (p): - raise BaseException ("Invalid base varaint '%s'" % p) - - inherited = __variant_explicit_properties[p] - - explicit_properties = property_set.create_with_validation(explicit_properties) - explicit_properties = inherited.refine(explicit_properties) - - # Record explicitly specified properties for this variant - # We do this after inheriting parents' properties, so that - # they affect other variants, derived from this one. - __variant_explicit_properties[name] = explicit_properties - - feature.extend('variant', [name]) - feature.compose ("<variant>" + name, explicit_properties.all()) - -__os_names = """ - amiga aix bsd cygwin darwin dos emx freebsd hpux iphone linux netbsd - openbsd osf qnx qnxnto sgi solaris sun sunos svr4 sysv ultrix unix unixware - vms windows -""".split() - -# Translates from bjam current OS to the os tags used in host-os and target-os, -# i.e. returns the running host-os. 
-# -def default_host_os(): - host_os = os_name() - if host_os not in (x.upper() for x in __os_names): - if host_os == 'NT': host_os = 'windows' - elif host_os == 'AS400': host_os = 'unix' - elif host_os == 'MINGW': host_os = 'windows' - elif host_os == 'BSDI': host_os = 'bsd' - elif host_os == 'COHERENT': host_os = 'unix' - elif host_os == 'DRAGONFLYBSD': host_os = 'bsd' - elif host_os == 'IRIX': host_os = 'sgi' - elif host_os == 'MACOSX': host_os = 'darwin' - elif host_os == 'KFREEBSD': host_os = 'freebsd' - elif host_os == 'LINUX': host_os = 'linux' - else: host_os = 'unix' - return host_os.lower() - -def register_globals (): - """ Registers all features and variants declared by this module. - """ - - # This feature is used to determine which OS we're on. - # In future, this may become <target-os> and <host-os> - # TODO: check this. Compatibility with bjam names? Subfeature for version? - os = sys.platform - feature.feature ('os', [os], ['propagated', 'link-incompatible']) - - - # The two OS features define a known set of abstract OS names. The host-os is - # the OS under which bjam is running. Even though this should really be a fixed - # property we need to list all the values to prevent unknown value errors. Both - # set the default value to the current OS to account for the default use case of - # building on the target OS. - feature.feature('host-os', __os_names) - feature.set_default('host-os', default_host_os()) - - feature.feature('target-os', __os_names, ['propagated', 'link-incompatible']) - feature.set_default('target-os', default_host_os()) - - feature.feature ('toolset', [], ['implicit', 'propagated' ,'symmetric']) - - feature.feature ('stdlib', ['native'], ['propagated', 'composite']) - - feature.feature ('link', ['shared', 'static'], ['propagated']) - feature.feature ('runtime-link', ['shared', 'static'], ['propagated']) - feature.feature ('runtime-debugging', ['on', 'off'], ['propagated']) - - - feature.feature ('optimization', ['off', 'speed', 'space'], ['propagated']) - feature.feature ('profiling', ['off', 'on'], ['propagated']) - feature.feature ('inlining', ['off', 'on', 'full'], ['propagated']) - - feature.feature ('threading', ['single', 'multi'], ['propagated']) - feature.feature ('rtti', ['on', 'off'], ['propagated']) - feature.feature ('exception-handling', ['on', 'off'], ['propagated']) - feature.feature ('debug-symbols', ['on', 'off'], ['propagated']) - feature.feature ('define', [], ['free']) - feature.feature ('include', [], ['free', 'path']) #order-sensitive - feature.feature ('cflags', [], ['free']) - feature.feature ('cxxflags', [], ['free']) - feature.feature ('linkflags', [], ['free']) - feature.feature ('archiveflags', [], ['free']) - feature.feature ('version', [], ['free']) - - feature.feature ('location-prefix', [], ['free']) - - feature.feature ('action', [], ['free']) - - - # The following features are incidental, since - # in themself they have no effect on build products. - # Not making them incidental will result in problems in corner - # cases, for example: - # - # unit-test a : a.cpp : <use>b ; - # lib b : a.cpp b ; - # - # Here, if <use> is not incidental, we'll decide we have two - # targets for a.obj with different properties, and will complain. - # - # Note that making feature incidental does not mean it's ignored. It may - # be ignored when creating the virtual target, but the rest of build process - # will use them. 
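A toy sketch (not the real property-comparison code) of why the dependency-like features declared just below are incidental: two requests for the same object file that differ only in an incidental feature such as <use> should compare equal rather than be reported as conflicting targets:

    # Strip incidental grists before comparing property sets; the grists
    # listed here are the ones declared incidental just below.
    INCIDENTAL = ('<use>', '<dependency>', '<implicit-dependency>')

    def significant(props):
        return frozenset(p for p in props if not p.startswith(INCIDENTAL))

    a = ['<optimization>speed', '<use>b']  # from 'unit-test a : a.cpp : <use>b ;'
    b = ['<optimization>speed']            # from 'lib b : a.cpp b ;'
    assert significant(a) == significant(b)  # one a.obj, no conflict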
- feature.feature ('use', [], ['free', 'dependency', 'incidental']) - feature.feature ('dependency', [], ['free', 'dependency', 'incidental']) - feature.feature ('implicit-dependency', [], ['free', 'dependency', 'incidental']) - - feature.feature('warnings', [ - 'on', # Enable default/"reasonable" warning level for the tool. - 'all', # Enable all possible warnings issued by the tool. - 'off'], # Disable all warnings issued by the tool. - ['incidental', 'propagated']) - - feature.feature('warnings-as-errors', [ - 'off', # Do not fail the compilation if there are warnings. - 'on'], # Fail the compilation if there are warnings. - ['incidental', 'propagated']) - - feature.feature ('source', [], ['free', 'dependency', 'incidental']) - feature.feature ('library', [], ['free', 'dependency', 'incidental']) - feature.feature ('file', [], ['free', 'dependency', 'incidental']) - feature.feature ('find-shared-library', [], ['free']) #order-sensitive ; - feature.feature ('find-static-library', [], ['free']) #order-sensitive ; - feature.feature ('library-path', [], ['free', 'path']) #order-sensitive ; - # Internal feature. - feature.feature ('library-file', [], ['free', 'dependency']) - - feature.feature ('name', [], ['free']) - feature.feature ('tag', [], ['free']) - feature.feature ('search', [], ['free', 'path']) #order-sensitive ; - feature.feature ('location', [], ['free', 'path']) - - feature.feature ('dll-path', [], ['free', 'path']) - feature.feature ('hardcode-dll-paths', ['true', 'false'], ['incidental']) - - - # This is internal feature which holds the paths of all dependency - # dynamic libraries. On Windows, it's needed so that we can all - # those paths to PATH, when running applications. - # On Linux, it's needed to add proper -rpath-link command line options. - feature.feature ('xdll-path', [], ['free', 'path']) - - #provides means to specify def-file for windows dlls. - feature.feature ('def-file', [], ['free', 'dependency']) - - # This feature is used to allow specific generators to run. - # For example, QT tools can only be invoked when QT library - # is used. In that case, <allow>qt will be in usage requirement - # of the library. - feature.feature ('allow', [], ['free']) - - # The addressing model to generate code for. Currently a limited set only - # specifying the bit size of pointers. - feature.feature('address-model', ['16', '32', '64'], ['propagated', 'optional']) - - # Type of CPU architecture to compile for. - feature.feature('architecture', [ - # x86 and x86-64 - 'x86', - - # ia64 - 'ia64', - - # Sparc - 'sparc', - - # RS/6000 & PowerPC - 'power', - - # MIPS/SGI - 'mips1', 'mips2', 'mips3', 'mips4', 'mips32', 'mips32r2', 'mips64', - - # HP/PA-RISC - 'parisc', - - # Advanced RISC Machines - 'arm', - - # Combined architectures for platforms/toolsets that support building for - # multiple architectures at once. "combined" would be the default multi-arch - # for the toolset. - 'combined', - 'combined-x86-power'], - - ['propagated', 'optional']) - - # The specific instruction set in an architecture to compile. 
- feature.feature('instruction-set', [ - # x86 and x86-64 - 'i386', 'i486', 'i586', 'i686', 'pentium', 'pentium-mmx', 'pentiumpro', 'pentium2', 'pentium3', - 'pentium3m', 'pentium-m', 'pentium4', 'pentium4m', 'prescott', 'nocona', 'conroe', 'conroe-xe', - 'conroe-l', 'allendale', 'mermon', 'mermon-xe', 'kentsfield', 'kentsfield-xe', 'penryn', 'wolfdale', - 'yorksfield', 'nehalem', 'k6', 'k6-2', 'k6-3', 'athlon', 'athlon-tbird', 'athlon-4', 'athlon-xp', - 'athlon-mp', 'k8', 'opteron', 'athlon64', 'athlon-fx', 'winchip-c6', 'winchip2', 'c3', 'c3-2', - - # ia64 - 'itanium', 'itanium1', 'merced', 'itanium2', 'mckinley', - - # Sparc - 'v7', 'cypress', 'v8', 'supersparc', 'sparclite', 'hypersparc', 'sparclite86x', 'f930', 'f934', - 'sparclet', 'tsc701', 'v9', 'ultrasparc', 'ultrasparc3', - - # RS/6000 & PowerPC - '401', '403', '405', '405fp', '440', '440fp', '505', '601', '602', - '603', '603e', '604', '604e', '620', '630', '740', '7400', - '7450', '750', '801', '821', '823', '860', '970', '8540', - 'power-common', 'ec603e', 'g3', 'g4', 'g5', 'power', 'power2', - 'power3', 'power4', 'power5', 'powerpc', 'powerpc64', 'rios', - 'rios1', 'rsc', 'rios2', 'rs64a', - - # MIPS - '4kc', '4kp', '5kc', '20kc', 'm4k', 'r2000', 'r3000', 'r3900', 'r4000', - 'r4100', 'r4300', 'r4400', 'r4600', 'r4650', - 'r6000', 'r8000', 'rm7000', 'rm9000', 'orion', 'sb1', 'vr4100', - 'vr4111', 'vr4120', 'vr4130', 'vr4300', - 'vr5000', 'vr5400', 'vr5500', - - # HP/PA-RISC - '700', '7100', '7100lc', '7200', '7300', '8000', - - # Advanced RISC Machines - 'armv2', 'armv2a', 'armv3', 'armv3m', 'armv4', 'armv4t', 'armv5', - 'armv5t', 'armv5te', 'armv6', 'armv6j', 'iwmmxt', 'ep9312'], - - ['propagated', 'optional']) - - feature.feature('conditional', [], ['incidental', 'free']) - - # The value of 'no' prevents building of a target. 
- feature.feature('build', ['yes', 'no'], ['optional']) - - # Windows-specific features - feature.feature ('user-interface', ['console', 'gui', 'wince', 'native', 'auto'], []) - feature.feature ('variant', [], ['implicit', 'composite', 'propagated', 'symmetric']) - - - variant ('debug', ['<optimization>off', '<debug-symbols>on', '<inlining>off', '<runtime-debugging>on']) - variant ('release', ['<optimization>speed', '<debug-symbols>off', '<inlining>full', - '<runtime-debugging>off', '<define>NDEBUG']) - variant ('profile', ['release'], ['<profiling>on', '<debug-symbols>on']) - - type.register ('H', ['h']) - type.register ('HPP', ['hpp'], 'H') - type.register ('C', ['c']) - - -reset () -register_globals () - -class SearchedLibTarget (virtual_target.AbstractFileTarget): - def __init__ (self, name, project, shared, real_name, search, action): - virtual_target.AbstractFileTarget.__init__ (self, name, 'SEARCHED_LIB', project, action) - - self.shared_ = shared - self.real_name_ = real_name - if not self.real_name_: - self.real_name_ = name - self.search_ = search - - def shared (self): - return self.shared_ - - def real_name (self): - return self.real_name_ - - def search (self): - return self.search_ - - def actualize_location (self, target): - bjam.call("NOTFILE", target) - - def path (self): - #FIXME: several functions rely on this not being None - return "" - - -class CScanner (scanner.Scanner): - def __init__ (self, includes): - scanner.Scanner.__init__ (self) - - self.includes_ = includes - - def pattern (self): - return r'#[ \t]*include[ ]*(<(.*)>|"(.*)")' - - def process (self, target, matches, binding): - - angle = regex.transform (matches, "<(.*)>") - quoted = regex.transform (matches, '"(.*)"') - - g = str(id(self)) - b = os.path.normpath(os.path.dirname(binding[0])) - - # Attach binding of including file to included targets. - # When target is directly created from virtual target - # this extra information is unnecessary. But in other - # cases, it allows to distinguish between two headers of the - # same name included from different places. - # We don't need this extra information for angle includes, - # since they should not depend on including file (we can't - # get literal "." in include path). - g2 = g + "#" + b - - g = "<" + g + ">" - g2 = "<" + g2 + ">" - angle = [g + x for x in angle] - quoted = [g2 + x for x in quoted] - - all = angle + quoted - bjam.call("mark-included", target, all) - - engine = get_manager().engine() - engine.set_target_variable(angle, "SEARCH", get_value(self.includes_)) - engine.set_target_variable(quoted, "SEARCH", [b] + get_value(self.includes_)) - - # Just propagate current scanner to includes, in a hope - # that includes do not change scanners. - get_manager().scanners().propagate(self, angle + quoted) - -scanner.register (CScanner, 'include') -type.set_scanner ('CPP', CScanner) -type.set_scanner ('C', CScanner) - -# Ported to trunk@47077 -class LibGenerator (generators.Generator): - """ The generator class for libraries (target type LIB). Depending on properties it will - request building of the approapriate specific type -- SHARED_LIB, STATIC_LIB or - SHARED_LIB. - """ - - def __init__(self, id = 'LibGenerator', composing = True, source_types = [], target_types_and_names = ['LIB'], requirements = []): - generators.Generator.__init__(self, id, composing, source_types, target_types_and_names, requirements) - - def run(self, project, name, prop_set, sources): - - # The lib generator is composing, and can be only invoked with - # explicit name. 
This check is present in generator.run (and so in
-        # builtin.LinkingGenerator), but we duplicate it here to avoid doing
-        # extra work.
-        if name:
-            properties = prop_set.raw()
-            # Determine the needed target type
-            actual_type = None
-            properties_grist = get_grist(properties)
-            if '<source>' not in properties_grist and \
-               ('<search>' in properties_grist or '<name>' in properties_grist):
-                actual_type = 'SEARCHED_LIB'
-            elif '<file>' in properties_grist:
-                # The generator for
-                actual_type = 'LIB'
-            elif '<link>shared' in properties:
-                actual_type = 'SHARED_LIB'
-            else:
-                actual_type = 'STATIC_LIB'
-
-            prop_set = prop_set.add_raw(['<main-target-type>LIB'])
-
-            # Construct the target.
-            return generators.construct(project, name, actual_type, prop_set, sources)
-
-    def viable_source_types(self):
-        return ['*']
-
-generators.register(LibGenerator())
-
-def lib(names, sources=[], requirements=[], default_build=[], usage_requirements=[]):
-    """The implementation of the 'lib' rule. Beyond the standard syntax, this
-    rule allows a simplified form: 'lib a b c ;'."""
-
-    if len(names) > 1:
-        if any(r.startswith('<name>') for r in requirements):
-            get_manager().errors()("When several names are given to the 'lib' rule\n" +
-                                   "it is not allowed to specify the <name> feature.")
-
-        if sources:
-            get_manager().errors()("When several names are given to the 'lib' rule\n" +
-                                   "it is not allowed to specify sources.")
-
-    project = get_manager().projects().current()
-    result = []
-
-    for name in names:
-        r = requirements[:]
-
-        # Support " lib a ; " and " lib a b c ; " syntax.
-        if not sources and not any(r.startswith("<name>") for r in requirements) \
-           and not any(r.startswith("<file") for r in requirements):
-            r.append("<name>" + name)
-
-        result.append(targets.create_typed_metatarget(name, "LIB", sources,
-                                                      r,
-                                                      default_build,
-                                                      usage_requirements))
-    return result
-
-get_manager().projects().add_rule("lib", lib)
-
-
-# Updated to trunk@47077
-class SearchedLibGenerator (generators.Generator):
-    def __init__ (self, id = 'SearchedLibGenerator', composing = False, source_types = [], target_types_and_names = ['SEARCHED_LIB'], requirements = []):
-        # TODO: the comment below looks strange. There are no requirements!
-        # The requirements cause the generators to be tried *only* when we're building
-        # a lib target and there's a 'search' feature. This seems ugly --- all we want
-        # is to make sure SearchedLibGenerator is not invoked deep in a transformation
-        # search.
-        generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements)
-
-    def run(self, project, name, prop_set, sources):
-
-        # If name is empty, we're not being called from the top level.
-        # In that case, we just fail immediately, because SearchedLibGenerator
-        # cannot be used to produce intermediate targets.
-        if not name:
-            return None
-
-        properties = prop_set.raw ()
-        shared = '<link>shared' in properties
-
-        a = virtual_target.NullAction (project.manager(), prop_set)
-
-        real_name = feature.get_values ('<name>', properties)
-        if real_name:
-            real_name = real_name[0]
-        else:
-            real_name = name
-        search = feature.get_values('<search>', properties)
-        usage_requirements = property_set.create(['<xdll-path>' + p for p in search])
-        t = SearchedLibTarget(name, project, shared, real_name, search, a)
-
-        # We return sources for a simple reason. If there's
-        #     lib png : z : <name>png ;
-        # the 'z' target should be returned, so that apps linking to
-        # 'png' will link to 'z', too.
- return(usage_requirements, [b2.manager.get_manager().virtual_targets().register(t)] + sources) - -generators.register (SearchedLibGenerator ()) - -### class prebuilt-lib-generator : generator -### { -### rule __init__ ( * : * ) -### { -### generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; -### } -### -### rule run ( project name ? : prop_set : sources * : multiple ? ) -### { -### local f = [ $(prop_set).get <file> ] ; -### return $(f) $(sources) ; -### } -### } -### -### generators.register -### [ new prebuilt-lib-generator builtin.prebuilt : : LIB : <file> ] ; - - -class CompileAction (virtual_target.Action): - def __init__ (self, manager, sources, action_name, prop_set): - virtual_target.Action.__init__ (self, manager, sources, action_name, prop_set) - - def adjust_properties (self, prop_set): - """ For all virtual targets for the same dependency graph as self, - i.e. which belong to the same main target, add their directories - to include path. - """ - s = self.targets () [0].creating_subvariant () - - return prop_set.add_raw (s.implicit_includes ('include', 'H')) - -class CCompilingGenerator (generators.Generator): - """ Declare a special compiler generator. - The only thing it does is changing the type used to represent - 'action' in the constructed dependency graph to 'CompileAction'. - That class in turn adds additional include paths to handle a case - when a source file includes headers which are generated themselfs. - """ - def __init__ (self, id, composing, source_types, target_types_and_names, requirements): - # TODO: (PF) What to do with optional_properties? It seemed that, in the bjam version, the arguments are wrong. - generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements) - - def action_class (self): - return CompileAction - -def register_c_compiler (id, source_types, target_types, requirements, optional_properties = []): - g = CCompilingGenerator (id, False, source_types, target_types, requirements + optional_properties) - return generators.register (g) - - -class LinkingGenerator (generators.Generator): - """ The generator class for handling EXE and SHARED_LIB creation. - """ - def __init__ (self, id, composing, source_types, target_types_and_names, requirements): - generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements) - - def run (self, project, name, prop_set, sources): - - lib_sources = prop_set.get('<library>') - sources.extend(lib_sources) - - # Add <library-path> properties for all searched libraries - extra = [] - for s in sources: - if s.type () == 'SEARCHED_LIB': - search = s.search() - extra.extend(property.Property('<library-path>', sp) for sp in search) - - orig_xdll_path = [] - - if prop_set.get('<hardcode-dll-paths>') == ['true'] \ - and type.is_derived(self.target_types_ [0], 'EXE'): - xdll_path = prop_set.get('<xdll-path>') - orig_xdll_path = [ replace_grist(x, '<dll-path>') for x in xdll_path ] - # It's possible that we have libraries in sources which did not came - # from 'lib' target. For example, libraries which are specified - # just as filenames as sources. We don't have xdll-path properties - # for such target, but still need to add proper dll-path properties. - for s in sources: - if type.is_derived (s.type (), 'SHARED_LIB') and not s.action (): - # Unfortunately, we don't have a good way to find the path - # to a file, so use this nasty approach. 
- p = s.project() - location = path.root(s.name(), p.get('source-location')) - xdll_path.append(path.parent(location)) - - extra.extend(property.Property('<dll-path>', sp) for sp in xdll_path) - - if extra: - prop_set = prop_set.add_raw (extra) - - result = generators.Generator.run(self, project, name, prop_set, sources) - - if result: - ur = self.extra_usage_requirements(result, prop_set) - ur = ur.add(property_set.create(orig_xdll_path)) - else: - return None - - return(ur, result) - - def extra_usage_requirements (self, created_targets, prop_set): - - result = property_set.empty () - extra = [] - - # Add appropriate <xdll-path> usage requirements. - raw = prop_set.raw () - if '<link>shared' in raw: - paths = [] - - # TODO: is it safe to use the current directory? I think we should use - # another mechanism to allow this to be run from anywhere. - pwd = os.getcwd() - - for t in created_targets: - if type.is_derived(t.type(), 'SHARED_LIB'): - paths.append(path.root(path.make(t.path()), pwd)) - - extra += replace_grist(paths, '<xdll-path>') - - # We need to pass <xdll-path> features that we've got from sources, - # because if shared library is built, exe which uses it must know paths - # to other shared libraries this one depends on, to be able to find them - # all at runtime. - - # Just pass all features in property_set, it's theorically possible - # that we'll propagate <xdll-path> features explicitly specified by - # the user, but then the user's to blaim for using internal feature. - values = prop_set.get('<xdll-path>') - extra += replace_grist(values, '<xdll-path>') - - if extra: - result = property_set.create(extra) - - return result - - def generated_targets (self, sources, prop_set, project, name): - - # sources to pass to inherited rule - sources2 = [] - # sources which are libraries - libraries = [] - - # Searched libraries are not passed as argument to linker - # but via some option. So, we pass them to the action - # via property. - fsa = [] - fst = [] - for s in sources: - if type.is_derived(s.type(), 'SEARCHED_LIB'): - n = s.real_name() - if s.shared(): - fsa.append(n) - - else: - fst.append(n) - - else: - sources2.append(s) - - add = [] - if fsa: - add.append("<find-shared-library>" + '&&'.join(fsa)) - if fst: - add.append("<find-static-library>" + '&&'.join(fst)) - - spawn = generators.Generator.generated_targets(self, sources2, prop_set.add_raw(add), project, name) - return spawn - - -def register_linker(id, source_types, target_types, requirements): - g = LinkingGenerator(id, True, source_types, target_types, requirements) - generators.register(g) - -class ArchiveGenerator (generators.Generator): - """ The generator class for handling STATIC_LIB creation. - """ - def __init__ (self, id, composing, source_types, target_types_and_names, requirements): - generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements) - - def run (self, project, name, prop_set, sources): - sources += prop_set.get ('<library>') - - result = generators.Generator.run (self, project, name, prop_set, sources) - - return result - -### rule register-archiver ( id composing ? 
: source_types + : target_types + :
-###      requirements * )
-### {
-###     local g = [ new ArchiveGenerator $(id) $(composing) : $(source_types)
-###       : $(target_types) : $(requirements) ] ;
-###     generators.register $(g) ;
-### }
-###
-###
-### IMPORT $(__name__) : register-linker register-archiver
-###    : : generators.register-linker generators.register-archiver ;
-###
-###
-###
-
-get_manager().projects().add_rule("variant", variant)
-
-import stage
-import symlink
-import message
diff --git a/jam-files/boost-build/tools/cast.jam b/jam-files/boost-build/tools/cast.jam
deleted file mode 100644
index 6c84922f..00000000
--- a/jam-files/boost-build/tools/cast.jam
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2005 Vladimir Prus.
-# Distributed under the Boost Software License, Version 1.0. (See
-# accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-
-# Defines the main target 'cast', used to change the type of a target. For
-# example, in the Qt library one wants two kinds of CPP files -- those that are
-# just compiled and those that are passed via the MOC tool.
-#
-# This is done with:
-#
-#   exe main : main.cpp [ cast _ moccable-cpp : widget.cpp ] ;
-#
-# Boost.Build will assign target type CPP to both main.cpp and widget.cpp. Then,
-# the cast rule will change the target type of widget.cpp to MOCCABLE-CPP, and Qt
-# support will run the MOC tool as part of the build process.
-#
-# At the moment, the 'cast' rule only works for non-derived (source) targets.
-#
-# TODO: The following comment is unclear or incorrect. Clean it up.
-# > Another solution would be to add a separate main target 'moc-them' that
-# > would moc all the passed sources, no matter what their type is, but I prefer
-# > cast, as defining a new target type + generator for that type is somewhat
-# > simpler than defining a main target rule.
-
-import "class" : new ;
-import errors ;
-import project ;
-import property-set ;
-import targets ;
-import type ;
-
-
-class cast-target-class : typed-target
-{
-    import type ;
-
-    rule __init__ ( name : project : type : sources * : requirements * :
-        default-build * : usage-requirements * )
-    {
-        typed-target.__init__ $(name) : $(project) : $(type) : $(sources) :
-            $(requirements) : $(default-build) : $(usage-requirements) ;
-    }
-
-    rule construct ( name : source-targets * : property-set )
-    {
-        local result ;
-        for local s in $(source-targets)
-        {
-            if ! [ class.is-a $(s) : file-target ]
-            {
-                import errors ;
-                errors.user-error Source to the 'cast' rule is not a file! ;
-            }
-            if [ $(s).action ]
-            {
-                import errors ;
-                errors.user-error Only non-derived targets are allowed for
-                    'cast'. : when building [ full-name ] ;
-            }
-            local r = [ $(s).clone-with-different-type $(self.type) ] ;
-            result += [ virtual-target.register $(r) ] ;
-        }
-        return [ property-set.empty ] $(result) ;
-    }
-}
-
-
-rule cast ( name type : sources * : requirements * : default-build * :
-    usage-requirements * )
-{
-    local project = [ project.current ] ;
-
-    local real-type = [ type.type-from-rule-name $(type) ] ;
-    if !
$(real-type)
-    {
-        errors.user-error No type corresponds to the main target rule name
-            '$(type)' : "Hint: try a lowercase name" ;
-    }
-
-    targets.main-target-alternative [ new cast-target-class $(name) : $(project)
-        : $(real-type)
-        : [ targets.main-target-sources $(sources) : $(name) ]
-        : [ targets.main-target-requirements $(requirements) : $(project) ]
-        : [ targets.main-target-default-build $(default-build) : $(project) ]
-        : [ targets.main-target-usage-requirements $(usage-requirements) :
-            $(project) ] ] ;
-}
-
-
-IMPORT $(__name__) : cast : : cast ;
diff --git a/jam-files/boost-build/tools/cast.py b/jam-files/boost-build/tools/cast.py
deleted file mode 100644
index 8f053f11..00000000
--- a/jam-files/boost-build/tools/cast.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Status: ported
-# Base revision: 64432.
-# Copyright 2005-2010 Vladimir Prus.
-# Distributed under the Boost Software License, Version 1.0. (See
-# accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-
-# Defines the main target 'cast', used to change the type of a target. For
-# example, in the Qt library one wants two kinds of CPP files -- those that are
-# just compiled and those that are passed via the MOC tool.
-#
-# This is done with:
-#
-#   exe main : main.cpp [ cast _ moccable-cpp : widget.cpp ] ;
-#
-# Boost.Build will assign target type CPP to both main.cpp and widget.cpp. Then,
-# the cast rule will change the target type of widget.cpp to MOCCABLE-CPP, and Qt
-# support will run the MOC tool as part of the build process.
-#
-# At the moment, the 'cast' rule only works for non-derived (source) targets.
-#
-# TODO: The following comment is unclear or incorrect. Clean it up.
-# > Another solution would be to add a separate main target 'moc-them' that
-# > would moc all the passed sources, no matter what their type is, but I prefer
-# > cast, as defining a new target type + generator for that type is somewhat
-# > simpler than defining a main target rule.
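As a rough sketch of what each 'cast' amounts to at the virtual-target level (illustrative only -- `cast_one` and `manager` are hypothetical names; the method calls mirror the port below):

    # Hypothetical helper: retype one source target, as 'cast' does.
    def cast_one(manager, s, new_type):
        # 'cast' only accepts plain source files, never derived targets.
        if s.action():
            manager.errors()("Only non-derived targets allowed as sources for 'cast'.")
        # Same file on disk; only the target type changes, so downstream
        # generators (e.g. Qt's MOC) will process it differently.
        r = s.clone_with_different_type(new_type)
        return manager.virtual_targets().register(r)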
-
-import b2.build.targets as targets
-import b2.build.virtual_target as virtual_target
-
-from b2.build import property_set
-from b2.manager import get_manager
-from b2.util import bjam_signature
-
-class CastTargetClass(targets.TypedTarget):
-
-    def construct(self, name, source_targets, ps):
-        result = []
-        for s in source_targets:
-            if not isinstance(s, virtual_target.FileTarget):
-                get_manager().errors()("Source to the 'cast' metatarget is not a file")
-
-            if s.action():
-                get_manager().errors()("Only non-derived targets allowed as sources for 'cast'.")
-
-            r = s.clone_with_different_type(self.type())
-            result.append(get_manager().virtual_targets().register(r))
-
-        return property_set.empty(), result
-
-
-@bjam_signature((["name", "type"], ["sources", "*"], ["requirements", "*"],
-                 ["default_build", "*"], ["usage_requirements", "*"]))
-def cast(name, type, sources, requirements, default_build, usage_requirements):
-
-    from b2.manager import get_manager
-    t = get_manager().targets()
-
-    project = get_manager().projects().current()
-
-    return t.main_target_alternative(
-        CastTargetClass(name, project, type,
-                        t.main_target_sources(sources, name),
-                        t.main_target_requirements(requirements, project),
-                        t.main_target_default_build(default_build, project),
-                        t.main_target_usage_requirements(usage_requirements, project)))
-
-
-get_manager().projects().add_rule("cast", cast)
diff --git a/jam-files/boost-build/tools/clang-darwin.jam b/jam-files/boost-build/tools/clang-darwin.jam
deleted file mode 100644
index a8abc7d6..00000000
--- a/jam-files/boost-build/tools/clang-darwin.jam
+++ /dev/null
@@ -1,170 +0,0 @@
-# Copyright Vladimir Prus 2004.
-# Copyright Noel Belcourt 2007.
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt
-# or copy at http://www.boost.org/LICENSE_1_0.txt)
-
-import clang ;
-import feature : feature ;
-import os ;
-import toolset ;
-import toolset : flags ;
-import gcc ;
-import common ;
-import errors ;
-import generators ;
-
-feature.extend-subfeature toolset clang : platform : darwin ;
-
-toolset.inherit-generators clang-darwin
-  <toolset>clang <toolset-clang:platform>darwin
-  : gcc
-  # Don't inherit PCH generators. They were not tested, and probably
-  # don't work for this compiler.
-  : gcc.mingw.link gcc.mingw.link.dll gcc.compile.c.pch gcc.compile.c++.pch
-  ;
-
-generators.override clang-darwin.prebuilt : builtin.lib-generator ;
-generators.override clang-darwin.prebuilt : builtin.prebuilt ;
-generators.override clang-darwin.searched-lib-generator : searched-lib-generator ;
-
-toolset.inherit-rules clang-darwin : gcc ;
-toolset.inherit-flags clang-darwin : gcc
-  : <inlining>off <inlining>on <inlining>full <optimization>space
-    <warnings>off <warnings>all <warnings>on
-    <architecture>x86/<address-model>32
-    <architecture>x86/<address-model>64
-  ;
-
-if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ]
-{
-    .debug-configuration = true ;
-}
-
-# vectorization diagnostics
-feature vectorize : off on full ;
-
-# Initializes the clang-darwin toolset.
-# version is optional
-# name (default clang++) is used to invoke the specified clang compiler
-# compile and link options allow you to specify additional command line options for each version
-rule init ( version ?
: command * : options * ) -{ - command = [ common.get-invocation-command clang-darwin : clang++ - : $(command) ] ; - - # Determine the version - local command-string = $(command:J=" ") ; - if $(command) - { - version ?= [ MATCH "^([0-9.]+)" - : [ SHELL "$(command-string) -dumpversion" ] ] ; - } - - local condition = [ common.check-init-parameters clang-darwin - : version $(version) ] ; - - common.handle-options clang-darwin : $(condition) : $(command) : $(options) ; - - gcc.init-link-flags clang-darwin darwin $(condition) ; - -} - -SPACE = " " ; - -flags clang-darwin.compile OPTIONS <cflags> ; -flags clang-darwin.compile OPTIONS <cxxflags> ; -# flags clang-darwin.compile INCLUDES <include> ; - -# Declare flags and action for compilation. -toolset.flags clang-darwin.compile OPTIONS <optimization>off : -O0 ; -toolset.flags clang-darwin.compile OPTIONS <optimization>speed : -O3 ; -toolset.flags clang-darwin.compile OPTIONS <optimization>space : -Os ; - -toolset.flags clang-darwin.compile OPTIONS <inlining>off : -fno-inline ; -toolset.flags clang-darwin.compile OPTIONS <inlining>on : -Wno-inline ; -toolset.flags clang-darwin.compile OPTIONS <inlining>full : -finline-functions -Wno-inline ; - -toolset.flags clang-darwin.compile OPTIONS <warnings>off : -w ; -toolset.flags clang-darwin.compile OPTIONS <warnings>on : -Wall ; -toolset.flags clang-darwin.compile OPTIONS <warnings>all : -Wall -pedantic ; -toolset.flags clang-darwin.compile OPTIONS <warnings-as-errors>on : -Werror ; - -toolset.flags clang-darwin.compile OPTIONS <debug-symbols>on : -g ; -toolset.flags clang-darwin.compile OPTIONS <profiling>on : -pg ; -toolset.flags clang-darwin.compile OPTIONS <rtti>off : -fno-rtti ; - -actions compile.c -{ - "$(CONFIG_COMMAND)" -x c $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.c++ -{ - "$(CONFIG_COMMAND)" -x c++ $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -flags clang-darwin ARFLAGS <archiveflags> ; - -# Default value. Mostly for the sake of clang-linux -# that inherits from gcc, but does not has the same -# logic to set the .AR variable. We can put the same -# logic in clang-linux, but that's hardly worth the trouble -# as on Linux, 'ar' is always available. -.AR = ar ; - -rule archive ( targets * : sources * : properties * ) -{ - # Always remove archive and start again. Here's rationale from - # Andre Hentz: - # - # I had a file, say a1.c, that was included into liba.a. - # I moved a1.c to a2.c, updated my Jamfiles and rebuilt. - # My program was crashing with absurd errors. - # After some debugging I traced it back to the fact that a1.o was *still* - # in liba.a - # - # Rene Rivera: - # - # Originally removing the archive was done by splicing an RM - # onto the archive action. That makes archives fail to build on NT - # when they have many files because it will no longer execute the - # action directly and blow the line length limit. Instead we - # remove the file in a different action, just before the building - # of the archive. 
- # - local clean.a = $(targets[1])(clean) ; - TEMPORARY $(clean.a) ; - NOCARE $(clean.a) ; - LOCATE on $(clean.a) = [ on $(targets[1]) return $(LOCATE) ] ; - DEPENDS $(clean.a) : $(sources) ; - DEPENDS $(targets) : $(clean.a) ; - common.RmTemps $(clean.a) : $(targets) ; -} - -actions piecemeal archive -{ - "$(.AR)" $(AROPTIONS) rc "$(<)" "$(>)" - "ranlib" -cs "$(<)" -} - -flags clang-darwin.link USER_OPTIONS <linkflags> ; - -# Declare actions for linking -rule link ( targets * : sources * : properties * ) -{ - SPACE on $(targets) = " " ; - # Serialize execution of the 'link' action, since - # running N links in parallel is just slower. - JAM_SEMAPHORE on $(targets) = <s>clang-darwin-link-semaphore ; -} - -actions link bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(USER_OPTIONS) -L"$(LINKPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) $(OPTIONS) -} - -actions link.dll bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(USER_OPTIONS) -L"$(LINKPATH)" -o "$(<)" -single_module -dynamiclib -install_name "$(<[1]:D=)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) $(OPTIONS) -} diff --git a/jam-files/boost-build/tools/clang-linux.jam b/jam-files/boost-build/tools/clang-linux.jam deleted file mode 100644 index 036d749e..00000000 --- a/jam-files/boost-build/tools/clang-linux.jam +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) 2003 Michael Stevens -# Copyright (c) 2010-2011 Bryce Lelbach (blelbach@cct.lsu.edu, maintainer) -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import toolset ; -import feature ; -import toolset : flags ; - -import clang ; -import gcc ; -import common ; -import errors ; -import generators ; -import type ; -import numbers ; - -feature.extend-subfeature toolset clang : platform : linux ; - -toolset.inherit-generators clang-linux - <toolset>clang <toolset-clang:platform>linux : gcc - : gcc.mingw.link gcc.mingw.link.dll gcc.cygwin.link gcc.cygwin.link.dll ; -generators.override clang-linux.prebuilt : builtin.lib-generator ; -generators.override clang-linux.prebuilt : builtin.prebuilt ; -generators.override clang-linux.searched-lib-generator : searched-lib-generator ; - -# Override default do-nothing generators. -generators.override clang-linux.compile.c.pch : pch.default-c-pch-generator ; -generators.override clang-linux.compile.c++.pch : pch.default-cpp-pch-generator ; - -type.set-generated-target-suffix PCH - : <toolset>clang <toolset-clang:platform>linux : pth ; - -toolset.inherit-rules clang-linux : gcc ; -toolset.inherit-flags clang-linux : gcc - : <inlining>off <inlining>on <inlining>full - <optimization>space <optimization>speed - <warnings>off <warnings>all <warnings>on ; - -if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ] { - .debug-configuration = true ; -} - -rule init ( version ? 
: command * : options * ) { - command = [ common.get-invocation-command clang-linux : clang++ - : $(command) ] ; - - # Determine the version - local command-string = $(command:J=" ") ; - - if $(command) { - version ?= [ MATCH "version ([0-9.]+)" - : [ SHELL "$(command-string) --version" ] ] ; - } - - local condition = [ common.check-init-parameters clang-linux - : version $(version) ] ; - - common.handle-options clang-linux : $(condition) : $(command) : $(options) ; - - gcc.init-link-flags clang-linux gnu $(condition) ; -} - -############################################################################### -# Flags - -toolset.flags clang-linux.compile OPTIONS <cflags> ; -toolset.flags clang-linux.compile OPTIONS <cxxflags> ; - -toolset.flags clang-linux.compile OPTIONS <optimization>off : ; -toolset.flags clang-linux.compile OPTIONS <optimization>speed : -O3 ; -toolset.flags clang-linux.compile OPTIONS <optimization>space : -Os ; - -# note: clang silently ignores some of these inlining options -toolset.flags clang-linux.compile OPTIONS <inlining>off : -fno-inline ; -toolset.flags clang-linux.compile OPTIONS <inlining>on : -Wno-inline ; -toolset.flags clang-linux.compile OPTIONS <inlining>full : -finline-functions -Wno-inline ; - -toolset.flags clang-linux.compile OPTIONS <warnings>off : -w ; -toolset.flags clang-linux.compile OPTIONS <warnings>on : -Wall ; -toolset.flags clang-linux.compile OPTIONS <warnings>all : -Wall -pedantic ; -toolset.flags clang-linux.compile OPTIONS <warnings-as-errors>on : -Werror ; - -toolset.flags clang-linux.compile OPTIONS <debug-symbols>on : -g ; -toolset.flags clang-linux.compile OPTIONS <profiling>on : -pg ; -toolset.flags clang-linux.compile OPTIONS <rtti>off : -fno-rtti ; - -############################################################################### -# C and C++ compilation - -rule compile.c++ ( targets * : sources * : properties * ) { - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-fpic $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; - - local pth-file = [ on $(<) return $(PCH_FILE) ] ; - - if $(pth-file) { - DEPENDS $(<) : $(pth-file) ; - compile.c++.with-pch $(targets) : $(sources) ; - } - else { - compile.c++.without-pth $(targets) : $(sources) ; - } -} - -actions compile.c++.without-pth { - "$(CONFIG_COMMAND)" -c -x c++ $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -o "$(<)" "$(>)" -} - -actions compile.c++.with-pch bind PCH_FILE -{ - "$(CONFIG_COMMAND)" -c -x c++ $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -Xclang -include-pth -Xclang "$(PCH_FILE)" -o "$(<)" "$(>)" -} - -rule compile.c ( targets * : sources * : properties * ) -{ - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-fpic $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; - - local pth-file = [ on $(<) return $(PCH_FILE) ] ; - - if $(pth-file) { - DEPENDS $(<) : $(pth-file) ; - compile.c.with-pch $(targets) : $(sources) ; - } - else { - compile.c.without-pth $(targets) : $(sources) ; - } -} - -actions compile.c.without-pth -{ - "$(CONFIG_COMMAND)" -c -x c $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.c.with-pch bind PCH_FILE -{ - "$(CONFIG_COMMAND)" -c -x c $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -Xclang -include-pth -Xclang "$(PCH_FILE)" -c -o "$(<)" "$(>)" -} - 
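The compile.c and compile.c++ rules above both dispatch on whether a pre-tokenized header (PCH_FILE) is bound to the target. A minimal Python sketch of the command line the with/without-PCH actions assemble, assuming the same flag layout as those actions (illustrative, not part of the toolset):

    # Sketch: build a clang compile command the way the actions above do.
    def clang_compile_cmd(cc, lang, source, obj, options, defines, includes,
                          pth_file=None):
        cmd = [cc, "-c", "-x", lang] + list(options)
        cmd += ["-D" + d for d in defines]
        cmd += ["-I" + i for i in includes]
        if pth_file:
            # Mirrors the *.with-pch actions: load the pre-tokenized header.
            cmd += ["-Xclang", "-include-pth", "-Xclang", pth_file]
        return cmd + ["-o", obj, source]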
-############################################################################### -# PCH emission - -rule compile.c++.pch ( targets * : sources * : properties * ) { - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-fpic $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; -} - -actions compile.c++.pch { - rm -f "$(<)" && "$(CONFIG_COMMAND)" -x c++-header $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -Xclang -emit-pth -o "$(<)" "$(>)" -} - -rule compile.c.pch ( targets * : sources * : properties * ) { - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-fpic $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; -} - -actions compile.c.pch -{ - rm -f "$(<)" && "$(CONFIG_COMMAND)" -x c-header $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -Xclang -emit-pth -o "$(<)" "$(>)" -} - -############################################################################### -# Linking - -SPACE = " " ; - -rule link ( targets * : sources * : properties * ) { - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; - SPACE on $(targets) = " " ; - JAM_SEMAPHORE on $(targets) = <s>clang-linux-link-semaphore ; -} - -actions link bind LIBRARIES { - "$(CONFIG_COMMAND)" -L"$(LINKPATH)" -Wl,-R$(SPACE)-Wl,"$(RPATH)" -Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) $(OPTIONS) $(USER_OPTIONS) -} - -rule link.dll ( targets * : sources * : properties * ) { - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; - SPACE on $(targets) = " " ; - JAM_SEMAPHORE on $(targets) = <s>clang-linux-link-semaphore ; -} - -# Differ from 'link' above only by -shared. -actions link.dll bind LIBRARIES { - "$(CONFIG_COMMAND)" -L"$(LINKPATH)" -Wl,-R$(SPACE)-Wl,"$(RPATH)" -o "$(<)" -Wl,-soname$(SPACE)-Wl,$(<[1]:D=) -shared "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) $(OPTIONS) $(USER_OPTIONS) -} - diff --git a/jam-files/boost-build/tools/clang.jam b/jam-files/boost-build/tools/clang.jam deleted file mode 100644 index e0ac9a55..00000000 --- a/jam-files/boost-build/tools/clang.jam +++ /dev/null @@ -1,27 +0,0 @@ -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt -# or copy at http://www.boost.org/LICENSE_1_0.txt) - -# This is a generic 'clang' toolset. Depending on the current system, it -# forwards either to 'clang-unix' or 'clang-darwin' modules. 
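The init rule below forwards to clang-darwin on Mac OS X and to clang-linux everywhere else. A Python equivalent of that dispatch (illustrative sketch only):

    import platform

    def clang_toolset():
        # MACOSX selects clang-darwin; every other platform gets clang-linux,
        # matching the jam 'init' rule below.
        return "clang-darwin" if platform.system() == "Darwin" else "clang-linux"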
- -import feature ; -import os ; -import toolset ; - -feature.extend toolset : clang ; -feature.subfeature toolset clang : platform : : propagated link-incompatible ; - -rule init ( * : * ) -{ - if [ os.name ] = MACOSX - { - toolset.using clang-darwin : - $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - else - { - toolset.using clang-linux : - $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } -} diff --git a/jam-files/boost-build/tools/common.jam b/jam-files/boost-build/tools/common.jam deleted file mode 100644 index ed835a36..00000000 --- a/jam-files/boost-build/tools/common.jam +++ /dev/null @@ -1,994 +0,0 @@ -# Copyright 2003, 2005 Dave Abrahams -# Copyright 2005, 2006 Rene Rivera -# Copyright 2005 Toon Knapen -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Provides actions common to all toolsets, such as creating directories and -# removing files. - -import os ; -import modules ; -import utility ; -import print ; -import type ; -import feature ; -import errors ; -import path ; -import sequence ; -import toolset ; -import virtual-target ; - -if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ] -{ - .debug-configuration = true ; -} -if [ MATCH (--show-configuration) : [ modules.peek : ARGV ] ] -{ - .show-configuration = true ; -} - -# Configurations -# -# The following class helps to manage toolset configurations. Each configuration -# has a unique ID and one or more parameters. A typical example of a unique ID -# is a condition generated by 'common.check-init-parameters' rule. Other kinds -# of IDs can be used. Parameters may include any details about the configuration -# like 'command', 'path', etc. -# -# A toolset configuration may be in one of the following states: -# -# - registered -# Configuration has been registered (e.g. explicitly or by auto-detection -# code) but has not yet been marked as used, i.e. 'toolset.using' rule has -# not yet been called for it. -# - used -# Once called 'toolset.using' rule marks the configuration as 'used'. -# -# The main difference between the states above is that while a configuration is -# 'registered' its options can be freely changed. This is useful in particular -# for autodetection code - all detected configurations may be safely overwritten -# by user code. - -class configurations -{ - import errors ; - - rule __init__ ( ) - { - } - - # Registers a configuration. - # - # Returns 'true' if the configuration has been added and an empty value if - # it already exists. Reports an error if the configuration is 'used'. - # - rule register ( id ) - { - if $(id) in $(self.used) - { - errors.error "common: the configuration '$(id)' is in use" ; - } - - local retval ; - - if ! $(id) in $(self.all) - { - self.all += $(id) ; - - # Indicate that a new configuration has been added. - retval = true ; - } - - return $(retval) ; - } - - # Mark a configuration as 'used'. - # - # Returns 'true' if the state of the configuration has been changed to - # 'used' and an empty value if it the state has not been changed. Reports an - # error if the configuration is not known. - # - rule use ( id ) - { - if ! $(id) in $(self.all) - { - errors.error "common: the configuration '$(id)' is not known" ; - } - - local retval ; - - if ! $(id) in $(self.used) - { - self.used += $(id) ; - - # Indicate that the configuration has been marked as 'used'. 
- retval = true ; - } - - return $(retval) ; - } - - # Return all registered configurations. - # - rule all ( ) - { - return $(self.all) ; - } - - # Return all used configurations. - # - rule used ( ) - { - return $(self.used) ; - } - - # Returns the value of a configuration parameter. - # - rule get ( id : param ) - { - return $(self.$(param).$(id)) ; - } - - # Sets the value of a configuration parameter. - # - rule set ( id : param : value * ) - { - self.$(param).$(id) = $(value) ; - } -} - - -# The rule for checking toolset parameters. Trailing parameters should all be -# parameter name/value pairs. The rule will check that each parameter either has -# a value in each invocation or has no value in each invocation. Also, the rule -# will check that the combination of all parameter values is unique in all -# invocations. -# -# Each parameter name corresponds to a subfeature. This rule will declare a -# subfeature the first time a non-empty parameter value is passed and will -# extend it with all the values. -# -# The return value from this rule is a condition to be used for flags settings. -# -rule check-init-parameters ( toolset requirement * : * ) -{ - local sig = $(toolset) ; - local condition = <toolset>$(toolset) ; - local subcondition ; - for local index in 2 3 4 5 6 7 8 9 - { - local name = $($(index)[1]) ; - local value = $($(index)[2]) ; - - if $(value)-is-not-empty - { - condition = $(condition)-$(value) ; - if $(.had-unspecified-value.$(toolset).$(name)) - { - errors.user-error - "$(toolset) initialization: parameter '$(name)'" - "inconsistent" : "no value was specified in earlier" - "initialization" : "an explicit value is specified now" ; - } - # The below logic is for intel compiler. It calls this rule with - # 'intel-linux' and 'intel-win' as toolset, so we need to get the - # base part of toolset name. We can not pass 'intel' as toolset - # because in that case it will be impossible to register versionless - # intel-linux and intel-win toolsets of a specific version. - local t = $(toolset) ; - local m = [ MATCH ([^-]*)- : $(toolset) ] ; - if $(m) - { - t = $(m[1]) ; - } - if ! $(.had-value.$(toolset).$(name)) - { - if ! $(.declared-subfeature.$(t).$(name)) - { - feature.subfeature toolset $(t) : $(name) : : propagated ; - .declared-subfeature.$(t).$(name) = true ; - } - .had-value.$(toolset).$(name) = true ; - } - feature.extend-subfeature toolset $(t) : $(name) : $(value) ; - subcondition += <toolset-$(t):$(name)>$(value) ; - } - else - { - if $(.had-value.$(toolset).$(name)) - { - errors.user-error - "$(toolset) initialization: parameter '$(name)'" - "inconsistent" : "an explicit value was specified in an" - "earlier initialization" : "no value is specified now" ; - } - .had-unspecified-value.$(toolset).$(name) = true ; - } - sig = $(sig)$(value:E="")- ; - } - if $(sig) in $(.all-signatures) - { - local message = - "duplicate initialization of $(toolset) with the following parameters: " ; - for local index in 2 3 4 5 6 7 8 9 - { - local p = $($(index)) ; - if $(p) - { - message += "$(p[1]) = $(p[2]:E=<unspecified>)" ; - } - } - message += "previous initialization at $(.init-loc.$(sig))" ; - errors.user-error - $(message[1]) : $(message[2]) : $(message[3]) : $(message[4]) : - $(message[5]) : $(message[6]) : $(message[7]) : $(message[8]) ; - } - .all-signatures += $(sig) ; - .init-loc.$(sig) = [ errors.nearest-user-location ] ; - - # If we have a requirement, this version should only be applied under that - # condition. 
To accomplish this we add a toolset requirement that imposes - # the toolset subcondition, which encodes the version. - if $(requirement) - { - local r = <toolset>$(toolset) $(requirement) ; - r = $(r:J=,) ; - toolset.add-requirements $(r):$(subcondition) ; - } - - # We add the requirements, if any, to the condition to scope the toolset - # variables and options to this specific version. - condition += $(requirement) ; - - if $(.show-configuration) - { - ECHO notice: $(condition) ; - } - return $(condition:J=/) ; -} - - -# A helper rule to get the command to invoke some tool. If -# 'user-provided-command' is not given, tries to find binary named 'tool' in -# PATH and in the passed 'additional-path'. Otherwise, verifies that the first -# element of 'user-provided-command' is an existing program. -# -# This rule returns the command to be used when invoking the tool. If we can not -# find the tool, a warning is issued. If 'path-last' is specified, PATH is -# checked after 'additional-paths' when searching for 'tool'. -# -rule get-invocation-command-nodefault ( toolset : tool : - user-provided-command * : additional-paths * : path-last ? ) -{ - local command ; - if ! $(user-provided-command) - { - command = [ find-tool $(tool) : $(additional-paths) : $(path-last) ] ; - if ! $(command) && $(.debug-configuration) - { - ECHO "warning: toolset $(toolset) initialization: can not find tool $(tool)" ; - ECHO "warning: initialized from" [ errors.nearest-user-location ] ; - } - } - else - { - command = [ check-tool $(user-provided-command) ] ; - if ! $(command) && $(.debug-configuration) - { - ECHO "warning: toolset $(toolset) initialization: " ; - ECHO "warning: can not find user-provided command " '$(user-provided-command)' ; - ECHO "warning: initialized from" [ errors.nearest-user-location ] ; - } - } - - return $(command) ; -} - - -# Same as get-invocation-command-nodefault, except that if no tool is found, -# returns either the user-provided-command, if present, or the 'tool' parameter. -# -rule get-invocation-command ( toolset : tool : user-provided-command * : - additional-paths * : path-last ? ) -{ - local result = [ get-invocation-command-nodefault $(toolset) : $(tool) : - $(user-provided-command) : $(additional-paths) : $(path-last) ] ; - - if ! $(result) - { - if $(user-provided-command) - { - result = $(user-provided-command) ; - } - else - { - result = $(tool) ; - } - } - return $(result) ; -} - - -# Given an invocation command return the absolute path to the command. This -# works even if command has no path element and was found on the PATH. -# -rule get-absolute-tool-path ( command ) -{ - if $(command:D) - { - return $(command:D) ; - } - else - { - local m = [ GLOB [ modules.peek : PATH Path path ] : $(command) $(command).exe ] ; - return $(m[1]:D) ; - } -} - - -# Attempts to find tool (binary) named 'name' in PATH and in 'additional-paths'. -# If found in PATH, returns 'name' and if found in additional paths, returns -# absolute name. If the tool is found in several directories, returns the -# first path found. Otherwise, returns an empty string. If 'path-last' is -# specified, PATH is searched after 'additional-paths'. -# -local rule find-tool ( name : additional-paths * : path-last ? ) -{ - local path = [ path.programs-path ] ; - local match = [ path.glob $(path) : $(name) $(name).exe ] ; - local additional-match = [ path.glob $(additional-paths) : $(name) $(name).exe ] ; - - local result ; - if $(path-last) - { - result = $(additional-match) ; - if ! 
$(result) && $(match) - { - result = $(name) ; - } - } - else - { - if $(match) - { - result = $(name) ; - } - else - { - result = $(additional-match) ; - } - } - if $(result) - { - return [ path.native $(result[1]) ] ; - } -} - - -# Checks if 'command' can be found either in path or is a full name to an -# existing file. -# -local rule check-tool-aux ( command ) -{ - if $(command:D) - { - if [ path.exists $(command) ] - # Both NT and Cygwin will run .exe files by their unqualified names. - || ( [ os.on-windows ] && [ path.exists $(command).exe ] ) - # Only NT will run .bat & .cmd files by their unqualified names. - || ( ( [ os.name ] = NT ) && ( [ path.exists $(command).bat ] || - [ path.exists $(command).cmd ] ) ) - { - return $(command) ; - } - } - else - { - if [ GLOB [ modules.peek : PATH Path path ] : $(command) ] - { - return $(command) ; - } - } -} - - -# Checks that a tool can be invoked by 'command'. If command is not an absolute -# path, checks if it can be found in 'path'. If comand is an absolute path, -# check that it exists. Returns 'command' if ok or empty string otherwise. -# -local rule check-tool ( xcommand + ) -{ - if [ check-tool-aux $(xcommand[1]) ] || - [ check-tool-aux $(xcommand[-1]) ] - { - return $(xcommand) ; - } -} - - -# Handle common options for toolset, specifically sets the following flag -# variables: -# - CONFIG_COMMAND to $(command) -# - OPTIONS for compile to the value of <compileflags> in $(options) -# - OPTIONS for compile.c to the value of <cflags> in $(options) -# - OPTIONS for compile.c++ to the value of <cxxflags> in $(options) -# - OPTIONS for compile.fortran to the value of <fflags> in $(options) -# - OPTIONS for link to the value of <linkflags> in $(options) -# -rule handle-options ( toolset : condition * : command * : options * ) -{ - if $(.debug-configuration) - { - ECHO "notice: will use '$(command)' for $(toolset), condition $(condition:E=(empty))" ; - } - - # The last parameter ('unchecked') says it is OK to set flags for another - # module. - toolset.flags $(toolset) CONFIG_COMMAND $(condition) : $(command) - : unchecked ; - - toolset.flags $(toolset).compile OPTIONS $(condition) : - [ feature.get-values <compileflags> : $(options) ] : unchecked ; - - toolset.flags $(toolset).compile.c OPTIONS $(condition) : - [ feature.get-values <cflags> : $(options) ] : unchecked ; - - toolset.flags $(toolset).compile.c++ OPTIONS $(condition) : - [ feature.get-values <cxxflags> : $(options) ] : unchecked ; - - toolset.flags $(toolset).compile.fortran OPTIONS $(condition) : - [ feature.get-values <fflags> : $(options) ] : unchecked ; - - toolset.flags $(toolset).link OPTIONS $(condition) : - [ feature.get-values <linkflags> : $(options) ] : unchecked ; -} - - -# Returns the location of the "program files" directory on a Windows platform. -# -rule get-program-files-dir ( ) -{ - local ProgramFiles = [ modules.peek : ProgramFiles ] ; - if $(ProgramFiles) - { - ProgramFiles = "$(ProgramFiles:J= )" ; - } - else - { - ProgramFiles = "c:\\Program Files" ; - } - return $(ProgramFiles) ; -} - - -if [ os.name ] = NT -{ - RM = del /f /q ; - CP = copy /b ; - IGNORE = "2>nul >nul & setlocal" ; - LN ?= $(CP) ; - # Ugly hack to convince copy to set the timestamp of the - # destination to the current time by concatenating the - # source with a nonexistent file. Note that this requires - # /b (binary) as the default when concatenating files is /a (ascii). 
- WINDOWS-CP-HACK = "+ this-file-does-not-exist-A698EE7806899E69" ; -} -else -{ - RM = rm -f ; - CP = cp ; - LN = ln ; -} - - -rule rm-command ( ) -{ - return $(RM) ; -} - - -rule copy-command ( ) -{ - return $(CP) ; -} - - -if "\n" = "n" -{ - # Escape characters are not supported. Use ugly hacks that won't work, - # see below. - nl = " -" ; - q = "" ; -} -else -{ - nl = "\n" ; - q = "\"" ; -} - -# Returns the command needed to set an environment variable on the current -# platform. The variable setting persists through all following commands and is -# visible in the environment seen by subsequently executed commands. In other -# words, on Unix systems, the variable is exported, which is consistent with the -# only possible behavior on Windows systems. -# -rule variable-setting-command ( variable : value ) -{ - if [ os.name ] = NT - { - return "set $(variable)=$(value)$(nl)" ; - } - else - { - # If we don't have escape characters support in bjam, the below blows - # up on CYGWIN, since the $(nl) variable holds a Windows new-line \r\n - # sequence that messes up the executed export command which then reports - # that the passed variable name is incorrect. - # But we have a check for cygwin in kernel/bootstrap.jam already. - return "$(variable)=$(q)$(value)$(q)$(nl)export $(variable)$(nl)" ; - } -} - - -# Returns a command to sets a named shell path variable to the given NATIVE -# paths on the current platform. -# -rule path-variable-setting-command ( variable : paths * ) -{ - local sep = [ os.path-separator ] ; - return [ variable-setting-command $(variable) : $(paths:J=$(sep)) ] ; -} - - -# Returns a command that prepends the given paths to the named path variable on -# the current platform. -# -rule prepend-path-variable-command ( variable : paths * ) -{ - return [ path-variable-setting-command $(variable) - : $(paths) [ os.expand-variable $(variable) ] ] ; -} - - -# Return a command which can create a file. If 'r' is result of invocation, then -# 'r foobar' will create foobar with unspecified content. What happens if file -# already exists is unspecified. -# -rule file-creation-command ( ) -{ - if [ os.name ] = NT - { - # A few alternative implementations on Windows: - # - # 'type NUL >> ' - # That would construct an empty file instead of a file containing - # a space and an end-of-line marker but it would also not change - # the target's timestamp in case the file already exists. - # - # 'type NUL > ' - # That would construct an empty file instead of a file containing - # a space and an end-of-line marker but it would also destroy an - # already existing file by overwriting it with an empty one. - # - # I guess the best solution would be to allow Boost Jam to define - # built-in functions such as 'create a file', 'touch a file' or 'copy a - # file' which could be used from inside action code. That would allow - # completely portable operations without this kind of kludge. - # (22.02.2009.) (Jurko) - return "echo. > " ; - } - else - { - return "touch " ; - } -} - - -# Returns a command that may be used for 'touching' files. It is not a real -# 'touch' command on NT because it adds an empty line at the end of file but it -# works with source files. -# -rule file-touch-command ( ) -{ - if [ os.name ] = NT - { - return "echo. >> " ; - } - else - { - return "touch " ; - } -} - - -rule MkDir -{ - # If dir exists, do not update it. Do this even for $(DOT). - NOUPDATE $(<) ; - - if $(<) != $(DOT) && ! $($(<)-mkdir) - { - # Cheesy gate to prevent multiple invocations on same dir. 
- $(<)-mkdir = true ; - - # Schedule the mkdir build action. - common.mkdir $(<) ; - - # Prepare a Jam 'dirs' target that can be used to make the build only - # construct all the target directories. - DEPENDS dirs : $(<) ; - - # Recursively create parent directories. $(<:P) = $(<)'s parent & we - # recurse until root. - - local s = $(<:P) ; - if [ os.name ] = NT - { - switch $(s) - { - case *: : s = ; - case *:\\ : s = ; - } - } - - if $(s) - { - if $(s) != $(<) - { - DEPENDS $(<) : $(s) ; - MkDir $(s) ; - } - else - { - NOTFILE $(s) ; - } - } - } -} - - -#actions MkDir1 -#{ -# mkdir "$(<)" -#} - -# The following quick-fix actions should be replaced using the original MkDir1 -# action once Boost Jam gets updated to correctly detect different paths leading -# up to the same filesystem target and triggers their build action only once. -# (todo) (04.07.2008.) (Jurko) - -if [ os.name ] = NT -{ - actions mkdir - { - if not exist "$(<)\\" mkdir "$(<)" - } -} -else -{ - actions mkdir - { - mkdir -p "$(<)" - } -} - -actions piecemeal together existing Clean -{ - $(RM) "$(>)" -} - - -rule copy -{ -} - - -actions copy -{ - $(CP) "$(>)" $(WINDOWS-CP-HACK) "$(<)" -} - - -rule RmTemps -{ -} - - -actions quietly updated piecemeal together RmTemps -{ - $(RM) "$(>)" $(IGNORE) -} - - -actions hard-link -{ - $(RM) "$(<)" 2$(NULL_OUT) $(NULL_OUT) - $(LN) "$(>)" "$(<)" $(NULL_OUT) -} - - -# Given a target, as given to a custom tag rule, returns a string formatted -# according to the passed format. Format is a list of properties that is -# represented in the result. For each element of format the corresponding target -# information is obtained and added to the result string. For all, but the -# literal, the format value is taken as the as string to prepend to the output -# to join the item to the rest of the result. If not given "-" is used as a -# joiner. -# -# The format options can be: -# -# <base>[joiner] -# :: The basename of the target name. -# <toolset>[joiner] -# :: The abbreviated toolset tag being used to build the target. -# <threading>[joiner] -# :: Indication of a multi-threaded build. -# <runtime>[joiner] -# :: Collective tag of the build runtime. -# <version:/version-feature | X.Y[.Z]/>[joiner] -# :: Short version tag taken from the given "version-feature" in the -# build properties. Or if not present, the literal value as the -# version number. -# <property:/property-name/>[joiner] -# :: Direct lookup of the given property-name value in the build -# properties. /property-name/ is a regular expression. E.g. -# <property:toolset-.*:flavor> will match every toolset. -# /otherwise/ -# :: The literal value of the format argument. -# -# For example this format: -# -# boost_ <base> <toolset> <threading> <runtime> <version:boost-version> -# -# Might return: -# -# boost_thread-vc80-mt-gd-1_33.dll, or -# boost_regex-vc80-gd-1_33.dll -# -# The returned name also has the target type specific prefix and suffix which -# puts it in a ready form to use as the value from a custom tag rule. -# -rule format-name ( format * : name : type ? 
: property-set ) -{ - local result = "" ; - for local f in $(format) - { - switch $(f:G) - { - case <base> : - local matched = [ MATCH "^(boost.*python)-.*" : $(name) ] ; - if $(matched) = boost_python || $(matched) = boost_mpi_python - { - result += $(name) ; - } - else - { - result += $(name:B) ; - } - - case <toolset> : - result += [ join-tag $(f:G=) : [ toolset-tag $(name) : $(type) : - $(property-set) ] ] ; - - case <threading> : - result += [ join-tag $(f:G=) : [ threading-tag $(name) : $(type) - : $(property-set) ] ] ; - - case <runtime> : - result += [ join-tag $(f:G=) : [ runtime-tag $(name) : $(type) : - $(property-set) ] ] ; - - case <qt> : - result += [ join-tag $(f:G=) : [ qt-tag $(name) : $(type) : - $(property-set) ] ] ; - - case <address-model> : - result += [ join-tag $(f:G=) : [ address-model-tag $(name) : $(type) : - $(property-set) ] ] ; - - case <version:*> : - local key = [ MATCH <version:(.*)> : $(f:G) ] ; - local version = [ $(property-set).get <$(key)> ] ; - version ?= $(key) ; - version = [ MATCH "^([^.]+)[.]([^.]+)[.]?([^.]*)" : $(version) ] ; - result += [ join-tag $(f:G=) : $(version[1])_$(version[2]) ] ; - - case <property:*> : - local key = [ MATCH <property:(.*)> : $(f:G) ] ; - local p0 = [ MATCH <($(key))> : [ $(property-set).raw ] ] ; - if $(p0) - { - local p = [ $(property-set).get <$(p0)> ] ; - if $(p) - { - result += [ join-tag $(f:G=) : $(p) ] ; - } - } - - case * : - result += $(f:G=) ; - } - } - result = [ virtual-target.add-prefix-and-suffix $(result:J=) : $(type) : - $(property-set) ] ; - return $(result) ; -} - - -local rule join-tag ( joiner ? : tag ? ) -{ - if ! $(joiner) { joiner = - ; } - return $(joiner)$(tag) ; -} - - -local rule toolset-tag ( name : type ? : property-set ) -{ - local tag = ; - - local properties = [ $(property-set).raw ] ; - switch [ $(property-set).get <toolset> ] - { - case borland* : tag += bcb ; - case clang* : - { - switch [ $(property-set).get <toolset-clang:platform> ] - { - case darwin : tag += clang-darwin ; - case linux : tag += clang ; - } - } - case como* : tag += como ; - case cw : tag += cw ; - case darwin* : tag += xgcc ; - case edg* : tag += edg ; - case gcc* : - { - switch [ $(property-set).get <toolset-gcc:flavor> ] - { - case *mingw* : tag += mgw ; - case * : tag += gcc ; - } - } - case intel : - if [ $(property-set).get <toolset-intel:platform> ] = win - { - tag += iw ; - } - else - { - tag += il ; - } - case kcc* : tag += kcc ; - case kylix* : tag += bck ; - #case metrowerks* : tag += cw ; - #case mingw* : tag += mgw ; - case mipspro* : tag += mp ; - case msvc* : tag += vc ; - case qcc* : tag += qcc ; - case sun* : tag += sw ; - case tru64cxx* : tag += tru ; - case vacpp* : tag += xlc ; - } - local version = [ MATCH "<toolset.*version>([0123456789]+)[.]([0123456789]*)" - : $(properties) ] ; - # For historical reasons, vc6.0 and vc7.0 use different naming. - if $(tag) = vc - { - if $(version[1]) = 6 - { - # Cancel minor version. - version = 6 ; - } - else if $(version[1]) = 7 && $(version[2]) = 0 - { - version = 7 ; - } - } - # On intel, version is not added, because it does not matter and it is the - # version of vc used as backend that matters. Ideally, we should encode the - # backend version but that would break compatibility with V1. - if $(tag) = iw - { - version = ; - } - - # On borland, version is not added for compatibility with V1. - if $(tag) = bcb - { - version = ; - } - - tag += $(version) ; - - return $(tag:J=) ; -} - - -local rule threading-tag ( name : type ? 
: property-set ) -{ - local tag = ; - local properties = [ $(property-set).raw ] ; - if <threading>multi in $(properties) { tag = mt ; } - - return $(tag:J=) ; -} - - -local rule runtime-tag ( name : type ? : property-set ) -{ - local tag = ; - - local properties = [ $(property-set).raw ] ; - if <runtime-link>static in $(properties) { tag += s ; } - - # This is an ugly thing. In V1, there is code to automatically detect which - # properties affect a target. So, if <runtime-debugging> does not affect gcc - # toolset, the tag rules will not even see <runtime-debugging>. Similar - # functionality in V2 is not implemented yet, so we just check for toolsets - # known to care about runtime debugging. - if ( <toolset>msvc in $(properties) ) || - ( <stdlib>stlport in $(properties) ) || - ( <toolset-intel:platform>win in $(properties) ) - { - if <runtime-debugging>on in $(properties) { tag += g ; } - } - - if <python-debugging>on in $(properties) { tag += y ; } - if <variant>debug in $(properties) { tag += d ; } - if <stdlib>stlport in $(properties) { tag += p ; } - if <stdlib-stlport:iostream>hostios in $(properties) { tag += n ; } - - return $(tag:J=) ; -} - -# Create a tag for the Qt library version -# "<qt>4.6.0" will result in tag "qt460" -local rule qt-tag ( name : type ? : property-set ) -{ - local properties = [ $(property-set).get <qt> ] ; - local version = [ MATCH "([0123456789]+)[.]?([0123456789]*)[.]?([0123456789]*)" - : $(properties) ] ; - local tag = "qt"$(version:J=) ; - return $(tag) ; -} - -# Create a tag for the address-model -# <address-model>64 will simply generate "64" -local rule address-model-tag ( name : type ? : property-set ) -{ - local tag = ; - local version = [ $(property-set).get <address-model> ] ; - return $(version) ; -} - -rule __test__ ( ) -{ - import assert ; - - local nl = " -" ; - - local save-os = [ modules.peek os : .name ] ; - - modules.poke os : .name : LINUX ; - - assert.result "PATH=\"foo:bar:baz\"$(nl)export PATH$(nl)" - : path-variable-setting-command PATH : foo bar baz ; - - assert.result "PATH=\"foo:bar:$PATH\"$(nl)export PATH$(nl)" - : prepend-path-variable-command PATH : foo bar ; - - modules.poke os : .name : NT ; - - assert.result "set PATH=foo;bar;baz$(nl)" - : path-variable-setting-command PATH : foo bar baz ; - - assert.result "set PATH=foo;bar;%PATH%$(nl)" - : prepend-path-variable-command PATH : foo bar ; - - modules.poke os : .name : $(save-os) ; -} diff --git a/jam-files/boost-build/tools/common.py b/jam-files/boost-build/tools/common.py deleted file mode 100644 index 612745b8..00000000 --- a/jam-files/boost-build/tools/common.py +++ /dev/null @@ -1,840 +0,0 @@ -# Status: being ported by Steven Watanabe -# Base revision: 47174 -# -# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -""" Provides actions common to all toolsets, such as creating directories and - removing files. -""" - -import re -import bjam -import os -import os.path -import sys - -from b2.build import feature -from b2.util.utility import * -from b2.util import path - -__re__before_first_dash = re.compile ('([^-]*)-') - -def reset (): - """ Clear the module state. This is mainly for testing purposes. - Note that this must be called _after_ resetting the module 'feature'. 
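The qt-tag and address-model-tag rules above are pure string manipulation; a compact Python equivalent of the Qt case (function name is illustrative only):

    import re

    def qt_tag(version):
        # '<qt>4.6.0' -> 'qt460', '4.6' -> 'qt46', as the jam rule describes.
        m = re.match(r'(\d+)[.]?(\d*)[.]?(\d*)', version)
        return 'qt' + ''.join(g for g in m.groups() if g)

    assert qt_tag('4.6.0') == 'qt460'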
-    """
-    global __had_unspecified_value, __had_value, __declared_subfeature
-    global __init_loc
-    global __all_signatures, __debug_configuration, __show_configuration
-
-    # Stores toolsets without specified initialization values.
-    __had_unspecified_value = {}
-
-    # Stores toolsets with specified initialization values.
-    __had_value = {}
-
-    # Stores toolsets with declared subfeatures.
-    __declared_subfeature = {}
-
-    # Stores all signatures of the toolsets.
-    __all_signatures = {}
-
-    # Stores the initialization locations of each toolset.
-    __init_loc = {}
-
-    __debug_configuration = '--debug-configuration' in bjam.variable('ARGV')
-    __show_configuration = '--show-configuration' in bjam.variable('ARGV')
-
-    global __executable_path_variable
-    OS = bjam.call("peek", [], "OS")[0]
-    if OS == "NT":
-        # On Windows the case and capitalization of PATH is not always
-        # predictable, so let's find out what variable name was really set.
-        for n in sys.environ:
-            if n.lower() == "path":
-                __executable_path_variable = n
-                break
-    else:
-        __executable_path_variable = "PATH"
-
-    m = {"NT": __executable_path_variable,
-         "CYGWIN": "PATH",
-         "MACOSX": "DYLD_LIBRARY_PATH",
-         "AIX": "LIBPATH"}
-    global __shared_library_path_variable
-    __shared_library_path_variable = m.get(OS, "LD_LIBRARY_PATH")
-
-reset()
-
-def shared_library_path_variable():
-    return __shared_library_path_variable
-
-# ported from trunk@47174
-class Configurations(object):
-    """
-    This class helps to manage toolset configurations. Each configuration has
-    a unique ID and one or more parameters. A typical example of a unique ID
-    is a condition generated by the 'common.check-init-parameters' rule. Other
-    kinds of IDs can be used. Parameters may include any details about the
-    configuration like 'command', 'path', etc.
-
-    A toolset configuration may be in one of the following states:
-
-        - registered
-              Configuration has been registered (e.g. by autodetection code)
-              but has not yet been marked as used, i.e. the 'toolset.using'
-              rule has not yet been called for it.
-        - used
-              Once called, the 'toolset.using' rule marks the configuration
-              as 'used'.
-
-    The main difference between the states above is that while a configuration
-    is 'registered' its options can be freely changed. This is useful in
-    particular for autodetection code - all detected configurations may be
-    safely overwritten by user code.
-    """
-
-    def __init__(self):
-        self.used_ = set()
-        self.all_ = set()
-        self.params_ = {}
-
-    def register(self, id):
-        """
-        Registers a configuration.
-
-        Returns True if the configuration has been added and False if it
-        already exists. Reports an error if the configuration is 'used'.
-        """
-        if id in self.used_:
-            #FIXME
-            errors.error("common: the configuration '%s' is in use" % id)
-
-        if id not in self.all_:
-            self.all_.add(id)
-
-            # Indicate that a new configuration has been added.
-            return True
-        else:
-            return False
-
-    def use(self, id):
-        """
-        Marks a configuration as 'used'.
-
-        Returns True if the state of the configuration has been changed to
-        'used' and False if the state wasn't changed. Reports an error if the
-        configuration isn't known.
-        """
-        if id not in self.all_:
-            #FIXME:
-            errors.error("common: the configuration '%s' is not known" % id)
-
-        if id not in self.used_:
-            self.used_.add(id)
-
-            # Indicate that the configuration has been marked as 'used'.
-            return True
-        else:
-            return False
-
-    def all(self):
-        """ Return all registered configurations. """
-        return self.all_
-
-    def used(self):
-        """ Return all used configurations. """
-        return self.used_
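Assuming the accessors below read the same `params_` mapping that `__init__` creates, the registered/used lifecycle plays out roughly like this (the IDs and values are illustrative):

    cfgs = Configurations()
    assert cfgs.register('msvc-10.0')           # registered: still overwritable
    cfgs.set('msvc-10.0', 'command', 'cl.exe')  # e.g. by autodetection code
    assert cfgs.use('msvc-10.0')                # toolset.using marks it 'used'
    assert cfgs.get('msvc-10.0', 'command') == 'cl.exe'
    assert 'msvc-10.0' in cfgs.used()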
""" - return self.used_ - - def get(self, id, param): - """ Returns the value of a configuration parameter. """ - self.params_.getdefault(param, {}).getdefault(id, None) - - def set (self, id, param, value): - """ Sets the value of a configuration parameter. """ - self.params_.setdefault(param, {})[id] = value - -# Ported from trunk@47174 -def check_init_parameters(toolset, requirement, *args): - """ The rule for checking toolset parameters. Trailing parameters should all be - parameter name/value pairs. The rule will check that each parameter either has - a value in each invocation or has no value in each invocation. Also, the rule - will check that the combination of all parameter values is unique in all - invocations. - - Each parameter name corresponds to a subfeature. This rule will declare a - subfeature the first time a non-empty parameter value is passed and will - extend it with all the values. - - The return value from this rule is a condition to be used for flags settings. - """ - # The type checking here is my best guess about - # what the types should be. - assert(isinstance(toolset, str)) - assert(isinstance(requirement, str) or requirement is None) - sig = toolset - condition = replace_grist(toolset, '<toolset>') - subcondition = [] - - for arg in args: - assert(isinstance(arg, tuple)) - assert(len(arg) == 2) - name = arg[0] - value = arg[1] - assert(isinstance(name, str)) - assert(isinstance(value, str) or value is None) - - str_toolset_name = str((toolset, name)) - - # FIXME: is this the correct translation? - ### if $(value)-is-not-empty - if value is not None: - condition = condition + '-' + value - if __had_unspecified_value.has_key(str_toolset_name): - raise BaseException("'%s' initialization: parameter '%s' inconsistent\n" \ - "no value was specified in earlier initialization\n" \ - "an explicit value is specified now" % (toolset, name)) - - # The logic below is for intel compiler. It calls this rule - # with 'intel-linux' and 'intel-win' as toolset, so we need to - # get the base part of toolset name. - # We can't pass 'intel' as toolset, because it that case it will - # be impossible to register versionles intel-linux and - # intel-win of specific version. 
- t = toolset - m = __re__before_first_dash.match(toolset) - if m: - t = m.group(1) - - if not __had_value.has_key(str_toolset_name): - if not __declared_subfeature.has_key(str((t, name))): - feature.subfeature('toolset', t, name, [], ['propagated']) - __declared_subfeature[str((t, name))] = True - - __had_value[str_toolset_name] = True - - feature.extend_subfeature('toolset', t, name, [value]) - subcondition += ['<toolset-' + t + ':' + name + '>' + value ] - - else: - if __had_value.has_key(str_toolset_name): - raise BaseException ("'%s' initialization: parameter '%s' inconsistent\n" \ - "an explicit value was specified in an earlier initialization\n" \ - "no value is specified now" % (toolset, name)) - - __had_unspecified_value[str_toolset_name] = True - - if value == None: value = '' - - sig = sig + value + '-' - - if __all_signatures.has_key(sig): - message = "duplicate initialization of '%s' with the following parameters: " % toolset - - for arg in args: - name = arg[0] - value = arg[1] - if value == None: value = '<unspecified>' - - message += "'%s' = '%s'\n" % (name, value) - - raise BaseException(message) - - __all_signatures[sig] = True - # FIXME - __init_loc[sig] = "User location unknown" #[ errors.nearest-user-location ] ; - - # If we have a requirement, this version should only be applied under that - # condition. To accomplish this we add a toolset requirement that imposes - # the toolset subcondition, which encodes the version. - if requirement: - r = ['<toolset>' + toolset, requirement] - r = ','.join(r) - toolset.add_requirements([r + ':' + c for c in subcondition]) - - # We add the requirements, if any, to the condition to scope the toolset - # variables and options to this specific version. - condition = [condition] - if requirement: - condition += [requirement] - - if __show_configuration: - print "notice:", condition - return ['/'.join(condition)] - -# Ported from trunk@47077 -def get_invocation_command_nodefault( - toolset, tool, user_provided_command=[], additional_paths=[], path_last=False): - """ - A helper rule to get the command to invoke some tool. If - 'user-provided-command' is not given, tries to find binary named 'tool' in - PATH and in the passed 'additional-path'. Otherwise, verifies that the first - element of 'user-provided-command' is an existing program. - - This rule returns the command to be used when invoking the tool. If we can't - find the tool, a warning is issued. If 'path-last' is specified, PATH is - checked after 'additional-paths' when searching for 'tool'. 
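The search order described above can be sketched with the standard library, using shutil.which as a stand-in for path.glob over the programs path (a simplification, not the module's actual implementation):

    import os, shutil

    def find_tool_sketch(name, additional_paths=(), path_last=False):
        # PATH wins by default; with path_last the extra directories are
        # searched first and PATH becomes the fallback. '' means not found.
        in_path = shutil.which(name)
        in_extra = (shutil.which(name, path=os.pathsep.join(additional_paths))
                    if additional_paths else None)
        first, second = ((in_extra, in_path) if path_last
                         else (in_path, in_extra))
        return first or second or ''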
-    """
-    assert(isinstance(toolset, str))
-    assert(isinstance(tool, str))
-    assert(isinstance(user_provided_command, list))
-    if additional_paths is not None:
-        assert(isinstance(additional_paths, list))
-        assert(all(isinstance(path, str) for path in additional_paths))
-    assert(isinstance(path_last, bool))
-
-    if not user_provided_command:
-        command = find_tool(tool, additional_paths, path_last)
-        if not command and __debug_configuration:
-            print "warning: toolset", toolset, "initialization: can't find tool", tool
-            #FIXME
-            #print "warning: initialized from" [ errors.nearest-user-location ] ;
-    else:
-        command = check_tool(user_provided_command)
-        if not command and __debug_configuration:
-            print "warning: toolset", toolset, "initialization:"
-            print "warning: can't find user-provided command", user_provided_command
-            #FIXME
-            #ECHO "warning: initialized from" [ errors.nearest-user-location ]
-
-    assert(isinstance(command, str))
-
-    return command
-
-# ported from trunk@47174
-def get_invocation_command(toolset, tool, user_provided_command = [],
-                           additional_paths = [], path_last = False):
-    """ Same as get_invocation_command_nodefault, except that if no tool is
-        found, returns either the user-provided-command, if present, or the
-        'tool' parameter.
-    """
-
-    assert(isinstance(toolset, str))
-    assert(isinstance(tool, str))
-    assert(isinstance(user_provided_command, list))
-    if additional_paths is not None:
-        assert(isinstance(additional_paths, list))
-        assert(all(isinstance(path, str) for path in additional_paths))
-    assert(isinstance(path_last, bool))
-
-    result = get_invocation_command_nodefault(toolset, tool,
-                                              user_provided_command,
-                                              additional_paths,
-                                              path_last)
-
-    if not result:
-        if user_provided_command:
-            result = user_provided_command[0]
-        else:
-            result = tool
-
-    assert(isinstance(result, str))
-
-    return result
-
-# ported from trunk@47281
-def get_absolute_tool_path(command):
-    """
-    Given an invocation command, return the absolute path to the command. This
-    works even if the command has no path element and is found on the PATH.
-    """
-    if os.path.dirname(command):
-        return os.path.dirname(command)
-    else:
-        programs = path.programs_path()
-        m = path.glob(programs, [command, command + '.exe'])
-        if not len(m):
-            print "Could not find:", command, "in", programs
-            return None
-        return os.path.dirname(m[0])
-
-# ported from trunk@47174
-def find_tool(name, additional_paths = [], path_last = False):
-    """ Attempts to find a tool (binary) named 'name' in PATH and in
-        'additional_paths'. If found in PATH, returns 'name'. If found in
-        additional paths, returns the full name. If the tool is found in
-        several directories, returns the first path found. Otherwise, returns
-        an empty string. If 'path_last' is specified, PATH is checked after
-        'additional_paths'.
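get_absolute_tool_path above also has a compact modern equivalent, with shutil.which standing in for the path.glob lookup (sketch only):

    import os, shutil

    def absolute_tool_dir(command):
        # Directory holding `command`; when the invocation has no directory
        # component, resolve it through PATH first, as the code above does.
        if os.path.dirname(command):
            return os.path.dirname(command)
        located = shutil.which(command)
        return os.path.dirname(located) if located else None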
- """ - assert(isinstance(name, str)) - assert(isinstance(additional_paths, list)) - assert(isinstance(path_last, bool)) - - programs = path.programs_path() - match = path.glob(programs, [name, name + '.exe']) - additional_match = path.glob(additional_paths, [name, name + '.exe']) - - result = [] - if path_last: - result = additional_match - if not result and match: - result = match - - else: - if match: - result = match - - elif additional_match: - result = additional_match - - if result: - return path.native(result[0]) - else: - return '' - -#ported from trunk@47281 -def check_tool_aux(command): - """ Checks if 'command' can be found either in path - or is a full name to an existing file. - """ - assert(isinstance(command, str)) - dirname = os.path.dirname(command) - if dirname: - if os.path.exists(command): - return command - # Both NT and Cygwin will run .exe files by their unqualified names. - elif on_windows() and os.path.exists(command + '.exe'): - return command - # Only NT will run .bat files by their unqualified names. - elif os_name() == 'NT' and os.path.exists(command + '.bat'): - return command - else: - paths = path.programs_path() - if path.glob(paths, [command]): - return command - -# ported from trunk@47281 -def check_tool(command): - """ Checks that a tool can be invoked by 'command'. - If command is not an absolute path, checks if it can be found in 'path'. - If comand is absolute path, check that it exists. Returns 'command' - if ok and empty string otherwise. - """ - assert(isinstance(command, list)) - assert(all(isinstance(c, str) for c in command)) - #FIXME: why do we check the first and last elements???? - if check_tool_aux(command[0]) or check_tool_aux(command[-1]): - return command - -# ported from trunk@47281 -def handle_options(tool, condition, command, options): - """ Handle common options for toolset, specifically sets the following - flag variables: - - CONFIG_COMMAND to 'command' - - OPTIOns for compile to the value of <compileflags> in options - - OPTIONS for compile.c to the value of <cflags> in options - - OPTIONS for compile.c++ to the value of <cxxflags> in options - - OPTIONS for compile.fortran to the value of <fflags> in options - - OPTIONs for link to the value of <linkflags> in options - """ - from b2.build import toolset - - assert(isinstance(tool, str)) - assert(isinstance(condition, list)) - assert(isinstance(command, str)) - assert(isinstance(options, list)) - assert(command) - toolset.flags(tool, 'CONFIG_COMMAND', condition, [command]) - toolset.flags(tool + '.compile', 'OPTIONS', condition, feature.get_values('<compileflags>', options)) - toolset.flags(tool + '.compile.c', 'OPTIONS', condition, feature.get_values('<cflags>', options)) - toolset.flags(tool + '.compile.c++', 'OPTIONS', condition, feature.get_values('<cxxflags>', options)) - toolset.flags(tool + '.compile.fortran', 'OPTIONS', condition, feature.get_values('<fflags>', options)) - toolset.flags(tool + '.link', 'OPTIONS', condition, feature.get_values('<linkflags>', options)) - -# ported from trunk@47281 -def get_program_files_dir(): - """ returns the location of the "program files" directory on a windows - platform - """ - ProgramFiles = bjam.variable("ProgramFiles") - if ProgramFiles: - ProgramFiles = ' '.join(ProgramFiles) - else: - ProgramFiles = "c:\\Program Files" - return ProgramFiles - -# ported from trunk@47281 -def rm_command(): - return __RM - -# ported from trunk@47281 -def copy_command(): - return __CP - -# ported from trunk@47281 -def 
variable_setting_command(variable, value): - """ - Returns the command needed to set an environment variable on the current - platform. The variable setting persists through all following commands and is - visible in the environment seen by subsequently executed commands. In other - words, on Unix systems, the variable is exported, which is consistent with the - only possible behavior on Windows systems. - """ - assert(isinstance(variable, str)) - assert(isinstance(value, str)) - - if os_name() == 'NT': - return "set " + variable + "=" + value + os.linesep - else: - # (todo) - # The following does not work on CYGWIN and needs to be fixed. On - # CYGWIN the $(nl) variable holds a Windows new-line \r\n sequence that - # messes up the executed export command which then reports that the - # passed variable name is incorrect. This is most likely due to the - # extra \r character getting interpreted as a part of the variable name. - # - # Several ideas pop to mind on how to fix this: - # * One way would be to separate the commands using the ; shell - # command separator. This seems like the quickest possible - # solution but I do not know whether this would break code on any - # platforms I I have no access to. - # * Another would be to not use the terminating $(nl) but that would - # require updating all the using code so it does not simply - # prepend this variable to its own commands. - # * I guess the cleanest solution would be to update Boost Jam to - # allow explicitly specifying \n & \r characters in its scripts - # instead of always relying only on the 'current OS native newline - # sequence'. - # - # Some code found to depend on this behaviour: - # * This Boost Build module. - # * __test__ rule. - # * path-variable-setting-command rule. - # * python.jam toolset. - # * xsltproc.jam toolset. - # * fop.jam toolset. - # (todo) (07.07.2008.) (Jurko) - # - # I think that this works correctly in python -- Steven Watanabe - return variable + "=" + value + os.linesep + "export " + variable + os.linesep - -def path_variable_setting_command(variable, paths): - """ - Returns a command to sets a named shell path variable to the given NATIVE - paths on the current platform. - """ - assert(isinstance(variable, str)) - assert(isinstance(paths, list)) - sep = os.path.pathsep - return variable_setting_command(variable, sep.join(paths)) - -def prepend_path_variable_command(variable, paths): - """ - Returns a command that prepends the given paths to the named path variable on - the current platform. - """ - return path_variable_setting_command(variable, - paths + os.environ.get(variable, "").split(os.pathsep)) - -def file_creation_command(): - """ - Return a command which can create a file. If 'r' is result of invocation, then - 'r foobar' will create foobar with unspecified content. What happens if file - already exists is unspecified. - """ - if os_name() == 'NT': - return "echo. > " - else: - return "touch " - -#FIXME: global variable -__mkdir_set = set() -__re_windows_drive = re.compile(r'^.*:\$') - -def mkdir(engine, target): - # If dir exists, do not update it. Do this even for $(DOT). - bjam.call('NOUPDATE', target) - - global __mkdir_set - - # FIXME: Where is DOT defined? - #if $(<) != $(DOT) && ! $($(<)-mkdir): - if target != '.' and target not in __mkdir_set: - # Cheesy gate to prevent multiple invocations on same dir. - __mkdir_set.add(target) - - # Schedule the mkdir build action. 
- if os_name() == 'NT': - engine.set_update_action("common.MkDir1-quick-fix-for-windows", target, []) - else: - engine.set_update_action("common.MkDir1-quick-fix-for-unix", target, []) - - # Prepare a Jam 'dirs' target that can be used to make the build only - # construct all the target directories. - engine.add_dependency('dirs', target) - - # Recursively create parent directories. $(<:P) = $(<)'s parent & we - # recurse until root. - - s = os.path.dirname(target) - if os_name() == 'NT': - if(__re_windows_drive.match(s)): - s = '' - - if s: - if s != target: - engine.add_dependency(target, s) - mkdir(engine, s) - else: - bjam.call('NOTFILE', s) - -__re_version = re.compile(r'^([^.]+)[.]([^.]+)[.]?([^.]*)') - -def format_name(format, name, target_type, prop_set): - """ Given a target, as given to a custom tag rule, returns a string formatted - according to the passed format. Format is a list of properties that is - represented in the result. For each element of format the corresponding target - information is obtained and added to the result string. For all, but the - literal, the format value is taken as the as string to prepend to the output - to join the item to the rest of the result. If not given "-" is used as a - joiner. - - The format options can be: - - <base>[joiner] - :: The basename of the target name. - <toolset>[joiner] - :: The abbreviated toolset tag being used to build the target. - <threading>[joiner] - :: Indication of a multi-threaded build. - <runtime>[joiner] - :: Collective tag of the build runtime. - <version:/version-feature | X.Y[.Z]/>[joiner] - :: Short version tag taken from the given "version-feature" - in the build properties. Or if not present, the literal - value as the version number. - <property:/property-name/>[joiner] - :: Direct lookup of the given property-name value in the - build properties. /property-name/ is a regular expression. - e.g. <property:toolset-.*:flavor> will match every toolset. - /otherwise/ - :: The literal value of the format argument. - - For example this format: - - boost_ <base> <toolset> <threading> <runtime> <version:boost-version> - - Might return: - - boost_thread-vc80-mt-gd-1_33.dll, or - boost_regex-vc80-gd-1_33.dll - - The returned name also has the target type specific prefix and suffix which - puts it in a ready form to use as the value from a custom tag rule. 
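Concretely, the tag pipeline composes names exactly like the docstring's examples; a self-contained sketch (with an extra guard for empty tags that the real join_tag leaves to its callers):

    def join_tag_sketch(joiner, tag):
        # '-' is the default joiner; an empty tag contributes nothing.
        return ((joiner or '-') + tag) if tag else ''

    name = ('boost_thread' + join_tag_sketch('-', 'vc80')
            + join_tag_sketch('-', 'mt') + join_tag_sketch('-', 'gd')
            + join_tag_sketch('-', '1_33') + '.dll')
    assert name == 'boost_thread-vc80-mt-gd-1_33.dll'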
-    """
-    assert(isinstance(format, list))
-    assert(isinstance(name, str))
-    assert(isinstance(target_type, str) or not target_type)
-    # assert(isinstance(prop_set, property_set.PropertySet))
-    if type.is_derived(target_type, 'LIB'):
-        result = ''
-        for f in format:
-            grist = get_grist(f)
-            if grist == '<base>':
-                result += os.path.basename(name)
-            elif grist == '<toolset>':
-                result += join_tag(ungrist(f),
-                    toolset_tag(name, target_type, prop_set))
-            elif grist == '<threading>':
-                result += join_tag(ungrist(f),
-                    threading_tag(name, target_type, prop_set))
-            elif grist == '<runtime>':
-                result += join_tag(ungrist(f),
-                    runtime_tag(name, target_type, prop_set))
-            elif grist.startswith('<version:'):
-                key = grist[len('<version:'):-1]
-                version = prop_set.get('<' + key + '>')
-                if version:
-                    version = version[0]
-                else:
-                    version = key
-                version = __re_version.match(version)
-                result += join_tag(ungrist(f),
-                    version.group(1) + '_' + version.group(2))
-            elif grist.startswith('<property:'):
-                key = grist[len('<property:'):-1]
-                property_re = re.compile('<(' + key + ')>')
-                p0 = None
-                for prop in prop_set.raw():
-                    match = property_re.match(prop)
-                    if match:
-                        p0 = match.group(1)
-                        break
-                if p0:
-                    p = prop_set.get('<' + p0 + '>')
-                    if p:
-                        assert(len(p) == 1)
-                        result += join_tag(ungrist(f), p[0])
-            else:
-                result += ungrist(f)
-
-        result = virtual_target.add_prefix_and_suffix(
-            ''.join(result), target_type, prop_set)
-        return result
-
-def join_tag(joiner, tag):
-    if not joiner: joiner = '-'
-    return joiner + tag
-
-__re_toolset_version = re.compile(r"<toolset.*version>(\d+)[.](\d*)")
-
-def toolset_tag(name, target_type, prop_set):
-    tag = ''
-
-    properties = prop_set.raw()
-    tools = prop_set.get('<toolset>')
-    assert(len(tools) == 1)
-    tools = tools[0]
-    if tools.startswith('borland'): tag += 'bcb'
-    elif tools.startswith('como'): tag += 'como'
-    elif tools.startswith('cw'): tag += 'cw'
-    elif tools.startswith('darwin'): tag += 'xgcc'
-    elif tools.startswith('edg'): tag += 'edg'
-    elif tools.startswith('gcc'):
-        flavor = prop_set.get('<toolset-gcc:flavor>')
-        if flavor and flavor[0].find('mingw') != -1:
-            tag += 'mgw'
-        else:
-            tag += 'gcc'
-    elif tools == 'intel':
-        if prop_set.get('<toolset-intel:platform>') == ['win']:
-            tag += 'iw'
-        else:
-            tag += 'il'
-    elif tools.startswith('kcc'): tag += 'kcc'
-    elif tools.startswith('kylix'): tag += 'bck'
-    #case metrowerks* : tag += cw ;
-    #case mingw* : tag += mgw ;
-    elif tools.startswith('mipspro'): tag += 'mp'
-    elif tools.startswith('msvc'): tag += 'vc'
-    elif tools.startswith('sun'): tag += 'sw'
-    elif tools.startswith('tru64cxx'): tag += 'tru'
-    elif tools.startswith('vacpp'): tag += 'xlc'
-
-    version = None
-    for prop in properties:
-        match = __re_toolset_version.match(prop)
-        if match:
-            version = match
-            break
-    version_string = None
-    # For historical reasons, vc6.0 and vc7.0 use different naming.
-    if tag == 'vc':
-        if version.group(1) == '6':
-            # Cancel the minor version.
-            version_string = '6'
-        elif version.group(1) == '7' and version.group(2) == '0':
-            version_string = '7'
-
-    # On intel, the version is not added, because it does not matter; it is
-    # the version of vc used as the backend that matters. Ideally, we would
-    # encode the backend version, but that would break compatibility with V1.
-    elif tag == 'iw':
-        version_string = ''
-
-    # On borland, the version is not added for compatibility with V1.
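The vc special-casing above reduces to a tiny pure function (a hypothetical name, shown only for clarity):

    def msvc_version_tag(major, minor):
        # vc6.0 -> '6', vc7.0 -> '7'; everything else concatenates the
        # major and minor numbers, so msvc-8.0 tags as 'vc80'.
        if major == '6':
            return '6'
        if major == '7' and minor == '0':
            return '7'
        return major + minor

    assert msvc_version_tag('8', '0') == '80'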
- elif tag == 'bcb': - version_string = '' - - if version_string is None: - version = version.group(1) + version.group(2) - - tag += version - - return tag - - -def threading_tag(name, target_type, prop_set): - tag = '' - properties = prop_set.raw() - if '<threading>multi' in properties: tag = 'mt' - - return tag - - -def runtime_tag(name, target_type, prop_set ): - tag = '' - - properties = prop_set.raw() - if '<runtime-link>static' in properties: tag += 's' - - # This is an ugly thing. In V1, there's a code to automatically detect which - # properties affect a target. So, if <runtime-debugging> does not affect gcc - # toolset, the tag rules won't even see <runtime-debugging>. Similar - # functionality in V2 is not implemented yet, so we just check for toolsets - # which are known to care about runtime debug. - if '<toolset>msvc' in properties \ - or '<stdlib>stlport' in properties \ - or '<toolset-intel:platform>win' in properties: - if '<runtime-debugging>on' in properties: tag += 'g' - - if '<python-debugging>on' in properties: tag += 'y' - if '<variant>debug' in properties: tag += 'd' - if '<stdlib>stlport' in properties: tag += 'p' - if '<stdlib-stlport:iostream>hostios' in properties: tag += 'n' - - return tag - - -## TODO: -##rule __test__ ( ) -##{ -## import assert ; -## -## local nl = " -##" ; -## -## local save-os = [ modules.peek os : .name ] ; -## -## modules.poke os : .name : LINUX ; -## -## assert.result "PATH=foo:bar:baz$(nl)export PATH$(nl)" -## : path-variable-setting-command PATH : foo bar baz ; -## -## assert.result "PATH=foo:bar:$PATH$(nl)export PATH$(nl)" -## : prepend-path-variable-command PATH : foo bar ; -## -## modules.poke os : .name : NT ; -## -## assert.result "set PATH=foo;bar;baz$(nl)" -## : path-variable-setting-command PATH : foo bar baz ; -## -## assert.result "set PATH=foo;bar;%PATH%$(nl)" -## : prepend-path-variable-command PATH : foo bar ; -## -## modules.poke os : .name : $(save-os) ; -##} - -def init(manager): - engine = manager.engine() - - engine.register_action("common.MkDir1-quick-fix-for-unix", 'mkdir -p "$(<)"') - engine.register_action("common.MkDir1-quick-fix-for-windows", 'if not exist "$(<)\\" mkdir "$(<)"') - - import b2.tools.make - import b2.build.alias - - global __RM, __CP, __IGNORE, __LN - # ported from trunk@47281 - if os_name() == 'NT': - __RM = 'del /f /q' - __CP = 'copy' - __IGNORE = '2>nul >nul & setlocal' - __LN = __CP - #if not __LN: - # __LN = CP - else: - __RM = 'rm -f' - __CP = 'cp' - __IGNORE = '' - __LN = 'ln' - - engine.register_action("common.Clean", __RM + ' "$(>)"', - flags=['piecemeal', 'together', 'existing']) - engine.register_action("common.copy", __CP + ' "$(>)" "$(<)"') - engine.register_action("common.RmTemps", __RM + ' "$(>)" ' + __IGNORE, - flags=['quietly', 'updated', 'piecemeal', 'together']) - - engine.register_action("common.hard-link", - __RM + ' "$(<)" 2$(NULL_OUT) $(NULL_OUT)' + os.linesep + - __LN + ' "$(>)" "$(<)" $(NULL_OUT)') diff --git a/jam-files/boost-build/tools/como-linux.jam b/jam-files/boost-build/tools/como-linux.jam deleted file mode 100644 index 5c554c8f..00000000 --- a/jam-files/boost-build/tools/como-linux.jam +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# The following #// line will be used by the regression test table generation -# program as the column heading for HTML tables. 
Must not include a version -# number. -#//<a href="http://www.comeaucomputing.com/">Comeau<br>C++</a> - -import toolset ; -import feature ; -import toolset : flags ; -import common ; -import generators ; - -import unix ; -import como ; - -feature.extend-subfeature toolset como : platform : linux ; - -toolset.inherit-generators como-linux - <toolset>como <toolset-como:platform>linux : unix ; -generators.override como-linux.prebuilt : builtin.lib-generator ; -generators.override como-linux.searched-lib-generator : searched-lib-generator ; -toolset.inherit-flags como-linux : unix ; -toolset.inherit-rules como-linux : gcc ; - -generators.register-c-compiler como-linux.compile.c++ : CPP : OBJ - : <toolset>como <toolset-como:platform>linux ; -generators.register-c-compiler como-linux.compile.c : C : OBJ - : <toolset>como <toolset-como:platform>linux ; - - -rule init ( version ? : command * : options * ) -{ - local condition = [ common.check-init-parameters como-linux - : version $(version) ] ; - - command = [ common.get-invocation-command como-linux : como - : $(command) ] ; - - common.handle-options como-linux : $(condition) : $(command) : $(options) ; -} - - -flags como-linux C++FLAGS <exception-handling>off : --no_exceptions ; -flags como-linux C++FLAGS <exception-handling>on : --exceptions ; - -flags como-linux CFLAGS <inlining>off : --no_inlining ; -flags como-linux CFLAGS <inlining>on <inlining>full : --inlining ; - -flags como-linux CFLAGS <optimization>off : -O0 ; -flags como-linux CFLAGS <optimization>speed : -O3 ; -flags como-linux CFLAGS <optimization>space : -Os ; - -flags como-linux CFLAGS <debug-symbols>on : -g ; -flags como-linux LINKFLAGS <debug-symbols>on : -g ; - -flags como-linux FINDLIBS : m ; -flags como-linux FINDLIBS : rt ; - -flags como-linux CFLAGS <cflags> ; -flags como-linux C++FLAGS <cxxflags> ; -flags como-linux DEFINES <define> ; -flags como-linux UNDEFS <undef> ; -flags como-linux HDRS <include> ; -flags como-linux STDHDRS <sysinclude> ; -flags como-linux LINKFLAGS <linkflags> ; -flags como-linux ARFLAGS <arflags> ; - -flags como-linux.link LIBRARIES <library-file> ; -flags como-linux.link LINKPATH <library-path> ; -flags como-linux.link FINDLIBS-ST <find-static-library> ; -flags como-linux.link FINDLIBS-SA <find-shared-library> ; - -flags como-linux.link RPATH <dll-path> ; -flags como-linux.link RPATH_LINK <xdll-path> ; - - -actions link bind LIBRARIES -{ - $(CONFIG_COMMAND) $(LINKFLAGS) -o "$(<[1])" "$(>)" -L"$(LINKPATH)" -Wl,-R$(SPACE)-Wl,"$(RPATH)" -Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" "$(LIBRARIES)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) 2>&1 -} - -actions link.dll bind LIBRARIES -{ - $(CONFIG_COMMAND) $(LINKFLAGS) -shared -o "$(<[1])" "$(>)" -L"$(LINKPATH)" -Wl,-R$(SPACE)-Wl,"$(RPATH)" -Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" "$(LIBRARIES)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) 2>&1 -} - -actions compile.c -{ - $(CONFIG_COMMAND) -c --c99 --long_long -U$(UNDEFS) -D$(DEFINES) $(CFLAGS) -I"$(HDRS)" -I"$(STDHDRS)" -o "$(<)" "$(>)" 2>&1 -} - -actions compile.c++ -{ - $(CONFIG_COMMAND) -tused -c --long_long -U$(UNDEFS) -D$(DEFINES) $(CFLAGS) $(C++FLAGS) -I"$(HDRS)" -I"$(STDHDRS)" -o "$(<)" "$(>)" 2>&1 -} - -actions archive -{ - ar rcu $(<) $(>) -} diff --git a/jam-files/boost-build/tools/como-win.jam b/jam-files/boost-build/tools/como-win.jam deleted file mode 100644 index d21a70d6..00000000 --- a/jam-files/boost-build/tools/como-win.jam +++ /dev/null @@ -1,117 +0,0 @@ -# (C) Copyright David Abrahams 2001. 
-# (C) Copyright MetaCommunications, Inc. 2004. - -# Distributed under the Boost Software License, Version 1.0. (See -# accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -# The following #// line will be used by the regression test table generation -# program as the column heading for HTML tables. Must not include a version -# number. -#//<a href="http://www.comeaucomputing.com/">Comeau<br>C++</a> - -import common ; -import como ; -import feature ; -import generators ; -import toolset : flags ; - -feature.extend-subfeature toolset como : platform : win ; - - -# Initializes the Comeau toolset for windows. The command is the command which -# invokes the compiler. You should either set environment variable -# COMO_XXX_INCLUDE where XXX is the used backend (as described in the -# documentation), or pass that as part of command, e.g: -# -# using como-win : 4.3 : "set COMO_BCC_INCLUDE=C:/include &&" como.exe ; -# -rule init ( version ? : command * : options * ) -{ - local condition = [ common.check-init-parameters como-win - : version $(version) ] ; - - command = [ common.get-invocation-command como-win : como.exe : - $(command) ] ; - - common.handle-options como-win : $(condition) : $(command) : $(options) ; -} - -generators.register-c-compiler como-win.compile.c++ : CPP : OBJ - : <toolset>como <toolset-como:platform>win ; -generators.register-c-compiler como-win.compile.c : C : OBJ - : <toolset>como <toolset-como:platform>win ; - - -generators.register-linker como-win.link - : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB - : EXE - : <toolset>como <toolset-como:platform>win ; - -# Note that status of shared libraries support is not clear, so we do not define -# the link.dll generator. -generators.register-archiver como-win.archive - : OBJ : STATIC_LIB - : <toolset>como <toolset-como:platform>win ; - - -flags como-win C++FLAGS <exception-handling>off : --no_exceptions ; -flags como-win C++FLAGS <exception-handling>on : --exceptions ; - -flags como-win CFLAGS <inlining>off : --no_inlining ; -flags como-win CFLAGS <inlining>on <inlining>full : --inlining ; - - -# The following seems to be VC-specific options. At least, when I uncomment -# then, Comeau with bcc as backend reports that bcc32 invocation failed. -# -#flags como-win CFLAGS <debug-symbols>on : /Zi ; -#flags como-win CFLAGS <optimization>off : /Od ; - - -flags como-win CFLAGS <cflags> ; -flags como-win CFLAGS : -D_WIN32 ; # Make sure that we get the Boost Win32 platform config header. -flags como-win CFLAGS <threading>multi : -D_MT ; # Make sure that our config knows that threading is on. -flags como-win C++FLAGS <cxxflags> ; -flags como-win DEFINES <define> ; -flags como-win UNDEFS <undef> ; -flags como-win HDRS <include> ; -flags como-win SYSHDRS <sysinclude> ; -flags como-win LINKFLAGS <linkflags> ; -flags como-win ARFLAGS <arflags> ; -flags como-win NO_WARN <no-warn> ; - -#flags como-win STDHDRS : $(COMO_INCLUDE_PATH) ; -#flags como-win STDLIB_PATH : $(COMO_STDLIB_PATH)$(SLASH) ; - -flags como-win LIBPATH <library-path> ; -flags como-win LIBRARIES <library-file> ; -flags como-win FINDLIBS <find-shared-library> ; -flags como-win FINDLIBS <find-static-library> ; - -nl = " -" ; - - -# For como, we repeat all libraries so that dependencies are always resolved. 
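The @"...rsp" constructs in the link and archive actions below route long argument lists through response files, sidestepping the Windows command-line length limit. The idea in Python (an illustrative helper, not part of the toolset):

    def write_response_file(path, args):
        # One quoted argument per line; the tool is then invoked with
        # @path instead of inlining every object file on the command line.
        with open(path, 'w') as f:
            f.write('\n'.join('"%s"' % a for a in args) + '\n')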
-# -actions link bind LIBRARIES -{ - $(CONFIG_COMMAND) --no_version --no_prelink_verbose $(LINKFLAGS) -o "$(<[1]:S=)" @"@($(<[1]:W).rsp:E=$(nl)"$(>)")" "$(LIBRARIES)" "$(FINDLIBS:S=.lib)" -} - -actions compile.c -{ - $(CONFIG_COMMAND) -c --c99 -e5 --no_version --display_error_number --diag_suppress=9,21,161,748,940,962 -U$(UNDEFS) -D$(DEFINES) $(WARN) $(CFLAGS) -I"$(HDRS)" -I"$(STDHDRS)" -I"$(SYSHDRS)" -o "$(<:D=)" "$(>)" -} - -actions compile.c++ -{ - $(CONFIG_COMMAND) -c -e5 --no_version --no_prelink_verbose --display_error_number --long_long --diag_suppress=9,21,161,748,940,962 --diag_error=461 -D__STL_LONG_LONG -U$(UNDEFS) -D$(DEFINES) $(WARN) $(CFLAGS) $(C++FLAGS) -I"$(HDRS)" -I"$(STDHDRS)" -I"$(SYSHDRS)" -o "$(<)" "$(>)" -} - -actions archive -{ - $(CONFIG_COMMAND) --no_version --no_prelink_verbose --prelink_object @"@($(<[1]:W).rsp:E=$(nl)"$(>)")" - lib $(ARFLAGS) /nologo /out:"$(<:S=.lib)" @"@($(<[1]:W).rsp:E=$(nl)"$(>)")" -} diff --git a/jam-files/boost-build/tools/como.jam b/jam-files/boost-build/tools/como.jam deleted file mode 100644 index 04a05a94..00000000 --- a/jam-files/boost-build/tools/como.jam +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright Vladimir Prus 2004. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt -# or copy at http://www.boost.org/LICENSE_1_0.txt) - -# This is a generic 'como' toolset. Depending on the current system, it -# forwards either to 'como-linux' or 'como-win' modules. - -import feature ; -import os ; -import toolset ; - -feature.extend toolset : como ; -feature.subfeature toolset como : platform : : propagated link-incompatible ; - -rule init ( * : * ) -{ - if [ os.name ] = LINUX - { - toolset.using como-linux : - $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - else - { - toolset.using como-win : - $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - - } -} diff --git a/jam-files/boost-build/tools/convert.jam b/jam-files/boost-build/tools/convert.jam deleted file mode 100644 index ac1d7010..00000000 --- a/jam-files/boost-build/tools/convert.jam +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2009 Vladimir Prus -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# Implements 'convert' target that takes a bunch of source and -# tries to convert each one to the specified type. -# -# For example: -# -# convert objects obj : a.cpp b.cpp ; -# - -import targets ; -import generators ; -import project ; -import type ; -import "class" : new ; - -class convert-target-class : typed-target -{ - rule __init__ ( name : project : type - : sources * : requirements * : default-build * : usage-requirements * ) - { - typed-target.__init__ $(name) : $(project) : $(type) - : $(sources) : $(requirements) : $(default-build) : $(usage-requirements) ; - } - - rule construct ( name : source-targets * : property-set ) - { - local r = [ generators.construct $(self.project) : $(self.type) - : [ property-set.create [ $(property-set).raw ] # [ feature.expand - <main-target-type>$(self.type) ] - # ] - : $(source-targets) ] ; - if ! 
$(r) - { - errors.error "unable to construct" [ full-name ] ; - } - - return $(r) ; - } - -} - -rule convert ( name type : sources * : requirements * : default-build * - : usage-requirements * ) -{ - local project = [ project.current ] ; - - # This is a circular module dependency, so it must be imported here - modules.import targets ; - targets.main-target-alternative - [ new convert-target-class $(name) : $(project) : [ type.type-from-rule-name $(type) ] - : [ targets.main-target-sources $(sources) : $(name) ] - : [ targets.main-target-requirements $(requirements) : $(project) ] - : [ targets.main-target-default-build $(default-build) : $(project) ] - : [ targets.main-target-usage-requirements $(usage-requirements) : $(project) ] - ] ; -} -IMPORT $(__name__) : convert : : convert ; diff --git a/jam-files/boost-build/tools/cw-config.jam b/jam-files/boost-build/tools/cw-config.jam deleted file mode 100644 index 1211b7c0..00000000 --- a/jam-files/boost-build/tools/cw-config.jam +++ /dev/null @@ -1,34 +0,0 @@ -#~ Copyright 2005 Rene Rivera. -#~ Distributed under the Boost Software License, Version 1.0. -#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Automatic configuration for CodeWarrior toolset. To use, just import this module. - -import os ; -import toolset : using ; - -if [ os.name ] = NT -{ - for local R in 9 8 7 - { - local cw-path = [ W32_GETREG - "HKEY_LOCAL_MACHINE\\SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions\\CodeWarrior for Windows R$(R)" - : "PATH" ] ; - local cw-version = [ W32_GETREG - "HKEY_LOCAL_MACHINE\\SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions\\CodeWarrior for Windows R$(R)" - : "VERSION" ] ; - cw-path ?= [ W32_GETREG - "HKEY_LOCAL_MACHINE\\SOFTWARE\\Metrowerks\\CodeWarrior for Windows\\$(R).0" - : "PATH" ] ; - cw-version ?= $(R).0 ; - - if $(cw-path) - { - if --debug-configuration in [ modules.peek : ARGV ] - { - ECHO "notice:" using cw ":" $(cw-version) ":" "$(cw-path)\\Other Metrowerks Tools\\Command Line Tools\\mwcc.exe" ; - } - using cw : $(cw-version) : "$(cw-path)\\Other Metrowerks Tools\\Command Line Tools\\mwcc.exe" ; - } - } -} diff --git a/jam-files/boost-build/tools/cw.jam b/jam-files/boost-build/tools/cw.jam deleted file mode 100644 index ddcbfeb2..00000000 --- a/jam-files/boost-build/tools/cw.jam +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (C) Reece H Dunn 2004 -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -# based on the msvc.jam toolset - -import property ; -import generators ; -import os ; -import type ; -import toolset : flags ; -import errors : error ; -import feature : feature get-values ; -import path ; -import sequence : unique ; -import common ; - -if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ] -{ - .debug-configuration = true ; -} - -feature.extend toolset : cw ; - -toolset.add-requirements <toolset>cw,<runtime-link>shared:<threading>multi ; - -nl = " -" ; - -rule init ( version ? 
: command * : options * ) -{ - # TODO: fix the $(command[1]) = $(compiler) issue - - setup = [ get-values <setup> : $(options) ] ; - setup ?= cwenv.bat ; - compiler = [ get-values <compiler> : $(options) ] ; - compiler ?= mwcc ; - linker = [ get-values <linker> : $(options) ] ; - linker ?= mwld ; - - local condition = [ common.check-init-parameters cw : - version $(version) ] ; - - command = [ common.get-invocation-command cw : mwcc.exe : $(command) : - [ default-paths $(version) ] ] ; - - common.handle-options cw : $(condition) : $(command) : $(options) ; - - local root = [ feature.get-values <root> : $(options) ] ; - if $(command) - { - command = [ common.get-absolute-tool-path $(command[-1]) ] ; - } - local tool-root = $(command) ; - - setup = $(tool-root)\\$(setup) ; - - # map the batch file in setup so it can be executed - - other-tools = $(tool-root:D) ; - root ?= $(other-tools:D) ; - - flags cw.link RUN_PATH $(condition) : - "$(root)\\Win32-x86 Support\\Libraries\\Runtime" - "$(root)\\Win32-x86 Support\\Libraries\\Runtime\\Libs\\MSL_All-DLLs" ; - - setup = "set \"CWFOLDER="$(root)"\" && call \""$(setup)"\" > nul " ; - - if [ os.name ] = NT - { - setup = $(setup)" -" ; - } - else - { - setup = "cmd /S /C "$(setup)" \"&&\" " ; - } - - # bind the setup command to the tool so it can be executed before the - # command - - local prefix = $(setup) ; - - flags cw.compile .CC $(condition) : $(prefix)$(compiler) ; - flags cw.link .LD $(condition) : $(prefix)$(linker) ; - flags cw.archive .LD $(condition) : $(prefix)$(linker) ; - - if [ MATCH ^([89]\\.) : $(version) ] - { - if [ os.name ] = NT - { - # The runtime libraries - flags cw.compile CFLAGS <runtime-link>static/<threading>single/<runtime-debugging>off : -runtime ss ; - flags cw.compile CFLAGS <runtime-link>static/<threading>single/<runtime-debugging>on : -runtime ssd ; - - flags cw.compile CFLAGS <runtime-link>static/<threading>multi/<runtime-debugging>off : -runtime sm ; - flags cw.compile CFLAGS <runtime-link>static/<threading>multi/<runtime-debugging>on : -runtime smd ; - - flags cw.compile CFLAGS <runtime-link>shared/<runtime-debugging>off : -runtime dm ; - flags cw.compile CFLAGS <runtime-link>shared/<runtime-debugging>on : -runtime dmd ; - } - } -} - - -local rule default-paths ( version ? 
) # FIXME -{ - local possible-paths ; - local ProgramFiles = [ common.get-program-files-dir ] ; - - # TODO: add support for cw8 and cw9 detection - - local version-6-path = $(ProgramFiles)"\\Metrowerks\\CodeWarrior" ; - possible-paths += $(version-6-path) ; - - # perform post-processing - - possible-paths - = $(possible-paths)"\\Other Metrowerks Tools\\Command Line Tools" ; - - possible-paths += [ modules.peek : PATH Path path ] ; - - return $(possible-paths) ; -} - - - - -## declare generators - -generators.register-c-compiler cw.compile.c++ : CPP : OBJ : <toolset>cw ; -generators.register-c-compiler cw.compile.c : C : OBJ : <toolset>cw ; - -generators.register-linker cw.link - : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB - : EXE - : <toolset>cw - ; -generators.register-linker cw.link.dll - : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB - : SHARED_LIB IMPORT_LIB - : <toolset>cw - ; - -generators.register-archiver cw.archive - : OBJ - : STATIC_LIB - : <toolset>cw - ; - -## compilation phase - -flags cw WHATEVER <toolset-cw:version> ; - -flags cw.compile CFLAGS <debug-symbols>on : -g ; -flags cw.compile CFLAGS <optimization>off : -O0 ; -flags cw.compile CFLAGS <optimization>speed : -O4,p ; -flags cw.compile CFLAGS <optimization>space : -O4,s ; -flags cw.compile CFLAGS <inlining>off : -inline off ; -flags cw.compile CFLAGS <inlining>on : -inline on ; -flags cw.compile CFLAGS <inlining>full : -inline all ; -flags cw.compile CFLAGS <exception-handling>off : -Cpp_exceptions off ; - - -flags cw.compile CFLAGS <rtti>on : -RTTI on ; -flags cw.compile CFLAGS <rtti>off : -RTTI off ; - -flags cw.compile CFLAGS <warnings>on : -w on ; -flags cw.compile CFLAGS <warnings>off : -w off ; -flags cw.compile CFLAGS <warnings>all : -w all ; -flags cw.compile CFLAGS <warnings-as-errors>on : -w error ; - -flags cw.compile USER_CFLAGS <cflags> : ; -flags cw.compile.c++ USER_CFLAGS <cxxflags> : ; - -flags cw.compile DEFINES <define> ; -flags cw.compile UNDEFS <undef> ; -flags cw.compile INCLUDES <include> ; - -actions compile.c -{ - $(.CC) -c -cwd include -lang c -U$(UNDEFS) $(CFLAGS) $(USER_CFLAGS) -I- -o "$(<)" @"@($(<[1]:W).rsp:E=$(nl)"$(>)" $(nl)-D$(DEFINES) $(nl)"-I$(INCLUDES)")" -} -actions compile.c++ -{ - $(.CC) -c -cwd include -lang c++ -U$(UNDEFS) $(CFLAGS) $(USER_CFLAGS) -I- -o "$(<)" @"@($(<[1]:W).rsp:E=$(nl)"$(>)" $(nl)-D$(DEFINES) $(nl)"-I$(INCLUDES)")" -} - -## linking phase - -flags cw.link DEF_FILE <def-file> ; - -flags cw LINKFLAGS : -search ; -flags cw LINKFLAGS <debug-symbols>on : -g ; -flags cw LINKFLAGS <user-interface>console : -subsystem console ; -flags cw LINKFLAGS <user-interface>gui : -subsystem windows ; -flags cw LINKFLAGS <user-interface>wince : -subsystem wince ; -flags cw LINKFLAGS <user-interface>native : -subsystem native ; -flags cw LINKFLAGS <user-interface>auto : -subsystem auto ; - -flags cw LINKFLAGS <main-target-type>LIB/<link>static : -library ; - -flags cw.link USER_LINKFLAGS <linkflags> ; -flags cw.link LINKPATH <library-path> ; - -flags cw.link FINDLIBS_ST <find-static-library> ; -flags cw.link FINDLIBS_SA <find-shared-library> ; -flags cw.link LIBRARY_OPTION <toolset>cw : "" : unchecked ; -flags cw.link LIBRARIES_MENTIONED_BY_FILE : <library-file> ; - -rule link.dll ( targets + : sources * : properties * ) -{ - DEPENDS $(<) : [ on $(<) return $(DEF_FILE) ] ; -} - -if [ os.name ] in NT -{ - actions archive - { - if exist "$(<[1])" DEL "$(<[1])" - $(.LD) -library -o "$(<[1])" @"@($(<[1]:W).rsp:E=$(nl)"$(>)" $(nl)$(LIBRARIES_MENTIONED_BY_FILE) 
$(nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST:S=.lib)" $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA:S=.lib)")" - } -} -else # cygwin -{ - actions archive - { - _bbv2_out_="$(<)" - if test -f "$_bbv2_out_" ; then - _bbv2_existing_="$(<:W)" - fi - $(.LD) -library -o "$(<:W)" $_bbv2_existing_ @"@($(<[1]:W).rsp:E=$(nl)"$(>)" $(nl)$(LIBRARIES_MENTIONED_BY_FILE) $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST:S=.lib)" $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA:S=.lib)")" - } -} - -actions link bind DEF_FILE -{ - $(.LD) -o "$(<[1]:W)" -L"$(LINKPATH)" $(LINKFLAGS) $(USER_LINKFLAGS) @"@($(<[1]:W).rsp:E=$(nl)"$(>)" $(nl)$(LIBRARIES_MENTIONED_BY_FILE) $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST:S=.lib)" $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA:S=.lib)")" -} - -actions link.dll bind DEF_FILE -{ - $(.LD) -shared -o "$(<[1]:W)" -implib "$(<[2]:W)" -L"$(LINKPATH)" $(LINKFLAGS) -f"$(DEF_FILE)" $(USER_LINKFLAGS) @"@($(<[1]:W).rsp:E=$(nl)"$(>)" $(nl)$(LIBRARIES_MENTIONED_BY_FILE) $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST:S=.lib)" $(nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA:S=.lib)")" -} - diff --git a/jam-files/boost-build/tools/darwin.jam b/jam-files/boost-build/tools/darwin.jam deleted file mode 100644 index bb6dd45e..00000000 --- a/jam-files/boost-build/tools/darwin.jam +++ /dev/null @@ -1,568 +0,0 @@ -# Copyright 2003 Christopher Currie -# Copyright 2006 Dave Abrahams -# Copyright 2003, 2004, 2005, 2006 Vladimir Prus -# Copyright 2005-2007 Mat Marcus -# Copyright 2005-2007 Adobe Systems Incorporated -# Copyright 2007-2010 Rene Rivera -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Please see http://article.gmane.org/gmane.comp.lib.boost.build/3389/ -# for explanation why it's a separate toolset. - -import feature : feature ; -import toolset : flags ; -import type ; -import common ; -import generators ; -import path : basename ; -import version ; -import property-set ; -import regex ; -import errors ; - -## Use a framework. -feature framework : : free ; - -## The MacOSX version to compile for, which maps to the SDK to use (sysroot). -feature macosx-version : : propagated link-incompatible symmetric optional ; - -## The minimal MacOSX version to target. -feature macosx-version-min : : propagated optional ; - -## A dependency, that is forced to be included in the link. -feature force-load : : free dependency incidental ; - -############################################################################# - -if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ] -{ - .debug-configuration = true ; -} - -feature.extend toolset : darwin ; -import gcc ; -toolset.inherit-generators darwin : gcc : gcc.mingw.link gcc.mingw.link.dll ; - -generators.override darwin.prebuilt : builtin.prebuilt ; -generators.override darwin.searched-lib-generator : searched-lib-generator ; - -# Override default do-nothing generators. 
-generators.override darwin.compile.c.pch : pch.default-c-pch-generator ; -generators.override darwin.compile.c++.pch : pch.default-cpp-pch-generator ; - -type.set-generated-target-suffix PCH : <toolset>darwin : gch ; - -toolset.inherit-rules darwin : gcc : localize ; -toolset.inherit-flags darwin : gcc - : <runtime-link>static - <architecture>arm/<address-model>32 - <architecture>arm/<address-model>64 - <architecture>arm/<instruction-set> - <architecture>x86/<address-model>32 - <architecture>x86/<address-model>64 - <architecture>x86/<instruction-set> - <architecture>power/<address-model>32 - <architecture>power/<address-model>64 - <architecture>power/<instruction-set> ; - -# Options: -# -# <root>PATH -# Platform root path. The common autodetection will set this to -# "/Developer". And when a command is given it will be set to -# the corresponding "*.platform/Developer" directory. -# -rule init ( version ? : command * : options * : requirement * ) -{ - # First time around, figure what is host OSX version - if ! $(.host-osx-version) - { - .host-osx-version = [ MATCH "^([0-9.]+)" - : [ SHELL "/usr/bin/sw_vers -productVersion" ] ] ; - if $(.debug-configuration) - { - ECHO notice: OSX version on this machine is $(.host-osx-version) ; - } - } - - # - The root directory of the tool install. - local root = [ feature.get-values <root> : $(options) ] ; - - # - The bin directory where to find the commands to execute. - local bin ; - - # - The configured compile driver command. - local command = [ common.get-invocation-command darwin : g++ : $(command) ] ; - - # The version as reported by the compiler - local real-version ; - - # - Autodetect the root and bin dir if not given. - if $(command) - { - bin ?= [ common.get-absolute-tool-path $(command[1]) ] ; - if $(bin) = "/usr/bin" - { - root ?= /Developer ; - } - else - { - local r = $(bin:D) ; - r = $(r:D) ; - root ?= $(r) ; - } - } - - # - Autodetect the version if not given. - if $(command) - { - # - The 'command' variable can have multiple elements. When calling - # the SHELL builtin we need a single string. - local command-string = $(command:J=" ") ; - real-version = [ MATCH "^([0-9.]+)" - : [ SHELL "$(command-string) -dumpversion" ] ] ; - version ?= $(real-version) ; - } - - .real-version.$(version) = $(real-version) ; - - # - Define the condition for this toolset instance. - local condition = - [ common.check-init-parameters darwin $(requirement) : version $(version) ] ; - - # - Set the toolset generic common options. - common.handle-options darwin : $(condition) : $(command) : $(options) ; - - # - GCC 4.0 and higher in Darwin does not have -fcoalesce-templates. - if $(real-version) < "4.0.0" - { - flags darwin.compile.c++ OPTIONS $(condition) : -fcoalesce-templates ; - } - # - GCC 4.2 and higher in Darwin does not have -Wno-long-double. - if $(real-version) < "4.2.0" - { - flags darwin.compile OPTIONS $(condition) : -Wno-long-double ; - } - - # - Set the link flags common with the GCC toolset. - gcc.init-link-flags darwin darwin $(condition) ; - - # - The symbol strip program. - local strip ; - if <striper> in $(options) - { - # We can turn off strip by specifying it as empty. In which - # case we switch to using the linker to do the strip. 
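Version autodetection above shells out to the compiler driver; the same probe in Python (a sketch assuming a g++-compatible driver on PATH):

    import re, subprocess

    def dump_version(command='g++'):
        # `g++ -dumpversion` prints e.g. '4.2.1'; keep the leading dotted
        # number, mirroring the MATCH "^([0-9.]+)" above.
        out = subprocess.run([command, '-dumpversion'],
                             capture_output=True, text=True).stdout
        m = re.match(r'[0-9.]+', out)
        return m.group(0) if m else None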
- flags darwin.link.dll OPTIONS - $(condition)/<main-target-type>LIB/<link>shared/<address-model>32/<strip>on : -Wl,-x ; - flags darwin.link.dll OPTIONS - $(condition)/<main-target-type>LIB/<link>shared/<address-model>/<strip>on : -Wl,-x ; - flags darwin.link OPTIONS - $(condition)/<main-target-type>EXE/<address-model>32/<strip>on : -s ; - flags darwin.link OPTIONS - $(condition)/<main-target-type>EXE/<address-model>/<strip>on : -s ; - } - else - { - # Otherwise we need to find a strip program to use. And hence - # also tell the link action that we need to use a strip - # post-process. - flags darwin.link NEED_STRIP $(condition)/<strip>on : "" ; - strip = - [ common.get-invocation-command darwin - : strip : [ feature.get-values <striper> : $(options) ] : $(bin) : search-path ] ; - flags darwin.link .STRIP $(condition) : $(strip[1]) ; - if $(.debug-configuration) - { - ECHO notice: using strip for $(condition) at $(strip[1]) ; - } - } - - # - The archive builder (libtool is the default as creating - # archives in darwin is complicated. - local archiver = - [ common.get-invocation-command darwin - : libtool : [ feature.get-values <archiver> : $(options) ] : $(bin) : search-path ] ; - flags darwin.archive .LIBTOOL $(condition) : $(archiver[1]) ; - if $(.debug-configuration) - { - ECHO notice: using archiver for $(condition) at $(archiver[1]) ; - } - - # - Initialize the SDKs available in the root for this tool. - local sdks = [ init-available-sdk-versions $(condition) : $(root) ] ; - - #~ ECHO --- ; - #~ ECHO --- bin :: $(bin) ; - #~ ECHO --- root :: $(root) ; - #~ ECHO --- version :: $(version) ; - #~ ECHO --- condition :: $(condition) ; - #~ ECHO --- strip :: $(strip) ; - #~ ECHO --- archiver :: $(archiver) ; - #~ ECHO --- sdks :: $(sdks) ; - #~ ECHO --- ; - #~ EXIT ; -} - -# Add and set options for a discovered SDK version. -local rule init-sdk ( condition * : root ? : version + : version-feature ? ) -{ - local rule version-to-feature ( version + ) - { - switch $(version[1]) - { - case iphone* : - { - return $(version[1])-$(version[2-]:J=.) ; - } - case mac* : - { - return $(version[2-]:J=.) ; - } - case * : - { - return $(version:J=.) ; - } - } - } - - if $(version-feature) - { - if $(.debug-configuration) - { - ECHO notice: available sdk for $(condition)/<macosx-version>$(version-feature) at $(sdk) ; - } - - # Add the version to the features for specifying them. - if ! $(version-feature) in [ feature.values macosx-version ] - { - feature.extend macosx-version : $(version-feature) ; - } - if ! $(version-feature) in [ feature.values macosx-version-min ] - { - feature.extend macosx-version-min : $(version-feature) ; - } - - # Set the flags the version needs to compile with, first - # generic options. - flags darwin.compile OPTIONS $(condition)/<macosx-version>$(version-feature) - : -isysroot $(sdk) ; - flags darwin.link OPTIONS $(condition)/<macosx-version>$(version-feature) - : -isysroot $(sdk) ; - - # Then device variation options. - switch $(version[1]) - { - case iphonesim* : - { - local N = $(version[2]) ; - if ! $(version[3]) { N += 00 ; } - else if [ regex.match (..) : $(version[3]) ] { N += $(version[3]) ; } - else { N += 0$(version[3]) ; } - if ! $(version[4]) { N += 00 ; } - else if [ regex.match (..) 
: $(version[4]) ] { N += $(version[4]) ; } - else { N += 0$(version[4]) ; } - N = $(N:J=) ; - flags darwin.compile OPTIONS <macosx-version-min>$(version-feature) - : -D__IPHONE_OS_VERSION_MIN_REQUIRED=$(N) ; - flags darwin.link OPTIONS <macosx-version-min>$(version-feature) - : -D__IPHONE_OS_VERSION_MIN_REQUIRED=$(N) ; - } - - case iphone* : - { - flags darwin.compile OPTIONS <macosx-version-min>$(version-feature) - : -miphoneos-version-min=$(version[2-]:J=.) ; - flags darwin.link OPTIONS <macosx-version-min>$(version-feature) - : -miphoneos-version-min=$(version[2-]:J=.) ; - } - - case mac* : - { - flags darwin.compile OPTIONS <macosx-version-min>$(version-feature) - : -mmacosx-version-min=$(version[2-]:J=.) ; - flags darwin.link OPTIONS <macosx-version-min>$(version-feature) - : -mmacosx-version-min=$(version[2-]:J=.) ; - } - } - - return $(version-feature) ; - } - else if $(version[4]) - { - # We have a patch version of an SDK. We want to set up - # both the specific patch version, and the minor version. - # So we recurse to set up the minor version. Plus the minor version. - return - [ init-sdk $(condition) : $(root) - : $(version[1-3]) : [ version-to-feature $(version[1-3]) ] ] - [ init-sdk $(condition) : $(root) - : $(version) : [ version-to-feature $(version) ] ] ; - } - else - { - # Yes, this is intentionally recursive. - return - [ init-sdk $(condition) : $(root) - : $(version) : [ version-to-feature $(version) ] ] ; - } -} - -# Determine the MacOSX SDK versions installed and their locations. -local rule init-available-sdk-versions ( condition * : root ? ) -{ - root ?= /Developer ; - local sdks-root = $(root)/SDKs ; - local sdks = [ GLOB $(sdks-root) : MacOSX*.sdk iPhoneOS*.sdk iPhoneSimulator*.sdk ] ; - local result ; - for local sdk in $(sdks) - { - local sdk-match = [ MATCH ([^0-9]+)([0-9]+)[.]([0-9x]+)[.]?([0-9x]+)? : $(sdk:D=) ] ; - local sdk-platform = $(sdk-match[1]:L) ; - local sdk-version = $(sdk-match[2-]) ; - if $(sdk-version) - { - switch $(sdk-platform) - { - case macosx : - { - sdk-version = mac $(sdk-version) ; - } - case iphoneos : - { - sdk-version = iphone $(sdk-version) ; - } - case iphonesimulator : - { - sdk-version = iphonesim $(sdk-version) ; - } - case * : - { - sdk-version = $(sdk-version:J=-) ; - } - } - result += [ init-sdk $(condition) : $(sdk) : $(sdk-version) ] ; - } - } - return $(result) ; -} - -# Generic options. -flags darwin.compile OPTIONS <flags> ; - -# The following adds objective-c support to darwin. -# Thanks to http://thread.gmane.org/gmane.comp.lib.boost.build/13759 - -generators.register-c-compiler darwin.compile.m : OBJECTIVE_C : OBJ : <toolset>darwin ; -generators.register-c-compiler darwin.compile.mm : OBJECTIVE_CPP : OBJ : <toolset>darwin ; - -rule setup-address-model ( targets * : sources * : properties * ) -{ - local ps = [ property-set.create $(properties) ] ; - local arch = [ $(ps).get <architecture> ] ; - local address-model = [ $(ps).get <address-model> ] ; - local osx-version = [ $(ps).get <macosx-version> ] ; - local gcc-version = [ $(ps).get <toolset-darwin:version> ] ; - gcc-version = $(.real-version.$(gcc-version)) ; - local options ; - - local support-ppc64 = 1 ; - - osx-version ?= $(.host-osx-version) ; - - switch $(osx-version) - { - case iphone* : - { - support-ppc64 = ; - } - - case * : - if $(osx-version) && ! [ version.version-less [ regex.split $(osx-version) \\. 
] : 10 6 ]
-        {
-            # When targeting 10.6:
-            # - gcc 4.2 will give a compiler error if ppc64 compilation is requested
-            # - gcc 4.0 will compile fine, somehow, but then fail at link time
-            support-ppc64 = ;
-        }
-    }
-    switch $(arch)
-    {
-        case combined :
-        {
-            if $(address-model) = 32_64 {
-                if $(support-ppc64) {
-                    options = -arch i386 -arch ppc -arch x86_64 -arch ppc64 ;
-                } else {
-                    # Build 3-way binary
-                    options = -arch i386 -arch ppc -arch x86_64 ;
-                }
-            } else if $(address-model) = 64 {
-                if $(support-ppc64) {
-                    options = -arch x86_64 -arch ppc64 ;
-                } else {
-                    errors.user-error "64-bit PPC compilation is not supported when targeting OSX 10.6 or later" ;
-                }
-            } else {
-                options = -arch i386 -arch ppc ;
-            }
-        }
-
-        case x86 :
-        {
-            if $(address-model) = 32_64 {
-                options = -arch i386 -arch x86_64 ;
-            } else if $(address-model) = 64 {
-                options = -arch x86_64 ;
-            } else {
-                options = -arch i386 ;
-            }
-        }
-
-        case power :
-        {
-            if ! $(support-ppc64)
-                && ( $(address-model) = 32_64 || $(address-model) = 64 )
-            {
-                errors.user-error "64-bit PPC compilation is not supported when targeting OSX 10.6 or later" ;
-            }
-
-            if $(address-model) = 32_64 {
-                options = -arch ppc -arch ppc64 ;
-            } else if $(address-model) = 64 {
-                options = -arch ppc64 ;
-            } else {
-                options = -arch ppc ;
-            }
-        }
-
-        case arm :
-        {
-            options = -arch armv6 ;
-        }
-    }
-
-    if $(options)
-    {
-        OPTIONS on $(targets) += $(options) ;
-    }
-}
-
-rule setup-threading ( targets * : sources * : properties * )
-{
-    gcc.setup-threading $(targets) : $(sources) : $(properties) ;
-}
-
-rule setup-fpic ( targets * : sources * : properties * )
-{
-    gcc.setup-fpic $(targets) : $(sources) : $(properties) ;
-}
-
-rule compile.m ( targets * : sources * : properties * )
-{
-    LANG on $(<) = "-x objective-c" ;
-    gcc.setup-fpic $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-}
-
-actions compile.m
-{
-    "$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"
-}
-
-rule compile.mm ( targets * : sources * : properties * )
-{
-    LANG on $(<) = "-x objective-c++" ;
-    gcc.setup-fpic $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-}
-
-actions compile.mm
-{
-    "$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"
-}
-
-# Set the max header padding to allow renaming of libs for installation.
-flags darwin.link.dll OPTIONS : -headerpad_max_install_names ;
-
-# To link the static runtime we need to link to all the core runtime libraries.
-flags darwin.link OPTIONS <runtime-link>static
-    : -nodefaultlibs -shared-libgcc -lstdc++-static -lgcc_eh -lgcc -lSystem ;
-
-# Strip as much as possible when optimizing.
-flags darwin.link OPTIONS <optimization>speed : -Wl,-dead_strip -no_dead_strip_inits_and_terms ;
-flags darwin.link OPTIONS <optimization>space : -Wl,-dead_strip -no_dead_strip_inits_and_terms ;
-
-# Dynamic/shared linking.
-flags darwin.compile OPTIONS <link>shared : -dynamic ;
-
-# Misc options.
-flags darwin.compile OPTIONS : -gdwarf-2 -fexceptions ;
-#~ flags darwin.link OPTIONS : -fexceptions ;
-
-# Add the framework names to use.
-flags darwin.link FRAMEWORK <framework> ;
-
-#
-flags darwin.link FORCE_LOAD <force-load> ;
-
-# This flag is useful for debugging the link step;
-# uncomment to see what libtool is doing under the hood.
-#~ flags darwin.link.dll OPTIONS : -Wl,-v ;
-
-_ = " " ;
-
-# Set up the -F option to include the paths to any frameworks used.
-local rule prepare-framework-path ( target + )
-{
-    # The -framework option only takes the basename of the framework.
-    # The -F option specifies the directories where a framework
-    # is searched for. So, if we find a <framework> feature
-    # with some path, we need to generate a corresponding -F option.
-    local framework-paths = [ on $(target) return $(FRAMEWORK:D) ] ;
-
-    # Be sure to generate no -F if there's no path.
-    for local framework-path in $(framework-paths)
-    {
-        if $(framework-path) != ""
-        {
-            FRAMEWORK_PATH on $(target) += -F$(framework-path) ;
-        }
-    }
-}
-
-rule link ( targets * : sources * : properties * )
-{
-    DEPENDS $(targets) : [ on $(targets) return $(FORCE_LOAD) ] ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-    prepare-framework-path $(<) ;
-}
-
-# Note that using strip without any options was reported to result in broken
-# binaries, at least on OS X 10.5.5, see:
-# http://svn.boost.org/trac/boost/ticket/2347
-# So we pass -S -x.
-actions link bind LIBRARIES FORCE_LOAD
-{
-    "$(CONFIG_COMMAND)" -L"$(LINKPATH)" -o "$(<)" "$(>)" -Wl,-force_load$(_)"$(FORCE_LOAD)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) $(FRAMEWORK_PATH) -framework$(_)$(FRAMEWORK:D=:S=) $(OPTIONS) $(USER_OPTIONS)
-    $(NEED_STRIP)"$(.STRIP)" $(NEED_STRIP)-S $(NEED_STRIP)-x $(NEED_STRIP)"$(<)"
-}
-
-rule link.dll ( targets * : sources * : properties * )
-{
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-    prepare-framework-path $(<) ;
-}
-
-actions link.dll bind LIBRARIES
-{
-    "$(CONFIG_COMMAND)" -dynamiclib -Wl,-single_module -install_name "$(<:B)$(<:S)" -L"$(LINKPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) $(FRAMEWORK_PATH) -framework$(_)$(FRAMEWORK:D=:S=) $(OPTIONS) $(USER_OPTIONS)
-}
-
-# We use libtool instead of ar to support universal binary linking.
-# TODO: Find a way to use the underlying tools, i.e. lipo, to do this.
-actions piecemeal archive
-{
-    "$(.LIBTOOL)" -static -o "$(<:T)" $(ARFLAGS) "$(>:T)"
-}
diff --git a/jam-files/boost-build/tools/darwin.py b/jam-files/boost-build/tools/darwin.py
deleted file mode 100644
index c2919606..00000000
--- a/jam-files/boost-build/tools/darwin.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (C) Christopher Currie 2003. Permission to copy, use,
-# modify, sell and distribute this software is granted provided this
-# copyright notice appears in all copies. This software is provided
-# "as is" without express or implied warranty, and with no claim as to
-# its suitability for any purpose.
-
-# Please see http://article.gmane.org/gmane.comp.lib.boost.build/3389/
-# for explanation why it's a separate toolset.
-
-import common, gcc, builtin
-from b2.build import feature, toolset, type, action, generators
-from b2.util.utility import *
-
-toolset.register ('darwin')
-
-toolset.inherit_generators ('darwin', [], 'gcc')
-toolset.inherit_flags ('darwin', 'gcc')
-toolset.inherit_rules ('darwin', 'gcc')
-
-def init (version = None, command = None, options = None):
-    options = to_seq (options)
-
-    condition = common.check_init_parameters ('darwin', None, ('version', version))
-
-    command = common.get_invocation_command ('darwin', 'g++', command)
-
-    common.handle_options ('darwin', condition, command, options)
-
-    gcc.init_link_flags ('darwin', 'darwin', condition)
-
-# Darwin has a different shared library suffix.
-type.set_generated_target_suffix ('SHARED_LIB', ['<toolset>darwin'], 'dylib')
-
-# We need to be able to tell the type of .dylib files.
-type.register_suffixes ('dylib', 'SHARED_LIB')
-
-feature.feature ('framework', [], ['free'])
-
-toolset.flags ('darwin.compile', 'OPTIONS', '<link>shared', ['-dynamic'])
-toolset.flags ('darwin.compile', 'OPTIONS', None, ['-Wno-long-double', '-no-cpp-precomp'])
-toolset.flags ('darwin.compile.c++', 'OPTIONS', None, ['-fcoalesce-templates'])
-
-toolset.flags ('darwin.link', 'FRAMEWORK', '<framework>')
-
-# This flag is useful for debugging the link step;
-# uncomment to see what libtool is doing under the hood.
-# toolset.flags ('darwin.link.dll', 'OPTIONS', None, '[-Wl,-v'])
-
-action.register ('darwin.compile.cpp', None, ['$(CONFIG_COMMAND) $(ST_OPTIONS) -L"$(LINKPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) -framework$(_)$(FRAMEWORK) $(OPTIONS)'])
-
-# TODO: how to set 'bind LIBRARIES'?
-action.register ('darwin.link.dll', None, ['$(CONFIG_COMMAND) -dynamiclib -L"$(LINKPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) -framework$(_)$(FRAMEWORK) $(OPTIONS)'])
-
-def darwin_archive (manager, targets, sources, properties):
-    pass
-
-action.register ('darwin.archive', darwin_archive, ['ar -c -r -s $(ARFLAGS) "$(<:T)" "$(>:T)"'])
diff --git a/jam-files/boost-build/tools/dmc.jam b/jam-files/boost-build/tools/dmc.jam
deleted file mode 100644
index 8af8725a..00000000
--- a/jam-files/boost-build/tools/dmc.jam
+++ /dev/null
@@ -1,134 +0,0 @@
-# Digital Mars C++
-
-# (C) Copyright Christof Meerwald 2003.
-# (C) Copyright Aleksey Gurtovoy 2004.
-# (C) Copyright Arjan Knepper 2006.
-#
-# Distributed under the Boost Software License, Version 1.0. (See
-# accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-
-# The following #// line will be used by the regression test table generation
-# program as the column heading for HTML tables. Must not include version number.
-#//<a href="http://www.digitalmars.com/">Digital<br>Mars C++</a>
-
-import feature generators common ;
-import toolset : flags ;
-import sequence regex ;
-
-feature.extend toolset : dmc ;
-
-rule init ( version ? : command * : options * )
-{
-    local condition = [ common.check-init-parameters dmc : version $(version) ] ;
-
-    local command = [ common.get-invocation-command dmc : dmc : $(command) ] ;
-    command ?= dmc ;
-
-    common.handle-options dmc : $(condition) : $(command) : $(options) ;
-
-    if $(command)
-    {
-        command = [ common.get-absolute-tool-path $(command[-1]) ] ;
-    }
-    root = $(command:D) ;
-
-    if $(root)
-    {
-        # The DMC linker is sensitive to the direction of slashes, and
-        # won't link if forward slashes are used in the command.
- root = [ sequence.join [ regex.split $(root) "/" ] : "\\" ] ; - flags dmc .root $(condition) : $(root)\\bin\\ ; - } - else - { - flags dmc .root $(condition) : "" ; - } -} - - -# Declare generators -generators.register-linker dmc.link : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB : EXE : <toolset>dmc ; -generators.register-linker dmc.link.dll : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB : SHARED_LIB IMPORT_LIB : <toolset>dmc ; - -generators.register-archiver dmc.archive : OBJ : STATIC_LIB : <toolset>dmc ; -generators.register-c-compiler dmc.compile.c++ : CPP : OBJ : <toolset>dmc ; -generators.register-c-compiler dmc.compile.c : C : OBJ : <toolset>dmc ; - - -# Declare flags -# dmc optlink has some limitation on the amount of debug-info included. Therefore only linenumbers are enabled in debug builds. -# flags dmc.compile OPTIONS <debug-symbols>on : -g ; -flags dmc.compile OPTIONS <debug-symbols>on : -gl ; -flags dmc.link OPTIONS <debug-symbols>on : /CO /NOPACKF /DEBUGLI ; -flags dmc.link OPTIONS <debug-symbols>off : /PACKF ; - -flags dmc.compile OPTIONS <optimization>off : -S -o+none ; -flags dmc.compile OPTIONS <optimization>speed : -o+time ; -flags dmc.compile OPTIONS <optimization>space : -o+space ; -flags dmc.compile OPTIONS <exception-handling>on : -Ae ; -flags dmc.compile OPTIONS <rtti>on : -Ar ; -# FIXME: -# Compiling sources to be linked into a shared lib (dll) the -WD cflag should be used -# Compiling sources to be linked into a static lib (lib) or executable the -WA cflag should be used -# But for some reason the -WD cflag is always in use. -# flags dmc.compile OPTIONS <link>shared : -WD ; -# flags dmc.compile OPTIONS <link>static : -WA ; - -# Note that these two options actually imply multithreading support on DMC -# because there is no single-threaded dynamic runtime library. Specifying -# <threading>multi would be a bad idea, though, because no option would be -# matched when the build uses the default settings of <runtime-link>dynamic -# and <threading>single. 
-flags dmc.compile OPTIONS <runtime-debugging>off/<runtime-link>shared : -ND ; -flags dmc.compile OPTIONS <runtime-debugging>on/<runtime-link>shared : -ND ; - -flags dmc.compile OPTIONS <runtime-debugging>off/<runtime-link>static/<threading>single : ; -flags dmc.compile OPTIONS <runtime-debugging>on/<runtime-link>static/<threading>single : ; -flags dmc.compile OPTIONS <runtime-debugging>off/<runtime-link>static/<threading>multi : -D_MT ; -flags dmc.compile OPTIONS <runtime-debugging>on/<runtime-link>static/<threading>multi : -D_MT ; - -flags dmc.compile OPTIONS : <cflags> ; -flags dmc.compile.c++ OPTIONS : <cxxflags> ; - -flags dmc.compile DEFINES : <define> ; -flags dmc.compile INCLUDES : <include> ; - -flags dmc.link <linkflags> ; -flags dmc.archive OPTIONS <arflags> ; - -flags dmc LIBPATH <library-path> ; -flags dmc LIBRARIES <library-file> ; -flags dmc FINDLIBS <find-library-sa> ; -flags dmc FINDLIBS <find-library-st> ; - -actions together link bind LIBRARIES -{ - "$(.root)link" $(OPTIONS) /NOI /DE /XN "$(>)" , "$(<[1])" ,, $(LIBRARIES) user32.lib kernel32.lib "$(FINDLIBS:S=.lib)" , "$(<[2]:B).def" -} - -actions together link.dll bind LIBRARIES -{ - echo LIBRARY "$(<[1])" > $(<[2]:B).def - echo DESCRIPTION 'A Library' >> $(<[2]:B).def - echo EXETYPE NT >> $(<[2]:B).def - echo SUBSYSTEM WINDOWS >> $(<[2]:B).def - echo CODE EXECUTE READ >> $(<[2]:B).def - echo DATA READ WRITE >> $(<[2]:B).def - "$(.root)link" $(OPTIONS) /NOI /DE /XN /ENTRY:_DllMainCRTStartup /IMPLIB:"$(<[2])" "$(>)" $(LIBRARIES) , "$(<[1])" ,, user32.lib kernel32.lib "$(FINDLIBS:S=.lib)" , "$(<[2]:B).def" -} - -actions compile.c -{ - "$(.root)dmc" -c $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -o"$(<)" "$(>)" -} - -actions compile.c++ -{ - "$(.root)dmc" -cpp -c -Ab $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -o"$(<)" "$(>)" -} - -actions together piecemeal archive -{ - "$(.root)lib" $(OPTIONS) -c -n -p256 "$(<)" "$(>)" -} diff --git a/jam-files/boost-build/tools/docutils.jam b/jam-files/boost-build/tools/docutils.jam deleted file mode 100644 index bf061617..00000000 --- a/jam-files/boost-build/tools/docutils.jam +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -# Support for docutils ReStructuredText processing. - -import type ; -import scanner ; -import generators ; -import os ; -import common ; -import toolset ; -import path ; -import feature : feature ; -import property ; - -.initialized = ; - -type.register ReST : rst ; - -class rst-scanner : common-scanner -{ - rule __init__ ( paths * ) - { - common-scanner.__init__ . $(paths) ; - } - - rule pattern ( ) - { - return "^[ ]*\\.\\.[ ]+include::[ ]+([^ -]+)" - "^[ ]*\\.\\.[ ]+image::[ ]+([^ -]+)" - "^[ ]*\\.\\.[ ]+figure::[ ]+([^ -]+)" - ; - } -} - -scanner.register rst-scanner : include ; -type.set-scanner ReST : rst-scanner ; - -generators.register-standard docutils.html : ReST : HTML ; - -rule init ( docutils-dir ? : tools-dir ? ) -{ - docutils-dir ?= [ modules.peek : DOCUTILS_DIR ] ; - tools-dir ?= $(docutils-dir)/tools ; - - if ! $(.initialized) - { - .initialized = true ; - .docutils-dir = $(docutils-dir) ; - .tools-dir = $(tools-dir:R="") ; - - .setup = [ - common.prepend-path-variable-command PYTHONPATH - : $(.docutils-dir) $(.docutils-dir)/extras ] ; - } -} - -rule html ( target : source : properties * ) -{ - if ! 
[ on $(target) return $(RST2XXX) ] - { - local python-cmd = [ property.select <python.interpreter> : $(properties) ] ; - RST2XXX on $(target) = $(python-cmd:G=:E="python") $(.tools-dir)/rst2html.py ; - } -} - - -feature docutils : : free ; -feature docutils-html : : free ; -feature docutils-cmd : : free ; -toolset.flags docutils COMMON-FLAGS : <docutils> ; -toolset.flags docutils HTML-FLAGS : <docutils-html> ; -toolset.flags docutils RST2XXX : <docutils-cmd> ; - -actions html -{ - $(.setup) - "$(RST2XXX)" $(COMMON-FLAGS) $(HTML-FLAGS) $(>) $(<) -} - diff --git a/jam-files/boost-build/tools/doxproc.py b/jam-files/boost-build/tools/doxproc.py deleted file mode 100644 index 4cbd5edd..00000000 --- a/jam-files/boost-build/tools/doxproc.py +++ /dev/null @@ -1,859 +0,0 @@ -#!/usr/bin/python -# Copyright 2006 Rene Rivera -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -''' -Processing of Doxygen generated XML. -''' - -import os -import os.path -import sys -import time -import string -import getopt -import glob -import re -import xml.dom.minidom - - -def usage(): - print ''' -Usage: - %s options - -Options: - --xmldir Directory with the Doxygen xml result files. - --output Write the output BoostBook to the given location. - --id The ID of the top level BoostBook section. - --title The title of the top level BoostBook section. - --enable-index Generate additional index sections for classes and - types. -''' % ( sys.argv[0] ) - - -def get_args( argv = sys.argv[1:] ): - spec = [ - 'xmldir=', - 'output=', - 'id=', - 'title=', - 'enable-index', - 'help' ] - options = { - '--xmldir' : 'xml', - '--output' : None, - '--id' : 'dox', - '--title' : 'Doxygen' - } - ( option_pairs, other ) = getopt.getopt( argv, '', spec ) - map( lambda x: options.__setitem__( x[0], x[1] ), option_pairs ) - - if options.has_key( '--help' ): - usage() - sys.exit(1) - - return { - 'xmldir' : options['--xmldir'], - 'output' : options['--output'], - 'id' : options['--id'], - 'title' : options['--title'], - 'index' : options.has_key('--enable-index') - } - -def if_attribute(node, attribute, true_value, false_value=None): - if node.getAttribute(attribute) == 'yes': - return true_value - else: - return false_value - -class Doxygen2BoostBook: - - def __init__( self, **kwargs ): - ## - self.args = kwargs - self.args.setdefault('id','') - self.args.setdefault('title','') - self.args.setdefault('last_revision', time.asctime()) - self.args.setdefault('index', False) - self.id = '%(id)s.reference' % self.args - self.args['id'] = self.id - #~ This is our template BoostBook document we insert the generated content into. - self.boostbook = xml.dom.minidom.parseString('''<?xml version="1.0" encoding="UTF-8"?> -<section id="%(id)s" name="%(title)s" last-revision="%(last_revision)s"> - <title>%(title)s</title> - <library-reference id="%(id)s.headers"> - <title>Headers</title> - </library-reference> - <index id="%(id)s.classes"> - <title>Classes</title> - </index> - <index id="%(id)s.index"> - <title>Index</title> - </index> -</section> -''' % self.args ) - self.section = { - 'headers' : self._getChild('library-reference',id='%(id)s.headers' % self.args), - 'classes' : self._getChild('index',id='%(id)s.classes' % self.args), - 'index' : self._getChild('index',id='%(id)s.index' % self.args) - } - #~ Remove the index sections if we aren't generating it. 
-        if not self.args['index']:
-            self.section['classes'].parentNode.removeChild(self.section['classes'])
-            self.section['classes'].unlink()
-            del self.section['classes']
-            self.section['index'].parentNode.removeChild(self.section['index'])
-            self.section['index'].unlink()
-            del self.section['index']
-        #~ The symbols, per the Doxygen notion, that we translated.
-        self.symbols = {}
-        #~ Map of Doxygen IDs and BoostBook IDs, so we can translate as needed.
-        self.idmap = {}
-        #~ Marks generation, to prevent redoing it.
-        self.generated = False
-
-    #~ Add a Doxygen-generated XML document to the content we are translating.
-    def addDox( self, document ):
-        self._translateNode(document.documentElement)
-
-    #~ Turns the internal XML tree into an output UTF-8 string.
-    def tostring( self ):
-        self._generate()
-        #~ return self.boostbook.toprettyxml(' ')
-        return self.boostbook.toxml('utf-8')
-
-    #~ Does post-processing on the partially generated content to generate
-    #~ additional info now that we have the complete source documents.
-    def _generate( self ):
-        if not self.generated:
-            self.generated = True
-            symbols = self.symbols.keys()
-            symbols.sort()
-            #~ Populate the header section.
-            for symbol in symbols:
-                if self.symbols[symbol]['kind'] in ('header'):
-                    self.section['headers'].appendChild(self.symbols[symbol]['dom'])
-            for symbol in symbols:
-                if self.symbols[symbol]['kind'] not in ('namespace', 'header'):
-                    container = self._resolveContainer(self.symbols[symbol],
-                        self.symbols[self.symbols[symbol]['header']]['dom'])
-                    if container.nodeName != 'namespace':
-                        ## The current BoostBook to Docbook translation doesn't
-                        ## respect, nor assign, IDs to inner types of any kind.
-                        ## So nuke the ID entry so as not to create bogus links.
-                        del self.idmap[self.symbols[symbol]['id']]
-                    container.appendChild(self.symbols[symbol]['dom'])
-            self._rewriteIDs(self.boostbook.documentElement)
-
-    #~ Rewrite the various IDs from Doxygen references to the newly created
-    #~ BoostBook references.
-    def _rewriteIDs( self, node ):
-        if node.nodeName in ('link'):
-            if (self.idmap.has_key(node.getAttribute('linkend'))):
-                #~ A link, and we have someplace to repoint it at.
-                node.setAttribute('linkend',self.idmap[node.getAttribute('linkend')])
-            else:
-                #~ A link, but we don't have a generated target for it.
-                node.removeAttribute('linkend')
-        elif hasattr(node,'hasAttribute') and node.hasAttribute('id') and self.idmap.has_key(node.getAttribute('id')):
-            #~ Simple ID, and we have a translation.
-            node.setAttribute('id',self.idmap[node.getAttribute('id')])
-        #~ Recurse, and iterate: a depth-first traversal, which turns out to be
-        #~ left-to-right and top-to-bottom for the document.
-        if node.firstChild:
-            self._rewriteIDs(node.firstChild)
-        if node.nextSibling:
-            self._rewriteIDs(node.nextSibling)
-
-    def _resolveContainer( self, cpp, root ):
-        container = root
-        for ns in cpp['namespace']:
-            node = self._getChild('namespace',name=ns,root=container)
-            if not node:
-                node = container.appendChild(
-                    self._createNode('namespace',name=ns))
-            container = node
-        for inner in cpp['name'].split('::'):
-            node = self._getChild(name=inner,root=container)
-            if not node:
-                break
-            container = node
-        return container
-
-    def _setID( self, id, name ):
-        self.idmap[id] = name.replace('::','.').replace('/','.')
-        #~ print '--| setID:',id,'::',self.idmap[id]
-
-    #~ Translate a given node within a given context.
- #~ The translation dispatches to a local method of the form - #~ "_translate[_context0,...,_contextN]", and the keyword args are - #~ passed along. If there is no translation handling method we - #~ return None. - def _translateNode( self, *context, **kwargs ): - node = None - names = [ ] - for c in context: - if c: - if not isinstance(c,xml.dom.Node): - suffix = '_'+c.replace('-','_') - else: - suffix = '_'+c.nodeName.replace('-','_') - node = c - names.append('_translate') - names = map(lambda x: x+suffix,names) - if node: - for name in names: - if hasattr(self,name): - return getattr(self,name)(node,**kwargs) - return None - - #~ Translates the children of the given parent node, appending the results - #~ to the indicated target. For nodes not translated by the translation method - #~ it copies the child over and recurses on that child to translate any - #~ possible interior nodes. Hence this will translate the entire subtree. - def _translateChildren( self, parent, **kwargs ): - target = kwargs['target'] - for n in parent.childNodes: - child = self._translateNode(n,target=target) - if child: - target.appendChild(child) - else: - child = n.cloneNode(False) - if hasattr(child,'data'): - child.data = re.sub(r'\s+',' ',child.data) - target.appendChild(child) - self._translateChildren(n,target=child) - - #~ Translate the given node as a description, into the description subnode - #~ of the target. If no description subnode is present in the target it - #~ is created. - def _translateDescription( self, node, target=None, tag='description', **kwargs ): - description = self._getChild(tag,root=target) - if not description: - description = target.appendChild(self._createNode(tag)) - self._translateChildren(node,target=description) - return description - - #~ Top level translation of: <doxygen ...>...</doxygen>, - #~ translates the children. - def _translate_doxygen( self, node ): - #~ print '_translate_doxygen:', node.nodeName - result = [] - for n in node.childNodes: - newNode = self._translateNode(n) - if newNode: - result.append(newNode) - return result - - #~ Top level translation of: - #~ <doxygenindex ...> - #~ <compound ...> - #~ <member ...> - #~ <name>...</name> - #~ </member> - #~ ... - #~ </compound> - #~ ... - #~ </doxygenindex> - #~ builds the class and symbol sections, if requested. - def _translate_doxygenindex( self, node ): - #~ print '_translate_doxygenindex:', node.nodeName - if self.args['index']: - entries = [] - classes = [] - #~ Accumulate all the index entries we care about. - for n in node.childNodes: - if n.nodeName == 'compound': - if n.getAttribute('kind') not in ('file','dir','define'): - cpp = self._cppName(self._getChildData('name',root=n)) - entry = { - 'name' : cpp['name'], - 'compoundname' : cpp['compoundname'], - 'id' : n.getAttribute('refid') - } - if n.getAttribute('kind') in ('class','struct'): - classes.append(entry) - entries.append(entry) - for m in n.childNodes: - if m.nodeName == 'member': - cpp = self._cppName(self._getChildData('name',root=m)) - entry = { - 'name' : cpp['name'], - 'compoundname' : cpp['compoundname'], - 'id' : n.getAttribute('refid') - } - if hasattr(m,'getAttribute') and m.getAttribute('kind') in ('class','struct'): - classes.append(entry) - entries.append(entry) - #~ Put them in a sensible order. - entries.sort(lambda x,y: cmp(x['name'].lower(),y['name'].lower())) - classes.sort(lambda x,y: cmp(x['name'].lower(),y['name'].lower())) - #~ And generate the BoostBook for them. 
-        self._translate_index_(entries,target=self.section['index'])
-        self._translate_index_(classes,target=self.section['classes'])
-        return None
-
-    #~ Translate a set of index entries in the BoostBook output. The output
-    #~ is grouped by the first letter of the entry names.
-    def _translate_index_(self, entries, target=None, **kwargs ):
-        i = 0
-        targetID = target.getAttribute('id')
-        while i < len(entries):
-            dividerKey = entries[i]['name'][0].upper()
-            divider = target.appendChild(self._createNode('indexdiv',id=targetID+'.'+dividerKey))
-            divider.appendChild(self._createText('title',dividerKey))
-            while i < len(entries) and dividerKey == entries[i]['name'][0].upper():
-                iename = entries[i]['name']
-                ie = divider.appendChild(self._createNode('indexentry'))
-                ie = ie.appendChild(self._createText('primaryie',iename))
-                while i < len(entries) and entries[i]['name'] == iename:
-                    ie.appendChild(self.boostbook.createTextNode(' ('))
-                    ie.appendChild(self._createText(
-                        'link',entries[i]['compoundname'],linkend=entries[i]['id']))
-                    ie.appendChild(self.boostbook.createTextNode(')'))
-                    i += 1
-
-    #~ Translate a <compounddef ...>...</compounddef>,
-    #~ by retranslating with the "kind" of compounddef.
-    def _translate_compounddef( self, node, target=None, **kwargs ):
-        return self._translateNode(node,node.getAttribute('kind'))
-
-    #~ Translate a <compounddef kind="namespace"...>...</compounddef>. For
-    #~ namespaces we just collect the information for later use, as namespaces
-    #~ are not currently included in the BoostBook format. In the future
-    #~ it might be good to generate a namespace index.
-    def _translate_compounddef_namespace( self, node, target=None, **kwargs ):
-        namespace = {
-            'id' : node.getAttribute('id'),
-            'kind' : 'namespace',
-            'name' : self._getChildData('compoundname',root=node),
-            'brief' : self._getChildData('briefdescription',root=node),
-            'detailed' : self._getChildData('detaileddescription',root=node),
-            'parsed' : False
-        }
-        if self.symbols.has_key(namespace['name']):
-            if not self.symbols[namespace['name']]['parsed']:
-                self.symbols[namespace['name']]['parsed'] = True
-                #~ for n in node.childNodes:
-                #~     if hasattr(n,'getAttribute'):
-                #~         self._translateNode(n,n.getAttribute('kind'),target=target,**kwargs)
-        else:
-            self.symbols[namespace['name']] = namespace
-            #~ self._setID(namespace['id'],namespace['name'])
-        return None
-
-    #~ Translate a <compounddef kind="class"...>...</compounddef>, which
-    #~ forwards to kind=struct, as they are the same.
-    def _translate_compounddef_class( self, node, target=None, **kwargs ):
-        return self._translate_compounddef_struct(node,tag='class',target=target,**kwargs)
-
-    #~ Translate a <compounddef kind="struct"...>...</compounddef> into:
-    #~ <header id="?" name="?">
-    #~     <struct name="?">
-    #~         ...
-    #~     </struct>
-    #~ </header>
-    def _translate_compounddef_struct( self, node, tag='struct', target=None, **kwargs ):
-        result = None
-        includes = self._getChild('includes',root=node)
-        if includes:
-            ## Add the header into the output table.
-            self._translate_compounddef_includes_(includes,includes,**kwargs)
-            ## Compounds are the declared symbols: classes, types, etc.
-            ## We add them to the symbol table, along with the partial DOM for them,
-            ## so that they can be organized into the output later.
- compoundname = self._getChildData('compoundname',root=node) - compoundname = self._cppName(compoundname) - self._setID(node.getAttribute('id'),compoundname['compoundname']) - struct = self._createNode(tag,name=compoundname['name'].split('::')[-1]) - self.symbols[compoundname['compoundname']] = { - 'header' : includes.firstChild.data, - 'namespace' : compoundname['namespace'], - 'id' : node.getAttribute('id'), - 'kind' : tag, - 'name' : compoundname['name'], - 'dom' : struct - } - ## Add the children which will be the members of the struct. - for n in node.childNodes: - self._translateNode(n,target=struct,scope=compoundname['compoundname']) - result = struct - return result - - #~ Translate a <compounddef ...><includes ...>...</includes></compounddef>, - def _translate_compounddef_includes_( self, node, target=None, **kwargs ): - name = node.firstChild.data - if not self.symbols.has_key(name): - self._setID(node.getAttribute('refid'),name) - self.symbols[name] = { - 'kind' : 'header', - 'id' : node.getAttribute('refid'), - 'dom' : self._createNode('header', - id=node.getAttribute('refid'), - name=name) - } - return None - - #~ Translate a <basecompoundref...>...</basecompoundref> into: - #~ <inherit access="?"> - #~ ... - #~ </inherit> - def _translate_basecompoundref( self, ref, target=None, **kwargs ): - inherit = target.appendChild(self._createNode('inherit', - access=ref.getAttribute('prot'))) - self._translateChildren(ref,target=inherit) - return - - #~ Translate: - #~ <templateparamlist> - #~ <param> - #~ <type>...</type> - #~ <declname>...</declname> - #~ <defname>...</defname> - #~ <defval>...</defval> - #~ </param> - #~ ... - #~ </templateparamlist> - #~ Into: - #~ <template> - #~ <template-type-parameter name="?" /> - #~ <template-nontype-parameter name="?"> - #~ <type>?</type> - #~ <default>?</default> - #~ </template-nontype-parameter> - #~ </template> - def _translate_templateparamlist( self, templateparamlist, target=None, **kwargs ): - template = target.appendChild(self._createNode('template')) - for param in templateparamlist.childNodes: - if param.nodeName == 'param': - type = self._getChildData('type',root=param) - defval = self._getChild('defval',root=param) - paramKind = None - if type in ('class','typename'): - paramKind = 'template-type-parameter' - else: - paramKind = 'template-nontype-parameter' - templateParam = template.appendChild( - self._createNode(paramKind, - name=self._getChildData('declname',root=param))) - if paramKind == 'template-nontype-parameter': - template_type = templateParam.appendChild(self._createNode('type')) - self._translate_type( - self._getChild('type',root=param),target=template_type) - if defval: - value = self._getChildData('ref',root=defval.firstChild) - if not value: - value = self._getData(defval) - templateParam.appendChild(self._createText('default',value)) - return template - - #~ Translate: - #~ <briefdescription>...</briefdescription> - #~ Into: - #~ <purpose>...</purpose> - def _translate_briefdescription( self, brief, target=None, **kwargs ): - self._translateDescription(brief,target=target,**kwargs) - return self._translateDescription(brief,target=target,tag='purpose',**kwargs) - - #~ Translate: - #~ <detaileddescription>...</detaileddescription> - #~ Into: - #~ <description>...</description> - def _translate_detaileddescription( self, detailed, target=None, **kwargs ): - return self._translateDescription(detailed,target=target,**kwargs) - - #~ Translate: - #~ <sectiondef kind="?">...</sectiondef> - #~ With kind specific 
translation.
-    def _translate_sectiondef( self, sectiondef, target=None, **kwargs ):
-        self._translateNode(sectiondef,sectiondef.getAttribute('kind'),target=target,**kwargs)
-
-    #~ Translate non-function sections.
-    def _translate_sectiondef_x_( self, sectiondef, target=None, **kwargs ):
-        for n in sectiondef.childNodes:
-            if hasattr(n,'getAttribute'):
-                self._translateNode(n,n.getAttribute('kind'),target=target,**kwargs)
-        return None
-
-    #~ Translate:
-    #~ <sectiondef kind="public-type">...</sectiondef>
-    def _translate_sectiondef_public_type( self, sectiondef, target=None, **kwargs ):
-        return self._translate_sectiondef_x_(sectiondef,target=target,**kwargs)
-
-    #~ Translate:
-    #~ <sectiondef kind="public-attrib">...</sectiondef>
-    def _translate_sectiondef_public_attrib( self, sectiondef, target=None, **kwargs):
-        return self._translate_sectiondef_x_(sectiondef,target=target,**kwargs)
-
-    #~ Translate:
-    #~ <sectiondef kind="?-func">...</sectiondef>
-    #~ All of the various function group translations end up here, where
-    #~ they are translated into:
-    #~ <method-group name="?">
-    #~     ...
-    #~ </method-group>
-    def _translate_sectiondef_func_( self, sectiondef, name='functions', target=None, **kwargs ):
-        members = target.appendChild(self._createNode('method-group',name=name))
-        for n in sectiondef.childNodes:
-            if hasattr(n,'getAttribute'):
-                self._translateNode(n,n.getAttribute('kind'),target=members,**kwargs)
-        return members
-
-    #~ Translate:
-    #~ <sectiondef kind="public-func">...</sectiondef>
-    def _translate_sectiondef_public_func( self, sectiondef, target=None, **kwargs ):
-        return self._translate_sectiondef_func_(sectiondef,
-            name='public member functions',target=target,**kwargs)
-
-    #~ Translate:
-    #~ <sectiondef kind="public-static-func">...</sectiondef>
-    def _translate_sectiondef_public_static_func( self, sectiondef, target=None, **kwargs):
-        return self._translate_sectiondef_func_(sectiondef,
-            name='public static functions',target=target,**kwargs)
-
-    #~ Translate:
-    #~ <sectiondef kind="protected-func">...</sectiondef>
-    def _translate_sectiondef_protected_func( self, sectiondef, target=None, **kwargs ):
-        return self._translate_sectiondef_func_(sectiondef,
-            name='protected member functions',target=target,**kwargs)
-
-    #~ Translate:
-    #~ <sectiondef kind="private-static-func">...</sectiondef>
-    def _translate_sectiondef_private_static_func( self, sectiondef, target=None, **kwargs):
-        return self._translate_sectiondef_func_(sectiondef,
-            name='private static functions',target=target,**kwargs)
-
-    #~ Translate:
-    #~ <sectiondef kind="private-func">...</sectiondef>
-    def _translate_sectiondef_private_func( self, sectiondef, target=None, **kwargs ):
-        return self._translate_sectiondef_func_(sectiondef,
-            name='private member functions',target=target,**kwargs)
-
-    #~ Translate:
-    #~ <sectiondef kind="user-defined"><header>...</header>...</sectiondef>
-    def _translate_sectiondef_user_defined( self, sectiondef, target=None, **kwargs ):
-        return self._translate_sectiondef_func_(sectiondef,
-            name=self._getChildData('header', root=sectiondef),target=target,**kwargs)
-
-    #~ Translate:
-    #~ <memberdef kind="typedef" id="?">
-    #~     <name>...</name>
-    #~ </memberdef>
-    #~ To:
-    #~ <typedef id="?"
name="?"> - #~ <type>...</type> - #~ </typedef> - def _translate_memberdef_typedef( self, memberdef, target=None, scope=None, **kwargs ): - self._setID(memberdef.getAttribute('id'), - scope+'::'+self._getChildData('name',root=memberdef)) - typedef = target.appendChild(self._createNode('typedef', - id=memberdef.getAttribute('id'), - name=self._getChildData('name',root=memberdef))) - typedef_type = typedef.appendChild(self._createNode('type')) - self._translate_type(self._getChild('type',root=memberdef),target=typedef_type) - return typedef - - #~ Translate: - #~ <memberdef kind="function" id="?" const="?" static="?" explicit="?" inline="?"> - #~ <name>...</name> - #~ </memberdef> - #~ To: - #~ <method name="?" cv="?" specifiers="?"> - #~ ... - #~ </method> - def _translate_memberdef_function( self, memberdef, target=None, scope=None, **kwargs ): - name = self._getChildData('name',root=memberdef) - self._setID(memberdef.getAttribute('id'),scope+'::'+name) - ## Check if we have some specific kind of method. - if name == scope.split('::')[-1]: - kind = 'constructor' - target = target.parentNode - elif name == '~'+scope.split('::')[-1]: - kind = 'destructor' - target = target.parentNode - elif name == 'operator=': - kind = 'copy-assignment' - target = target.parentNode - else: - kind = 'method' - method = target.appendChild(self._createNode(kind, - # id=memberdef.getAttribute('id'), - name=name, - cv=' '.join([ - if_attribute(memberdef,'const','const','').strip() - ]), - specifiers=' '.join([ - if_attribute(memberdef,'static','static',''), - if_attribute(memberdef,'explicit','explicit',''), - if_attribute(memberdef,'inline','inline','') - ]).strip() - )) - ## We iterate the children to translate each part of the function. - for n in memberdef.childNodes: - self._translateNode(memberdef,'function',n,target=method) - return method - - #~ Translate: - #~ <memberdef kind="function"...><templateparamlist>...</templateparamlist></memberdef> - def _translate_memberdef_function_templateparamlist( - self, templateparamlist, target=None, **kwargs ): - return self._translate_templateparamlist(templateparamlist,target=target,**kwargs) - - #~ Translate: - #~ <memberdef kind="function"...><type>...</type></memberdef> - #~ To: - #~ ...<type>?</type> - def _translate_memberdef_function_type( self, resultType, target=None, **kwargs ): - methodType = self._createNode('type') - self._translate_type(resultType,target=methodType) - if methodType.hasChildNodes(): - target.appendChild(methodType) - return methodType - - #~ Translate: - #~ <memberdef kind="function"...><briefdescription>...</briefdescription></memberdef> - def _translate_memberdef_function_briefdescription( self, description, target=None, **kwargs ): - result = self._translateDescription(description,target=target,**kwargs) - ## For functions if we translate the brief docs to the purpose they end up - ## right above the regular description. And since we just added the brief to that - ## on the previous line, don't bother with the repetition. 
- # result = self._translateDescription(description,target=target,tag='purpose',**kwargs) - return result - - #~ Translate: - #~ <memberdef kind="function"...><detaileddescription>...</detaileddescription></memberdef> - def _translate_memberdef_function_detaileddescription( self, description, target=None, **kwargs ): - return self._translateDescription(description,target=target,**kwargs) - - #~ Translate: - #~ <memberdef kind="function"...><inbodydescription>...</inbodydescription></memberdef> - def _translate_memberdef_function_inbodydescription( self, description, target=None, **kwargs ): - return self._translateDescription(description,target=target,**kwargs) - - #~ Translate: - #~ <memberdef kind="function"...><param>...</param></memberdef> - def _translate_memberdef_function_param( self, param, target=None, **kwargs ): - return self._translate_param(param,target=target,**kwargs) - - #~ Translate: - #~ <memberdef kind="variable" id="?"> - #~ <name>...</name> - #~ <type>...</type> - #~ </memberdef> - #~ To: - #~ <data-member id="?" name="?"> - #~ <type>...</type> - #~ </data-member> - def _translate_memberdef_variable( self, memberdef, target=None, scope=None, **kwargs ): - self._setID(memberdef.getAttribute('id'), - scope+'::'+self._getChildData('name',root=memberdef)) - data_member = target.appendChild(self._createNode('data-member', - id=memberdef.getAttribute('id'), - name=self._getChildData('name',root=memberdef))) - data_member_type = data_member.appendChild(self._createNode('type')) - self._translate_type(self._getChild('type',root=memberdef),target=data_member_type) - - #~ Translate: - #~ <memberdef kind="enum" id="?"> - #~ <name>...</name> - #~ ... - #~ </memberdef> - #~ To: - #~ <enum id="?" name="?"> - #~ ... - #~ </enum> - def _translate_memberdef_enum( self, memberdef, target=None, scope=None, **kwargs ): - self._setID(memberdef.getAttribute('id'), - scope+'::'+self._getChildData('name',root=memberdef)) - enum = target.appendChild(self._createNode('enum', - id=memberdef.getAttribute('id'), - name=self._getChildData('name',root=memberdef))) - for n in memberdef.childNodes: - self._translateNode(memberdef,'enum',n,target=enum,scope=scope,**kwargs) - return enum - - #~ Translate: - #~ <memberdef kind="enum"...> - #~ <enumvalue id="?"> - #~ <name>...</name> - #~ <initializer>...</initializer> - #~ </enumvalue> - #~ </memberdef> - #~ To: - #~ <enumvalue id="?" name="?"> - #~ <default>...</default> - #~ </enumvalue> - def _translate_memberdef_enum_enumvalue( self, enumvalue, target=None, scope=None, **kwargs ): - self._setID(enumvalue.getAttribute('id'), - scope+'::'+self._getChildData('name',root=enumvalue)) - value = target.appendChild(self._createNode('enumvalue', - id=enumvalue.getAttribute('id'), - name=self._getChildData('name',root=enumvalue))) - initializer = self._getChild('initializer',root=enumvalue) - if initializer: - self._translateChildren(initializer, - target=target.appendChild(self._createNode('default'))) - return value - - #~ Translate: - #~ <param> - #~ <type>...</type> - #~ <declname>...</declname> - #~ <defval>...</defval> - #~ </param> - #~ To: - #~ <parameter name="?"> - #~ <paramtype>...</paramtype> - #~ ... 
- #~ </parameter> - def _translate_param( self, param, target=None, **kwargs): - parameter = target.appendChild(self._createNode('parameter', - name=self._getChildData('declname',root=param))) - paramtype = parameter.appendChild(self._createNode('paramtype')) - self._translate_type(self._getChild('type',root=param),target=paramtype) - defval = self._getChild('defval',root=param) - if defval: - self._translateChildren(self._getChild('defval',root=param),target=parameter) - return parameter - - #~ Translate: - #~ <ref kindref="?" ...>...</ref> - def _translate_ref( self, ref, **kwargs ): - return self._translateNode(ref,ref.getAttribute('kindref')) - - #~ Translate: - #~ <ref refid="?" kindref="compound">...</ref> - #~ To: - #~ <link linkend="?"><classname>...</classname></link> - def _translate_ref_compound( self, ref, **kwargs ): - result = self._createNode('link',linkend=ref.getAttribute('refid')) - classname = result.appendChild(self._createNode('classname')) - self._translateChildren(ref,target=classname) - return result - - #~ Translate: - #~ <ref refid="?" kindref="member">...</ref> - #~ To: - #~ <link linkend="?">...</link> - def _translate_ref_member( self, ref, **kwargs ): - result = self._createNode('link',linkend=ref.getAttribute('refid')) - self._translateChildren(ref,target=result) - return result - - #~ Translate: - #~ <type>...</type> - def _translate_type( self, type, target=None, **kwargs ): - result = self._translateChildren(type,target=target,**kwargs) - #~ Filter types to clean up various readability problems, most notably - #~ with really long types. - xml = target.toxml('utf-8'); - if ( - xml.startswith('<type>boost::mpl::') or - xml.startswith('<type>BOOST_PP_') or - re.match('<type>boost::(lazy_)?(enable|disable)_if',xml) - ): - while target.firstChild: - target.removeChild(target.firstChild) - target.appendChild(self._createText('emphasis','unspecified')) - return result - - def _getChild( self, tag = None, id = None, name = None, root = None ): - if not root: - root = self.boostbook.documentElement - for n in root.childNodes: - found = True - if tag and found: - found = found and tag == n.nodeName - if id and found: - if n.hasAttribute('id'): - found = found and n.getAttribute('id') == id - else: - found = found and n.hasAttribute('id') and n.getAttribute('id') == id - if name and found: - found = found and n.hasAttribute('name') and n.getAttribute('name') == name - if found: - #~ print '--|', n - return n - return None - - def _getChildData( self, tag, **kwargs ): - return self._getData(self._getChild(tag,**kwargs),**kwargs) - - def _getData( self, node, **kwargs ): - if node: - text = self._getChild('#text',root=node) - if text: - return text.data.strip() - return '' - - def _cppName( self, type ): - parts = re.search('^([^<]+)[<]?(.*)[>]?$',type.strip().strip(':')) - result = { - 'compoundname' : parts.group(1), - 'namespace' : parts.group(1).split('::')[0:-1], - 'name' : parts.group(1).split('::')[-1], - 'specialization' : parts.group(2) - } - if result['namespace'] and len(result['namespace']) > 0: - namespace = '::'.join(result['namespace']) - while ( - len(result['namespace']) > 0 and ( - not self.symbols.has_key(namespace) or - self.symbols[namespace]['kind'] != 'namespace') - ): - result['name'] = result['namespace'].pop()+'::'+result['name'] - namespace = '::'.join(result['namespace']) - return result - - def _createNode( self, tag, **kwargs ): - result = self.boostbook.createElement(tag) - for k in kwargs.keys(): - if kwargs[k] != '': - if k == 'id': - 
result.setAttribute('id',kwargs[k])
-                else:
-                    result.setAttribute(k,kwargs[k])
-        return result
-
-    def _createText( self, tag, data, **kwargs ):
-        result = self._createNode(tag,**kwargs)
-        data = data.strip()
-        if len(data) > 0:
-            result.appendChild(self.boostbook.createTextNode(data))
-        return result
-
-
-def main( xmldir=None, output=None, id=None, title=None, index=False ):
-    #~ print '--- main: xmldir = %s, output = %s' % (xmldir,output)
-
-    input = glob.glob( os.path.abspath( os.path.join( xmldir, "*.xml" ) ) )
-    input.sort()
-    translator = Doxygen2BoostBook(id=id, title=title, index=index)
-    #~ Feed in the namespaces first to build up the set of namespaces
-    #~ and definitions so that lookup is unambiguous when reading in the definitions.
-    namespace_files = filter(
-        lambda x:
-            os.path.basename(x).startswith('namespace'),
-        input)
-    decl_files = filter(
-        lambda x:
-            not os.path.basename(x).startswith('namespace') and not os.path.basename(x).startswith('_'),
-        input)
-    for dox in namespace_files:
-        #~ print '--|',os.path.basename(dox)
-        translator.addDox(xml.dom.minidom.parse(dox))
-    for dox in decl_files:
-        #~ print '--|',os.path.basename(dox)
-        translator.addDox(xml.dom.minidom.parse(dox))
-
-    if output:
-        output = open(output,'w')
-    else:
-        output = sys.stdout
-    if output:
-        output.write(translator.tostring())
-
-
-main( **get_args() )
diff --git a/jam-files/boost-build/tools/doxygen-config.jam b/jam-files/boost-build/tools/doxygen-config.jam
deleted file mode 100644
index 2cd2ccae..00000000
--- a/jam-files/boost-build/tools/doxygen-config.jam
+++ /dev/null
@@ -1,11 +0,0 @@
-#~ Copyright 2005, 2006 Rene Rivera.
-#~ Distributed under the Boost Software License, Version 1.0.
-#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-# Automatic configuration for Doxygen tools. To use, just import this module.
-
-import toolset : using ;
-
-ECHO "warning: doxygen-config.jam is deprecated. Use 'using doxygen ;' instead." ;
-
-using doxygen ;
diff --git a/jam-files/boost-build/tools/doxygen.jam b/jam-files/boost-build/tools/doxygen.jam
deleted file mode 100644
index 8394848d..00000000
--- a/jam-files/boost-build/tools/doxygen.jam
+++ /dev/null
@@ -1,776 +0,0 @@
-# Copyright 2003, 2004 Douglas Gregor
-# Copyright 2003, 2004, 2005 Vladimir Prus
-# Copyright 2006 Rene Rivera
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-# This module defines rules to handle generation of various outputs from source
-# files documented with doxygen comments. The supported transformations are:
-#
-# * Source -> Doxygen XML -> BoostBook XML
-# * Source -> Doxygen HTML
-#
-# The type of transformation is selected based on the target requested. For
-# BoostBook XML (the default), specifying a target with an ".xml" suffix or an
-# empty suffix will produce a <target>.xml and a <target>.boostbook. For Doxygen
-# HTML, specifying a target with an ".html" suffix will produce a directory
-# <target> with the Doxygen html files, and a <target>.html file redirecting to
-# that directory.
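The suffix-driven dispatch described in the header comment above is simple to state outside of Jam. The following minimal Python sketch is not part of the deleted module; the function name and pipeline labels are illustrative only, and it merely restates the selection logic the comment describes:

    import os

    def pick_transformation(target):
        # Map a requested documentation target to a pipeline, keyed on suffix.
        suffix = os.path.splitext(target)[1]
        if suffix in ('', '.xml'):
            # Source -> Doxygen XML -> BoostBook XML (the default)
            return ['doxyfile', 'doxygen-xml-multifile', 'boostbook-xml']
        if suffix == '.html':
            # Source -> Doxygen HTML directory, plus a redirect page
            return ['doxyfile', 'doxygen-html-multifile', 'html-redirect']
        raise ValueError('unsupported documentation target: %r' % target)

For example, `pick_transformation('reference.xml')` selects the BoostBook pipeline, while `pick_transformation('reference.html')` selects the HTML one.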
-
-import "class" : new ;
-import targets ;
-import feature ;
-import property ;
-import generators ;
-import boostbook ;
-import type ;
-import path ;
-import print ;
-import regex ;
-import stage ;
-import project ;
-import xsltproc ;
-import make ;
-import os ;
-import toolset : flags ;
-import alias ;
-import common ;
-import modules ;
-import project ;
-import utility ;
-import errors ;
-
-
-# Used to specify extra configuration parameters. These get translated
-# into a doxyfile which configures the building of the docs.
-feature.feature doxygen:param : : free ;
-
-# Specify the "<xsl:param>boost.doxygen.header.prefix" XSLT option.
-feature.feature prefix : : free ;
-
-# Specify the "<xsl:param>boost.doxygen.reftitle" XSLT option.
-feature.feature reftitle : : free ;
-
-# Which processor to use for various translations from Doxygen.
-feature.feature doxygen.processor : xsltproc doxproc : propagated implicit ;
-
-# To generate, or not, index sections.
-feature.feature doxygen.doxproc.index : no yes : propagated incidental ;
-
-# The ID for the resulting BoostBook reference section.
-feature.feature doxygen.doxproc.id : : free ;
-
-# The title for the resulting BoostBook reference section.
-feature.feature doxygen.doxproc.title : : free ;
-
-# Location for images when generating XML.
-feature.feature doxygen:xml-imagedir : : free ;
-
-# Indicates whether the entire directory should be deleted.
-feature.feature doxygen.rmdir : off on : optional incidental ;
-
-# Doxygen configuration input file.
-type.register DOXYFILE : doxyfile ;
-
-# Doxygen XML multi-file output.
-type.register DOXYGEN_XML_MULTIFILE : xml-dir : XML ;
-
-# Doxygen XML coalesced output.
-type.register DOXYGEN_XML : doxygen : XML ;
-
-# Doxygen HTML multifile directory.
-type.register DOXYGEN_HTML_MULTIFILE : html-dir : HTML ;
-
-# Redirection HTML file to the HTML multifile directory.
-type.register DOXYGEN_HTML : : HTML ;
-
-type.register DOXYGEN_XML_IMAGES : doxygen-xml-images ;
-
-# Initialize the Doxygen module. Parameters are:
-#   name: the name of the 'doxygen' executable. If not specified, the name
-#         'doxygen' will be used
-#
-rule init ( name ? )
-{
-    if ! $(.initialized)
-    {
-        .initialized = true ;
-
-        .doxproc = [ modules.binding $(__name__) ] ;
-        .doxproc = $(.doxproc:D)/doxproc.py ;
-
-        generators.register-composing doxygen.headers-to-doxyfile
-            : H HPP CPP : DOXYFILE ;
-        generators.register-standard doxygen.run
-            : DOXYFILE : DOXYGEN_XML_MULTIFILE ;
-        generators.register-standard doxygen.xml-dir-to-boostbook
-            : DOXYGEN_XML_MULTIFILE : BOOSTBOOK : <doxygen.processor>doxproc ;
-        generators.register-standard doxygen.xml-to-boostbook
-            : DOXYGEN_XML : BOOSTBOOK : <doxygen.processor>xsltproc ;
-        generators.register-standard doxygen.collect
-            : DOXYGEN_XML_MULTIFILE : DOXYGEN_XML ;
-        generators.register-standard doxygen.run
-            : DOXYFILE : DOXYGEN_HTML_MULTIFILE ;
-        generators.register-standard doxygen.html-redirect
-            : DOXYGEN_HTML_MULTIFILE : DOXYGEN_HTML ;
-        generators.register-standard doxygen.copy-latex-pngs
-            : DOXYGEN_HTML : DOXYGEN_XML_IMAGES ;
-
-        IMPORT $(__name__) : doxygen : : doxygen ;
-    }
-
-    if $(name)
-    {
-        modify-config ;
-        .doxygen = $(name) ;
-        check-doxygen ;
-    }
-
-    if ! $(.doxygen)
-    {
-        check-doxygen ;
-    }
-}
-
-rule freeze-config ( )
-{
-    if ! $(.initialized)
-    {
-        errors.user-error "doxygen must be initialized before it can be used." ;
-    }
-    if !
$(.config-frozen) - { - .config-frozen = true ; - - if [ .is-cygwin ] - { - .is-cygwin = true ; - } - } -} - -rule modify-config ( ) -{ - if $(.config-frozen) - { - errors.user-error "Cannot change doxygen after it has been used." ; - } -} - -rule check-doxygen ( ) -{ - if --debug-configuration in [ modules.peek : ARGV ] - { - ECHO "notice:" using doxygen ":" $(.doxygen) ; - } - local extra-paths ; - if [ os.name ] = NT - { - local ProgramFiles = [ modules.peek : ProgramFiles ] ; - if $(ProgramFiles) - { - extra-paths = "$(ProgramFiles:J= )" ; - } - else - { - extra-paths = "C:\\Program Files" ; - } - } - .doxygen = [ common.get-invocation-command doxygen : - doxygen : $(.doxygen) : $(extra-paths) ] ; -} - -rule name ( ) -{ - freeze-config ; - return $(.doxygen) ; -} - -rule .is-cygwin ( ) -{ - if [ os.on-windows ] - { - local file = [ path.make [ modules.binding $(__name__) ] ] ; - local dir = [ path.native - [ path.join [ path.parent $(file) ] doxygen ] ] ; - local command = - "cd \"$(dir)\" && \"$(.doxygen)\" windows-paths-check.doxyfile 2>&1" ; - result = [ SHELL $(command) ] ; - if [ MATCH "(Parsing file /)" : $(result) ] - { - return true ; - } - } -} - -# Runs Doxygen on the given Doxygen configuration file (the source) to generate -# the Doxygen files. The output is dumped according to the settings in the -# Doxygen configuration file, not according to the target! Because of this, we -# essentially "touch" the target file, in effect making it look like we have -# really written something useful to it. Anyone that uses this action must deal -# with this behavior. -# -actions doxygen-action -{ - $(RM) "$(*.XML)" & "$(NAME:E=doxygen)" "$(>)" && echo "Stamped" > "$(<)" -} - - -# Runs the Python doxproc XML processor. -# -actions doxproc -{ - python "$(DOXPROC)" "--xmldir=$(>)" "--output=$(<)" "$(OPTIONS)" "--id=$(ID)" "--title=$(TITLE)" -} - - -rule translate-path ( path ) -{ - freeze-config ; - if [ os.on-windows ] - { - if [ os.name ] = CYGWIN - { - if $(.is-cygwin) - { - return $(path) ; - } - else - { - return $(path:W) ; - } - } - else - { - if $(.is-cygwin) - { - match = [ MATCH ^(.):(.*) : $(path) ] ; - if $(match) - { - return /cygdrive/$(match[1])$(match[2]:T) ; - } - else - { - return $(path:T) ; - } - } - else - { - return $(path) ; - } - } - } - else - { - return $(path) ; - } -} - - -# Generates a doxygen configuration file (doxyfile) given a set of C++ sources -# and a property list that may contain <doxygen:param> features. -# -rule headers-to-doxyfile ( target : sources * : properties * ) -{ - local text "# Generated by Boost.Build version 2" ; - - local output-dir ; - - # Translate <doxygen:param> into command line flags. - for local param in [ feature.get-values <doxygen:param> : $(properties) ] - { - local namevalue = [ regex.match ([^=]*)=(.*) : $(param) ] ; - if $(namevalue[1]) = OUTPUT_DIRECTORY - { - output-dir = [ translate-path - [ utility.unquote $(namevalue[2]) ] ] ; - text += "OUTPUT_DIRECTORY = \"$(output-dir)\"" ; - } - else - { - text += "$(namevalue[1]) = $(namevalue[2])" ; - } - } - - if ! $(output-dir) - { - output-dir = [ translate-path [ on $(target) return $(LOCATE) ] ] ; - text += "OUTPUT_DIRECTORY = \"$(output-dir)\"" ; - } - - local headers = ; - for local header in $(sources:G=) - { - header = [ translate-path $(header) ] ; - headers += \"$(header)\" ; - } - - # Doxygen generates LaTex by default. So disable it unconditionally, or at - # least until someone needs, and hence writes support for, LaTex output. 
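-    # As a hypothetical illustration (values made up), a target declared with
-    #   <doxygen:param>EXTRACT_ALL=YES <doxygen:param>"OUTPUT_DIRECTORY=\"bin/doc\""
-    # and two header sources would make this rule emit a doxyfile roughly like:
-    #   OUTPUT_DIRECTORY = "bin/doc"
-    #   EXTRACT_ALL = YES
-    #   GENERATE_LATEX = NO
-    #   INPUT = "a.hpp" "b.hpp"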
-    text += "GENERATE_LATEX = NO" ;
-    text += "INPUT = $(headers:J= )" ;
-    print.output $(target) plain ;
-    print.text $(text) : true ;
-}
-
-
-# Run Doxygen. See doxygen-action for a description of the strange properties of
-# this rule.
-#
-rule run ( target : source : properties * )
-{
-    freeze-config ;
-    if <doxygen.rmdir>on in $(properties)
-    {
-        local output-dir =
-            [ path.make
-                [ MATCH <doxygen:param>OUTPUT_DIRECTORY=\"?([^\"]*) :
-                    $(properties) ] ] ;
-        local html-dir =
-            [ path.make
-                [ MATCH <doxygen:param>HTML_OUTPUT=(.*) :
-                    $(properties) ] ] ;
-        if $(output-dir) && $(html-dir) &&
-            [ path.glob $(output-dir) : $(html-dir) ]
-        {
-            HTMLDIR on $(target) =
-                [ path.native [ path.join $(output-dir) $(html-dir) ] ] ;
-            rm-htmldir $(target) ;
-        }
-    }
-    doxygen-action $(target) : $(source) ;
-    NAME on $(target) = $(.doxygen) ;
-    RM on $(target) = [ modules.peek common : RM ] ;
-    *.XML on $(target) =
-        [ path.native
-            [ path.join
-                [ path.make [ on $(target) return $(LOCATE) ] ]
-                $(target:B:S=)
-                *.xml ] ] ;
-}
-
-if [ os.name ] = NT
-{
-    RMDIR = rmdir /s /q ;
-}
-else
-{
-    RMDIR = rm -rf ;
-}
-
-actions quietly rm-htmldir
-{
-    $(RMDIR) $(HTMLDIR)
-}
-
-# The rules below require BoostBook stylesheets, so we need some code to check
-# that the boostbook module has actually been initialized.
-#
-rule check-boostbook ( )
-{
-    if ! [ modules.peek boostbook : .initialized ]
-    {
-        ECHO "error: the boostbook module is not initialized" ;
-        ECHO "error: you've attempted to use the 'doxygen' toolset, " ;
-        ECHO "error: which requires BoostBook," ;
-        ECHO "error: but never initialized BoostBook." ;
-        EXIT "error: Hint: add 'using boostbook ;' to your user-config.jam" ;
-    }
-}
-
-
-# Collect the set of Doxygen XML files into a single XML source file that can be
-# handled by an XSLT processor. The source is completely ignored (see
-# doxygen-action), because this action picks up the Doxygen XML index file
-# xml/index.xml. This is because we cannot teach Doxygen to act like a NORMAL
-# program and take a "-o output.xml" argument (grrrr). The target of the
-# collection will be a single Doxygen XML file.
-#
-rule collect ( target : source : properties * )
-{
-    check-boostbook ;
-    local collect-xsl-dir
-        = [ path.native [ path.join [ boostbook.xsl-dir ] doxygen collect ] ] ;
-    local source-path
-        = [ path.make [ on $(source) return $(LOCATE) ] ] ;
-    local collect-path
-        = [ path.root [ path.join $(source-path) $(source:B) ] [ path.pwd ] ] ;
-    local native-path
-        = [ path.native $(collect-path) ] ;
-    local real-source
-        = [ path.native [ path.join $(collect-path) index.xml ] ] ;
-    xsltproc.xslt $(target) : $(real-source) $(collect-xsl-dir:S=.xsl)
-        : <xsl:param>doxygen.xml.path=$(native-path) ;
-}
-
-
-# Translate Doxygen XML into BoostBook.
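-# (The translation runs the doxygen2boostbook.xsl stylesheet through xsltproc;
-# as the rule body below shows, any <prefix> and <reftitle> requirements are
-# forwarded to it as the boost.doxygen.header.prefix and boost.doxygen.reftitle
-# XSLT parameters.)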
-#
-rule xml-to-boostbook ( target : source : properties * )
-{
-    check-boostbook ;
-    local xsl-dir = [ boostbook.xsl-dir ] ;
-    local d2b-xsl = [ path.native [ path.join [ boostbook.xsl-dir ] doxygen
-        doxygen2boostbook.xsl ] ] ;
-
-    local xslt-properties = $(properties) ;
-    for local prefix in [ feature.get-values <prefix> : $(properties) ]
-    {
-        xslt-properties += "<xsl:param>boost.doxygen.header.prefix=$(prefix)" ;
-    }
-    for local title in [ feature.get-values <reftitle> : $(properties) ]
-    {
-        xslt-properties += "<xsl:param>boost.doxygen.reftitle=$(title)" ;
-    }
-
-    xsltproc.xslt $(target) : $(source) $(d2b-xsl) : $(xslt-properties) ;
-}
-
-
-flags doxygen.xml-dir-to-boostbook OPTIONS <doxygen.doxproc.index>yes : --enable-index ;
-flags doxygen.xml-dir-to-boostbook ID <doxygen.doxproc.id> ;
-flags doxygen.xml-dir-to-boostbook TITLE <doxygen.doxproc.title> ;
-
-
-rule xml-dir-to-boostbook ( target : source : properties * )
-{
-    DOXPROC on $(target) = $(.doxproc) ;
-
-    LOCATE on $(source:S=) = [ on $(source) return $(LOCATE) ] ;
-
-    doxygen.doxproc $(target) : $(source:S=) ;
-}
-
-
-# Generate the HTML file that redirects to the HTML directory's index.html.
-#
-rule html-redirect ( target : source : properties * )
-{
-    local uri = "$(target:B)/index.html" ;
-    print.output $(target) plain ;
-    print.text
-"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"
-    \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">
-<html xmlns=\"http://www.w3.org/1999/xhtml\">
-<head>
-    <meta http-equiv=\"refresh\" content=\"0; URL=$(uri)\" />
-
-    <title></title>
-</head>
-
-<body>
-    Automatic redirection failed, please go to <a href=
-    \"$(uri)\">$(uri)</a>.
-</body>
-</html>
-"
-    : true ;
-}
-
-rule copy-latex-pngs ( target : source : requirements * )
-{
-    local directory = [ path.native
-        [ feature.get-values <doxygen:xml-imagedir> :
-            $(requirements) ] ] ;
-
-    local location = [ on $(target) return $(LOCATE) ] ;
-
-    local pdf-location =
-        [ path.native
-            [ path.join
-                [ path.make $(location) ]
-                [ path.make $(directory) ] ] ] ;
-    local html-location =
-        [ path.native
-            [ path.join
-                .
-                html
-                [ path.make $(directory) ] ] ] ;
-
-    common.MkDir $(pdf-location) ;
-    common.MkDir $(html-location) ;
-
-    DEPENDS $(target) : $(pdf-location) $(html-location) ;
-
-    if [ os.name ] = NT
-    {
-        CP on $(target) = copy /y ;
-        FROM on $(target) = \\*.png ;
-        TOHTML on $(target) = .\\html\\$(directory) ;
-        TOPDF on $(target) = \\$(directory) ;
-    }
-    else
-    {
-        CP on $(target) = cp ;
-        FROM on $(target) = /*.png ;
-        TOHTML on $(target) = ./html/$(directory) ;
-        TOPDF on $(target) = $(target:D)/$(directory) ;
-    }
-}
-
-actions copy-latex-pngs
-{
-    $(CP) $(>:S=)$(FROM) $(TOHTML)
-    $(CP) $(>:S=)$(FROM) $(<:D)$(TOPDF)
-    echo "Stamped" > "$(<)"
-}
-
-# Building LaTeX images for Doxygen XML depends on latex, dvips, and gs
-# (Ghostscript) being in your PATH. This is true for most Unix installs, but
-# not on Win32, where you will need to install MiKTeX and Ghostscript and add
-# these tools to your path.
-
-actions check-latex
-{
-    latex -version >$(<)
-}
-
-actions check-dvips
-{
-    dvips -version >$(<)
-}
-
-if [ os.name ] = "NT"
-{
-    actions check-gs
-    {
-        gswin32c -version >$(<)
-    }
-}
-else
-{
-    actions check-gs
-    {
-        gs -version >$(<)
-    }
-}
-
-rule check-tools ( )
-{
-    if ! $(.check-tools-targets)
-    {
-        # Find the root project.
-        local root-project = [ project.current ] ;
-        root-project = [ $(root-project).project-module ] ;
-        while
-            [ project.attribute $(root-project) parent-module ] &&
-            [ project.attribute $(root-project) parent-module ] != user-config
-        {
-            root-project =
-                [ project.attribute $(root-project) parent-module ] ;
-        }
-
-        .latex.check = [ new file-target latex.check
-            :
-            : [ project.target $(root-project) ]
-            : [ new action : doxygen.check-latex ]
-            :
-            ] ;
-        .dvips.check = [ new file-target dvips.check
-            :
-            : [ project.target $(root-project) ]
-            : [ new action : doxygen.check-dvips ]
-            :
-            ] ;
-        .gs.check = [ new file-target gs.check
-            :
-            : [ project.target $(root-project) ]
-            : [ new action : doxygen.check-gs ]
-            :
-            ] ;
-        .check-tools-targets = $(.latex.check) $(.dvips.check) $(.gs.check) ;
-    }
-    return $(.check-tools-targets) ;
-}
-
-project.initialize $(__name__) ;
-project doxygen ;
-
-class doxygen-check-tools-target-class : basic-target
-{
-    import doxygen ;
-    rule construct ( name : sources * : property-set )
-    {
-        return [ property-set.empty ] [ doxygen.check-tools ] ;
-    }
-}
-
-local project = [ project.current ] ;
-
-targets.main-target-alternative
-    [ new doxygen-check-tools-target-class check-tools : $(project)
-        : [ targets.main-target-sources : check-tools : no-renaming ]
-        : [ targets.main-target-requirements : $(project) ]
-        : [ targets.main-target-default-build : $(project) ]
-        : [ targets.main-target-usage-requirements : $(project) ]
-    ] ;
-
-# User-level rule to generate BoostBook XML from a set of headers via Doxygen.
-#
-rule doxygen ( target : sources * : requirements * : default-build * : usage-requirements * )
-{
-    freeze-config ;
-    local project = [ project.current ] ;
-
-    if $(target:S) = .html
-    {
-        # Build an HTML directory from the sources.
-        local html-location = [ feature.get-values <location> : $(requirements) ] ;
-        local output-dir ;
-        if [ $(project).get build-dir ]
-        {
-            # Explicitly specified build dir. Add html at the end.
-            output-dir = [ path.join [ $(project).build-dir ] $(html-location:E=html) ] ;
-        }
-        else
-        {
-            # Trim 'bin' from the implicit build dir, for no other reason than
-            # backward compatibility.
-            output-dir = [ path.join [ path.parent [ $(project).build-dir ] ]
-                $(html-location:E=html) ] ;
-        }
-        output-dir = [ path.root $(output-dir) [ path.pwd ] ] ;
-        local output-dir-native = [ path.native $(output-dir) ] ;
-        requirements = [ property.change $(requirements) : <location> ] ;
-
-        ## The doxygen configuration file.
-        targets.main-target-alternative
-            [ new typed-target $(target:S=.tag) : $(project) : DOXYFILE
-                : [ targets.main-target-sources $(sources) : $(target:S=.tag) ]
-                : [ targets.main-target-requirements $(requirements)
-                    <doxygen:param>GENERATE_HTML=YES
-                    <doxygen:param>GENERATE_XML=NO
-                    <doxygen:param>"OUTPUT_DIRECTORY=\"$(output-dir-native)\""
-                    <doxygen:param>HTML_OUTPUT=$(target:B)
-                    : $(project) ]
-                : [ targets.main-target-default-build $(default-build) : $(project) ]
-            ] ;
-        $(project).mark-target-as-explicit $(target:S=.tag) ;
-
-        ## The html directory to generate by running doxygen.
-        targets.main-target-alternative
-            [ new typed-target $(target:S=.dir) : $(project) : DOXYGEN_HTML_MULTIFILE
-                : $(target:S=.tag)
-                : [ targets.main-target-requirements $(requirements)
-                    <doxygen:param>"OUTPUT_DIRECTORY=\"$(output-dir-native)\""
-                    <doxygen:param>HTML_OUTPUT=$(target:B)
-                    : $(project) ]
-                : [ targets.main-target-default-build $(default-build) : $(project) ]
-            ] ;
-        $(project).mark-target-as-explicit $(target:S=.dir) ;
-
-        ## The redirect html file that points into the generated html.
-        targets.main-target-alternative
-            [ new typed-target $(target) : $(project) : DOXYGEN_HTML
-                : $(target:S=.dir)
-                : [ targets.main-target-requirements $(requirements)
-                    <location>$(output-dir)
-                    : $(project) ]
-                : [ targets.main-target-default-build $(default-build) : $(project) ]
-            ] ;
-    }
-    else
-    {
-        # Build a BoostBook XML file from the sources.
-        local location-xml = [ feature.get-values <location> : $(requirements) ] ;
-        requirements = [ property.change $(requirements) : <location> ] ;
-        local target-xml = $(target:B=$(target:B)-xml) ;
-
-        # Check whether we need to build images
-        local images-location =
-            [ feature.get-values <doxygen:xml-imagedir> : $(requirements) ] ;
-        if $(images-location)
-        {
-            doxygen $(target).doxygen-xml-images.html : $(sources)
-                : $(requirements)
-                <doxygen.rmdir>on
-                <doxygen:param>QUIET=YES
-                <doxygen:param>WARNINGS=NO
-                <doxygen:param>WARN_IF_UNDOCUMENTED=NO
-                <dependency>/doxygen//check-tools ;
-            $(project).mark-target-as-explicit
-                $(target).doxygen-xml-images.html ;
-
-            targets.main-target-alternative
-                [ new typed-target $(target).doxygen-xml-images
-                    : $(project) : DOXYGEN_XML_IMAGES
-                    : $(target).doxygen-xml-images.html
-                    : [ targets.main-target-requirements $(requirements)
-                        : $(project) ]
-                    : [ targets.main-target-default-build $(default-build)
-                        : $(project) ]
-                ] ;
-
-            $(project).mark-target-as-explicit
-                $(target).doxygen-xml-images ;
-
-            if ! [ regex.match "^(.*/)$" : $(images-location) ]
-            {
-                images-location = $(images-location)/ ;
-            }
-
-            requirements +=
-                <dependency>$(target).doxygen-xml-images
-                <xsl:param>boost.doxygen.formuladir=$(images-location) ;
-        }
-
-        ## The doxygen configuration file.
-        targets.main-target-alternative
-            [ new typed-target $(target-xml:S=.tag) : $(project) : DOXYFILE
-                : [ targets.main-target-sources $(sources) : $(target-xml:S=.tag) ]
-                : [ targets.main-target-requirements $(requirements)
-                    <doxygen:param>GENERATE_HTML=NO
-                    <doxygen:param>GENERATE_XML=YES
-                    <doxygen:param>XML_OUTPUT=$(target-xml)
-                    : $(project) ]
-                : [ targets.main-target-default-build $(default-build) : $(project) ]
-            ] ;
-        $(project).mark-target-as-explicit $(target-xml:S=.tag) ;
-
-        ## The Doxygen XML directory of the processed source files.
-        targets.main-target-alternative
-            [ new typed-target $(target-xml:S=.dir) : $(project) : DOXYGEN_XML_MULTIFILE
-                : $(target-xml:S=.tag)
-                : [ targets.main-target-requirements $(requirements)
-                    : $(project) ]
-                : [ targets.main-target-default-build $(default-build) : $(project) ]
-            ] ;
-        $(project).mark-target-as-explicit $(target-xml:S=.dir) ;
-
-        ## The resulting BoostBook file is generated by the processor tool. The
-        ## tool can be either xsltproc plus the accompanying XSL scripts, or the
-        ## Python doxproc.py script.
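-        ## (Which one runs is selected by the <doxygen.processor> feature
-        ## declared near the top of this module: xsltproc is the default, and
-        ## doxproc is used when <doxygen.processor>doxproc is in effect.)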
-        targets.main-target-alternative
-            [ new typed-target $(target-xml) : $(project) : BOOSTBOOK
-                : $(target-xml:S=.dir)
-                : [ targets.main-target-requirements $(requirements)
-                    : $(project) ]
-                : [ targets.main-target-default-build $(default-build) : $(project) ]
-            ] ;
-        $(project).mark-target-as-explicit $(target-xml) ;
-
-        targets.main-target-alternative
-            [ new install-target-class $(target:S=.xml) : $(project)
-                : $(target-xml)
-                : [ targets.main-target-requirements $(requirements)
-                    <location>$(location-xml:E=.)
-                    <name>$(target:S=.xml)
-                    : $(project) ]
-                : [ targets.main-target-default-build $(default-build) : $(project) ]
-            ] ;
-        $(project).mark-target-as-explicit $(target:S=.xml) ;
-
-        targets.main-target-alternative
-            [ new alias-target-class $(target) : $(project)
-                :
-                : [ targets.main-target-requirements $(requirements)
-                    : $(project) ]
-                : [ targets.main-target-default-build $(default-build) : $(project) ]
-                : [ targets.main-target-usage-requirements $(usage-requirements)
-                    <dependency>$(target:S=.xml)
-                    : $(project) ]
-            ] ;
-    }
-}
diff --git a/jam-files/boost-build/tools/doxygen/windows-paths-check.doxyfile b/jam-files/boost-build/tools/doxygen/windows-paths-check.doxyfile
deleted file mode 100644
index 9b969df9..00000000
--- a/jam-files/boost-build/tools/doxygen/windows-paths-check.doxyfile
+++ /dev/null
@@ -1,3 +0,0 @@
-INPUT = windows-paths-check.hpp
-GENERATE_HTML = NO
-GENERATE_LATEX = NO
diff --git a/jam-files/boost-build/tools/doxygen/windows-paths-check.hpp b/jam-files/boost-build/tools/doxygen/windows-paths-check.hpp
deleted file mode 100644
index e69de29b..00000000
--- a/jam-files/boost-build/tools/doxygen/windows-paths-check.hpp
+++ /dev/null
diff --git a/jam-files/boost-build/tools/fop.jam b/jam-files/boost-build/tools/fop.jam
deleted file mode 100644
index c24b8725..00000000
--- a/jam-files/boost-build/tools/fop.jam
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright (C) 2003-2004 Doug Gregor and Dave Abrahams. Distributed
-# under the Boost Software License, Version 1.0. (See accompanying
-# file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-#
-# This module defines rules to handle generation of PDF and
-# PostScript files from XSL Formatting Objects via Apache FOP
-
-import generators ;
-import common ;
-import boostbook ;
-
-generators.register-standard fop.render.pdf : FO : PDF ;
-generators.register-standard fop.render.ps : FO : PS ;
-
-# Initializes the fop toolset.
-#
-rule init ( fop-command ? : java-home ? : java ? )
-{
-    local has-command = $(.has-command) ;
-
-    if $(fop-command)
-    {
-        .has-command = true ;
-    }
-
-    if $(fop-command) || ! $(has-command)
-    {
-        fop-command = [ common.get-invocation-command fop : fop : $(fop-command)
-            : [ modules.peek : FOP_DIR ] ] ;
-    }
-
-    if $(fop-command)
-    {
-        .FOP_COMMAND = $(fop-command) ;
-    }
-
-    if $(java-home) || $(java)
-    {
-        .FOP_SETUP = ;
-
-
-        # JAVA_HOME is the location that java was installed to.
-
-        if $(java-home)
-        {
-            .FOP_SETUP += [ common.variable-setting-command JAVA_HOME : $(java-home) ] ;
-        }
-
-        # JAVACMD is the location of the java executable, useful for a
-        # non-standard java installation, where the executable isn't at
-        # $JAVA_HOME/bin/java.
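-        # (For instance -- with paths that are entirely hypothetical -- a
-        # user-config.jam entry could read:
-        #   using fop : /opt/fop/fop : /usr/lib/jvm/java ;
-        # which sets JAVA_HOME without overriding JAVACMD.)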
- - if $(java) - { - .FOP_SETUP += [ common.variable-setting-command JAVACMD : $(java) ] ; - } - } -} - -actions render.pdf -{ - $(.FOP_SETUP) $(.FOP_COMMAND:E=fop) $(>) $(<) -} - -actions render.ps -{ - $(.FOP_SETUP) $(.FOP_COMMAND:E=fop) $(>) -ps $(<) -} diff --git a/jam-files/boost-build/tools/fortran.jam b/jam-files/boost-build/tools/fortran.jam deleted file mode 100644 index 37665825..00000000 --- a/jam-files/boost-build/tools/fortran.jam +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (C) 2004 Toon Knapen -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# -# This file contains common settings for all fortran tools -# - -import "class" : new ; -import feature : feature ; - -import type ; -import generators ; -import common ; - -type.register FORTRAN : f F for f77 ; -type.register FORTRAN90 : f90 F90 ; - -feature fortran : : free ; -feature fortran90 : : free ; - -class fortran-compiling-generator : generator -{ - rule __init__ ( id : source-types + : target-types + : requirements * : optional-properties * ) - { - generator.__init__ $(id) : $(source-types) : $(target-types) : $(requirements) : $(optional-properties) ; - } -} - -rule register-fortran-compiler ( id : source-types + : target-types + : requirements * : optional-properties * ) -{ - local g = [ new fortran-compiling-generator $(id) : $(source-types) : $(target-types) : $(requirements) : $(optional-properties) ] ; - generators.register $(g) ; -} - -class fortran90-compiling-generator : generator -{ - rule __init__ ( id : source-types + : target-types + : requirements * : optional-properties * ) - { - generator.__init__ $(id) : $(source-types) : $(target-types) : $(requirements) : $(optional-properties) ; - } -} - -rule register-fortran90-compiler ( id : source-types + : target-types + : requirements * : optional-properties * ) -{ - local g = [ new fortran90-compiling-generator $(id) : $(source-types) : $(target-types) : $(requirements) : $(optional-properties) ] ; - generators.register $(g) ; -} - -# FIXME: this is ugly, should find a better way (we'd want client code to -# register all generators as "generator.some-rule", not with "some-module.some-rule".) -IMPORT $(__name__) : register-fortran-compiler : : generators.register-fortran-compiler ; -IMPORT $(__name__) : register-fortran90-compiler : : generators.register-fortran90-compiler ; diff --git a/jam-files/boost-build/tools/gcc.jam b/jam-files/boost-build/tools/gcc.jam deleted file mode 100644 index f7b0da54..00000000 --- a/jam-files/boost-build/tools/gcc.jam +++ /dev/null @@ -1,1185 +0,0 @@ -# Copyright 2001 David Abrahams. -# Copyright 2002-2006 Rene Rivera. -# Copyright 2002-2003 Vladimir Prus. -# Copyright (c) 2005 Reece H. Dunn. -# Copyright 2006 Ilya Sokolov. -# Copyright 2007 Roland Schwarz -# Copyright 2007 Boris Gubenko. -# -# Distributed under the Boost Software License, Version 1.0. 
-# (See accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-
-import "class" : new ;
-import common ;
-import errors ;
-import feature ;
-import generators ;
-import os ;
-import pch ;
-import property ;
-import property-set ;
-import toolset ;
-import type ;
-import rc ;
-import regex ;
-import set ;
-import unix ;
-import fortran ;
-
-
-if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ]
-{
-    .debug-configuration = true ;
-}
-
-
-feature.extend toolset : gcc ;
-# feature.subfeature toolset gcc : flavor : : optional ;
-
-toolset.inherit-generators gcc : unix : unix.link unix.link.dll ;
-toolset.inherit-flags gcc : unix ;
-toolset.inherit-rules gcc : unix ;
-
-generators.override gcc.prebuilt : builtin.prebuilt ;
-generators.override gcc.searched-lib-generator : searched-lib-generator ;
-
-# Make gcc toolset object files use the "o" suffix on all platforms.
-type.set-generated-target-suffix OBJ : <toolset>gcc : o ;
-type.set-generated-target-suffix OBJ : <toolset>gcc <target-os>windows : o ;
-type.set-generated-target-suffix OBJ : <toolset>gcc <target-os>cygwin : o ;
-
-# Initializes the gcc toolset for the given version. If necessary, command may
-# be used to specify where the compiler is located. The parameter 'options' is a
-# space-delimited list of options, each one specified as
-# <option-name>option-value. Valid option names are: cxxflags, linkflags and
-# linker-type. Accepted linker-type values are aix, darwin, gnu, hpux, osf or
-# sun and the default value will be selected based on the current OS.
-# Example:
-#   using gcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ;
-#
-# The compiler command to use is detected in a three step manner:
-#   1) If an explicit command is specified by the user, it will be used and must be available.
-#   2) If only a certain version is specified, it is enforced:
-#      - either a command 'g++-VERSION' must be available
-#      - or the default command 'g++' must be available and match the exact version.
-#   3) Without user-provided restrictions use default 'g++'
-rule init ( version ? : command * : options * )
-{
-    #1): use user-provided command
-    local tool-command = ;
-    if $(command)
-    {
-        tool-command = [ common.get-invocation-command-nodefault gcc : g++ : $(command) ] ;
-        if ! $(tool-command)
-        {
-            errors.error "toolset gcc initialization:" :
-                "provided command '$(command)' not found" :
-                "initialized from" [ errors.nearest-user-location ] ;
-        }
-    }
-    #2): enforce user-provided version
-    else if $(version)
-    {
-        tool-command = [ common.get-invocation-command-nodefault gcc : "g++-$(version[1])" ] ;
-
-        #2.1) fallback: check whether "g++" reports the requested version
-        if ! $(tool-command)
-        {
-            tool-command = [ common.get-invocation-command-nodefault gcc : g++ ] ;
-            if $(tool-command)
-            {
-                local tool-command-string = $(tool-command:J=" ") ;
-                local tool-version = [ MATCH "^([0-9.]+)" : [ SHELL "$(tool-command-string) -dumpversion" ] ] ;
-                if $(tool-version) != $(version)
-                {
-                    # Permit a match between a two-digit version specified by the
-                    # user (e.g. 4.4) and a 3-digit version reported by gcc.
-                    # Since only two digits are present in the binary name anyway,
-                    # insisting that the user specify a 3-digit version when
-                    # configuring Boost.Build, while it is not required on the
-                    # command line, would be strange.
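-                    # (Hypothetical example: 'using gcc : 4.4 ;' should accept a
-                    # default g++ that reports 4.4.3, after which the recorded
-                    # version is upgraded to the full 4.4.3.)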
-                    local stripped = [ MATCH "^([0-9]+\.[0-9]+).*" : $(tool-version) ] ;
-                    if $(stripped) != $(version)
-                    {
-                        errors.error "toolset gcc initialization:" :
-                            "version '$(version)' requested but 'g++-$(version)' not found and version '$(tool-version)' of default '$(tool-command)' does not match" :
-                            "initialized from" [ errors.nearest-user-location ] ;
-                        tool-command = ;
-                    }
-                    # Use full 3-digit version to be compatible with the 'using gcc ;' case
-                    version = $(tool-version) ;
-                }
-            }
-            else
-            {
-                errors.error "toolset gcc initialization:" :
-                    "version '$(version)' requested but neither 'g++-$(version)' nor default 'g++' found" :
-                    "initialized from" [ errors.nearest-user-location ] ;
-            }
-        }
-    }
-    #3) default: no command and no version specified, try using default command "g++"
-    else
-    {
-        tool-command = [ common.get-invocation-command-nodefault gcc : g++ ] ;
-        if ! $(tool-command)
-        {
-            errors.error "toolset gcc initialization:" :
-                "no command provided, default command 'g++' not found" :
-                "initialized from" [ errors.nearest-user-location ] ;
-        }
-    }
-
-
-    # Information about the gcc command...
-    # The command.
-    local command = $(tool-command) ;
-    # The root directory of the tool install.
-    local root = [ feature.get-values <root> : $(options) ] ;
-    # The bin directory where to find the command to execute.
-    local bin ;
-    # The flavor of compiler.
-    local flavor = [ feature.get-values <flavor> : $(options) ] ;
-    # Autodetect the root and bin dir if not given.
-    if $(command)
-    {
-        bin ?= [ common.get-absolute-tool-path $(command[-1]) ] ;
-        root ?= $(bin:D) ;
-    }
-    # The 'command' variable can have multiple elements. When calling
-    # the SHELL builtin we need a single string.
-    local command-string = $(command:J=" ") ;
-    # Autodetect the version and flavor if not given.
-    if $(command)
-    {
-        local machine = [ MATCH "^([^ ]+)"
-            : [ SHELL "$(command-string) -dumpmachine" ] ] ;
-        version ?= [ MATCH "^([0-9.]+)"
-            : [ SHELL "$(command-string) -dumpversion" ] ] ;
-        switch $(machine:L)
-        {
-            case *mingw* : flavor ?= mingw ;
-        }
-    }
-
-    local condition ;
-    if $(flavor)
-    {
-        condition = [ common.check-init-parameters gcc
-            : version $(version)
-            : flavor $(flavor)
-            ] ;
-    }
-    else
-    {
-        condition = [ common.check-init-parameters gcc
-            : version $(version)
-            ] ;
-        condition = $(condition) ; #/<toolset-gcc:flavor> ;
-    }
-
-    common.handle-options gcc : $(condition) : $(command) : $(options) ;
-
-    local linker = [ feature.get-values <linker-type> : $(options) ] ;
-    # The logic below should actually be keyed on <target-os>
-    if ! $(linker)
-    {
-        if [ os.name ] = OSF
-        {
-            linker = osf ;
-        }
-        else if [ os.name ] = HPUX
-        {
-            linker = hpux ;
-        }
-        else if [ os.name ] = AIX
-        {
-            linker = aix ;
-        }
-        else if [ os.name ] = SOLARIS
-        {
-            linker = sun ;
-        }
-        else
-        {
-            linker = gnu ;
-        }
-    }
-    init-link-flags gcc $(linker) $(condition) ;
-
-
-    # If gcc is installed in a non-standard location, we'd need to add
-    # LD_LIBRARY_PATH when running programs created with it (for unit-test/run
-    # rules).
-    if $(command)
-    {
-        # On multilib 64-bit boxes, there are both 32-bit and 64-bit libraries
-        # and all must be added to LD_LIBRARY_PATH. The linker will pick the
-        # right ones. Note that we don't provide a clean way to build a 32-bit
-        # binary with a 64-bit compiler, but the user can always pass -m32
-        # manually.
-        local lib_path = $(root)/bin $(root)/lib $(root)/lib32 $(root)/lib64 ;
-        if $(.debug-configuration)
-        {
-            ECHO notice: using gcc libraries :: $(condition) :: $(lib_path) ;
-        }
-        toolset.flags gcc.link RUN_PATH $(condition) : $(lib_path) ;
-    }
-
-    # If it's not a system gcc install we should adjust the various programs as
-    # needed to prefer using the install-specific versions. This is essential
-    # for correct use of MinGW and for cross-compiling.
-
-    local nl = "
-" ;
-
-    # - The archive builder.
-    local archiver = [ common.get-invocation-command gcc
-        : [ NORMALIZE_PATH [ MATCH "(.*)[$(nl)]+" : [ SHELL "$(command-string) -print-prog-name=ar" ] ] ]
-        : [ feature.get-values <archiver> : $(options) ]
-        : $(bin)
-        : search-path ] ;
-    toolset.flags gcc.archive .AR $(condition) : $(archiver[1]) ;
-    if $(.debug-configuration)
-    {
-        ECHO notice: using gcc archiver :: $(condition) :: $(archiver[1]) ;
-    }
-
-    # - Ranlib
-    local ranlib = [ common.get-invocation-command gcc
-        : [ NORMALIZE_PATH [ MATCH "(.*)[$(nl)]+" : [ SHELL "$(command-string) -print-prog-name=ranlib" ] ] ]
-        : [ feature.get-values <ranlib> : $(options) ]
-        : $(bin)
-        : search-path ] ;
-    toolset.flags gcc.archive .RANLIB $(condition) : $(ranlib[1]) ;
-    if $(.debug-configuration)
-    {
-        ECHO notice: using gcc ranlib :: $(condition) :: $(ranlib[1]) ;
-    }
-
-
-    # - The resource compiler.
-    local rc =
-        [ common.get-invocation-command-nodefault gcc
-            : windres : [ feature.get-values <rc> : $(options) ] : $(bin) : search-path ] ;
-    local rc-type =
-        [ feature.get-values <rc-type> : $(options) ] ;
-    rc-type ?= windres ;
-    if ! $(rc)
-    {
-        # If we can't find an RC compiler we fall back to a null RC compiler that
-        # creates empty object files. This allows the same Jamfiles to work
-        # across the board. The null RC uses the assembler to create the empty
-        # objects, so configure that.
-        rc = [ common.get-invocation-command gcc : as : : $(bin) : search-path ] ;
-        rc-type = null ;
-    }
-    rc.configure $(rc) : $(condition) : <rc-type>$(rc-type) ;
-}
-
-if [ os.name ] = NT
-{
-    # This causes single-line command invocation to not go through .bat files,
-    # thus avoiding command-line length limitations.
-    JAMSHELL = % ;
-}
-
-generators.register-c-compiler gcc.compile.c++.preprocess : CPP : PREPROCESSED_CPP : <toolset>gcc ;
-generators.register-c-compiler gcc.compile.c.preprocess : C : PREPROCESSED_C : <toolset>gcc ;
-generators.register-c-compiler gcc.compile.c++ : CPP : OBJ : <toolset>gcc ;
-generators.register-c-compiler gcc.compile.c : C : OBJ : <toolset>gcc ;
-generators.register-c-compiler gcc.compile.asm : ASM : OBJ : <toolset>gcc ;
-generators.register-fortran-compiler gcc.compile.fortran : FORTRAN FORTRAN90 : OBJ : <toolset>gcc ;
-
-# pch support
-
-# The compiler looks for a precompiled header in each directory just before it
-# looks for the include file in that directory. The name searched for is the
-# name specified in the #include directive with ".gch" suffix appended. The
-# logic in gcc-pch-generator will make sure that the BASE_PCH suffix is
-# appended to the full name of the header.
-
-type.set-generated-target-suffix PCH : <toolset>gcc : gch ;
-
-# GCC-specific pch generator.
-class gcc-pch-generator : pch-generator
-{
-    import project ;
-    import property-set ;
-    import type ;
-
-    rule run-pch ( project name ? : property-set : sources + )
-    {
-        # Find the header in sources. Ignore any CPP sources.
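-        # (For a hypothetical 'cpp-pch pch : pch.hpp ;' target referenced from
-        # 'exe app : app.cpp pch ;', pch.hpp is the header picked out by the
-        # loop below and app.cpp is ignored.)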
-        local header ;
-        for local s in $(sources)
-        {
-            if [ type.is-derived [ $(s).type ] H ]
-            {
-                header = $(s) ;
-            }
-        }
-
-        # Error handling: Base header file name should be the same as the base
-        # precompiled header name.
-        local header-name = [ $(header).name ] ;
-        local header-basename = $(header-name:B) ;
-        if $(header-basename) != $(name)
-        {
-            local location = [ $(project).project-module ] ;
-            errors.user-error "in" $(location)": pch target name `"$(name)"' should be the same as the base name of header file `"$(header-name)"'" ;
-        }
-
-        local pch-file = [ generator.run $(project) $(name) : $(property-set)
-            : $(header) ] ;
-
-        # return result of base class and pch-file property as usage-requirements
-        return
-            [ property-set.create <pch-file>$(pch-file) <cflags>-Winvalid-pch ]
-            $(pch-file)
-            ;
-    }
-
-    # Calls the base version specifying the source's name as the name of the
-    # created target. As a result, the PCH will be named whatever.hpp.gch, and
-    # not whatever.gch.
-    rule generated-targets ( sources + : property-set : project name ? )
-    {
-        name = [ $(sources[1]).name ] ;
-        return [ generator.generated-targets $(sources)
-            : $(property-set) : $(project) $(name) ] ;
-    }
-}
-
-# Note: the 'H' source type will catch both '.h' and '.hpp' headers. The
-# latter have the HPP type, but HPP is derived from H. The type of compilation
-# is determined entirely by the destination type.
-generators.register [ new gcc-pch-generator gcc.compile.c.pch : H : C_PCH : <pch>on <toolset>gcc ] ;
-generators.register [ new gcc-pch-generator gcc.compile.c++.pch : H : CPP_PCH : <pch>on <toolset>gcc ] ;
-
-# Override default do-nothing generators.
-generators.override gcc.compile.c.pch : pch.default-c-pch-generator ;
-generators.override gcc.compile.c++.pch : pch.default-cpp-pch-generator ;
-
-toolset.flags gcc.compile PCH_FILE <pch>on : <pch-file> ;
-
-# Declare flags and action for compilation.
-toolset.flags gcc.compile OPTIONS <optimization>off : -O0 ;
-toolset.flags gcc.compile OPTIONS <optimization>speed : -O3 ;
-toolset.flags gcc.compile OPTIONS <optimization>space : -Os ;
-
-toolset.flags gcc.compile OPTIONS <inlining>off : -fno-inline ;
-toolset.flags gcc.compile OPTIONS <inlining>on : -Wno-inline ;
-toolset.flags gcc.compile OPTIONS <inlining>full : -finline-functions -Wno-inline ;
-
-toolset.flags gcc.compile OPTIONS <warnings>off : -w ;
-toolset.flags gcc.compile OPTIONS <warnings>on : -Wall ;
-toolset.flags gcc.compile OPTIONS <warnings>all : -Wall -pedantic ;
-toolset.flags gcc.compile OPTIONS <warnings-as-errors>on : -Werror ;
-
-toolset.flags gcc.compile OPTIONS <debug-symbols>on : -g ;
-toolset.flags gcc.compile OPTIONS <profiling>on : -pg ;
-toolset.flags gcc.compile OPTIONS <rtti>off : -fno-rtti ;
-
-rule setup-fpic ( targets * : sources * : properties * )
-{
-    local link = [ feature.get-values link : $(properties) ] ;
-    if $(link) = shared
-    {
-        local target = [ feature.get-values target-os : $(properties) ] ;
-
-        # This logic will add -fPIC for all compilations:
-        #
-        #   lib a : a.cpp b ;
-        #   obj b : b.cpp ;
-        #   exe c : c.cpp a d ;
-        #   obj d : d.cpp ;
-        #
-        # This all is fine, except that 'd' will be compiled with -fPIC even though
-        # it is not needed, as 'd' is used only in exe. However, it is hard to
-        # detect where a target is going to be used. Alternatively, we can set -fPIC
-        # only when main target type is LIB but then 'b' would be compiled without
-        # -fPIC which would lead to link errors on x86-64. So, compile everything
-        # with -fPIC.
-        #
-        # Yet another alternative would be to create a propagated <sharedable>
-        # feature and set it when building shared libraries, but that would be hard
-        # to implement and would increase the target path length even more.
-
-        # On Windows, -fPIC is the default; specifying it explicitly leads to
-        # a warning.
-        if $(target) != cygwin && $(target) != windows
-        {
-            OPTIONS on $(targets) += -fPIC ;
-        }
-    }
-}
-
-rule setup-address-model ( targets * : sources * : properties * )
-{
-    local model = [ feature.get-values address-model : $(properties) ] ;
-    if $(model)
-    {
-        local option ;
-        local os = [ feature.get-values target-os : $(properties) ] ;
-        if $(os) = aix
-        {
-            if $(model) = 32
-            {
-                option = -maix32 ;
-            }
-            else
-            {
-                option = -maix64 ;
-            }
-        }
-        else if $(os) = hpux
-        {
-            if $(model) = 32
-            {
-                option = -milp32 ;
-            }
-            else
-            {
-                option = -mlp64 ;
-            }
-        }
-        else
-        {
-            if $(model) = 32
-            {
-                option = -m32 ;
-            }
-            else if $(model) = 64
-            {
-                option = -m64 ;
-            }
-            # For darwin, the model can be 32_64. darwin.jam will handle that
-            # on its own.
-        }
-        OPTIONS on $(targets) += $(option) ;
-    }
-}
-
-
-# FIXME: this should not use os.name.
-if [ os.name ] != NT && [ os.name ] != OSF && [ os.name ] != HPUX && [ os.name ] != AIX
-{
-    # OSF does have an option called -soname but it does not seem to work as
-    # expected, therefore it has been disabled.
-    HAVE_SONAME   = "" ;
-    SONAME_OPTION = -h ;
-}
-
-# HPUX, for some reason, seems to use '+h', not '-h'.
-if [ os.name ] = HPUX
-{
-    HAVE_SONAME   = "" ;
-    SONAME_OPTION = +h ;
-}
-
-toolset.flags gcc.compile USER_OPTIONS <cflags> ;
-toolset.flags gcc.compile.c++ USER_OPTIONS <cxxflags> ;
-toolset.flags gcc.compile DEFINES <define> ;
-toolset.flags gcc.compile INCLUDES <include> ;
-toolset.flags gcc.compile.c++ TEMPLATE_DEPTH <c++-template-depth> ;
-toolset.flags gcc.compile.fortran USER_OPTIONS <fflags> ;
-
-rule compile.c++.pch ( targets * : sources * : properties * )
-{
-    setup-threading $(targets) : $(sources) : $(properties) ;
-    setup-fpic $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-}
-
-actions compile.c++.pch
-{
-    "$(CONFIG_COMMAND)" -x c++-header $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"
-}
-
-rule compile.c.pch ( targets * : sources * : properties * )
-{
-    setup-threading $(targets) : $(sources) : $(properties) ;
-    setup-fpic $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-}
-
-actions compile.c.pch
-{
-    "$(CONFIG_COMMAND)" -x c-header $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"
-}
-
-rule compile.c++.preprocess ( targets * : sources * : properties * )
-{
-    setup-threading $(targets) : $(sources) : $(properties) ;
-    setup-fpic $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-
-    # Some extensions are compiled as C++ by default. For others, we need to
-    # pass -x c++. We could always pass -x c++ but distcc does not work with it.
-    if ! $(>:S) in .cc .cp .cxx .cpp .c++ .C
-    {
-        LANG on $(<) = "-x c++" ;
-    }
-    DEPENDS $(<) : [ on $(<) return $(PCH_FILE) ] ;
-}
-
-rule compile.c.preprocess ( targets * : sources * : properties * )
-{
-    setup-threading $(targets) : $(sources) : $(properties) ;
-    setup-fpic $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-
-    # If we use the name g++ then the default file suffix -> language mapping
-    # does not work. So we have to pass the -x option. Maybe we can work around
-    # this by allowing the user to specify both C and C++ compiler names.
-    #if $(>:S) != .c
-    #{
-        LANG on $(<) = "-x c" ;
-    #}
-    DEPENDS $(<) : [ on $(<) return $(PCH_FILE) ] ;
-}
-
-rule compile.c++ ( targets * : sources * : properties * )
-{
-    setup-threading $(targets) : $(sources) : $(properties) ;
-    setup-fpic $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-
-    # Some extensions are compiled as C++ by default. For others, we need to
-    # pass -x c++. We could always pass -x c++ but distcc does not work with it.
-    if ! $(>:S) in .cc .cp .cxx .cpp .c++ .C
-    {
-        LANG on $(<) = "-x c++" ;
-    }
-    DEPENDS $(<) : [ on $(<) return $(PCH_FILE) ] ;
-
-    # Here we want to raise the template-depth parameter value to something
-    # higher than the default value of 17. Note that we could do this using the
-    # feature.set-default rule but we do not want to set the default value for
-    # all toolsets as well.
-    #
-    # TODO: This 'modified default' has been inherited from some 'older Boost
-    # Build implementation' and has most likely been added to make some Boost
-    # library parts compile correctly. We should see what exactly prompted this
-    # and whether we can get around the problem more locally.
-    local template-depth = [ on $(<) return $(TEMPLATE_DEPTH) ] ;
-    if ! $(template-depth)
-    {
-        TEMPLATE_DEPTH on $(<) = 128 ;
-    }
-}
-
-rule compile.c ( targets * : sources * : properties * )
-{
-    setup-threading $(targets) : $(sources) : $(properties) ;
-    setup-fpic $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-
-    # If we use the name g++ then the default file suffix -> language mapping
-    # does not work. So we have to pass the -x option. Maybe we can work around
-    # this by allowing the user to specify both C and C++ compiler names.
-    #if $(>:S) != .c
-    #{
-        LANG on $(<) = "-x c" ;
-    #}
-    DEPENDS $(<) : [ on $(<) return $(PCH_FILE) ] ;
-}
-
-rule compile.fortran ( targets * : sources * : properties * )
-{
-    setup-threading $(targets) : $(sources) : $(properties) ;
-    setup-fpic $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-}
-
-actions compile.c++ bind PCH_FILE
-{
-    "$(CONFIG_COMMAND)" $(LANG) -ftemplate-depth-$(TEMPLATE_DEPTH) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" -c -o "$(<:W)" "$(>:W)"
-}
-
-actions compile.c bind PCH_FILE
-{
-    "$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" -c -o "$(<)" "$(>)"
-}
-
-actions compile.c++.preprocess bind PCH_FILE
-{
-    "$(CONFIG_COMMAND)" $(LANG) -ftemplate-depth-$(TEMPLATE_DEPTH) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" "$(>:W)" -E >"$(<:W)"
-}
-
-actions compile.c.preprocess bind PCH_FILE
-{
-    "$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" "$(>)" -E >$(<)
-}
-
-actions compile.fortran
-{
-    "$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" -c -o "$(<)" "$(>)"
-}
-
-rule compile.asm ( targets * : sources * : properties * )
-{
-    setup-fpic $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-    LANG on $(<) = "-x assembler-with-cpp" ;
-}
-
-actions compile.asm
-{
-    "$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"
-}
-
-# The class which checks that we do not try to use the <runtime-link>static
-# property while creating or using a shared library, since that is not
-# supported by gcc/libc.
-class gcc-linking-generator : unix-linking-generator
-{
-    rule run ( project name ? : property-set : sources + )
-    {
-        # TODO: Replace this with the use of a target-os property.
-        local no-static-link = ;
-        if [ modules.peek : UNIX ]
-        {
-            switch [ modules.peek : JAMUNAME ]
-            {
-                case * : no-static-link = true ;
-            }
-        }
-
-        local properties = [ $(property-set).raw ] ;
-        local reason ;
-        if $(no-static-link) && <runtime-link>static in $(properties)
-        {
-            if <link>shared in $(properties)
-            {
-                reason =
-                    "On gcc, a DLL can't be built with '<runtime-link>static'." ;
-            }
-            else if [ type.is-derived $(self.target-types[1]) EXE ]
-            {
-                for local s in $(sources)
-                {
-                    local type = [ $(s).type ] ;
-                    if $(type) && [ type.is-derived $(type) SHARED_LIB ]
-                    {
-                        reason =
-                            "On gcc, using DLLs together with the"
-                            "<runtime-link>static option is not possible." ;
-                    }
-                }
-            }
-        }
-        if $(reason)
-        {
-            ECHO warning:
-                $(reason) ;
-            ECHO warning:
-                "It is suggested to use '<runtime-link>static' together"
-                "with '<link>static'." ;
-            return ;
-        }
-        else
-        {
-            local generated-targets = [ unix-linking-generator.run $(project)
-                $(name) : $(property-set) : $(sources) ] ;
-            return $(generated-targets) ;
-        }
-    }
-}
-
-# The set of permissible input types is different on mingw.
-# So, define two sets of generators, with mingw generators
-# selected when target-os=windows.
-
-local g ;
-g = [ new gcc-linking-generator gcc.mingw.link
-    : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB
-    : EXE
-    : <toolset>gcc <target-os>windows ] ;
-$(g).set-rule-name gcc.link ;
-generators.register $(g) ;
-
-g = [ new gcc-linking-generator gcc.mingw.link.dll
-    : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB
-    : IMPORT_LIB SHARED_LIB
-    : <toolset>gcc <target-os>windows ] ;
-$(g).set-rule-name gcc.link.dll ;
-generators.register $(g) ;
-
-generators.register
-    [ new gcc-linking-generator gcc.link
-        : LIB OBJ
-        : EXE
-        : <toolset>gcc ] ;
-generators.register
-    [ new gcc-linking-generator gcc.link.dll
-        : LIB OBJ
-        : SHARED_LIB
-        : <toolset>gcc ] ;
-
-generators.override gcc.mingw.link : gcc.link ;
-generators.override gcc.mingw.link.dll : gcc.link.dll ;
-
-# Cygwin is similar to msvc and mingw in that it uses import libraries.
-# While in simple cases it can link directly to a shared library, that is
-# believed to be slower and is not always possible. Define cygwin-specific
-# generators here.
-
-g = [ new gcc-linking-generator gcc.cygwin.link
-    : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB
-    : EXE
-    : <toolset>gcc <target-os>cygwin ] ;
-$(g).set-rule-name gcc.link ;
-generators.register $(g) ;
-
-g = [ new gcc-linking-generator gcc.cygwin.link.dll
-    : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB
-    : IMPORT_LIB SHARED_LIB
-    : <toolset>gcc <target-os>cygwin ] ;
-$(g).set-rule-name gcc.link.dll ;
-generators.register $(g) ;
-
-generators.override gcc.cygwin.link : gcc.link ;
-generators.override gcc.cygwin.link.dll : gcc.link.dll ;
-
-# Declare flags for linking.
-# First, the common flags.
-toolset.flags gcc.link OPTIONS <debug-symbols>on : -g ;
-toolset.flags gcc.link OPTIONS <profiling>on : -pg ;
-toolset.flags gcc.link USER_OPTIONS <linkflags> ;
-toolset.flags gcc.link LINKPATH <library-path> ;
-toolset.flags gcc.link FINDLIBS-ST <find-static-library> ;
-toolset.flags gcc.link FINDLIBS-SA <find-shared-library> ;
-toolset.flags gcc.link LIBRARIES <library-file> ;
-
-toolset.flags gcc.link.dll .IMPLIB-COMMAND <target-os>windows : "-Wl,--out-implib," ;
-toolset.flags gcc.link.dll .IMPLIB-COMMAND <target-os>cygwin : "-Wl,--out-implib," ;
-
-# For <runtime-link>static we made sure there are no dynamic libraries in the
-# link. On HP-UX not all system libraries exist as archived libraries (for
-# example, there is no libunwind.a), so, on this platform, the -static option
-# cannot be specified.
-if [ os.name ] != HPUX
-{
-    toolset.flags gcc.link OPTIONS <runtime-link>static : -static ;
-}
-
-# Now, the vendor specific flags.
-# The parameter linker can be either aix, darwin, gnu, hpux, osf or sun.
-rule init-link-flags ( toolset linker condition )
-{
-    switch $(linker)
-    {
-        case aix :
-        {
-            #
-            # On AIX we *have* to use the native linker.
-            #
-            # Using -brtl, the AIX linker will look for libraries with both the .a
-            # and .so extensions, such as libfoo.a and libfoo.so. Without -brtl, the
-            # AIX linker looks only for libfoo.a. Note that libfoo.a is an archived
-            # file that may contain shared objects and is different from static libs
-            # as on Linux.
-            #
-            # The -bnoipath strips the prepending (relative) path of libraries from
-            # the loader section in the target library or executable. Hence, during
-            # load-time LIBPATH (identical to LD_LIBRARY_PATH) or a hard-coded
-            # -blibpath (*similar* to -lrpath/-lrpath-link) is searched. Without
-            # this option, the prepending (relative) path + library name is
-            # hard-coded in the loader section, causing *only* this path to be
-            # searched during load-time. Note that the AIX linker does not have an
-            # -soname equivalent; this is as close as it gets.
-            #
-            # The above options are definitely for AIX 5.x, and most likely also for
-            # AIX 4.x and AIX 6.x. For details about the AIX linker see:
-            # http://download.boulder.ibm.com/ibmdl/pub/software/dw/aix/es-aix_ll.pdf
-            #
-
-            toolset.flags $(toolset).link OPTIONS : -Wl,-brtl -Wl,-bnoipath
-                : unchecked ;
-        }
-
-        case darwin :
-        {
-            # On Darwin, the -s option to ld does not work unless we pass -static,
-            # and passing -static unconditionally is a bad idea. So, don't pass -s
-            # at all; darwin.jam will use a separate 'strip' invocation.
-            toolset.flags $(toolset).link RPATH $(condition) : <dll-path> : unchecked ;
-            toolset.flags $(toolset).link RPATH_LINK $(condition) : <xdll-path> : unchecked ;
-        }
-
-        case gnu :
-        {
-            # Strip the binary when no debugging is needed. We use the --strip-all
-            # flag as opposed to -s since icc (intel's compiler) is generally
-            # option-compatible with and inherits from the gcc toolset, but does not
-            # support -s.
-            toolset.flags $(toolset).link OPTIONS $(condition)/<strip>on : -Wl,--strip-all : unchecked ;
-            toolset.flags $(toolset).link RPATH $(condition) : <dll-path> : unchecked ;
-            toolset.flags $(toolset).link RPATH_LINK $(condition) : <xdll-path> : unchecked ;
-            toolset.flags $(toolset).link START-GROUP $(condition) : -Wl,--start-group : unchecked ;
-            toolset.flags $(toolset).link END-GROUP $(condition) : -Wl,--end-group : unchecked ;
-
-            # gnu ld has the ability to change the search behaviour for libraries
-            # referenced by the -l switch. These modifiers are -Bstatic and -Bdynamic
-            # and change search for -l switches that follow them. The following list
-            # shows the tried variants.
-            # The search stops at the first variant that has a match.
-            # *nix: -Bstatic -lxxx
-            #    libxxx.a
-            #
-            # *nix: -Bdynamic -lxxx
-            #    libxxx.so
-            #    libxxx.a
-            #
-            # windows (mingw,cygwin) -Bstatic -lxxx
-            #    libxxx.a
-            #    xxx.lib
-            #
-            # windows (mingw,cygwin) -Bdynamic -lxxx
-            #    libxxx.dll.a
-            #    xxx.dll.a
-            #    libxxx.a
-            #    xxx.lib
-            #    cygxxx.dll (*)
-            #    libxxx.dll
-            #    xxx.dll
-            #    libxxx.a
-            #
-            # (*) This is for cygwin
-            # Please note that -Bstatic and -Bdynamic are not a guarantee that a
-            # static or dynamic lib indeed gets linked in. The switches only change
-            # search patterns!
-
-            # On *nix mixing shared libs with static runtime is not a good idea.
-            toolset.flags $(toolset).link FINDLIBS-ST-PFX $(condition)/<runtime-link>shared
-                : -Wl,-Bstatic : unchecked ;
-            toolset.flags $(toolset).link FINDLIBS-SA-PFX $(condition)/<runtime-link>shared
-                : -Wl,-Bdynamic : unchecked ;
-
-            # On windows allow mixing of static and dynamic libs with static
-            # runtime.
-            toolset.flags $(toolset).link FINDLIBS-ST-PFX $(condition)/<runtime-link>static/<target-os>windows
-                : -Wl,-Bstatic : unchecked ;
-            toolset.flags $(toolset).link FINDLIBS-SA-PFX $(condition)/<runtime-link>static/<target-os>windows
-                : -Wl,-Bdynamic : unchecked ;
-            toolset.flags $(toolset).link OPTIONS $(condition)/<runtime-link>static/<target-os>windows
-                : -Wl,-Bstatic : unchecked ;
-        }
-
-        case hpux :
-        {
-            toolset.flags $(toolset).link OPTIONS $(condition)/<strip>on
-                : -Wl,-s : unchecked ;
-            toolset.flags $(toolset).link OPTIONS $(condition)/<link>shared
-                : -fPIC : unchecked ;
-        }
-
-        case osf :
-        {
-            # No --strip-all, just -s.
-            toolset.flags $(toolset).link OPTIONS $(condition)/<strip>on
-                : -Wl,-s : unchecked ;
-            toolset.flags $(toolset).link RPATH $(condition) : <dll-path>
-                : unchecked ;
-            # This does not support -R.
-            toolset.flags $(toolset).link RPATH_OPTION $(condition) : -rpath
-                : unchecked ;
-            # -rpath-link is not supported at all.
-        }
-
-        case sun :
-        {
-            toolset.flags $(toolset).link OPTIONS $(condition)/<strip>on
-                : -Wl,-s : unchecked ;
-            toolset.flags $(toolset).link RPATH $(condition) : <dll-path>
-                : unchecked ;
-            # The Solaris linker does not have a separate -rpath-link, but allows
-            # -L to be used for the same purpose.
-            toolset.flags $(toolset).link LINKPATH $(condition) : <xdll-path>
-                : unchecked ;
-
-            # This permits shared libraries with non-PIC code on Solaris.
-            # VP, 2004/09/07: Now that we have -fPIC hardcoded in link.dll, the
-            # following is not needed. Whether -fPIC should be hardcoded, is a
-            # separate question.
-            # AH, 2004/10/16: it is still necessary because some tests link against
-            # static libraries that were compiled without PIC.
-            toolset.flags $(toolset).link OPTIONS $(condition)/<link>shared
-                : -mimpure-text : unchecked ;
-        }
-
-        case * :
-        {
-            errors.user-error
-                "$(toolset) initialization: invalid linker '$(linker)'" :
-                "The value '$(linker)' specified for <linker> is not recognized." :
-                "Possible values are 'aix', 'darwin', 'gnu', 'hpux', 'osf' or 'sun'" ;
-        }
-    }
-}
-
-# Enclose the RPATH variable on 'targets' in (double) quotes,
-# unless it's already enclosed in single quotes.
-# This special casing is done because it's common to pass
-# '$ORIGIN' to the linker -- it has to have single quotes
-# to prevent expansion by the shell -- and if we added double
-# quotes around it, the single quotes would no longer prevent
-# that expansion.
-rule quote-rpath ( targets * )
-{
-    local r = [ on $(targets[1]) return $(RPATH) ] ;
-    if ! [ MATCH "('.*')" : $(r) ]
-    {
-        r = "\"$(r)\"" ;
-    }
-    RPATH on $(targets) = $(r) ;
-}
-
-# Declare actions for linking.
-rule link ( targets * : sources * : properties * )
-{
-    setup-threading $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-    SPACE on $(targets) = " " ;
-    # Serialize execution of the 'link' action, since running N links in
-    # parallel is just slower. For now, serialize only gcc links, it might be a
-    # good idea to serialize all links.
-    JAM_SEMAPHORE on $(targets) = <s>gcc-link-semaphore ;
-    quote-rpath $(targets) ;
-}
-
-actions link bind LIBRARIES
-{
-    "$(CONFIG_COMMAND)" -L"$(LINKPATH)" -Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,$(RPATH) -Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" -o "$(<)" $(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) -l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) $(OPTIONS) $(USER_OPTIONS)
-
-}
-
-# Default value. Mostly for the sake of intel-linux that inherits from gcc, but
-# does not have the same logic to set the .AR variable. We can put the same
-# logic in intel-linux, but that's hardly worth the trouble as on Linux, 'ar' is
-# always available.
-.AR = ar ;
-.RANLIB = ranlib ;
-
-toolset.flags gcc.archive AROPTIONS <archiveflags> ;
-
-rule archive ( targets * : sources * : properties * )
-{
-    # Always remove archive and start again. Here is the rationale from
-    #
-    # Andre Hentz:
-    #
-    # I had a file, say a1.c, that was included into liba.a. I moved a1.c to
-    # a2.c, updated my Jamfiles and rebuilt. My program was crashing with absurd
-    # errors. After some debugging I traced it back to the fact that a1.o was
-    # *still* in liba.a
-    #
-    # Rene Rivera:
-    #
-    # Originally removing the archive was done by splicing an RM onto the
-    # archive action. That makes archives fail to build on NT when they have
-    # many files because it will no longer execute the action directly and will
-    # blow the line length limit. Instead we remove the file in a different
-    # action, just before building the archive.
-    #
-    local clean.a = $(targets[1])(clean) ;
-    TEMPORARY $(clean.a) ;
-    NOCARE $(clean.a) ;
-    LOCATE on $(clean.a) = [ on $(targets[1]) return $(LOCATE) ] ;
-    DEPENDS $(clean.a) : $(sources) ;
-    DEPENDS $(targets) : $(clean.a) ;
-    common.RmTemps $(clean.a) : $(targets) ;
-}
-
-# Declare action for creating static libraries.
-# The letter 'r' means to add files to the archive with replacement. Since we
-# remove the archive, we don't care about replacement, but there's no option
-# "add without replacement".
-# The letter 'c' suppresses the warning in case the archive does not exist yet.
-# That warning is produced only on some platforms, for whatever reasons.
-actions piecemeal archive
-{
-    "$(.AR)" $(AROPTIONS) rc "$(<)" "$(>)"
-    "$(.RANLIB)" "$(<)"
-}
-
-rule link.dll ( targets * : sources * : properties * )
-{
-    setup-threading $(targets) : $(sources) : $(properties) ;
-    setup-address-model $(targets) : $(sources) : $(properties) ;
-    SPACE on $(targets) = " " ;
-    JAM_SEMAPHORE on $(targets) = <s>gcc-link-semaphore ;
-    quote-rpath $(targets) ;
-}
-
-# Differs from 'link' above only by -shared.
-actions link.dll bind LIBRARIES
-{
-    "$(CONFIG_COMMAND)" -L"$(LINKPATH)" -Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,$(RPATH) "$(.IMPLIB-COMMAND)$(<[1])" -o "$(<[-1])" $(HAVE_SONAME)-Wl,$(SONAME_OPTION)$(SPACE)-Wl,$(<[-1]:D=) -shared $(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) -l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) $(OPTIONS) $(USER_OPTIONS)
-}
-
-rule setup-threading ( targets * : sources * : properties * )
-{
-    local threading = [ feature.get-values threading : $(properties) ] ;
-    if $(threading) = multi
-    {
-        local target = [ feature.get-values target-os : $(properties) ] ;
-        local option ;
-        local libs ;
-
-        switch $(target)
-        {
-            case windows :
-            {
-                option = -mthreads ;
-            }
-            case cygwin :
-            {
-                option = -mthreads ;
-            }
-            case solaris :
-            {
-                option = -pthreads ;
-                libs = rt ;
-            }
-            case beos :
-            {
-                # BeOS has no threading options, so do not set anything here.
-            }
-            case *bsd :
-            {
-                option = -pthread ;
-                # There is no -lrt on BSD.
-            }
-            case sgi :
-            {
-                # gcc on IRIX does not support multi-threading so do not set
-                # anything here.
-            }
-            case darwin :
-            {
-                # Darwin has no threading options so do not set anything here.
-            }
-            case * :
-            {
-                option = -pthread ;
-                libs = rt ;
-            }
-        }
-
-        if $(option)
-        {
-            OPTIONS on $(targets) += $(option) ;
-        }
-        if $(libs)
-        {
-            FINDLIBS-SA on $(targets) += $(libs) ;
-        }
-    }
-}
-
-local rule cpu-flags ( toolset variable : architecture : instruction-set + : values + : default ? )
-{
-    if $(default)
-    {
-        toolset.flags $(toolset) $(variable)
-            <architecture>$(architecture)/<instruction-set>
-            : $(values) ;
-    }
-    toolset.flags $(toolset) $(variable)
-        <architecture>/<instruction-set>$(instruction-set)
-        <architecture>$(architecture)/<instruction-set>$(instruction-set)
-        : $(values) ;
-}
-
-# Set architecture/instruction-set options.
-#
-# x86 and compatible
-# The 'native' option appeared in gcc 4.2 so we cannot safely use it
-# as the default. Use the conservative i386 instead.
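-# (Hypothetical usage example: a target declared as
-#    exe app : app.cpp : <architecture>x86 <instruction-set>core2 ;
-# would pick up -march=core2 from the table below.)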
-cpu-flags gcc OPTIONS : x86 : native : -march=native ; -cpu-flags gcc OPTIONS : x86 : i386 : -march=i386 : default ; -cpu-flags gcc OPTIONS : x86 : i486 : -march=i486 ; -cpu-flags gcc OPTIONS : x86 : i586 : -march=i586 ; -cpu-flags gcc OPTIONS : x86 : i686 : -march=i686 ; -cpu-flags gcc OPTIONS : x86 : pentium : -march=pentium ; -cpu-flags gcc OPTIONS : x86 : pentium-mmx : -march=pentium-mmx ; -cpu-flags gcc OPTIONS : x86 : pentiumpro : -march=pentiumpro ; -cpu-flags gcc OPTIONS : x86 : pentium2 : -march=pentium2 ; -cpu-flags gcc OPTIONS : x86 : pentium3 : -march=pentium3 ; -cpu-flags gcc OPTIONS : x86 : pentium3m : -march=pentium3m ; -cpu-flags gcc OPTIONS : x86 : pentium-m : -march=pentium-m ; -cpu-flags gcc OPTIONS : x86 : pentium4 : -march=pentium4 ; -cpu-flags gcc OPTIONS : x86 : pentium4m : -march=pentium4m ; -cpu-flags gcc OPTIONS : x86 : prescott : -march=prescott ; -cpu-flags gcc OPTIONS : x86 : nocona : -march=nocona ; -cpu-flags gcc OPTIONS : x86 : core2 : -march=core2 ; -cpu-flags gcc OPTIONS : x86 : k6 : -march=k6 ; -cpu-flags gcc OPTIONS : x86 : k6-2 : -march=k6-2 ; -cpu-flags gcc OPTIONS : x86 : k6-3 : -march=k6-3 ; -cpu-flags gcc OPTIONS : x86 : athlon : -march=athlon ; -cpu-flags gcc OPTIONS : x86 : athlon-tbird : -march=athlon-tbird ; -cpu-flags gcc OPTIONS : x86 : athlon-4 : -march=athlon-4 ; -cpu-flags gcc OPTIONS : x86 : athlon-xp : -march=athlon-xp ; -cpu-flags gcc OPTIONS : x86 : athlon-mp : -march=athlon-mp ; -## -cpu-flags gcc OPTIONS : x86 : k8 : -march=k8 ; -cpu-flags gcc OPTIONS : x86 : opteron : -march=opteron ; -cpu-flags gcc OPTIONS : x86 : athlon64 : -march=athlon64 ; -cpu-flags gcc OPTIONS : x86 : athlon-fx : -march=athlon-fx ; -cpu-flags gcc OPTIONS : x86 : winchip-c6 : -march=winchip-c6 ; -cpu-flags gcc OPTIONS : x86 : winchip2 : -march=winchip2 ; -cpu-flags gcc OPTIONS : x86 : c3 : -march=c3 ; -cpu-flags gcc OPTIONS : x86 : c3-2 : -march=c3-2 ; -# Sparc -cpu-flags gcc OPTIONS : sparc : c3 : -mcpu=c3 : default ; -cpu-flags gcc OPTIONS : sparc : v7 : -mcpu=v7 ; -cpu-flags gcc OPTIONS : sparc : cypress : -mcpu=cypress ; -cpu-flags gcc OPTIONS : sparc : v8 : -mcpu=v8 ; -cpu-flags gcc OPTIONS : sparc : supersparc : -mcpu=supersparc ; -cpu-flags gcc OPTIONS : sparc : sparclite : -mcpu=sparclite ; -cpu-flags gcc OPTIONS : sparc : hypersparc : -mcpu=hypersparc ; -cpu-flags gcc OPTIONS : sparc : sparclite86x : -mcpu=sparclite86x ; -cpu-flags gcc OPTIONS : sparc : f930 : -mcpu=f930 ; -cpu-flags gcc OPTIONS : sparc : f934 : -mcpu=f934 ; -cpu-flags gcc OPTIONS : sparc : sparclet : -mcpu=sparclet ; -cpu-flags gcc OPTIONS : sparc : tsc701 : -mcpu=tsc701 ; -cpu-flags gcc OPTIONS : sparc : v9 : -mcpu=v9 ; -cpu-flags gcc OPTIONS : sparc : ultrasparc : -mcpu=ultrasparc ; -cpu-flags gcc OPTIONS : sparc : ultrasparc3 : -mcpu=ultrasparc3 ; -# RS/6000 & PowerPC -cpu-flags gcc OPTIONS : power : 403 : -mcpu=403 ; -cpu-flags gcc OPTIONS : power : 505 : -mcpu=505 ; -cpu-flags gcc OPTIONS : power : 601 : -mcpu=601 ; -cpu-flags gcc OPTIONS : power : 602 : -mcpu=602 ; -cpu-flags gcc OPTIONS : power : 603 : -mcpu=603 ; -cpu-flags gcc OPTIONS : power : 603e : -mcpu=603e ; -cpu-flags gcc OPTIONS : power : 604 : -mcpu=604 ; -cpu-flags gcc OPTIONS : power : 604e : -mcpu=604e ; -cpu-flags gcc OPTIONS : power : 620 : -mcpu=620 ; -cpu-flags gcc OPTIONS : power : 630 : -mcpu=630 ; -cpu-flags gcc OPTIONS : power : 740 : -mcpu=740 ; -cpu-flags gcc OPTIONS : power : 7400 : -mcpu=7400 ; -cpu-flags gcc OPTIONS : power : 7450 : -mcpu=7450 ; -cpu-flags gcc OPTIONS : power : 750 : -mcpu=750 ; 
-cpu-flags gcc OPTIONS : power : 801 : -mcpu=801 ;
-cpu-flags gcc OPTIONS : power : 821 : -mcpu=821 ;
-cpu-flags gcc OPTIONS : power : 823 : -mcpu=823 ;
-cpu-flags gcc OPTIONS : power : 860 : -mcpu=860 ;
-cpu-flags gcc OPTIONS : power : 970 : -mcpu=970 ;
-cpu-flags gcc OPTIONS : power : 8540 : -mcpu=8540 ;
-cpu-flags gcc OPTIONS : power : power : -mcpu=power ;
-cpu-flags gcc OPTIONS : power : power2 : -mcpu=power2 ;
-cpu-flags gcc OPTIONS : power : power3 : -mcpu=power3 ;
-cpu-flags gcc OPTIONS : power : power4 : -mcpu=power4 ;
-cpu-flags gcc OPTIONS : power : power5 : -mcpu=power5 ;
-cpu-flags gcc OPTIONS : power : powerpc : -mcpu=powerpc ;
-cpu-flags gcc OPTIONS : power : powerpc64 : -mcpu=powerpc64 ;
-cpu-flags gcc OPTIONS : power : rios : -mcpu=rios ;
-cpu-flags gcc OPTIONS : power : rios1 : -mcpu=rios1 ;
-cpu-flags gcc OPTIONS : power : rios2 : -mcpu=rios2 ;
-cpu-flags gcc OPTIONS : power : rsc : -mcpu=rsc ;
-cpu-flags gcc OPTIONS : power : rs64a : -mcpu=rs64 ;
-# AIX variant of RS/6000 & PowerPC
-toolset.flags gcc AROPTIONS <address-model>64/<target-os>aix : "-X 64" ;
diff --git a/jam-files/boost-build/tools/gcc.py b/jam-files/boost-build/tools/gcc.py
deleted file mode 100644
index 2a3e675e..00000000
--- a/jam-files/boost-build/tools/gcc.py
+++ /dev/null
@@ -1,796 +0,0 @@
-# Status: being ported by Steven Watanabe
-# Base revision: 47077
-# TODO: common.jam needs to be ported
-# TODO: generators.jam needs to have register_c_compiler.
-#
-# Copyright 2001 David Abrahams.
-# Copyright 2002-2006 Rene Rivera.
-# Copyright 2002-2003 Vladimir Prus.
-# Copyright (c) 2005 Reece H. Dunn.
-# Copyright 2006 Ilya Sokolov.
-# Copyright 2007 Roland Schwarz
-# Copyright 2007 Boris Gubenko.
-# Copyright 2008 Steven Watanabe
-#
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-
-import os
-import subprocess
-import re
-
-import bjam
-
-from b2.tools import unix, common, rc, pch, builtin
-from b2.build import feature, type, toolset, generators
-from b2.util.utility import os_name, on_windows
-from b2.manager import get_manager
-from b2.build.generators import Generator
-from b2.build.toolset import flags
-from b2.util.utility import to_seq
-
-__debug = None
-
-def debug():
-    global __debug
-    if __debug is None:
-        __debug = "--debug-configuration" in bjam.variable("ARGV")
-    return __debug
-
-feature.extend('toolset', ['gcc'])
-
-
-toolset.inherit_generators('gcc', [], 'unix', ['unix.link', 'unix.link.dll'])
-toolset.inherit_flags('gcc', 'unix')
-toolset.inherit_rules('gcc', 'unix')
-
-generators.override('gcc.prebuilt', 'builtin.prebuilt')
-generators.override('gcc.searched-lib-generator', 'searched-lib-generator')
-
-# Target naming is determined by types/lib.jam and the settings below this
-# comment.
-#
-# On *nix:
-#     libxxx.a      static library
-#     libxxx.so     shared library
-#
-# On windows (mingw):
-#     libxxx.lib    static library
-#     xxx.dll       DLL
-#     xxx.lib       import library
-#
-# On windows (cygwin) i.e. <target-os>cygwin
-#     libxxx.a      static library
-#     xxx.dll       DLL
-#     libxxx.dll.a  import library
-#
-# Note: the user can always override this by using the <tag>@rule.
-# These settings have been chosen so that mingw is in line with msvc naming
-# conventions. For cygwin, the cygwin naming convention has been chosen.
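As a quick illustration of the naming table in the comment above, here is a hedged sketch of the file names produced for a library called "xxx"; the library_names helper is hypothetical, written only to restate the table:

    def library_names(target_os):
        # Mirrors the comment block above: mingw follows msvc conventions,
        # cygwin follows cygwin conventions, everything else is plain *nix.
        if target_os == "mingw":
            return {"static": "libxxx.lib", "shared": "xxx.dll", "import": "xxx.lib"}
        if target_os == "cygwin":
            return {"static": "libxxx.a", "shared": "xxx.dll", "import": "libxxx.dll.a"}
        return {"static": "libxxx.a", "shared": "libxxx.so"}

    print(library_names("cygwin")["import"])  # libxxx.dll.a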
- -# Make the "o" suffix used for gcc toolset on all -# platforms -type.set_generated_target_suffix('OBJ', ['<toolset>gcc'], 'o') -type.set_generated_target_suffix('STATIC_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'a') - -type.set_generated_target_suffix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'dll.a') -type.set_generated_target_prefix('IMPORT_LIB', ['<toolset>gcc', '<target-os>cygwin'], 'lib') - -__machine_match = re.compile('^([^ ]+)') -__version_match = re.compile('^([0-9.]+)') - -def init(version = None, command = None, options = None): - """ - Initializes the gcc toolset for the given version. If necessary, command may - be used to specify where the compiler is located. The parameter 'options' is a - space-delimited list of options, each one specified as - <option-name>option-value. Valid option names are: cxxflags, linkflags and - linker-type. Accepted linker-type values are gnu, darwin, osf, hpux or sun - and the default value will be selected based on the current OS. - Example: - using gcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ; - """ - - options = to_seq(options) - command = to_seq(command) - - # Information about the gcc command... - # The command. - command = to_seq(common.get_invocation_command('gcc', 'g++', command)) - # The root directory of the tool install. - root = feature.get_values('<root>', options) ; - # The bin directory where to find the command to execute. - bin = None - # The flavor of compiler. - flavor = feature.get_values('<flavor>', options) - # Autodetect the root and bin dir if not given. - if command: - if not bin: - bin = common.get_absolute_tool_path(command[-1]) - if not root: - root = os.path.dirname(bin) - # Autodetect the version and flavor if not given. - if command: - machine_info = subprocess.Popen(command + ['-dumpmachine'], stdout=subprocess.PIPE).communicate()[0] - machine = __machine_match.search(machine_info).group(1) - - version_info = subprocess.Popen(command + ['-dumpversion'], stdout=subprocess.PIPE).communicate()[0] - version = __version_match.search(version_info).group(1) - if not flavor and machine.find('mingw') != -1: - flavor = 'mingw' - - condition = None - if flavor: - condition = common.check_init_parameters('gcc', None, - ('version', version), - ('flavor', flavor)) - else: - condition = common.check_init_parameters('gcc', None, - ('version', version)) - - if command: - command = command[0] - - common.handle_options('gcc', condition, command, options) - - linker = feature.get_values('<linker-type>', options) - if not linker: - if os_name() == 'OSF': - linker = 'osf' - elif os_name() == 'HPUX': - linker = 'hpux' ; - else: - linker = 'gnu' - - init_link_flags('gcc', linker, condition) - - # If gcc is installed in non-standard location, we'd need to add - # LD_LIBRARY_PATH when running programs created with it (for unit-test/run - # rules). - if command: - # On multilib 64-bit boxes, there are both 32-bit and 64-bit libraries - # and all must be added to LD_LIBRARY_PATH. The linker will pick the - # right onces. Note that we don't provide a clean way to build 32-bit - # binary with 64-bit compiler, but user can always pass -m32 manually. 
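The version/flavor autodetection performed earlier in this init() can be exercised standalone. The sketch below shells out to the same -dumpmachine and -dumpversion options gcc really provides; the probe_gcc helper is an assumption of this sketch, written in modern Python for brevity:

    import re
    import subprocess

    def probe_gcc(command="gcc"):
        # Ask the compiler for its target triplet and version, as init() does.
        machine = subprocess.check_output([command, "-dumpmachine"], text=True).strip()
        version = subprocess.check_output([command, "-dumpversion"], text=True).strip()
        flavor = "mingw" if "mingw" in machine else None
        return machine, re.match(r"[0-9.]+", version).group(0), flavor

    # e.g. ("x86_64-linux-gnu", "12.3.0", None) on a typical Linux machine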
- lib_path = [os.path.join(root, 'bin'), - os.path.join(root, 'lib'), - os.path.join(root, 'lib32'), - os.path.join(root, 'lib64')] - if debug(): - print 'notice: using gcc libraries ::', condition, '::', lib_path - toolset.flags('gcc.link', 'RUN_PATH', condition, lib_path) - - # If it's not a system gcc install we should adjust the various programs as - # needed to prefer using the install specific versions. This is essential - # for correct use of MinGW and for cross-compiling. - - # - The archive builder. - archiver = common.get_invocation_command('gcc', - 'ar', feature.get_values('<archiver>', options), [bin], path_last=True) - toolset.flags('gcc.archive', '.AR', condition, [archiver]) - if debug(): - print 'notice: using gcc archiver ::', condition, '::', archiver - - # - The resource compiler. - rc_command = common.get_invocation_command_nodefault('gcc', - 'windres', feature.get_values('<rc>', options), [bin], path_last=True) - rc_type = feature.get_values('<rc-type>', options) - - if not rc_type: - rc_type = 'windres' - - if not rc_command: - # If we can't find an RC compiler we fallback to a null RC compiler that - # creates empty object files. This allows the same Jamfiles to work - # across the board. The null RC uses the assembler to create the empty - # objects, so configure that. - rc_command = common.get_invocation_command('gcc', 'as', [], [bin], path_last=True) - rc_type = 'null' - rc.configure(rc_command, condition, '<rc-type>' + rc_type) - -###if [ os.name ] = NT -###{ -### # This causes single-line command invocation to not go through .bat files, -### # thus avoiding command-line length limitations. -### JAMSHELL = % ; -###} - -#FIXME: when register_c_compiler is moved to -# generators, these should be updated -builtin.register_c_compiler('gcc.compile.c++', ['CPP'], ['OBJ'], ['<toolset>gcc']) -builtin.register_c_compiler('gcc.compile.c', ['C'], ['OBJ'], ['<toolset>gcc']) -builtin.register_c_compiler('gcc.compile.asm', ['ASM'], ['OBJ'], ['<toolset>gcc']) - -# pch support - -# The compiler looks for a precompiled header in each directory just before it -# looks for the include file in that directory. The name searched for is the -# name specified in the #include directive with ".gch" suffix appended. The -# logic in gcc-pch-generator will make sure that BASE_PCH suffix is appended to -# full name of the header. - -type.set_generated_target_suffix('PCH', ['<toolset>gcc'], 'gch') - -# GCC-specific pch generator. -class GccPchGenerator(pch.PchGenerator): - - # Inherit the __init__ method - - def run_pch(self, project, name, prop_set, sources): - # Find the header in sources. Ignore any CPP sources. - header = None - for s in sources: - if type.is_derived(s.type, 'H'): - header = s - - # Error handling: Base header file name should be the same as the base - # precompiled header name. - header_name = header.name - header_basename = os.path.basename(header_name).rsplit('.', 1)[0] - if header_basename != name: - location = project.project_module - ###FIXME: - raise Exception() - ### errors.user-error "in" $(location)": pch target name `"$(name)"' should be the same as the base name of header file `"$(header-name)"'" ; - - pch_file = Generator.run(self, project, name, prop_set, [header]) - - # return result of base class and pch-file property as usage-requirements - # FIXME: what about multiple results from generator.run? 
- return (property_set.create('<pch-file>' + pch_file[0], '<cflags>-Winvalid-pch'), - pch_file) - - # Calls the base version specifying source's name as the name of the created - # target. As result, the PCH will be named whatever.hpp.gch, and not - # whatever.gch. - def generated_targets(self, sources, prop_set, project, name = None): - name = sources[0].name - return Generator.generated_targets(self, sources, - prop_set, project, name) - -# Note: the 'H' source type will catch both '.h' header and '.hpp' header. The -# latter have HPP type, but HPP type is derived from H. The type of compilation -# is determined entirely by the destination type. -generators.register(GccPchGenerator('gcc.compile.c.pch', False, ['H'], ['C_PCH'], ['<pch>on', '<toolset>gcc' ])) -generators.register(GccPchGenerator('gcc.compile.c++.pch', False, ['H'], ['CPP_PCH'], ['<pch>on', '<toolset>gcc' ])) - -# Override default do-nothing generators. -generators.override('gcc.compile.c.pch', 'pch.default-c-pch-generator') -generators.override('gcc.compile.c++.pch', 'pch.default-cpp-pch-generator') - -flags('gcc.compile', 'PCH_FILE', ['<pch>on'], ['<pch-file>']) - -# Declare flags and action for compilation -flags('gcc.compile', 'OPTIONS', ['<optimization>off'], ['-O0']) -flags('gcc.compile', 'OPTIONS', ['<optimization>speed'], ['-O3']) -flags('gcc.compile', 'OPTIONS', ['<optimization>space'], ['-Os']) - -flags('gcc.compile', 'OPTIONS', ['<inlining>off'], ['-fno-inline']) -flags('gcc.compile', 'OPTIONS', ['<inlining>on'], ['-Wno-inline']) -flags('gcc.compile', 'OPTIONS', ['<inlining>full'], ['-finline-functions', '-Wno-inline']) - -flags('gcc.compile', 'OPTIONS', ['<warnings>off'], ['-w']) -flags('gcc.compile', 'OPTIONS', ['<warnings>on'], ['-Wall']) -flags('gcc.compile', 'OPTIONS', ['<warnings>all'], ['-Wall', '-pedantic']) -flags('gcc.compile', 'OPTIONS', ['<warnings-as-errors>on'], ['-Werror']) - -flags('gcc.compile', 'OPTIONS', ['<debug-symbols>on'], ['-g']) -flags('gcc.compile', 'OPTIONS', ['<profiling>on'], ['-pg']) -flags('gcc.compile', 'OPTIONS', ['<rtti>off'], ['-fno-rtti']) - -# On cygwin and mingw, gcc generates position independent code by default, and -# warns if -fPIC is specified. This might not be the right way of checking if -# we're using cygwin. For example, it's possible to run cygwin gcc from NT -# shell, or using crosscompiling. But we'll solve that problem when it's time. -# In that case we'll just add another parameter to 'init' and move this login -# inside 'init'. -if not os_name () in ['CYGWIN', 'NT']: - # This logic will add -fPIC for all compilations: - # - # lib a : a.cpp b ; - # obj b : b.cpp ; - # exe c : c.cpp a d ; - # obj d : d.cpp ; - # - # This all is fine, except that 'd' will be compiled with -fPIC even though - # it's not needed, as 'd' is used only in exe. However, it's hard to detect - # where a target is going to be used. Alternative, we can set -fPIC only - # when main target type is LIB but than 'b' will be compiled without -fPIC. - # In x86-64 that will lead to link errors. So, compile everything with - # -fPIC. - # - # Yet another alternative would be to create propagated <sharedable> - # feature, and set it when building shared libraries, but that's hard to - # implement and will increase target path length even more. - flags('gcc.compile', 'OPTIONS', ['<link>shared'], ['-fPIC']) - -if os_name() != 'NT' and os_name() != 'OSF' and os_name() != 'HPUX': - # OSF does have an option called -soname but it doesn't seem to work as - # expected, therefore it has been disabled. 
- HAVE_SONAME = '' - SONAME_OPTION = '-h' - - -flags('gcc.compile', 'USER_OPTIONS', [], ['<cflags>']) -flags('gcc.compile.c++', 'USER_OPTIONS',[], ['<cxxflags>']) -flags('gcc.compile', 'DEFINES', [], ['<define>']) -flags('gcc.compile', 'INCLUDES', [], ['<include>']) - -engine = get_manager().engine() - -engine.register_action('gcc.compile.c++.pch', - '"$(CONFIG_COMMAND)" -x c++-header $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"') - -engine.register_action('gcc.compile.c.pch', - '"$(CONFIG_COMMAND)" -x c-header $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"') - - -def gcc_compile_cpp(targets, sources, properties): - # Some extensions are compiled as C++ by default. For others, we need to - # pass -x c++. We could always pass -x c++ but distcc does not work with it. - extension = os.path.splitext (sources [0]) [1] - lang = '' - if not extension in ['.cc', '.cp', '.cxx', '.cpp', '.c++', '.C']: - lang = '-x c++' - get_manager().engine().set_target_variable (targets, 'LANG', lang) - engine.add_dependency(targets, bjam.call('get-target-variable', targets, 'PCH_FILE')) - -def gcc_compile_c(targets, sources, properties): - engine = get_manager().engine() - # If we use the name g++ then default file suffix -> language mapping does - # not work. So have to pass -x option. Maybe, we can work around this by - # allowing the user to specify both C and C++ compiler names. - #if $(>:S) != .c - #{ - engine.set_target_variable (targets, 'LANG', '-x c') - #} - engine.add_dependency(targets, bjam.call('get-target-variable', targets, 'PCH_FILE')) - -engine.register_action( - 'gcc.compile.c++', - '"$(CONFIG_COMMAND)" $(LANG) -ftemplate-depth-128 $(OPTIONS) ' + - '$(USER_OPTIONS) -D$(DEFINES) -I"$(PCH_FILE:D)" -I"$(INCLUDES)" ' + - '-c -o "$(<:W)" "$(>:W)"', - function=gcc_compile_cpp, - bound_list=['PCH_FILE']) - -engine.register_action( - 'gcc.compile.c', - '"$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) ' + - '-I"$(PCH_FILE:D)" -I"$(INCLUDES)" -c -o "$(<)" "$(>)"', - function=gcc_compile_c, - bound_list=['PCH_FILE']) - -def gcc_compile_asm(targets, sources, properties): - get_manager().engine().set_target_variable(targets, 'LANG', '-x assembler-with-cpp') - -engine.register_action( - 'gcc.compile.asm', - '"$(CONFIG_COMMAND)" $(LANG) $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)"', - function=gcc_compile_asm) - - -class GccLinkingGenerator(unix.UnixLinkingGenerator): - """ - The class which check that we don't try to use the <runtime-link>static - property while creating or using shared library, since it's not supported by - gcc/libc. - """ - def run(self, project, name, ps, sources): - # TODO: Replace this with the use of a target-os property. - - no_static_link = False - if bjam.variable('UNIX'): - no_static_link = True; - ##FIXME: what does this mean? -## { -## switch [ modules.peek : JAMUNAME ] -## { -## case * : no-static-link = true ; -## } -## } - - reason = None - if no_static_link and ps.get('runtime-link') == 'static': - if ps.get('link') == 'shared': - reason = "On gcc, DLL can't be build with '<runtime-link>static'." - elif type.is_derived(self.target_types[0], 'EXE'): - for s in sources: - source_type = s.type() - if source_type and type.is_derived(source_type, 'SHARED_LIB'): - reason = "On gcc, using DLLS together with the " +\ - "<runtime-link>static options is not possible " - if reason: - print 'warning:', reason - print 'warning:',\ - "It is suggested to use '<runtime-link>static' together",\ - "with '<link>static'." 
; - return - else: - generated_targets = unix.UnixLinkingGenerator.run(self, project, - name, ps, sources) - return generated_targets - -if on_windows(): - flags('gcc.link.dll', '.IMPLIB-COMMAND', [], ['-Wl,--out-implib,']) - generators.register( - GccLinkingGenerator('gcc.link', True, - ['OBJ', 'SEARCHED_LIB', 'STATIC_LIB', 'IMPORT_LIB'], - [ 'EXE' ], - [ '<toolset>gcc' ])) - generators.register( - GccLinkingGenerator('gcc.link.dll', True, - ['OBJ', 'SEARCHED_LIB', 'STATIC_LIB', 'IMPORT_LIB'], - ['IMPORT_LIB', 'SHARED_LIB'], - ['<toolset>gcc'])) -else: - generators.register( - GccLinkingGenerator('gcc.link', True, - ['LIB', 'OBJ'], - ['EXE'], - ['<toolset>gcc'])) - generators.register( - GccLinkingGenerator('gcc.link.dll', True, - ['LIB', 'OBJ'], - ['SHARED_LIB'], - ['<toolset>gcc'])) - -# Declare flags for linking. -# First, the common flags. -flags('gcc.link', 'OPTIONS', ['<debug-symbols>on'], ['-g']) -flags('gcc.link', 'OPTIONS', ['<profiling>on'], ['-pg']) -flags('gcc.link', 'USER_OPTIONS', [], ['<linkflags>']) -flags('gcc.link', 'LINKPATH', [], ['<library-path>']) -flags('gcc.link', 'FINDLIBS-ST', [], ['<find-static-library>']) -flags('gcc.link', 'FINDLIBS-SA', [], ['<find-shared-library>']) -flags('gcc.link', 'LIBRARIES', [], ['<library-file>']) - -# For <runtime-link>static we made sure there are no dynamic libraries in the -# link. On HP-UX not all system libraries exist as archived libraries (for -# example, there is no libunwind.a), so, on this platform, the -static option -# cannot be specified. -if os_name() != 'HPUX': - flags('gcc.link', 'OPTIONS', ['<runtime-link>static'], ['-static']) - -# Now, the vendor specific flags. -# The parameter linker can be either gnu, darwin, osf, hpux or sun. -def init_link_flags(toolset, linker, condition): - """ - Now, the vendor specific flags. - The parameter linker can be either gnu, darwin, osf, hpux or sun. - """ - toolset_link = toolset + '.link' - if linker == 'gnu': - # Strip the binary when no debugging is needed. We use --strip-all flag - # as opposed to -s since icc (intel's compiler) is generally - # option-compatible with and inherits from the gcc toolset, but does not - # support -s. - - # FIXME: what does unchecked translate to? - flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,--strip-all']) # : unchecked ; - flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; - flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ; - flags(toolset_link, 'START-GROUP', condition, ['-Wl,--start-group'])# : unchecked ; - flags(toolset_link, 'END-GROUP', condition, ['-Wl,--end-group']) # : unchecked ; - - # gnu ld has the ability to change the search behaviour for libraries - # referenced by -l switch. These modifiers are -Bstatic and -Bdynamic - # and change search for -l switches that follow them. The following list - # shows the tried variants. - # The search stops at the first variant that has a match. - # *nix: -Bstatic -lxxx - # libxxx.a - # - # *nix: -Bdynamic -lxxx - # libxxx.so - # libxxx.a - # - # windows (mingw,cygwin) -Bstatic -lxxx - # libxxx.a - # xxx.lib - # - # windows (mingw,cygwin) -Bdynamic -lxxx - # libxxx.dll.a - # xxx.dll.a - # libxxx.a - # xxx.lib - # cygxxx.dll (*) - # libxxx.dll - # xxx.dll - # libxxx.a - # - # (*) This is for cygwin - # Please note that -Bstatic and -Bdynamic are not a guarantee that a - # static or dynamic lib indeed gets linked in. The switches only change - # search patterns! 
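To make the search-pattern switching described above concrete, here is an illustrative sketch (not toolset code) of how the -Wl,-Bstatic and -Wl,-Bdynamic prefixes interleave with the -l switches on the final link line:

    def link_libs(static_libs, shared_libs):
        # Static libraries are listed after -Bstatic, shared ones after
        # -Bdynamic, so the GNU linker switches its search pattern between
        # the two groups.
        args = []
        if static_libs:
            args += ["-Wl,-Bstatic"] + ["-l" + lib for lib in static_libs]
        if shared_libs:
            args += ["-Wl,-Bdynamic"] + ["-l" + lib for lib in shared_libs]
        return args

    print(" ".join(link_libs(["z"], ["m", "pthread"])))
    # -Wl,-Bstatic -lz -Wl,-Bdynamic -lm -lpthread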
- - # On *nix mixing shared libs with static runtime is not a good idea. - flags(toolset_link, 'FINDLIBS-ST-PFX', - map(lambda x: x + '/<runtime-link>shared', condition), - ['-Wl,-Bstatic']) # : unchecked ; - flags(toolset_link, 'FINDLIBS-SA-PFX', - map(lambda x: x + '/<runtime-link>shared', condition), - ['-Wl,-Bdynamic']) # : unchecked ; - - # On windows allow mixing of static and dynamic libs with static - # runtime. - flags(toolset_link, 'FINDLIBS-ST-PFX', - map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition), - ['-Wl,-Bstatic']) # : unchecked ; - flags(toolset_link, 'FINDLIBS-SA-PFX', - map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition), - ['-Wl,-Bdynamic']) # : unchecked ; - flags(toolset_link, 'OPTIONS', - map(lambda x: x + '/<runtime-link>static/<target-os>windows', condition), - ['-Wl,-Bstatic']) # : unchecked ; - - elif linker == 'darwin': - # On Darwin, the -s option to ld does not work unless we pass -static, - # and passing -static unconditionally is a bad idea. So, don't pass -s. - # at all, darwin.jam will use separate 'strip' invocation. - flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; - flags(toolset_link, 'RPATH_LINK', condition, ['<xdll-path>']) # : unchecked ; - - elif linker == 'osf': - # No --strip-all, just -s. - flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s']) - # : unchecked ; - flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; - # This does not supports -R. - flags(toolset_link, 'RPATH_OPTION', condition, ['-rpath']) # : unchecked ; - # -rpath-link is not supported at all. - - elif linker == 'sun': - flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), ['-Wl,-s']) - # : unchecked ; - flags(toolset_link, 'RPATH', condition, ['<dll-path>']) # : unchecked ; - # Solaris linker does not have a separate -rpath-link, but allows to use - # -L for the same purpose. - flags(toolset_link, 'LINKPATH', condition, ['<xdll-path>']) # : unchecked ; - - # This permits shared libraries with non-PIC code on Solaris. - # VP, 2004/09/07: Now that we have -fPIC hardcode in link.dll, the - # following is not needed. Whether -fPIC should be hardcoded, is a - # separate question. - # AH, 2004/10/16: it is still necessary because some tests link against - # static libraries that were compiled without PIC. - flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), ['-mimpure-text']) - # : unchecked ; - - elif linker == 'hpux': - flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<debug-symbols>off', condition), - ['-Wl,-s']) # : unchecked ; - flags(toolset_link, 'OPTIONS', map(lambda x: x + '/<link>shared', condition), - ['-fPIC']) # : unchecked ; - - else: - # FIXME: - errors.user_error( - "$(toolset) initialization: invalid linker '$(linker)' " + - "The value '$(linker)' specified for <linker> is not recognized. " + - "Possible values are 'gnu', 'darwin', 'osf', 'hpux' or 'sun'") - -# Declare actions for linking. -def gcc_link(targets, sources, properties): - engine = get_manager().engine() - engine.set_target_variable(targets, 'SPACE', ' ') - # Serialize execution of the 'link' action, since running N links in - # parallel is just slower. For now, serialize only gcc links, it might be a - # good idea to serialize all links. 
- engine.set_target_variable(targets, 'JAM_SEMAPHORE', '<s>gcc-link-semaphore') - -engine.register_action( - 'gcc.link', - '"$(CONFIG_COMMAND)" -L"$(LINKPATH)" ' + - '-Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" ' + - '-Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" -o "$(<)" ' + - '$(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) ' + - '-l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) ' + - '$(OPTIONS) $(USER_OPTIONS)', - function=gcc_link, - bound_list=['LIBRARIES']) - -# Default value. Mostly for the sake of intel-linux that inherits from gcc, but -# does not have the same logic to set the .AR variable. We can put the same -# logic in intel-linux, but that's hardly worth the trouble as on Linux, 'ar' is -# always available. -__AR = 'ar' - -flags('gcc.archive', 'AROPTIONS', [], ['<archiveflags>']) - -def gcc_archive(targets, sources, properties): - # Always remove archive and start again. Here's rationale from - # - # Andre Hentz: - # - # I had a file, say a1.c, that was included into liba.a. I moved a1.c to - # a2.c, updated my Jamfiles and rebuilt. My program was crashing with absurd - # errors. After some debugging I traced it back to the fact that a1.o was - # *still* in liba.a - # - # Rene Rivera: - # - # Originally removing the archive was done by splicing an RM onto the - # archive action. That makes archives fail to build on NT when they have - # many files because it will no longer execute the action directly and blow - # the line length limit. Instead we remove the file in a different action, - # just before building the archive. - clean = targets[0] + '(clean)' - bjam.call('TEMPORARY', clean) - bjam.call('NOCARE', clean) - engine = get_manager().engine() - engine.set_target_variable('LOCATE', clean, bjam.call('get-target-variable', targets, 'LOCATE')) - engine.add_dependency(clean, sources) - engine.add_dependency(targets, clean) - engine.set_update_action('common.RmTemps', clean, targets) - -# Declare action for creating static libraries. -# The letter 'r' means to add files to the archive with replacement. Since we -# remove archive, we don't care about replacement, but there's no option "add -# without replacement". -# The letter 'c' suppresses the warning in case the archive does not exists yet. -# That warning is produced only on some platforms, for whatever reasons. -engine.register_action('gcc.archive', - '"$(.AR)" $(AROPTIONS) rc "$(<)" "$(>)"', - function=gcc_archive, - flags=['piecemeal']) - -def gcc_link_dll(targets, sources, properties): - engine = get_manager().engine() - engine.set_target_variable(targets, 'SPACE', ' ') - engine.set_target_variable(targets, 'JAM_SEMAPHORE', '<s>gcc-link-semaphore') - engine.set_target_variable(targets, "HAVE_SONAME", HAVE_SONAME) - engine.set_target_variable(targets, "SONAME_OPTION", SONAME_OPTION) - -engine.register_action( - 'gcc.link.dll', - # Differ from 'link' above only by -shared. - '"$(CONFIG_COMMAND)" -L"$(LINKPATH)" ' + - '-Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" ' + - '"$(.IMPLIB-COMMAND)$(<[1])" -o "$(<[-1])" ' + - '$(HAVE_SONAME)-Wl,$(SONAME_OPTION)$(SPACE)-Wl,$(<[-1]:D=) ' + - '-shared $(START-GROUP) "$(>)" "$(LIBRARIES)" $(FINDLIBS-ST-PFX) ' + - '-l$(FINDLIBS-ST) $(FINDLIBS-SA-PFX) -l$(FINDLIBS-SA) $(END-GROUP) ' + - '$(OPTIONS) $(USER_OPTIONS)', - function = gcc_link_dll, - bound_list=['LIBRARIES']) - -# Set up threading support. It's somewhat contrived, so perform it at the end, -# to avoid cluttering other code. 
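A condensed sketch of the per-OS dispatch that follows; the threading_flags helper and its return convention are assumptions made for this illustration:

    def threading_flags(host_os):
        # Returns (compiler option, extra runtime library) for <threading>multi.
        if host_os in ("NT", "cygwin"):
            return "-mthreads", None
        if host_os.startswith("SunOS"):
            return "-pthreads", "rt"
        if host_os in ("BeOS", "Darwin", "IRIX"):
            return None, None  # no threading option needed or supported
        if host_os.endswith("BSD") or host_os == "DragonFly":
            return "-pthread", None  # there is no -lrt on the BSDs
        return "-pthread", "rt"  # generic *nix fallback

    assert threading_flags("Linux") == ("-pthread", "rt")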
- -if on_windows(): - flags('gcc', 'OPTIONS', ['<threading>multi'], ['-mthreads']) -elif bjam.variable('UNIX'): - jamuname = bjam.variable('JAMUNAME') - host_os_name = jamuname[0] - if host_os_name.startswith('SunOS'): - flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthreads']) - flags('gcc', 'FINDLIBS-SA', [], ['rt']) - elif host_os_name == 'BeOS': - # BeOS has no threading options, don't set anything here. - pass - elif host_os_name.endswith('BSD'): - flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread']) - # there is no -lrt on BSD - elif host_os_name == 'DragonFly': - flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread']) - # there is no -lrt on BSD - DragonFly is a FreeBSD variant, - # which anoyingly doesn't say it's a *BSD. - elif host_os_name == 'IRIX': - # gcc on IRIX does not support multi-threading, don't set anything here. - pass - elif host_os_name == 'Darwin': - # Darwin has no threading options, don't set anything here. - pass - else: - flags('gcc', 'OPTIONS', ['<threading>multi'], ['-pthread']) - flags('gcc', 'FINDLIBS-SA', [], ['rt']) - -def cpu_flags(toolset, variable, architecture, instruction_set, values, default=None): - #FIXME: for some reason this fails. Probably out of date feature code -## if default: -## flags(toolset, variable, -## ['<architecture>' + architecture + '/<instruction-set>'], -## values) - flags(toolset, variable, - #FIXME: same as above - [##'<architecture>/<instruction-set>' + instruction_set, - '<architecture>' + architecture + '/<instruction-set>' + instruction_set], - values) - -# Set architecture/instruction-set options. -# -# x86 and compatible -flags('gcc', 'OPTIONS', ['<architecture>x86/<address-model>32'], ['-m32']) -flags('gcc', 'OPTIONS', ['<architecture>x86/<address-model>64'], ['-m64']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'i386', ['-march=i386'], default=True) -cpu_flags('gcc', 'OPTIONS', 'x86', 'i486', ['-march=i486']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'i586', ['-march=i586']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'i686', ['-march=i686']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium', ['-march=pentium']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium-mmx', ['-march=pentium-mmx']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'pentiumpro', ['-march=pentiumpro']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium2', ['-march=pentium2']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium3', ['-march=pentium3']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium3m', ['-march=pentium3m']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium-m', ['-march=pentium-m']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium4', ['-march=pentium4']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'pentium4m', ['-march=pentium4m']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'prescott', ['-march=prescott']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'nocona', ['-march=nocona']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'k6', ['-march=k6']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'k6-2', ['-march=k6-2']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'k6-3', ['-march=k6-3']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon', ['-march=athlon']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-tbird', ['-march=athlon-tbird']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-4', ['-march=athlon-4']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-xp', ['-march=athlon-xp']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-mp', ['-march=athlon-mp']) -## -cpu_flags('gcc', 'OPTIONS', 'x86', 'k8', ['-march=k8']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'opteron', ['-march=opteron']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon64', ['-march=athlon64']) 
-cpu_flags('gcc', 'OPTIONS', 'x86', 'athlon-fx', ['-march=athlon-fx']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'winchip-c6', ['-march=winchip-c6']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'winchip2', ['-march=winchip2']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'c3', ['-march=c3']) -cpu_flags('gcc', 'OPTIONS', 'x86', 'c3-2', ['-march=c3-2']) -# Sparc -flags('gcc', 'OPTIONS', ['<architecture>sparc/<address-model>32'], ['-m32']) -flags('gcc', 'OPTIONS', ['<architecture>sparc/<address-model>64'], ['-m64']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'c3', ['-mcpu=c3'], default=True) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'v7', ['-mcpu=v7']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'cypress', ['-mcpu=cypress']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'v8', ['-mcpu=v8']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'supersparc', ['-mcpu=supersparc']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclite', ['-mcpu=sparclite']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'hypersparc', ['-mcpu=hypersparc']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclite86x', ['-mcpu=sparclite86x']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'f930', ['-mcpu=f930']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'f934', ['-mcpu=f934']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'sparclet', ['-mcpu=sparclet']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'tsc701', ['-mcpu=tsc701']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'v9', ['-mcpu=v9']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'ultrasparc', ['-mcpu=ultrasparc']) -cpu_flags('gcc', 'OPTIONS', 'sparc', 'ultrasparc3', ['-mcpu=ultrasparc3']) -# RS/6000 & PowerPC -flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>32'], ['-m32']) -flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>64'], ['-m64']) -cpu_flags('gcc', 'OPTIONS', 'power', '403', ['-mcpu=403']) -cpu_flags('gcc', 'OPTIONS', 'power', '505', ['-mcpu=505']) -cpu_flags('gcc', 'OPTIONS', 'power', '601', ['-mcpu=601']) -cpu_flags('gcc', 'OPTIONS', 'power', '602', ['-mcpu=602']) -cpu_flags('gcc', 'OPTIONS', 'power', '603', ['-mcpu=603']) -cpu_flags('gcc', 'OPTIONS', 'power', '603e', ['-mcpu=603e']) -cpu_flags('gcc', 'OPTIONS', 'power', '604', ['-mcpu=604']) -cpu_flags('gcc', 'OPTIONS', 'power', '604e', ['-mcpu=604e']) -cpu_flags('gcc', 'OPTIONS', 'power', '620', ['-mcpu=620']) -cpu_flags('gcc', 'OPTIONS', 'power', '630', ['-mcpu=630']) -cpu_flags('gcc', 'OPTIONS', 'power', '740', ['-mcpu=740']) -cpu_flags('gcc', 'OPTIONS', 'power', '7400', ['-mcpu=7400']) -cpu_flags('gcc', 'OPTIONS', 'power', '7450', ['-mcpu=7450']) -cpu_flags('gcc', 'OPTIONS', 'power', '750', ['-mcpu=750']) -cpu_flags('gcc', 'OPTIONS', 'power', '801', ['-mcpu=801']) -cpu_flags('gcc', 'OPTIONS', 'power', '821', ['-mcpu=821']) -cpu_flags('gcc', 'OPTIONS', 'power', '823', ['-mcpu=823']) -cpu_flags('gcc', 'OPTIONS', 'power', '860', ['-mcpu=860']) -cpu_flags('gcc', 'OPTIONS', 'power', '970', ['-mcpu=970']) -cpu_flags('gcc', 'OPTIONS', 'power', '8540', ['-mcpu=8540']) -cpu_flags('gcc', 'OPTIONS', 'power', 'power', ['-mcpu=power']) -cpu_flags('gcc', 'OPTIONS', 'power', 'power2', ['-mcpu=power2']) -cpu_flags('gcc', 'OPTIONS', 'power', 'power3', ['-mcpu=power3']) -cpu_flags('gcc', 'OPTIONS', 'power', 'power4', ['-mcpu=power4']) -cpu_flags('gcc', 'OPTIONS', 'power', 'power5', ['-mcpu=power5']) -cpu_flags('gcc', 'OPTIONS', 'power', 'powerpc', ['-mcpu=powerpc']) -cpu_flags('gcc', 'OPTIONS', 'power', 'powerpc64', ['-mcpu=powerpc64']) -cpu_flags('gcc', 'OPTIONS', 'power', 'rios', ['-mcpu=rios']) -cpu_flags('gcc', 'OPTIONS', 'power', 'rios1', ['-mcpu=rios1']) -cpu_flags('gcc', 'OPTIONS', 'power', 'rios2', 
['-mcpu=rios2']) -cpu_flags('gcc', 'OPTIONS', 'power', 'rsc', ['-mcpu=rsc']) -cpu_flags('gcc', 'OPTIONS', 'power', 'rs64a', ['-mcpu=rs64']) -# AIX variant of RS/6000 & PowerPC -flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>32/<target-os>aix'], ['-maix32']) -flags('gcc', 'OPTIONS', ['<architecture>power/<address-model>64/<target-os>aix'], ['-maix64']) -flags('gcc', 'AROPTIONS', ['<architecture>power/<address-model>64/<target-os>aix'], ['-X 64']) diff --git a/jam-files/boost-build/tools/generate.jam b/jam-files/boost-build/tools/generate.jam deleted file mode 100644 index 6732fa35..00000000 --- a/jam-files/boost-build/tools/generate.jam +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Declares main target 'generate' used to produce targets by calling a -# user-provided rule that takes and produces virtual targets. - -import "class" : new ; -import errors ; -import feature ; -import project ; -import property ; -import property-set ; -import targets ; -import regex ; - - -feature.feature generating-rule : : free ; - - -class generated-target-class : basic-target -{ - import errors ; - import indirect ; - import virtual-target ; - - rule __init__ ( name : project : sources * : requirements * - : default-build * : usage-requirements * ) - { - basic-target.__init__ $(name) : $(project) : $(sources) - : $(requirements) : $(default-build) : $(usage-requirements) ; - - if ! [ $(self.requirements).get <generating-rule> ] - { - errors.user-error "The generate rule requires the <generating-rule>" - "property to be set" ; - } - } - - rule construct ( name : sources * : property-set ) - { - local result ; - local gr = [ $(property-set).get <generating-rule> ] ; - - # FIXME: this is a copy-paste from virtual-target.jam. We should add a - # utility rule to call a rule like this. - local rule-name = [ MATCH ^@(.*) : $(gr) ] ; - if $(rule-name) - { - if $(gr[2]) - { - local target-name = [ full-name ] ; - errors.user-error "Multiple <generating-rule> properties" - "encountered for target $(target-name)." ; - } - - result = [ indirect.call $(rule-name) $(self.project) $(name) - : $(property-set) : $(sources) ] ; - - if ! $(result) - { - ECHO "warning: Unable to construct" [ full-name ] ; - } - } - - local ur ; - local targets ; - - if $(result) - { - if [ class.is-a $(result[1]) : property-set ] - { - ur = $(result[1]) ; - targets = $(result[2-]) ; - } - else - { - ur = [ property-set.empty ] ; - targets = $(result) ; - } - } - # FIXME: the following loop should be doable using sequence.transform or - # some similar utility rule. 
-        local rt ;
-        for local t in $(targets)
-        {
-            rt += [ virtual-target.register $(t) ] ;
-        }
-        return $(ur) $(rt) ;
-    }
-}
-
-
-rule generate ( name : sources * : requirements * : default-build *
-    : usage-requirements * )
-{
-    local project = [ project.current ] ;
-
-    targets.main-target-alternative
-      [ new generated-target-class $(name) : $(project)
-        : [ targets.main-target-sources $(sources) : $(name) ]
-        : [ targets.main-target-requirements $(requirements) : $(project) ]
-        : [ targets.main-target-default-build $(default-build) : $(project) ]
-        : [ targets.main-target-usage-requirements $(usage-requirements) : $(project) ]
-      ] ;
-}
-
-IMPORT $(__name__) : generate : : generate ;
diff --git a/jam-files/boost-build/tools/gettext.jam b/jam-files/boost-build/tools/gettext.jam
deleted file mode 100644
index 99a43ffe..00000000
--- a/jam-files/boost-build/tools/gettext.jam
+++ /dev/null
@@ -1,230 +0,0 @@
-# Copyright 2003, 2004, 2005, 2006 Vladimir Prus
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-# This module supports the GNU gettext internationalization utilities.
-#
-# It provides two main target rules: 'gettext.catalog', used for
-# creating machine-readable catalogs from translation files, and
-# 'gettext.update', used for updating translation files from modified
-# sources.
-#
-# To add i18n support to your application you should follow these
-# steps.
-#
-# - Decide on a file name which will contain translations and
-#   what main target name will be used to update it. For example::
-#
-#       gettext.update update-russian : russian.po a.cpp my_app ;
-#
-# - Create the initial translation file by running::
-#
-#       bjam update-russian
-#
-# - Edit russian.po. For example, you might change fields like LastTranslator.
-#
-# - Create a main target for the final message catalog::
-#
-#       gettext.catalog russian : russian.po ;
-#
-# The machine-readable catalog will be updated whenever you update
-# "russian.po". The "russian.po" file will be updated only on explicit
-# request. When you're ready to update translations, you should
-#
-# - Run::
-#
-#       bjam update-russian
-#
-# - Edit "russian.po" in an appropriate editor.
-#
-# The next bjam run will convert "russian.po" into machine-readable form.
-#
-# By default, translations are marked by the 'i18n' call. The
-# 'gettext.keyword' feature can be used to alter this.
-
-
-import targets ;
-import property-set ;
-import virtual-target ;
-import "class" : new ;
-import project ;
-import type ;
-import generators ;
-import errors ;
-import feature : feature ;
-import toolset : flags ;
-import regex ;
-
-.path = "" ;
-
-# Initializes the gettext module.
-rule init ( path ? # Path where all tools are located. If not specified,
-                   # they should be in PATH.
-    )
-{
-    if $(.initialized) && $(.path) != $(path)
-    {
-        errors.error "Attempt to reconfigure with different path" ;
-    }
-    .initialized = true ;
-    if $(path)
-    {
-        .path = $(path)/ ;
-    }
-}
-
-# Creates a main target 'name' which, when updated, will cause file
-# 'existing-translation' to be updated with translations extracted
-# from 'sources'. It is possible to specify a main target in sources,
-# in which case all targets from the dependency graph of those main
-# targets will be scanned, provided they are of an appropriate type.
-# The 'gettext.types' feature can be used to control the types.
-#
-# The target will be updated only if explicitly requested on the
-# command line.
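Stripped of the target and generator machinery, the update/catalog pair reduces to three tool invocations. This sketch calls the same xgettext, msgmerge, and msgfmt commands the actions below run; the file names and helper functions here are illustrative only:

    import subprocess

    def update_translation(po_file, sources, keyword="i18n"):
        # Extract marked messages from the sources into a POT file, then
        # merge them into the existing human-edited translation.
        subprocess.check_call(["xgettext", "-k" + keyword, "-o", "messages.pot"] + sources)
        subprocess.check_call(["msgmerge", "-U", po_file, "messages.pot"])

    def compile_catalog(po_file, mo_file):
        # Produce the machine-readable catalog from the edited .po file.
        subprocess.check_call(["msgfmt", "-o", mo_file, po_file])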
-rule update ( name : existing-translation sources + : requirements * )
-{
-    local project = [ project.current ] ;
-
-    targets.main-target-alternative
-      [ new typed-target $(name) : $(project) : gettext.UPDATE :
-        $(existing-translation) $(sources)
-        : [ targets.main-target-requirements $(requirements) : $(project) ]
-      ] ;
-    $(project).mark-target-as-explicit $(name) ;
-}
-
-
-# The human-editable source, containing the translation.
-type.register gettext.PO : po ;
-# The machine-readable message catalog.
-type.register gettext.catalog : mo ;
-# Intermediate type produced by extracting translations from
-# sources.
-type.register gettext.POT : pot ;
-# Pseudo type used to invoke the update-translations generator.
-type.register gettext.UPDATE ;
-
-# Identifies the keyword that should be used when scanning sources.
-# Default: i18n
-feature gettext.keyword : : free ;
-# Contains a space-separated list of source types which should be scanned.
-# Default: "C CPP"
-feature gettext.types : : free ;
-
-generators.register-standard gettext.compile : gettext.PO : gettext.catalog ;
-
-class update-translations-generator : generator
-{
-    import regex : split ;
-    import property-set ;
-
-    rule __init__ ( * : * )
-    {
-        generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ;
-    }
-
-    # The rule should be called with at least two sources. The first source
-    # is the translation (.po) file to update. The remaining sources are
-    # targets which should be scanned for new messages. All source files for
-    # those targets will be found and passed to the 'xgettext' utility, which
-    # extracts the messages for localization. Those messages will be merged
-    # into the .po file.
-    rule run ( project name ? : property-set : sources * : multiple ? )
-    {
-        local types = [ $(property-set).get <gettext.types> ] ;
-        types ?= "C CPP" ;
-        types = [ regex.split $(types) " " ] ;
-
-        local keywords = [ $(property-set).get <gettext.keyword> ] ;
-        property-set = [ property-set.create $(keywords:G=<gettext.keyword>) ] ;
-
-        # First determine the list of sources that must be scanned for
-        # messages.
-        local all-sources ;
-        # CONSIDER: I'm not sure if the logic should be the same as for
-        # 'stage': i.e. following dependency properties as well.
-        for local s in $(sources[2-])
-        {
-            all-sources += [ virtual-target.traverse $(s) : : include-sources ] ;
-        }
-        local right-sources ;
-        for local s in $(all-sources)
-        {
-            if [ $(s).type ] in $(types)
-            {
-                right-sources += $(s) ;
-            }
-        }
-
-        local .constructed ;
-        if $(right-sources)
-        {
-            # Create the POT file, which will contain the list of messages
-            # extracted from the sources.
-            local extract =
-              [ new action $(right-sources) : gettext.extract : $(property-set) ] ;
-            local new-messages = [ new file-target $(name) : gettext.POT
-              : $(project) : $(extract) ] ;
-
-            # Create a notfile target which will update the existing
-            # translation file with new messages.
-            local a = [ new action $(sources[1]) $(new-messages)
-              : gettext.update-po-dispatch ] ;
-            local r = [ new notfile-target $(name) : $(project) : $(a) ] ;
-            .constructed = [ virtual-target.register $(r) ] ;
-        }
-        else
-        {
-            errors.error "No source could be scanned by gettext tools" ;
-        }
-        return $(.constructed) ;
-    }
-}
-generators.register [ new update-translations-generator gettext.update : : gettext.UPDATE ] ;
-
-flags gettext.extract KEYWORD <gettext.keyword> ;
-actions extract
-{
-    $(.path)xgettext -k$(KEYWORD:E=i18n) -o $(<) $(>)
-}
-
-# Does the real updating of the po file.
The tricky part is that -# we're actually updating one of the sources: -# $(<) is the NOTFILE target we're updating -# $(>[1]) is the PO file to be really updated. -# $(>[2]) is the PO file created from sources. -# -# When file to be updated does not exist (during the -# first run), we need to copy the file created from sources. -# In all other cases, we need to update the file. -rule update-po-dispatch -{ - NOCARE $(>[1]) ; - gettext.create-po $(<) : $(>) ; - gettext.update-po $(<) : $(>) ; - _ on $(<) = " " ; - ok on $(<) = "" ; - EXISTING_PO on $(<) = $(>[1]) ; -} - -# Due to fancy interaction of existing and updated, this rule can be called with -# one source, in which case we copy the lonely source into EXISTING_PO, or with -# two sources, in which case the action body expands to nothing. I'd really like -# to have "missing" action modifier. -actions quietly existing updated create-po bind EXISTING_PO -{ - cp$(_)"$(>[1])"$(_)"$(EXISTING_PO)"$($(>[2]:E=ok)) -} - -actions updated update-po bind EXISTING_PO -{ - $(.path)msgmerge$(_)-U$(_)"$(EXISTING_PO)"$(_)"$(>[1])" -} - -actions gettext.compile -{ - $(.path)msgfmt -o $(<) $(>) -} - -IMPORT $(__name__) : update : : gettext.update ; diff --git a/jam-files/boost-build/tools/gfortran.jam b/jam-files/boost-build/tools/gfortran.jam deleted file mode 100644 index 0aa69b85..00000000 --- a/jam-files/boost-build/tools/gfortran.jam +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2004 Toon Knapen -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import toolset : flags ; -import feature ; -import fortran ; - -rule init ( version ? : command * : options * ) -{ -} - -# Declare flags and action for compilation -flags gfortran OPTIONS <fflags> ; - -flags gfortran OPTIONS <optimization>off : -O0 ; -flags gfortran OPTIONS <optimization>speed : -O3 ; -flags gfortran OPTIONS <optimization>space : -Os ; - -flags gfortran OPTIONS <debug-symbols>on : -g ; -flags gfortran OPTIONS <profiling>on : -pg ; - -flags gfortran OPTIONS <link>shared/<main-target-type>LIB : -fPIC ; - -flags gfortran DEFINES <define> ; -flags gfortran INCLUDES <include> ; - -rule compile.fortran -{ -} - -actions compile.fortran -{ - gcc -Wall $(OPTIONS) -D$(DEFINES) -I$(INCLUDES) -c -o "$(<)" "$(>)" -} - -generators.register-fortran-compiler gfortran.compile.fortran : FORTRAN FORTRAN90 : OBJ ; diff --git a/jam-files/boost-build/tools/hp_cxx.jam b/jam-files/boost-build/tools/hp_cxx.jam deleted file mode 100644 index 86cd783e..00000000 --- a/jam-files/boost-build/tools/hp_cxx.jam +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2001 David Abrahams. -# Copyright 2004, 2005 Markus Schoepflin. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# -# HP CXX compiler -# See http://h30097.www3.hp.com/cplus/?jumpid=reg_R1002_USEN -# -# -# Notes on this toolset: -# -# - Because of very subtle issues with the default ansi mode, strict_ansi mode -# is used for compilation. One example of things that don't work correctly in -# the default ansi mode is overload resolution of function templates when -# mixed with non-template functions. -# -# - For template instantiation "-timplicit_local" is used. Previously, -# "-tlocal" has been tried to avoid the need for a template repository -# but this doesn't work with manually instantiated templates. 
"-tweak" -# has not been used to avoid the stream of warning messages issued by -# ar or ld when creating a library or linking an application. -# -# - Debug symbols are generated with "-g3", as this works both in debug and -# release mode. When compiling C++ code without optimization, we additionally -# use "-gall", which generates full symbol table information for all classes, -# structs, and unions. As this turns off optimization, it can't be used when -# optimization is needed. -# - -import feature generators common ; -import toolset : flags ; - -feature.extend toolset : hp_cxx ; -feature.extend c++abi : cxxarm ; - -# Inherit from Unix toolset to get library ordering magic. -toolset.inherit hp_cxx : unix ; - -generators.override hp_cxx.prebuilt : builtin.lib-generator ; -generators.override hp_cxx.prebuilt : builtin.prebuilt ; -generators.override hp_cxx.searched-lib-generator : searched-lib-generator ; - - -rule init ( version ? : command * : options * ) -{ - local condition = [ common.check-init-parameters hp_cxx : version $(version) ] ; - - local command = [ common.get-invocation-command hp_cxx : cxx : $(command) ] ; - - if $(command) - { - local root = [ common.get-absolute-tool-path $(command[-1]) ] ; - - if $(root) - { - flags hp_cxx .root $(condition) : "\"$(root)\"/" ; - } - } - # If we can't find 'cxx' anyway, at least show 'cxx' in the commands - command ?= cxx ; - - common.handle-options hp_cxx : $(condition) : $(command) : $(options) ; -} - -generators.register-c-compiler hp_cxx.compile.c++ : CPP : OBJ : <toolset>hp_cxx ; -generators.register-c-compiler hp_cxx.compile.c : C : OBJ : <toolset>hp_cxx ; - - - -# No static linking as far as I can tell. -# flags cxx LINKFLAGS <runtime-link>static : -bstatic ; -flags hp_cxx.compile OPTIONS <debug-symbols>on : -g3 ; -flags hp_cxx.compile OPTIONS <optimization>off/<debug-symbols>on : -gall ; -flags hp_cxx.link OPTIONS <debug-symbols>on : -g ; -flags hp_cxx.link OPTIONS <debug-symbols>off : -s ; - -flags hp_cxx.compile OPTIONS <optimization>off : -O0 ; -flags hp_cxx.compile OPTIONS <optimization>speed/<inlining>on : -O2 ; -flags hp_cxx.compile OPTIONS <optimization>speed : -O2 ; - -# This (undocumented) macro needs to be defined to get all C function -# overloads required by the C++ standard. -flags hp_cxx.compile.c++ OPTIONS : -D__CNAME_OVERLOADS ; - -# Added for threading support -flags hp_cxx.compile OPTIONS <threading>multi : -pthread ; -flags hp_cxx.link OPTIONS <threading>multi : -pthread ; - -flags hp_cxx.compile OPTIONS <optimization>space/<inlining>on : <inlining>size ; -flags hp_cxx.compile OPTIONS <optimization>space : -O1 ; -flags hp_cxx.compile OPTIONS <inlining>off : -inline none ; - -# The compiler versions tried (up to V6.5-040) hang when compiling Boost code -# with full inlining enabled. So leave it at the default level for now. -# -# flags hp_cxx.compile OPTIONS <inlining>full : -inline all ; - -flags hp_cxx.compile OPTIONS <profiling>on : -pg ; -flags hp_cxx.link OPTIONS <profiling>on : -pg ; - -# Selection of the object model. This flag is needed on both the C++ compiler -# and linker command line. - -# Unspecified ABI translates to '-model ansi' as most -# standard-conforming. -flags hp_cxx.compile.c++ OPTIONS <c++abi> : -model ansi : : hack-hack ; -flags hp_cxx.compile.c++ OPTIONS <c++abi>cxxarm : -model arm ; -flags hp_cxx.link OPTIONS <c++abi> : -model ansi : : hack-hack ; -flags hp_cxx.link OPTIONS <c++abi>cxxarm : -model arm ; - -# Display a descriptive tag together with each compiler message. 
This tag can -# be used by the user to explicitely suppress the compiler message. -flags hp_cxx.compile OPTIONS : -msg_display_tag ; - -flags hp_cxx.compile OPTIONS <cflags> ; -flags hp_cxx.compile.c++ OPTIONS <cxxflags> ; -flags hp_cxx.compile DEFINES <define> ; -flags hp_cxx.compile INCLUDES <include> ; -flags hp_cxx.link OPTIONS <linkflags> ; - -flags hp_cxx.link LIBPATH <library-path> ; -flags hp_cxx.link LIBRARIES <library-file> ; -flags hp_cxx.link FINDLIBS-ST <find-static-library> ; -flags hp_cxx.link FINDLIBS-SA <find-shared-library> ; - -flags hp_cxx.compile.c++ TEMPLATE_DEPTH <c++-template-depth> ; - -actions link bind LIBRARIES -{ - $(CONFIG_COMMAND) -noimplicit_include $(OPTIONS) -o "$(<)" -L$(LIBPATH) "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-ST) -l$(FINDLIBS-SA) -lrt -lm -} - -# When creating dynamic libraries, we don't want to be warned about unresolved -# symbols, therefore all unresolved symbols are marked as expected by -# '-expect_unresolved *'. This also mirrors the behaviour of the GNU tool -# chain. - -actions link.dll bind LIBRARIES -{ - $(CONFIG_COMMAND) -shared -expect_unresolved \* -noimplicit_include $(OPTIONS) -o "$(<[1])" -L$(LIBPATH) "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-ST) -l$(FINDLIBS-SA) -lm -} - - -# Note: Relaxed ANSI mode (-std) is used for compilation because in strict ANSI -# C89 mode (-std1) the compiler doesn't accept C++ comments in C files. As -std -# is the default, no special flag is needed. -actions compile.c -{ - $(.root:E=)cc -c $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -o "$(<)" "$(>)" -} - -# Note: The compiler is forced to compile the files as C++ (-x cxx) because -# otherwise it will silently ignore files with no file extension. -# -# Note: We deliberately don't suppress any warnings on the compiler command -# line, the user can always do this in a customized toolset later on. - -rule compile.c++ -{ - # We preprocess the TEMPLATE_DEPTH command line option here because we found - # no way to do it correctly in the actual action code. There we either get - # the -pending_instantiations parameter when no c++-template-depth property - # has been specified or we get additional quotes around - # "-pending_instantiations ". - local template-depth = [ on $(1) return $(TEMPLATE_DEPTH) ] ; - TEMPLATE_DEPTH on $(1) = "-pending_instantiations "$(template-depth) ; -} - -actions compile.c++ -{ - $(CONFIG_COMMAND) -x cxx -c -std strict_ansi -nopure_cname -noimplicit_include -timplicit_local -ptr "$(<[1]:D)/cxx_repository" $(OPTIONS) $(TEMPLATE_DEPTH) -D$(DEFINES) -I"$(INCLUDES)" -o "$(<)" "$(>)" -} - -# Always create archive from scratch. See the gcc toolet for rationale. -RM = [ common.rm-command ] ; -actions together piecemeal archive -{ - $(RM) "$(<)" - ar rc $(<) $(>) -} diff --git a/jam-files/boost-build/tools/hpfortran.jam b/jam-files/boost-build/tools/hpfortran.jam deleted file mode 100644 index 96e8d18b..00000000 --- a/jam-files/boost-build/tools/hpfortran.jam +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2004 Toon Knapen -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import toolset : flags ; -import feature ; -import fortran ; - -rule init ( version ? 
: command * : options * ) -{ -} - -# Declare flags and action for compilation -flags hpfortran OPTIONS <optimization>off : -O0 ; -flags hpfortran OPTIONS <optimization>speed : -O3 ; -flags hpfortran OPTIONS <optimization>space : -O1 ; - -flags hpfortran OPTIONS <debug-symbols>on : -g ; -flags hpfortran OPTIONS <profiling>on : -pg ; - -flags hpfortran DEFINES <define> ; -flags hpfortran INCLUDES <include> ; - -rule compile.fortran -{ -} - -actions compile.fortran -{ - f77 +DD64 $(OPTIONS) -D$(DEFINES) -I$(INCLUDES) -c -o "$(<)" "$(>)" -} - -generators.register-fortran-compiler hpfortran.compile.fortran : FORTRAN : OBJ ; diff --git a/jam-files/boost-build/tools/ifort.jam b/jam-files/boost-build/tools/ifort.jam deleted file mode 100644 index eb7c1988..00000000 --- a/jam-files/boost-build/tools/ifort.jam +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2004 Toon Knapen -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import toolset : flags ; -import feature ; -import fortran ; - -rule init ( version ? : command * : options * ) -{ -} - -# Declare flags and action for compilation -flags ifort OPTIONS <fflags> ; - -flags ifort OPTIONS <optimization>off : /Od ; -flags ifort OPTIONS <optimization>speed : /O3 ; -flags ifort OPTIONS <optimization>space : /O1 ; - -flags ifort OPTIONS <debug-symbols>on : /debug:full ; -flags ifort OPTIONS <profiling>on : /Qprof_gen ; - -flags ifort.compile FFLAGS <runtime-debugging>off/<runtime-link>shared : /MD ; -flags ifort.compile FFLAGS <runtime-debugging>on/<runtime-link>shared : /MDd ; -flags ifort.compile FFLAGS <runtime-debugging>off/<runtime-link>static/<threading>single : /ML ; -flags ifort.compile FFLAGS <runtime-debugging>on/<runtime-link>static/<threading>single : /MLd ; -flags ifort.compile FFLAGS <runtime-debugging>off/<runtime-link>static/<threading>multi : /MT ; -flags ifort.compile FFLAGS <runtime-debugging>on/<runtime-link>static/<threading>multi : /MTd ; - -flags ifort DEFINES <define> ; -flags ifort INCLUDES <include> ; - -rule compile.fortran -{ -} - -actions compile.fortran -{ - ifort $(FFLAGS) $(OPTIONS) /names:lowercase /D$(DEFINES) /I"$(INCLUDES)" /c /object:"$(<)" "$(>)" -} - -generators.register-fortran-compiler ifort.compile.fortran : FORTRAN : OBJ ; diff --git a/jam-files/boost-build/tools/intel-darwin.jam b/jam-files/boost-build/tools/intel-darwin.jam deleted file mode 100644 index aa0fd8fb..00000000 --- a/jam-files/boost-build/tools/intel-darwin.jam +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright Vladimir Prus 2004. -# Copyright Noel Belcourt 2007. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt -# or copy at http://www.boost.org/LICENSE_1_0.txt) - -import intel ; -import feature : feature ; -import os ; -import toolset ; -import toolset : flags ; -import gcc ; -import common ; -import errors ; -import generators ; - -feature.extend-subfeature toolset intel : platform : darwin ; - -toolset.inherit-generators intel-darwin - <toolset>intel <toolset-intel:platform>darwin - : gcc - # Don't inherit PCH generators. They were not tested, and probably - # don't work for this compiler. 
- : gcc.mingw.link gcc.mingw.link.dll gcc.compile.c.pch gcc.compile.c++.pch - ; - -generators.override intel-darwin.prebuilt : builtin.lib-generator ; -generators.override intel-darwin.prebuilt : builtin.prebuilt ; -generators.override intel-darwin.searched-lib-generator : searched-lib-generator ; - -toolset.inherit-rules intel-darwin : gcc ; -toolset.inherit-flags intel-darwin : gcc - : <inlining>off <inlining>on <inlining>full <optimization>space - <warnings>off <warnings>all <warnings>on - <architecture>x86/<address-model>32 - <architecture>x86/<address-model>64 - ; - -if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ] -{ - .debug-configuration = true ; -} - -# vectorization diagnostics -feature vectorize : off on full ; - -# Initializes the intel-darwin toolset -# version in mandatory -# name (default icc) is used to invoke the specified intel complier -# compile and link options allow you to specify addition command line options for each version -rule init ( version ? : command * : options * ) -{ - local condition = [ common.check-init-parameters intel-darwin - : version $(version) ] ; - - command = [ common.get-invocation-command intel-darwin : icc - : $(command) : /opt/intel_cc_80/bin ] ; - - common.handle-options intel-darwin : $(condition) : $(command) : $(options) ; - - gcc.init-link-flags intel-darwin darwin $(condition) ; - - # handle <library-path> - # local library-path = [ feature.get-values <library-path> : $(options) ] ; - # flags intel-darwin.link USER_OPTIONS $(condition) : [ feature.get-values <dll-path> : $(options) ] ; - - local root = [ feature.get-values <root> : $(options) ] ; - local bin ; - if $(command) || $(root) - { - bin ?= [ common.get-absolute-tool-path $(command[-1]) ] ; - root ?= $(bin:D) ; - - if $(root) - { - # Libraries required to run the executable may be in either - # $(root)/lib (10.1 and earlier) - # or - # $(root)/lib/architecture-name (11.0 and later: - local lib_path = $(root)/lib $(root:P)/lib/$(bin:B) ; - if $(.debug-configuration) - { - ECHO notice: using intel libraries :: $(condition) :: $(lib_path) ; - } - flags intel-darwin.link RUN_PATH $(condition) : $(lib_path) ; - } - } - - local m = [ MATCH (..).* : $(version) ] ; - local n = [ MATCH (.)\\. 
: $(m) ] ; - if $(n) { - m = $(n) ; - } - - local major = $(m) ; - - if $(major) = "9" { - flags intel-darwin.compile OPTIONS $(condition)/<inlining>off : -Ob0 ; - flags intel-darwin.compile OPTIONS $(condition)/<inlining>on : -Ob1 ; - flags intel-darwin.compile OPTIONS $(condition)/<inlining>full : -Ob2 ; - flags intel-darwin.compile OPTIONS $(condition)/<vectorize>off : -vec-report0 ; - flags intel-darwin.compile OPTIONS $(condition)/<vectorize>on : -vec-report1 ; - flags intel-darwin.compile OPTIONS $(condition)/<vectorize>full : -vec-report5 ; - flags intel-darwin.link OPTIONS $(condition)/<runtime-link>static : -static -static-libcxa -lstdc++ -lpthread ; - flags intel-darwin.link OPTIONS $(condition)/<runtime-link>shared : -shared-libcxa -lstdc++ -lpthread ; - } - else { - flags intel-darwin.compile OPTIONS $(condition)/<inlining>off : -inline-level=0 ; - flags intel-darwin.compile OPTIONS $(condition)/<inlining>on : -inline-level=1 ; - flags intel-darwin.compile OPTIONS $(condition)/<inlining>full : -inline-level=2 ; - flags intel-darwin.compile OPTIONS $(condition)/<vectorize>off : -vec-report0 ; - flags intel-darwin.compile OPTIONS $(condition)/<vectorize>on : -vec-report1 ; - flags intel-darwin.compile OPTIONS $(condition)/<vectorize>full : -vec-report5 ; - flags intel-darwin.link OPTIONS $(condition)/<runtime-link>static : -static -static-intel -lstdc++ -lpthread ; - flags intel-darwin.link OPTIONS $(condition)/<runtime-link>shared : -shared-intel -lstdc++ -lpthread ; - } - - local minor = [ MATCH ".*\\.(.).*" : $(version) ] ; - - # wchar_t char_traits workaround for compilers older than 10.2 - if $(major) = "9" || ( $(major) = "10" && ( $(minor) = "0" || $(minor) = "1" ) ) { - flags intel-darwin.compile DEFINES $(condition) : __WINT_TYPE__=int : unchecked ; - } -} - -SPACE = " " ; - -flags intel-darwin.compile OPTIONS <cflags> ; -flags intel-darwin.compile OPTIONS <cxxflags> ; -# flags intel-darwin.compile INCLUDES <include> ; - -flags intel-darwin.compile OPTIONS <optimization>space : -O1 ; # no specific space optimization flag in icc - -# -cpu-type-em64t = prescott nocona ; -flags intel-darwin.compile OPTIONS <instruction-set>$(cpu-type-em64t)/<address-model>32 : -m32 ; # -mcmodel=small ; -flags intel-darwin.compile OPTIONS <instruction-set>$(cpu-type-em64t)/<address-model>64 : -m64 ; # -mcmodel=large ; - -flags intel-darwin.compile.c OPTIONS <warnings>off : -w0 ; -flags intel-darwin.compile.c OPTIONS <warnings>on : -w1 ; -flags intel-darwin.compile.c OPTIONS <warnings>all : -w2 ; - -flags intel-darwin.compile.c++ OPTIONS <warnings>off : -w0 ; -flags intel-darwin.compile.c++ OPTIONS <warnings>on : -w1 ; -flags intel-darwin.compile.c++ OPTIONS <warnings>all : -w2 ; - -actions compile.c -{ - "$(CONFIG_COMMAND)" -xc $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.c++ -{ - "$(CONFIG_COMMAND)" -xc++ $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -flags intel-darwin ARFLAGS <archiveflags> ; - -# Default value. Mostly for the sake of intel-linux -# that inherits from gcc, but does not has the same -# logic to set the .AR variable. We can put the same -# logic in intel-linux, but that's hardly worth the trouble -# as on Linux, 'ar' is always available. -.AR = ar ; - -rule archive ( targets * : sources * : properties * ) -{ - # Always remove archive and start again. Here's rationale from - # Andre Hentz: - # - # I had a file, say a1.c, that was included into liba.a. - # I moved a1.c to a2.c, updated my Jamfiles and rebuilt. 
- # My program was crashing with absurd errors. - # After some debugging I traced it back to the fact that a1.o was *still* - # in liba.a - # - # Rene Rivera: - # - # Originally removing the archive was done by splicing an RM - # onto the archive action. That makes archives fail to build on NT - # when they have many files because it will no longer execute the - # action directly and blow the line length limit. Instead we - # remove the file in a different action, just before the building - # of the archive. - # - local clean.a = $(targets[1])(clean) ; - TEMPORARY $(clean.a) ; - NOCARE $(clean.a) ; - LOCATE on $(clean.a) = [ on $(targets[1]) return $(LOCATE) ] ; - DEPENDS $(clean.a) : $(sources) ; - DEPENDS $(targets) : $(clean.a) ; - common.RmTemps $(clean.a) : $(targets) ; -} - -actions piecemeal archive -{ - "$(.AR)" $(AROPTIONS) rc "$(<)" "$(>)" - "ranlib" -cs "$(<)" -} - -flags intel-darwin.link USER_OPTIONS <linkflags> ; - -# Declare actions for linking -rule link ( targets * : sources * : properties * ) -{ - SPACE on $(targets) = " " ; - # Serialize execution of the 'link' action, since - # running N links in parallel is just slower. - JAM_SEMAPHORE on $(targets) = <s>intel-darwin-link-semaphore ; -} - -actions link bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(USER_OPTIONS) -L"$(LINKPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) $(OPTIONS) -} - -actions link.dll bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(USER_OPTIONS) -L"$(LINKPATH)" -o "$(<)" -single_module -dynamiclib -install_name "$(<[1]:D=)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) $(OPTIONS) -} diff --git a/jam-files/boost-build/tools/intel-linux.jam b/jam-files/boost-build/tools/intel-linux.jam deleted file mode 100644 index d9164add..00000000 --- a/jam-files/boost-build/tools/intel-linux.jam +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright (c) 2003 Michael Stevens -# Copyright (c) 2011 Bryce Lelbach -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import toolset ; -import feature ; -import toolset : flags ; - -import intel ; -import gcc ; -import common ; -import errors ; -import generators ; -import type ; -import numbers ; - -feature.extend-subfeature toolset intel : platform : linux ; - -toolset.inherit-generators intel-linux - <toolset>intel <toolset-intel:platform>linux : gcc : gcc.mingw.link gcc.mingw.link.dll ; -generators.override intel-linux.prebuilt : builtin.lib-generator ; -generators.override intel-linux.prebuilt : builtin.prebuilt ; -generators.override intel-linux.searched-lib-generator : searched-lib-generator ; - -# Override default do-nothing generators. 
-generators.override intel-linux.compile.c.pch : pch.default-c-pch-generator ; -generators.override intel-linux.compile.c++.pch : pch.default-cpp-pch-generator ; - -type.set-generated-target-suffix PCH : <toolset>intel <toolset-intel:platform>linux : pchi ; - -toolset.inherit-rules intel-linux : gcc ; -toolset.inherit-flags intel-linux : gcc - : <inlining>off <inlining>on <inlining>full - <optimization>space <optimization>speed - <warnings>off <warnings>all <warnings>on - ; - -if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ] -{ - .debug-configuration = true ; -} - -# Initializes the intel-linux toolset -# version in mandatory -# name (default icpc) is used to invoke the specified intel-linux complier -# compile and link options allow you to specify addition command line options for each version -rule init ( version ? : command * : options * ) -{ - local condition = [ common.check-init-parameters intel-linux - : version $(version) ] ; - - if $(.debug-configuration) - { - ECHO "notice: intel-linux version is" $(version) ; - } - - local default_path ; - - # Intel C++ Composer XE 2011 for Linux, aka Intel C++ Compiler XE 12.0, - # aka intel-linux-12.0. In this version, Intel thankfully decides to install - # to a sane 'intel' folder in /opt. - if [ MATCH "(12[.]0|12)" : $(version) ] - { default_path = /opt/intel/bin ; } - # Intel C++ Compiler 11.1. - else if [ MATCH "(11[.]1)" : $(version) ] - { default_path = /opt/intel_cce_11.1.064.x86_64/bin ; } - # Intel C++ Compiler 11.0. - else if [ MATCH "(11[.]0|11)" : $(version) ] - { default_path = /opt/intel_cce_11.0.074.x86_64/bin ; } - # Intel C++ Compiler 10.1. - else if [ MATCH "(10[.]1)" : $(version) ] - { default_path = /opt/intel_cce_10.1.013_x64/bin ; } - # Intel C++ Compiler 9.1. - else if [ MATCH "(9[.]1)" : $(version) ] - { default_path = /opt/intel_cc_91/bin ; } - # Intel C++ Compiler 9.0. - else if [ MATCH "(9[.]0|9)" : $(version) ] - { default_path = /opt/intel_cc_90/bin ; } - # Intel C++ Compiler 8.1. - else if [ MATCH "(8[.]1)" : $(version) ] - { default_path = /opt/intel_cc_81/bin ; } - # Intel C++ Compiler 8.0 - this used to be the default, so now it's the - # fallback. - else - { default_path = /opt/intel_cc_80/bin ; } - - if $(.debug-configuration) - { - ECHO "notice: default search path for intel-linux is" $(default_path) ; - } - - command = [ common.get-invocation-command intel-linux : icpc - : $(command) : $(default_path) ] ; - - common.handle-options intel-linux : $(condition) : $(command) : $(options) ; - - gcc.init-link-flags intel-linux gnu $(condition) ; - - local root = [ feature.get-values <root> : $(options) ] ; - local bin ; - if $(command) || $(root) - { - bin ?= [ common.get-absolute-tool-path $(command[-1]) ] ; - root ?= $(bin:D) ; - - local command-string = $(command:J=" ") ; - local version-output = [ SHELL "$(command-string) --version" ] ; - local real-version = [ MATCH "([0-9.]+)" : $(version-output) ] ; - local major = [ MATCH "([0-9]+).*" : $(real-version) ] ; - - # If we failed to determine major version, use the behaviour for - # the current compiler. 
- if $(major) && [ numbers.less $(major) 10 ] - { - flags intel-linux.compile OPTIONS $(condition)/<inlining>off : "-Ob0" ; - flags intel-linux.compile OPTIONS $(condition)/<inlining>on : "-Ob1" ; - flags intel-linux.compile OPTIONS $(condition)/<inlining>full : "-Ob2" ; - flags intel-linux.compile OPTIONS $(condition)/<optimization>space : "-O1" ; - flags intel-linux.compile OPTIONS $(condition)/<optimization>speed : "-O3 -ip" ; - } - else if $(major) && [ numbers.less $(major) 11 ] - { - flags intel-linux.compile OPTIONS $(condition)/<inlining>off : "-inline-level=0" ; - flags intel-linux.compile OPTIONS $(condition)/<inlining>on : "-inline-level=1" ; - flags intel-linux.compile OPTIONS $(condition)/<inlining>full : "-inline-level=2" ; - flags intel-linux.compile OPTIONS $(condition)/<optimization>space : "-O1" ; - flags intel-linux.compile OPTIONS $(condition)/<optimization>speed : "-O3 -ip" ; - } - else # newer version of intel do have -Os (at least 11+, don't know about 10) - { - flags intel-linux.compile OPTIONS $(condition)/<inlining>off : "-inline-level=0" ; - flags intel-linux.compile OPTIONS $(condition)/<inlining>on : "-inline-level=1" ; - flags intel-linux.compile OPTIONS $(condition)/<inlining>full : "-inline-level=2" ; - flags intel-linux.compile OPTIONS $(condition)/<optimization>space : "-Os" ; - flags intel-linux.compile OPTIONS $(condition)/<optimization>speed : "-O3 -ip" ; - } - - if $(root) - { - # Libraries required to run the executable may be in either - # $(root)/lib (10.1 and earlier) - # or - # $(root)/lib/architecture-name (11.0 and later: - local lib_path = $(root)/lib $(root:P)/lib/$(bin:B) ; - if $(.debug-configuration) - { - ECHO notice: using intel libraries :: $(condition) :: $(lib_path) ; - } - flags intel-linux.link RUN_PATH $(condition) : $(lib_path) ; - } - } -} - -SPACE = " " ; - -flags intel-linux.compile OPTIONS <warnings>off : -w0 ; -flags intel-linux.compile OPTIONS <warnings>on : -w1 ; -flags intel-linux.compile OPTIONS <warnings>all : -w2 ; - -rule compile.c++ ( targets * : sources * : properties * ) -{ - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-fpic $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; - DEPENDS $(<) : [ on $(<) return $(PCH_FILE) ] ; -} - -actions compile.c++ bind PCH_FILE -{ - "$(CONFIG_COMMAND)" -c -xc++ $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -use-pch"$(PCH_FILE)" -c -o "$(<)" "$(>)" -} - -rule compile.c ( targets * : sources * : properties * ) -{ - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-fpic $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; - DEPENDS $(<) : [ on $(<) return $(PCH_FILE) ] ; -} - -actions compile.c bind PCH_FILE -{ - "$(CONFIG_COMMAND)" -c -xc $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -use-pch"$(PCH_FILE)" -c -o "$(<)" "$(>)" -} - -rule compile.c++.pch ( targets * : sources * : properties * ) -{ - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-fpic $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; -} -# -# Compiling a pch first deletes any existing *.pchi file, as Intel's compiler -# won't over-write an existing pch: instead it creates filename$1.pchi, filename$2.pchi -# etc - which appear not to do anything except take up disk space :-( -# -actions compile.c++.pch -{ - rm -f "$(<)" && "$(CONFIG_COMMAND)" -x 
c++-header $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -pch-create "$(<)" "$(>)" -} - -actions compile.fortran -{ - "ifort" -c $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -rule compile.c.pch ( targets * : sources * : properties * ) -{ - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-fpic $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; -} - -actions compile.c.pch -{ - rm -f "$(<)" && "$(CONFIG_COMMAND)" -x c-header $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -pch-create "$(<)" "$(>)" -} - -rule link ( targets * : sources * : properties * ) -{ - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; - SPACE on $(targets) = " " ; - JAM_SEMAPHORE on $(targets) = <s>intel-linux-link-semaphore ; -} - -actions link bind LIBRARIES -{ - "$(CONFIG_COMMAND)" -L"$(LINKPATH)" -Wl,-R$(SPACE)-Wl,"$(RPATH)" -Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) $(OPTIONS) $(USER_OPTIONS) -} - -rule link.dll ( targets * : sources * : properties * ) -{ - gcc.setup-threading $(targets) : $(sources) : $(properties) ; - gcc.setup-address-model $(targets) : $(sources) : $(properties) ; - SPACE on $(targets) = " " ; - JAM_SEMAPHORE on $(targets) = <s>intel-linux-link-semaphore ; -} - -# Differ from 'link' above only by -shared. -actions link.dll bind LIBRARIES -{ - "$(CONFIG_COMMAND)" -L"$(LINKPATH)" -Wl,-R$(SPACE)-Wl,"$(RPATH)" -o "$(<)" -Wl,-soname$(SPACE)-Wl,$(<[1]:D=) -shared "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) $(OPTIONS) $(USER_OPTIONS) -} - - - diff --git a/jam-files/boost-build/tools/intel-win.jam b/jam-files/boost-build/tools/intel-win.jam deleted file mode 100644 index 691b5dce..00000000 --- a/jam-files/boost-build/tools/intel-win.jam +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright Vladimir Prus 2004. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt -# or copy at http://www.boost.org/LICENSE_1_0.txt) - -# Importing common is needed because the rules we inherit here depend on it. -# That is nasty. -import common ; -import errors ; -import feature ; -import intel ; -import msvc ; -import os ; -import toolset ; -import generators ; -import type ; - -feature.extend-subfeature toolset intel : platform : win ; - -toolset.inherit-generators intel-win <toolset>intel <toolset-intel:platform>win : msvc ; -toolset.inherit-flags intel-win : msvc : : YLOPTION ; -toolset.inherit-rules intel-win : msvc ; - -# Override default do-nothing generators. -generators.override intel-win.compile.c.pch : pch.default-c-pch-generator ; -generators.override intel-win.compile.c++.pch : pch.default-cpp-pch-generator ; -generators.override intel-win.compile.rc : rc.compile.resource ; -generators.override intel-win.compile.mc : mc.compile ; - -toolset.flags intel-win.compile PCH_SOURCE <pch>on : <pch-source> ; - -toolset.add-requirements <toolset>intel-win,<runtime-link>shared:<threading>multi ; - -# Initializes the intel toolset for windows -rule init ( version ? : # the compiler version - command * : # the command to invoke the compiler itself - options * # Additional option: <compatibility> - # either 'vc6', 'vc7', 'vc7.1' - # or 'native'(default). 
- ) -{ - local compatibility = - [ feature.get-values <compatibility> : $(options) ] ; - local condition = [ common.check-init-parameters intel-win - : version $(version) : compatibility $(compatibility) ] ; - - command = [ common.get-invocation-command intel-win : icl.exe : - $(command) ] ; - - common.handle-options intel-win : $(condition) : $(command) : $(options) ; - - local root ; - if $(command) - { - root = [ common.get-absolute-tool-path $(command[-1]) ] ; - root = $(root)/ ; - } - - local setup ; - setup = [ GLOB $(root) : iclvars_*.bat ] ; - if ! $(setup) - { - setup = $(root)/iclvars.bat ; - } - setup = "call \""$(setup)"\" > nul " ; - - if [ os.name ] = NT - { - setup = $(setup)" -" ; - } - else - { - setup = "cmd /S /C "$(setup)" \"&&\" " ; - } - - toolset.flags intel-win.compile .CC $(condition) : $(setup)icl ; - toolset.flags intel-win.link .LD $(condition) : $(setup)xilink ; - toolset.flags intel-win.archive .LD $(condition) : $(setup)xilink /lib ; - toolset.flags intel-win.link .MT $(condition) : $(setup)mt -nologo ; - toolset.flags intel-win.compile .MC $(condition) : $(setup)mc ; - toolset.flags intel-win.compile .RC $(condition) : $(setup)rc ; - - local m = [ MATCH (.).* : $(version) ] ; - local major = $(m[1]) ; - - local C++FLAGS ; - - C++FLAGS += /nologo ; - - # Reduce the number of spurious error messages - C++FLAGS += /Qwn5 /Qwd985 ; - - # Enable ADL - C++FLAGS += -Qoption,c,--arg_dep_lookup ; #"c" works for C++, too - - # Disable Microsoft "secure" overloads in Dinkumware libraries since they - # cause compile errors with Intel versions 9 and 10. - C++FLAGS += -D_SECURE_SCL=0 ; - - if $(major) > 5 - { - C++FLAGS += /Zc:forScope ; # Add support for correct for loop scoping. - } - - # Add options recognized only by intel7 and above. - if $(major) >= 7 - { - C++FLAGS += /Qansi_alias ; - } - - if $(compatibility) = vc6 - { - C++FLAGS += - # Emulate VC6 - /Qvc6 - - # No wchar_t support in vc6 dinkum library. Furthermore, in vc6 - # compatibility-mode, wchar_t is not a distinct type from unsigned - # short. - -DBOOST_NO_INTRINSIC_WCHAR_T - ; - } - else - { - if $(major) > 5 - { - # Add support for wchar_t - C++FLAGS += /Zc:wchar_t - # Tell the dinkumware library about it. - -D_NATIVE_WCHAR_T_DEFINED - ; - } - } - - if $(compatibility) && $(compatibility) != native - { - C++FLAGS += /Q$(base-vc) ; - } - else - { - C++FLAGS += - -Qoption,cpp,--arg_dep_lookup - # The following options were intended to disable the Intel compiler's - # 'bug-emulation' mode, but were later reported to be causing ICE with - # Intel-Win 9.0. It is not yet clear which options can be safely used. - # -Qoption,cpp,--const_string_literals - # -Qoption,cpp,--new_for_init - # -Qoption,cpp,--no_implicit_typename - # -Qoption,cpp,--no_friend_injection - # -Qoption,cpp,--no_microsoft_bugs - ; - } - - toolset.flags intel-win CFLAGS $(condition) : $(C++FLAGS) ; - # By default, when creating PCH, intel adds 'i' to the explicitly - # specified name of the PCH file. Of course, Boost.Build is not - # happy when compiler produces not the file it was asked for. - # The option below stops this behaviour. - toolset.flags intel-win CFLAGS : -Qpchi- ; - - if ! $(compatibility) - { - # If there's no backend version, assume 7.1. - compatibility = vc7.1 ; - } - - local extract-version = [ MATCH ^vc(.*) : $(compatibility) ] ; - if ! $(extract-version) - { - errors.user-error "Invalid value for compatibility option:" - $(compatibility) ; - } - - # Depending on the settings, running of tests require some runtime DLLs. 
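For illustration, the init rule above is normally invoked from a user-config.jam; a minimal sketch, in which the version number, install path, and <compatibility> value are all hypothetical:

    # user-config.jam sketch -- hypothetical version and install path;
    # <compatibility> accepts 'vc6', 'vc7', 'vc7.1' or 'native' (the
    # default), as documented in the rule signature above
    using intel-win : 9.1 : "C:/Program Files/Intel/Compiler/C++/9.1/IA32/Bin/icl.exe" : <compatibility>vc7.1 ;
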
- toolset.flags intel-win RUN_PATH $(condition) : $(root) ; - - msvc.configure-version-specific intel-win : $(extract-version[1]) : $(condition) ; -} - -toolset.flags intel-win.link LIBRARY_OPTION <toolset>intel : "" ; - -toolset.flags intel-win YLOPTION ; - diff --git a/jam-files/boost-build/tools/intel.jam b/jam-files/boost-build/tools/intel.jam deleted file mode 100644 index 67038aa2..00000000 --- a/jam-files/boost-build/tools/intel.jam +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright Vladimir Prus 2004. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt -# or copy at http://www.boost.org/LICENSE_1_0.txt) - -# This is a generic 'intel' toolset. Depending on the current -# system, it forwards either to 'intel-linux' or 'intel-win' -# modules. - -import feature ; -import os ; -import toolset ; - -feature.extend toolset : intel ; -feature.subfeature toolset intel : platform : : propagated link-incompatible ; - -rule init ( * : * ) -{ - if [ os.name ] = LINUX - { - toolset.using intel-linux : - $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - else if [ os.name ] = MACOSX - { - toolset.using intel-darwin : - $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - else - { - toolset.using intel-win : - $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } -} diff --git a/jam-files/boost-build/tools/lex.jam b/jam-files/boost-build/tools/lex.jam deleted file mode 100644 index 75d64131..00000000 --- a/jam-files/boost-build/tools/lex.jam +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import type ; -import generators ; -import feature ; -import property ; - - -feature.feature flex.prefix : : free ; -type.register LEX : l ; -type.register LEX++ : ll ; -generators.register-standard lex.lex : LEX : C ; -generators.register-standard lex.lex : LEX++ : CPP ; - -rule init ( ) -{ -} - -rule lex ( target : source : properties * ) -{ - local r = [ property.select flex.prefix : $(properties) ] ; - if $(r) - { - PREFIX on $(<) = $(r:G=) ; - } -} - -actions lex -{ - flex -P$(PREFIX) -o$(<) $(>) -} diff --git a/jam-files/boost-build/tools/make.jam b/jam-files/boost-build/tools/make.jam deleted file mode 100644 index 08567285..00000000 --- a/jam-files/boost-build/tools/make.jam +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2003 Douglas Gregor -# Copyright 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This module defines the 'make' main target rule. - -import "class" : new ; -import errors : error ; -import project ; -import property ; -import property-set ; -import regex ; -import targets ; - - -class make-target-class : basic-target -{ - import type regex virtual-target ; - import "class" : new ; - - rule __init__ ( name : project : sources * : requirements * - : default-build * : usage-requirements * ) - { - basic-target.__init__ $(name) : $(project) : $(sources) : - $(requirements) : $(default-build) : $(usage-requirements) ; - } - - rule construct ( name : source-targets * : property-set ) - { - local action-name = [ $(property-set).get <action> ] ; - # 'm' will always be set -- we add '@' ourselves in the 'make' rule - # below. 
- local m = [ MATCH ^@(.*) : $(action-name) ] ; - - local a = [ new action $(source-targets) : $(m[1]) : $(property-set) ] ; - local t = [ new file-target $(self.name) exact : [ type.type - $(self.name) ] : $(self.project) : $(a) ] ; - return [ property-set.empty ] [ virtual-target.register $(t) ] ; - } -} - - -# Declares the 'make' main target. -# -rule make ( target-name : sources * : generating-rule + : requirements * : - usage-requirements * ) -{ - local project = [ project.current ] ; - - # The '@' sign causes the feature.jam module to qualify rule name with the - # module name of current project, if needed. - local m = [ MATCH ^(@).* : $(generating-rule) ] ; - if ! $(m) - { - generating-rule = @$(generating-rule) ; - } - requirements += <action>$(generating-rule) ; - - targets.main-target-alternative - [ new make-target-class $(target-name) : $(project) - : [ targets.main-target-sources $(sources) : $(target-name) ] - : [ targets.main-target-requirements $(requirements) : $(project) ] - : [ targets.main-target-default-build : $(project) ] - : [ targets.main-target-usage-requirements $(usage-requirements) : - $(project) ] ] ; -} - - -IMPORT $(__name__) : make : : make ; diff --git a/jam-files/boost-build/tools/make.py b/jam-files/boost-build/tools/make.py deleted file mode 100644 index 10baa1cb..00000000 --- a/jam-files/boost-build/tools/make.py +++ /dev/null @@ -1,59 +0,0 @@ -# Status: ported. -# Base revision: 64068 - -# Copyright 2003 Dave Abrahams -# Copyright 2003 Douglas Gregor -# Copyright 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This module defines the 'make' main target rule. - -from b2.build.targets import BasicTarget -from b2.build.virtual_target import Action, FileTarget -from b2.build import type -from b2.manager import get_manager -import b2.build.property_set - - -class MakeTarget(BasicTarget): - - def construct(self, name, source_targets, property_set): - - action_name = property_set.get("<action>")[0] - action = Action(get_manager(), source_targets, action_name[1:], property_set) - target = FileTarget(self.name(), type.type(self.name()), - self.project(), action, exact=True) - return [ b2.build.property_set.empty(), - [self.project().manager().virtual_targets().register(target)]] - -def make (target_name, sources, generating_rule, - requirements=None, usage_requirements=None): - - target_name = target_name[0] - generating_rule = generating_rule[0] - if generating_rule[0] != '@': - generating_rule = '@' + generating_rule - - if not requirements: - requirements = [] - - - requirements.append("<action>%s" % generating_rule) - - m = get_manager() - targets = m.targets() - project = m.projects().current() - engine = m.engine() - engine.register_bjam_action(generating_rule) - - targets.main_target_alternative(MakeTarget( - target_name, project, - targets.main_target_sources(sources, target_name), - targets.main_target_requirements(requirements, project), - targets.main_target_default_build([], project), - targets.main_target_usage_requirements(usage_requirements or [], project))) - -get_manager().projects().add_rule("make", make) - diff --git a/jam-files/boost-build/tools/mc.jam b/jam-files/boost-build/tools/mc.jam deleted file mode 100644 index 57837773..00000000 --- a/jam-files/boost-build/tools/mc.jam +++ /dev/null @@ -1,44 +0,0 @@ -#~ Copyright 2005 Alexey Pakhunov. 
-#~ Distributed under the Boost Software License, Version 1.0. -#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Support for Microsoft message compiler tool. -# Notes: -# - there's just message compiler tool, there's no tool for -# extracting message strings from sources -# - This file allows to use Microsoft message compiler -# with any toolset. In msvc.jam, there's more specific -# message compiling action. - -import common ; -import generators ; -import feature : feature get-values ; -import toolset : flags ; -import type ; -import rc ; - -rule init ( ) -{ -} - -type.register MC : mc ; - - -# Command line options -feature mc-input-encoding : ansi unicode : free ; -feature mc-output-encoding : unicode ansi : free ; -feature mc-set-customer-bit : no yes : free ; - -flags mc.compile MCFLAGS <mc-input-encoding>ansi : -a ; -flags mc.compile MCFLAGS <mc-input-encoding>unicode : -u ; -flags mc.compile MCFLAGS <mc-output-encoding>ansi : -A ; -flags mc.compile MCFLAGS <mc-output-encoding>unicode : -U ; -flags mc.compile MCFLAGS <mc-set-customer-bit>no : ; -flags mc.compile MCFLAGS <mc-set-customer-bit>yes : -c ; - -generators.register-standard mc.compile : MC : H RC ; - -actions compile -{ - mc $(MCFLAGS) -h "$(<[1]:DW)" -r "$(<[2]:DW)" "$(>:W)" -} diff --git a/jam-files/boost-build/tools/message.jam b/jam-files/boost-build/tools/message.jam deleted file mode 100644 index 212d8542..00000000 --- a/jam-files/boost-build/tools/message.jam +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2008 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Defines main target type 'message', that prints a message when built for the -# first time. - -import project ; -import "class" : new ; -import targets ; -import property-set ; - -class message-target-class : basic-target -{ - rule __init__ ( name-and-dir : project : * ) - { - basic-target.__init__ $(name-and-dir) : $(project) ; - self.3 = $(3) ; - self.4 = $(4) ; - self.5 = $(5) ; - self.6 = $(6) ; - self.7 = $(7) ; - self.8 = $(8) ; - self.9 = $(9) ; - self.built = ; - } - - rule construct ( name : source-targets * : property-set ) - { - if ! $(self.built) - { - for i in 3 4 5 6 7 8 9 - { - if $(self.$(i)) - { - ECHO $(self.$(i)) ; - } - } - self.built = 1 ; - } - - return [ property-set.empty ] ; - } -} - - -rule message ( name : * ) -{ - local project = [ project.current ] ; - - targets.main-target-alternative - [ new message-target-class $(name) : $(project) - : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) ] ; -} -IMPORT $(__name__) : message : : message ;
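As a usage sketch for the 'message' main target rule defined above (the target name and text here are hypothetical), the message is echoed once, the first time the target is built, and not again within that build:

    # Jamfile sketch -- hypothetical target name and text
    message mpi-status : "MPI support was not configured; MPI targets will be skipped." ;
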
\ No newline at end of file diff --git a/jam-files/boost-build/tools/message.py b/jam-files/boost-build/tools/message.py deleted file mode 100644 index cc0b946f..00000000 --- a/jam-files/boost-build/tools/message.py +++ /dev/null @@ -1,46 +0,0 @@ -# Status: ported. -# Base revision: 64488. -# -# Copyright 2008, 2010 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Defines main target type 'message', that prints a message when built for the -# first time. - -import b2.build.targets as targets -import b2.build.property_set as property_set - -from b2.manager import get_manager - -class MessageTargetClass(targets.BasicTarget): - - def __init__(self, name, project, *args): - - targets.BasicTarget.__init__(self, name, project, []) - self.args = args - self.built = False - - def construct(self, name, sources, ps): - - if not self.built: - for arg in self.args: - if type(arg) == type([]): - arg = " ".join(arg) - print arg - self.built = True - - return (property_set.empty(), []) - -def message(name, *args): - - if type(name) == type([]): - name = name[0] - - t = get_manager().targets() - - project = get_manager().projects().current() - - return t.main_target_alternative(MessageTargetClass(*((name, project) + args))) - -get_manager().projects().add_rule("message", message) diff --git a/jam-files/boost-build/tools/midl.jam b/jam-files/boost-build/tools/midl.jam deleted file mode 100644 index 0aa5dda3..00000000 --- a/jam-files/boost-build/tools/midl.jam +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 2005 Alexey Pakhunov. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# Microsoft Interface Definition Language (MIDL) related routines - -import common ; -import generators ; -import feature : feature get-values ; -import os ; -import scanner ; -import toolset : flags ; -import type ; - -rule init ( ) -{ -} - -type.register IDL : idl ; - -# A type library (.tlb) is generated by MIDL compiler and can be included -# to resources of an application (.rc). In order to be found by a resource -# compiler its target type should be derived from 'H' - otherwise -# the property '<implicit-dependency>' will be ignored. 
-type.register MSTYPELIB : tlb : H ; - - -# Register scanner for MIDL files -class midl-scanner : scanner -{ - import path property-set regex scanner type virtual-target ; - - rule __init__ ( includes * ) - { - scanner.__init__ ; - - self.includes = $(includes) ; - - # List of quoted strings - self.re-strings = "[ \t]*\"([^\"]*)\"([ \t]*,[ \t]*\"([^\"]*)\")*[ \t]*" ; - - # 'import' and 'importlib' directives - self.re-import = "import"$(self.re-strings)"[ \t]*;" ; - self.re-importlib = "importlib[ \t]*[(]"$(self.re-strings)"[)][ \t]*;" ; - - # C preprocessor 'include' directive - self.re-include-angle = "#[ \t]*include[ \t]*<(.*)>" ; - self.re-include-quoted = "#[ \t]*include[ \t]*\"(.*)\"" ; - } - - rule pattern ( ) - { - # Match '#include', 'import' and 'importlib' directives - return "((#[ \t]*include|import(lib)?).+(<(.*)>|\"(.*)\").+)" ; - } - - rule process ( target : matches * : binding ) - { - local included-angle = [ regex.transform $(matches) : $(self.re-include-angle) : 1 ] ; - local included-quoted = [ regex.transform $(matches) : $(self.re-include-quoted) : 1 ] ; - local imported = [ regex.transform $(matches) : $(self.re-import) : 1 3 ] ; - local imported_tlbs = [ regex.transform $(matches) : $(self.re-importlib) : 1 3 ] ; - - # CONSIDER: the new scoping rule seem to defeat "on target" variables. - local g = [ on $(target) return $(HDRGRIST) ] ; - local b = [ NORMALIZE_PATH $(binding:D) ] ; - - # Attach binding of including file to included targets. - # When target is directly created from virtual target - # this extra information is unnecessary. But in other - # cases, it allows to distinguish between two headers of the - # same name included from different places. - local g2 = $(g)"#"$(b) ; - - included-angle = $(included-angle:G=$(g)) ; - included-quoted = $(included-quoted:G=$(g2)) ; - imported = $(imported:G=$(g2)) ; - imported_tlbs = $(imported_tlbs:G=$(g2)) ; - - local all = $(included-angle) $(included-quoted) $(imported) ; - - INCLUDES $(target) : $(all) ; - DEPENDS $(target) : $(imported_tlbs) ; - NOCARE $(all) $(imported_tlbs) ; - SEARCH on $(included-angle) = $(self.includes:G=) ; - SEARCH on $(included-quoted) = $(b) $(self.includes:G=) ; - SEARCH on $(imported) = $(b) $(self.includes:G=) ; - SEARCH on $(imported_tlbs) = $(b) $(self.includes:G=) ; - - scanner.propagate - [ type.get-scanner CPP : [ property-set.create $(self.includes) ] ] : - $(included-angle) $(included-quoted) : $(target) ; - - scanner.propagate $(__name__) : $(imported) : $(target) ; - } -} - -scanner.register midl-scanner : include ; -type.set-scanner IDL : midl-scanner ; - - -# Command line options -feature midl-stubless-proxy : yes no : propagated ; -feature midl-robust : yes no : propagated ; - -flags midl.compile.idl MIDLFLAGS <midl-stubless-proxy>yes : /Oicf ; -flags midl.compile.idl MIDLFLAGS <midl-stubless-proxy>no : /Oic ; -flags midl.compile.idl MIDLFLAGS <midl-robust>yes : /robust ; -flags midl.compile.idl MIDLFLAGS <midl-robust>no : /no_robust ; - -# Architecture-specific options -architecture-x86 = <architecture> <architecture>x86 ; -address-model-32 = <address-model> <address-model>32 ; -address-model-64 = <address-model> <address-model>64 ; - -flags midl.compile.idl MIDLFLAGS $(architecture-x86)/$(address-model-32) : /win32 ; -flags midl.compile.idl MIDLFLAGS $(architecture-x86)/<address-model>64 : /x64 ; -flags midl.compile.idl MIDLFLAGS <architecture>ia64/$(address-model-64) : /ia64 ; - - -flags midl.compile.idl DEFINES <define> ; -flags midl.compile.idl UNDEFS <undef> ; -flags 
midl.compile.idl INCLUDES <include> ; - - -generators.register-c-compiler midl.compile.idl : IDL : MSTYPELIB H C(%_i) C(%_proxy) C(%_dlldata) ; - - -# MIDL does not always generate '%_proxy.c' and '%_dlldata.c'. This behavior -# depends on contents of the source IDL file. Calling TOUCH_FILE below ensures -# that both files will be created so bjam will not try to recreate them -# constantly. -TOUCH_FILE = [ common.file-touch-command ] ; - -actions compile.idl -{ - midl /nologo @"@($(<[1]:W).rsp:E=$(nl)"$(>:W)" $(nl)-D$(DEFINES) $(nl)"-I$(INCLUDES)" $(nl)-U$(UNDEFS) $(nl)$(MIDLFLAGS) $(nl)/tlb "$(<[1]:W)" $(nl)/h "$(<[2]:W)" $(nl)/iid "$(<[3]:W)" $(nl)/proxy "$(<[4]:W)" $(nl)/dlldata "$(<[5]:W)")" - $(TOUCH_FILE) "$(<[4]:W)" - $(TOUCH_FILE) "$(<[5]:W)" -} diff --git a/jam-files/boost-build/tools/mipspro.jam b/jam-files/boost-build/tools/mipspro.jam deleted file mode 100644 index 417eaefc..00000000 --- a/jam-files/boost-build/tools/mipspro.jam +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright Noel Belcourt 2007. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -import property ; -import generators ; -import os ; -import toolset : flags ; -import feature ; -import fortran ; -import type ; -import common ; - -feature.extend toolset : mipspro ; -toolset.inherit mipspro : unix ; -generators.override mipspro.prebuilt : builtin.lib-generator ; -generators.override mipspro.searched-lib-generator : searched-lib-generator ; - -# Documentation and toolchain description located -# http://www.sgi.com/products/software/irix/tools/ - -rule init ( version ? : command * : options * ) -{ - local condition = [ - common.check-init-parameters mipspro : version $(version) ] ; - - command = [ common.get-invocation-command mipspro : CC : $(command) ] ; - - common.handle-options mipspro : $(condition) : $(command) : $(options) ; - - command_c = $(command_c[1--2]) $(command[-1]:B=cc) ; - - toolset.flags mipspro CONFIG_C_COMMAND $(condition) : $(command_c) ; - - # fortran support - local command = [ - common.get-invocation-command mipspro : f77 : $(command) : $(install_dir) ] ; - - command_f = $(command_f[1--2]) $(command[-1]:B=f77) ; - toolset.flags mipspro CONFIG_F_COMMAND $(condition) : $(command_f) ; - - # set link flags - flags mipspro.link FINDLIBS-ST : [ - feature.get-values <find-static-library> : $(options) ] : unchecked ; - - flags mipspro.link FINDLIBS-SA : [ - feature.get-values <find-shared-library> : $(options) ] : unchecked ; -} - -# Declare generators -generators.register-c-compiler mipspro.compile.c : C : OBJ : <toolset>mipspro ; -generators.register-c-compiler mipspro.compile.c++ : CPP : OBJ : <toolset>mipspro ; -generators.register-fortran-compiler mipspro.compile.fortran : FORTRAN : OBJ : <toolset>mipspro ; - -cpu-arch-32 = - <architecture>/<address-model> - <architecture>/<address-model>32 ; - -cpu-arch-64 = - <architecture>/<address-model>64 ; - -flags mipspro.compile OPTIONS $(cpu-arch-32) : -n32 ; -flags mipspro.compile OPTIONS $(cpu-arch-64) : -64 ; - -# Declare flags and actions for compilation -flags mipspro.compile OPTIONS <debug-symbols>on : -g ; -# flags mipspro.compile OPTIONS <profiling>on : -xprofile=tcov ; -flags mipspro.compile OPTIONS <warnings>off : -w ; -flags mipspro.compile OPTIONS <warnings>on : -ansiW -diag_suppress 1429 ; # suppress long long is nonstandard warning -flags mipspro.compile OPTIONS <warnings>all : -fullwarn ; -flags mipspro.compile OPTIONS <optimization>speed 
: -Ofast ; -flags mipspro.compile OPTIONS <optimization>space : -O2 ; -flags mipspro.compile OPTIONS <cflags> : -LANG:std ; -flags mipspro.compile.c++ OPTIONS <inlining>off : -INLINE:none ; -flags mipspro.compile.c++ OPTIONS <cxxflags> ; -flags mipspro.compile DEFINES <define> ; -flags mipspro.compile INCLUDES <include> ; - - -flags mipspro.compile.fortran OPTIONS <fflags> ; - -actions compile.c -{ - "$(CONFIG_C_COMMAND)" $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.c++ -{ - "$(CONFIG_COMMAND)" -FE:template_in_elf_section -ptused $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.fortran -{ - "$(CONFIG_F_COMMAND)" $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -# Declare flags and actions for linking -flags mipspro.link OPTIONS <debug-symbols>on : -g ; -# Strip the binary when no debugging is needed -# flags mipspro.link OPTIONS <debug-symbols>off : -s ; -# flags mipspro.link OPTIONS <profiling>on : -xprofile=tcov ; -# flags mipspro.link OPTIONS <threading>multi : -mt ; - -flags mipspro.link OPTIONS $(cpu-arch-32) : -n32 ; -flags mipspro.link OPTIONS $(cpu-arch-64) : -64 ; - -flags mipspro.link OPTIONS <optimization>speed : -Ofast ; -flags mipspro.link OPTIONS <optimization>space : -O2 ; -flags mipspro.link OPTIONS <linkflags> ; -flags mipspro.link LINKPATH <library-path> ; -flags mipspro.link FINDLIBS-ST <find-static-library> ; -flags mipspro.link FINDLIBS-SA <find-shared-library> ; -flags mipspro.link FINDLIBS-SA <threading>multi : pthread ; -flags mipspro.link LIBRARIES <library-file> ; -flags mipspro.link LINK-RUNTIME <runtime-link>static : static ; -flags mipspro.link LINK-RUNTIME <runtime-link>shared : dynamic ; -flags mipspro.link RPATH <dll-path> ; - -rule link ( targets * : sources * : properties * ) -{ - SPACE on $(targets) = " " ; -} - -actions link bind LIBRARIES -{ - "$(CONFIG_COMMAND)" -FE:template_in_elf_section -ptused $(OPTIONS) -L"$(LINKPATH)" -R"$(RPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -Bdynamic -l$(FINDLIBS-SA) -Bstatic -l$(FINDLIBS-ST) -B$(LINK-RUNTIME) -lm -} - -# Slight mods for dlls -rule link.dll ( targets * : sources * : properties * ) -{ - SPACE on $(targets) = " " ; -} - -actions link.dll bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(OPTIONS) -L"$(LINKPATH)" -R"$(RPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -Bdynamic -l$(FINDLIBS-SA) -Bstatic -l$(FINDLIBS-ST) -B$(LINK-RUNTIME) -} - -# Declare action for creating static libraries -actions piecemeal archive -{ - ar -cr "$(<)" "$(>)" -} diff --git a/jam-files/boost-build/tools/mpi.jam b/jam-files/boost-build/tools/mpi.jam deleted file mode 100644 index 0fe490be..00000000 --- a/jam-files/boost-build/tools/mpi.jam +++ /dev/null @@ -1,583 +0,0 @@ -# Support for the Message Passing Interface (MPI) -# -# (C) Copyright 2005, 2006 Trustees of Indiana University -# (C) Copyright 2005 Douglas Gregor -# -# Distributed under the Boost Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) -# -# Authors: Douglas Gregor -# Andrew Lumsdaine -# -# ==== MPI Configuration ==== -# -# For many users, MPI support can be enabled simply by adding the following -# line to your user-config.jam file: -# -# using mpi ; -# -# This should auto-detect MPI settings based on the MPI wrapper compiler in -# your path, e.g., "mpic++". 
If the wrapper compiler is not in your path, or -# has a different name, you can pass the name of the wrapper compiler as the -# first argument to the mpi module: -# -# using mpi : /opt/mpich2-1.0.4/bin/mpiCC ; -# -# If your MPI implementation does not have a wrapper compiler, or the MPI -# auto-detection code does not work with your MPI's wrapper compiler, -# you can pass MPI-related options explicitly via the second parameter to the -# mpi module: -# -# using mpi : : <find-shared-library>lammpio <find-shared-library>lammpi++ -# <find-shared-library>mpi <find-shared-library>lam -# <find-shared-library>dl ; -# -# To see the results of MPI auto-detection, pass "--debug-configuration" on -# the bjam command line. -# -# The (optional) fourth argument configures Boost.MPI for running -# regression tests. These parameters specify the executable used to -# launch jobs (default: "mpirun") followed by any necessary arguments -# to this to run tests and tell the program to expect the number of -# processors to follow (default: "-np"). With the default parameters, -# for instance, the test harness will execute, e.g., -# -# mpirun -np 4 all_gather_test -# -# ==== Linking Against the MPI Libraries === -# -# To link against the MPI libraries, import the "mpi" module and add the -# following requirement to your target: -# -# <library>/mpi//mpi -# -# Since MPI support is not always available, you should check -# "mpi.configured" before trying to link against the MPI libraries. - -import "class" : new ; -import common ; -import feature : feature ; -import generators ; -import os ; -import project ; -import property ; -import testing ; -import toolset ; -import type ; -import path ; - -# Make this module a project -project.initialize $(__name__) ; -project mpi ; - -if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ] -{ - .debug-configuration = true ; -} - -# Assuming the first part of the command line is the given prefix -# followed by some non-empty value, remove the first argument. Returns -# either nothing (if there was no prefix or no value) or a pair -# -# <name>value rest-of-cmdline -# -# This is a subroutine of cmdline_to_features -rule add_feature ( prefix name cmdline ) -{ - local match = [ MATCH "^$(prefix)([^\" ]+|\"[^\"]+\") *(.*)$" : $(cmdline) ] ; - - # If there was no value associated with the prefix, abort - if ! $(match) { - return ; - } - - local value = $(match[1]) ; - - if [ MATCH " +" : $(value) ] { - value = "\"$(value)\"" ; - } - - return "<$(name)>$(value)" $(match[2]) ; -} - -# Strip any end-of-line characters off the given string and return the -# result. -rule strip-eol ( string ) -{ - local match = [ MATCH "^(([A-Za-z0-9~`\.!@#$%^&*()_+={};:'\",.<>/?\\| -]|[|])*).*$" : $(string) ] ; - - if $(match) - { - return $(match[1]) ; - } - else - { - return $(string) ; - } -} - -# Split a command-line into a set of features. Certain kinds of -# compiler flags are recognized (e.g., -I, -D, -L, -l) and replaced -# with their Boost.Build equivalents (e.g., <include>, <define>, -# <library-path>, <find-library>). All other arguments are introduced -# using the features in the unknown-features parameter, because we -# don't know how to deal with them. For instance, if your compile and -# correct. The incoming command line should be a string starting with -# an executable (e.g., g++ -I/include/path") and may contain any -# number of command-line arguments thereafter. The result is a list of -# features corresponding to the given command line, ignoring the -# executable. 
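A worked example of the mapping this subroutine performs may help; the input command line below is hypothetical, and the feature spellings follow the prefix table inside the rule:

    # cmdline_to_features "g++ -I/opt/mpich/include -DMPICH_IGNORE_CXX_SEEK -L/opt/mpich/lib -lmpich -pthread"
    # yields, with the executable thrown away:
    #   <include>/opt/mpich/include <define>MPICH_IGNORE_CXX_SEEK
    #   <library-path>/opt/mpich/lib <find-shared-library>mpich <threading>multi
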
-rule cmdline_to_features ( cmdline : unknown-features ? ) -{ - local executable ; - local features ; - local otherflags ; - local result ; - - unknown-features ?= <cxxflags> <linkflags> ; - - # Pull the executable out of the command line. At this point, the - # executable is just thrown away. - local match = [ MATCH "^([^\" ]+|\"[^\"]+\") *(.*)$" : $(cmdline) ] ; - executable = $(match[1]) ; - cmdline = $(match[2]) ; - - # List the prefix/feature pairs that we will be able to transform. - # Every kind of parameter not mentioned here will be placed in both - # cxxflags and linkflags, because we don't know where they should go. - local feature_kinds-D = "define" ; - local feature_kinds-I = "include" ; - local feature_kinds-L = "library-path" ; - local feature_kinds-l = "find-shared-library" ; - - while $(cmdline) { - - # Check for one of the feature prefixes we know about. If we - # find one (and the associated value is nonempty), convert it - # into a feature. - local match = [ MATCH "^(-.)(.*)" : $(cmdline) ] ; - local matched ; - if $(match) && $(match[2]) { - local prefix = $(match[1]) ; - if $(feature_kinds$(prefix)) { - local name = $(feature_kinds$(prefix)) ; - local add = [ add_feature $(prefix) $(name) $(cmdline) ] ; - - if $(add) { - - if $(add[1]) = <find-shared-library>pthread - { - # Uhm. It's not really nice that this MPI implementation - # uses -lpthread as opposed to -pthread. We do want to - # set <threading>multi, instead of -lpthread. - result += "<threading>multi" ; - MPI_EXTRA_REQUIREMENTS += "<threading>multi" ; - } - else - { - result += $(add[1]) ; - } - - cmdline = $(add[2]) ; - matched = yes ; - } - } - } - - # If we haven't matched a feature prefix, just grab the command-line - # argument itself. If we can map this argument to a feature - # (e.g., -pthread -> <threading>multi), then do so; otherwise, - # and add it to the list of "other" flags that we don't - # understand. - if ! $(matched) { - match = [ MATCH "^([^\" ]+|\"[^\"]+\") *(.*)$" : $(cmdline) ] ; - local value = $(match[1]) ; - cmdline = $(match[2]) ; - - # Check for multithreading support - if $(value) = "-pthread" || $(value) = "-pthreads" - { - result += "<threading>multi" ; - - # DPG: This is a hack intended to work around a BBv2 bug where - # requirements propagated from libraries are not checked for - # conflicts when BBv2 determines which "common" properties to - # apply to a target. In our case, the <threading>single property - # gets propagated from the common properties to Boost.MPI - # targets, even though <threading>multi is in the usage - # requirements of <library>/mpi//mpi. - MPI_EXTRA_REQUIREMENTS += "<threading>multi" ; - } - else if [ MATCH "(.*[a-zA-Z0-9<>?-].*)" : $(value) ] { - otherflags += $(value) ; - } - } - } - - # If there are other flags that we don't understand, add them to the - # result as both <cxxflags> and <linkflags> - if $(otherflags) { - for unknown in $(unknown-features) - { - result += "$(unknown)$(otherflags:J= )" ; - } - } - - return $(result) ; -} - -# Determine if it is safe to execute the given shell command by trying -# to execute it and determining whether the exit code is zero or -# not. Returns true for an exit code of zero, false otherwise. -local rule safe-shell-command ( cmdline ) -{ - local result = [ SHELL "$(cmdline) > /dev/null 2>/dev/null; if [ "$?" -eq "0" ]; then echo SSCOK; fi" ] ; - return [ MATCH ".*(SSCOK).*" : $(result) ] ; -} - -# Initialize the MPI module. -rule init ( mpicxx ? : options * : mpirun-with-options * ) -{ - if ! 
$(options) && $(.debug-configuration) - { - ECHO "===============MPI Auto-configuration===============" ; - } - - if ! $(mpicxx) && [ os.on-windows ] - { - # Try to auto-configure to the Microsoft Compute Cluster Pack - local cluster_pack_path_native = "C:\\Program Files\\Microsoft Compute Cluster Pack" ; - local cluster_pack_path = [ path.make $(cluster_pack_path_native) ] ; - if [ GLOB $(cluster_pack_path_native)\\Include : mpi.h ] - { - if $(.debug-configuration) - { - ECHO "Found Microsoft Compute Cluster Pack: $(cluster_pack_path_native)" ; - } - - # Pick up either the 32-bit or 64-bit library, depending on which address - # model the user has selected. Default to 32-bit. - options = <include>$(cluster_pack_path)/Include - <address-model>64:<library-path>$(cluster_pack_path)/Lib/amd64 - <library-path>$(cluster_pack_path)/Lib/i386 - <find-static-library>msmpi - <toolset>msvc:<define>_SECURE_SCL=0 - ; - - # Setup the "mpirun" equivalent (mpiexec) - .mpirun = "\"$(cluster_pack_path_native)\\Bin\\mpiexec.exe"\" ; - .mpirun_flags = -n ; - } - else if $(.debug-configuration) - { - ECHO "Did not find Microsoft Compute Cluster Pack in $(cluster_pack_path_native)." ; - } - } - - if ! $(options) - { - # Try to auto-detect options based on the wrapper compiler - local command = [ common.get-invocation-command mpi : mpic++ : $(mpicxx) ] ; - - if ! $(mpicxx) && ! $(command) - { - # Try "mpiCC", which is used by MPICH - command = [ common.get-invocation-command mpi : mpiCC ] ; - } - - if ! $(mpicxx) && ! $(command) - { - # Try "mpicxx", which is used by OpenMPI and MPICH2 - command = [ common.get-invocation-command mpi : mpicxx ] ; - } - - local result ; - local compile_flags ; - local link_flags ; - - if ! $(command) - { - # Do nothing: we'll complain later - } - # OpenMPI and newer versions of LAM-MPI have -showme:compile and - # -showme:link. - else if [ safe-shell-command "$(command) -showme:compile" ] && - [ safe-shell-command "$(command) -showme:link" ] - { - if $(.debug-configuration) - { - ECHO "Found recent LAM-MPI or Open MPI wrapper compiler: $(command)" ; - } - - compile_flags = [ SHELL "$(command) -showme:compile" ] ; - link_flags = [ SHELL "$(command) -showme:link" ] ; - - # Prepend COMPILER as the executable name, to match the format of - # other compilation commands. 
- compile_flags = "COMPILER $(compile_flags)" ; - link_flags = "COMPILER $(link_flags)" ; - } - # Look for LAM-MPI's -showme - else if [ safe-shell-command "$(command) -showme" ] - { - if $(.debug-configuration) - { - ECHO "Found older LAM-MPI wrapper compiler: $(command)" ; - } - - result = [ SHELL "$(command) -showme" ] ; - } - # Look for MPICH - else if [ safe-shell-command "$(command) -show" ] - { - if $(.debug-configuration) - { - ECHO "Found MPICH wrapper compiler: $(command)" ; - } - compile_flags = [ SHELL "$(command) -compile_info" ] ; - link_flags = [ SHELL "$(command) -link_info" ] ; - } - # Sun HPC and Ibm POE - else if [ SHELL "$(command) -v 2>/dev/null" ] - { - compile_flags = [ SHELL "$(command) -c -v -xtarget=native64 2>/dev/null" ] ; - - local back = [ MATCH "--------------------(.*)" : $(compile_flags) ] ; - if $(back) - { - # Sun HPC - if $(.debug-configuration) - { - ECHO "Found Sun MPI wrapper compiler: $(command)" ; - } - - compile_flags = [ MATCH "(.*)--------------------" : $(back) ] ; - compile_flags = [ MATCH "(.*)-v" : $(compile_flags) ] ; - link_flags = [ SHELL "$(command) -v -xtarget=native64 2>/dev/null" ] ; - link_flags = [ MATCH "--------------------(.*)" : $(link_flags) ] ; - link_flags = [ MATCH "(.*)--------------------" : $(link_flags) ] ; - - # strip out -v from compile options - local front = [ MATCH "(.*)-v" : $(link_flags) ] ; - local back = [ MATCH "-v(.*)" : $(link_flags) ] ; - link_flags = "$(front) $(back)" ; - front = [ MATCH "(.*)-xtarget=native64" : $(link_flags) ] ; - back = [ MATCH "-xtarget=native64(.*)" : $(link_flags) ] ; - link_flags = "$(front) $(back)" ; - } - else - { - # Ibm POE - if $(.debug-configuration) - { - ECHO "Found IBM MPI wrapper compiler: $(command)" ; - } - - # - compile_flags = [ SHELL "$(command) -c -v 2>/dev/null" ] ; - compile_flags = [ MATCH "(.*)exec: export.*" : $(compile_flags) ] ; - local front = [ MATCH "(.*)-v" : $(compile_flags) ] ; - local back = [ MATCH "-v(.*)" : $(compile_flags) ] ; - compile_flags = "$(front) $(back)" ; - front = [ MATCH "(.*)-c" : $(compile_flags) ] ; - back = [ MATCH "-c(.*)" : $(compile_flags) ] ; - compile_flags = "$(front) $(back)" ; - link_flags = $(compile_flags) ; - - # get location of mpif.h from mpxlf - local f_flags = [ SHELL "mpxlf -v 2>/dev/null" ] ; - f_flags = [ MATCH "(.*)exec: export.*" : $(f_flags) ] ; - front = [ MATCH "(.*)-v" : $(f_flags) ] ; - back = [ MATCH "-v(.*)" : $(f_flags) ] ; - f_flags = "$(front) $(back)" ; - f_flags = [ MATCH "xlf_r(.*)" : $(f_flags) ] ; - f_flags = [ MATCH "-F:mpxlf_r(.*)" : $(f_flags) ] ; - compile_flags = [ strip-eol $(compile_flags) ] ; - compile_flags = "$(compile_flags) $(f_flags)" ; - } - } - - if $(result) || $(compile_flags) && $(link_flags) - { - if $(result) - { - result = [ strip-eol $(result) ] ; - options = [ cmdline_to_features $(result) ] ; - } - else - { - compile_flags = [ strip-eol $(compile_flags) ] ; - link_flags = [ strip-eol $(link_flags) ] ; - - # Separately process compilation and link features, then combine - # them at the end. - local compile_features = [ cmdline_to_features $(compile_flags) - : "<cxxflags>" ] ; - local link_features = [ cmdline_to_features $(link_flags) - : "<linkflags>" ] ; - options = $(compile_features) $(link_features) ; - } - - # If requested, display MPI configuration information. 
-      if $(.debug-configuration)
-      {
-        if $(result)
-        {
-          ECHO " Wrapper compiler command line: $(result)" ;
-        }
-        else
-        {
-          local match = [ MATCH "^([^\" ]+|\"[^\"]+\") *(.*)$"
-            : $(compile_flags) ] ;
-          ECHO "MPI compilation flags: $(match[2])" ;
-          local match = [ MATCH "^([^\" ]+|\"[^\"]+\") *(.*)$"
-            : $(link_flags) ] ;
-          ECHO "MPI link flags: $(match[2])" ;
-        }
-      }
-    }
-    else
-    {
-      if $(command)
-      {
-        ECHO "MPI auto-detection failed: unknown wrapper compiler $(command)" ;
-        ECHO "Please report this error to the Boost mailing list: http://www.boost.org" ;
-      }
-      else if $(mpicxx)
-      {
-        ECHO "MPI auto-detection failed: unable to find wrapper compiler $(mpicxx)" ;
-      }
-      else
-      {
-        ECHO "MPI auto-detection failed: unable to find wrapper compiler `mpic++' or `mpiCC'" ;
-      }
-      ECHO "You will need to manually configure MPI support." ;
-    }
-
-  }
-
-  # Find mpirun (or its equivalent) and its flags
-  if ! $(.mpirun)
-  {
-    .mpirun =
-      [ common.get-invocation-command mpi : mpirun : $(mpirun-with-options[1]) ] ;
-    .mpirun_flags = $(mpirun-with-options[2-]) ;
-    .mpirun_flags ?= -np ;
-  }
-
-  if $(.debug-configuration)
-  {
-    if $(options)
-    {
-      echo "MPI build features: " ;
-      ECHO $(options) ;
-    }
-
-    if $(.mpirun)
-    {
-      echo "MPI launcher: $(.mpirun) $(.mpirun_flags)" ;
-    }
-
-    ECHO "====================================================" ;
-  }
-
-  if $(options)
-  {
-    .configured = true ;
-
-    # Set up the "mpi" alias
-    alias mpi : : : : $(options) ;
-  }
-}
-
-# States whether MPI has been configured
-rule configured ( )
-{
-  return $(.configured) ;
-}
-
-# Returns the "extra" requirements needed to build MPI. These requirements are
-# part of the /mpi//mpi library target, but they need to be added to anything
-# that uses MPI directly to work around bugs in BBv2's propagation of
-# requirements.
-rule extra-requirements ( )
-{
-  return $(MPI_EXTRA_REQUIREMENTS) ;
-}
-
-# Support for testing; borrowed from Python
-type.register RUN_MPI_OUTPUT ;
-type.register RUN_MPI : : TEST ;
-
-class mpi-test-generator : generator
-{
-  import property-set ;
-
-  rule __init__ ( * : * )
-  {
-    generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ;
-    self.composing = true ;
-  }
-
-  rule run ( project name ? : property-set : sources * : multiple ? )
-  {
-    # Generate an executable from the sources. This is the executable we will run.
-    local executable =
-      [ generators.construct $(project) $(name) : EXE : $(property-set) : $(sources) ] ;
-
-    result =
-      [ construct-result $(executable[2-]) : $(project) $(name)-run : $(property-set) ] ;
-  }
-}
-
-# Use mpi-test-generator to generate MPI tests from sources
-generators.register
-  [ new mpi-test-generator mpi.capture-output : : RUN_MPI_OUTPUT ] ;
-
-generators.register-standard testing.expect-success
-  : RUN_MPI_OUTPUT : RUN_MPI ;
-
-# The number of processes to spawn when executing an MPI test.
-feature mpi:processes : : free incidental ;
-
-# The flag settings on testing.capture-output do not
-# apply to mpi.capture-output at the moment.
-# Redo this explicitly.
-toolset.flags mpi.capture-output ARGS <testing.arg> ;
-rule capture-output ( target : sources * : properties * )
-{
-  # Use the standard capture-output rule to run the tests
-  testing.capture-output $(target) : $(sources[1]) : $(properties) ;
-
-  # Determine the number of processes we should run on.
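-  # For example, a test declared with the (hypothetical) property
-  # <mpi:processes>4 yields the value "4" here once the grist is stripped
-  # with :G= below.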
-  local num_processes = [ property.select <mpi:processes> : $(properties) ] ;
-  num_processes = $(num_processes:G=) ;
-
-  # serialize the MPI tests to avoid overloading systems
-  JAM_SEMAPHORE on $(target) = <s>mpi-run-semaphore ;
-
-  # We launch MPI processes using the "mpirun" equivalent specified by the user.
-  LAUNCHER on $(target) =
-    [ on $(target) return $(.mpirun) $(.mpirun_flags) $(num_processes) ] ;
-}
-
-# Creates a set of test cases to be run through the MPI launcher. The name, sources,
-# and requirements are the same as for any other test generator. However, schedule is
-# a list of numbers, which indicates how many processes each test run will use. For
-# example, passing 1 2 7 will run the test with 1 process, then 2 processes, then
-# 7 processes. The name provided is just the base name: the actual tests will be
-# the name followed by a hyphen, then the number of processes.
-rule mpi-test ( name : sources * : requirements * : schedule * )
-{
-  sources ?= $(name).cpp ;
-  schedule ?= 1 2 3 4 7 8 13 17 ;
-
-  local result ;
-  for processes in $(schedule)
-  {
-    result += [ testing.make-test
-      run-mpi : $(sources) /boost/mpi//boost_mpi
-      : $(requirements) <toolset>msvc:<link>static <mpi:processes>$(processes) : $(name)-$(processes) ] ;
-  }
-  return $(result) ;
-}
diff --git a/jam-files/boost-build/tools/msvc-config.jam b/jam-files/boost-build/tools/msvc-config.jam
deleted file mode 100644
index 6c71e3b0..00000000
--- a/jam-files/boost-build/tools/msvc-config.jam
+++ /dev/null
@@ -1,12 +0,0 @@
-#~ Copyright 2005 Rene Rivera.
-#~ Distributed under the Boost Software License, Version 1.0.
-#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-# Automatic configuration for VisualStudio toolset. To use, just import this module.
-
-import toolset : using ;
-
-ECHO "warning: msvc-config.jam is deprecated. Use 'using msvc : all ;' instead." ;
-
-using msvc : all ;
-
diff --git a/jam-files/boost-build/tools/msvc.jam b/jam-files/boost-build/tools/msvc.jam
deleted file mode 100644
index e33a66d2..00000000
--- a/jam-files/boost-build/tools/msvc.jam
+++ /dev/null
@@ -1,1392 +0,0 @@
-# Copyright (c) 2003 David Abrahams.
-# Copyright (c) 2005 Vladimir Prus.
-# Copyright (c) 2005 Alexey Pakhunov.
-# Copyright (c) 2006 Bojan Resnik.
-# Copyright (c) 2006 Ilya Sokolov.
-# Copyright (c) 2007 Rene Rivera
-# Copyright (c) 2008 Jurko Gospodnetic
-#
-# Use, modification and distribution is subject to the Boost Software
-# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
-# http://www.boost.org/LICENSE_1_0.txt)
-
-################################################################################
-#
-# MSVC Boost Build toolset module.
-# --------------------------------
-#
-# All toolset versions need to have their location either auto-detected or
-# explicitly specified except for the special 'default' version that expects the
-# environment to find the needed tools or report an error.
-#
-################################################################################
-
-import "class" : new ;
-import common ;
-import errors ;
-import feature ;
-import generators ;
-import mc ;
-import midl ;
-import os ;
-import path ;
-import pch ;
-import property ;
-import rc ;
-import toolset ;
-import type ;
-
-
-type.register MANIFEST : manifest ;
-feature.feature embed-manifest : on off : incidental propagated ;
-
-type.register PDB : pdb ;
-
-################################################################################
-#
-# Public rules.
-#
-################################################################################
-
-# Initialize a specific toolset version configuration. As a result, the path to
-# the compiler and, possibly, program names are set up, and will be used when
-# that version of the compiler is requested. For example, you might have:
-#
-#    using msvc : 6.5 : cl.exe ;
-#    using msvc : 7.0 : Y:/foo/bar/cl.exe ;
-#
-# The version parameter may be omitted:
-#
-#    using msvc : : Z:/foo/bar/cl.exe ;
-#
-# The following keywords have special meanings when specified as versions:
-#   - all     - all detected but not yet used versions will be marked as used
-#               with their default options.
-#   - default - this is an equivalent to an empty version.
-#
-# Depending on the supplied version, the detected configurations, and the
-# presence of 'cl.exe' in the path, different results may be achieved. The
-# following table describes the possible scenarios:
-#
-#                                    Nothing           "x.y"
-# Passed    Nothing     "x.y"       detected,         detected,
-# version   detected    detected    cl.exe in path    cl.exe in path
-#
-# default   Error       Use "x.y"   Create "default"  Use "x.y"
-# all       None        Use all     None              Use all
-# x.y       -           Use "x.y"   -                 Use "x.y"
-# a.b       Error       Error       Create "a.b"      Create "a.b"
-#
-# "x.y" - refers to a detected version;
-# "a.b" - refers to an undetected version.
-#
-# FIXME: Currently the command parameter and the <compiler> property parameter
-# seem to overlap in duties. Remove this duplication. This seems to be related
-# to why someone started preparing to replace init with configure rules.
-#
-rule init (
-    # The msvc version being configured. When omitted the tools invoked when no
-    # explicit version is given will be configured.
-    version ?
-
-    # The command used to invoke the compiler. If not specified:
-    #     - if version is given, default location for that version will be
-    #       searched
-    #
-    #     - if version is not given, default locations for MSVC 9.0, 8.0, 7.1,
-    #       7.0 and 6.* will be searched
-    #
-    #     - if compiler is not found in the default locations, PATH will be
-    #       searched.
-    : command *
-
-    # Options may include:
-    #
-    # All options shared by multiple toolset types as handled by the
-    # common.handle-options() rule, e.g. <cflags>, <compileflags>, <cxxflags>,
-    # <fflags> & <linkflags>.
-    #
-    # <assembler>
-    # <compiler>
-    # <idl-compiler>
-    # <linker>
-    # <mc-compiler>
-    # <resource-compiler>
-    #     Exact tool names to be used by this msvc toolset configuration.
-    #
-    # <compiler-filter>
-    #     Command through which to pipe the output of running the compiler.
-    #     For example to pass the output to STLfilt.
-    #
-    # <setup>
-    #     Global setup command to invoke before running any of the msvc tools.
-    #     It will be passed additional option parameters depending on the actual
-    #     target platform.
-    #
-    # <setup-amd64>
-    # <setup-i386>
-    # <setup-ia64>
-    #     Platform specific setup command to invoke before running any of the
-    #     msvc tools used when building a target for a specific platform, e.g.
-    #     when building a 32 or 64 bit executable.
-    : options *
-)
-{
-    if $(command)
-    {
-        options += <command>$(command) ;
-    }
-    configure $(version) : $(options) ;
-}
-
-
-# 'configure' is a newer version of 'init'. The parameter 'command' is passed as
-# a part of the 'options' list. See the 'init' rule comment for more detailed
-# information.
-#
-rule configure ( version ? : options * )
-{
-    switch $(version)
-    {
-        case "all" :
-            if $(options)
-            {
-                errors.error "MSVC toolset configuration: options should be"
-                    "empty when '$(version)' is specified." ;
-            }
-
-            # Configure (i.e.
mark as used) all registered versions. - local all-versions = [ $(.versions).all ] ; - if ! $(all-versions) - { - if $(.debug-configuration) - { - ECHO "notice: [msvc-cfg] Asked to configure all registered" - "msvc toolset versions when there are none currently" - "registered." ; - } - } - else - { - for local v in $(all-versions) - { - # Note that there is no need to skip already configured - # versions here as this will request configure-really rule - # to configure the version using default options which will - # in turn cause it to simply do nothing in case the version - # has already been configured. - configure-really $(v) ; - } - } - - case "default" : - configure-really : $(options) ; - - case * : - configure-really $(version) : $(options) ; - } -} - - -# Sets up flag definitions dependent on the compiler version used. -# - 'version' is the version of compiler in N.M format. -# - 'conditions' is the property set to be used as flag conditions. -# - 'toolset' is the toolset for which flag settings are to be defined. -# This makes the rule reusable for other msvc-option-compatible compilers. -# -rule configure-version-specific ( toolset : version : conditions ) -{ - toolset.push-checking-for-flags-module unchecked ; - # Starting with versions 7.0, the msvc compiler have the /Zc:forScope and - # /Zc:wchar_t options that improve C++ standard conformance, but those - # options are off by default. If we are sure that the msvc version is at - # 7.*, add those options explicitly. We can be sure either if user specified - # version 7.* explicitly or if we auto-detected the version ourselves. - if ! [ MATCH ^(6\\.) : $(version) ] - { - toolset.flags $(toolset).compile CFLAGS $(conditions) : /Zc:forScope /Zc:wchar_t ; - toolset.flags $(toolset).compile.c++ C++FLAGS $(conditions) : /wd4675 ; - - # Explicitly disable the 'function is deprecated' warning. Some msvc - # versions have a bug, causing them to emit the deprecation warning even - # with /W0. - toolset.flags $(toolset).compile CFLAGS $(conditions)/<warnings>off : /wd4996 ; - - if [ MATCH ^([78]\\.) : $(version) ] - { - # 64-bit compatibility warning deprecated since 9.0, see - # http://msdn.microsoft.com/en-us/library/yt4xw8fh.aspx - toolset.flags $(toolset).compile CFLAGS $(conditions)/<warnings>all : /Wp64 ; - } - } - - # - # Processor-specific optimization. - # - - if [ MATCH ^([67]) : $(version) ] - { - # 8.0 deprecates some of the options. - toolset.flags $(toolset).compile CFLAGS $(conditions)/<optimization>speed $(conditions)/<optimization>space : /Ogiy /Gs ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/<optimization>speed : /Ot ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/<optimization>space : /Os ; - - toolset.flags $(toolset).compile CFLAGS $(conditions)/$(.cpu-arch-i386)/<instruction-set> : /GB ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/$(.cpu-arch-i386)/<instruction-set>i386 : /G3 ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/$(.cpu-arch-i386)/<instruction-set>i486 : /G4 ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/$(.cpu-arch-i386)/<instruction-set>$(.cpu-type-g5) : /G5 ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/$(.cpu-arch-i386)/<instruction-set>$(.cpu-type-g6) : /G6 ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/$(.cpu-arch-i386)/<instruction-set>$(.cpu-type-g7) : /G7 ; - - # Improve floating-point accuracy. Otherwise, some of C++ Boost's "math" - # tests will fail. 
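-        # (/Op favors floating-point consistency over speed; VC8 dropped it in
-        # favor of the /fp switches, which is presumably why it is only added
-        # on the 6.x/7.x branch here.)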
- toolset.flags $(toolset).compile CFLAGS $(conditions) : /Op ; - - # 7.1 and below have single-threaded static RTL. - toolset.flags $(toolset).compile CFLAGS $(conditions)/<runtime-debugging>off/<runtime-link>static/<threading>single : /ML ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/<runtime-debugging>on/<runtime-link>static/<threading>single : /MLd ; - } - else - { - # 8.0 and above adds some more options. - toolset.flags $(toolset).compile CFLAGS $(conditions)/$(.cpu-arch-amd64)/<instruction-set> : /favor:blend ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/$(.cpu-arch-amd64)/<instruction-set>$(.cpu-type-em64t) : /favor:EM64T ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/$(.cpu-arch-amd64)/<instruction-set>$(.cpu-type-amd64) : /favor:AMD64 ; - - # 8.0 and above only has multi-threaded static RTL. - toolset.flags $(toolset).compile CFLAGS $(conditions)/<runtime-debugging>off/<runtime-link>static/<threading>single : /MT ; - toolset.flags $(toolset).compile CFLAGS $(conditions)/<runtime-debugging>on/<runtime-link>static/<threading>single : /MTd ; - - # Specify target machine type so the linker will not need to guess. - toolset.flags $(toolset).link LINKFLAGS $(conditions)/$(.cpu-arch-amd64) : /MACHINE:X64 ; - toolset.flags $(toolset).link LINKFLAGS $(conditions)/$(.cpu-arch-i386) : /MACHINE:X86 ; - toolset.flags $(toolset).link LINKFLAGS $(conditions)/$(.cpu-arch-ia64) : /MACHINE:IA64 ; - - # Make sure that manifest will be generated even if there is no - # dependencies to put there. - toolset.flags $(toolset).link LINKFLAGS $(conditions)/<embed-manifest>off : /MANIFEST ; - } - toolset.pop-checking-for-flags-module ; -} - - -# Registers this toolset including all of its flags, features & generators. Does -# nothing on repeated calls. -# -rule register-toolset ( ) -{ - if ! msvc in [ feature.values toolset ] - { - register-toolset-really ; - } -} - - -# Declare action for creating static libraries. If library exists, remove it -# before adding files. See -# http://article.gmane.org/gmane.comp.lib.boost.build/4241 for rationale. -if [ os.name ] in NT -{ - # The 'DEL' command would issue a message to stdout if the file does not - # exist, so need a check. 
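-    # (The @"@(file.rsp:E=contents)" construct below is bjam's on-the-fly
-    # response-file syntax: the quoted contents are written to file.rsp and the
-    # response file's name is substituted into the command line, keeping long
-    # library lists under the shell's command-length limit.)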
-    actions archive
-    {
-        if exist "$(<[1])" DEL "$(<[1])"
-        $(.LD) $(AROPTIONS) /out:"$(<[1])" @"@($(<[1]:W).rsp:E=$(.nl)"$(>)" $(.nl)$(LIBRARIES_MENTIONED_BY_FILE) $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST).lib" $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA).lib")"
-    }
-}
-else
-{
-    actions archive
-    {
-        $(.RM) "$(<[1])"
-        $(.LD) $(AROPTIONS) /out:"$(<[1])" @"@($(<[1]:W).rsp:E=$(.nl)"$(>)" $(.nl)$(LIBRARIES_MENTIONED_BY_FILE) $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST).lib" $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA).lib")"
-    }
-}
-
-
-# For the assembler the following options are turned on by default:
-#
-#   -Zp4   align structures to 4 bytes
-#   -Cp    preserve case of user identifiers
-#   -Cx    preserve case in publics, externs
-#
-actions compile.asm
-{
-    $(.ASM) -c -Zp4 -Cp -Cx -D$(DEFINES) $(ASMFLAGS) $(USER_ASMFLAGS) -Fo "$(<:W)" "$(>:W)"
-}
-
-
-rule compile.c ( targets + : sources * : properties * )
-{
-    C++FLAGS on $(targets[1]) = ;
-    get-rspline $(targets) : -TC ;
-    compile-c-c++ $(<) : $(>) [ on $(<) return $(PCH_FILE) ] [ on $(<) return $(PCH_HEADER) ] ;
-}
-
-
-rule compile.c.preprocess ( targets + : sources * : properties * )
-{
-    C++FLAGS on $(targets[1]) = ;
-    get-rspline $(targets) : -TC ;
-    preprocess-c-c++ $(<) : $(>) [ on $(<) return $(PCH_FILE) ] [ on $(<) return $(PCH_HEADER) ] ;
-}
-
-
-rule compile.c.pch ( targets + : sources * : properties * )
-{
-    C++FLAGS on $(targets[1]) = ;
-    get-rspline $(targets[1]) : -TC ;
-    get-rspline $(targets[2]) : -TC ;
-    local pch-source = [ on $(<) return $(PCH_SOURCE) ] ;
-    if $(pch-source)
-    {
-        DEPENDS $(<) : $(pch-source) ;
-        compile-c-c++-pch-s $(targets) : $(sources) $(pch-source) ;
-    }
-    else
-    {
-        compile-c-c++-pch $(targets) : $(sources) ;
-    }
-}
-
-toolset.flags msvc YLOPTION : "-Yl" ;
-
-# Action for running the C/C++ compiler without using precompiled headers.
-#
-# WARNING: Synchronize any changes in this action with intel-win
-#
-# Notes regarding PDB generation, for when we use <debug-symbols>on/<debug-store>database
-#
-# 1. PDB_CFLAG is only set for <debug-symbols>on/<debug-store>database, ensuring that the /Fd flag is dropped if PDB_CFLAG is empty
-#
-# 2. When compiling an executable's source files, PDB_NAME is set on a per-source file basis by rule compile-c-c++.
-#    The linker will pull these into the executable's PDB
-#
-# 3. When compiling a library's source files, PDB_NAME is updated to <libname>.pdb for each source file by rule archive,
-#    as in this case the compiler must be used to create a single PDB for our library.
-#
-actions compile-c-c++ bind PDB_NAME
-{
-    $(.CC) @"@($(<[1]:W).rsp:E="$(>[1]:W)" -Fo"$(<[1]:W)" $(PDB_CFLAG)"$(PDB_NAME)" -Yu"$(>[3]:D=)" -Fp"$(>[2]:W)" $(CC_RSPLINE))" $(.CC.FILTER)
-}
-
-actions preprocess-c-c++ bind PDB_NAME
-{
-    $(.CC) @"@($(<[1]:W).rsp:E="$(>[1]:W)" -E $(PDB_CFLAG)"$(PDB_NAME)" -Yu"$(>[3]:D=)" -Fp"$(>[2]:W)" $(CC_RSPLINE))" >"$(<[1]:W)"
-}
-
-rule compile-c-c++ ( targets + : sources * )
-{
-    DEPENDS $(<[1]) : [ on $(<[1]) return $(PCH_HEADER) ] ;
-    DEPENDS $(<[1]) : [ on $(<[1]) return $(PCH_FILE) ] ;
-    PDB_NAME on $(<) = $(<:S=.pdb) ;
-}
-
-rule preprocess-c-c++ ( targets + : sources * )
-{
-    DEPENDS $(<[1]) : [ on $(<[1]) return $(PCH_HEADER) ] ;
-    DEPENDS $(<[1]) : [ on $(<[1]) return $(PCH_FILE) ] ;
-    PDB_NAME on $(<) = $(<:S=.pdb) ;
-}
-
-# Action for running the C/C++ compiler using precompiled headers. In addition
-# to whatever else it needs to compile, this action also adds a temporary source
-# .cpp file used to compile the precompiled headers themselves.
-# -# The global .escaped-double-quote variable is used to avoid messing up Emacs -# syntax highlighting in the messy N-quoted code below. -actions compile-c-c++-pch -{ - $(.CC) @"@($(<[1]:W).rsp:E="$(>[2]:W)" -Fo"$(<[2]:W)" -Yc"$(>[1]:D=)" $(YLOPTION)"__bjam_pch_symbol_$(>[1]:D=)" -Fp"$(<[1]:W)" $(CC_RSPLINE))" "@($(<[1]:W).cpp:E=#include $(.escaped-double-quote)$(>[1]:D=)$(.escaped-double-quote)$(.nl))" $(.CC.FILTER) -} - - -# Action for running the C/C++ compiler using precompiled headers. An already -# built source file for compiling the precompiled headers is expected to be -# given as one of the source parameters. -actions compile-c-c++-pch-s -{ - $(.CC) @"@($(<[1]:W).rsp:E="$(>[2]:W)" -Fo"$(<[2]:W)" -Yc"$(>[1]:D=)" $(YLOPTION)"__bjam_pch_symbol_$(>[1]:D=)" -Fp"$(<[1]:W)" $(CC_RSPLINE))" $(.CC.FILTER) -} - - -rule compile.c++ ( targets + : sources * : properties * ) -{ - get-rspline $(targets) : -TP ; - compile-c-c++ $(<) : $(>) [ on $(<) return $(PCH_FILE) ] [ on $(<) return $(PCH_HEADER) ] ; -} - -rule compile.c++.preprocess ( targets + : sources * : properties * ) -{ - get-rspline $(targets) : -TP ; - preprocess-c-c++ $(<) : $(>) [ on $(<) return $(PCH_FILE) ] [ on $(<) return $(PCH_HEADER) ] ; -} - - -rule compile.c++.pch ( targets + : sources * : properties * ) -{ - get-rspline $(targets[1]) : -TP ; - get-rspline $(targets[2]) : -TP ; - local pch-source = [ on $(<) return $(PCH_SOURCE) ] ; - if $(pch-source) - { - DEPENDS $(<) : $(pch-source) ; - compile-c-c++-pch-s $(targets) : $(sources) $(pch-source) ; - } - else - { - compile-c-c++-pch $(targets) : $(sources) ; - } -} - - -# See midl.jam for details. -# -actions compile.idl -{ - $(.IDL) /nologo @"@($(<[1]:W).rsp:E=$(.nl)"$(>:W)" $(.nl)-D$(DEFINES) $(.nl)"-I$(INCLUDES:W)" $(.nl)-U$(UNDEFS) $(.nl)$(MIDLFLAGS) $(.nl)/tlb "$(<[1]:W)" $(.nl)/h "$(<[2]:W)" $(.nl)/iid "$(<[3]:W)" $(.nl)/proxy "$(<[4]:W)" $(.nl)/dlldata "$(<[5]:W)")" - $(.TOUCH_FILE) "$(<[4]:W)" - $(.TOUCH_FILE) "$(<[5]:W)" -} - - -actions compile.mc -{ - $(.MC) $(MCFLAGS) -h "$(<[1]:DW)" -r "$(<[2]:DW)" "$(>:W)" -} - - -actions compile.rc -{ - $(.RC) -l 0x409 -U$(UNDEFS) -D$(DEFINES) -I"$(INCLUDES:W)" -fo "$(<:W)" "$(>:W)" -} - - -rule link ( targets + : sources * : properties * ) -{ - if <embed-manifest>on in $(properties) - { - msvc.manifest $(targets) : $(sources) : $(properties) ; - } -} - -rule link.dll ( targets + : sources * : properties * ) -{ - DEPENDS $(<) : [ on $(<) return $(DEF_FILE) ] ; - if <embed-manifest>on in $(properties) - { - msvc.manifest.dll $(targets) : $(sources) : $(properties) ; - } -} - -# Incremental linking a DLL causes no end of problems: if the actual exports do -# not change, the import .lib file is never updated. Therefore, the .lib is -# always out-of-date and gets rebuilt every time. I am not sure that incremental -# linking is such a great idea in general, but in this case I am sure we do not -# want it. - -# Windows manifest is a new way to specify dependencies on managed DotNet -# assemblies and Windows native DLLs. The manifests are embedded as resources -# and are useful in any PE target (both DLL and EXE). 
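-#
-# For illustration (hypothetical target and source names), embedding is
-# controlled per target through the embed-manifest feature declared near the
-# top of this module, e.g.:
-#
-#   exe my_app : my_app.cpp : <toolset>msvc <embed-manifest>off ;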
- -if [ os.name ] in NT -{ - actions link bind DEF_FILE LIBRARIES_MENTIONED_BY_FILE - { - $(.LD) $(LINKFLAGS) /out:"$(<[1]:W)" /LIBPATH:"$(LINKPATH:W)" $(OPTIONS) @"@($(<[1]:W).rsp:E=$(.nl)"$(>)" $(.nl)$(LIBRARIES_MENTIONED_BY_FILE) $(.nl)$(LIBRARIES) $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST).lib" $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA).lib")" - if %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% - } - - actions manifest - { - if exist "$(<[1]).manifest" ( - $(.MT) -manifest "$(<[1]).manifest" "-outputresource:$(<[1]);1" - ) - } - - actions link.dll bind DEF_FILE LIBRARIES_MENTIONED_BY_FILE - { - $(.LD) /DLL $(LINKFLAGS) /out:"$(<[1]:W)" /IMPLIB:"$(<[2]:W)" /LIBPATH:"$(LINKPATH:W)" /def:"$(DEF_FILE)" $(OPTIONS) @"@($(<[1]:W).rsp:E=$(.nl)"$(>)" $(.nl)$(LIBRARIES_MENTIONED_BY_FILE) $(.nl)$(LIBRARIES) $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST).lib" $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA).lib")" - if %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% - } - - actions manifest.dll - { - if exist "$(<[1]).manifest" ( - $(.MT) -manifest "$(<[1]).manifest" "-outputresource:$(<[1]);2" - ) - } -} -else -{ - actions link bind DEF_FILE LIBRARIES_MENTIONED_BY_FILE - { - $(.LD) $(LINKFLAGS) /out:"$(<[1]:W)" /LIBPATH:"$(LINKPATH:W)" $(OPTIONS) @"@($(<[1]:W).rsp:E=$(.nl)"$(>)" $(.nl)$(LIBRARIES_MENTIONED_BY_FILE) $(.nl)$(LIBRARIES) $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST).lib" $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA).lib")" - } - - actions manifest - { - if test -e "$(<[1]).manifest"; then - $(.MT) -manifest "$(<[1]:W).manifest" "-outputresource:$(<[1]:W);1" - fi - } - - actions link.dll bind DEF_FILE LIBRARIES_MENTIONED_BY_FILE - { - $(.LD) /DLL $(LINKFLAGS) /out:"$(<[1]:W)" /IMPLIB:"$(<[2]:W)" /LIBPATH:"$(LINKPATH:W)" /def:"$(DEF_FILE)" $(OPTIONS) @"@($(<[1]:W).rsp:E=$(.nl)"$(>)" $(.nl)$(LIBRARIES_MENTIONED_BY_FILE) $(.nl)$(LIBRARIES) $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_ST).lib" $(.nl)"$(LIBRARY_OPTION)$(FINDLIBS_SA).lib")" - } - - actions manifest.dll - { - if test -e "$(<[1]).manifest"; then - $(.MT) -manifest "$(<[1]:W).manifest" "-outputresource:$(<[1]:W);2" - fi - } -} - -# this rule sets up the pdb file that will be used when generating static -# libraries and the debug-store option is database, so that the compiler -# puts all debug info into a single .pdb file named after the library -# -# Poking at source targets this way is probably not clean, but it's the -# easiest approach. -rule archive ( targets + : sources * : properties * ) -{ - PDB_NAME on $(>) = $(<:S=.pdb) ; -} - -################################################################################ -# -# Classes. -# -################################################################################ - -class msvc-pch-generator : pch-generator -{ - import property-set ; - - rule run-pch ( project name ? : property-set : sources * ) - { - # Searching for the header and source file in the sources. - local pch-header ; - local pch-source ; - for local s in $(sources) - { - if [ type.is-derived [ $(s).type ] H ] - { - pch-header = $(s) ; - } - else if - [ type.is-derived [ $(s).type ] CPP ] || - [ type.is-derived [ $(s).type ] C ] - { - pch-source = $(s) ; - } - } - - if ! $(pch-header) - { - errors.user-error "can not build pch without pch-header" ; - } - - # If we do not have the PCH source - that is fine. We will just create a - # temporary .cpp file in the action. - - local generated = [ generator.run $(project) $(name) - : [ property-set.create - # Passing of <pch-source> is a dirty trick, needed because - # non-composing generators with multiple inputs are subtly - # broken. 
For more detailed information see: - # https://zigzag.cs.msu.su:7813/boost.build/ticket/111 - <pch-source>$(pch-source) - [ $(property-set).raw ] ] - : $(pch-header) ] ; - - local pch-file ; - for local g in $(generated) - { - if [ type.is-derived [ $(g).type ] PCH ] - { - pch-file = $(g) ; - } - } - - return [ property-set.create <pch-header>$(pch-header) - <pch-file>$(pch-file) ] $(generated) ; - } -} - - -################################################################################ -# -# Local rules. -# -################################################################################ - -# Detects versions listed as '.known-versions' by checking registry information, -# environment variables & default paths. Supports both native Windows and -# Cygwin. -# -local rule auto-detect-toolset-versions ( ) -{ - if [ os.name ] in NT CYGWIN - { - # Get installation paths from the registry. - for local i in $(.known-versions) - { - if $(.version-$(i)-reg) - { - local vc-path ; - for local x in "" "Wow6432Node\\" - { - vc-path += [ W32_GETREG - "HKEY_LOCAL_MACHINE\\SOFTWARE\\"$(x)"\\Microsoft\\"$(.version-$(i)-reg) - : "ProductDir" ] ; - } - - if $(vc-path) - { - vc-path = [ path.join [ path.make-NT $(vc-path[1]) ] "bin" ] ; - register-configuration $(i) : [ path.native $(vc-path[1]) ] ; - } - } - } - } - - # Check environment and default installation paths. - for local i in $(.known-versions) - { - if ! $(i) in [ $(.versions).all ] - { - register-configuration $(i) : [ default-path $(i) ] ; - } - } -} - - -# Worker rule for toolset version configuration. Takes an explicit version id or -# nothing in case it should configure the default toolset version (the first -# registered one or a new 'default' one in case no toolset versions have been -# registered yet). -# -local rule configure-really ( version ? : options * ) -{ - local v = $(version) ; - - # Decide what the 'default' version is. - if ! $(v) - { - # Take the first registered (i.e. auto-detected) version. - version = [ $(.versions).all ] ; - version = $(version[1]) ; - v = $(version) ; - - # Note: 'version' can still be empty at this point if no versions have - # been auto-detected. - version ?= "default" ; - } - - # Version alias -> real version number. - if $(.version-alias-$(version)) - { - version = $(.version-alias-$(version)) ; - } - - # Check whether the selected configuration is already in use. - if $(version) in [ $(.versions).used ] - { - # Allow multiple 'toolset.using' calls for the same configuration if the - # identical sets of options are used. - if $(options) && ( $(options) != [ $(.versions).get $(version) : options ] ) - { - errors.error "MSVC toolset configuration: Toolset version" - "'$(version)' already configured." ; - } - } - else - { - # Register a new configuration. - $(.versions).register $(version) ; - - # Add user-supplied to auto-detected options. - options = [ $(.versions).get $(version) : options ] $(options) ; - - # Mark the configuration as 'used'. - $(.versions).use $(version) ; - - # Generate conditions and save them. - local conditions = [ common.check-init-parameters msvc : version $(v) ] - ; - - $(.versions).set $(version) : conditions : $(conditions) ; - - local command = [ feature.get-values <command> : $(options) ] ; - - # If version is specified, we try to search first in default paths, and - # only then in PATH. 
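-        # For illustration (hypothetical install path), a user-config.jam entry
-        # such as
-        #
-        #   using msvc : 9.0 : "C:/Program Files/Microsoft Visual Studio 9.0/VC/bin/cl.exe" ;
-        #
-        # arrives here with the <command> option already set, short-circuiting
-        # this search.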
-        command = [ common.get-invocation-command msvc : cl.exe : $(command) :
-            [ default-paths $(version) ] : $(version) ] ;
-
-        common.handle-options msvc : $(conditions) : $(command) : $(options) ;
-
-        if ! $(version)
-        {
-            # Even if version is not explicitly specified, try to detect the
-            # version from the path.
-            # FIXME: We currently detect both Microsoft Visual Studio 9.0 and
-            # 9.0express as 9.0 here.
-            if [ MATCH "(Microsoft Visual Studio 10)" : $(command) ]
-            {
-                version = 10.0 ;
-            }
-            else if [ MATCH "(Microsoft Visual Studio 9)" : $(command) ]
-            {
-                version = 9.0 ;
-            }
-            else if [ MATCH "(Microsoft Visual Studio 8)" : $(command) ]
-            {
-                version = 8.0 ;
-            }
-            else if [ MATCH "(NET 2003[\/\\]VC7)" : $(command) ]
-            {
-                version = 7.1 ;
-            }
-            else if [ MATCH "(Microsoft Visual C\\+\\+ Toolkit 2003)" :
-                $(command) ]
-            {
-                version = 7.1toolkit ;
-            }
-            else if [ MATCH "(.NET[\/\\]VC7)" : $(command) ]
-            {
-                version = 7.0 ;
-            }
-            else
-            {
-                version = 6.0 ;
-            }
-        }
-
-        # Generate and register setup command.
-
-        local below-8.0 = [ MATCH ^([67]\\.) : $(version) ] ;
-
-        local cpu = i386 amd64 ia64 ;
-        if $(below-8.0)
-        {
-            cpu = i386 ;
-        }
-
-        local setup-amd64 ;
-        local setup-i386 ;
-        local setup-ia64 ;
-
-        if $(command)
-        {
-            # TODO: Note that if we specify a non-existent toolset version then
-            # this rule may find and use a corresponding compiler executable
-            # belonging to an incorrect toolset version. For example, if you
-            # have only MSVC 7.1 installed, have its executable on the path and
-            # specify you want Boost Build to use MSVC 9.0, then you want Boost
-            # Build to report an error but this may cause it to silently use the
-            # MSVC 7.1 compiler even though it thinks it is using the msvc-9.0
-            # toolset version.
-            command = [ common.get-absolute-tool-path $(command[-1]) ] ;
-        }
-
-        if $(command)
-        {
-            local parent = [ path.make $(command) ] ;
-            parent = [ path.parent $(parent) ] ;
-            parent = [ path.native $(parent) ] ;
-
-            # Setup will be used if the command name has been specified. If
-            # setup is not specified explicitly then a default setup script will
-            # be used instead. Setup scripts may be global or
-            # architecture/platform/cpu specific. Setup options are used only in
-            # case of global setup scripts.
-
-            # Default setup scripts provided with different VC distributions:
-            #
-            # VC 7.1 had only the vcvars32.bat script specific to 32 bit i386
-            # builds. It was located in the bin folder for the regular version
-            # and in the root folder for the free VC 7.1 tools.
-            #
-            # Later 8.0 & 9.0 versions introduce separate platform specific
-            # vcvars*.bat scripts (e.g. 32 bit, 64 bit AMD or 64 bit Itanium)
-            # located in or under the bin folder. Most also include a global
-            # vcvarsall.bat helper script located in the root folder which runs
-            # one of the aforementioned vcvars*.bat scripts based on the options
-            # passed to it. So far only the version coming with some PlatformSDK
-            # distributions does not include this top level script but to
-            # support those we need to fall back to using the worker scripts
-            # directly in case the top level script can not be found.
-
-            local global-setup = [ feature.get-values <setup> : $(options) ] ;
-            global-setup = $(global-setup[1]) ;
-            if ! $(below-8.0)
-            {
-                global-setup ?= [ locate-default-setup $(command) : $(parent) :
-                    vcvarsall.bat ] ;
-            }
-
-            local default-setup-amd64 = vcvarsx86_amd64.bat ;
-            local default-setup-i386 = vcvars32.bat ;
-            local default-setup-ia64 = vcvarsx86_ia64.bat ;
-
-            # http://msdn2.microsoft.com/en-us/library/x4d2c09s(VS.80).aspx and
-            # http://msdn2.microsoft.com/en-us/library/x4d2c09s(vs.90).aspx
-            # mention an x86_IPF option, that seems to be a documentation bug
-            # and x86_ia64 is the correct option.
-            local default-global-setup-options-amd64 = x86_amd64 ;
-            local default-global-setup-options-i386 = x86 ;
-            local default-global-setup-options-ia64 = x86_ia64 ;
-
-            # When using 64-bit Windows, and targeting 64-bit, it is possible to
-            # use a native 64-bit compiler, selected by the "amd64" & "ia64"
-            # parameters to vcvarsall.bat. There are two variables we can use --
-            # PROCESSOR_ARCHITECTURE and PROCESSOR_IDENTIFIER. The first is
-            # 'x86' when running 32-bit Windows, no matter which processor is
-            # used, and 'AMD64' on 64-bit Windows running on an x86-64 processor
-            # (either AMD64 or EM64T).
-            #
-            if [ MATCH ^(AMD64) : [ os.environ PROCESSOR_ARCHITECTURE ] ]
-            {
-                default-global-setup-options-amd64 = amd64 ;
-            }
-            # TODO: The same 'native compiler usage' should be implemented for
-            # the Itanium platform by using the "ia64" parameter. For this
-            # though we need someone with access to this platform who can find
-            # out how to correctly detect this case.
-            else if $(somehow-detect-the-itanium-platform)
-            {
-                default-global-setup-options-ia64 = ia64 ;
-            }
-
-            local setup-prefix = "call " ;
-            local setup-suffix = " >nul"$(.nl) ;
-            if ! [ os.name ] in NT
-            {
-                setup-prefix = "cmd.exe /S /C call " ;
-                setup-suffix = " \">nul\" \"&&\" " ;
-            }
-
-            for local c in $(cpu)
-            {
-                local setup-options ;
-
-                setup-$(c) = [ feature.get-values <setup-$(c)> : $(options) ] ;
-
-                if ! $(setup-$(c))-is-not-empty
-                {
-                    if $(global-setup)-is-not-empty
-                    {
-                        setup-$(c) = $(global-setup) ;
-
-                        # If needed we can easily add using configuration flags
-                        # here for overriding which options get passed to the
-                        # global setup command for which target platform:
-                        # setup-options = [ feature.get-values <setup-options-$(c)> : $(options) ] ;
-
-                        setup-options ?= $(default-global-setup-options-$(c)) ;
-                    }
-                    else
-                    {
-                        setup-$(c) = [ locate-default-setup $(command) : $(parent) : $(default-setup-$(c)) ] ;
-                    }
-                }
-
-                # Cygwin to Windows path translation.
-                setup-$(c) = "\""$(setup-$(c):W)"\"" ;
-
-                # Append setup options to the setup name and add the final setup
-                # prefix & suffix.
-                setup-options ?= "" ;
-                setup-$(c) = $(setup-prefix)$(setup-$(c):J=" ")" "$(setup-options:J=" ")$(setup-suffix) ;
-            }
-        }
-
-        # Get tool names (if any) and finish setup.
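-        # For illustration (hypothetical choices), each of these tool names can
-        # be overridden when configuring the toolset, e.g.:
-        #
-        #   using msvc : 9.0 : : <compiler>cl <linker>link <assembler>ml64 ;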
- - compiler = [ feature.get-values <compiler> : $(options) ] ; - compiler ?= cl ; - - linker = [ feature.get-values <linker> : $(options) ] ; - linker ?= link ; - - resource-compiler = [ feature.get-values <resource-compiler> : $(options) ] ; - resource-compiler ?= rc ; - - # Turn on some options for i386 assembler - # -coff generate COFF format object file (compatible with cl.exe output) - local default-assembler-amd64 = ml64 ; - local default-assembler-i386 = "ml -coff" ; - local default-assembler-ia64 = ias ; - - assembler = [ feature.get-values <assembler> : $(options) ] ; - - idl-compiler = [ feature.get-values <idl-compiler> : $(options) ] ; - idl-compiler ?= midl ; - - mc-compiler = [ feature.get-values <mc-compiler> : $(options) ] ; - mc-compiler ?= mc ; - - manifest-tool = [ feature.get-values <manifest-tool> : $(options) ] ; - manifest-tool ?= mt ; - - local cc-filter = [ feature.get-values <compiler-filter> : $(options) ] ; - - for local c in $(cpu) - { - # Setup script is not required in some configurations. - setup-$(c) ?= "" ; - - local cpu-conditions = $(conditions)/$(.cpu-arch-$(c)) ; - - if $(.debug-configuration) - { - for local cpu-condition in $(cpu-conditions) - { - ECHO "notice: [msvc-cfg] condition: '$(cpu-condition)', setup: '$(setup-$(c))'" ; - } - } - - local cpu-assembler = $(assembler) ; - cpu-assembler ?= $(default-assembler-$(c)) ; - - toolset.flags msvc.compile .CC $(cpu-conditions) : $(setup-$(c))$(compiler) /Zm800 -nologo ; - toolset.flags msvc.compile .RC $(cpu-conditions) : $(setup-$(c))$(resource-compiler) ; - toolset.flags msvc.compile .ASM $(cpu-conditions) : $(setup-$(c))$(cpu-assembler) -nologo ; - toolset.flags msvc.link .LD $(cpu-conditions) : $(setup-$(c))$(linker) /NOLOGO /INCREMENTAL:NO ; - toolset.flags msvc.archive .LD $(cpu-conditions) : $(setup-$(c))$(linker) /lib /NOLOGO ; - toolset.flags msvc.compile .IDL $(cpu-conditions) : $(setup-$(c))$(idl-compiler) ; - toolset.flags msvc.compile .MC $(cpu-conditions) : $(setup-$(c))$(mc-compiler) ; - - toolset.flags msvc.link .MT $(cpu-conditions) : $(setup-$(c))$(manifest-tool) -nologo ; - - if $(cc-filter) - { - toolset.flags msvc .CC.FILTER $(cpu-conditions) : "|" $(cc-filter) ; - } - } - - # Set version-specific flags. - configure-version-specific msvc : $(version) : $(conditions) ; - } -} - - -# Returns the default installation path for the given version. -# -local rule default-path ( version ) -{ - # Use auto-detected path if possible. - local path = [ feature.get-values <command> : [ $(.versions).get $(version) - : options ] ] ; - - if $(path) - { - path = $(path:D) ; - } - else - { - # Check environment. - if $(.version-$(version)-env) - { - local vc-path = [ os.environ $(.version-$(version)-env) ] ; - if $(vc-path) - { - vc-path = [ path.make $(vc-path) ] ; - vc-path = [ path.join $(vc-path) $(.version-$(version)-envpath) ] ; - vc-path = [ path.native $(vc-path) ] ; - - path = $(vc-path) ; - } - } - - # Check default path. - if ! $(path) && $(.version-$(version)-path) - { - path = [ path.native [ path.join $(.ProgramFiles) $(.version-$(version)-path) ] ] ; - } - } - - return $(path) ; -} - - -# Returns either the default installation path (if 'version' is not empty) or -# list of all known default paths (if no version is given) -# -local rule default-paths ( version ? 
)
-{
-    local possible-paths ;
-
-    if $(version)
-    {
-        possible-paths += [ default-path $(version) ] ;
-    }
-    else
-    {
-        for local i in $(.known-versions)
-        {
-            possible-paths += [ default-path $(i) ] ;
-        }
-    }
-
-    return $(possible-paths) ;
-}
-
-
-rule get-rspline ( target : lang-opt )
-{
-    CC_RSPLINE on $(target) = [ on $(target) return $(lang-opt) -U$(UNDEFS)
-        $(CFLAGS) $(C++FLAGS) $(OPTIONS) -c $(.nl)-D$(DEFINES)
-        $(.nl)\"-I$(INCLUDES:W)\" ] ;
-}
-
-class msvc-linking-generator : linking-generator
-{
-    # Calls the base version. If necessary, also creates a target for the
-    # manifest file, specifying the source's name as the name of the created
-    # target.
-    rule generated-targets ( sources + : property-set : project name ? )
-    {
-        local result = [ linking-generator.generated-targets $(sources)
-            : $(property-set) : $(project) $(name) ] ;
-
-        if $(result)
-        {
-            local name-main = [ $(result[0]).name ] ;
-            local action = [ $(result[0]).action ] ;
-
-            if [ $(property-set).get <debug-symbols> ] = "on"
-            {
-                # We force an exact name on the PDB. The reason is tagging -- the
-                # tag rule may reasonably special case some target types, like
-                # SHARED_LIB. The tag rule will not catch PDB, and it cannot even
-                # easily figure out whether the PDB is paired with a SHARED_LIB or
-                # an EXE or something else. Because the PDB always gets the same
-                # name as the main target, with .pdb as extension, just force it.
-                local target = [ class.new file-target $(name-main:S=.pdb) exact : PDB : $(project) : $(action) ] ;
-                local registered-target = [ virtual-target.register $(target) ] ;
-                if $(target) != $(registered-target)
-                {
-                    $(action).replace-targets $(target) : $(registered-target) ;
-                }
-                result += $(registered-target) ;
-            }
-
-            if [ $(property-set).get <embed-manifest> ] = "off"
-            {
-                # The manifest is an evil target. It has .manifest appended to the
-                # name of the main target, including the extension, e.g.
-                # a.exe.manifest. We use the 'exact' name to achieve this effect.
-                local target = [ class.new file-target $(name-main).manifest exact : MANIFEST : $(project) : $(action) ] ;
-                local registered-target = [ virtual-target.register $(target) ] ;
-                if $(target) != $(registered-target)
-                {
-                    $(action).replace-targets $(target) : $(registered-target) ;
-                }
-                result += $(registered-target) ;
-            }
-        }
-        return $(result) ;
-    }
-}
-
-
-
-# Unsafe worker rule for the register-toolset() rule. Must not be called
-# multiple times.
-#
-local rule register-toolset-really ( )
-{
-    feature.extend toolset : msvc ;
-
-    # Intel and msvc supposedly have link-compatible objects.
-    feature.subfeature toolset msvc : vendor : intel : propagated optional ;
-
-    # Inherit MIDL flags.
-    toolset.inherit-flags msvc : midl ;
-
-    # Inherit MC flags.
-    toolset.inherit-flags msvc : mc ;
-
-    # Dynamic runtime comes only in MT flavour.
-    toolset.add-requirements
-        <toolset>msvc,<runtime-link>shared:<threading>multi ;
-
-    # Declare msvc toolset specific features.
-    {
-        feature.feature debug-store : object database : propagated ;
-        feature.feature pch-source : : dependency free ;
-    }
-
-    # Declare generators.
-    {
-        # TODO: Is it possible to combine these? Make the generators
-        # non-composing so that they do not convert each source into a separate
-        # .rsp file.
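-        # Each registration below maps a set of source types to a produced type
-        # for <toolset>msvc; the first one, for instance, is what lets BBv2
-        # turn OBJ files and libraries into an EXE via the msvc.link action.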
- generators.register [ new msvc-linking-generator - msvc.link : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB : EXE : <toolset>msvc ] ; - generators.register [ new msvc-linking-generator - msvc.link.dll : OBJ SEARCHED_LIB STATIC_LIB IMPORT_LIB : SHARED_LIB IMPORT_LIB : <toolset>msvc ] ; - - generators.register-archiver msvc.archive : OBJ : STATIC_LIB : <toolset>msvc ; - generators.register-c-compiler msvc.compile.c++ : CPP : OBJ : <toolset>msvc ; - generators.register-c-compiler msvc.compile.c : C : OBJ : <toolset>msvc ; - generators.register-c-compiler msvc.compile.c++.preprocess : CPP : PREPROCESSED_CPP : <toolset>msvc ; - generators.register-c-compiler msvc.compile.c.preprocess : C : PREPROCESSED_C : <toolset>msvc ; - - # Using 'register-c-compiler' adds the build directory to INCLUDES. - generators.register-c-compiler msvc.compile.rc : RC : OBJ(%_res) : <toolset>msvc ; - generators.override msvc.compile.rc : rc.compile.resource ; - generators.register-standard msvc.compile.asm : ASM : OBJ : <toolset>msvc ; - - generators.register-c-compiler msvc.compile.idl : IDL : MSTYPELIB H C(%_i) C(%_proxy) C(%_dlldata) : <toolset>msvc ; - generators.override msvc.compile.idl : midl.compile.idl ; - - generators.register-standard msvc.compile.mc : MC : H RC : <toolset>msvc ; - generators.override msvc.compile.mc : mc.compile ; - - # Note: the 'H' source type will catch both '.h' and '.hpp' headers as - # the latter have their HPP type derived from H. The type of compilation - # is determined entirely by the destination type. - generators.register [ new msvc-pch-generator msvc.compile.c.pch : H : C_PCH OBJ : <pch>on <toolset>msvc ] ; - generators.register [ new msvc-pch-generator msvc.compile.c++.pch : H : CPP_PCH OBJ : <pch>on <toolset>msvc ] ; - - generators.override msvc.compile.c.pch : pch.default-c-pch-generator ; - generators.override msvc.compile.c++.pch : pch.default-cpp-pch-generator ; - } - - toolset.flags msvc.compile PCH_FILE <pch>on : <pch-file> ; - toolset.flags msvc.compile PCH_SOURCE <pch>on : <pch-source> ; - toolset.flags msvc.compile PCH_HEADER <pch>on : <pch-header> ; - - # - # Declare flags for compilation. 
- # - - toolset.flags msvc.compile CFLAGS <optimization>speed : /O2 ; - toolset.flags msvc.compile CFLAGS <optimization>space : /O1 ; - - toolset.flags msvc.compile CFLAGS $(.cpu-arch-ia64)/<instruction-set>$(.cpu-type-itanium) : /G1 ; - toolset.flags msvc.compile CFLAGS $(.cpu-arch-ia64)/<instruction-set>$(.cpu-type-itanium2) : /G2 ; - - toolset.flags msvc.compile CFLAGS <debug-symbols>on/<debug-store>object : /Z7 ; - toolset.flags msvc.compile CFLAGS <debug-symbols>on/<debug-store>database : /Zi ; - toolset.flags msvc.compile CFLAGS <optimization>off : /Od ; - toolset.flags msvc.compile CFLAGS <inlining>off : /Ob0 ; - toolset.flags msvc.compile CFLAGS <inlining>on : /Ob1 ; - toolset.flags msvc.compile CFLAGS <inlining>full : /Ob2 ; - - toolset.flags msvc.compile CFLAGS <warnings>on : /W3 ; - toolset.flags msvc.compile CFLAGS <warnings>off : /W0 ; - toolset.flags msvc.compile CFLAGS <warnings>all : /W4 ; - toolset.flags msvc.compile CFLAGS <warnings-as-errors>on : /WX ; - - toolset.flags msvc.compile C++FLAGS <exception-handling>on/<asynch-exceptions>off/<extern-c-nothrow>off : /EHs ; - toolset.flags msvc.compile C++FLAGS <exception-handling>on/<asynch-exceptions>off/<extern-c-nothrow>on : /EHsc ; - toolset.flags msvc.compile C++FLAGS <exception-handling>on/<asynch-exceptions>on/<extern-c-nothrow>off : /EHa ; - toolset.flags msvc.compile C++FLAGS <exception-handling>on/<asynch-exceptions>on/<extern-c-nothrow>on : /EHac ; - - # By default 8.0 enables rtti support while prior versions disabled it. We - # simply enable or disable it explicitly so we do not have to depend on this - # default behaviour. - toolset.flags msvc.compile CFLAGS <rtti>on : /GR ; - toolset.flags msvc.compile CFLAGS <rtti>off : /GR- ; - toolset.flags msvc.compile CFLAGS <runtime-debugging>off/<runtime-link>shared : /MD ; - toolset.flags msvc.compile CFLAGS <runtime-debugging>on/<runtime-link>shared : /MDd ; - - toolset.flags msvc.compile CFLAGS <runtime-debugging>off/<runtime-link>static/<threading>multi : /MT ; - toolset.flags msvc.compile CFLAGS <runtime-debugging>on/<runtime-link>static/<threading>multi : /MTd ; - - toolset.flags msvc.compile OPTIONS <cflags> : ; - toolset.flags msvc.compile.c++ OPTIONS <cxxflags> : ; - - toolset.flags msvc.compile PDB_CFLAG <debug-symbols>on/<debug-store>database : /Fd ; - - toolset.flags msvc.compile DEFINES <define> ; - toolset.flags msvc.compile UNDEFS <undef> ; - toolset.flags msvc.compile INCLUDES <include> ; - - # Declare flags for the assembler. - toolset.flags msvc.compile.asm USER_ASMFLAGS <asmflags> ; - - toolset.flags msvc.compile.asm ASMFLAGS <debug-symbols>on : "/Zi /Zd" ; - - toolset.flags msvc.compile.asm ASMFLAGS <warnings>on : /W3 ; - toolset.flags msvc.compile.asm ASMFLAGS <warnings>off : /W0 ; - toolset.flags msvc.compile.asm ASMFLAGS <warnings>all : /W4 ; - toolset.flags msvc.compile.asm ASMFLAGS <warnings-as-errors>on : /WX ; - - toolset.flags msvc.compile.asm DEFINES <define> ; - - # Declare flags for linking. - { - toolset.flags msvc.link PDB_LINKFLAG <debug-symbols>on/<debug-store>database : /PDB: ; # not used yet - toolset.flags msvc.link LINKFLAGS <debug-symbols>on : /DEBUG ; - toolset.flags msvc.link DEF_FILE <def-file> ; - - # The linker disables the default optimizations when using /DEBUG so we - # have to enable them manually for release builds with debug symbols. 
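-        # (/OPT:REF drops unreferenced functions and data and /OPT:ICF folds
-        # identical COMDAT sections; both are linker defaults that /DEBUG would
-        # otherwise turn off.)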
- toolset.flags msvc LINKFLAGS <debug-symbols>on/<runtime-debugging>off : /OPT:REF,ICF ; - - toolset.flags msvc LINKFLAGS <user-interface>console : /subsystem:console ; - toolset.flags msvc LINKFLAGS <user-interface>gui : /subsystem:windows ; - toolset.flags msvc LINKFLAGS <user-interface>wince : /subsystem:windowsce ; - toolset.flags msvc LINKFLAGS <user-interface>native : /subsystem:native ; - toolset.flags msvc LINKFLAGS <user-interface>auto : /subsystem:posix ; - - toolset.flags msvc.link OPTIONS <linkflags> ; - toolset.flags msvc.link LINKPATH <library-path> ; - - toolset.flags msvc.link FINDLIBS_ST <find-static-library> ; - toolset.flags msvc.link FINDLIBS_SA <find-shared-library> ; - toolset.flags msvc.link LIBRARY_OPTION <toolset>msvc : "" : unchecked ; - toolset.flags msvc.link LIBRARIES_MENTIONED_BY_FILE : <library-file> ; - } - - toolset.flags msvc.archive AROPTIONS <archiveflags> ; -} - - -# Locates the requested setup script under the given folder and returns its full -# path or nothing in case the script can not be found. In case multiple scripts -# are found only the first one is returned. -# -# TODO: There used to exist a code comment for the msvc.init rule stating that -# we do not correctly detect the location of the vcvars32.bat setup script for -# the free VC7.1 tools in case user explicitly provides a path. This should be -# tested or simply remove this whole comment in case this toolset version is no -# longer important. -# -local rule locate-default-setup ( command : parent : setup-name ) -{ - local result = [ GLOB $(command) $(parent) : $(setup-name) ] ; - if $(result[1]) - { - return $(result[1]) ; - } -} - - -# Validates given path, registers found configuration and prints debug -# information about it. -# -local rule register-configuration ( version : path ? ) -{ - if $(path) - { - local command = [ GLOB $(path) : cl.exe ] ; - - if $(command) - { - if $(.debug-configuration) - { - ECHO "notice: [msvc-cfg] msvc-$(version) detected, command: '$(command)'" ; - } - - $(.versions).register $(version) ; - $(.versions).set $(version) : options : <command>$(command) ; - } - } -} - - -################################################################################ -# -# Startup code executed when loading this module. -# -################################################################################ - -if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ] -{ - .debug-configuration = true ; -} - -# Miscellaneous constants. -.RM = [ common.rm-command ] ; -.nl = " -" ; -.ProgramFiles = [ path.make [ common.get-program-files-dir ] ] ; -.escaped-double-quote = "\"" ; -.TOUCH_FILE = [ common.file-touch-command ] ; - -# List of all registered configurations. -.versions = [ new configurations ] ; - -# Supported CPU architectures. -.cpu-arch-i386 = - <architecture>/<address-model> - <architecture>/<address-model>32 - <architecture>x86/<address-model> - <architecture>x86/<address-model>32 ; - -.cpu-arch-amd64 = - <architecture>/<address-model>64 - <architecture>x86/<address-model>64 ; - -.cpu-arch-ia64 = - <architecture>ia64/<address-model> - <architecture>ia64/<address-model>64 ; - - -# Supported CPU types (only Itanium optimization options are supported from -# VC++ 2005 on). See -# http://msdn2.microsoft.com/en-us/library/h66s5s0e(vs.90).aspx for more -# detailed information. 
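-# Each .cpu-type-* list enumerates the <instruction-set> values that share one
-# msvc code-generation switch; e.g. <instruction-set>pentium3 falls into
-# .cpu-type-g6, which configure-version-specific maps to /G6 on VC 6.x/7.x.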
-.cpu-type-g5 = i586 pentium pentium-mmx ; -.cpu-type-g6 = i686 pentiumpro pentium2 pentium3 pentium3m pentium-m k6 - k6-2 k6-3 winchip-c6 winchip2 c3 c3-2 ; -.cpu-type-em64t = prescott nocona conroe conroe-xe conroe-l allendale mermon - mermon-xe kentsfield kentsfield-xe penryn wolfdale - yorksfield nehalem ; -.cpu-type-amd64 = k8 opteron athlon64 athlon-fx ; -.cpu-type-g7 = pentium4 pentium4m athlon athlon-tbird athlon-4 athlon-xp - athlon-mp $(.cpu-type-em64t) $(.cpu-type-amd64) ; -.cpu-type-itanium = itanium itanium1 merced ; -.cpu-type-itanium2 = itanium2 mckinley ; - - -# Known toolset versions, in order of preference. -.known-versions = 10.0 10.0express 9.0 9.0express 8.0 8.0express 7.1 7.1toolkit 7.0 6.0 ; - -# Version aliases. -.version-alias-6 = 6.0 ; -.version-alias-6.5 = 6.0 ; -.version-alias-7 = 7.0 ; -.version-alias-8 = 8.0 ; -.version-alias-9 = 9.0 ; -.version-alias-10 = 10.0 ; - -# Names of registry keys containing the Visual C++ installation path (relative -# to "HKEY_LOCAL_MACHINE\SOFTWARE\\Microsoft"). -.version-6.0-reg = "VisualStudio\\6.0\\Setup\\Microsoft Visual C++" ; -.version-7.0-reg = "VisualStudio\\7.0\\Setup\\VC" ; -.version-7.1-reg = "VisualStudio\\7.1\\Setup\\VC" ; -.version-8.0-reg = "VisualStudio\\8.0\\Setup\\VC" ; -.version-8.0express-reg = "VCExpress\\8.0\\Setup\\VC" ; -.version-9.0-reg = "VisualStudio\\9.0\\Setup\\VC" ; -.version-9.0express-reg = "VCExpress\\9.0\\Setup\\VC" ; -.version-10.0-reg = "VisualStudio\\10.0\\Setup\\VC" ; -.version-10.0express-reg = "VCExpress\\10.0\\Setup\\VC" ; - -# Visual C++ Toolkit 2003 does not store its installation path in the registry. -# The environment variable 'VCToolkitInstallDir' and the default installation -# path will be checked instead. -.version-7.1toolkit-path = "Microsoft Visual C++ Toolkit 2003" "bin" ; -.version-7.1toolkit-env = VCToolkitInstallDir ; - -# Path to the folder containing "cl.exe" relative to the value of the -# corresponding environment variable. -.version-7.1toolkit-envpath = "bin" ; - - -# Auto-detect all the available msvc installations on the system. -auto-detect-toolset-versions ; - - -# And finally trigger the actual Boost Build toolset registration. -register-toolset ; diff --git a/jam-files/boost-build/tools/notfile.jam b/jam-files/boost-build/tools/notfile.jam deleted file mode 100644 index 97a5b0e8..00000000 --- a/jam-files/boost-build/tools/notfile.jam +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2005 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import "class" : new ; -import generators ; -import project ; -import targets ; -import toolset ; -import type ; - - -type.register NOTFILE_MAIN ; - - -class notfile-generator : generator -{ - rule __init__ ( * : * ) - { - generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule run ( project name ? : property-set : sources * : multiple ? 
)
-{
-    local action ;
-    local action-name = [ $(property-set).get <action> ] ;
-
-    local m = [ MATCH ^@(.*) : $(action-name) ] ;
-
-    if $(m)
-    {
-        action = [ new action $(sources) : $(m[1])
-            : $(property-set) ] ;
-    }
-    else
-    {
-        action = [ new action $(sources) : notfile.run
-            : $(property-set) ] ;
-    }
-    return [ virtual-target.register
-        [ new notfile-target $(name) : $(project) : $(action) ] ] ;
-    }
-}
-
-
-generators.register [ new notfile-generator notfile.main : : NOTFILE_MAIN ] ;
-
-
-toolset.flags notfile.run ACTION : <action> ;
-
-
-actions run
-{
-    $(ACTION)
-}
-
-
-rule notfile ( target-name : action + : sources * : requirements * : default-build * )
-{
-    local project = [ project.current ] ;
-
-    requirements += <action>$(action) ;
-
-    targets.main-target-alternative
-        [ new typed-target $(target-name) : $(project) : NOTFILE_MAIN
-            : [ targets.main-target-sources $(sources) : $(target-name) ]
-            : [ targets.main-target-requirements $(requirements) : $(project) ]
-            : [ targets.main-target-default-build $(default-build) : $(project) ]
-        ] ;
-}
-
-IMPORT $(__name__) : notfile : : notfile ;
diff --git a/jam-files/boost-build/tools/notfile.py b/jam-files/boost-build/tools/notfile.py
deleted file mode 100644
index afbf68fb..00000000
--- a/jam-files/boost-build/tools/notfile.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Status: ported.
-# Base revision: 64429.
-#
-# Copyright (c) 2005-2010 Vladimir Prus.
-#
-# Use, modification and distribution is subject to the Boost Software
-# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
-# http://www.boost.org/LICENSE_1_0.txt)
-
-
-import b2.build.type as type
-import b2.build.generators as generators
-import b2.build.virtual_target as virtual_target
-import b2.build.toolset as toolset
-import b2.build.targets as targets
-
-from b2.manager import get_manager
-from b2.util import bjam_signature
-
-type.register("NOTFILE_MAIN")
-
-class NotfileGenerator(generators.Generator):
-
-    def run(self, project, name, ps, sources):
-        action_name = ps.get('action')[0]
-        if action_name[0] == '@':
-            action = virtual_target.Action(get_manager(), sources, action_name[1:], ps)
-        else:
-            action = virtual_target.Action(get_manager(), sources, "notfile.run", ps)
-
-        return [get_manager().virtual_targets().register(
-            virtual_target.NotFileTarget(name, project, action))]
-
-generators.register(NotfileGenerator("notfile.main", False, [], ["NOTFILE_MAIN"]))
-
-toolset.flags("notfile.run", "ACTION", [], ["<action>"])
-
-get_manager().engine().register_action("notfile.run", "$(ACTION)")
-
-@bjam_signature((["target_name"], ["action"], ["sources", "*"], ["requirements", "*"],
-    ["default_build", "*"]))
-def notfile(target_name, action, sources, requirements, default_build):
-
-    requirements.append("<action>" + action)
-
-    return targets.create_typed_metatarget(target_name, "NOTFILE_MAIN", sources, requirements,
-        default_build, [])
-
-
-get_manager().projects().add_rule("notfile", notfile)
diff --git a/jam-files/boost-build/tools/package.jam b/jam-files/boost-build/tools/package.jam
deleted file mode 100644
index 198c2231..00000000
--- a/jam-files/boost-build/tools/package.jam
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright (c) 2005 Vladimir Prus.
-# Copyright 2006 Rene Rivera.
-#
-# Use, modification and distribution is subject to the Boost Software
-# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
-# http://www.boost.org/LICENSE_1_0.txt)
-
-# Provides a mechanism for installing whole packages into a specific directory
-# structure.
This is opposed to the 'install' rule, that installs a number of -# targets to a single directory, and does not care about directory structure at -# all. - -# Example usage: -# -# package.install boost : <properties> -# : <binaries> -# : <libraries> -# : <headers> -# ; -# -# This will install binaries, libraries and headers to the 'proper' location, -# given by command line options --prefix, --exec-prefix, --bindir, --libdir and -# --includedir. -# -# The rule is just a convenient wrapper, avoiding the need to define several -# 'install' targets. -# -# The only install-related feature is <install-source-root>. It will apply to -# headers only and if present, paths of headers relatively to source root will -# be retained after installing. If it is not specified, then "." is assumed, so -# relative paths in headers are always preserved. - -import "class" : new ; -import option ; -import project ; -import feature ; -import property ; -import stage ; -import targets ; -import modules ; - -feature.feature install-default-prefix : : free incidental ; - -rule install ( name package-name ? : requirements * : binaries * : libraries * : headers * ) -{ - package-name ?= $(name) ; - if [ MATCH --prefix=(.*) : [ modules.peek : ARGV ] ] - { - # If --prefix is explicitly specified on the command line, - # then we need wipe away any settings of libdir/includir that - # is specified via options in config files. - option.set bindir : ; - option.set libdir : ; - option.set includedir : ; - } - - # If <install-source-root> is not specified, all headers are installed to - # prefix/include, no matter what their relative path is. Sometimes that is - # what is needed. - local install-source-root = [ property.select <install-source-root> : - $(requirements) ] ; - install-source-root = $(install-source-root:G=) ; - requirements = [ property.change $(requirements) : <install-source-root> ] ; - - local install-header-subdir = [ property.select <install-header-subdir> : - $(requirements) ] ; - install-header-subdir = /$(install-header-subdir:G=) ; - install-header-subdir ?= "" ; - requirements = [ property.change $(requirements) : <install-header-subdir> ] - ; - - # First, figure out all locations. Use the default if no prefix option - # given. - local prefix = [ get-prefix $(name) : $(requirements) ] ; - - # Architecture dependent files. - local exec-locate = [ option.get exec-prefix : $(prefix) ] ; - - # Binaries. - local bin-locate = [ option.get bindir : $(prefix)/bin ] ; - - # Object code libraries. - local lib-locate = [ option.get libdir : $(prefix)/lib ] ; - - # Source header files. - local include-locate = [ option.get includedir : $(prefix)/include ] ; - - stage.install $(name)-bin : $(binaries) : $(requirements) - <location>$(bin-locate) ; - alias $(name)-lib : $(name)-lib-shared $(name)-lib-static ; - - # Since the install location of shared libraries differs on universe - # and cygwin, use target alternatives to make different targets. - # We should have used indirection conditioanl requirements, but it's - # awkward to pass bin-locate and lib-locate from there to another rule. - alias $(name)-lib-shared : $(name)-lib-shared-universe ; - alias $(name)-lib-shared : $(name)-lib-shared-cygwin : <target-os>cygwin ; - - # For shared libraries, we install both explicitly specified one and the - # shared libraries that the installed executables depend on. 
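
The location logic in the install rule reduces to: take --prefix (or a platform default), then let --exec-prefix, --bindir, --libdir and --includedir override the conventional subdirectories. A sketch under those assumptions, with a plain options dict standing in for Boost.Build's option module:

    import os

    def install_locations(prefix, options=None):
        # bindir/libdir/includedir default to subdirectories of the prefix
        # unless explicitly overridden on the command line.
        options = options or {}
        return {
            "exec-prefix": options.get("exec-prefix", prefix),
            "bin": options.get("bindir", os.path.join(prefix, "bin")),
            "lib": options.get("libdir", os.path.join(prefix, "lib")),
            "include": options.get("includedir", os.path.join(prefix, "include")),
        }

    print(install_locations("/usr/local")["bin"])   # /usr/local/bin
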
- stage.install $(name)-lib-shared-universe : $(binaries) $(libraries) : $(requirements) - <location>$(lib-locate) <install-dependencies>on <install-type>SHARED_LIB ; - stage.install $(name)-lib-shared-cygwin : $(binaries) $(libraries) : $(requirements) - <location>$(bin-locate) <install-dependencies>on <install-type>SHARED_LIB ; - - # For static libraries, we do not care about executable dependencies, since - # static libraries are already incorporated into them. - stage.install $(name)-lib-static : $(libraries) : $(requirements) - <location>$(lib-locate) <install-dependencies>on <install-type>STATIC_LIB ; - stage.install $(name)-headers : $(headers) : $(requirements) - <location>$(include-locate)$(install-header-subdir) - <install-source-root>$(install-source-root) ; - alias $(name) : $(name)-bin $(name)-lib $(name)-headers ; - - local c = [ project.current ] ; - local project-module = [ $(c).project-module ] ; - module $(project-module) - { - explicit $(1)-bin $(1)-lib $(1)-headers $(1) $(1)-lib-shared $(1)-lib-static - $(1)-lib-shared-universe $(1)-lib-shared-cygwin ; - } -} - -rule install-data ( target-name : package-name : data * : requirements * ) -{ - package-name ?= target-name ; - if [ MATCH --prefix=(.*) : [ modules.peek : ARGV ] ] - { - # If --prefix is explicitly specified on the command line, - # then we need wipe away any settings of datarootdir - option.set datarootdir : ; - } - - local prefix = [ get-prefix $(package-name) : $(requirements) ] ; - local datadir = [ option.get datarootdir : $(prefix)/share ] ; - - stage.install $(target-name) - : $(data) - : $(requirements) <location>$(datadir)/$(package-name) - ; - - local c = [ project.current ] ; - local project-module = [ $(c).project-module ] ; - module $(project-module) - { - explicit $(1) ; - } -} - -local rule get-prefix ( package-name : requirements * ) -{ - local prefix = [ option.get prefix : [ property.select - <install-default-prefix> : $(requirements) ] ] ; - prefix = $(prefix:G=) ; - requirements = [ property.change $(requirements) : <install-default-prefix> - ] ; - # Or some likely defaults if neither is given. - if ! $(prefix) - { - if [ modules.peek : NT ] { prefix = C:\\$(package-name) ; } - else if [ modules.peek : UNIX ] { prefix = /usr/local ; } - } - return $(prefix) ; -} - diff --git a/jam-files/boost-build/tools/package.py b/jam-files/boost-build/tools/package.py deleted file mode 100644 index aa081b4f..00000000 --- a/jam-files/boost-build/tools/package.py +++ /dev/null @@ -1,168 +0,0 @@ -# Status: ported -# Base revision: 64488 -# -# Copyright (c) 2005, 2010 Vladimir Prus. -# Copyright 2006 Rene Rivera. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# Provides mechanism for installing whole packages into a specific directory -# structure. This is opposed to the 'install' rule, that installs a number of -# targets to a single directory, and does not care about directory structure at -# all. - -# Example usage: -# -# package.install boost : <properties> -# : <binaries> -# : <libraries> -# : <headers> -# ; -# -# This will install binaries, libraries and headers to the 'proper' location, -# given by command line options --prefix, --exec-prefix, --bindir, --libdir and -# --includedir. -# -# The rule is just a convenient wrapper, avoiding the need to define several -# 'install' targets. -# -# The only install-related feature is <install-source-root>. 
It will apply to
-# headers only and if present, paths of headers relative to the source root
-# will be retained after installing. If it is not specified, then "." is
-# assumed, so relative paths in headers are always preserved.
-
-import b2.build.feature as feature
-import b2.build.property as property
-import b2.util.option as option
-import b2.tools.stage as stage
-
-from b2.build.alias import alias
-
-from b2.manager import get_manager
-
-from b2.util import bjam_signature
-from b2.util.utility import ungrist
-
-
-import os
-
-feature.feature("install-default-prefix", [], ["free", "incidental"])
-
-@bjam_signature((["name", "package_name", "?"], ["requirements", "*"],
-    ["binaries", "*"], ["libraries", "*"], ["headers", "*"]))
-def install(name, package_name=None, requirements=[], binaries=[], libraries=[], headers=[]):
-
-    requirements = requirements[:]
-    binaries = binaries[:]
-    libraries = libraries[:]
-
-    if not package_name:
-        package_name = name
-
-    if option.get("prefix"):
-        # If --prefix is explicitly specified on the command line,
-        # then we need to wipe away any settings of libdir/includedir
-        # that are specified via options in config files.
-        option.set("bindir", None)
-        option.set("libdir", None)
-        option.set("includedir", None)
-
-    # If <install-source-root> is not specified, all headers are installed to
-    # prefix/include, no matter what their relative path is. Sometimes that is
-    # what is needed.
-    install_source_root = property.select('install-source-root', requirements)
-    if install_source_root:
-        requirements = property.change(requirements, 'install-source-root', None)
-
-    install_header_subdir = property.select('install-header-subdir', requirements)
-    if install_header_subdir:
-        install_header_subdir = ungrist(install_header_subdir[0])
-        requirements = property.change(requirements, 'install-header-subdir', None)
-
-    # First, figure out all locations. Use the default if no prefix option
-    # given.
-    prefix = get_prefix(name, requirements)
-
-    # Architecture dependent files.
-    exec_locate = option.get("exec-prefix", prefix)
-
-    # Binaries.
-    bin_locate = option.get("bindir", os.path.join(prefix, "bin"))
-
-    # Object code libraries.
-    lib_locate = option.get("libdir", os.path.join(prefix, "lib"))
-
-    # Source header files.
-    include_locate = option.get("includedir", os.path.join(prefix, "include"))
-
-    stage.install(name + "-bin", binaries, requirements + ["<location>" + bin_locate])
-
-    alias(name + "-lib", [name + "-lib-shared", name + "-lib-static"])
-
-    # Since the install location of shared libraries differs on universe
-    # and cygwin, use target alternatives to make different targets.
-    # We should have used indirect conditional requirements, but it is
-    # awkward to pass bin-locate and lib-locate from there to another rule.
-    alias(name + "-lib-shared", [name + "-lib-shared-universe"])
-    alias(name + "-lib-shared", [name + "-lib-shared-cygwin"], ["<target-os>cygwin"])
-
-    # For shared libraries, we install both the explicitly specified ones and
-    # the shared libraries that the installed executables depend on.
-    stage.install(name + "-lib-shared-universe", binaries + libraries,
-        requirements + ["<location>" + lib_locate, "<install-dependencies>on",
-        "<install-type>SHARED_LIB"])
-    stage.install(name + "-lib-shared-cygwin", binaries + libraries,
-        requirements + ["<location>" + bin_locate, "<install-dependencies>on",
-        "<install-type>SHARED_LIB"])
-
-    # For static libraries, we do not care about executable dependencies, since
-    # static libraries are already incorporated into them.
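
The pair of -lib-shared-universe / -lib-shared-cygwin alternatives encodes a single decision: on cygwin, DLLs are installed next to the executables (bindir) so the loader finds them; everywhere else they go into libdir. The same choice in Python form (the function name is ours):

    def shared_lib_location(target_os, bin_locate, lib_locate):
        # cygwin loads DLLs from the executable directory, not a lib directory.
        return bin_locate if target_os == "cygwin" else lib_locate

    assert shared_lib_location("cygwin", "/usr/bin", "/usr/lib") == "/usr/bin"
    assert shared_lib_location("linux", "/usr/bin", "/usr/lib") == "/usr/lib"
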
- stage.install(name + "-lib-static", libraries, requirements + - ["<location>" + lib_locate, "<install-dependencies>on", "<install-type>STATIC_LIB"]) - stage.install(name + "-headers", headers, requirements \ - + ["<location>" + os.path.join(include_locate, s) for s in install_header_subdir] - + install_source_root) - - alias(name, [name + "-bin", name + "-lib", name + "-headers"]) - - pt = get_manager().projects().current() - - for subname in ["bin", "lib", "headers", "lib-shared", "lib-static", "lib-shared-universe", "lib-shared-cygwin"]: - pt.mark_targets_as_explicit([name + "-" + subname]) - -@bjam_signature((["target_name"], ["package_name"], ["data", "*"], ["requirements", "*"])) -def install_data(target_name, package_name, data, requirements): - if not package_name: - package_name = target_name - - if option.get("prefix"): - # If --prefix is explicitly specified on the command line, - # then we need wipe away any settings of datarootdir - option.set("datarootdir", None) - - prefix = get_prefix(package_name, requirements) - datadir = option.get("datarootdir", os.path.join(prefix, "share")) - - stage.install(target_name, data, - requirements + ["<location>" + os.path.join(datadir, package_name)]) - - get_manager().projects().current().mark_targets_as_explicit([target_name]) - -def get_prefix(package_name, requirements): - - specified = property.select("install-default-prefix", requirements) - if specified: - specified = ungrist(specified[0]) - prefix = option.get("prefix", specified) - requirements = property.change(requirements, "install-default-prefix", None) - # Or some likely defaults if neither is given. - if not prefix: - if os.name == "nt": - prefix = "C:\\" + package_name - elif os.name == "posix": - prefix = "/usr/local" - - return prefix - diff --git a/jam-files/boost-build/tools/pathscale.jam b/jam-files/boost-build/tools/pathscale.jam deleted file mode 100644 index 454e3454..00000000 --- a/jam-files/boost-build/tools/pathscale.jam +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright 2006 Noel Belcourt -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -import property ; -import generators ; -import toolset : flags ; -import feature ; -import type ; -import common ; -import fortran ; - -feature.extend toolset : pathscale ; -toolset.inherit pathscale : unix ; -generators.override pathscale.prebuilt : builtin.prebuilt ; -generators.override pathscale.searched-lib-generator : searched-lib-generator ; - -# Documentation and toolchain description located -# http://www.pathscale.com/docs.html - -rule init ( version ? 
: command * : options * ) -{ - command = [ common.get-invocation-command pathscale : pathCC : $(command) - : /opt/ekopath/bin ] ; - - # Determine the version - local command-string = $(command:J=" ") ; - if $(command) - { - version ?= [ MATCH "^([0-9.]+)" - : [ SHELL "$(command-string) -dumpversion" ] ] ; - } - - local condition = [ common.check-init-parameters pathscale - : version $(version) ] ; - - common.handle-options pathscale : $(condition) : $(command) : $(options) ; - - toolset.flags pathscale.compile.fortran90 OPTIONS $(condition) : - [ feature.get-values <fflags> : $(options) ] : unchecked ; - - command_c = $(command_c[1--2]) $(command[-1]:B=pathcc) ; - - toolset.flags pathscale CONFIG_C_COMMAND $(condition) : $(command_c) ; - - # fortran support - local f-command = [ common.get-invocation-command pathscale : pathf90 : $(command) ] ; - local command_f = $(command_f[1--2]) $(f-command[-1]:B=pathf90) ; - local command_f90 = $(command_f[1--2]) $(f-command[-1]:B=pathf90) ; - - toolset.flags pathscale CONFIG_F_COMMAND $(condition) : $(command_f) ; - toolset.flags pathscale CONFIG_F90_COMMAND $(condition) : $(command_f90) ; - - # always link lib rt to resolve clock_gettime() - flags pathscale.link FINDLIBS-SA : rt : unchecked ; -} - -# Declare generators -generators.register-c-compiler pathscale.compile.c : C : OBJ : <toolset>pathscale ; -generators.register-c-compiler pathscale.compile.c++ : CPP : OBJ : <toolset>pathscale ; -generators.register-fortran-compiler pathscale.compile.fortran : FORTRAN : OBJ : <toolset>pathscale ; -generators.register-fortran90-compiler pathscale.compile.fortran90 : FORTRAN90 : OBJ : <toolset>pathscale ; - -# Declare flags and actions for compilation -flags pathscale.compile OPTIONS <optimization>off : -O0 ; -flags pathscale.compile OPTIONS <optimization>speed : -O3 ; -flags pathscale.compile OPTIONS <optimization>space : -Os ; - -flags pathscale.compile OPTIONS <inlining>off : -noinline ; -flags pathscale.compile OPTIONS <inlining>on : -inline ; -flags pathscale.compile OPTIONS <inlining>full : -inline ; - -flags pathscale.compile OPTIONS <warnings>off : -woffall ; -flags pathscale.compile OPTIONS <warnings>on : -Wall ; -flags pathscale.compile OPTIONS <warnings>all : -Wall -pedantic ; -flags pathscale.compile OPTIONS <warnings-as-errors>on : -Werror ; - -flags pathscale.compile OPTIONS <debug-symbols>on : -ggdb ; -flags pathscale.compile OPTIONS <profiling>on : -pg ; -flags pathscale.compile OPTIONS <link>shared : -fPIC ; -flags pathscale.compile OPTIONS <address-model>32 : -m32 ; -flags pathscale.compile OPTIONS <address-model>64 : -m64 ; - -flags pathscale.compile USER_OPTIONS <cflags> ; -flags pathscale.compile.c++ USER_OPTIONS <cxxflags> ; -flags pathscale.compile DEFINES <define> ; -flags pathscale.compile INCLUDES <include> ; - -flags pathscale.compile.fortran USER_OPTIONS <fflags> ; -flags pathscale.compile.fortran90 USER_OPTIONS <fflags> ; - -actions compile.c -{ - "$(CONFIG_C_COMMAND)" $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.c++ -{ - "$(CONFIG_COMMAND)" $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.fortran -{ - "$(CONFIG_F_COMMAND)" $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -rule compile.fortran90 ( targets * : sources * : properties * ) -{ - # the space rule inserts spaces between targets and it's necessary - SPACE on $(targets) = " " ; - # Serialize execution of the compile.fortran90 action - # 
F90 source must be compiled in a particular order so we - # serialize the build as a parallel F90 compile might fail - JAM_SEMAPHORE on $(targets) = <s>pathscale-f90-semaphore ; -} - -actions compile.fortran90 -{ - "$(CONFIG_F90_COMMAND)" $(OPTIONS) $(USER_OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -module $(<[1]:D) -c -o "$(<)" "$(>)" -} - -# Declare flags and actions for linking -flags pathscale.link OPTIONS <debug-symbols>on : -ggdb -rdynamic ; -# Strip the binary when no debugging is needed -flags pathscale.link OPTIONS <debug-symbols>off : -g0 ; -flags pathscale.link OPTIONS <profiling>on : -pg ; -flags pathscale.link USER_OPTIONS <linkflags> ; -flags pathscale.link LINKPATH <library-path> ; -flags pathscale.link FINDLIBS-ST <find-static-library> ; -flags pathscale.link FINDLIBS-SA <find-shared-library> ; -flags pathscale.link FINDLIBS-SA <threading>multi : pthread ; -flags pathscale.link LIBRARIES <library-file> ; -flags pathscale.link LINK-RUNTIME <runtime-link>static : static ; -flags pathscale.link LINK-RUNTIME <runtime-link>shared : dynamic ; -flags pathscale.link RPATH <dll-path> ; -# On gcc, there are separate options for dll path at runtime and -# link time. On Solaris, there's only one: -R, so we have to use -# it, even though it's bad idea. -flags pathscale.link RPATH <xdll-path> ; - -rule link ( targets * : sources * : properties * ) -{ - SPACE on $(targets) = " " ; -} - -actions link bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(OPTIONS) $(USER_OPTIONS) -L"$(LINKPATH)" -Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) -} - -# Slight mods for dlls -rule link.dll ( targets * : sources * : properties * ) -{ - SPACE on $(targets) = " " ; -} - -actions link.dll bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(OPTIONS) $(USER_OPTIONS) -L"$(LINKPATH)" -Wl,$(RPATH_OPTION:E=-R)$(SPACE)-Wl,"$(RPATH)" -o "$(<)" -Wl,-soname$(SPACE)-Wl,$(<[1]:D=) -shared "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) -} - -# Declare action for creating static libraries -# "$(CONFIG_COMMAND)" -ar -o "$(<)" "$(>)" -actions piecemeal archive -{ - ar $(ARFLAGS) ru "$(<)" "$(>)" -} diff --git a/jam-files/boost-build/tools/pch.jam b/jam-files/boost-build/tools/pch.jam deleted file mode 100644 index 0c6e98fa..00000000 --- a/jam-files/boost-build/tools/pch.jam +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) 2005 Reece H. Dunn. -# Copyright 2006 Ilya Sokolov -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -##### Using Precompiled Headers (Quick Guide) ##### -# -# Make precompiled mypch.hpp: -# -# import pch ; -# -# cpp-pch mypch -# : # sources -# mypch.hpp -# : # requiremnts -# <toolset>msvc:<source>mypch.cpp -# ; -# -# Add cpp-pch to sources: -# -# exe hello -# : main.cpp hello.cpp mypch -# ; - -import "class" : new ; -import type ; -import feature ; -import generators ; - -type.register PCH : pch ; - -type.register C_PCH : : PCH ; -type.register CPP_PCH : : PCH ; - -# Control precompiled header (PCH) generation. -feature.feature pch : - on - off - : propagated ; - - -feature.feature pch-header : : free dependency ; -feature.feature pch-file : : free dependency ; - -# Base PCH generator. The 'run' method has the logic to prevent this generator -# from being run unless it's being used for a top-level PCH target. 
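
The JAM_SEMAPHORE trick in compile.fortran90 above deserves a gloss: Fortran 90 module files impose a compilation order, so those compiles must not run concurrently even when the rest of the build is parallel. A minimal sketch of the same serialization in Python (illustrative only):

    import threading

    # Counterpart of <s>pathscale-f90-semaphore: at most one holder at a time.
    f90_semaphore = threading.Semaphore(1)

    def compile_fortran90(source):
        with f90_semaphore:
            # A parallel F90 compile could fail here if another job were
            # still producing a module file this unit imports.
            print("compiling", source)
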
-class pch-generator : generator -{ - import property-set ; - - rule action-class ( ) - { - return compile-action ; - } - - rule run ( project name ? : property-set : sources + ) - { - if ! $(name) - { - # Unless this generator is invoked as the top-most generator for a - # main target, fail. This allows using 'H' type as input type for - # this generator, while preventing Boost.Build to try this generator - # when not explicitly asked for. - # - # One bad example is msvc, where pch generator produces both PCH - # target and OBJ target, so if there's any header generated (like by - # bison, or by msidl), we'd try to use pch generator to get OBJ from - # that H, which is completely wrong. By restricting this generator - # only to pch main target, such problem is solved. - } - else - { - local r = [ run-pch $(project) $(name) - : [ $(property-set).add-raw <define>BOOST_BUILD_PCH_ENABLED ] - : $(sources) ] ; - return [ generators.add-usage-requirements $(r) - : <define>BOOST_BUILD_PCH_ENABLED ] ; - } - } - - # This rule must be overridden by the derived classes. - rule run-pch ( project name ? : property-set : sources + ) - { - } -} - - -# NOTE: requirements are empty, default pch generator can be applied when -# pch=off. -generators.register - [ new dummy-generator pch.default-c-pch-generator : : C_PCH ] ; -generators.register - [ new dummy-generator pch.default-cpp-pch-generator : : CPP_PCH ] ; diff --git a/jam-files/boost-build/tools/pch.py b/jam-files/boost-build/tools/pch.py deleted file mode 100644 index 21d3db09..00000000 --- a/jam-files/boost-build/tools/pch.py +++ /dev/null @@ -1,83 +0,0 @@ -# Status: Being ported by Steven Watanabe -# Base revision: 47077 -# -# Copyright (c) 2005 Reece H. Dunn. -# Copyright 2006 Ilya Sokolov -# Copyright (c) 2008 Steven Watanabe -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -##### Using Precompiled Headers (Quick Guide) ##### -# -# Make precompiled mypch.hpp: -# -# import pch ; -# -# cpp-pch mypch -# : # sources -# mypch.hpp -# : # requiremnts -# <toolset>msvc:<source>mypch.cpp -# ; -# -# Add cpp-pch to sources: -# -# exe hello -# : main.cpp hello.cpp mypch -# ; - -from b2.build import type, feature, generators - -type.register('PCH', ['pch']) -type.register('C_PCH', [], 'PCH') -type.register('CPP_PCH', [], 'PCH') - -# Control precompiled header (PCH) generation. -feature.feature('pch', - ['on', 'off'], - ['propagated']) - -feature.feature('pch-header', [], ['free', 'dependency']) -feature.feature('pch-file', [], ['free', 'dependency']) - -class PchGenerator(generators.Generator): - """ - Base PCH generator. The 'run' method has the logic to prevent this generator - from being run unless it's being used for a top-level PCH target. - """ - def action_class(self): - return 'compile-action' - - def run(self, project, name, prop_set, sources): - if not name: - # Unless this generator is invoked as the top-most generator for a - # main target, fail. This allows using 'H' type as input type for - # this generator, while preventing Boost.Build to try this generator - # when not explicitly asked for. - # - # One bad example is msvc, where pch generator produces both PCH - # target and OBJ target, so if there's any header generated (like by - # bison, or by msidl), we'd try to use pch generator to get OBJ from - # that H, which is completely wrong. 
By restricting this generator - # only to pch main target, such problem is solved. - pass - else: - r = self.run_pch(project, name, - prop_set.add_raw('<define>BOOST_BUILD_PCH_ENABLED'), - sources) - return generators.add_usage_requirements( - r, ['<define>BOOST_BUILD_PCH_ENABLED']) - - # This rule must be overridden by the derived classes. - def run_pch(self, project, name, prop_set, sources): - pass - -#FIXME: dummy-generator in builtins.jam needs to be ported. -# NOTE: requirements are empty, default pch generator can be applied when -# pch=off. -###generators.register( -### [ new dummy-generator pch.default-c-pch-generator : : C_PCH ] ; -###generators.register -### [ new dummy-generator pch.default-cpp-pch-generator : : CPP_PCH ] ; diff --git a/jam-files/boost-build/tools/pgi.jam b/jam-files/boost-build/tools/pgi.jam deleted file mode 100644 index 3a35c644..00000000 --- a/jam-files/boost-build/tools/pgi.jam +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright Noel Belcourt 2007. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -import property ; -import generators ; -import os ; -import toolset : flags ; -import feature ; -import fortran ; -import type ; -import common ; -import gcc ; - -feature.extend toolset : pgi ; -toolset.inherit pgi : unix ; -generators.override pgi.prebuilt : builtin.lib-generator ; -generators.override pgi.searched-lib-generator : searched-lib-generator ; - -# Documentation and toolchain description located -# http://www.pgroup.com/resources/docs.htm - -rule init ( version ? : command * : options * ) -{ - local condition = [ common.check-init-parameters pgi : version $(version) ] ; - - local l_command = [ common.get-invocation-command pgi : pgCC : $(command) ] ; - - common.handle-options pgi : $(condition) : $(l_command) : $(options) ; - - command_c = $(command_c[1--2]) $(l_command[-1]:B=cc) ; - - toolset.flags pgi CONFIG_C_COMMAND $(condition) : $(command_c) ; - - flags pgi.compile DEFINES $(condition) : - [ feature.get-values <define> : $(options) ] : unchecked ; - - # IOV_MAX support - flags pgi.compile DEFINES $(condition) : __need_IOV_MAX : unchecked ; - - # set link flags - flags pgi.link FINDLIBS-ST : [ - feature.get-values <find-static-library> : $(options) ] : unchecked ; - - # always link lib rt to resolve clock_gettime() - flags pgi.link FINDLIBS-SA : rt [ - feature.get-values <find-shared-library> : $(options) ] : unchecked ; - - gcc.init-link-flags pgi gnu $(condition) ; -} - -# Declare generators -generators.register-c-compiler pgi.compile.c : C : OBJ : <toolset>pgi ; -generators.register-c-compiler pgi.compile.c++ : CPP : OBJ : <toolset>pgi ; -generators.register-fortran-compiler pgi.compile.fortran : FORTRAN : OBJ : <toolset>pgi ; - -# Declare flags and actions for compilation -flags pgi.compile OPTIONS : -Kieee ; -flags pgi.compile OPTIONS <link>shared : -fpic -fPIC ; -flags pgi.compile OPTIONS <debug-symbols>on : -gopt ; -flags pgi.compile OPTIONS <profiling>on : -xprofile=tcov ; -flags pgi.compile OPTIONS <optimization>speed : -fast -Mx,8,0x10000000 ; -flags pgi.compile OPTIONS <optimization>space : -xO2 -xspace ; -# flags pgi.compile OPTIONS <threading>multi : -mt ; - -flags pgi.compile OPTIONS <warnings>off : -Minform=severe ; -flags pgi.compile OPTIONS <warnings>on : -Minform=warn ; - -flags pgi.compile.c++ OPTIONS <inlining>off : -INLINE:none ; - -flags pgi.compile OPTIONS <cflags> ; -flags pgi.compile.c++ OPTIONS <cxxflags> ; -flags 
pgi.compile DEFINES <define> ; -flags pgi.compile INCLUDES <include> ; - -flags pgi.compile.fortran OPTIONS <fflags> ; - -actions compile.c -{ - "$(CONFIG_C_COMMAND)" $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.c++ -{ - "$(CONFIG_COMMAND)" $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.fortran -{ - "$(CONFIG_F_COMMAND)" $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -# Declare flags and actions for linking -flags pgi.link OPTIONS <debug-symbols>on : -gopt ; -# Strip the binary when no debugging is needed -flags pgi.link OPTIONS <debug-symbols>off : -s ; -flags pgi.link OPTIONS <profiling>on : -xprofile=tcov ; -flags pgi.link OPTIONS <linkflags> ; -flags pgi.link OPTIONS <link>shared : -fpic -fPIC ; -flags pgi.link LINKPATH <library-path> ; -flags pgi.link FINDLIBS-ST <find-static-library> ; -flags pgi.link FINDLIBS-SA <find-shared-library> ; -flags pgi.link FINDLIBS-SA <threading>multi : pthread rt ; -flags pgi.link LIBRARIES <library-file> ; -flags pgi.link LINK-RUNTIME <runtime-link>static : static ; -flags pgi.link LINK-RUNTIME <runtime-link>shared : dynamic ; -flags pgi.link RPATH <dll-path> ; - -# On gcc, there are separate options for dll path at runtime and -# link time. On Solaris, there's only one: -R, so we have to use -# it, even though it's bad idea. -flags pgi.link RPATH <xdll-path> ; - -rule link ( targets * : sources * : properties * ) -{ - SPACE on $(targets) = " " ; -} - -# reddish can only link statically and, somehow, the presence of -Bdynamic on the link line -# marks the executable as a dynamically linked exec even though no dynamic libraries are supplied. -# Yod on redstorm refuses to load an executable that is dynamically linked. -# removing the dynamic link options should get us where we need to be on redstorm. -# "$(CONFIG_COMMAND)" $(OPTIONS) -L"$(LINKPATH)" -R"$(RPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -Bdynamic -l$(FINDLIBS-SA) -Bstatic -l$(FINDLIBS-ST) -B$(LINK-RUNTIME) -actions link bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(OPTIONS) -L"$(LINKPATH)" -R"$(RPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -Bstatic -l$(FINDLIBS-ST) -Bdynamic -l$(FINDLIBS-SA) -B$(LINK-RUNTIME) -} - -# Slight mods for dlls -rule link.dll ( targets * : sources * : properties * ) -{ - SPACE on $(targets) = " " ; -} - -# "$(CONFIG_COMMAND)" $(OPTIONS) -L"$(LINKPATH)" -R"$(RPATH)" -o "$(<)" -h$(<[1]:D=) -G "$(>)" "$(LIBRARIES)" -Bdynamic -l$(FINDLIBS-SA) -Bstatic -l$(FINDLIBS-ST) -B$(LINK-RUNTIME) - -actions link.dll bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(OPTIONS) -shared -L"$(LINKPATH)" -R"$(RPATH)" -o "$(<)" "$(>)" -Wl,-h -Wl,$(<[1]:D=) "$(LIBRARIES)" -Bdynamic -l$(FINDLIBS-SA) -Bstatic -l$(FINDLIBS-ST) -B$(LINK-RUNTIME) -} - -actions updated together piecemeal pgi.archive -{ - ar -rc$(ARFLAGS:E=) "$(<)" "$(>)" -} - diff --git a/jam-files/boost-build/tools/python-config.jam b/jam-files/boost-build/tools/python-config.jam deleted file mode 100644 index 40aa825b..00000000 --- a/jam-files/boost-build/tools/python-config.jam +++ /dev/null @@ -1,27 +0,0 @@ -#~ Copyright 2005 Rene Rivera. -#~ Distributed under the Boost Software License, Version 1.0. -#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Automatic configuration for Python tools and librries. To use, just import this module. 
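
python-config.jam, which follows, probes the Windows registry for installed interpreters. The equivalent lookup with Python's standard winreg module (Windows-only; the function name is ours):

    import winreg

    def python_install_path(version):
        # Default value of HKLM\SOFTWARE\Python\PythonCore\<version>\InstallPath.
        try:
            return winreg.QueryValue(
                winreg.HKEY_LOCAL_MACHINE,
                r"SOFTWARE\Python\PythonCore\%s\InstallPath" % version)
        except OSError:
            return None

    for v in ("2.4", "2.3", "2.2"):
        path = python_install_path(v)
        if path:
            print("notice: using python :", v, ":", path)
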
-
-import os ;
-import toolset : using ;
-
-if [ os.name ] = NT
-{
-    for local R in 2.4 2.3 2.2
-    {
-        local python-path = [ W32_GETREG
-            "HKEY_LOCAL_MACHINE\\SOFTWARE\\Python\\PythonCore\\$(R)\\InstallPath" ] ;
-        local python-version = $(R) ;
-
-        if $(python-path)
-        {
-            if --debug-configuration in [ modules.peek : ARGV ]
-            {
-                ECHO "notice:" using python ":" $(python-version) ":" $(python-path) ;
-            }
-            using python : $(python-version) : $(python-path) ;
-        }
-    }
-}
diff --git a/jam-files/boost-build/tools/python.jam b/jam-files/boost-build/tools/python.jam
deleted file mode 100644
index 97a9f9a5..00000000
--- a/jam-files/boost-build/tools/python.jam
+++ /dev/null
@@ -1,1267 +0,0 @@
-# Copyright 2004 Vladimir Prus.
-# Distributed under the Boost Software License, Version 1.0. (See
-# accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-
-# Support for Python and the Boost.Python library.
-#
-# This module defines
-#
-# - a project 'python' with a target 'python' in it, that corresponds to the
-#   python library
-#
-# - a main target rule 'python-extension' which can be used to build a python
-#   extension.
-#
-# Extensions that use Boost.Python must explicitly link to it.
-
-import type ;
-import testing ;
-import generators ;
-import project ;
-import errors ;
-import targets ;
-import "class" : new ;
-import os ;
-import common ;
-import toolset ;
-import regex ;
-import numbers ;
-import string ;
-import property ;
-import sequence ;
-import path ;
-import feature ;
-import set ;
-import builtin ;
-import version ;
-
-
-# Make this module a project.
-project.initialize $(__name__) ;
-project python ;
-
-# Save the project so that if 'init' is called several times we define new
-# targets in the python project, not in whatever project we were called by.
-.project = [ project.current ] ;
-
-# Dynamic linker lib. Necessary to specify it explicitly on some platforms.
-lib dl ;
-# This contains the 'openpty' function needed by Python. Again, on some
-# systems it needs to be passed to the linker explicitly.
-lib util ;
-# Python uses pthread symbols.
-lib pthread ;
-# Extra library needed by pthread on some platforms.
-lib rt ;
-
-# The pythonpath feature specifies additional elements for the PYTHONPATH
-# environment variable, set by run-pyd. For example, pythonpath can be used to
-# access Python modules that are part of the product being built, but are not
-# installed in the development system's default paths.
-feature.feature pythonpath : : free optional path ;
-
-# Initializes the Python toolset. Note that all parameters are optional.
-#
-# - version: the version of Python to use. Should be in Major.Minor format,
-#   for example 2.3. Do not include the subminor version.
-#
-# - cmd-or-prefix: Preferably, a command that invokes a Python interpreter.
-#   Alternatively, the installation prefix for Python libraries and includes.
-#   If empty, will be guessed from the version, the platform's installation
-#   patterns, and the python executables that can be found in PATH.
-#
-# - includes: the include path to Python headers. If empty, will be guessed.
-#
-# - libraries: the path to Python library binaries. If empty, will be guessed.
-#   On MacOS/Darwin, you can also pass the path of the Python framework.
-#
-# - condition: if specified, should be a set of properties that are matched
-#   against the build configuration when Boost.Build selects a Python
-#   configuration to use.
-# -# - extension-suffix: A string to append to the name of extension modules before -# the true filename extension. Ordinarily we would just compute this based on -# the value of the <python-debugging> feature. However ubuntu's python-dbg -# package uses the windows convention of appending _d to debug-build extension -# modules. We have no way of detecting ubuntu, or of probing python for the -# "_d" requirement, and if you configure and build python using -# --with-pydebug, you'll be using the standard *nix convention. Defaults to "" -# (or "_d" when targeting windows and <python-debugging> is set). -# -# Example usage: -# -# using python : 2.3 ; -# using python : 2.3 : /usr/local/bin/python ; -# -rule init ( version ? : cmd-or-prefix ? : includes * : libraries ? - : condition * : extension-suffix ? ) -{ - project.push-current $(.project) ; - - debug-message Configuring python... ; - for local v in version cmd-or-prefix includes libraries condition - { - if $($(v)) - { - debug-message " user-specified "$(v): \"$($(v))\" ; - } - } - - configure $(version) : $(cmd-or-prefix) : $(includes) : $(libraries) : $(condition) : $(extension-suffix) ; - - project.pop-current ; -} - -# A simpler version of SHELL that grabs stderr as well as stdout, but returns -# nothing if there was an error. -# -local rule shell-cmd ( cmd ) -{ - debug-message running command '$(cmd)" 2>&1"' ; - x = [ SHELL $(cmd)" 2>&1" : exit-status ] ; - if $(x[2]) = 0 - { - return $(x[1]) ; - } - else - { - return ; - } -} - - -# Try to identify Cygwin symlinks. Invoking such a file directly as an NT -# executable from a native Windows build of bjam would be fatal to the bjam -# process. One /can/ invoke them through sh.exe or bash.exe, if you can prove -# that those are not also symlinks. ;-) -# -# If a symlink is found returns non-empty; we try to extract the target of the -# symlink from the file and return that. -# -# Note: 1. only works on NT 2. path is a native path. -local rule is-cygwin-symlink ( path ) -{ - local is-symlink = ; - - # Look for a file with the given path having the S attribute set, as cygwin - # symlinks do. /-C means "do not use thousands separators in file sizes." - local dir-listing = [ shell-cmd "DIR /-C /A:S \""$(path)"\"" ] ; - - if $(dir-listing) - { - # Escape any special regex characters in the base part of the path. - local base-pat = [ regex.escape $(path:D=) : ].[()*+?|\\$^ : \\ ] ; - - # Extract the file's size from the directory listing. - local size-of-system-file = [ MATCH "([0-9]+) "$(base-pat) : $(dir-listing) : 1 ] ; - - # If the file has a reasonably small size, look for the special symlink - # identification text. - if $(size-of-system-file) && [ numbers.less $(size-of-system-file) 1000 ] - { - local link = [ SHELL "FIND /OFF \"!<symlink>\" \""$(path)"\" 2>&1" ] ; - if $(link[2]) != 0 - { - local nl = " - -" ; - is-symlink = [ MATCH ".*!<symlink>([^"$(nl)"]*)" : $(link[1]) : 1 ] ; - if $(is-symlink) - { - is-symlink = [ *nix-path-to-native $(is-symlink) ] ; - is-symlink = $(is-symlink:R=$(path:D)) ; - } - - } - } - } - return $(is-symlink) ; -} - - -# Append ext to each member of names that does not contain '.'. -# -local rule default-extension ( names * : ext * ) -{ - local result ; - for local n in $(names) - { - switch $(n) - { - case *.* : result += $(n) ; - case * : result += $(n)$(ext) ; - } - } - return $(result) ; -} - - -# Tries to determine whether invoking "cmd" would actually attempt to launch a -# cygwin symlink. -# -# Note: only works on NT. 
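
Old-style Cygwin symlinks are small system-attribute files whose content begins with the literal text !<symlink> followed by the target path; the Jam code above greps a DIR listing for them. A direct check in Python (a sketch; it skips the file-attribute and size tests the Jam version performs):

    def is_cygwin_symlink(path):
        # Cygwin text symlinks start with the magic bytes b"!<symlink>".
        try:
            with open(path, "rb") as f:
                return f.read(10) == b"!<symlink>"
        except OSError:
            return False
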
-# -local rule invokes-cygwin-symlink ( cmd ) -{ - local dirs = $(cmd:D) ; - if ! $(dirs) - { - dirs = . [ os.executable-path ] ; - } - local base = [ default-extension $(cmd:D=) : .exe .cmd .bat ] ; - local paths = [ GLOB $(dirs) : $(base) ] ; - if $(paths) - { - # Make sure we have not run into a Cygwin symlink. Invoking such a file - # as an NT executable would be fatal for the bjam process. - return [ is-cygwin-symlink $(paths[1]) ] ; - } -} - - -local rule debug-message ( message * ) -{ - if --debug-configuration in [ modules.peek : ARGV ] - { - ECHO notice: [python-cfg] $(message) ; - } -} - - -# Like W32_GETREG, except prepend HKEY_CURRENT_USER\SOFTWARE and -# HKEY_LOCAL_MACHINE\SOFTWARE to the first argument, returning the first result -# found. Also accounts for the fact that on 64-bit machines, 32-bit software has -# its own area, under SOFTWARE\Wow6432node. -# -local rule software-registry-value ( path : data ? ) -{ - local result ; - for local root in HKEY_CURRENT_USER HKEY_LOCAL_MACHINE - { - for local x64elt in "" Wow6432node\\ # Account for 64-bit windows - { - if ! $(result) - { - result = [ W32_GETREG $(root)\\SOFTWARE\\$(x64elt)$(path) : $(data) ] ; - } - } - - } - return $(result) ; -} - - -.windows-drive-letter-re = ^([A-Za-z]):[\\/](.*) ; -.cygwin-drive-letter-re = ^/cygdrive/([a-z])/(.*) ; - -.working-directory = [ PWD ] ; -.working-drive-letter = [ SUBST $(.working-directory) $(.windows-drive-letter-re) $1 ] ; -.working-drive-letter ?= [ SUBST $(.working-directory) $(.cygwin-drive-letter-re) $1 ] ; - - -local rule windows-to-cygwin-path ( path ) -{ - # If path is rooted with a drive letter, rewrite it using the /cygdrive - # mountpoint. - local p = [ SUBST $(path:T) $(.windows-drive-letter-re) /cygdrive/$1/$2 ] ; - - # Else if path is rooted without a drive letter, use the working directory. - p ?= [ SUBST $(path:T) ^/(.*) /cygdrive/$(.working-drive-letter:L)/$2 ] ; - - # Else return the path unchanged. - return $(p:E=$(path:T)) ; -} - - -# :W only works in Cygwin builds of bjam. This one works on NT builds as well. -# -local rule cygwin-to-windows-path ( path ) -{ - path = $(path:R="") ; # strip any trailing slash - - local drive-letter = [ SUBST $(path) $(.cygwin-drive-letter-re) $1:/$2 ] ; - if $(drive-letter) - { - path = $(drive-letter) ; - } - else if $(path:R=/x) = $(path) # already rooted? - { - # Look for a cygwin mount that includes each head sequence in $(path). - local head = $(path) ; - local tail = "" ; - - while $(head) - { - local root = [ software-registry-value - "Cygnus Solutions\\Cygwin\\mounts v2\\"$(head) : native ] ; - - if $(root) - { - path = $(tail:R=$(root)) ; - head = ; - } - tail = $(tail:R=$(head:D=)) ; - - if $(head) = / - { - head = ; - } - else - { - head = $(head:D) ; - } - } - } - return [ regex.replace $(path:R="") / \\ ] ; -} - - -# Convert a *nix path to native. -# -local rule *nix-path-to-native ( path ) -{ - if [ os.name ] = NT - { - path = [ cygwin-to-windows-path $(path) ] ; - } - return $(path) ; -} - - -# Convert an NT path to native. -# -local rule windows-path-to-native ( path ) -{ - if [ os.name ] = NT - { - return $(path) ; - } - else - { - return [ windows-to-cygwin-path $(path) ] ; - } -} - - -# Return nonempty if path looks like a windows path, i.e. it starts with a drive -# letter or contains backslashes. 
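
The two path rewrites above are plain regular-expression transformations between the C:\dir\file and /cygdrive/c/dir/file forms. In Python (a sketch; the Jam versions additionally consult the cygwin mount table for rooted paths):

    import re

    _win_drive = re.compile(r"^([A-Za-z]):[\\/](.*)")
    _cyg_drive = re.compile(r"^/cygdrive/([a-z])/(.*)")

    def windows_to_cygwin(path):
        m = _win_drive.match(path)
        if not m:
            return path  # not rooted with a drive letter; left unchanged here
        return "/cygdrive/%s/%s" % (m.group(1).lower(),
                                    m.group(2).replace("\\", "/"))

    def cygwin_to_windows(path):
        m = _cyg_drive.match(path)
        if not m:
            return path
        return "%s:\\%s" % (m.group(1).upper(), m.group(2).replace("/", "\\"))

    print(windows_to_cygwin(r"C:\Python25\python.exe"))
    # /cygdrive/c/Python25/python.exe
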
-# -local rule guess-windows-path ( path ) -{ - return [ SUBST $(path) ($(.windows-drive-letter-re)|.*([\\]).*) $1 ] ; -} - - -local rule path-to-native ( paths * ) -{ - local result ; - - for local p in $(paths) - { - if [ guess-windows-path $(p) ] - { - result += [ windows-path-to-native $(p) ] ; - } - else - { - result += [ *nix-path-to-native $(p:T) ] ; - } - } - return $(result) ; -} - - -# Validate the version string and extract the major/minor part we care about. -# -local rule split-version ( version ) -{ - local major-minor = [ MATCH ^([0-9]+)\.([0-9]+)(.*)$ : $(version) : 1 2 3 ] ; - if ! $(major-minor[2]) || $(major-minor[3]) - { - ECHO "Warning: \"using python\" expects a two part (major, minor) version number; got" $(version) instead ; - - # Add a zero to account for the missing digit if necessary. - major-minor += 0 ; - } - - return $(major-minor[1]) $(major-minor[2]) ; -} - - -# Build a list of versions from 3.0 down to 1.5. Because bjam can not enumerate -# registry sub-keys, we have no way of finding a version with a 2-digit minor -# version, e.g. 2.10 -- let us hope that never happens. -# -.version-countdown = ; -for local v in [ numbers.range 15 30 ] -{ - .version-countdown = [ SUBST $(v) (.)(.*) $1.$2 ] $(.version-countdown) ; -} - - -local rule windows-installed-pythons ( version ? ) -{ - version ?= $(.version-countdown) ; - local interpreters ; - - for local v in $(version) - { - local install-path = [ - software-registry-value "Python\\PythonCore\\"$(v)"\\InstallPath" ] ; - - if $(install-path) - { - install-path = [ windows-path-to-native $(install-path) ] ; - debug-message Registry indicates Python $(v) installed at \"$(install-path)\" ; - } - - interpreters += $(:E=python:R=$(install-path)) ; - } - return $(interpreters) ; -} - - -local rule darwin-installed-pythons ( version ? ) -{ - version ?= $(.version-countdown) ; - - local prefix - = [ GLOB /System/Library/Frameworks /Library/Frameworks - : Python.framework ] ; - - return $(prefix)/Versions/$(version)/bin/python ; -} - - -# Assume "python-cmd" invokes a python interpreter and invoke it to extract all -# the information we care about from its "sys" module. Returns void if -# unsuccessful. -# -local rule probe ( python-cmd ) -{ - # Avoid invoking a Cygwin symlink on NT. - local skip-symlink ; - if [ os.name ] = NT - { - skip-symlink = [ invokes-cygwin-symlink $(python-cmd) ] ; - } - - if $(skip-symlink) - { - debug-message -------------------------------------------------------------------- ; - debug-message \"$(python-cmd)\" would attempt to invoke a Cygwin symlink, ; - debug-message causing a bjam built for Windows to hang. ; - debug-message ; - debug-message If you intend to target a Cygwin build of Python, please ; - debug-message replace the path to the link with the path to a real executable ; - debug-message (guessing: \"$(skip-symlink)\") "in" your 'using python' line ; - debug-message "in" user-config.jam or site-config.jam. Do not forget to escape ; - debug-message backslashes ; - debug-message -------------------------------------------------------------------- ; - } - else - { - # Prepare a List of Python format strings and expressions that can be - # used to print the constants we want from the sys module. - - # We do not really want sys.version since that is a complicated string, - # so get the information from sys.version_info instead. 
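
The probe rule boils down to: run the candidate command with a tiny script that prints the version and the interesting sys attributes as key=value lines, then parse them back out. A runnable sketch of that round trip (the attribute list mirrors sys-elements above):

    import subprocess

    def probe(python_cmd):
        script = (
            "import sys\n"
            "print('version=%d.%d' % sys.version_info[:2])\n"
            "for name in ('platform', 'prefix', 'exec_prefix', 'executable'):\n"
            "    print('%s=%s' % (name, getattr(sys, name)))\n"
        )
        try:
            out = subprocess.check_output([python_cmd, "-c", script], text=True)
        except (OSError, subprocess.CalledProcessError):
            return None  # does not invoke a working interpreter
        return dict(line.split("=", 1) for line in out.splitlines() if "=" in line)

    print(probe("python"))
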
- local format = "version=%d.%d" ; - local exprs = "version_info[0]" "version_info[1]" ; - - for local s in $(sys-elements[2-]) - { - format += $(s)=%s ; - exprs += $(s) ; - } - - # Invoke Python and ask it for all those values. - if [ version.check-jam-version 3 1 17 ] || ( [ os.name ] != NT ) - { - # Prior to version 3.1.17 Boost Jam's SHELL command did not support - # quoted commands correctly on Windows. This means that on that - # platform we do not support using a Python command interpreter - # executable whose path contains a space character. - python-cmd = \"$(python-cmd)\" ; - } - local full-cmd = - $(python-cmd)" -c \"from sys import *; print('"$(format:J=\\n)"' % ("$(exprs:J=,)"))\"" ; - - local output = [ shell-cmd $(full-cmd) ] ; - if $(output) - { - # Parse the output to get all the results. - local nl = " - -" ; - for s in $(sys-elements) - { - # These variables are expected to be declared local in the - # caller, so Jam's dynamic scoping will set their values there. - sys.$(s) = [ SUBST $(output) \\<$(s)=([^$(nl)]+) $1 ] ; - } - } - return $(output) ; - } -} - - -# Make sure the "libraries" and "includes" variables (in an enclosing scope) -# have a value based on the information given. -# -local rule compute-default-paths ( target-os : version ? : prefix ? : - exec-prefix ? ) -{ - exec-prefix ?= $(prefix) ; - - if $(target-os) = windows - { - # The exec_prefix is where you're supposed to look for machine-specific - # libraries. - local default-library-path = $(exec-prefix)\\libs ; - local default-include-path = $(:E=Include:R=$(prefix)) ; - - # If the interpreter was found in a directory called "PCBuild" or - # "PCBuild8," assume we're looking at a Python built from the source - # distro, and go up one additional level to the default root. Otherwise, - # the default root is the directory where the interpreter was found. - - # We ask Python itself what the executable path is in case of - # intermediate symlinks or shell scripts. - local executable-dir = $(sys.executable:D) ; - - if [ MATCH ^(PCBuild) : $(executable-dir:D=) ] - { - debug-message "This Python appears to reside in a source distribution;" ; - debug-message "prepending \""$(executable-dir)"\" to default library search path" ; - - default-library-path = $(executable-dir) $(default-library-path) ; - - default-include-path = $(:E=PC:R=$(executable-dir:D)) $(default-include-path) ; - - debug-message "and \""$(default-include-path[1])"\" to default #include path" ; - } - - libraries ?= $(default-library-path) ; - includes ?= $(default-include-path) ; - } - else - { - includes ?= $(prefix)/include/python$(version) ; - - local lib = $(exec-prefix)/lib ; - libraries ?= $(lib)/python$(version)/config $(lib) ; - } -} - -# The version of the python interpreter to use. -feature.feature python : : propagated ; -feature.feature python.interpreter : : free ; - -toolset.flags python.capture-output PYTHON : <python.interpreter> ; - -# -# Support for Python configured --with-pydebug -# -feature.feature python-debugging : off on : propagated ; -builtin.variant debug-python : debug : <python-debugging>on ; - - -# Return a list of candidate commands to try when looking for a Python -# interpreter. prefix is expected to be a native path. -# -local rule candidate-interpreters ( version ? : prefix ? : target-os ) -{ - local bin-path = bin ; - if $(target-os) = windows - { - # On Windows, look in the root directory itself and, to work with the - # result of a build-from-source, the PCBuild directory. 
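
compute-default-paths, shown a little earlier, fills in includes and libraries when the user left them blank. The two main cases, sketched in Python:

    import os

    def default_paths(target_os, version, prefix, exec_prefix=None):
        exec_prefix = exec_prefix or prefix
        if target_os == "windows":
            # exec-prefix holds machine-specific libraries; headers sit in Include.
            return ([os.path.join(prefix, "Include")],
                    [os.path.join(exec_prefix, "libs")])
        includes = [os.path.join(prefix, "include", "python" + version)]
        lib = os.path.join(exec_prefix, "lib")
        libraries = [os.path.join(lib, "python" + version, "config"), lib]
        return includes, libraries

    print(default_paths("linux", "2.5", "/usr"))
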
- bin-path = PCBuild8 PCBuild "" ; - } - - bin-path = $(bin-path:R=$(prefix)) ; - - if $(target-os) in windows darwin - { - return # Search: - $(:E=python:R=$(bin-path)) # Relative to the prefix, if any - python # In the PATH - [ $(target-os)-installed-pythons $(version) ] # Standard install locations - ; - } - else - { - # Search relative to the prefix, or if none supplied, in PATH. - local unversioned = $(:E=python:R=$(bin-path:E=)) ; - - # If a version was specified, look for a python with that specific - # version appended before looking for one called, simply, "python" - return $(unversioned)$(version) $(unversioned) ; - } -} - - -# Compute system library dependencies for targets linking with static Python -# libraries. -# -# On many systems, Python uses libraries such as pthreads or libdl. Since static -# libraries carry no library dependency information of their own that the linker -# can extract, these extra dependencies have to be given explicitly on the link -# line of the client. The information about these dependencies is packaged into -# the "python" target below. -# -# Even where Python itself uses pthreads, it never allows extension modules to -# be entered concurrently (unless they explicitly give up the interpreter lock). -# Therefore, extension modules do not need the efficiency overhead of threadsafe -# code as produced by <threading>multi, and we handle libpthread along with -# other libraries here. Note: this optimization is based on an assumption that -# the compiler generates link-compatible code in both the single- and -# multi-threaded cases, and that system libraries do not change their ABIs -# either. -# -# Returns a list of usage-requirements that link to the necessary system -# libraries. -# -local rule system-library-dependencies ( target-os ) -{ - switch $(target-os) - { - case s[uo][nl]* : # solaris, sun, sunos - # Add a librt dependency for the gcc toolset on SunOS (the sun - # toolset adds -lrt unconditionally). While this appears to - # duplicate the logic already in gcc.jam, it does not as long as - # we are not forcing <threading>multi. - - # On solaris 10, distutils.sysconfig.get_config_var('LIBS') yields - # '-lresolv -lsocket -lnsl -lrt -ldl'. However, that does not seem - # to be the right list for extension modules. For example, on my - # installation, adding -ldl causes at least one test to fail because - # the library can not be found and removing it causes no failures. - - # Apparently, though, we need to add -lrt for gcc. - return <toolset>gcc:<library>rt ; - - case osf : return <library>pthread <toolset>gcc:<library>rt ; - - case qnx* : return ; - case darwin : return ; - case windows : return ; - - case hpux : return <library>rt ; - case *bsd : return <library>pthread <toolset>gcc:<library>util ; - - case aix : return <library>pthread <library>dl ; - - case * : return <library>pthread <library>dl - <toolset>gcc:<library>util <toolset-intel:platform>linux:<library>util ; - } -} - - -# Declare a target to represent Python's library. -# -local rule declare-libpython-target ( version ? : requirements * ) -{ - # Compute the representation of Python version in the name of Python's - # library file. - local lib-version = $(version) ; - if <target-os>windows in $(requirements) - { - local major-minor = [ split-version $(version) ] ; - lib-version = $(major-minor:J="") ; - if <python-debugging>on in $(requirements) - { - lib-version = $(lib-version)_d ; - } - } - - if ! 
$(lib-version) - { - ECHO *** warning: could not determine Python version, which will ; - ECHO *** warning: probably prevent us from linking with the python ; - ECHO *** warning: library. Consider explicitly passing the version ; - ECHO *** warning: to 'using python'. ; - } - - # Declare it. - lib python.lib : : <name>python$(lib-version) $(requirements) ; -} - - -# Implementation of init. -local rule configure ( version ? : cmd-or-prefix ? : includes * : libraries ? : - condition * : extension-suffix ? ) -{ - local prefix ; - local exec-prefix ; - local cmds-to-try ; - local interpreter-cmd ; - - local target-os = [ feature.get-values target-os : $(condition) ] ; - target-os ?= [ feature.defaults target-os ] ; - target-os = $(target-os:G=) ; - - if $(target-os) = windows && <python-debugging>on in $(condition) - { - extension-suffix ?= _d ; - } - extension-suffix ?= "" ; - - # Normalize and dissect any version number. - local major-minor ; - if $(version) - { - major-minor = [ split-version $(version) ] ; - version = $(major-minor:J=.) ; - } - - local cmds-to-try ; - - if ! $(cmd-or-prefix) || [ GLOB $(cmd-or-prefix) : * ] - { - # If the user did not pass a command, whatever we got was a prefix. - prefix = $(cmd-or-prefix) ; - cmds-to-try = [ candidate-interpreters $(version) : $(prefix) : $(target-os) ] ; - } - else - { - # Work with the command the user gave us. - cmds-to-try = $(cmd-or-prefix) ; - - # On Windows, do not nail down the interpreter command just yet in case - # the user specified something that turns out to be a cygwin symlink, - # which could bring down bjam if we invoke it. - if $(target-os) != windows - { - interpreter-cmd = $(cmd-or-prefix) ; - } - } - - # Values to use in case we can not really find anything in the system. - local fallback-cmd = $(cmds-to-try[1]) ; - local fallback-version ; - - # Anything left to find or check? - if ! ( $(interpreter-cmd) && $(includes) && $(libraries) ) - { - # Values to be extracted from python's sys module. These will be set by - # the probe rule, above, using Jam's dynamic scoping. - local sys-elements = version platform prefix exec_prefix executable ; - local sys.$(sys-elements) ; - - # Compute the string Python's sys.platform needs to match. If not - # targeting Windows or cygwin we will assume only native builds can - # possibly run, so we will not require a match and we leave sys.platform - # blank. - local platform ; - switch $(target-os) - { - case windows : platform = win32 ; - case cygwin : platform = cygwin ; - } - - while $(cmds-to-try) - { - # Pop top command. - local cmd = $(cmds-to-try[1]) ; - cmds-to-try = $(cmds-to-try[2-]) ; - - debug-message Checking interpreter command \"$(cmd)\"... ; - if [ probe $(cmd) ] - { - fallback-version ?= $(sys.version) ; - - # Check for version/platform validity. - for local x in version platform - { - if $($(x)) && $($(x)) != $(sys.$(x)) - { - debug-message ...$(x) "mismatch (looking for" - $($(x)) but found $(sys.$(x))")" ; - cmd = ; - } - } - - if $(cmd) - { - debug-message ...requested configuration matched! ; - - exec-prefix = $(sys.exec_prefix) ; - - compute-default-paths $(target-os) : $(sys.version) : - $(sys.prefix) : $(sys.exec_prefix) ; - - version = $(sys.version) ; - interpreter-cmd ?= $(cmd) ; - cmds-to-try = ; # All done. - } - } - else - { - debug-message ...does not invoke a working interpreter ; - } - } - } - - # Anything left to compute? 
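
declare-libpython-target, just above, derives the link-library name from the version: on Windows the dots are dropped and a _d suffix marks <python-debugging>on builds. As string arithmetic this is simply:

    def libpython_name(version, windows=False, python_debugging=False):
        # "2.5" -> python25 (or python25_d) on Windows, python2.5 elsewhere.
        if windows:
            major, minor = version.split(".")[:2]
            return "python" + major + minor + ("_d" if python_debugging else "")
        return "python" + version

    assert libpython_name("2.5", windows=True, python_debugging=True) == "python25_d"
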
- if $(includes) && $(libraries) - { - .configured = true ; - } - else - { - version ?= $(fallback-version) ; - version ?= 2.5 ; - exec-prefix ?= $(prefix) ; - compute-default-paths $(target-os) : $(version) : $(prefix:E=) ; - } - - if ! $(interpreter-cmd) - { - fallback-cmd ?= python ; - debug-message No working Python interpreter found. ; - if [ os.name ] != NT || ! [ invokes-cygwin-symlink $(fallback-cmd) ] - { - interpreter-cmd = $(fallback-cmd) ; - debug-message falling back to \"$(interpreter-cmd)\" ; - } - } - - includes = [ path-to-native $(includes) ] ; - libraries = [ path-to-native $(libraries) ] ; - - debug-message "Details of this Python configuration:" ; - debug-message " interpreter command:" \"$(interpreter-cmd:E=<empty>)\" ; - debug-message " include path:" \"$(includes:E=<empty>)\" ; - debug-message " library path:" \"$(libraries:E=<empty>)\" ; - if $(target-os) = windows - { - debug-message " DLL search path:" \"$(exec-prefix:E=<empty>)\" ; - } - - # - # End autoconfiguration sequence. - # - local target-requirements = $(condition) ; - - # Add the version, if any, to the target requirements. - if $(version) - { - if ! $(version) in [ feature.values python ] - { - feature.extend python : $(version) ; - } - target-requirements += <python>$(version:E=default) ; - } - - target-requirements += <target-os>$(target-os) ; - - # See if we can find a framework directory on darwin. - local framework-directory ; - if $(target-os) = darwin - { - # Search upward for the framework directory. - local framework-directory = $(libraries[-1]) ; - while $(framework-directory:D=) && $(framework-directory:D=) != Python.framework - { - framework-directory = $(framework-directory:D) ; - } - - if $(framework-directory:D=) = Python.framework - { - debug-message framework directory is \"$(framework-directory)\" ; - } - else - { - debug-message "no framework directory found; using library path" ; - framework-directory = ; - } - } - - local dll-path = $(libraries) ; - - # Make sure that we can find the Python DLL on Windows. - if ( $(target-os) = windows ) && $(exec-prefix) - { - dll-path += $(exec-prefix) ; - } - - # - # Prepare usage requirements. - # - local usage-requirements = [ system-library-dependencies $(target-os) ] ; - usage-requirements += <include>$(includes) <python.interpreter>$(interpreter-cmd) ; - if <python-debugging>on in $(condition) - { - if $(target-os) = windows - { - # In pyconfig.h, Py_DEBUG is set if _DEBUG is set. If we define - # Py_DEBUG we will get multiple definition warnings. - usage-requirements += <define>_DEBUG ; - } - else - { - usage-requirements += <define>Py_DEBUG ; - } - } - - # Global, but conditional, requirements to give access to the interpreter - # for general utilities, like other toolsets, that run Python scripts. - toolset.add-requirements - $(target-requirements:J=,):<python.interpreter>$(interpreter-cmd) ; - - # Register the right suffix for extensions. - register-extension-suffix $(extension-suffix) : $(target-requirements) ; - - # - # Declare the "python" target. This should really be called - # python_for_embedding. - # - - if $(framework-directory) - { - alias python - : - : $(target-requirements) - : - : $(usage-requirements) <framework>$(framework-directory) - ; - } - else - { - declare-libpython-target $(version) : $(target-requirements) ; - - # This is an evil hack. On, Windows, when Python is embedded, nothing - # seems to set up sys.path to include Python's standard library - # (http://article.gmane.org/gmane.comp.python.general/544986). 
The evil - # here, aside from the workaround necessitated by Python's bug, is that: - # - # a. we're guessing the location of the python standard library from the - # location of pythonXX.lib - # - # b. we're hijacking the <testing.launcher> property to get the - # environment variable set up, and the user may want to use it for - # something else (e.g. launch the debugger). - local set-PYTHONPATH ; - if $(target-os) = windows - { - set-PYTHONPATH = [ common.prepend-path-variable-command PYTHONPATH : - $(libraries:D)/Lib ] ; - } - - alias python - : - : $(target-requirements) - : - # Why python.lib must be listed here instead of along with the - # system libs is a mystery, but if we do not do it, on cygwin, - # -lpythonX.Y never appears in the command line (although it does on - # linux). - : $(usage-requirements) - <testing.launcher>$(set-PYTHONPATH) - <library-path>$(libraries) <library>python.lib - ; - } - - # On *nix, we do not want to link either Boost.Python or Python extensions - # to libpython, because the Python interpreter itself provides all those - # symbols. If we linked to libpython, we would get duplicate symbols. So - # declare two targets -- one for building extensions and another for - # embedding. - # - # Unlike most *nix systems, Mac OS X's linker does not permit undefined - # symbols when linking a shared library. So, we still need to link against - # the Python framework, even when building extensions. Note that framework - # builds of Python always use shared libraries, so we do not need to worry - # about duplicate Python symbols. - if $(target-os) in windows cygwin darwin - { - alias python_for_extensions : python : $(target-requirements) ; - } - # On AIX we need Python extensions and Boost.Python to import symbols from - # the Python interpreter. Dynamic libraries opened with dlopen() do not - # inherit the symbols from the Python interpreter. 
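-    # (Illustration of the flag used below: -bI names an AIX linker import
-    # file, so python.exp from Python's library directory is what supplies
-    # the interpreter's symbols at load time.)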
- else if $(target-os) = aix - { - alias python_for_extensions - : - : $(target-requirements) - : - : $(usage-requirements) <linkflags>-Wl,-bI:$(libraries[1])/python.exp - ; - } - else - { - alias python_for_extensions - : - : $(target-requirements) - : - : $(usage-requirements) - ; - } -} - - -rule configured ( ) -{ - return $(.configured) ; -} - - -type.register PYTHON_EXTENSION : : SHARED_LIB ; - - -local rule register-extension-suffix ( root : condition * ) -{ - local suffix ; - - switch [ feature.get-values target-os : $(condition) ] - { - case windows : suffix = pyd ; - case cygwin : suffix = dll ; - case hpux : - { - if [ feature.get-values python : $(condition) ] in 1.5 1.6 2.0 2.1 2.2 2.3 2.4 - { - suffix = sl ; - } - else - { - suffix = so ; - } - } - case * : suffix = so ; - } - - type.set-generated-target-suffix PYTHON_EXTENSION : $(condition) : <$(root).$(suffix)> ; -} - - -# Unset 'lib' prefix for PYTHON_EXTENSION -type.set-generated-target-prefix PYTHON_EXTENSION : : "" ; - - -rule python-extension ( name : sources * : requirements * : default-build * : - usage-requirements * ) -{ - if [ configured ] - { - requirements += <use>/python//python_for_extensions ; - } - requirements += <suppress-import-lib>true ; - - local project = [ project.current ] ; - - targets.main-target-alternative - [ new typed-target $(name) : $(project) : PYTHON_EXTENSION - : [ targets.main-target-sources $(sources) : $(name) ] - : [ targets.main-target-requirements $(requirements) : $(project) ] - : [ targets.main-target-default-build $(default-build) : $(project) ] - ] ; -} - -IMPORT python : python-extension : : python-extension ; - -rule py2to3 -{ - common.copy $(>) $(<) ; - 2to3 $(<) ; -} - -actions 2to3 -{ - 2to3 -wn "$(<)" - 2to3 -dwn "$(<)" -} - - -# Support for testing. -type.register PY : py ; -type.register RUN_PYD_OUTPUT ; -type.register RUN_PYD : : TEST ; - - -class python-test-generator : generator -{ - import set ; - - rule __init__ ( * : * ) - { - generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - self.composing = true ; - } - - rule run ( project name ? : property-set : sources * : multiple ? ) - { - local pyversion = [ $(property-set).get <python> ] ; - local python ; - local other-pythons ; - - # Make new target that converting Python source by 2to3 when running with Python 3. - local rule make-2to3-source ( source ) - { - if $(pyversion) >= 3.0 - { - local a = [ new action $(source) : python.py2to3 : $(property-set) ] ; - local t = [ utility.basename [ $(s).name ] ] ; - local p = [ new file-target $(t) : PY : $(project) : $(a) ] ; - return $(p) ; - } - else - { - return $(source) ; - } - } - - for local s in $(sources) - { - if [ $(s).type ] = PY - { - if ! $(python) - { - # First Python source ends up on command line. - python = [ make-2to3-source $(s) ] ; - - } - else - { - # Other Python sources become dependencies. - other-pythons += [ make-2to3-source $(s) ] ; - } - } - } - - local extensions ; - for local s in $(sources) - { - if [ $(s).type ] = PYTHON_EXTENSION - { - extensions += $(s) ; - } - } - - local libs ; - for local s in $(sources) - { - if [ type.is-derived [ $(s).type ] LIB ] - && ! 
$(s) in $(extensions) - { - libs += $(s) ; - } - } - - local new-sources ; - for local s in $(sources) - { - if [ type.is-derived [ $(s).type ] CPP ] - { - local name = [ utility.basename [ $(s).name ] ] ; - if $(name) = [ utility.basename [ $(python).name ] ] - { - name = $(name)_ext ; - } - local extension = [ generators.construct $(project) $(name) : - PYTHON_EXTENSION : $(property-set) : $(s) $(libs) ] ; - - # The important part of usage requirements returned from - # PYTHON_EXTENSION generator are xdll-path properties that will - # allow us to find the python extension at runtime. - property-set = [ $(property-set).add $(extension[1]) ] ; - - # Ignore usage requirements. We're a top-level generator and - # nobody is going to use what we generate. - new-sources += $(extension[2-]) ; - } - } - - property-set = [ $(property-set).add-raw <dependency>$(other-pythons) ] ; - - result = [ construct-result $(python) $(extensions) $(new-sources) : - $(project) $(name) : $(property-set) ] ; - } -} - - -generators.register - [ new python-test-generator python.capture-output : : RUN_PYD_OUTPUT ] ; - -generators.register-standard testing.expect-success - : RUN_PYD_OUTPUT : RUN_PYD ; - - -# There are two different ways of spelling OS names. One is used for [ os.name ] -# and the other is used for the <host-os> and <target-os> properties. Until that -# is remedied, this sets up a crude mapping from the latter to the former, that -# will work *for the purposes of cygwin/NT cross-builds only*. Could not think -# of a better name than "translate". -# -.translate-os-windows = NT ; -.translate-os-cygwin = CYGWIN ; -local rule translate-os ( src-os ) -{ - local x = $(.translate-os-$(src-os)) [ os.name ] ; - return $(x[1]) ; -} - - -# Extract the path to a single ".pyd" source. This is used to build the -# PYTHONPATH for running bpl tests. -# -local rule pyd-pythonpath ( source ) -{ - return [ on $(source) return $(LOCATE) $(SEARCH) ] ; -} - - -# The flag settings on testing.capture-output do not apply to python.capture -# output at the moment. Redo this explicitly. -toolset.flags python.capture-output ARGS <testing.arg> ; - - -rule capture-output ( target : sources * : properties * ) -{ - # Setup up a proper DLL search path. Here, $(sources[1]) is a python module - # and $(sources[2]) is a DLL. Only $(sources[1]) is passed to - # testing.capture-output, so RUN_PATH variable on $(sources[2]) is not - # consulted. Move it over explicitly. - RUN_PATH on $(sources[1]) = [ on $(sources[2-]) return $(RUN_PATH) ] ; - - PYTHONPATH = [ sequence.transform pyd-pythonpath : $(sources[2-]) ] ; - PYTHONPATH += [ feature.get-values pythonpath : $(properties) ] ; - - # After test is run, we remove the Python module, but not the Python script. - testing.capture-output $(target) : $(sources[1]) : $(properties) : - $(sources[2-]) ; - - # PYTHONPATH is different; it will be interpreted by whichever Python is - # invoked and so must follow path rules for the target os. The only OSes - # where we can run python for other OSes currently are NT and CYGWIN so we - # only need to handle those cases. - local target-os = [ feature.get-values target-os : $(properties) ] ; - # Oddly, host-os is not in properties, so grab the default value. 
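-    # (The converter rule name below is derived from the two OS names, so a
-    # cygwin target built on a windows host would call a rule named
-    # windows-to-cygwin-path, assuming one is defined.)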
- local host-os = [ feature.defaults host-os ] ; - host-os = $(host-os:G=) ; - if $(target-os) != $(host-os) - { - PYTHONPATH = [ sequence.transform $(host-os)-to-$(target-os)-path : - $(PYTHONPATH) ] ; - } - local path-separator = [ os.path-separator [ translate-os $(target-os) ] ] ; - local set-PYTHONPATH = [ common.variable-setting-command PYTHONPATH : - $(PYTHONPATH:J=$(path-separator)) ] ; - LAUNCHER on $(target) = $(set-PYTHONPATH) [ on $(target) return \"$(PYTHON)\" ] ; -} - - -rule bpl-test ( name : sources * : requirements * ) -{ - local s ; - sources ?= $(name).py $(name).cpp ; - return [ testing.make-test run-pyd : $(sources) /boost/python//boost_python - : $(requirements) : $(name) ] ; -} - - -IMPORT $(__name__) : bpl-test : : bpl-test ; diff --git a/jam-files/boost-build/tools/qcc.jam b/jam-files/boost-build/tools/qcc.jam deleted file mode 100644 index 4f2a4fc1..00000000 --- a/jam-files/boost-build/tools/qcc.jam +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright (c) 2001 David Abrahams. -# Copyright (c) 2002-2003 Rene Rivera. -# Copyright (c) 2002-2003 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import "class" : new ; -import common ; -import errors ; -import feature ; -import generators ; -import os ; -import property ; -import set ; -import toolset ; -import type ; -import unix ; - -feature.extend toolset : qcc ; - -toolset.inherit-generators qcc : unix : unix.link unix.link.dll ; -generators.override builtin.lib-generator : qcc.prebuilt ; -toolset.inherit-flags qcc : unix ; -toolset.inherit-rules qcc : unix ; - -# Initializes the qcc toolset for the given version. If necessary, command may -# be used to specify where the compiler is located. The parameter 'options' is a -# space-delimited list of options, each one being specified as -# <option-name>option-value. Valid option names are: cxxflags, linkflags and -# linker-type. Accepted values for linker-type are gnu and sun, gnu being the -# default. -# -# Example: -# using qcc : 3.4 : : <cxxflags>foo <linkflags>bar <linker-type>sun ; -# -rule init ( version ? : command * : options * ) -{ - local condition = [ common.check-init-parameters qcc : version $(version) ] ; - local command = [ common.get-invocation-command qcc : QCC : $(command) ] ; - common.handle-options qcc : $(condition) : $(command) : $(options) ; -} - - -generators.register-c-compiler qcc.compile.c++ : CPP : OBJ : <toolset>qcc ; -generators.register-c-compiler qcc.compile.c : C : OBJ : <toolset>qcc ; -generators.register-c-compiler qcc.compile.asm : ASM : OBJ : <toolset>qcc ; - - -# Declare flags for compilation. -toolset.flags qcc.compile OPTIONS <debug-symbols>on : -gstabs+ ; - -# Declare flags and action for compilation. 
-toolset.flags qcc.compile OPTIONS <optimization>off : -O0 ; -toolset.flags qcc.compile OPTIONS <optimization>speed : -O3 ; -toolset.flags qcc.compile OPTIONS <optimization>space : -Os ; - -toolset.flags qcc.compile OPTIONS <inlining>off : -Wc,-fno-inline ; -toolset.flags qcc.compile OPTIONS <inlining>on : -Wc,-Wno-inline ; -toolset.flags qcc.compile OPTIONS <inlining>full : -Wc,-finline-functions -Wc,-Wno-inline ; - -toolset.flags qcc.compile OPTIONS <warnings>off : -w ; -toolset.flags qcc.compile OPTIONS <warnings>all : -Wc,-Wall ; -toolset.flags qcc.compile OPTIONS <warnings-as-errors>on : -Wc,-Werror ; - -toolset.flags qcc.compile OPTIONS <profiling>on : -p ; - -toolset.flags qcc.compile OPTIONS <cflags> ; -toolset.flags qcc.compile.c++ OPTIONS <cxxflags> ; -toolset.flags qcc.compile DEFINES <define> ; -toolset.flags qcc.compile INCLUDES <include> ; - -toolset.flags qcc.compile OPTIONS <link>shared : -shared ; - -toolset.flags qcc.compile.c++ TEMPLATE_DEPTH <c++-template-depth> ; - - -rule compile.c++ -{ - # Here we want to raise the template-depth parameter value to something - # higher than the default value of 17. Note that we could do this using the - # feature.set-default rule but we do not want to set the default value for - # all toolsets as well. - # - # TODO: This 'modified default' has been inherited from some 'older Boost - # Build implementation' and has most likely been added to make some Boost - # library parts compile correctly. We should see what exactly prompted this - # and whether we can get around the problem more locally. - local template-depth = [ on $(1) return $(TEMPLATE_DEPTH) ] ; - if ! $(template-depth) - { - TEMPLATE_DEPTH on $(1) = 128 ; - } -} - -actions compile.c++ -{ - "$(CONFIG_COMMAND)" -Wc,-ftemplate-depth-$(TEMPLATE_DEPTH) $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.c -{ - "$(CONFIG_COMMAND)" $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.asm -{ - "$(CONFIG_COMMAND)" $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - - -# The class checking that we do not try to use the <runtime-link>static property -# while creating or using a shared library, since it is not supported by qcc/ -# /libc. -# -class qcc-linking-generator : unix-linking-generator -{ - rule generated-targets ( sources + : property-set : project name ? ) - { - if <runtime-link>static in [ $(property-set).raw ] - { - local m ; - if [ id ] = "qcc.link.dll" - { - m = "on qcc, DLL can't be build with <runtime-link>static" ; - } - if ! $(m) - { - for local s in $(sources) - { - local type = [ $(s).type ] ; - if $(type) && [ type.is-derived $(type) SHARED_LIB ] - { - m = "on qcc, using DLLS together with the <runtime-link>static options is not possible " ; - } - } - } - if $(m) - { - errors.user-error $(m) : "It is suggested to use" - "<runtime-link>static together with <link>static." ; - } - } - - return [ unix-linking-generator.generated-targets - $(sources) : $(property-set) : $(project) $(name) ] ; - } -} - -generators.register [ new qcc-linking-generator qcc.link : LIB OBJ : EXE - : <toolset>qcc ] ; - -generators.register [ new qcc-linking-generator qcc.link.dll : LIB OBJ - : SHARED_LIB : <toolset>qcc ] ; - -generators.override qcc.prebuilt : builtin.prebuilt ; -generators.override qcc.searched-lib-generator : searched-lib-generator ; - - -# Declare flags for linking. -# First, the common flags. 
-toolset.flags qcc.link OPTIONS <debug-symbols>on : -gstabs+ ;
-toolset.flags qcc.link OPTIONS <profiling>on : -p ;
-toolset.flags qcc.link OPTIONS <linkflags> ;
-toolset.flags qcc.link LINKPATH <library-path> ;
-toolset.flags qcc.link FINDLIBS-ST <find-static-library> ;
-toolset.flags qcc.link FINDLIBS-SA <find-shared-library> ;
-toolset.flags qcc.link LIBRARIES <library-file> ;
-
-toolset.flags qcc.link FINDLIBS-SA : m ;
-
-# For <runtime-link>static we made sure there are no dynamic libraries in the
-# link.
-toolset.flags qcc.link OPTIONS <runtime-link>static : -static ;
-
-# Assuming this is just like with gcc.
-toolset.flags qcc.link RPATH : <dll-path> : unchecked ;
-toolset.flags qcc.link RPATH_LINK : <xdll-path> : unchecked ;
-
-
-# Declare actions for linking.
-#
-rule link ( targets * : sources * : properties * )
-{
-    SPACE on $(targets) = " " ;
-    # Serialize execution of the 'link' action, since running N links in
-    # parallel is just slower. For now, serialize only qcc links while it might
-    # be a good idea to serialize all links.
-    JAM_SEMAPHORE on $(targets) = <s>qcc-link-semaphore ;
-}
-
-actions link bind LIBRARIES
-{
-    "$(CONFIG_COMMAND)" -L"$(LINKPATH)" -Wl,-R$(SPACE)-Wl,"$(RPATH)" -Wl,-rpath-link$(SPACE)-Wl,"$(RPATH_LINK)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-ST) -l$(FINDLIBS-SA) $(OPTIONS)
-}
-
-
-# Always remove archive and start again. Here is the rationale from Andre Hentz:
-# I had a file, say a1.c, that was included into liba.a. I moved a1.c to a2.c,
-# updated my Jamfiles and rebuilt. My program was crashing with absurd errors.
-# After some debugging I traced it back to the fact that a1.o was *still* in
-# liba.a
-RM = [ common.rm-command ] ;
-if [ os.name ] = NT
-{
-    RM = "if exist \"$(<[1])\" DEL \"$(<[1])\"" ;
-}
-
-
-# Declare action for creating static libraries. The 'r' letter means to add
-# files to the archive with replacement. Since we remove the archive, we do not
-# care about replacement, but there is no option to "add without replacement".
-# The 'c' letter suppresses warnings in case the archive does not exist yet.
-# That warning is produced only on some platforms, for whatever reason.
-#
-actions piecemeal archive
-{
-    $(RM) "$(<)"
-    ar rc "$(<)" "$(>)"
-}
-
-
-rule link.dll ( targets * : sources * : properties * )
-{
-    SPACE on $(targets) = " " ;
-    JAM_SEMAPHORE on $(targets) = <s>qcc-link-semaphore ;
-}
-
-
-# Differs from 'link' above only by -shared.
-#
-actions link.dll bind LIBRARIES
-{
-    "$(CONFIG_COMMAND)" -L"$(LINKPATH)" -Wl,-R$(SPACE)-Wl,"$(RPATH)" -o "$(<)" $(HAVE_SONAME)-Wl,-h$(SPACE)-Wl,$(<[1]:D=) -shared "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-ST) -l$(FINDLIBS-SA) $(OPTIONS)
-}
diff --git a/jam-files/boost-build/tools/qt.jam b/jam-files/boost-build/tools/qt.jam
deleted file mode 100644
index 8aa7ca26..00000000
--- a/jam-files/boost-build/tools/qt.jam
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2006 Vladimir Prus.
-#
-# Use, modification and distribution is subject to the Boost Software
-# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
-# http://www.boost.org/LICENSE_1_0.txt)
-
-# Forwarding toolset file for the Qt GUI library. Forwards to the toolset file
-# for the current version of Qt.
-
-import qt4 ;
-
-rule init ( prefix : full_bin ? : full_inc ? : full_lib ? : version ?
-    : condition * )
-{
-    qt4.init $(prefix) : $(full_bin) : $(full_inc) : $(full_lib) : $(version) : $(condition) ;
-}
-
-
diff --git a/jam-files/boost-build/tools/qt3.jam b/jam-files/boost-build/tools/qt3.jam
deleted file mode 100644
index f82cf0ac..00000000
--- a/jam-files/boost-build/tools/qt3.jam
+++ /dev/null
@@ -1,209 +0,0 @@
-# Copyright 2006 Vladimir Prus
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-# Support for the Qt GUI library version 3
-# (http://www.trolltech.com/products/qt3/index.html).
-# For new developments, it is recommended to use Qt4 via the qt4 Boost.Build
-# module.
-
-import modules ;
-import feature ;
-import errors ;
-import type ;
-import "class" : new ;
-import generators ;
-import project ;
-import toolset : flags ;
-
-# Convert this module into a project, so that we can declare targets here.
-project.initialize $(__name__) ;
-project qt3 ;
-
-
-# Initializes the Qt support module. The 'prefix' parameter tells where QT is
-# installed. When not given, the QTDIR environment variable should be set.
-#
-rule init ( prefix ? )
-{
-    if ! $(prefix)
-    {
-        prefix = [ modules.peek : QTDIR ] ;
-        if ! $(prefix)
-        {
-            errors.error
-                "QT installation prefix not given and QTDIR variable is empty" ;
-        }
-    }
-
-    if $(.initialized)
-    {
-        if $(prefix) != $(.prefix)
-        {
-            errors.error
-                "Attempt to reinitialize Qt with a different installation prefix" ;
-        }
-    }
-    else
-    {
-        .initialized = true ;
-        .prefix = $(prefix) ;
-
-        generators.register-standard qt3.moc : H : CPP(moc_%) : <allow>qt3 ;
-        # Note: the OBJ target type here is fake, take a look at
-        # qt4.jam/uic-h-generator for explanations that apply in this case as
-        # well.
-        generators.register [ new moc-h-generator-qt3
-            qt3.moc.cpp : MOCCABLE_CPP : OBJ : <allow>qt3 ] ;
-
-        # The UI type is defined in types/qt.jam, and UIC_H is only used in
-        # qt.jam, but not in qt4.jam, so define it here.
-        type.register UIC_H : : H ;
-
-        generators.register-standard qt3.uic-h : UI : UIC_H : <allow>qt3 ;
-
-        # The following generator is used to convert UI files to CPP. It creates
-        # UIC_H from UI, and constructs CPP from UI/UIC_H. In addition, it also
-        # returns the UIC_H target, so that it can be mocced.
-        class qt::uic-cpp-generator : generator
-        {
-            rule __init__ ( )
-            {
-                generator.__init__ qt3.uic-cpp : UI UIC_H : CPP : <allow>qt3 ;
-            }
-
-            rule run ( project name ? : properties * : sources + )
-            {
-                # Consider this:
-                #    obj test : test_a.cpp : <optimization>off ;
-                #
-                # This generator will somehow be called in this case, and,
-                # will fail -- which is okay. However, if there are <library>
-                # properties they will be converted to sources, so the size of
-                # 'sources' will be more than 1. In this case, the base generator
-                # will just crash -- and that's not good. Just use a quick test
-                # here.
-
-                local result ;
-                if ! $(sources[2])
-                {
-                    # Construct CPP as usual
-                    result = [ generator.run $(project) $(name)
-                        : $(properties) : $(sources) ] ;
-
-                    # If OK, process UIC_H with moc. It's pretty clear that
-                    # the object generated with UIC will have Q_OBJECT macro.
- if $(result) - { - local action = [ $(result[1]).action ] ; - local sources = [ $(action).sources ] ; - local mocced = [ generators.construct $(project) $(name) - : CPP : $(properties) : $(sources[2]) ] ; - result += $(mocced[2-]) ; - } - } - - return $(result) ; - } - } - - generators.register [ new qt::uic-cpp-generator ] ; - - # Finally, declare prebuilt target for QT library. - local usage-requirements = - <include>$(.prefix)/include - <dll-path>$(.prefix)/lib - <library-path>$(.prefix)/lib - <allow>qt3 - ; - lib qt : : <name>qt-mt <threading>multi : : $(usage-requirements) ; - lib qt : : <name>qt <threading>single : : $(usage-requirements) ; - } -} - -class moc-h-generator-qt3 : generator -{ - rule __init__ ( * : * ) - { - generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule run ( project name ? : property-set : sources * ) - { - if ! $(sources[2]) && [ $(sources[1]).type ] = MOCCABLE_CPP - { - name = [ $(sources[1]).name ] ; - name = $(name:B) ; - - local a = [ new action $(sources[1]) : qt3.moc.cpp : - $(property-set) ] ; - - local target = [ - new file-target $(name) : MOC : $(project) : $(a) ] ; - - local r = [ virtual-target.register $(target) ] ; - - # Since this generator will return a H target, the linking generator - # won't use it at all, and won't set any dependency on it. However, - # we need the target to be seen by bjam, so that the dependency from - # sources to this generated header is detected -- if Jam does not - # know about this target, it won't do anything. - DEPENDS all : [ $(r).actualize ] ; - - return $(r) ; - } - } -} - - -# Query the installation directory. This is needed in at least two scenarios. -# First, when re-using sources from the Qt-Tree. Second, to "install" custom Qt -# plugins to the Qt-Tree. -# -rule directory -{ - return $(.prefix) ; -} - -# -f forces moc to include the processed source file. Without it, it would think -# that .qpp is not a header and would not include it from the generated file. -# -actions moc -{ - $(.prefix)/bin/moc -f $(>) -o $(<) -} - -# When moccing .cpp files, we don't need -f, otherwise generated code will -# include .cpp and we'll get duplicated symbols. -# -actions moc.cpp -{ - $(.prefix)/bin/moc $(>) -o $(<) -} - - -space = " " ; - -# Sometimes it's required to make 'plugins' available during uic invocation. To -# help with this we add paths to all dependency libraries to uic commane line. -# The intention is that it's possible to write -# -# exe a : ... a.ui ... : <uses>some_plugin ; -# -# and have everything work. We'd add quite a bunch of unrelated paths but it -# won't hurt. -# -flags qt3.uic-h LIBRARY_PATH <xdll-path> ; -actions uic-h -{ - $(.prefix)/bin/uic $(>) -o $(<) -L$(space)$(LIBRARY_PATH) -} - - -flags qt3.uic-cpp LIBRARY_PATH <xdll-path> ; -# The second target is uic-generated header name. It's placed in build dir, but -# we want to include it using only basename. -actions uic-cpp -{ - $(.prefix)/bin/uic $(>[1]) -i $(>[2]:D=) -o $(<) -L$(space)$(LIBRARY_PATH) -} diff --git a/jam-files/boost-build/tools/qt4.jam b/jam-files/boost-build/tools/qt4.jam deleted file mode 100644 index 71d1b762..00000000 --- a/jam-files/boost-build/tools/qt4.jam +++ /dev/null @@ -1,724 +0,0 @@ -# Copyright 2002-2006 Vladimir Prus -# Copyright 2005 Alo Sarv -# Copyright 2005-2009 Juergen Hunold -# -# Distributed under the Boost Software License, Version 1.0. 
(See -# accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -# Qt4 library support module -# -# The module attempts to auto-detect QT installation location from QTDIR -# environment variable; failing that, installation location can be passed as -# argument: -# -# toolset.using qt4 : /usr/local/Trolltech/Qt-4.0.0 ; -# -# The module supports code generation from .ui and .qrc files, as well as -# running the moc preprocessor on headers. Note that you must list all your -# moc-able headers in sources. -# -# Example: -# -# exe myapp : myapp.cpp myapp.h myapp.ui myapp.qrc -# /qt4//QtGui /qt4//QtNetwork ; -# -# It's also possible to run moc on cpp sources: -# -# import cast ; -# -# exe myapp : myapp.cpp [ cast _ moccable-cpp : myapp.cpp ] /qt4//QtGui ; -# -# When moccing source file myapp.cpp you need to include "myapp.moc" from -# myapp.cpp. When moccing .h files, the output of moc will be automatically -# compiled and linked in, you don't need any includes. -# -# This is consistent with Qt guidelines: -# http://doc.trolltech.com/4.0/moc.html - -import modules ; -import feature ; -import errors ; -import type ; -import "class" : new ; -import generators ; -import project ; -import toolset : flags ; -import os ; -import virtual-target ; -import scanner ; - -# Qt3Support control feature -# -# Qt4 configure defaults to build Qt4 libraries with Qt3Support. -# The autodetection is missing, so we default to disable Qt3Support. -# This prevents the user from inadvertedly using a deprecated API. -# -# The Qt3Support library can be activated by adding -# "<qt3support>on" to requirements -# -# Use "<qt3support>on:<define>QT3_SUPPORT_WARNINGS" -# to get warnings about deprecated Qt3 support funtions and classes. -# Files ported by the "qt3to4" conversion tool contain _tons_ of -# warnings, so this define is not set as default. -# -# Todo: Detect Qt3Support from Qt's configure data. -# Or add more auto-configuration (like python). -feature.feature qt3support : off on : propagated link-incompatible ; - -# The Qt version used for requirements -# Valid are <qt>4.4 or <qt>4.5.0 -# Auto-detection via qmake sets '<qt>major.minor.patch' -feature.feature qt : : propagated ; - -project.initialize $(__name__) ; -project qt ; - -# Save the project so that we tolerate 'import + using' combo. -.project = [ project.current ] ; - -# Helper utils for easy debug output -if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ] -{ - .debug-configuration = TRUE ; -} - -local rule debug-message ( message * ) -{ - if $(.debug-configuration) = TRUE - { - ECHO notice: [qt4-cfg] $(message) ; - } -} - -# Capture qmake output line by line -local rule read-output ( content ) -{ - local lines ; - local nl = " -" ; - local << = "([^$(nl)]*)[$(nl)](.*)" ; - local line+ = [ MATCH "$(<<)" : "$(content)" ] ; - while $(line+) - { - lines += $(line+[1]) ; - line+ = [ MATCH "$(<<)" : "$(line+[2])" ] ; - } - return $(lines) ; -} - -# Capture Qt version from qmake -local rule check-version ( bin_prefix ) -{ - full-cmd = $(bin_prefix)"/qmake -v" ; - debug-message Running '$(full-cmd)' ; - local output = [ SHELL $(full-cmd) ] ; - for line in [ read-output $(output) ] - { - # Parse the output to get all the results. - if [ MATCH "QMake" : $(line) ] - { - # Skip first line of output - } - else - { - temp = [ MATCH "([0-9]*)\\.([0-9]*)\\.([0-9]*)" : $(line) ] ; - } - } - return $(temp) ; -} - -# Validate the version string and extract the major/minor part we care about. 
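-# (For illustration: "4.5" splits cleanly into "4" "5", while "4.5.0" also
-# yields "4" "5" but triggers the warning below, since only a major.minor
-# pair is expected here.)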
-# -local rule split-version ( version ) -{ - local major-minor = [ MATCH ^([0-9]+)\.([0-9]+)(.*)$ : $(version) : 1 2 3 ] ; - if ! $(major-minor[2]) || $(major-minor[3]) - { - ECHO "Warning: 'using qt' expects a two part (major, minor) version number; got" $(version) instead ; - - # Add a zero to account for the missing digit if necessary. - major-minor += 0 ; - } - - return $(major-minor[1]) $(major-minor[2]) ; -} - -# Initialize the QT support module. -# Parameters: -# - 'prefix' parameter tells where Qt is installed. -# - 'full_bin' optional full path to Qt binaries (qmake,moc,uic,rcc) -# - 'full_inc' optional full path to Qt top-level include directory -# - 'full_lib' optional full path to Qt library directory -# - 'version' optional version of Qt, else autodetected via 'qmake -v' -# - 'condition' optional requirements -rule init ( prefix : full_bin ? : full_inc ? : full_lib ? : version ? : condition * ) -{ - project.push-current $(.project) ; - - debug-message "==== Configuring Qt ... ====" ; - for local v in version cmd-or-prefix includes libraries condition - { - if $($(v)) - { - debug-message " user-specified "$(v): '$($(v))' ; - } - } - - # Needed as default value - .prefix = $(prefix) ; - - # pre-build paths to detect reinitializations changes - local inc_prefix lib_prefix bin_prefix ; - if $(full_inc) - { - inc_prefix = $(full_inc) ; - } - else - { - inc_prefix = $(prefix)/include ; - } - if $(full_lib) - { - lib_prefix = $(full_lib) ; - } - else - { - lib_prefix = $(prefix)/lib ; - } - if $(full_bin) - { - bin_prefix = $(full_bin) ; - } - else - { - bin_prefix = $(prefix)/bin ; - } - - # Globally needed variables - .incprefix = $(inc_prefix) ; - .libprefix = $(lib_prefix) ; - .binprefix = $(bin_prefix) ; - - if ! $(.initialized) - { - # Make sure this is initialised only once - .initialized = true ; - - # Generates cpp files from header files using "moc" tool - generators.register-standard qt4.moc : H : CPP(moc_%) : <allow>qt4 ; - - # The OBJ result type is a fake, 'H' will be really produced. See - # comments on the generator class, defined below the 'init' function. - generators.register [ new uic-generator qt4.uic : UI : OBJ : - <allow>qt4 ] ; - - # The OBJ result type is a fake here too. - generators.register [ new moc-h-generator - qt4.moc.inc : MOCCABLE_CPP : OBJ : <allow>qt4 ] ; - - generators.register [ new moc-inc-generator - qt4.moc.inc : MOCCABLE_H : OBJ : <allow>qt4 ] ; - - # Generates .cpp files from .qrc files. - generators.register-standard qt4.rcc : QRC : CPP(qrc_%) ; - - # dependency scanner for wrapped files. - type.set-scanner QRC : qrc-scanner ; - - # Save value of first occuring prefix - .PREFIX = $(prefix) ; - } - - if $(version) - { - major-minor = [ split-version $(version) ] ; - version = $(major-minor:J=.) ; - } - else - { - version = [ check-version $(bin_prefix) ] ; - if $(version) - { - version = $(version:J=.) ; - } - debug-message Detected version '$(version)' ; - } - - local target-requirements = $(condition) ; - - # Add the version, if any, to the target requirements. - if $(version) - { - if ! $(version) in [ feature.values qt ] - { - feature.extend qt : $(version) ; - } - target-requirements += <qt>$(version:E=default) ; - } - - local target-os = [ feature.get-values target-os : $(condition) ] ; - if ! 
$(target-os) - { - target-os ?= [ feature.defaults target-os ] ; - target-os = $(target-os:G=) ; - target-requirements += <target-os>$(target-os) ; - } - - # Build exact requirements for the tools - local tools-requirements = $(target-requirements:J=/) ; - - debug-message "Details of this Qt configuration:" ; - debug-message " prefix: " '$(prefix:E=<empty>)' ; - debug-message " binary path: " '$(bin_prefix:E=<empty>)' ; - debug-message " include path:" '$(inc_prefix:E=<empty>)' ; - debug-message " library path:" '$(lib_prefix:E=<empty>)' ; - debug-message " target requirements:" '$(target-requirements)' ; - debug-message " tool requirements: " '$(tools-requirements)' ; - - # setup the paths for the tools - toolset.flags qt4.moc .BINPREFIX $(tools-requirements) : $(bin_prefix) ; - toolset.flags qt4.rcc .BINPREFIX $(tools-requirements) : $(bin_prefix) ; - toolset.flags qt4.uic .BINPREFIX $(tools-requirements) : $(bin_prefix) ; - - # TODO: 2009-02-12: Better support for directories - # Most likely needed are separate getters for: include,libraries,binaries and sources. - toolset.flags qt4.directory .PREFIX $(tools-requirements) : $(prefix) ; - - # Test for a buildable Qt. - if [ glob $(.prefix)/Jamroot ] - { - .bjam-qt = true - - # this will declare QtCore (and qtmain on <target-os>windows) - add-shared-library QtCore ; - } - else - # Setup common pre-built Qt. - # Special setup for QtCore on which everything depends - { - local usage-requirements = - <include>$(.incprefix) - <library-path>$(.libprefix) - <dll-path>$(.libprefix) - <threading>multi - <allow>qt4 ; - - local suffix ; - - # Since Qt-4.2, debug versions on unix have to be built - # separately and therefore have no suffix. - .suffix_version = "" ; - .suffix_debug = "" ; - - # Control flag for auto-configuration of the debug libraries. - # This setup requires Qt 'configure -debug-and-release'. - # Only available on some platforms. - # ToDo: 2009-02-12: Maybe throw this away and - # require separate setup with <variant>debug as condition. - .have_separate_debug = FALSE ; - - # Setup other platforms - if $(target-os) in windows cygwin - { - .have_separate_debug = TRUE ; - - # On NT, the libs have "4" suffix, and "d" suffix in debug builds. - .suffix_version = "4" ; - .suffix_debug = "d" ; - - # On Windows we must link against the qtmain library - lib qtmain - : # sources - : # requirements - <name>qtmain$(.suffix_debug) - <variant>debug - $(target-requirements) - ; - - lib qtmain - : # sources - : # requirements - <name>qtmain - $(target-requirements) - ; - } - else if $(target-os) = darwin - { - # On MacOS X, both debug and release libraries are available. 
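-            # (With this suffix a debug build would look for, e.g., a library
-            # named QtCore_debug, while release builds use plain QtCore.)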
- .suffix_debug = "_debug" ; - - .have_separate_debug = TRUE ; - - alias qtmain ; - } - else - { - alias qtmain : : $(target-requirements) ; - } - - lib QtCore : qtmain - : # requirements - <name>QtCore$(.suffix_version) - $(target-requirements) - : # default-build - : # usage-requirements - <define>QT_CORE_LIB - <define>QT_NO_DEBUG - <include>$(.incprefix)/QtCore - $(usage-requirements) - ; - - if $(.have_separate_debug) = TRUE - { - debug-message Configure debug libraries with suffix '$(.suffix_debug)' ; - - lib QtCore : $(main) - : # requirements - <name>QtCore$(.suffix_debug)$(.suffix_version) - <variant>debug - $(target-requirements) - : # default-build - : # usage-requirements - <define>QT_CORE_LIB - <include>$(.incprefix)/QtCore - $(usage-requirements) - ; - } - } - - # Initialising the remaining libraries is canonical - # parameters 'module' : 'depends-on' : 'usage-define' : 'requirements' : 'include' - # 'include' only for non-canonical include paths. - add-shared-library QtGui : QtCore : QT_GUI_LIB : $(target-requirements) ; - add-shared-library QtNetwork : QtCore : QT_NETWORK_LIB : $(target-requirements) ; - add-shared-library QtSql : QtCore : QT_SQL_LIB : $(target-requirements) ; - add-shared-library QtXml : QtCore : QT_XML_LIB : $(target-requirements) ; - - add-shared-library Qt3Support : QtGui QtNetwork QtXml QtSql - : QT_QT3SUPPORT_LIB QT3_SUPPORT - : <qt3support>on $(target-requirements) ; - - # Dummy target to enable "<qt3support>off" and - # "<library>/qt//Qt3Support" at the same time. This enables quick - # switching from one to the other for test/porting purposes. - alias Qt3Support : : <qt3support>off $(target-requirements) ; - - # OpenGl Support - add-shared-library QtOpenGL : QtGui : QT_OPENGL_LIB : $(target-requirements) ; - - # SVG-Support (Qt 4.1) - add-shared-library QtSvg : QtXml QtOpenGL : QT_SVG_LIB : $(target-requirements) ; - - # Test-Support (Qt 4.1) - add-shared-library QtTest : QtCore : : $(target-requirements) ; - - # Qt designer library - add-shared-library QtDesigner : QtGui QtXml : : $(target-requirements) ; - add-shared-library QtDesignerComponents : QtGui QtXml : : $(target-requirements) ; - - # Support for dynamic Widgets (Qt 4.1) - add-static-library QtUiTools : QtGui QtXml : $(target-requirements) ; - - # DBus-Support (Qt 4.2) - add-shared-library QtDBus : QtXml : : $(target-requirements) ; - - # Script-Engine (Qt 4.3) - add-shared-library QtScript : QtGui QtXml : QT_SCRIPT_LIB : $(target-requirements) ; - - # Tools for the Script-Engine (Qt 4.5) - add-shared-library QtScriptTools : QtScript : QT_SCRIPTTOOLS_LIB : $(target-requirements) ; - - # WebKit (Qt 4.4) - add-shared-library QtWebKit : QtGui : QT_WEBKIT_LIB : $(target-requirements) ; - - # Phonon Multimedia (Qt 4.4) - add-shared-library phonon : QtGui QtXml : QT_PHONON_LIB : $(target-requirements) ; - - # Multimedia engine (Qt 4.6) - add-shared-library QtMultimedia : QtGui : QT_MULTIMEDIA_LIB : $(target-requirements) ; - - # XmlPatterns-Engine (Qt 4.4) - add-shared-library QtXmlPatterns : QtNetwork : QT_XMLPATTERNS_LIB : $(target-requirements) ; - - # Help-Engine (Qt 4.4) - add-shared-library QtHelp : QtGui QtSql QtXml : : $(target-requirements) ; - add-shared-library QtCLucene : QCore QtSql QtXml : : $(target-requirements) ; - - # QML-Engine (Qt 4.7) - add-shared-library QtDeclarative : QtGui QtXml : : $(target-requirements) ; - - # AssistantClient Support - # Compat library removed in 4.7.0 - # Pre-4.4 help system, use QtHelp for new programs - if $(version) < "4.7" - { - 
add-shared-library QtAssistantClient : QtGui : : $(target-requirements) : QtAssistant ; - } - debug-message "==== Configured Qt-$(version) ====" ; - - project.pop-current ; -} - -rule initialized ( ) -{ - return $(.initialized) ; -} - - - -# This custom generator is needed because in QT4, UI files are translated only -# into H files, and no C++ files are created. Further, the H files need not be -# passed via MOC. The header is used only via inclusion. If we define a standard -# UI -> H generator, Boost.Build will run MOC on H, and then compile the -# resulting cpp. It will give a warning, since output from moc will be empty. -# -# This generator is declared with a UI -> OBJ signature, so it gets invoked when -# linking generator tries to convert sources to OBJ, but it produces target of -# type H. This is non-standard, but allowed. That header won't be mocced. -# -class uic-generator : generator -{ - rule __init__ ( * : * ) - { - generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule run ( project name ? : property-set : sources * ) - { - if ! $(name) - { - name = [ $(sources[0]).name ] ; - name = $(name:B) ; - } - - local a = [ new action $(sources[1]) : qt4.uic : $(property-set) ] ; - - # The 'ui_' prefix is to match qmake's default behavior. - local target = [ new file-target ui_$(name) : H : $(project) : $(a) ] ; - - local r = [ virtual-target.register $(target) ] ; - - # Since this generator will return a H target, the linking generator - # won't use it at all, and won't set any dependency on it. However, we - # need the target to be seen by bjam, so that dependency from sources to - # this generated header is detected -- if jam does not know about this - # target, it won't do anything. - DEPENDS all : [ $(r).actualize ] ; - - return $(r) ; - } -} - - -class moc-h-generator : generator -{ - rule __init__ ( * : * ) - { - generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule run ( project name ? : property-set : sources * ) - { - if ! $(sources[2]) && [ $(sources[1]).type ] = MOCCABLE_CPP - { - name = [ $(sources[0]).name ] ; - name = $(name:B) ; - - local a = [ new action $(sources[1]) : qt4.moc.inc : - $(property-set) ] ; - - local target = [ new file-target $(name) : MOC : $(project) : $(a) - ] ; - - local r = [ virtual-target.register $(target) ] ; - - # Since this generator will return a H target, the linking generator - # won't use it at all, and won't set any dependency on it. However, - # we need the target to be seen by bjam, so that dependency from - # sources to this generated header is detected -- if jam does not - # know about this target, it won't do anything. - DEPENDS all : [ $(r).actualize ] ; - - return $(r) ; - } - } -} - - -class moc-inc-generator : generator -{ - rule __init__ ( * : * ) - { - generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule run ( project name ? : property-set : sources * ) - { - if ! $(sources[2]) && [ $(sources[1]).type ] = MOCCABLE_H - { - name = [ $(sources[0]).name ] ; - name = $(name:B) ; - - local a = [ new action $(sources[1]) : qt4.moc.inc : - $(property-set) ] ; - - local target = [ new file-target moc_$(name) : CPP : $(project) : - $(a) ] ; - - # Since this generator will return a H target, the linking generator - # won't use it at all, and won't set any dependency on it. 
However, - # we need the target to be seen by bjam, so that dependency from - # sources to this generated header is detected -- if jam does not - # know about this target, it won't do anything. - DEPENDS all : [ $(target).actualize ] ; - - return [ virtual-target.register $(target) ] ; - } - } -} - - -# Query the installation directory. This is needed in at least two scenarios. -# First, when re-using sources from the Qt-Tree. Second, to "install" custom Qt -# plugins to the Qt-Tree. -# -rule directory -{ - return $(.PREFIX) ; -} - -# Add a shared Qt library. -rule add-shared-library ( lib-name : depends-on * : usage-defines * : requirements * : include ? ) -{ - add-library $(lib-name) : $(.suffix_version) : $(depends-on) : $(usage-defines) : $(requirements) : $(include) ; -} - -# Add a static Qt library. -rule add-static-library ( lib-name : depends-on * : usage-defines * : requirements * : include ? ) -{ - add-library $(lib-name) : : $(depends-on) : $(usage-defines) : $(requirements) : $(include) ; -} - -# Add a Qt library. -# Static libs are unversioned, whereas shared libs have the major number as suffix. -# Creates both release and debug versions on platforms where both are enabled by Qt configure. -# Flags: -# - lib-name Qt library Name -# - version Qt major number used as shared library suffix (QtCore4.so) -# - depends-on other Qt libraries -# - usage-defines those are set by qmake, so set them when using this library -# - requirements addional requirements -# - include non-canonical include path. The canonical path is $(.incprefix)/$(lib-name). -rule add-library ( lib-name : version ? : depends-on * : usage-defines * : requirements * : include ? ) -{ - if $(.bjam-qt) - { - # Import Qt module - # Eveything will be setup there - alias $(lib-name) - : $(.prefix)//$(lib-name) - : - : - : <allow>qt4 ; - } - else - { - local real_include ; - real_include ?= $(include) ; - real_include ?= $(lib-name) ; - - lib $(lib-name) - : # sources - $(depends-on) - : # requirements - <name>$(lib-name)$(version) - $(requirements) - : # default-build - : # usage-requirements - <define>$(usage-defines) - <include>$(.incprefix)/$(real_include) - ; - - if $(.have_separate_debug) = TRUE - { - lib $(lib-name) - : # sources - $(depends-on) - : # requirements - <name>$(lib-name)$(.suffix_debug)$(version) - $(requirements) - <variant>debug - : # default-build - : # usage-requirements - <define>$(usage-defines) - <include>$(.incprefix)/$(real_include) - ; - } - } - - # Make library explicit so that a simple <use>qt4 will not bring in everything. - # And some components like QtDBus/Phonon may not be available on all platforms. - explicit $(lib-name) ; -} - -# Use $(.BINPREFIX[-1]) for the paths as several tools-requirements can match. -# The exact match is the last one. - -# Get <include> and <defines> from current toolset. -flags qt4.moc INCLUDES <include> ; -flags qt4.moc DEFINES <define> ; - -# need a newline for expansion of DEFINES and INCLUDES in the response file. -.nl = " -" ; - -# Processes headers to create Qt MetaObject information. Qt4-moc has its -# c++-parser, so pass INCLUDES and DEFINES. -# We use response file with one INCLUDE/DEFINE per line -# -actions moc -{ - $(.BINPREFIX[-1])/moc -f $(>) -o $(<) @"@($(<).rsp:E=-D$(DEFINES)$(.nl) -I$(INCLUDES:T)$(.nl))" -} - -# When moccing files for include only, we don't need -f, otherwise the generated -# code will include the .cpp and we'll get duplicated symbols. 
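-# (This is the moccable-cpp path described in the header comment: the .cpp
-# source already includes its generated .moc file itself, so moc must not pull
-# the source back in with -f.)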
-#
-actions moc.inc
-{
-    $(.BINPREFIX[-1])/moc $(>) -o $(<) @"@($(<).rsp:E=-D$(DEFINES)$(.nl) -I$(INCLUDES:T)$(.nl))"
-}
-
-
-# Generates source files from resource files.
-#
-actions rcc
-{
-    $(.BINPREFIX[-1])/rcc $(>) -name $(>:B) -o $(<)
-}
-
-
-# Generates user-interface source from .ui files.
-#
-actions uic
-{
-    $(.BINPREFIX[-1])/uic $(>) -o $(<)
-}
-
-
-# Scanner for .qrc files. Look for the CDATA section of the <file> tag. Ignore
-# the "alias" attribute. See http://doc.trolltech.com/qt/resources.html for
-# detailed documentation of the Qt Resource System.
-#
-class qrc-scanner : common-scanner
-{
-    rule pattern ( )
-    {
-        return "<file.*>(.*)</file>" ;
-    }
-}
-
-
-# Wrapped files are "included".
-scanner.register qrc-scanner : include ;
diff --git a/jam-files/boost-build/tools/quickbook-config.jam b/jam-files/boost-build/tools/quickbook-config.jam
deleted file mode 100644
index e983a78a..00000000
--- a/jam-files/boost-build/tools/quickbook-config.jam
+++ /dev/null
@@ -1,44 +0,0 @@
-#~ Copyright 2005 Rene Rivera.
-#~ Distributed under the Boost Software License, Version 1.0.
-#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
-
-# Automatic configuration for BoostBook tools. To use, just import this module.
-
-import os ;
-import toolset : using ;
-
-if [ os.name ] = NT
-{
-    local boost-dir = ;
-    for local R in snapshot cvs 1.33.0
-    {
-        boost-dir += [ W32_GETREG
-            "HKEY_LOCAL_MACHINE\\SOFTWARE\\Boost.org\\$(R)"
-            : "InstallRoot" ] ;
-    }
-    local quickbook-path = [ GLOB "$(boost-dir)\\bin" "\\Boost\\bin" : quickbook.exe ] ;
-    quickbook-path = $(quickbook-path[1]) ;
-
-    if $(quickbook-path)
-    {
-        if --debug-configuration in [ modules.peek : ARGV ]
-        {
-            ECHO "notice:" using quickbook ":" $(quickbook-path) ;
-        }
-        using quickbook : $(quickbook-path) ;
-    }
-}
-else
-{
-    local quickbook-path = [ GLOB "/usr/local/bin" "/usr/bin" "/opt/bin" : quickbook ] ;
-    quickbook-path = $(quickbook-path[1]) ;
-
-    if $(quickbook-path)
-    {
-        if --debug-configuration in [ modules.peek : ARGV ]
-        {
-            ECHO "notice:" using quickbook ":" $(quickbook-path) ;
-        }
-        using quickbook : $(quickbook-path) ;
-    }
-}
diff --git a/jam-files/boost-build/tools/quickbook.jam b/jam-files/boost-build/tools/quickbook.jam
deleted file mode 100644
index 6de2d42f..00000000
--- a/jam-files/boost-build/tools/quickbook.jam
+++ /dev/null
@@ -1,361 +0,0 @@
-#
-# Copyright (c) 2005 João Abecasis
-# Copyright (c) 2005 Vladimir Prus
-# Copyright (c) 2006 Rene Rivera
-#
-# Distributed under the Boost Software License, Version 1.0. (See
-# accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-#
-
-# This toolset defines a generator to translate QuickBook to BoostBook. It can
-# be used to generate nice (!) user documentation in different formats
-# (pdf/html/...), from a single text file with simple markup.
-#
-# The toolset defines the QUICKBOOK type (file extension 'qbk') and
-# a QUICKBOOK to XML (BOOSTBOOK) generator.
-#
-#
-# ===========================================================================
-# Q & A
-# ===========================================================================
-#
-# If you don't know what this is all about, some Q & A will hopefully get you
-# up to speed with QuickBook and this toolset.
-#
-#
-# What is QuickBook ?
-#
-#     QuickBook is a WikiWiki style documentation tool geared towards C++
-#     documentation using simple rules and markup for simple formatting tasks.
-#     QuickBook extends the WikiWiki concept. Like the WikiWiki, QuickBook
-#     documents are simple text files. A single QuickBook document can
-#     generate a fully linked set of nice HTML and PostScript/PDF documents
-#     complete with images and syntax-colorized source code.
-#
-#
-# Where can I get QuickBook ?
-#
-#     Quickbook can be found in Boost's repository, under the tools/quickbook
-#     directory. It was added there in Jan 2005, some time after the release of
-#     Boost v1.32.0, and has been an integral part of the Boost distribution
-#     since v1.33.
-#
-#     Here's a link to the SVN repository:
-#         https://svn.boost.org/svn/boost/trunk/tools/quickbook
-#
-#     And to QuickBook's QuickBook-generated docs:
-#         http://www.boost.org/doc/libs/release/tools/quickbook/index.html
-#
-#
-# How do I use QuickBook and this toolset in my projects ?
-#
-#     The minimal example is:
-#
-#         using boostbook ;
-#         import quickbook ;
-#
-#         boostbook my_docs : my_docs_source.qbk ;
-#
-#     where my_docs is a target name and my_docs_source.qbk is a QuickBook
-#     file. The documentation format to be generated is determined by the
-#     boostbook toolset. By default html documentation should be generated,
-#     but you should check BoostBook's docs to be sure.
-#
-#
-# What do I need ?
-#
-#     You should start by setting up the BoostBook toolset. Please refer to
-#     boostbook.jam and the BoostBook documentation for information on how to
-#     do this.
-#
-#     A QuickBook executable is also needed. The toolset will generate this
-#     executable if it can find the QuickBook sources. The following
-#     directories will be searched:
-#
-#         BOOST_ROOT/tools/quickbook/
-#         BOOST_BUILD_PATH/../../quickbook/
-#
-#     (BOOST_ROOT and BOOST_BUILD_PATH are environment variables)
-#
-#     If QuickBook sources are not found the toolset will then try to use
-#     the shell command 'quickbook'.
-#
-#
-# How do I provide a custom QuickBook executable ?
-#
-#     You may put the following in your user-config.jam or site-config.jam:
-#
-#         using quickbook : /path/to/quickbook ;
-#
-#     or, if 'quickbook' can be found in your PATH,
-#
-#         using quickbook : quickbook ;
-#
-#
-# For convenience, three alternatives are tried to get a QuickBook executable:
-#
-#   1. If the user points us to a QuickBook executable, that is used.
-#
-#   2. Otherwise, we search for the QuickBook sources and compile QuickBook
-#      using the default toolset.
-#
-#   3. As a last resort, we rely on the shell for finding 'quickbook'.
-#

-import boostbook ;
-import "class" : new ;
-import feature ;
-import generators ;
-import toolset ;
-import type ;
-import scanner ;
-import project ;
-import targets ;
-import build-system ;
-import path ;
-import common ;
-import errors ;
-
-# The one and only QUICKBOOK type!
-type.register QUICKBOOK : qbk ;
-
-# <quickbook-binary> shell command to run QuickBook
-# <quickbook-binary-dependencies> targets to build QuickBook from sources.
-feature.feature <quickbook-binary> : : free ;
-feature.feature <quickbook-binary-dependencies> : : free dependency ;
-feature.feature <quickbook-define> : : free ;
-feature.feature <quickbook-indent> : : free ;
-feature.feature <quickbook-line-width> : : free ;
-
-
-# quickbook-binary-generator handles generation of the QuickBook executable, by
-# marking it as a dependency for QuickBook docs.
-#
-# If the user supplied the QuickBook command, that will be used.
-#
-# Otherwise we search some sensible places for the QuickBook sources and compile
-# from scratch using the default toolset.
-#
-# As a last resort we rely on the shell to find 'quickbook'.
-# -class quickbook-binary-generator : generator -{ - import modules path targets quickbook ; - - rule run ( project name ? : property-set : sources * : multiple ? ) - { - quickbook.freeze-config ; - # QuickBook invocation command and dependencies. - local quickbook-binary = [ modules.peek quickbook : .quickbook-binary ] ; - local quickbook-binary-dependencies ; - - if ! $(quickbook-binary) - { - # If the QuickBook source directory was found, mark its main target - # as a dependency for the current project. Otherwise, try to find - # 'quickbook' in user's PATH - local quickbook-dir = [ modules.peek quickbook : .quickbook-dir ] ; - if $(quickbook-dir) - { - # Get the main-target in QuickBook directory. - local quickbook-main-target = [ targets.resolve-reference $(quickbook-dir) : $(project) ] ; - - # The first element are actual targets, the second are - # properties found in target-id. We do not care about these - # since we have passed the id ourselves. - quickbook-main-target = - [ $(quickbook-main-target[1]).main-target quickbook ] ; - - quickbook-binary-dependencies = - [ $(quickbook-main-target).generate [ $(property-set).propagated ] ] ; - - # Ignore usage-requirements returned as first element. - quickbook-binary-dependencies = $(quickbook-binary-dependencies[2-]) ; - - # Some toolsets generate extra targets (e.g. RSP). We must mark - # all targets as dependencies for the project, but we will only - # use the EXE target for quickbook-to-boostbook translation. - for local target in $(quickbook-binary-dependencies) - { - if [ $(target).type ] = EXE - { - quickbook-binary = - [ path.native - [ path.join - [ $(target).path ] - [ $(target).name ] - ] - ] ; - } - } - } - } - - # Add $(quickbook-binary-dependencies) as a dependency of the current - # project and set it as the <quickbook-binary> feature for the - # quickbook-to-boostbook rule, below. - property-set = [ $(property-set).add-raw - <dependency>$(quickbook-binary-dependencies) - <quickbook-binary>$(quickbook-binary) - <quickbook-binary-dependencies>$(quickbook-binary-dependencies) - ] ; - - return [ generator.run $(project) $(name) : $(property-set) : $(sources) : $(multiple) ] ; - } -} - - -# Define a scanner for tracking QBK include dependencies. -# -class qbk-scanner : common-scanner -{ - rule pattern ( ) - { - return "\\[[ ]*include[ ]+([^]]+)\\]" - "\\[[ ]*include:[a-zA-Z0-9_]+[ ]+([^]]+)\\]" - "\\[[ ]*import[ ]+([^]]+)\\]" ; - } -} - - -scanner.register qbk-scanner : include ; - -type.set-scanner QUICKBOOK : qbk-scanner ; - - -# Initialization of toolset. -# -# Parameters: -# command ? -> path to QuickBook executable. -# -# When command is not supplied toolset will search for QuickBook directory and -# compile the executable from source. If that fails we still search the path for -# 'quickbook'. -# -rule init ( - command ? # path to the QuickBook executable. - ) -{ - if $(command) - { - if $(.config-frozen) - { - errors.user-error "quickbook: configuration cannot be changed after it has been used." ; - } - .command = $(command) ; - } -} - -rule freeze-config ( ) -{ - if ! $(.config-frozen) - { - .config-frozen = true ; - - # QuickBook invocation command and dependencies. - - .quickbook-binary = $(.command) ; - - if $(.quickbook-binary) - { - # Use user-supplied command. 
- .quickbook-binary = [ common.get-invocation-command quickbook : quickbook : $(.quickbook-binary) ] ; - } - else - { - # Search for QuickBook sources in sensible places, like - # $(BOOST_ROOT)/tools/quickbook - # $(BOOST_BUILD_PATH)/../../quickbook - - # And build quickbook executable from sources. - - local boost-root = [ modules.peek : BOOST_ROOT ] ; - local boost-build-path = [ build-system.location ] ; - - if $(boost-root) - { - .quickbook-dir += [ path.join $(boost-root) tools ] ; - } - - if $(boost-build-path) - { - .quickbook-dir += $(boost-build-path)/../.. ; - } - - .quickbook-dir = [ path.glob $(.quickbook-dir) : quickbook ] ; - - # If the QuickBook source directory was found, mark its main target - # as a dependency for the current project. Otherwise, try to find - # 'quickbook' in user's PATH - if $(.quickbook-dir) - { - .quickbook-dir = [ path.make $(.quickbook-dir[1]) ] ; - } - else - { - ECHO "QuickBook warning: The path to the quickbook executable was" ; - ECHO " not provided. Additionally, couldn't find QuickBook" ; - ECHO " sources searching in" ; - ECHO " * BOOST_ROOT/tools/quickbook" ; - ECHO " * BOOST_BUILD_PATH/../../quickbook" ; - ECHO " Will now try to find a precompiled executable by searching" ; - ECHO " the PATH for 'quickbook'." ; - ECHO " To disable this warning in the future, or to completely" ; - ECHO " avoid compilation of quickbook, you can explicitly set the" ; - ECHO " path to a quickbook executable command in user-config.jam" ; - ECHO " or site-config.jam with the call" ; - ECHO " using quickbook : /path/to/quickbook ;" ; - - # As a last resort, search for 'quickbook' command in path. Note - # that even if the 'quickbook' command is not found, - # get-invocation-command will still return 'quickbook' and might - # generate an error while generating the virtual-target. - - .quickbook-binary = [ common.get-invocation-command quickbook : quickbook ] ; - } - } - } -} - - -generators.register [ new quickbook-binary-generator quickbook.quickbook-to-boostbook : QUICKBOOK : XML ] ; - - -# <quickbook-binary> shell command to run QuickBook -# <quickbook-binary-dependencies> targets to build QuickBook from sources. -toolset.flags quickbook.quickbook-to-boostbook QB-COMMAND <quickbook-binary> ; -toolset.flags quickbook.quickbook-to-boostbook QB-DEPENDENCIES <quickbook-binary-dependencies> ; -toolset.flags quickbook.quickbook-to-boostbook INCLUDES <include> ; -toolset.flags quickbook.quickbook-to-boostbook QB-DEFINES <quickbook-define> ; -toolset.flags quickbook.quickbook-to-boostbook QB-INDENT <quickbook-indent> ; -toolset.flags quickbook.quickbook-to-boostbook QB-LINE-WIDTH <quickbook-line-width> ; - - -rule quickbook-to-boostbook ( target : source : properties * ) -{ - # Signal dependency of quickbook sources on <quickbook-binary-dependencies> - # upon invocation of quickbook-to-boostbook. - DEPENDS $(target) : [ on $(target) return $(QB-DEPENDENCIES) ] ; -} - - -actions quickbook-to-boostbook -{ - "$(QB-COMMAND)" -I"$(INCLUDES)" -D"$(QB-DEFINES)" --indent="$(QB-INDENT)" --linewidth="$(QB-LINE-WIDTH)" --output-file="$(1)" "$(2)" -} - - -# Declare a main target to convert a quickbook source into a boostbook XML file. 
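[Editor's note: a hedged sketch of invoking the rule below directly from a Jamfile, assuming the module has been loaded via import; the target and source names are invented.]

    import quickbook ;

    quickbook.to-boostbook manual-xml : manual.qbk ;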
-# -rule to-boostbook ( target-name : sources * : requirements * : default-build * ) -{ - local project = [ project.current ] ; - - targets.main-target-alternative - [ new typed-target $(target-name) : $(project) : XML - : [ targets.main-target-sources $(sources) : $(target-name) ] - : [ targets.main-target-requirements $(requirements) : $(project) ] - : [ targets.main-target-default-build $(default-build) : $(project) ] - ] ; -} diff --git a/jam-files/boost-build/tools/rc.jam b/jam-files/boost-build/tools/rc.jam deleted file mode 100644 index 9964d339..00000000 --- a/jam-files/boost-build/tools/rc.jam +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright (C) Andre Hentz 2003. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. -# -# Copyright (c) 2006 Rene Rivera. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import type ; -import generators ; -import feature ; -import errors ; -import scanner ; -import toolset : flags ; - -if [ MATCH (--debug-configuration) : [ modules.peek : ARGV ] ] -{ - .debug-configuration = true ; -} - -type.register RC : rc ; - -rule init ( ) -{ -} - -# Configures a new resource compilation command specific to a condition, -# usually a toolset selection condition. The possible options are: -# -# * <rc-type>(rc|windres) - Indicates the type of options the command -# accepts. -# -# Even though the arguments are all optional, only when a command, condition, -# and at minimum the rc-type option are given will the command be configured. -# This is so that callers don't have to check auto-configuration values -# before calling this. And still get the functionality of build failures when -# the resource compiler can't be found. -# -rule configure ( command ? : condition ? : options * ) -{ - local rc-type = [ feature.get-values <rc-type> : $(options) ] ; - - if $(command) && $(condition) && $(rc-type) - { - flags rc.compile.resource .RC $(condition) : $(command) ; - flags rc.compile.resource .RC_TYPE $(condition) : $(rc-type:L) ; - flags rc.compile.resource DEFINES <define> ; - flags rc.compile.resource INCLUDES <include> ; - if $(.debug-configuration) - { - ECHO notice: using rc compiler :: $(condition) :: $(command) ; - } - } -} - -rule compile.resource ( target : sources * : properties * ) -{ - local rc-type = [ on $(target) return $(.RC_TYPE) ] ; - rc-type ?= null ; - compile.resource.$(rc-type) $(target) : $(sources[1]) ; -} - -actions compile.resource.rc -{ - "$(.RC)" -l 0x409 "-U$(UNDEFS)" "-D$(DEFINES)" -I"$(>:D)" -I"$(<:D)" -I"$(INCLUDES)" -fo "$(<)" "$(>)" -} - -actions compile.resource.windres -{ - "$(.RC)" "-U$(UNDEFS)" "-D$(DEFINES)" -I"$(>:D)" -I"$(<:D)" -I"$(INCLUDES)" -o "$(<)" -i "$(>)" -} - -actions quietly compile.resource.null -{ - as /dev/null -o "$(<)" -} - -# Since it's a common practice to write -# exe hello : hello.cpp hello.rc -# we change the name of object created from RC file, to -# avoid conflict with hello.cpp. -# The reason we generate OBJ and not RES, is that gcc does not -# seem to like RES files, but works OK with OBJ. 
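# (With the OBJ(%_res) target type registered just below, hello.rc is
# compiled to hello_res.obj rather than hello.obj, so it cannot clash
# with the object produced from hello.cpp.)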
-# See http://article.gmane.org/gmane.comp.lib.boost.build/5643/ -# -# Using 'register-c-compiler' adds the build directory to INCLUDES -generators.register-c-compiler rc.compile.resource : RC : OBJ(%_res) ; - -# Register scanner for resources -class res-scanner : scanner -{ - import regex virtual-target path scanner ; - - rule __init__ ( includes * ) - { - scanner.__init__ ; - - self.includes = $(includes) ; - } - - rule pattern ( ) - { - return "(([^ ]+[ ]+(BITMAP|CURSOR|FONT|ICON|MESSAGETABLE|RT_MANIFEST)[ ]+([^ \"]+|\"[^\"]+\"))|(#include[ ]*(<[^<]+>|\"[^\"]+\")))" ; - } - - rule process ( target : matches * : binding ) - { - local angle = [ regex.transform $(matches) : "#include[ ]*<([^<]+)>" ] ; - local quoted = [ regex.transform $(matches) : "#include[ ]*\"([^\"]+)\"" ] ; - local res = [ regex.transform $(matches) : "[^ ]+[ ]+(BITMAP|CURSOR|FONT|ICON|MESSAGETABLE|RT_MANIFEST)[ ]+(([^ \"]+)|\"([^\"]+)\")" : 3 4 ] ; - - # Icons and other includes may referenced as - # - # IDR_MAINFRAME ICON "res\\icon.ico" - # - # so we have to replace double backslashes to single ones. - res = [ regex.replace-list $(res) : "\\\\\\\\" : "/" ] ; - - # CONSIDER: the new scoping rule seem to defeat "on target" variables. - local g = [ on $(target) return $(HDRGRIST) ] ; - local b = [ NORMALIZE_PATH $(binding:D) ] ; - - # Attach binding of including file to included targets. - # When target is directly created from virtual target - # this extra information is unnecessary. But in other - # cases, it allows to distinguish between two headers of the - # same name included from different places. - # We don't need this extra information for angle includes, - # since they should not depend on including file (we can't - # get literal "." in include path). - local g2 = $(g)"#"$(b) ; - - angle = $(angle:G=$(g)) ; - quoted = $(quoted:G=$(g2)) ; - res = $(res:G=$(g2)) ; - - local all = $(angle) $(quoted) ; - - INCLUDES $(target) : $(all) ; - DEPENDS $(target) : $(res) ; - NOCARE $(all) $(res) ; - SEARCH on $(angle) = $(self.includes:G=) ; - SEARCH on $(quoted) = $(b) $(self.includes:G=) ; - SEARCH on $(res) = $(b) $(self.includes:G=) ; - - # Just propagate current scanner to includes, in a hope - # that includes do not change scanners. - scanner.propagate $(__name__) : $(angle) $(quoted) : $(target) ; - } -} - -scanner.register res-scanner : include ; -type.set-scanner RC : res-scanner ; diff --git a/jam-files/boost-build/tools/rc.py b/jam-files/boost-build/tools/rc.py deleted file mode 100644 index 0b82d231..00000000 --- a/jam-files/boost-build/tools/rc.py +++ /dev/null @@ -1,189 +0,0 @@ -# Status: being ported by Steven Watanabe -# Base revision: 47077 -# -# Copyright (C) Andre Hentz 2003. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. -# -# Copyright (c) 2006 Rene Rivera. -# -# Copyright (c) 2008 Steven Watanabe -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. 
(See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -##import type ; -##import generators ; -##import feature ; -##import errors ; -##import scanner ; -##import toolset : flags ; - -from b2.build import type, toolset, generators, scanner, feature -from b2.tools import builtin -from b2.util import regex -from b2.build.toolset import flags -from b2.manager import get_manager - -__debug = None - -def debug(): - global __debug - if __debug is None: - __debug = "--debug-configuration" in bjam.variable("ARGV") - return __debug - -type.register('RC', ['rc']) - -def init(): - pass - -def configure (command = None, condition = None, options = None): - """ - Configures a new resource compilation command specific to a condition, - usually a toolset selection condition. The possible options are: - - * <rc-type>(rc|windres) - Indicates the type of options the command - accepts. - - Even though the arguments are all optional, only when a command, condition, - and at minimum the rc-type option are given will the command be configured. - This is so that callers don't have to check auto-configuration values - before calling this. And still get the functionality of build failures when - the resource compiler can't be found. - """ - rc_type = feature.get_values('<rc-type>', options) - if rc_type: - assert(len(rc_type) == 1) - rc_type = rc_type[0] - - if command and condition and rc_type: - flags('rc.compile.resource', '.RC', condition, command) - flags('rc.compile.resource', '.RC_TYPE', condition, rc_type.lower()) - flags('rc.compile.resource', 'DEFINES', [], ['<define>']) - flags('rc.compile.resource', 'INCLUDES', [], ['<include>']) - if debug(): - print 'notice: using rc compiler ::', condition, '::', command - -engine = get_manager().engine() - -class RCAction: - """Class representing bjam action defined from Python. - The function must register the action to execute.""" - - def __init__(self, action_name, function): - self.action_name = action_name - self.function = function - - def __call__(self, targets, sources, property_set): - if self.function: - self.function(targets, sources, property_set) - -# FIXME: What is the proper way to dispatch actions? -def rc_register_action(action_name, function = None): - global engine - if engine.actions.has_key(action_name): - raise "Bjam action %s is already defined" % action_name - engine.actions[action_name] = RCAction(action_name, function) - -def rc_compile_resource(targets, sources, properties): - rc_type = bjam.call('get-target-variable', targets, '.RC_TYPE') - global engine - engine.set_update_action('rc.compile.resource.' + rc_type, targets, sources, properties) - -rc_register_action('rc.compile.resource', rc_compile_resource) - - -engine.register_action( - 'rc.compile.resource.rc', - '"$(.RC)" -l 0x409 "-U$(UNDEFS)" "-D$(DEFINES)" -I"$(>:D)" -I"$(<:D)" -I"$(INCLUDES)" -fo "$(<)" "$(>)"') - -engine.register_action( - 'rc.compile.resource.windres', - '"$(.RC)" "-U$(UNDEFS)" "-D$(DEFINES)" -I"$(>:D)" -I"$(<:D)" -I"$(INCLUDES)" -o "$(<)" -i "$(>)"') - -# FIXME: this was originally declared quietly -engine.register_action( - 'compile.resource.null', - 'as /dev/null -o "$(<)"') - -# Since it's a common practice to write -# exe hello : hello.cpp hello.rc -# we change the name of object created from RC file, to -# avoid conflict with hello.cpp. -# The reason we generate OBJ and not RES, is that gcc does not -# seem to like RES files, but works OK with OBJ. 
-# See http://article.gmane.org/gmane.comp.lib.boost.build/5643/ -# -# Using 'register-c-compiler' adds the build directory to INCLUDES -# FIXME: switch to generators -builtin.register_c_compiler('rc.compile.resource', ['RC'], ['OBJ(%_res)'], []) - -__angle_include_re = "#include[ ]*<([^<]+)>" - -# Register scanner for resources -class ResScanner(scanner.Scanner): - - def __init__(self, includes): - scanner.__init__ ; - self.includes = includes - - def pattern(self): - return "(([^ ]+[ ]+(BITMAP|CURSOR|FONT|ICON|MESSAGETABLE|RT_MANIFEST)" +\ - "[ ]+([^ \"]+|\"[^\"]+\"))|(#include[ ]*(<[^<]+>|\"[^\"]+\")))" ; - - def process(self, target, matches, binding): - - angle = regex.transform(matches, "#include[ ]*<([^<]+)>") - quoted = regex.transform(matches, "#include[ ]*\"([^\"]+)\"") - res = regex.transform(matches, - "[^ ]+[ ]+(BITMAP|CURSOR|FONT|ICON|MESSAGETABLE|RT_MANIFEST)" +\ - "[ ]+(([^ \"]+)|\"([^\"]+)\")", [3, 4]) - - # Icons and other includes may referenced as - # - # IDR_MAINFRAME ICON "res\\icon.ico" - # - # so we have to replace double backslashes to single ones. - res = [ re.sub(r'\\\\', '/', match) for match in res ] - - # CONSIDER: the new scoping rule seem to defeat "on target" variables. - g = bjam.call('get-target-variable', target, 'HDRGRIST') - b = os.path.normalize_path(os.path.dirname(binding)) - - # Attach binding of including file to included targets. - # When target is directly created from virtual target - # this extra information is unnecessary. But in other - # cases, it allows to distinguish between two headers of the - # same name included from different places. - # We don't need this extra information for angle includes, - # since they should not depend on including file (we can't - # get literal "." in include path). - g2 = g + "#" + b - - g = "<" + g + ">" - g2 = "<" + g2 + ">" - angle = [g + x for x in angle] - quoted = [g2 + x for x in quoted] - res = [g2 + x for x in res] - - all = angle + quoted - - bjam.call('mark-included', target, all) - - engine = get_manager().engine() - - engine.add_dependency(target, res) - bjam.call('NOCARE', all + res) - engine.set_target_variable(angle, 'SEARCH', ungrist(self.includes)) - engine.set_target_variable(quoted, 'SEARCH', b + ungrist(self.includes)) - engine.set_target_variable(res, 'SEARCH', b + ungrist(self.includes)) ; - - # Just propagate current scanner to includes, in a hope - # that includes do not change scanners. - get_manager().scanners().propagate(self, angle + quoted) - -scanner.register(ResScanner, 'include') -type.set_scanner('RC', ResScanner) diff --git a/jam-files/boost-build/tools/stage.jam b/jam-files/boost-build/tools/stage.jam deleted file mode 100644 index 296e7558..00000000 --- a/jam-files/boost-build/tools/stage.jam +++ /dev/null @@ -1,524 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2005, 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This module defines the 'install' rule, used to copy a set of targets to a -# single location. 
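[Editor's note: a usage sketch of the 'install' rule this module defines, as it might appear in a Jamfile; the target name, sources, and location are placeholders. All features shown are declared in this module.]

    install dist
        : hello                       # main targets to copy
        : <location>bin               # destination directory
          <install-dependencies>on    # also stage dependent libraries
          <install-type>EXE
          <install-type>SHARED_LIB    # restrict the staged target types
        ;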
- -import targets ; -import "class" : new ; -import errors ; -import type ; -import generators ; -import feature ; -import project ; -import virtual-target ; -import path ; -import types/register ; - - -feature.feature <install-dependencies> : off on : incidental ; -feature.feature <install-type> : : free incidental ; -feature.feature <install-source-root> : : free path ; -feature.feature <so-version> : : free incidental ; - -# If 'on', version symlinks for shared libraries will not be created. Affects -# Unix builds only. -feature.feature <install-no-version-symlinks> : on : optional incidental ; - - -class install-target-class : basic-target -{ - import feature ; - import project ; - import type ; - import errors ; - import generators ; - import path ; - import stage ; - import "class" : new ; - import property ; - import property-set ; - - rule __init__ ( name-and-dir : project : sources * : requirements * : default-build * ) - { - basic-target.__init__ $(name-and-dir) : $(project) : $(sources) : - $(requirements) : $(default-build) ; - } - - # If <location> is not set, sets it based on the project data. - # - rule update-location ( property-set ) - { - local loc = [ $(property-set).get <location> ] ; - if ! $(loc) - { - loc = [ path.root $(self.name) [ $(self.project).get location ] ] ; - property-set = [ $(property-set).add-raw $(loc:G=<location>) ] ; - } - - return $(property-set) ; - } - - # Takes a target that is installed and a property set which is used when - # installing. - # - rule adjust-properties ( target : build-property-set ) - { - local ps-raw ; - local a = [ $(target).action ] ; - if $(a) - { - local ps = [ $(a).properties ] ; - ps-raw = [ $(ps).raw ] ; - - # Unless <hardcode-dll-paths>true is in properties, which can happen - # only if the user has explicitly requested it, nuke all <dll-path> - # properties. - if [ $(build-property-set).get <hardcode-dll-paths> ] != true - { - ps-raw = [ property.change $(ps-raw) : <dll-path> ] ; - } - - # If any <dll-path> properties were specified for installing, add - # them. - local l = [ $(build-property-set).get <dll-path> ] ; - ps-raw += $(l:G=<dll-path>) ; - - # Also copy <linkflags> feature from current build set, to be used - # for relinking. - local l = [ $(build-property-set).get <linkflags> ] ; - ps-raw += $(l:G=<linkflags>) ; - - # Remove the <tag> feature on original targets. - ps-raw = [ property.change $(ps-raw) : <tag> ] ; - - # And <location>. If stage target has another stage target in - # sources, then we shall get virtual targets with the <location> - # property set. - ps-raw = [ property.change $(ps-raw) : <location> ] ; - } - - local d = [ $(build-property-set).get <dependency> ] ; - ps-raw += $(d:G=<dependency>) ; - - local d = [ $(build-property-set).get <location> ] ; - ps-raw += $(d:G=<location>) ; - - local ns = [ $(build-property-set).get <install-no-version-symlinks> ] ; - ps-raw += $(ns:G=<install-no-version-symlinks>) ; - - local d = [ $(build-property-set).get <install-source-root> ] ; - # Make the path absolute: we shall use it to compute relative paths and - # making the path absolute will help. 
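# (E.g. with <install-source-root>/src, a file built from
# /src/foo/bar.hpp keeps its foo/ subdirectory under <location>; a
# relative root would otherwise be resolved against whatever directory
# the build happens to be invoked from. See 'copy-file' below.)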
- if $(d) - { - d = [ path.root $(d) [ path.pwd ] ] ; - ps-raw += $(d:G=<install-source-root>) ; - } - - if $(ps-raw) - { - return [ property-set.create $(ps-raw) ] ; - } - else - { - return [ property-set.empty ] ; - } - } - - rule construct ( name : source-targets * : property-set ) - { - source-targets = [ targets-to-stage $(source-targets) : - $(property-set) ] ; - - property-set = [ update-location $(property-set) ] ; - - local ename = [ $(property-set).get <name> ] ; - - if $(ename) && $(source-targets[2]) - { - errors.error "When <name> property is used in 'install', only one" - "source is allowed" ; - } - - local result ; - for local i in $(source-targets) - { - local staged-targets ; - - local new-properties = [ adjust-properties $(i) : - $(property-set) ] ; - - # See if something special should be done when staging this type. It - # is indicated by the presence of a special "INSTALLED_" type. - local t = [ $(i).type ] ; - if $(t) && [ type.registered INSTALLED_$(t) ] - { - if $(ename) - { - errors.error "In 'install': <name> property specified with target that requires relinking." ; - } - else - { - local targets = [ generators.construct $(self.project) - $(name) : INSTALLED_$(t) : $(new-properties) : $(i) ] ; - staged-targets += $(targets[2-]) ; - } - } - else - { - staged-targets = [ stage.copy-file $(self.project) $(ename) : - $(i) : $(new-properties) ] ; - } - - if ! $(staged-targets) - { - errors.error "Unable to generate staged version of " [ $(source).str ] ; - } - - for t in $(staged-targets) - { - result += [ virtual-target.register $(t) ] ; - } - } - - return [ property-set.empty ] $(result) ; - } - - # Given the list of source targets explicitly passed to 'stage', returns the - # list of targets which must be staged. - # - rule targets-to-stage ( source-targets * : property-set ) - { - local result ; - - # Traverse the dependencies, if needed. - if [ $(property-set).get <install-dependencies> ] = "on" - { - source-targets = [ collect-targets $(source-targets) ] ; - } - - # Filter the target types, if needed. - local included-types = [ $(property-set).get <install-type> ] ; - for local r in $(source-targets) - { - local ty = [ $(r).type ] ; - if $(ty) - { - # Do not stage searched libs. - if $(ty) != SEARCHED_LIB - { - if $(included-types) - { - if [ include-type $(ty) : $(included-types) ] - { - result += $(r) ; - } - } - else - { - result += $(r) ; - } - } - } - else if ! $(included-types) - { - # Don't install typeless target if there is an explicit list of - # allowed types. - result += $(r) ; - } - } - - return $(result) ; - } - - # CONSIDER: figure out why we can not use virtual-target.traverse here. - # - rule collect-targets ( targets * ) - { - # Find subvariants - local s ; - for local t in $(targets) - { - s += [ $(t).creating-subvariant ] ; - } - s = [ sequence.unique $(s) ] ; - - local result = [ new set ] ; - $(result).add $(targets) ; - - for local i in $(s) - { - $(i).all-referenced-targets $(result) ; - } - local result2 ; - for local r in [ $(result).list ] - { - if $(r:G) != <use> - { - result2 += $(r:G=) ; - } - } - DELETE_MODULE $(result) ; - result = [ sequence.unique $(result2) ] ; - } - - # Returns true iff 'type' is subtype of some element of 'types-to-include'. - # - local rule include-type ( type : types-to-include * ) - { - local found ; - while $(types-to-include) && ! 
$(found) - { - if [ type.is-subtype $(type) $(types-to-include[1]) ] - { - found = true ; - } - types-to-include = $(types-to-include[2-]) ; - } - - return $(found) ; - } -} - - -# Creates a copy of target 'source'. The 'properties' object should have a -# <location> property which specifies where the target must be placed. -# -rule copy-file ( project name ? : source : properties ) -{ - name ?= [ $(source).name ] ; - local relative ; - - local new-a = [ new non-scanning-action $(source) : common.copy : - $(properties) ] ; - local source-root = [ $(properties).get <install-source-root> ] ; - if $(source-root) - { - # Get the real path of the target. We probably need to strip relative - # path from the target name at construction. - local path = [ $(source).path ] ; - path = [ path.root $(name:D) $(path) ] ; - # Make the path absolute. Otherwise, it would be hard to compute the - # relative path. The 'source-root' is already absolute, see the - # 'adjust-properties' method above. - path = [ path.root $(path) [ path.pwd ] ] ; - - relative = [ path.relative-to $(source-root) $(path) ] ; - } - - # Note: Using $(name:D=$(relative)) might be faster here, but then we would - # need to explicitly check that relative is not ".", otherwise we might get - # paths like '<prefix>/boost/.', try to create it and mkdir would obviously - # fail. - name = [ path.join $(relative) $(name:D=) ] ; - - return [ new file-target $(name) exact : [ $(source).type ] : $(project) : - $(new-a) ] ; -} - - -rule symlink ( name : project : source : properties ) -{ - local a = [ new action $(source) : symlink.ln : $(properties) ] ; - return [ new file-target $(name) exact : [ $(source).type ] : $(project) : - $(a) ] ; -} - - -rule relink-file ( project : source : property-set ) -{ - local action = [ $(source).action ] ; - local cloned-action = [ virtual-target.clone-action $(action) : $(project) : - "" : $(property-set) ] ; - return [ $(cloned-action).targets ] ; -} - - -# Declare installed version of the EXE type. Generator for this type will cause -# relinking to the new location. -type.register INSTALLED_EXE : : EXE ; - - -class installed-exe-generator : generator -{ - import type ; - import property-set ; - import modules ; - import stage ; - - rule __init__ ( ) - { - generator.__init__ install-exe : EXE : INSTALLED_EXE ; - } - - rule run ( project name ? : property-set : source : multiple ? ) - { - local need-relink ; - - if [ $(property-set).get <os> ] in NT CYGWIN || - [ $(property-set).get <target-os> ] in windows cygwin - { - } - else - { - # See if the dll-path properties are not changed during - # install. If so, copy, don't relink. - local a = [ $(source).action ] ; - local p = [ $(a).properties ] ; - local original = [ $(p).get <dll-path> ] ; - local current = [ $(property-set).get <dll-path> ] ; - - if $(current) != $(original) - { - need-relink = true ; - } - } - - - if $(need-relink) - { - return [ stage.relink-file $(project) - : $(source) : $(property-set) ] ; - } - else - { - return [ stage.copy-file $(project) - : $(source) : $(property-set) ] ; - } - } -} - - -generators.register [ new installed-exe-generator ] ; - - -# Installing a shared link on Unix might cause a creation of versioned symbolic -# links. 
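[Editor's note: concretely, the generator registered below matches library names of the form NNN.XXX.YYY.ZZZ; installing, say, libfoo.so.1.2.3 would also produce this chain of links unless <install-no-version-symlinks>on is set:]

    libfoo.so       -> libfoo.so.1.2.3   # lets -lfoo resolve at link time
    libfoo.so.1     -> libfoo.so.1.2.3
    libfoo.so.1.2   -> libfoo.so.1.2.3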
-type.register INSTALLED_SHARED_LIB : : SHARED_LIB ; - - -class installed-shared-lib-generator : generator -{ - import type ; - import property-set ; - import modules ; - import stage ; - - rule __init__ ( ) - { - generator.__init__ install-shared-lib : SHARED_LIB - : INSTALLED_SHARED_LIB ; - } - - rule run ( project name ? : property-set : source : multiple ? ) - { - if [ $(property-set).get <os> ] in NT CYGWIN || - [ $(property-set).get <target-os> ] in windows cygwin - { - local copied = [ stage.copy-file $(project) : $(source) : - $(property-set) ] ; - return [ virtual-target.register $(copied) ] ; - } - else - { - local a = [ $(source).action ] ; - local copied ; - if ! $(a) - { - # Non-derived file, just copy. - copied = [ stage.copy-file $(project) : $(source) : - $(property-set) ] ; - } - else - { - local cp = [ $(a).properties ] ; - local current-dll-path = [ $(cp).get <dll-path> ] ; - local new-dll-path = [ $(property-set).get <dll-path> ] ; - - if $(current-dll-path) != $(new-dll-path) - { - # Rpath changed, need to relink. - copied = [ stage.relink-file $(project) : $(source) : - $(property-set) ] ; - } - else - { - copied = [ stage.copy-file $(project) : $(source) : - $(property-set) ] ; - } - } - - copied = [ virtual-target.register $(copied) ] ; - - local result = $(copied) ; - # If the name is in the form NNN.XXX.YYY.ZZZ, where all 'X', 'Y' and - # 'Z' are numbers, we need to create NNN.XXX and NNN.XXX.YYY - # symbolic links. - local m = [ MATCH (.*)\\.([0123456789]+)\\.([0123456789]+)\\.([0123456789]+)$ - : [ $(copied).name ] ] ; - if $(m) - { - # Symlink without version at all is used to make - # -lsome_library work. - result += [ stage.symlink $(m[1]) : $(project) : $(copied) : - $(property-set) ] ; - - # Symlinks of some libfoo.N and libfoo.N.M are used so that - # library can found at runtime, if libfoo.N.M.X has soname of - # libfoo.N. That happens when the library makes some binary - # compatibility guarantees. If not, it is possible to skip those - # symlinks. - local suppress = - [ $(property-set).get <install-no-version-symlinks> ] ; - - if $(suppress) != "on" - { - result += [ stage.symlink $(m[1]).$(m[2]) : $(project) - : $(copied) : $(property-set) ] ; - result += [ stage.symlink $(m[1]).$(m[2]).$(m[3]) : $(project) - : $(copied) : $(property-set) ] ; - } - } - - return $(result) ; - } - } -} - -generators.register [ new installed-shared-lib-generator ] ; - - -# Main target rule for 'install'. -# -rule install ( name : sources * : requirements * : default-build * ) -{ - local project = [ project.current ] ; - - # Unless the user has explicitly asked us to hardcode dll paths, add - # <hardcode-dll-paths>false in requirements, to override default value. - if ! 
<hardcode-dll-paths>true in $(requirements) - { - requirements += <hardcode-dll-paths>false ; - } - - if <tag> in $(requirements:G) - { - errors.user-error - "The <tag> property is not allowed for the 'install' rule" ; - } - - targets.main-target-alternative - [ new install-target-class $(name) : $(project) - : [ targets.main-target-sources $(sources) : $(name) ] - : [ targets.main-target-requirements $(requirements) : $(project) ] - : [ targets.main-target-default-build $(default-build) : $(project) ] - ] ; -} - - -IMPORT $(__name__) : install : : install ; -IMPORT $(__name__) : install : : stage ; diff --git a/jam-files/boost-build/tools/stage.py b/jam-files/boost-build/tools/stage.py deleted file mode 100644 index 25eccbe5..00000000 --- a/jam-files/boost-build/tools/stage.py +++ /dev/null @@ -1,350 +0,0 @@ -# Status: ported. -# Base revision 64444. -# -# Copyright 2003 Dave Abrahams -# Copyright 2005, 2006 Rene Rivera -# Copyright 2002, 2003, 2004, 2005, 2006, 2010 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This module defines the 'install' rule, used to copy a set of targets to a -# single location. - -import b2.build.feature as feature -import b2.build.targets as targets -import b2.build.property as property -import b2.build.property_set as property_set -import b2.build.generators as generators -import b2.build.virtual_target as virtual_target - -from b2.manager import get_manager -from b2.util.sequence import unique -from b2.util import bjam_signature - -import b2.build.type - -import os.path -import re -import types - -feature.feature('install-dependencies', ['off', 'on'], ['incidental']) -feature.feature('install-type', [], ['free', 'incidental']) -feature.feature('install-source-root', [], ['free', 'path']) -feature.feature('so-version', [], ['free', 'incidental']) - -# If 'on', version symlinks for shared libraries will not be created. Affects -# Unix builds only. -feature.feature('install-no-version-symlinks', ['on'], ['optional', 'incidental']) - -class InstallTargetClass(targets.BasicTarget): - - def update_location(self, ps): - """If <location> is not set, sets it based on the project data.""" - - loc = ps.get('location') - if not loc: - loc = os.path.join(self.project().get('location'), self.name()) - ps = ps.add_raw(["<location>" + loc]) - - return ps - - def adjust_properties(self, target, build_ps): - a = target.action() - properties = [] - if a: - ps = a.properties() - properties = ps.all() - - # Unless <hardcode-dll-paths>true is in properties, which can happen - # only if the user has explicitly requested it, nuke all <dll-path> - # properties. - - if build_ps.get('hardcode-dll-paths') != ['true']: - properties = [p for p in properties if p.feature().name() != 'dll-path'] - - # If any <dll-path> properties were specified for installing, add - # them. - properties.extend(build_ps.get_properties('dll-path')) - - # Also copy <linkflags> feature from current build set, to be used - # for relinking. - properties.extend(build_ps.get_properties('linkflags')) - - # Remove the <tag> feature on original targets. - # And <location>. If stage target has another stage target in - # sources, then we shall get virtual targets with the <location> - # property set. 
- properties = [p for p in properties - if not p.feature().name() in ['tag', 'location']] - - properties.extend(build_ps.get_properties('dependency')) - - properties.extend(build_ps.get_properties('location')) - - - properties.extend(build_ps.get_properties('install-no-version-symlinks')) - - d = build_ps.get_properties('install-source-root') - - # Make the path absolute: we shall use it to compute relative paths and - # making the path absolute will help. - if d: - p = d[0] - properties.append(property.Property(p.feature(), os.path.abspath(p.value()))) - - return property_set.create(properties) - - - def construct(self, name, source_targets, ps): - - source_targets = self.targets_to_stage(source_targets, ps) - ps = self.update_location(ps) - - ename = ps.get('name') - if ename: - ename = ename[0] - if ename and len(source_targets) > 1: - get_manager().errors()("When <name> property is used in 'install', only one source is allowed") - - result = [] - - for i in source_targets: - - staged_targets = [] - new_ps = self.adjust_properties(i, ps) - - # See if something special should be done when staging this type. It - # is indicated by the presence of a special "INSTALLED_" type. - t = i.type() - if t and b2.build.type.registered("INSTALLED_" + t): - - if ename: - get_manager().errors()("In 'install': <name> property specified with target that requires relinking.") - else: - (r, targets) = generators.construct(self.project(), name, "INSTALLED_" + t, - new_ps, [i]) - assert isinstance(r, property_set.PropertySet) - staged_targets.extend(targets) - - else: - staged_targets.append(copy_file(self.project(), ename, i, new_ps)) - - if not staged_targets: - get_manager().errors()("Unable to generate staged version of " + i) - - result.extend(get_manager().virtual_targets().register(t) for t in staged_targets) - - return (property_set.empty(), result) - - def targets_to_stage(self, source_targets, ps): - """Given the list of source targets explicitly passed to 'stage', returns the - list of targets which must be staged.""" - - result = [] - - # Traverse the dependencies, if needed. - if ps.get('install-dependencies') == ['on']: - source_targets = self.collect_targets(source_targets) - - # Filter the target types, if needed. - included_types = ps.get('install-type') - for r in source_targets: - ty = r.type() - if ty: - # Do not stage searched libs. - if ty != "SEARCHED_LIB": - if included_types: - if self.include_type(ty, included_types): - result.append(r) - else: - result.append(r) - elif not included_types: - # Don't install typeless target if there is an explicit list of - # allowed types. - result.append(r) - - return result - - # CONSIDER: figure out why we can not use virtual-target.traverse here. - # - def collect_targets(self, targets): - - s = [t.creating_subvariant() for t in targets] - s = unique(s) - - result = set(targets) - for i in s: - i.all_referenced_targets(result) - - result2 = [] - for r in result: - if isinstance(r, property.Property): - - if r.feature().name() != 'use': - result2.append(r.value()) - else: - result2.append(r) - result2 = unique(result2) - return result2 - - # Returns true iff 'type' is subtype of some element of 'types-to-include'. - # - def include_type(self, type, types_to_include): - return any(b2.build.type.is_subtype(type, ti) for ti in types_to_include) - -# Creates a copy of target 'source'. The 'properties' object should have a -# <location> property which specifies where the target must be placed. 
-# -def copy_file(project, name, source, ps): - - if not name: - name = source.name() - - relative = "" - - new_a = virtual_target.NonScanningAction([source], "common.copy", ps) - source_root = ps.get('install-source-root') - if source_root: - source_root = source_root[0] - # Get the real path of the target. We probably need to strip relative - # path from the target name at construction. - path = os.path.join(source.path(), os.path.dirname(name)) - # Make the path absolute. Otherwise, it would be hard to compute the - # relative path. The 'source-root' is already absolute, see the - # 'adjust-properties' method above. - path = os.path.abspath(path) - - relative = os.path.relpath(path, source_root) - - name = os.path.join(relative, os.path.basename(name)) - return virtual_target.FileTarget(name, source.type(), project, new_a, exact=True) - -def symlink(name, project, source, ps): - a = virtual_target.Action([source], "symlink.ln", ps) - return virtual_target.FileTarget(name, source.type(), project, a, exact=True) - -def relink_file(project, source, ps): - action = source.action() - cloned_action = virtual_target.clone_action(action, project, "", ps) - targets = cloned_action.targets() - # We relink only on Unix, where exe or shared lib is always a single file. - assert len(targets) == 1 - return targets[0] - - -# Declare installed version of the EXE type. Generator for this type will cause -# relinking to the new location. -b2.build.type.register('INSTALLED_EXE', [], 'EXE') - -class InstalledExeGenerator(generators.Generator): - - def __init__(self): - generators.Generator.__init__(self, "install-exe", False, ['EXE'], ['INSTALLED_EXE']) - - def run(self, project, name, ps, source): - - need_relink = False; - - if ps.get('os') in ['NT', 'CYGWIN'] or ps.get('target-os') in ['windows', 'cygwin']: - # Never relink - pass - else: - # See if the dll-path properties are not changed during - # install. If so, copy, don't relink. - need_relink = ps.get('dll-path') != source[0].action().properties().get('dll-path') - - if need_relink: - return [relink_file(project, source, ps)] - else: - return [copy_file(project, None, source[0], ps)] - -generators.register(InstalledExeGenerator()) - - -# Installing a shared link on Unix might cause a creation of versioned symbolic -# links. -b2.build.type.register('INSTALLED_SHARED_LIB', [], 'SHARED_LIB') - -class InstalledSharedLibGenerator(generators.Generator): - - def __init__(self): - generators.Generator.__init__(self, 'install-shared-lib', False, ['SHARED_LIB'], ['INSTALLED_SHARED_LIB']) - - def run(self, project, name, ps, source): - - source = source[0] - if ps.get('os') in ['NT', 'CYGWIN'] or ps.get('target-os') in ['windows', 'cygwin']: - copied = copy_file(project, None, source, ps) - return [get_manager().virtual_targets().register(copied)] - else: - a = source.action() - if not a: - # Non-derived file, just copy. - copied = copy_file(project, source, ps) - else: - - need_relink = ps.get('dll-path') != source.action().properties().get('dll-path') - - if need_relink: - # Rpath changed, need to relink. - copied = relink_file(project, source, ps) - else: - copied = copy_file(project, None, source, ps) - - result = [get_manager().virtual_targets().register(copied)] - # If the name is in the form NNN.XXX.YYY.ZZZ, where all 'X', 'Y' and - # 'Z' are numbers, we need to create NNN.XXX and NNN.XXX.YYY - # symbolic links. 
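# e.g. a target named 'libfoo.so.1.2.3' matches with groups
# ('libfoo.so', '1', '2', '3'), yielding the symlinks libfoo.so,
# libfoo.so.1 and libfoo.so.1.2 below.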
- m = re.match("(.*)\\.([0123456789]+)\\.([0123456789]+)\\.([0123456789]+)$", - copied.name()); - if m: - # Symlink without version at all is used to make - # -lsome_library work. - result.append(symlink(m.group(1), project, copied, ps)) - - # Symlinks of some libfoo.N and libfoo.N.M are used so that - # library can found at runtime, if libfoo.N.M.X has soname of - # libfoo.N. That happens when the library makes some binary - # compatibility guarantees. If not, it is possible to skip those - # symlinks. - if ps.get('install-no-version-symlinks') != ['on']: - - result.append(symlink(m.group(1) + '.' + m.group(2), project, copied, ps)) - result.append(symlink(m.group(1) + '.' + m.group(2) + '.' + m.group(3), - project, copied, ps)) - - return result - -generators.register(InstalledSharedLibGenerator()) - - -# Main target rule for 'install'. -# -@bjam_signature((["name"], ["sources", "*"], ["requirements", "*"], - ["default_build", "*"], ["usage_requirements", "*"])) -def install(name, sources, requirements=[], default_build=[], usage_requirements=[]): - - requirements = requirements[:] - # Unless the user has explicitly asked us to hardcode dll paths, add - # <hardcode-dll-paths>false in requirements, to override default value. - if not '<hardcode-dll-paths>true' in requirements: - requirements.append('<hardcode-dll-paths>false') - - if any(r.startswith('<tag>') for r in requirements): - get_manager().errors()("The <tag> property is not allowed for the 'install' rule") - - from b2.manager import get_manager - t = get_manager().targets() - - project = get_manager().projects().current() - - return t.main_target_alternative( - InstallTargetClass(name, project, - t.main_target_sources(sources, name), - t.main_target_requirements(requirements, project), - t.main_target_default_build(default_build, project), - t.main_target_usage_requirements(usage_requirements, project))) - -get_manager().projects().add_rule("install", install) -get_manager().projects().add_rule("stage", install) - diff --git a/jam-files/boost-build/tools/stlport.jam b/jam-files/boost-build/tools/stlport.jam deleted file mode 100644 index 62eebda5..00000000 --- a/jam-files/boost-build/tools/stlport.jam +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright Gennadiy Rozental -# Copyright 2006 Rene Rivera -# Copyright 2003, 2004, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# The STLPort is usable by means of 'stdlib' feature. When -# stdlib=stlport is specified, default version of STLPort will be used, -# while stdlib=stlport-4.5 will use specific version. -# The subfeature value 'hostios' means to use host compiler's iostreams. -# -# The specific version of stlport is selected by features: -# The <runtime-link> feature selects between static and shared library -# The <runtime-debugging>on selects STLPort with debug symbols -# and stl debugging. -# There's no way to use STLPort with debug symbols but without -# stl debugging. - -# TODO: must implement selection of different STLPort installations based -# on used toolset. 
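[Editor's note: for reference, a configuration sketch matching the 'init' rule further down in this file; the version and paths are placeholders.]

    # in user-config.jam
    using stlport : 5.2 : /opt/stlport/include/stlport : /opt/stlport/lib ;

    # then select it in a build request, e.g.
    # bjam stdlib=stlport-5.2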
-# Also, finish various flags: -# -# This is copied from V1 toolset, "+" means "implemented" -#+flags $(CURR_TOOLSET) DEFINES <stlport-iostream>off : _STLP_NO_OWN_IOSTREAMS=1 _STLP_HAS_NO_NEW_IOSTREAMS=1 ; -#+flags $(CURR_TOOLSET) DEFINES <stlport-extensions>off : _STLP_NO_EXTENSIONS=1 ; -# flags $(CURR_TOOLSET) DEFINES <stlport-anachronisms>off : _STLP_NO_ANACHRONISMS=1 ; -# flags $(CURR_TOOLSET) DEFINES <stlport-cstd-namespace>global : _STLP_VENDOR_GLOBAL_CSTD=1 ; -# flags $(CURR_TOOLSET) DEFINES <exception-handling>off : _STLP_NO_EXCEPTIONS=1 ; -# flags $(CURR_TOOLSET) DEFINES <stlport-debug-alloc>on : _STLP_DEBUG_ALLOC=1 ; -#+flags $(CURR_TOOLSET) DEFINES <runtime-build>debug : _STLP_DEBUG=1 _STLP_DEBUG_UNINITIALIZED=1 ; -#+flags $(CURR_TOOLSET) DEFINES <runtime-link>dynamic : _STLP_USE_DYNAMIC_LIB=1 ; - - -import feature : feature subfeature ; -import project ; -import "class" : new ; -import targets ; -import property-set ; -import common ; -import type ; - -# Make this module into a project. -project.initialize $(__name__) ; -project stlport ; - -# The problem: how to request to use host compiler's iostreams? -# -# Solution 1: Global 'stlport-iostream' feature. -# That's ugly. Subfeature make more sense for stlport-specific thing. -# Solution 2: Use subfeature with two values, one of which ("use STLPort iostream") -# is default. -# The problem is that such subfeature will appear in target paths, and that's ugly -# Solution 3: Use optional subfeature with only one value. - -feature.extend stdlib : stlport ; -feature.compose <stdlib>stlport : <library>/stlport//stlport ; - -# STLport iostreams or native iostreams -subfeature stdlib stlport : iostream : hostios : optional propagated ; - -# STLport extensions -subfeature stdlib stlport : extensions : noext : optional propagated ; - -# STLport anachronisms -- NOT YET SUPPORTED -# subfeature stdlib stlport : anachronisms : on off ; - -# STLport debug allocation -- NOT YET SUPPORTED -#subfeature stdlib stlport : debug-alloc : off on ; - -# Declare a special target class to handle the creation of search-lib-target -# instances for STLport. We need a special class, because otherwise we'll have -# - declare prebuilt targets for all possible toolsets. And by the time 'init' -# is called we don't even know the list of toolsets that are registered -# - when host iostreams are used, we really should produce nothing. It would -# be hard/impossible to achieve this using prebuilt targets. - -class stlport-target-class : basic-target -{ - import feature project type errors generators ; - import set : difference ; - - rule __init__ ( project : headers ? : libraries * : version ? ) - { - basic-target.__init__ stlport : $(project) ; - self.headers = $(headers) ; - self.libraries = $(libraries) ; - self.version = $(version) ; - self.version.5 = [ MATCH "^(5[.][0123456789]+).*" : $(version) ] ; - - local requirements ; - requirements += <stdlib-stlport:version>$(self.version) ; - self.requirements = [ property-set.create $(requirements) ] ; - } - - rule generate ( property-set ) - { - # Since this target is built with <stdlib>stlport, it will also - # have <library>/stlport//stlport in requirements, which will - # cause a loop in main target references. Remove that property - # manually. 
- - property-set = [ property-set.create - [ difference - [ $(property-set).raw ] : - <library>/stlport//stlport - <stdlib>stlport - ] - ] ; - return [ basic-target.generate $(property-set) ] ; - } - - rule construct ( name : source-targets * : property-set ) - { - # Deduce the name of stlport library, based on toolset and - # debug setting. - local raw = [ $(property-set).raw ] ; - local hostios = [ feature.get-values <stdlib-stlport:iostream> : $(raw) ] ; - local toolset = [ feature.get-values <toolset> : $(raw) ] ; - - if $(self.version.5) - { - # Version 5.x - - # STLport host IO streams no longer supported. So we always - # need libraries. - - # name: stlport(stl)?[dg]?(_static)?.M.R - local name = stlport ; - if [ feature.get-values <runtime-debugging> : $(raw) ] = "on" - { - name += stl ; - switch $(toolset) - { - case gcc* : name += g ; - case darwin* : name += g ; - case * : name += d ; - } - } - - if [ feature.get-values <runtime-link> : $(raw) ] = "static" - { - name += _static ; - } - - # Starting with version 5.2.0, the STLport static libraries no longer - # include a version number in their name - local version.pre.5.2 = [ MATCH "^(5[.][01]+).*" : $(version) ] ; - if $(version.pre.5.2) || [ feature.get-values <runtime-link> : $(raw) ] != "static" - { - name += .$(self.version.5) ; - } - - name = $(name:J=) ; - - if [ feature.get-values <install-dependencies> : $(raw) ] = "on" - { - #~ Allow explicitly asking to install the STLport lib by - #~ refering to it directly: /stlport//stlport/<install-dependencies>on - #~ This allows for install packaging of all libs one might need for - #~ a standalone distribution. - import path : make : path-make ; - local runtime-link - = [ feature.get-values <runtime-link> : $(raw) ] ; - local lib-file.props - = [ property-set.create $(raw) <link>$(runtime-link) ] ; - local lib-file.prefix - = [ type.generated-target-prefix $(runtime-link:U)_LIB : $(lib-file.props) ] ; - local lib-file.suffix - = [ type.generated-target-suffix $(runtime-link:U)_LIB : $(lib-file.props) ] ; - lib-file.prefix - ?= "" "lib" ; - lib-file.suffix - ?= "" ; - local lib-file - = [ GLOB $(self.libraries) [ modules.peek : PATH ] : - $(lib-file.prefix)$(name).$(lib-file.suffix) ] ; - lib-file - = [ new file-reference [ path-make $(lib-file[1]) ] : $(self.project) ] ; - lib-file - = [ $(lib-file).generate "" ] ; - local lib-file.requirements - = [ targets.main-target-requirements - [ $(lib-file.props).raw ] <file>$(lib-file[-1]) - : $(self.project) ] ; - return [ generators.construct $(self.project) $(name) : LIB : $(lib-file.requirements) ] ; - } - else - { - #~ Otherwise, it's just a regular usage of the library. - return [ generators.construct - $(self.project) $(name) : SEARCHED_LIB : $(property-set) ] ; - } - } - else if ! $(hostios) && $(toolset) != msvc - { - # We don't need libraries if host istreams are used. For - # msvc, automatic library selection will be used. - - # name: stlport_<toolset>(_stldebug)? 
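# e.g. 'stlport_gcc' normally, or 'stlport_gcc_stldebug' when
# <runtime-debugging>on is in effect.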
- local name = stlport ; - name = $(name)_$(toolset) ; - if [ feature.get-values <runtime-debugging> : $(raw) ] = "on" - { - name = $(name)_stldebug ; - } - - return [ generators.construct - $(self.project) $(name) : SEARCHED_LIB : $(property-set) ] ; - } - else - { - return [ property-set.empty ] ; - } - } - - rule compute-usage-requirements ( subvariant ) - { - local usage-requirements = - <include>$(self.headers) - <dll-path>$(self.libraries) - <library-path>$(self.libraries) - ; - - local rproperties = [ $(subvariant).build-properties ] ; - # CONSIDER: should this "if" sequence be replaced with - # some use of 'property-map' class? - if [ $(rproperties).get <runtime-debugging> ] = "on" - { - usage-requirements += - <define>_STLP_DEBUG=1 - <define>_STLP_DEBUG_UNINITIALIZED=1 ; - } - if [ $(rproperties).get <runtime-link> ] = "shared" - { - usage-requirements += - <define>_STLP_USE_DYNAMIC_LIB=1 ; - } - if [ $(rproperties).get <stdlib-stlport:extensions> ] = noext - { - usage-requirements += - <define>_STLP_NO_EXTENSIONS=1 ; - } - if [ $(rproperties).get <stdlib-stlport:iostream> ] = hostios - { - usage-requirements += - <define>_STLP_NO_OWN_IOSTREAMS=1 - <define>_STLP_HAS_NO_NEW_IOSTREAMS=1 ; - } - if $(self.version.5) - { - # Version 5.x - if [ $(rproperties).get <threading> ] = "single" - { - # Since STLport5 doesn't normally support single-thread - # we force STLport5 into the multi-thread mode. Hence - # getting what other libs provide of single-thread code - # linking against a multi-thread lib. - usage-requirements += - <define>_STLP_THREADS=1 ; - } - } - - return [ property-set.create $(usage-requirements) ] ; - } -} - -rule stlport-target ( headers ? : libraries * : version ? ) -{ - local project = [ project.current ] ; - - targets.main-target-alternative - [ new stlport-target-class $(project) : $(headers) : $(libraries) - : $(version) - ] ; -} - -local .version-subfeature-defined ; - -# Initialize stlport support. -rule init ( - version ? : - headers : # Location of header files - libraries * # Location of libraries, lib and bin subdirs of STLport. - ) -{ - # FIXME: need to use common.check-init-parameters here. - # At the moment, that rule always tries to define subfeature - # of the 'toolset' feature, while we need to define subfeature - # of <stdlib>stlport, so tweaks to check-init-parameters are needed. - if $(version) - { - if ! $(.version-subfeature-defined) - { - feature.subfeature stdlib stlport : version : : propagated ; - .version-subfeature-defined = true ; - } - feature.extend-subfeature stdlib stlport : version : $(version) ; - } - - # Declare the main target for this STLPort version. - stlport-target $(headers) : $(libraries) : $(version) ; -} - diff --git a/jam-files/boost-build/tools/sun.jam b/jam-files/boost-build/tools/sun.jam deleted file mode 100644 index 0ca927d3..00000000 --- a/jam-files/boost-build/tools/sun.jam +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (C) Christopher Currie 2003. Permission to copy, use, -# modify, sell and distribute this software is granted provided this -# copyright notice appears in all copies. This software is provided -# "as is" without express or implied warranty, and with no claim as -# to its suitability for any purpose. 
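[Editor's note: a configuration sketch for this toolset; the version and command path are placeholders, and /opt/SUNWspro/bin is the default location searched by the 'init' rule below.]

    # in user-config.jam
    using sun : 12.1 : /opt/SUNWspro/bin/CC ;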
- -import property ; -import generators ; -import os ; -import toolset : flags ; -import feature ; -import type ; -import common ; - -feature.extend toolset : sun ; -toolset.inherit sun : unix ; -generators.override sun.prebuilt : builtin.lib-generator ; -generators.override sun.prebuilt : builtin.prebuilt ; -generators.override sun.searched-lib-generator : searched-lib-generator ; - -feature.extend stdlib : sun-stlport ; -feature.compose <stdlib>sun-stlport - : <cxxflags>-library=stlport4 <linkflags>-library=stlport4 - ; - -rule init ( version ? : command * : options * ) -{ - local condition = [ - common.check-init-parameters sun : version $(version) ] ; - - command = [ common.get-invocation-command sun : CC - : $(command) : "/opt/SUNWspro/bin" ] ; - - # Even if the real compiler is not found, put CC to - # command line so that user see command line that would have being executed. - command ?= CC ; - - common.handle-options sun : $(condition) : $(command) : $(options) ; - - command_c = $(command[1--2]) $(command[-1]:B=cc) ; - - toolset.flags sun CONFIG_C_COMMAND $(condition) : $(command_c) ; -} - -# Declare generators -generators.register-c-compiler sun.compile.c : C : OBJ : <toolset>sun ; -generators.register-c-compiler sun.compile.c++ : CPP : OBJ : <toolset>sun ; - -# Declare flags and actions for compilation -flags sun.compile OPTIONS <debug-symbols>on : -g ; -flags sun.compile OPTIONS <profiling>on : -xprofile=tcov ; -flags sun.compile OPTIONS <optimization>speed : -xO4 ; -flags sun.compile OPTIONS <optimization>space : -xO2 -xspace ; -flags sun.compile OPTIONS <threading>multi : -mt ; -flags sun.compile OPTIONS <warnings>off : -erroff ; -flags sun.compile OPTIONS <warnings>on : -erroff=%none ; -flags sun.compile OPTIONS <warnings>all : -erroff=%none ; -flags sun.compile OPTIONS <warnings-as-errors>on : -errwarn ; - -flags sun.compile.c++ OPTIONS <inlining>off : +d ; - -# The -m32 and -m64 options are supported starting -# with Sun Studio 12. On earlier compilers, the -# 'address-model' feature is not supported and should not -# be used. Instead, use -xarch=generic64 command line -# option. -# See http://svn.boost.org/trac/boost/ticket/1186 -# for details. -flags sun OPTIONS <address-model>32 : -m32 ; -flags sun OPTIONS <address-model>64 : -m64 ; -# On sparc, there's a difference between -Kpic -# and -KPIC. The first is slightly more efficient, -# but has the limits on the size of GOT table. -# For minimal fuss on user side, we use -KPIC here. -# See http://svn.boost.org/trac/boost/ticket/1186#comment:6 -# for detailed explanation. 
-flags sun OPTIONS <link>shared : -KPIC ; - -flags sun.compile OPTIONS <cflags> ; -flags sun.compile.c++ OPTIONS <cxxflags> ; -flags sun.compile DEFINES <define> ; -flags sun.compile INCLUDES <include> ; - -actions compile.c -{ - "$(CONFIG_C_COMMAND)" $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -actions compile.c++ -{ - "$(CONFIG_COMMAND)" $(OPTIONS) -D$(DEFINES) -I"$(INCLUDES)" -c -o "$(<)" "$(>)" -} - -# Declare flags and actions for linking -flags sun.link OPTIONS <debug-symbols>on : -g ; -# Strip the binary when no debugging is needed -flags sun.link OPTIONS <debug-symbols>off : -s ; -flags sun.link OPTIONS <profiling>on : -xprofile=tcov ; -flags sun.link OPTIONS <threading>multi : -mt ; -flags sun.link OPTIONS <linkflags> ; -flags sun.link LINKPATH <library-path> ; -flags sun.link FINDLIBS-ST <find-static-library> ; -flags sun.link FINDLIBS-SA <find-shared-library> ; -flags sun.link LIBRARIES <library-file> ; -flags sun.link LINK-RUNTIME <runtime-link>static : static ; -flags sun.link LINK-RUNTIME <runtime-link>shared : dynamic ; -flags sun.link RPATH <dll-path> ; -# On gcc, there are separate options for dll path at runtime and -# link time. On Solaris, there's only one: -R, so we have to use -# it, even though it's bad idea. -flags sun.link RPATH <xdll-path> ; - -# The POSIX real-time library is always needed (nanosleep, clock_gettime etc.) -flags sun.link FINDLIBS-SA : rt ; - -rule link ( targets * : sources * : properties * ) -{ - SPACE on $(targets) = " " ; -} - -actions link bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(OPTIONS) -L"$(LINKPATH)" -R"$(RPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -Bdynamic -l$(FINDLIBS-SA) -Bstatic -l$(FINDLIBS-ST) -B$(LINK-RUNTIME) -} - -# Slight mods for dlls -rule link.dll ( targets * : sources * : properties * ) -{ - SPACE on $(targets) = " " ; -} - -actions link.dll bind LIBRARIES -{ - "$(CONFIG_COMMAND)" $(OPTIONS) -L"$(LINKPATH)" -R"$(RPATH)" -o "$(<)" -h$(<[1]:D=) -G "$(>)" "$(LIBRARIES)" -Bdynamic -l$(FINDLIBS-SA) -Bstatic -l$(FINDLIBS-ST) -B$(LINK-RUNTIME) -} - -# Declare action for creating static libraries -actions piecemeal archive -{ - "$(CONFIG_COMMAND)" -xar -o "$(<)" "$(>)" -} - diff --git a/jam-files/boost-build/tools/symlink.jam b/jam-files/boost-build/tools/symlink.jam deleted file mode 100644 index b33e8260..00000000 --- a/jam-files/boost-build/tools/symlink.jam +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2002, 2003 Rene Rivera -# Copyright 2002, 2003, 2004, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Defines the "symlink" special target. 'symlink' targets make symbolic links -# to the sources. - -import targets modules path class os feature project property-set ; - -.count = 0 ; - -feature.feature symlink-location : project-relative build-relative : incidental ; - -# The class representing "symlink" targets. -# -class symlink-targets : basic-target -{ - import numbers modules class property project path ; - - rule __init__ ( - project - : targets * - : sources * - ) - { - # Generate a fake name for now. Need unnamed targets eventually. - local c = [ modules.peek symlink : .count ] ; - modules.poke symlink : .count : [ numbers.increment $(c) ] ; - local fake-name = symlink#$(c) ; - - basic-target.__init__ $(fake-name) : $(project) : $(sources) ; - - # Remember the targets to map the sources onto. Pad or truncate - # to fit the sources given. 
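# e.g. targets (a) with sources (x y) gives (a y), while targets
# (a b c) with sources (x) gives just (a).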
- self.targets = ; - for local source in $(sources) - { - if $(targets) - { - self.targets += $(targets[1]) ; - targets = $(targets[2-]) ; - } - else - { - self.targets += $(source) ; - } - } - - # The virtual targets corresponding to the given targets. - self.virtual-targets = ; - } - - rule construct ( name : source-targets * : property-set ) - { - local i = 1 ; - for local t in $(source-targets) - { - local s = $(self.targets[$(i)]) ; - local a = [ class.new action $(t) : symlink.ln : $(property-set) ] ; - local vt = [ class.new file-target $(s:D=) - : [ $(t).type ] : $(self.project) : $(a) ] ; - - # Place the symlink in the directory relative to the project - # location, instead of placing it in the build directory. - if [ property.select <symlink-location> : [ $(property-set).raw ] ] = <symlink-location>project-relative - { - $(vt).set-path [ path.root $(s:D) [ $(self.project).get location ] ] ; - } - - self.virtual-targets += $(vt) ; - i = [ numbers.increment $(i) ] ; - } - return [ property-set.empty ] $(self.virtual-targets) ; - } -} - -# Creates a symbolic link from a set of targets to a set of sources. -# The targets and sources map one to one. The symlinks generated are -# limited to be the ones given as the sources. That is, the targets -# are either padded or trimmed to equate to the sources. The padding -# is done with the name of the corresponding source. For example:: -# -# symlink : one two ; -# -# Is equal to:: -# -# symlink one two : one two ; -# -# Names for symlink are relative to the project location. They cannot -# include ".." path components. -rule symlink ( - targets * - : sources * - ) -{ - local project = [ project.current ] ; - - return [ targets.main-target-alternative - [ class.new symlink-targets $(project) : $(targets) : - # Note: inline targets are not supported for symlink, intentionally, - # since it's used to linking existing non-local targets. - $(sources) ] ] ; -} - -rule ln -{ - local os ; - if [ modules.peek : UNIX ] { os = UNIX ; } - else { os ?= [ os.name ] ; } - # Remember the path to make the link relative to where the symlink is located. - local path-to-source = [ path.relative-to - [ path.make [ on $(<) return $(LOCATE) ] ] - [ path.make [ on $(>) return $(LOCATE) ] ] ] ; - if $(path-to-source) = . - { - PATH_TO_SOURCE on $(<) = "" ; - } - else - { - PATH_TO_SOURCE on $(<) = [ path.native $(path-to-source) ] ; - } - ln-$(os) $(<) : $(>) ; -} - -actions ln-UNIX -{ - ln -f -s '$(>:D=:R=$(PATH_TO_SOURCE))' '$(<)' -} - -# there is a way to do this; we fall back to a copy for now -actions ln-NT -{ - echo "NT symlinks not supported yet, making copy" - del /f /q "$(<)" 2>nul >nul - copy "$(>)" "$(<)" $(NULL_OUT) -} - -IMPORT $(__name__) : symlink : : symlink ; diff --git a/jam-files/boost-build/tools/symlink.py b/jam-files/boost-build/tools/symlink.py deleted file mode 100644 index 6345ded6..00000000 --- a/jam-files/boost-build/tools/symlink.py +++ /dev/null @@ -1,112 +0,0 @@ -# Status: ported. -# Base revision: 64488. - -# Copyright 2003 Dave Abrahams -# Copyright 2002, 2003 Rene Rivera -# Copyright 2002, 2003, 2004, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Defines the "symlink" special target. 'symlink' targets make symbolic links -# to the sources. 
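The `ln` rule in the deleted symlink.jam computes the link's value relative to the directory the symlink will live in, so the link keeps working if the whole tree is relocated. A sketch of the same computation using only the standard library (POSIX only; the jam code falls back to a plain copy on NT):

    import os

    def make_relative_symlink(source, link_name):
        # Mirror the PATH_TO_SOURCE computation: express the source path
        # relative to the symlink's own directory.
        link_dir = os.path.dirname(os.path.abspath(link_name))
        rel = os.path.relpath(os.path.abspath(source), link_dir)
        if os.path.lexists(link_name):
            os.remove(link_name)  # 'ln -f' semantics: replace an old link
        os.symlink(rel, link_name)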
- -import b2.build.feature as feature -import b2.build.targets as targets -import b2.build.property_set as property_set -import b2.build.virtual_target as virtual_target -import b2.build.targets - -from b2.manager import get_manager - -import bjam - -import os - - -feature.feature("symlink-location", ["project-relative", "build-relative"], ["incidental"]) - -class SymlinkTarget(targets.BasicTarget): - - _count = 0 - - def __init__(self, project, targets, sources): - - # Generate a fake name for now. Need unnamed targets eventually. - fake_name = "symlink#%s" % SymlinkTarget._count - SymlinkTarget._count = SymlinkTarget._count + 1 - - b2.build.targets.BasicTarget.__init__(self, fake_name, project, sources) - - # Remember the targets to map the sources onto. Pad or truncate - # to fit the sources given. - assert len(targets) <= len(sources) - self.targets = targets[:] + sources[len(targets):] - - # The virtual targets corresponding to the given targets. - self.virtual_targets = [] - - def construct(self, name, source_targets, ps): - i = 0 - for t in source_targets: - s = self.targets[i] - a = virtual_target.Action(self.manager(), [t], "symlink.ln", ps) - vt = virtual_target.FileTarget(os.path.basename(s), t.type(), self.project(), a) - - # Place the symlink in the directory relative to the project - # location, instead of placing it in the build directory. - if not ps.get('symlink-location') == "project-relative": - vt.set_path(os.path.join(self.project().get('location'), os.path.dirname(s))) - - vt = get_manager().virtual_targets().register(vt) - self.virtual_targets.append(vt) - i = i + 1 - - return (property_set.empty(), self.virtual_targets) - -# Creates a symbolic link from a set of targets to a set of sources. -# The targets and sources map one to one. The symlinks generated are -# limited to be the ones given as the sources. That is, the targets -# are either padded or trimmed to equate to the sources. The padding -# is done with the name of the corresponding source. For example:: -# -# symlink : one two ; -# -# Is equal to:: -# -# symlink one two : one two ; -# -# Names for symlink are relative to the project location. They cannot -# include ".." path components. -def symlink(targets, sources): - - from b2.manager import get_manager - t = get_manager().targets() - p = get_manager().projects().current() - - return t.main_target_alternative( - SymlinkTarget(p, targets, - # Note: inline targets are not supported for symlink, intentionally, - # since it's used to linking existing non-local targets. 
- sources)) - - -def setup_ln(targets, sources, ps): - - source_path = bjam.call("get-target-variable", sources[0], "LOCATE")[0] - target_path = bjam.call("get-target-variable", targets[0], "LOCATE")[0] - rel = os.path.relpath(source_path, target_path) - if rel == ".": - bjam.call("set-target-variable", targets, "PATH_TO_SOURCE", "") - else: - bjam.call("set-target-variable", targets, "PATH_TO_SOURCE", rel) - -if os.name == 'nt': - ln_action = """echo "NT symlinks not supported yet, making copy" -del /f /q "$(<)" 2>nul >nul -copy "$(>)" "$(<)" $(NULL_OUT)""" -else: - ln_action = "ln -f -s '$(>:D=:R=$(PATH_TO_SOURCE))' '$(<)'" - -get_manager().engine().register_action("symlink.ln", ln_action, function=setup_ln) - -get_manager().projects().add_rule("symlink", symlink) diff --git a/jam-files/boost-build/tools/testing-aux.jam b/jam-files/boost-build/tools/testing-aux.jam deleted file mode 100644 index 525dafd0..00000000 --- a/jam-files/boost-build/tools/testing-aux.jam +++ /dev/null @@ -1,210 +0,0 @@ -# This module is imported by testing.py. The definitions here are -# too tricky to do in Python - -# Causes the 'target' to exist after bjam invocation if and only if all the -# dependencies were successfully built. -# -rule expect-success ( target : dependency + : requirements * ) -{ - **passed** $(target) : $(sources) ; -} -IMPORT testing : expect-success : : testing.expect-success ; - -# Causes the 'target' to exist after bjam invocation if and only if all some of -# the dependencies were not successfully built. -# -rule expect-failure ( target : dependency + : properties * ) -{ - local grist = [ MATCH ^<(.*)> : $(dependency:G) ] ; - local marker = $(dependency:G=$(grist)*fail) ; - (failed-as-expected) $(marker) ; - FAIL_EXPECTED $(dependency) ; - LOCATE on $(marker) = [ on $(dependency) return $(LOCATE) ] ; - RMOLD $(marker) ; - DEPENDS $(marker) : $(dependency) ; - DEPENDS $(target) : $(marker) ; - **passed** $(target) : $(marker) ; -} -IMPORT testing : expect-failure : : testing.expect-failure ; - -# The rule/action combination used to report successful passing of a test. -# -rule **passed** -{ - # Force deletion of the target, in case any dependencies failed to build. - RMOLD $(<) ; -} - - -# Used to create test files signifying passed tests. -# -actions **passed** -{ - echo passed > "$(<)" -} - - -# Used to create replacement object files that do not get created during tests -# that are expected to fail. -# -actions (failed-as-expected) -{ - echo failed as expected > "$(<)" -} - -# Runs executable 'sources' and stores stdout in file 'target'. Unless -# --preserve-test-targets command line option has been specified, removes the -# executable. The 'target-to-remove' parameter controls what should be removed: -# - if 'none', does not remove anything, ever -# - if empty, removes 'source' -# - if non-empty and not 'none', contains a list of sources to remove. -# -rule capture-output ( target : source : properties * : targets-to-remove * ) -{ - output-file on $(target) = $(target:S=.output) ; - LOCATE on $(target:S=.output) = [ on $(target) return $(LOCATE) ] ; - - # The INCLUDES kill a warning about independent target... - INCLUDES $(target) : $(target:S=.output) ; - # but it also puts .output into dependency graph, so we must tell jam it is - # OK if it cannot find the target or updating rule. - NOCARE $(target:S=.output) ; - - # This has two-fold effect. First it adds input files to the dependendency - # graph, preventing a warning. 
Second, it causes input files to be bound - # before target is created. Therefore, they are bound using SEARCH setting - # on them and not LOCATE setting of $(target), as in other case (due to jam - # bug). - DEPENDS $(target) : [ on $(target) return $(INPUT_FILES) ] ; - - if $(targets-to-remove) = none - { - targets-to-remove = ; - } - else if ! $(targets-to-remove) - { - targets-to-remove = $(source) ; - } - - if [ on $(target) return $(REMOVE_TEST_TARGETS) ] - { - TEMPORARY $(targets-to-remove) ; - # Set a second action on target that will be executed after capture - # output action. The 'RmTemps' rule has the 'ignore' modifier so it is - # always considered succeeded. This is needed for 'run-fail' test. For - # that test the target will be marked with FAIL_EXPECTED, and without - # 'ignore' successful execution will be negated and be reported as - # failure. With 'ignore' we do not detect a case where removing files - # fails, but it is not likely to happen. - RmTemps $(target) : $(targets-to-remove) ; - } -} - - -if [ os.name ] = NT -{ - .STATUS = %status% ; - .SET_STATUS = "set status=%ERRORLEVEL%" ; - .RUN_OUTPUT_NL = "echo." ; - .STATUS_0 = "%status% EQU 0 (" ; - .STATUS_NOT_0 = "%status% NEQ 0 (" ; - .VERBOSE = "%verbose% EQU 1 (" ; - .ENDIF = ")" ; - .SHELL_SET = "set " ; - .CATENATE = type ; - .CP = copy ; -} -else -{ - .STATUS = "$status" ; - .SET_STATUS = "status=$?" ; - .RUN_OUTPUT_NL = "echo" ; - .STATUS_0 = "test $status -eq 0 ; then" ; - .STATUS_NOT_0 = "test $status -ne 0 ; then" ; - .VERBOSE = "test $verbose -eq 1 ; then" ; - .ENDIF = "fi" ; - .SHELL_SET = "" ; - .CATENATE = cat ; - .CP = cp ; -} - - -.VERBOSE_TEST = 0 ; -if --verbose-test in [ modules.peek : ARGV ] -{ - .VERBOSE_TEST = 1 ; -} - - -.RM = [ common.rm-command ] ; - - -actions capture-output bind INPUT_FILES output-file -{ - $(PATH_SETUP) - $(LAUNCHER) "$(>)" $(ARGS) "$(INPUT_FILES)" > "$(output-file)" 2>&1 - $(.SET_STATUS) - $(.RUN_OUTPUT_NL) >> "$(output-file)" - echo EXIT STATUS: $(.STATUS) >> "$(output-file)" - if $(.STATUS_0) - $(.CP) "$(output-file)" "$(<)" - $(.ENDIF) - $(.SHELL_SET)verbose=$(.VERBOSE_TEST) - if $(.STATUS_NOT_0) - $(.SHELL_SET)verbose=1 - $(.ENDIF) - if $(.VERBOSE) - echo ====== BEGIN OUTPUT ====== - $(.CATENATE) "$(output-file)" - echo ====== END OUTPUT ====== - $(.ENDIF) - exit $(.STATUS) -} - -IMPORT testing : capture-output : : testing.capture-output ; - - -actions quietly updated ignore piecemeal together RmTemps -{ - $(.RM) "$(>)" -} - - -.MAKE_FILE = [ common.file-creation-command ] ; - -actions unit-test -{ - $(PATH_SETUP) - $(LAUNCHER) $(>) $(ARGS) && $(.MAKE_FILE) $(<) -} - -rule record-time ( target : source : start end user system ) -{ - local src-string = [$(source:G=:J=",")"] " ; - USER_TIME on $(target) += $(src-string)$(user) ; - SYSTEM_TIME on $(target) += $(src-string)$(system) ; -} - -# Calling this rule requests that Boost Build time how long it taks to build the -# 'source' target and display the results both on the standard output and in the -# 'target' file. -# -rule time ( target : source : properties * ) -{ - # Set up rule for recording timing information. - __TIMING_RULE__ on $(source) = testing.record-time $(target) ; - - # Make sure that the source is rebuilt any time we need to retrieve that - # information. 
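The capture-output action above runs the test binary, records combined stdout/stderr plus the exit status in a .output file, and promotes that file to the passing marker only on success. A rough Python equivalent of that control flow (file names and the function signature are illustrative):

    import shutil
    import subprocess

    def capture_output(exe, args, output_file, target):
        # Run the program, merging stderr into the captured stdout.
        with open(output_file, "w") as out:
            proc = subprocess.run([exe] + args, stdout=out,
                                  stderr=subprocess.STDOUT)
            out.write("\nEXIT STATUS: %d\n" % proc.returncode)
        # Only a zero exit status promotes the output to the passing marker.
        if proc.returncode == 0:
            shutil.copy(output_file, target)
        return proc.returncode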
- REBUILDS $(target) : $(source) ; -} - - -actions time -{ - echo user: $(USER_TIME) - echo system: $(SYSTEM_TIME) - - echo user: $(USER_TIME)" seconds" > "$(<)" - echo system: $(SYSTEM_TIME)" seconds" >> "$(<)" -} diff --git a/jam-files/boost-build/tools/testing.jam b/jam-files/boost-build/tools/testing.jam deleted file mode 100644 index c42075b7..00000000 --- a/jam-files/boost-build/tools/testing.jam +++ /dev/null @@ -1,581 +0,0 @@ -# Copyright 2005 Dave Abrahams -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This module implements regression testing framework. It declares a number of -# main target rules which perform some action and, if the results are OK, -# creates an output file. -# -# The exact list of rules is: -# 'compile' -- creates .test file if compilation of sources was -# successful. -# 'compile-fail' -- creates .test file if compilation of sources failed. -# 'run' -- creates .test file is running of executable produced from -# sources was successful. Also leaves behind .output file -# with the output from program run. -# 'run-fail' -- same as above, but .test file is created if running fails. -# -# In all cases, presence of .test file is an indication that the test passed. -# For more convenient reporting, you might want to use C++ Boost regression -# testing utilities (see http://www.boost.org/more/regression.html). -# -# For historical reason, a 'unit-test' rule is available which has the same -# syntax as 'exe' and behaves just like 'run'. - -# Things to do: -# - Teach compiler_status handle Jamfile.v2. -# Notes: -# - <no-warn> is not implemented, since it is Como-specific, and it is not -# clear how to implement it -# - std::locale-support is not implemented (it is used in one test). - - -import alias ; -import "class" ; -import common ; -import errors ; -import feature ; -import generators ; -import os ; -import path ; -import project ; -import property ; -import property-set ; -import regex ; -import sequence ; -import targets ; -import toolset ; -import type ; -import virtual-target ; - - -rule init ( ) -{ -} - - -# Feature controling the command used to lanch test programs. -feature.feature testing.launcher : : free optional ; - -feature.feature test-info : : free incidental ; -feature.feature testing.arg : : free incidental ; -feature.feature testing.input-file : : free dependency ; - -feature.feature preserve-test-targets : on off : incidental propagated ; - -# Register target types. -type.register TEST : test ; -type.register COMPILE : : TEST ; -type.register COMPILE_FAIL : : TEST ; -type.register RUN_OUTPUT : run ; -type.register RUN : : TEST ; -type.register RUN_FAIL : : TEST ; -type.register LINK_FAIL : : TEST ; -type.register LINK : : TEST ; -type.register UNIT_TEST : passed : TEST ; - - -# Declare the rules which create main targets. While the 'type' module already -# creates rules with the same names for us, we need extra convenience: default -# name of main target, so write our own versions. - -# Helper rule. Create a test target, using basename of first source if no target -# name is explicitly passed. Remembers the created target in a global variable. -# -rule make-test ( target-type : sources + : requirements * : target-name ? 
) -{ - target-name ?= $(sources[1]:D=:S=) ; - - # Having periods (".") in the target name is problematic because the typed - # generator will strip the suffix and use the bare name for the file - # targets. Even though the location-prefix averts problems most times it - # does not prevent ambiguity issues when referring to the test targets. For - # example when using the XML log output. So we rename the target to remove - # the periods, and provide an alias for users. - local real-name = [ regex.replace $(target-name) "[.]" "~" ] ; - - local project = [ project.current ] ; - # The <location-prefix> forces the build system for generate paths in the - # form '$build_dir/array1.test/gcc/debug'. This is necessary to allow - # post-processing tools to work. - local t = [ targets.create-typed-target [ type.type-from-rule-name - $(target-type) ] : $(project) : $(real-name) : $(sources) : - $(requirements) <location-prefix>$(real-name).test ] ; - - # The alias to the real target, per period replacement above. - if $(real-name) != $(target-name) - { - alias $(target-name) : $(t) ; - } - - # Remember the test (for --dump-tests). A good way would be to collect all - # given a project. This has some technical problems: e.g. we can not call - # this dump from a Jamfile since projects referred by 'build-project' are - # not available until the whole Jamfile has been loaded. - .all-tests += $(t) ; - return $(t) ; -} - - -# Note: passing more that one cpp file here is known to fail. Passing a cpp file -# and a library target works. -# -rule compile ( sources + : requirements * : target-name ? ) -{ - return [ make-test compile : $(sources) : $(requirements) : $(target-name) ] - ; -} - - -rule compile-fail ( sources + : requirements * : target-name ? ) -{ - return [ make-test compile-fail : $(sources) : $(requirements) : - $(target-name) ] ; -} - - -rule link ( sources + : requirements * : target-name ? ) -{ - return [ make-test link : $(sources) : $(requirements) : $(target-name) ] ; -} - - -rule link-fail ( sources + : requirements * : target-name ? ) -{ - return [ make-test link-fail : $(sources) : $(requirements) : $(target-name) - ] ; -} - - -rule handle-input-files ( input-files * ) -{ - if $(input-files[2]) - { - # Check that sorting made when creating property-set instance will not - # change the ordering. - if [ sequence.insertion-sort $(input-files) ] != $(input-files) - { - errors.user-error "Names of input files must be sorted alphabetically" - : "due to internal limitations" ; - } - } - return <testing.input-file>$(input-files) ; -} - - -rule run ( sources + : args * : input-files * : requirements * : target-name ? : - default-build * ) -{ - requirements += <testing.arg>$(args:J=" ") ; - requirements += [ handle-input-files $(input-files) ] ; - return [ make-test run : $(sources) : $(requirements) : $(target-name) ] ; -} - - -rule run-fail ( sources + : args * : input-files * : requirements * : - target-name ? : default-build * ) -{ - requirements += <testing.arg>$(args:J=" ") ; - requirements += [ handle-input-files $(input-files) ] ; - return [ make-test run-fail : $(sources) : $(requirements) : $(target-name) - ] ; -} - - -# Use 'test-suite' as a synonym for 'alias', for backward compatibility. -IMPORT : alias : : test-suite ; - - -# For all main targets in 'project-module', which are typed targets with type -# derived from 'TEST', produce some interesting information. 
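Because periods in a test name break suffix stripping in the typed generator, make-test above renames the real target with tildes and keeps the original spelling as an alias. The renaming itself is small enough to show directly (a sketch, with our own function name):

    def test_target_names(target_name):
        # Periods would be eaten by suffix stripping, so the real target
        # uses '~' and the original spelling survives as an alias.
        real_name = target_name.replace(".", "~")
        alias = target_name if real_name != target_name else None
        return real_name, alias

    assert test_target_names("foo") == ("foo", None)
    assert test_target_names("std.locale") == ("std~locale", "std.locale")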
-# -rule dump-tests -{ - for local t in $(.all-tests) - { - dump-test $(t) ; - } -} - - -# Given a project location in normalized form (slashes are forward), compute the -# name of the Boost library. -# -local rule get-library-name ( path ) -{ - # Path is in normalized form, so all slashes are forward. - local match1 = [ MATCH /(tools|libs)/(.*)/(test|example) : $(path) ] ; - local match2 = [ MATCH /(tools|libs)/(.*)$ : $(path) ] ; - local match3 = [ MATCH (/status$) : $(path) ] ; - - if $(match1) { return $(match1[2]) ; } - else if $(match2) { return $(match2[2]) ; } - else if $(match3) { return "" ; } - else if --dump-tests in [ modules.peek : ARGV ] - { - # The 'run' rule and others might be used outside boost. In that case, - # just return the path, since the 'library name' makes no sense. - return $(path) ; - } -} - - -# Was an XML dump requested? -.out-xml = [ MATCH --out-xml=(.*) : [ modules.peek : ARGV ] ] ; - - -# Takes a target (instance of 'basic-target') and prints -# - its type -# - its name -# - comments specified via the <test-info> property -# - relative location of all source from the project root. -# -rule dump-test ( target ) -{ - local type = [ $(target).type ] ; - local name = [ $(target).name ] ; - local project = [ $(target).project ] ; - - local project-root = [ $(project).get project-root ] ; - local library = [ get-library-name [ path.root [ $(project).get location ] - [ path.pwd ] ] ] ; - if $(library) - { - name = $(library)/$(name) ; - } - - local sources = [ $(target).sources ] ; - local source-files ; - for local s in $(sources) - { - if [ class.is-a $(s) : file-reference ] - { - local location = [ path.root [ path.root [ $(s).name ] - [ $(s).location ] ] [ path.pwd ] ] ; - - source-files += [ path.relative-to [ path.root $(project-root) - [ path.pwd ] ] $(location) ] ; - } - } - - local target-name = [ $(project).get location ] // [ $(target).name ] .test - ; - target-name = $(target-name:J=) ; - - local r = [ $(target).requirements ] ; - # Extract values of the <test-info> feature. - local test-info = [ $(r).get <test-info> ] ; - - # If the user requested XML output on the command-line, add the test info to - # that XML file rather than dumping them to stdout. - if $(.out-xml) - { - local nl = " -" ; - .contents on $(.out-xml) += - "$(nl) <test type=\"$(type)\" name=\"$(name)\">" - "$(nl) <target><![CDATA[$(target-name)]]></target>" - "$(nl) <info><![CDATA[$(test-info)]]></info>" - "$(nl) <source><![CDATA[$(source-files)]]></source>" - "$(nl) </test>" - ; - } - else - { - # Format them into a single string of quoted strings. - test-info = \"$(test-info:J=\"\ \")\" ; - - ECHO boost-test($(type)) \"$(name)\" [$(test-info)] ":" - \"$(source-files)\" ; - } -} - - -# Register generators. Depending on target type, either 'expect-success' or -# 'expect-failure' rule will be used. -generators.register-standard testing.expect-success : OBJ : COMPILE ; -generators.register-standard testing.expect-failure : OBJ : COMPILE_FAIL ; -generators.register-standard testing.expect-success : RUN_OUTPUT : RUN ; -generators.register-standard testing.expect-failure : RUN_OUTPUT : RUN_FAIL ; -generators.register-standard testing.expect-failure : EXE : LINK_FAIL ; -generators.register-standard testing.expect-success : EXE : LINK ; - -# Generator which runs an EXE and captures output. -generators.register-standard testing.capture-output : EXE : RUN_OUTPUT ; - -# Generator which creates a target if sources run successfully. Differs from RUN -# in that run output is not captured. 
The reason why it exists is that the 'run' -# rule is much better for automated testing, but is not user-friendly (see -# http://article.gmane.org/gmane.comp.lib.boost.build/6353). -generators.register-standard testing.unit-test : EXE : UNIT_TEST ; - - -# The action rules called by generators. - -# Causes the 'target' to exist after bjam invocation if and only if all the -# dependencies were successfully built. -# -rule expect-success ( target : dependency + : requirements * ) -{ - **passed** $(target) : $(sources) ; -} - - -# Causes the 'target' to exist after bjam invocation if and only if all some of -# the dependencies were not successfully built. -# -rule expect-failure ( target : dependency + : properties * ) -{ - local grist = [ MATCH ^<(.*)> : $(dependency:G) ] ; - local marker = $(dependency:G=$(grist)*fail) ; - (failed-as-expected) $(marker) ; - FAIL_EXPECTED $(dependency) ; - LOCATE on $(marker) = [ on $(dependency) return $(LOCATE) ] ; - RMOLD $(marker) ; - DEPENDS $(marker) : $(dependency) ; - DEPENDS $(target) : $(marker) ; - **passed** $(target) : $(marker) ; -} - - -# The rule/action combination used to report successful passing of a test. -# -rule **passed** -{ - # Dump all the tests, if needed. We do it here, since dump should happen - # only after all Jamfiles have been read, and there is no such place - # currently defined (but there should be). - if ! $(.dumped-tests) && ( --dump-tests in [ modules.peek : ARGV ] ) - { - .dumped-tests = true ; - dump-tests ; - } - - # Force deletion of the target, in case any dependencies failed to build. - RMOLD $(<) ; -} - - -# Used to create test files signifying passed tests. -# -actions **passed** -{ - echo passed > "$(<)" -} - - -# Used to create replacement object files that do not get created during tests -# that are expected to fail. -# -actions (failed-as-expected) -{ - echo failed as expected > "$(<)" -} - - -rule run-path-setup ( target : source : properties * ) -{ - # For testing, we need to make sure that all dynamic libraries needed by the - # test are found. So, we collect all paths from dependency libraries (via - # xdll-path property) and add whatever explicit dll-path user has specified. - # The resulting paths are added to the environment on each test invocation. - local dll-paths = [ feature.get-values <dll-path> : $(properties) ] ; - dll-paths += [ feature.get-values <xdll-path> : $(properties) ] ; - dll-paths += [ on $(source) return $(RUN_PATH) ] ; - dll-paths = [ sequence.unique $(dll-paths) ] ; - if $(dll-paths) - { - dll-paths = [ sequence.transform path.native : $(dll-paths) ] ; - PATH_SETUP on $(target) = [ common.prepend-path-variable-command - [ os.shared-library-path-variable ] : $(dll-paths) ] ; - } -} - - -local argv = [ modules.peek : ARGV ] ; - -toolset.flags testing.capture-output ARGS <testing.arg> ; -toolset.flags testing.capture-output INPUT_FILES <testing.input-file> ; -toolset.flags testing.capture-output LAUNCHER <testing.launcher> ; - - -# Runs executable 'sources' and stores stdout in file 'target'. Unless -# --preserve-test-targets command line option has been specified, removes the -# executable. The 'target-to-remove' parameter controls what should be removed: -# - if 'none', does not remove anything, ever -# - if empty, removes 'source' -# - if non-empty and not 'none', contains a list of sources to remove. 
-#
-rule capture-output ( target : source : properties * : targets-to-remove * )
-{
-    output-file on $(target) = $(target:S=.output) ;
-    LOCATE on $(target:S=.output) = [ on $(target) return $(LOCATE) ] ;
-
-    # The INCLUDES kill a warning about independent target...
-    INCLUDES $(target) : $(target:S=.output) ;
-    # but it also puts .output into dependency graph, so we must tell jam it is
-    # OK if it cannot find the target or updating rule.
-    NOCARE $(target:S=.output) ;
-
-    # This has two-fold effect. First it adds input files to the dependency
-    # graph, preventing a warning. Second, it causes input files to be bound
-    # before target is created. Therefore, they are bound using SEARCH setting
-    # on them and not LOCATE setting of $(target), as in the other case (due to
-    # a jam bug).
-    DEPENDS $(target) : [ on $(target) return $(INPUT_FILES) ] ;
-
-    if $(targets-to-remove) = none
-    {
-        targets-to-remove = ;
-    }
-    else if ! $(targets-to-remove)
-    {
-        targets-to-remove = $(source) ;
-    }
-
-    run-path-setup $(target) : $(source) : $(properties) ;
-
-    if [ feature.get-values preserve-test-targets : $(properties) ] = off
-    {
-        TEMPORARY $(targets-to-remove) ;
-        # Set a second action on target that will be executed after capture
-        # output action. The 'RmTemps' rule has the 'ignore' modifier so it is
-        # always considered succeeded. This is needed for the 'run-fail' test.
-        # For that test the target will be marked with FAIL_EXPECTED, and
-        # without 'ignore' successful execution will be negated and be reported
-        # as failure. With 'ignore' we do not detect a case where removing
-        # files fails, but it is not likely to happen.
-        RmTemps $(target) : $(targets-to-remove) ;
-    }
-}
-
-
-if [ os.name ] = NT
-{
-    .STATUS = %status% ;
-    .SET_STATUS = "set status=%ERRORLEVEL%" ;
-    .RUN_OUTPUT_NL = "echo." ;
-    .STATUS_0 = "%status% EQU 0 (" ;
-    .STATUS_NOT_0 = "%status% NEQ 0 (" ;
-    .VERBOSE = "%verbose% EQU 1 (" ;
-    .ENDIF = ")" ;
-    .SHELL_SET = "set " ;
-    .CATENATE = type ;
-    .CP = copy ;
-}
-else
-{
-    .STATUS = "$status" ;
-    .SET_STATUS = "status=$?"
; - .RUN_OUTPUT_NL = "echo" ; - .STATUS_0 = "test $status -eq 0 ; then" ; - .STATUS_NOT_0 = "test $status -ne 0 ; then" ; - .VERBOSE = "test $verbose -eq 1 ; then" ; - .ENDIF = "fi" ; - .SHELL_SET = "" ; - .CATENATE = cat ; - .CP = cp ; -} - - -.VERBOSE_TEST = 0 ; -if --verbose-test in [ modules.peek : ARGV ] -{ - .VERBOSE_TEST = 1 ; -} - - -.RM = [ common.rm-command ] ; - - -actions capture-output bind INPUT_FILES output-file -{ - $(PATH_SETUP) - $(LAUNCHER) "$(>)" $(ARGS) "$(INPUT_FILES)" > "$(output-file)" 2>&1 - $(.SET_STATUS) - $(.RUN_OUTPUT_NL) >> "$(output-file)" - echo EXIT STATUS: $(.STATUS) >> "$(output-file)" - if $(.STATUS_0) - $(.CP) "$(output-file)" "$(<)" - $(.ENDIF) - $(.SHELL_SET)verbose=$(.VERBOSE_TEST) - if $(.STATUS_NOT_0) - $(.SHELL_SET)verbose=1 - $(.ENDIF) - if $(.VERBOSE) - echo ====== BEGIN OUTPUT ====== - $(.CATENATE) "$(output-file)" - echo ====== END OUTPUT ====== - $(.ENDIF) - exit $(.STATUS) -} - - -actions quietly updated ignore piecemeal together RmTemps -{ - $(.RM) "$(>)" -} - - -.MAKE_FILE = [ common.file-creation-command ] ; - -toolset.flags testing.unit-test LAUNCHER <testing.launcher> ; -toolset.flags testing.unit-test ARGS <testing.arg> ; - - -rule unit-test ( target : source : properties * ) -{ - run-path-setup $(target) : $(source) : $(properties) ; -} - - -actions unit-test -{ - $(PATH_SETUP) - $(LAUNCHER) $(>) $(ARGS) && $(.MAKE_FILE) $(<) -} - - -IMPORT $(__name__) : compile compile-fail run run-fail link link-fail - : : compile compile-fail run run-fail link link-fail ; - - -type.register TIME : time ; -generators.register-standard testing.time : : TIME ; - - -rule record-time ( target : source : start end user system ) -{ - local src-string = [$(source:G=:J=",")"] " ; - USER_TIME on $(target) += $(src-string)$(user) ; - SYSTEM_TIME on $(target) += $(src-string)$(system) ; -} - - -IMPORT testing : record-time : : testing.record-time ; - - -# Calling this rule requests that Boost Build time how long it taks to build the -# 'source' target and display the results both on the standard output and in the -# 'target' file. -# -rule time ( target : source : properties * ) -{ - # Set up rule for recording timing information. - __TIMING_RULE__ on $(source) = testing.record-time $(target) ; - - # Make sure that the source is rebuilt any time we need to retrieve that - # information. - REBUILDS $(target) : $(source) ; -} - - -actions time -{ - echo user: $(USER_TIME) - echo system: $(SYSTEM_TIME) - - echo user: $(USER_TIME)" seconds" > "$(<)" - echo system: $(SYSTEM_TIME)" seconds" >> "$(<)" -} diff --git a/jam-files/boost-build/tools/testing.py b/jam-files/boost-build/tools/testing.py deleted file mode 100644 index 3b53500c..00000000 --- a/jam-files/boost-build/tools/testing.py +++ /dev/null @@ -1,342 +0,0 @@ -# Status: ported, except for --out-xml -# Base revision: 64488 -# -# Copyright 2005 Dave Abrahams -# Copyright 2002, 2003, 2004, 2005, 2010 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This module implements regression testing framework. It declares a number of -# main target rules which perform some action and, if the results are OK, -# creates an output file. -# -# The exact list of rules is: -# 'compile' -- creates .test file if compilation of sources was -# successful. -# 'compile-fail' -- creates .test file if compilation of sources failed. 
-# 'run' -- creates .test file is running of executable produced from -# sources was successful. Also leaves behind .output file -# with the output from program run. -# 'run-fail' -- same as above, but .test file is created if running fails. -# -# In all cases, presence of .test file is an indication that the test passed. -# For more convenient reporting, you might want to use C++ Boost regression -# testing utilities (see http://www.boost.org/more/regression.html). -# -# For historical reason, a 'unit-test' rule is available which has the same -# syntax as 'exe' and behaves just like 'run'. - -# Things to do: -# - Teach compiler_status handle Jamfile.v2. -# Notes: -# - <no-warn> is not implemented, since it is Como-specific, and it is not -# clear how to implement it -# - std::locale-support is not implemented (it is used in one test). - -import b2.build.feature as feature -import b2.build.type as type -import b2.build.targets as targets -import b2.build.generators as generators -import b2.build.toolset as toolset -import b2.tools.common as common -import b2.util.option as option -import b2.build_system as build_system - - - -from b2.manager import get_manager -from b2.util import stem, bjam_signature -from b2.util.sequence import unique - -import bjam - -import re -import os.path -import sys - -def init(): - pass - -# Feature controling the command used to lanch test programs. -feature.feature("testing.launcher", [], ["free", "optional"]) - -feature.feature("test-info", [], ["free", "incidental"]) -feature.feature("testing.arg", [], ["free", "incidental"]) -feature.feature("testing.input-file", [], ["free", "dependency"]) - -feature.feature("preserve-test-targets", ["on", "off"], ["incidental", "propagated"]) - -# Register target types. -type.register("TEST", ["test"]) -type.register("COMPILE", [], "TEST") -type.register("COMPILE_FAIL", [], "TEST") - -type.register("RUN_OUTPUT", ["run"]) -type.register("RUN", [], "TEST") -type.register("RUN_FAIL", [], "TEST") - -type.register("LINK", [], "TEST") -type.register("LINK_FAIL", [], "TEST") -type.register("UNIT_TEST", ["passed"], "TEST") - -__all_tests = [] - -# Declare the rules which create main targets. While the 'type' module already -# creates rules with the same names for us, we need extra convenience: default -# name of main target, so write our own versions. - -# Helper rule. Create a test target, using basename of first source if no target -# name is explicitly passed. Remembers the created target in a global variable. -def make_test(target_type, sources, requirements, target_name=None): - - if not target_name: - target_name = stem(os.path.basename(sources[0])) - - # Having periods (".") in the target name is problematic because the typed - # generator will strip the suffix and use the bare name for the file - # targets. Even though the location-prefix averts problems most times it - # does not prevent ambiguity issues when referring to the test targets. For - # example when using the XML log output. So we rename the target to remove - # the periods, and provide an alias for users. - real_name = target_name.replace(".", "~") - - project = get_manager().projects().current() - # The <location-prefix> forces the build system for generate paths in the - # form '$build_dir/array1.test/gcc/debug'. This is necessary to allow - # post-processing tools to work. 
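The type.register calls above build a small inheritance hierarchy: COMPILE, RUN, LINK and friends all derive from TEST, which lets generators dispatch on the base type. A toy model of such a registry (an illustration of the idea, not the b2 type module's actual API):

    TYPES = {}  # name -> (suffixes, base type or None)

    def register(name, suffixes=(), base=None):
        TYPES[name] = (tuple(suffixes), base)

    def is_derived(name, base):
        # Walk the base-type chain, as generator dispatch on 'TEST' would.
        while name is not None:
            if name == base:
                return True
            name = TYPES[name][1]
        return False

    register("TEST", ["test"])
    register("COMPILE", [], "TEST")
    register("RUN_OUTPUT", ["run"])
    register("RUN", [], "TEST")
    assert is_derived("RUN", "TEST")
    assert not is_derived("RUN_OUTPUT", "TEST")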
- t = get_manager().targets().create_typed_target( - type.type_from_rule_name(target_type), project, real_name, sources, - requirements + ["<location-prefix>" + real_name + ".test"], [], []) - - # The alias to the real target, per period replacement above. - if real_name != target_name: - get_manager().projects().project_rules().all_names_["alias"]( - target_name, [t]) - - # Remember the test (for --dump-tests). A good way would be to collect all - # given a project. This has some technical problems: e.g. we can not call - # this dump from a Jamfile since projects referred by 'build-project' are - # not available until the whole Jamfile has been loaded. - __all_tests.append(t) - return t - - -# Note: passing more that one cpp file here is known to fail. Passing a cpp file -# and a library target works. -# -@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"])) -def compile(sources, requirements, target_name=None): - return make_test("compile", sources, requirements, target_name) - -@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"])) -def compile_fail(sources, requirements, target_name=None): - return make_test("compile-fail", sources, requirements, target_name) - -@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"])) -def link(sources, requirements, target_name=None): - return make_test("link", sources, requirements, target_name) - -@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"])) -def link_fail(sources, requirements, target_name=None): - return make_test("link-fail", sources, requirements, target_name) - -def handle_input_files(input_files): - if len(input_files) > 1: - # Check that sorting made when creating property-set instance will not - # change the ordering. - if sorted(input_files) != input_files: - get_manager().errors()("Names of input files must be sorted alphabetically\n" + - "due to internal limitations") - return ["<testing.input-file>" + f for f in input_files] - -@bjam_signature((["sources", "*"], ["args", "*"], ["input_files", "*"], - ["requirements", "*"], ["target_name", "?"], - ["default_build", "*"])) -def run(sources, args, input_files, requirements, target_name=None, default_build=[]): - if args: - requirements.append("<testing.arg>" + " ".join(args)) - requirements.extend(handle_input_files(input_files)) - return make_test("run", sources, requirements, target_name) - -@bjam_signature((["sources", "*"], ["args", "*"], ["input_files", "*"], - ["requirements", "*"], ["target_name", "?"], - ["default_build", "*"])) -def run_fail(sources, args, input_files, requirements, target_name=None, default_build=[]): - if args: - requirements.append("<testing.arg>" + " ".join(args)) - requirements.extend(handle_input_files(input_files)) - return make_test("run-fail", sources, requirements, target_name) - -# Register all the rules -for name in ["compile", "compile-fail", "link", "link-fail", "run", "run-fail"]: - get_manager().projects().add_rule(name, getattr(sys.modules[__name__], name.replace("-", "_"))) - -# Use 'test-suite' as a synonym for 'alias', for backward compatibility. -from b2.build.alias import alias -get_manager().projects().add_rule("test-suite", alias) - -# For all main targets in 'project-module', which are typed targets with type -# derived from 'TEST', produce some interesting information. 
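The handle_input_files check above rejects unsorted input-file lists because property-set construction sorts properties internally; accepting unsorted input would silently reorder the files a test receives. A standalone illustration of the same check:

    def handle_input_files(input_files):
        if len(input_files) > 1 and sorted(input_files) != input_files:
            raise ValueError("Names of input files must be sorted "
                             "alphabetically due to internal limitations")
        return ["<testing.input-file>" + f for f in input_files]

    print(handle_input_files(["a.txt", "b.txt"]))
    # ['<testing.input-file>a.txt', '<testing.input-file>b.txt']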
-# -def dump_tests(): - for t in __all_tests: - dump_test(t) - -# Given a project location in normalized form (slashes are forward), compute the -# name of the Boost library. -# -__ln1 = re.compile("/(tools|libs)/(.*)/(test|example)") -__ln2 = re.compile("/(tools|libs)/(.*)$") -__ln3 = re.compile("(/status$)") -def get_library_name(path): - - path = path.replace("\\", "/") - match1 = __ln1.match(path) - match2 = __ln2.match(path) - match3 = __ln3.match(path) - - if match1: - return match1.group(2) - elif match2: - return match2.group(2) - elif match3: - return "" - elif option.get("dump-tests", False, True): - # The 'run' rule and others might be used outside boost. In that case, - # just return the path, since the 'library name' makes no sense. - return path - -# Was an XML dump requested? -__out_xml = option.get("out-xml", False, True) - -# Takes a target (instance of 'basic-target') and prints -# - its type -# - its name -# - comments specified via the <test-info> property -# - relative location of all source from the project root. -# -def dump_test(target): - type = target.type() - name = target.name() - project = target.project() - - project_root = project.get('project-root') - library = get_library_name(os.path.abspath(project.get('location'))) - if library: - name = library + "/" + name - - sources = target.sources() - source_files = [] - for s in sources: - if isinstance(s, targets.FileReference): - location = os.path.abspath(os.path.join(s.location(), s.name())) - source_files.append(os.path.relpath(location, os.path.abspath(project_root))) - - target_name = project.get('location') + "//" + target.name() + ".test" - - test_info = target.requirements().get('test-info') - test_info = " ".join('"' + ti + '"' for ti in test_info) - - # If the user requested XML output on the command-line, add the test info to - # that XML file rather than dumping them to stdout. - #if $(.out-xml) - #{ -# local nl = " -#" ; -# .contents on $(.out-xml) += -# "$(nl) <test type=\"$(type)\" name=\"$(name)\">" -# "$(nl) <target><![CDATA[$(target-name)]]></target>" -# "$(nl) <info><![CDATA[$(test-info)]]></info>" -# "$(nl) <source><![CDATA[$(source-files)]]></source>" -# "$(nl) </test>" -# ; -# } -# else - - source_files = " ".join('"' + s + '"' for s in source_files) - if test_info: - print 'boost-test(%s) "%s" [%s] : %s' % (type, name, test_info, source_files) - else: - print 'boost-test(%s) "%s" : %s' % (type, name, source_files) - -# Register generators. Depending on target type, either 'expect-success' or -# 'expect-failure' rule will be used. -generators.register_standard("testing.expect-success", ["OBJ"], ["COMPILE"]) -generators.register_standard("testing.expect-failure", ["OBJ"], ["COMPILE_FAIL"]) -generators.register_standard("testing.expect-success", ["RUN_OUTPUT"], ["RUN"]) -generators.register_standard("testing.expect-failure", ["RUN_OUTPUT"], ["RUN_FAIL"]) -generators.register_standard("testing.expect-success", ["EXE"], ["LINK"]) -generators.register_standard("testing.expect-failure", ["EXE"], ["LINK_FAIL"]) - -# Generator which runs an EXE and captures output. -generators.register_standard("testing.capture-output", ["EXE"], ["RUN_OUTPUT"]) - -# Generator which creates a target if sources run successfully. Differs from RUN -# in that run output is not captured. The reason why it exists is that the 'run' -# rule is much better for automated testing, but is not user-friendly (see -# http://article.gmane.org/gmane.comp.lib.boost.build/6353). 
-generators.register_standard("testing.unit-test", ["EXE"], ["UNIT_TEST"]) - -# FIXME: if those calls are after bjam.call, then bjam will crash -# when toolset.flags calls bjam.caller. -toolset.flags("testing.capture-output", "ARGS", [], ["<testing.arg>"]) -toolset.flags("testing.capture-output", "INPUT_FILES", [], ["<testing.input-file>"]) -toolset.flags("testing.capture-output", "LAUNCHER", [], ["<testing.launcher>"]) - -toolset.flags("testing.unit-test", "LAUNCHER", [], ["<testing.launcher>"]) -toolset.flags("testing.unit-test", "ARGS", [], ["<testing.arg>"]) - -type.register("TIME", ["time"]) -generators.register_standard("testing.time", [], ["TIME"]) - - -# The following code sets up actions for this module. It's pretty convoluted, -# but the basic points is that we most of actions are defined by Jam code -# contained in testing-aux.jam, which we load into Jam module named 'testing' - -def run_path_setup(target, sources, ps): - - # For testing, we need to make sure that all dynamic libraries needed by the - # test are found. So, we collect all paths from dependency libraries (via - # xdll-path property) and add whatever explicit dll-path user has specified. - # The resulting paths are added to the environment on each test invocation. - dll_paths = ps.get('dll-path') - dll_paths.extend(ps.get('xdll-path')) - dll_paths.extend(bjam.call("get-target-variable", sources, "RUN_PATH")) - dll_paths = unique(dll_paths) - if dll_paths: - bjam.call("set-target-variable", target, "PATH_SETUP", - common.prepend_path_variable_command( - common.shared_library_path_variable(), dll_paths)) - -def capture_output_setup(target, sources, ps): - run_path_setup(target, sources, ps) - - if ps.get('preserve-test-targets') == ['off']: - bjam.call("set-target-variable", target, "REMOVE_TEST_TARGETS", "1") - -get_manager().engine().register_bjam_action("testing.capture-output", - capture_output_setup) - - -path = os.path.dirname(get_manager().projects().loaded_tool_module_path_[__name__]) -import b2.util.os_j -get_manager().projects().project_rules()._import_rule("testing", "os.name", - b2.util.os_j.name) -import b2.tools.common -get_manager().projects().project_rules()._import_rule("testing", "common.rm-command", - b2.tools.common.rm_command) -get_manager().projects().project_rules()._import_rule("testing", "common.file-creation-command", - b2.tools.common.file_creation_command) - -bjam.call("load", "testing", os.path.join(path, "testing-aux.jam")) - - -for name in ["expect-success", "expect-failure", "time"]: - get_manager().engine().register_bjam_action("testing." + name) - -get_manager().engine().register_bjam_action("testing.unit-test", - run_path_setup) - -if option.get("dump-tests", False, True): - build_system.add_pre_build_hook(dump_tests) diff --git a/jam-files/boost-build/tools/types/__init__.py b/jam-files/boost-build/tools/types/__init__.py deleted file mode 100644 index f972b714..00000000 --- a/jam-files/boost-build/tools/types/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -__all__ = [ - 'asm', - 'cpp', - 'exe', - 'html', - 'lib', - 'obj', - 'rsp', -] - -def register_all (): - for i in __all__: - m = __import__ (__name__ + '.' + i) - reg = i + '.register ()' - #exec (reg) - -# TODO: (PF) I thought these would be imported automatically. Anyone knows why they aren't? 
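The register_all loop at the end of types/__init__.py builds an `'i.register ()'` string but never executes it (the exec is commented out), so registration only happens as an import side effect; note also that bare `__import__` on a dotted name returns the top-level package rather than the submodule, which likely explains the TODO above. A sketch of the more direct form:

    import importlib

    def register_all(package, modules):
        for name in modules:
            # import_module resolves the dotted submodule (unlike bare
            # __import__) and we call its register() explicitly.
            mod = importlib.import_module(package + "." + name)
            mod.register()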
-register_all () diff --git a/jam-files/boost-build/tools/types/asm.jam b/jam-files/boost-build/tools/types/asm.jam deleted file mode 100644 index a340db36..00000000 --- a/jam-files/boost-build/tools/types/asm.jam +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright Craig Rodrigues 2005. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -type ASM : s S asm ; diff --git a/jam-files/boost-build/tools/types/asm.py b/jam-files/boost-build/tools/types/asm.py deleted file mode 100644 index b4e1c30e..00000000 --- a/jam-files/boost-build/tools/types/asm.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright Craig Rodrigues 2005. -# Copyright (c) 2008 Steven Watanabe -# -# Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -from b2.build import type - -def register(): - type.register_type('ASM', ['s', 'S', 'asm']) - -register() diff --git a/jam-files/boost-build/tools/types/cpp.jam b/jam-files/boost-build/tools/types/cpp.jam deleted file mode 100644 index 3159cdd7..00000000 --- a/jam-files/boost-build/tools/types/cpp.jam +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright David Abrahams 2004. -# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus -# Copyright 2010 Rene Rivera -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) -import type ; -import scanner ; - -class c-scanner : scanner -{ - import path ; - import regex ; - import scanner ; - import sequence ; - import virtual-target ; - - rule __init__ ( includes * ) - { - scanner.__init__ ; - - for local i in $(includes) - { - self.includes += [ sequence.transform path.native - : [ regex.split $(i:G=) "&&" ] ] ; - } - } - - rule pattern ( ) - { - return "#[ \t]*include[ ]*(<(.*)>|\"(.*)\")" ; - } - - rule process ( target : matches * : binding ) - { - local angle = [ regex.transform $(matches) : "<(.*)>" ] ; - angle = [ sequence.transform path.native : $(angle) ] ; - local quoted = [ regex.transform $(matches) : "\"(.*)\"" ] ; - quoted = [ sequence.transform path.native : $(quoted) ] ; - - # CONSIDER: the new scoping rule seem to defeat "on target" variables. - local g = [ on $(target) return $(HDRGRIST) ] ; - local b = [ NORMALIZE_PATH $(binding:D) ] ; - - # Attach binding of including file to included targets. When a target is - # directly created from virtual target this extra information is - # unnecessary. But in other cases, it allows us to distinguish between - # two headers of the same name included from different places. We do not - # need this extra information for angle includes, since they should not - # depend on including file (we can not get literal "." in include path). - local g2 = $(g)"#"$(b) ; - - angle = $(angle:G=$(g)) ; - quoted = $(quoted:G=$(g2)) ; - - local all = $(angle) $(quoted) ; - - INCLUDES $(target) : $(all) ; - NOCARE $(all) ; - SEARCH on $(angle) = $(self.includes:G=) ; - SEARCH on $(quoted) = $(b) $(self.includes:G=) ; - - # Just propagate the current scanner to includes in hope that includes - # do not change scanners. 
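The c-scanner above recognizes both include forms with a single regex and deliberately keeps angle and quoted includes apart, since only quoted includes search the including file's directory. The same pattern, exercised in Python:

    import re

    # The pattern from the c-scanner's pattern rule, verbatim.
    INCLUDE_RE = re.compile(r'#[ \t]*include[ ]*(<(.*)>|"(.*)")')

    def scan_includes(text):
        angle, quoted = [], []
        for m in INCLUDE_RE.finditer(text):
            if m.group(2) is not None:
                angle.append(m.group(2))   # #include <...>
            else:
                quoted.append(m.group(3))  # #include "..."
        return angle, quoted

    assert scan_includes('#include <vector>\n#include "foo.h"\n') == \
        (['vector'], ['foo.h'])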
- scanner.propagate $(__name__) : $(angle) $(quoted) : $(target) ; - - ISFILE $(angle) $(quoted) ; - } -} - -scanner.register c-scanner : include ; - -type.register CPP : cpp cxx cc ; -type.register H : h ; -type.register HPP : hpp : H ; -type.register C : c ; - -# It most cases where a CPP file or a H file is a source of some action, we -# should rebuild the result if any of files included by CPP/H are changed. One -# case when this is not needed is installation, which is handled specifically. -type.set-scanner CPP : c-scanner ; -type.set-scanner C : c-scanner ; -# One case where scanning of H/HPP files is necessary is PCH generation -- if -# any header included by HPP being precompiled changes, we need to recompile the -# header. -type.set-scanner H : c-scanner ; -type.set-scanner HPP : c-scanner ; diff --git a/jam-files/boost-build/tools/types/cpp.py b/jam-files/boost-build/tools/types/cpp.py deleted file mode 100644 index 7b56111c..00000000 --- a/jam-files/boost-build/tools/types/cpp.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -from b2.build import type - -def register (): - type.register_type ('CPP', ['cpp', 'cxx', 'cc']) - -register () diff --git a/jam-files/boost-build/tools/types/exe.jam b/jam-files/boost-build/tools/types/exe.jam deleted file mode 100644 index 47109513..00000000 --- a/jam-files/boost-build/tools/types/exe.jam +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -import type ; - -type.register EXE ; -type.set-generated-target-suffix EXE : <target-os>windows : "exe" ; -type.set-generated-target-suffix EXE : <target-os>cygwin : "exe" ; diff --git a/jam-files/boost-build/tools/types/exe.py b/jam-files/boost-build/tools/types/exe.py deleted file mode 100644 index a4935e24..00000000 --- a/jam-files/boost-build/tools/types/exe.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -from b2.build import type - -def register (): - type.register_type ('EXE', ['exe'], None, ['NT', 'CYGWIN']) - type.register_type ('EXE', [], None, []) - -register () diff --git a/jam-files/boost-build/tools/types/html.jam b/jam-files/boost-build/tools/types/html.jam deleted file mode 100644 index 5cd337d0..00000000 --- a/jam-files/boost-build/tools/types/html.jam +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -type HTML : html ; diff --git a/jam-files/boost-build/tools/types/html.py b/jam-files/boost-build/tools/types/html.py deleted file mode 100644 index 63af4d90..00000000 --- a/jam-files/boost-build/tools/types/html.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. 
(See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -from b2.build import type - -def register (): - type.register_type ('HTML', ['html']) - -register () diff --git a/jam-files/boost-build/tools/types/lib.jam b/jam-files/boost-build/tools/types/lib.jam deleted file mode 100644 index 854ab8fd..00000000 --- a/jam-files/boost-build/tools/types/lib.jam +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -import type ; # for set-generated-target-suffix -import os ; - -# The following naming scheme is used for libraries. -# -# On *nix: -# libxxx.a static library -# libxxx.so shared library -# -# On windows (msvc) -# libxxx.lib static library -# xxx.dll DLL -# xxx.lib import library -# -# On windows (mingw): -# libxxx.a static library -# libxxx.dll DLL -# libxxx.dll.a import library -# -# On cygwin i.e. <target-os>cygwin -# libxxx.a static library -# cygxxx.dll DLL -# libxxx.dll.a import library -# - -type.register LIB ; - -# FIXME: should not register both extensions on both platforms. -type.register STATIC_LIB : a lib : LIB ; - -# The 'lib' prefix is used everywhere -type.set-generated-target-prefix STATIC_LIB : : lib ; - -# Use '.lib' suffix for windows -type.set-generated-target-suffix STATIC_LIB : <target-os>windows : lib ; - -# Except with gcc. -type.set-generated-target-suffix STATIC_LIB : <toolset>gcc <target-os>windows : a ; - -# Use xxx.lib for import libs -type IMPORT_LIB : : STATIC_LIB ; -type.set-generated-target-prefix IMPORT_LIB : : "" ; -type.set-generated-target-suffix IMPORT_LIB : : lib ; - -# Except with gcc (mingw or cygwin), where use libxxx.dll.a -type.set-generated-target-prefix IMPORT_LIB : <toolset>gcc : lib ; -type.set-generated-target-suffix IMPORT_LIB : <toolset>gcc : dll.a ; - -type.register SHARED_LIB : so dll dylib : LIB ; - -# Both mingw and cygwin use libxxx.dll naming scheme. -# On Linux, use "lib" prefix -type.set-generated-target-prefix SHARED_LIB : : lib ; -# But don't use it on windows -type.set-generated-target-prefix SHARED_LIB : <target-os>windows : "" ; -# But use it again on mingw -type.set-generated-target-prefix SHARED_LIB : <toolset>gcc <target-os>windows : lib ; -# And use 'cyg' on cygwin -type.set-generated-target-prefix SHARED_LIB : <target-os>cygwin : cyg ; - - -type.set-generated-target-suffix SHARED_LIB : <target-os>windows : dll ; -type.set-generated-target-suffix SHARED_LIB : <target-os>cygwin : dll ; -type.set-generated-target-suffix SHARED_LIB : <target-os>darwin : dylib ; - -type SEARCHED_LIB : : LIB ; -# This is needed so that when we create a target of SEARCHED_LIB -# type, there's no prefix or suffix automatically added. -type.set-generated-target-prefix SEARCHED_LIB : : "" ; -type.set-generated-target-suffix SEARCHED_LIB : : "" ; diff --git a/jam-files/boost-build/tools/types/lib.py b/jam-files/boost-build/tools/types/lib.py deleted file mode 100644 index d0ec1fb5..00000000 --- a/jam-files/boost-build/tools/types/lib.py +++ /dev/null @@ -1,77 +0,0 @@ -# Status: ported -# Base revision: 64456. -# Copyright David Abrahams 2004. -# Copyright Vladimir Prus 2010. -# Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -import b2.build.type as type - -# The following naming scheme is used for libraries. 
-# -# On *nix: -# libxxx.a static library -# libxxx.so shared library -# -# On windows (msvc) -# libxxx.lib static library -# xxx.dll DLL -# xxx.lib import library -# -# On windows (mingw): -# libxxx.a static library -# libxxx.dll DLL -# libxxx.dll.a import library -# -# On cygwin i.e. <target-os>cygwin -# libxxx.a static library -# cygxxx.dll DLL -# libxxx.dll.a import library -# - -type.register('LIB') - -# FIXME: should not register both extensions on both platforms. -type.register('STATIC_LIB', ['a', 'lib'], 'LIB') - -# The 'lib' prefix is used everywhere -type.set_generated_target_prefix('STATIC_LIB', [], 'lib') - -# Use '.lib' suffix for windows -type.set_generated_target_suffix('STATIC_LIB', ['<target-os>windows'], 'lib') - -# Except with gcc. -type.set_generated_target_suffix('STATIC_LIB', ['<toolset>gcc', '<target-os>windows'], 'a') - -# Use xxx.lib for import libs -type.register('IMPORT_LIB', [], 'STATIC_LIB') -type.set_generated_target_prefix('IMPORT_LIB', [], '') -type.set_generated_target_suffix('IMPORT_LIB', [], 'lib') - -# Except with gcc (mingw or cygwin), where use libxxx.dll.a -type.set_generated_target_prefix('IMPORT_LIB', ['<toolset>gcc'], 'lib') -type.set_generated_target_suffix('IMPORT_LIB', ['<toolset>gcc'], 'dll.a') - -type.register('SHARED_LIB', ['so', 'dll', 'dylib'], 'LIB') - -# Both mingw and cygwin use libxxx.dll naming scheme. -# On Linux, use "lib" prefix -type.set_generated_target_prefix('SHARED_LIB', [], 'lib') -# But don't use it on windows -type.set_generated_target_prefix('SHARED_LIB', ['<target-os>windows'], '') -# But use it again on mingw -type.set_generated_target_prefix('SHARED_LIB', ['<toolset>gcc', '<target-os>windows'], 'lib') -# And use 'cyg' on cygwin -type.set_generated_target_prefix('SHARED_LIB', ['<target-os>cygwin'], 'cyg') - - -type.set_generated_target_suffix('SHARED_LIB', ['<target-os>windows'], 'dll') -type.set_generated_target_suffix('SHARED_LIB', ['<target-os>cygwin'], 'dll') -type.set_generated_target_suffix('SHARED_LIB', ['<target-os>darwin'], 'dylib') - -type.register('SEARCHED_LIB', [], 'LIB') -# This is needed so that when we create a target of SEARCHED_LIB -# type, there's no prefix or suffix automatically added. -type.set_generated_target_prefix('SEARCHED_LIB', [], '') -type.set_generated_target_suffix('SEARCHED_LIB', [], '') diff --git a/jam-files/boost-build/tools/types/obj.jam b/jam-files/boost-build/tools/types/obj.jam deleted file mode 100644 index 6afbcaa6..00000000 --- a/jam-files/boost-build/tools/types/obj.jam +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -import type ; - -type.register OBJ : o obj ; -type.set-generated-target-suffix OBJ : <target-os>windows : obj ; -type.set-generated-target-suffix OBJ : <target-os>cygwin : obj ; diff --git a/jam-files/boost-build/tools/types/obj.py b/jam-files/boost-build/tools/types/obj.py deleted file mode 100644 index e61e99a8..00000000 --- a/jam-files/boost-build/tools/types/obj.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. 
(See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -from b2.build import type - -def register (): - type.register_type ('OBJ', ['obj'], None, ['NT', 'CYGWIN']) - type.register_type ('OBJ', ['o']) - -register () diff --git a/jam-files/boost-build/tools/types/objc.jam b/jam-files/boost-build/tools/types/objc.jam deleted file mode 100644 index 709cbd0c..00000000 --- a/jam-files/boost-build/tools/types/objc.jam +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright Rene Rivera 2008, 2010. -# Distributed under the Boost Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -import type ; -import scanner ; -import types/cpp ; - -class objc-scanner : c-scanner -{ - rule __init__ ( includes * ) - { - c-scanner.__init__ $(includes) ; - } - - rule pattern ( ) - { - return "#[ \t]*include|import[ ]*(<(.*)>|\"(.*)\")" ; - } -} - -scanner.register objc-scanner : include ; - -type.register OBJECTIVE_C : m ; -type.register OBJECTIVE_CPP : mm ; -type.set-scanner OBJECTIVE_C : objc-scanner ; -type.set-scanner OBJECTIVE_CPP : objc-scanner ; diff --git a/jam-files/boost-build/tools/types/preprocessed.jam b/jam-files/boost-build/tools/types/preprocessed.jam deleted file mode 100644 index c9187ba6..00000000 --- a/jam-files/boost-build/tools/types/preprocessed.jam +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright Steven Watanabe 2011 -# Distributed under the Boost Software License Version 1.0. (See -# accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -import type ; - -type.register PREPROCESSED_C : i : C ; -type.register PREPROCESSED_CPP : ii : CPP ; diff --git a/jam-files/boost-build/tools/types/qt.jam b/jam-files/boost-build/tools/types/qt.jam deleted file mode 100644 index 6d1dfbd4..00000000 --- a/jam-files/boost-build/tools/types/qt.jam +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright Vladimir Prus 2005. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -type UI : ui ; -type QRC : qrc ; -type MOCCABLE_CPP ; -type MOCCABLE_H ; -# Result of running moc. -type MOC : moc : H ; diff --git a/jam-files/boost-build/tools/types/register.jam b/jam-files/boost-build/tools/types/register.jam deleted file mode 100644 index 203992ca..00000000 --- a/jam-files/boost-build/tools/types/register.jam +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -# This module's job is to automatically import all the type -# registration modules in its directory. -import type os path modules ; - -# Register the given type on the specified OSes, or on remaining OSes -# if os is not specified. This rule is injected into each of the type -# modules for the sake of convenience. -local rule type ( type : suffixes * : base-type ? : os * ) -{ - if ! [ type.registered $(type) ] - { - if ( ! 
$(os) ) || [ os.name ] in $(os) - { - type.register $(type) : $(suffixes) : $(base-type) ; - } - } -} - -.this-module's-file = [ modules.binding $(__name__) ] ; -.this-module's-dir = [ path.parent $(.this-module's-file) ] ; -.sibling-jamfiles = [ path.glob $(.this-module's-dir) : *.jam ] ; -.sibling-modules = [ MATCH ^(.*)\.jam$ : $(.sibling-jamfiles) ] ; - -# A loop over all modules in this directory -for m in $(.sibling-modules) -{ - m = [ path.basename $(m) ] ; - m = types/$(m) ; - - # Inject the type rule into the new module - IMPORT $(__name__) : type : $(m) : type ; - import $(m) ; -} - - diff --git a/jam-files/boost-build/tools/types/rsp.jam b/jam-files/boost-build/tools/types/rsp.jam deleted file mode 100644 index bdf8a7c9..00000000 --- a/jam-files/boost-build/tools/types/rsp.jam +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -type RSP : rsp ; diff --git a/jam-files/boost-build/tools/types/rsp.py b/jam-files/boost-build/tools/types/rsp.py deleted file mode 100644 index ccb379e9..00000000 --- a/jam-files/boost-build/tools/types/rsp.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright David Abrahams 2004. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -from b2.build import type - -def register (): - type.register_type ('RSP', ['rsp']) - -register () diff --git a/jam-files/boost-build/tools/unix.jam b/jam-files/boost-build/tools/unix.jam deleted file mode 100644 index 75949851..00000000 --- a/jam-files/boost-build/tools/unix.jam +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) 2004 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# This file implements linking semantic common to all unixes. On unix, static -# libraries must be specified in a fixed order on the linker command line. Generators -# declared there store information about the order and use it property. - -import feature ; -import "class" : new ; -import generators ; -import type ; -import set ; -import order ; -import builtin ; - -class unix-linking-generator : linking-generator -{ - import property-set ; - import type ; - import unix ; - - rule __init__ ( id - composing ? : # Specify if generator is composing. The generator will be - # composing if non-empty string is passed, or parameter is - # not given. To make generator non-composing, pass empty - # string ("") - source-types + : target-types + : - requirements * ) - { - composing ?= true ; - generator.__init__ $(id) $(composing) : $(source-types) : $(target-types) : - $(requirements) ; - } - - rule run ( project name ? : property-set : sources + ) - { - local result = [ linking-generator.run $(project) $(name) : $(property-set) - : $(sources) ] ; - - unix.set-library-order $(sources) : $(property-set) : $(result[2-]) ; - - return $(result) ; - } - - rule generated-targets ( sources + : property-set : project name ? 
) - { - local sources2 ; - local libraries ; - for local l in $(sources) - { - if [ type.is-derived [ $(l).type ] LIB ] - { - libraries += $(l) ; - } - else - { - sources2 += $(l) ; - } - } - - sources = $(sources2) [ unix.order-libraries $(libraries) ] ; - - return [ linking-generator.generated-targets $(sources) : $(property-set) - : $(project) $(name) ] ; - } - -} - -class unix-archive-generator : archive-generator -{ - import unix ; - - rule __init__ ( id composing ? : source-types + : target-types + : - requirements * ) - { - composing ?= true ; - archive-generator.__init__ $(id) $(composing) : $(source-types) : $(target-types) : - $(requirements) ; - } - - rule run ( project name ? : property-set : sources + ) - { - local result = [ archive-generator.run $(project) $(name) : $(property-set) - : $(sources) ] ; - - unix.set-library-order $(sources) : $(property-set) : $(result[2-]) ; - - return $(result) ; - - } -} - -class unix-searched-lib-generator : searched-lib-generator -{ - import unix ; - rule __init__ ( * : * ) - { - generator.__init__ - $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule optional-properties ( ) - { - return $(self.requirements) ; - } - - rule run ( project name ? : property-set : sources * ) - { - local result = [ searched-lib-generator.run $(project) $(name) - : $(property-set) : $(sources) ] ; - - unix.set-library-order $(sources) : $(property-set) : $(result[2-]) ; - - return $(result) ; - } -} - -class unix-prebuilt-lib-generator : generator -{ - import unix ; - rule __init__ ( * : * ) - { - generator.__init__ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - - rule run ( project name ? : property-set : sources * ) - { - local f = [ $(property-set).get <file> ] ; - unix.set-library-order-aux $(f) : $(sources) ; - return $(f) $(sources) ; - } -} - -generators.register - [ new unix-prebuilt-lib-generator unix.prebuilt : : LIB - : <file> <toolset>unix ] ; - -generators.override unix.prebuilt : builtin.lib-generator ; - - -# Declare generators -generators.register [ new unix-linking-generator unix.link : LIB OBJ : EXE - : <toolset>unix ] ; - -generators.register [ new unix-archive-generator unix.archive : OBJ : STATIC_LIB - : <toolset>unix ] ; - -generators.register [ new unix-linking-generator unix.link.dll : LIB OBJ : SHARED_LIB - : <toolset>unix ] ; - -generators.register [ new unix-searched-lib-generator - unix.searched-lib-generator : : SEARCHED_LIB : <toolset>unix ] ; - - -# The derived toolset must specify their own actions. 
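The empty actions that follow are deliberate placeholders: concrete toolsets derived from unix supply the real command lines. The more interesting machinery is the library-ordering code further down: set-library-order-aux records pairwise "this library must precede that one" constraints, and order-libraries later flattens them into a linker-safe sequence. As a rough Python sketch of that idea (this Order class is a hypothetical stand-in, not Boost.Build's actual b2.util.order implementation):

    from collections import defaultdict

    class Order:
        """Illustrative pairwise-constraint orderer: collect 'first must
        precede second' pairs, then emit a total order over a given list."""
        def __init__(self):
            self.pairs = set()

        def add_pair(self, first, second):
            if first != second:           # mirror the f != t guard in set-library-order-aux
                self.pairs.add((first, second))

        def order(self, libraries):
            libs = list(libraries)
            preds = defaultdict(set)      # lib -> libs that must come first
            for f, t in self.pairs:
                if f in libs and t in libs:
                    preds[t].add(f)
            result, remaining = [], set(libs)
            while remaining:
                # Kahn-style step: emit libs with no unemitted predecessors.
                ready = [l for l in libs
                         if l in remaining and not (preds[l] & remaining)]
                if not ready:             # constraint cycle: keep given order
                    result.extend(l for l in libs if l in remaining)
                    break
                for l in ready:
                    result.append(l)
                    remaining.discard(l)
            return result

    o = Order()
    o.add_pair("libfoo.a", "libbar.a")    # foo uses symbols from bar
    o.add_pair("libbar.a", "libbaz.a")
    print(o.order(["libbaz.a", "libfoo.a", "libbar.a"]))
    # ['libfoo.a', 'libbar.a', 'libbaz.a']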
-actions link { -} - -actions link.dll { -} - -actions archive { -} - -actions searched-lib-generator { -} - -actions prebuilt { -} - - - - - -.order = [ new order ] ; - -rule set-library-order-aux ( from * : to * ) -{ - for local f in $(from) - { - for local t in $(to) - { - if $(f) != $(t) - { - $(.order).add-pair $(f) $(t) ; - } - } - } -} - -rule set-library-order ( sources * : property-set : result * ) -{ - local used-libraries ; - local deps = [ $(property-set).dependency ] ; - for local l in $(sources) $(deps:G=) - { - if [ $(l).type ] && [ type.is-derived [ $(l).type ] LIB ] - { - used-libraries += $(l) ; - } - } - - local created-libraries ; - for local l in $(result) - { - if [ $(l).type ] && [ type.is-derived [ $(l).type ] LIB ] - { - created-libraries += $(l) ; - } - } - - created-libraries = [ set.difference $(created-libraries) : $(used-libraries) ] ; - set-library-order-aux $(created-libraries) : $(used-libraries) ; -} - -rule order-libraries ( libraries * ) -{ - local r = [ $(.order).order $(libraries) ] ; - return $(r) ; -} -
\ No newline at end of file diff --git a/jam-files/boost-build/tools/unix.py b/jam-files/boost-build/tools/unix.py deleted file mode 100644 index d409c2e4..00000000 --- a/jam-files/boost-build/tools/unix.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) 2004 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -""" This file implements linking semantics common to all unixes. On unix, static - libraries must be specified in a fixed order on the linker command line. Generators - declared there store information about the order and use it properly. -""" - -import builtin -from b2.build import generators, type -from b2.util.utility import * -from b2.util import set, sequence - -class UnixLinkingGenerator (builtin.LinkingGenerator): - - def __init__ (self, id, composing, source_types, target_types, requirements): - builtin.LinkingGenerator.__init__ (self, id, composing, source_types, target_types, requirements) - - def run (self, project, name, prop_set, sources): - result = builtin.LinkingGenerator.run (self, project, name, prop_set, sources) - if result: - set_library_order (project.manager (), sources, prop_set, result [1]) - - return result - - def generated_targets (self, sources, prop_set, project, name): - sources2 = [] - libraries = [] - for l in sources: - if type.is_derived (l.type (), 'LIB'): - libraries.append (l) - - else: - sources2.append (l) - - sources = sources2 + order_libraries (libraries) - - return builtin.LinkingGenerator.generated_targets (self, sources, prop_set, project, name) - - -class UnixArchiveGenerator (builtin.ArchiveGenerator): - def __init__ (self, id, composing, source_types, target_types_and_names, requirements): - builtin.ArchiveGenerator.__init__ (self, id, composing, source_types, target_types_and_names, requirements) - - def run (self, project, name, prop_set, sources): - result = builtin.ArchiveGenerator.run(self, project, name, prop_set, sources) - set_library_order(project.manager(), sources, prop_set, result) - return result - -class UnixSearchedLibGenerator (builtin.SearchedLibGenerator): - - def __init__ (self): - builtin.SearchedLibGenerator.__init__ (self) - - def optional_properties (self): - return self.requirements () - - def run (self, project, name, prop_set, sources, multiple): - result = SearchedLibGenerator.run (project, name, prop_set, sources, multiple) - - set_library_order (sources, prop_set, result) - - return result - -class UnixPrebuiltLibGenerator (generators.Generator): - def __init__ (self, id, composing, source_types, target_types_and_names, requirements): - generators.Generator.__init__ (self, id, composing, source_types, target_types_and_names, requirements) - - def run (self, project, name, prop_set, sources, multiple): - f = prop_set.get ('<file>') - set_library_order_aux (f, sources) - return (f, sources) - -### # The derived toolset must specify their own rules and actions. -# FIXME: restore? 
-# action.register ('unix.prebuilt', None, None) - - -generators.register (UnixPrebuiltLibGenerator ('unix.prebuilt', False, [], ['LIB'], ['<file>', '<toolset>unix'])) - - - - - -### # Declare generators -### generators.register [ new UnixLinkingGenerator unix.link : LIB OBJ : EXE -### : <toolset>unix ] ; -generators.register (UnixArchiveGenerator ('unix.archive', True, ['OBJ'], ['STATIC_LIB'], ['<toolset>unix'])) - -### generators.register [ new UnixLinkingGenerator unix.link.dll : LIB OBJ : SHARED_LIB -### : <toolset>unix ] ; -### -### generators.register [ new UnixSearchedLibGenerator -### unix.SearchedLibGenerator : : SEARCHED_LIB : <toolset>unix ] ; -### -### -### # The derived toolset must specify their own actions. -### actions link { -### } -### -### actions link.dll { -### } - -def unix_archive (manager, targets, sources, properties): - pass - -# FIXME: restore? -#action.register ('unix.archive', unix_archive, ['']) - -### actions searched-lib-generator { -### } -### -### actions prebuilt { -### } - - -from b2.util.order import Order -__order = Order () - -def set_library_order_aux (from_libs, to_libs): - for f in from_libs: - for t in to_libs: - if f != t: - __order.add_pair (f, t) - -def set_library_order (manager, sources, prop_set, result): - used_libraries = [] - deps = prop_set.dependency () - - sources.extend(d.value() for d in deps) - sources = sequence.unique(sources) - - for l in sources: - if l.type () and type.is_derived (l.type (), 'LIB'): - used_libraries.append (l) - - created_libraries = [] - for l in result: - if l.type () and type.is_derived (l.type (), 'LIB'): - created_libraries.append (l) - - created_libraries = set.difference (created_libraries, used_libraries) - set_library_order_aux (created_libraries, used_libraries) - -def order_libraries (libraries): - return __order.order (libraries) - diff --git a/jam-files/boost-build/tools/vacpp.jam b/jam-files/boost-build/tools/vacpp.jam deleted file mode 100644 index f4080fc0..00000000 --- a/jam-files/boost-build/tools/vacpp.jam +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright Vladimir Prus 2004. -# Copyright Toon Knapen 2004. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt -# or copy at http://www.boost.org/LICENSE_1_0.txt) - -# -# Boost.Build V2 toolset for the IBM XL C++ compiler -# - -import toolset : flags ; -import feature ; -import common ; -import generators ; -import os ; - -feature.extend toolset : vacpp ; -toolset.inherit vacpp : unix ; -generators.override vacpp.prebuilt : builtin.prebuilt ; -generators.override vacpp.searched-lib-generator : searched-lib-generator ; - -# Configure the vacpp toolset -rule init ( version ? 
: command * : options * ) -{ - local condition = [ - common.check-init-parameters vacpp : version $(version) ] ; - - command = [ common.get-invocation-command vacpp : xlC - : $(command) : "/usr/vacpp/bin/xlC" ] ; - - common.handle-options vacpp : $(condition) : $(command) : $(options) ; -} - -# Declare generators -generators.register-c-compiler vacpp.compile.c : C : OBJ : <toolset>vacpp ; -generators.register-c-compiler vacpp.compile.c++ : CPP : OBJ : <toolset>vacpp ; - -# Allow C++ style comments in C files -flags vacpp CFLAGS : -qcpluscmt ; - -# Declare flags -flags vacpp CFLAGS <optimization>off : -qNOOPTimize ; -flags vacpp CFLAGS <optimization>speed : -O3 -qstrict ; -flags vacpp CFLAGS <optimization>space : -O2 -qcompact ; - -# Discretionary inlining (not recommended) -flags vacpp CFLAGS <inlining>off : -qnoinline ; -flags vacpp CFLAGS <inlining>on : -qinline ; -#flags vacpp CFLAGS <inlining>full : -qinline ; -flags vacpp CFLAGS <inlining>full : ; - -# Exception handling -flags vacpp C++FLAGS <exception-handling>off : -qnoeh ; -flags vacpp C++FLAGS <exception-handling>on : -qeh ; - -# Run-time Type Identification -flags vacpp C++FLAGS <rtti>off : -qnortti ; -flags vacpp C++FLAGS <rtti>on : -qrtti ; - -# Enable 64-bit memory addressing model -flags vacpp CFLAGS <address-model>64 : -q64 ; -flags vacpp LINKFLAGS <address-model>64 : -q64 ; -flags vacpp ARFLAGS <target-os>aix/<address-model>64 : -X 64 ; - -# Use absolute path when generating debug information -flags vacpp CFLAGS <debug-symbols>on : -g -qfullpath ; -flags vacpp LINKFLAGS <debug-symbols>on : -g -qfullpath ; -flags vacpp LINKFLAGS <debug-symbols>off : -s ; - -if [ os.name ] = AIX -{ - flags vacpp.compile C++FLAGS : -qfuncsect ; - - # The -bnoipath strips the prepending (relative) path of libraries from - # the loader section in the target library or executable. Hence, during - # load-time LIBPATH (identical to LD_LIBRARY_PATH) or a hard-coded - # -blibpath (*similar* to -lrpath/-lrpath-link) is searched. Without - # this option, the prepending (relative) path + library name is - # hard-coded in the loader section, causing *only* this path to be - # searched during load-time. Note that the AIX linker does not have an - # -soname equivalent; this is as close as it gets. - # - # The above options are definitely for AIX 5.x, and most likely also for - # AIX 4.x and AIX 6.x. For details about the AIX linker see: - # http://download.boulder.ibm.com/ibmdl/pub/software/dw/aix/es-aix_ll.pdf - # - flags vacpp.link LINKFLAGS <link>shared : -bnoipath ; - - # Run-time linking - flags vacpp.link EXE-LINKFLAGS <link>shared : -brtl ; -} -else -{ - # Linux PPC - flags vacpp.compile CFLAGS <link>shared : -qpic=large ; - flags vacpp FINDLIBS : rt ; -} - -# Profiling -flags vacpp CFLAGS <profiling>on : -pg ; -flags vacpp LINKFLAGS <profiling>on : -pg ; - -flags vacpp.compile OPTIONS <cflags> ; -flags vacpp.compile.c++ OPTIONS <cxxflags> ; -flags vacpp DEFINES <define> ; -flags vacpp UNDEFS <undef> ; -flags vacpp HDRS <include> ; -flags vacpp STDHDRS <sysinclude> ; -flags vacpp.link OPTIONS <linkflags> ; -flags vacpp ARFLAGS <arflags> ; - -flags vacpp LIBPATH <library-path> ; -flags vacpp NEEDLIBS <library-file> ; -flags vacpp FINDLIBS <find-shared-library> ; -flags vacpp FINDLIBS <find-static-library> ; - -# Select the compiler name according to the threading model.
-flags vacpp VA_C_COMPILER <threading>single : xlc ; -flags vacpp VA_C_COMPILER <threading>multi : xlc_r ; -flags vacpp VA_CXX_COMPILER <threading>single : xlC ; -flags vacpp VA_CXX_COMPILER <threading>multi : xlC_r ; - -SPACE = " " ; - -flags vacpp.link.dll HAVE_SONAME <target-os>linux : "" ; - -actions vacpp.link bind NEEDLIBS -{ - $(VA_CXX_COMPILER) $(EXE-LINKFLAGS) $(LINKFLAGS) -o "$(<[1])" -L$(LIBPATH) -L$(STDLIBPATH) "$(>)" "$(NEEDLIBS)" "$(NEEDLIBS)" -l$(FINDLIBS) $(OPTIONS) $(USER_OPTIONS) -} - -actions vacpp.link.dll bind NEEDLIBS -{ - xlC_r -G $(LINKFLAGS) -o "$(<[1])" $(HAVE_SONAME)-Wl,-soname$(SPACE)-Wl,$(<[-1]:D=) -L$(LIBPATH) -L$(STDLIBPATH) "$(>)" "$(NEEDLIBS)" "$(NEEDLIBS)" -l$(FINDLIBS) $(OPTIONS) $(USER_OPTIONS) -} - -actions vacpp.compile.c -{ - $(VA_C_COMPILER) -c $(OPTIONS) $(USER_OPTIONS) -I$(BOOST_ROOT) -U$(UNDEFS) -D$(DEFINES) $(CFLAGS) -I"$(HDRS)" -I"$(STDHDRS)" -o "$(<)" "$(>)" -} - -actions vacpp.compile.c++ -{ - $(VA_CXX_COMPILER) -c $(OPTIONS) $(USER_OPTIONS) -I$(BOOST_ROOT) -U$(UNDEFS) -D$(DEFINES) $(CFLAGS) $(C++FLAGS) -I"$(HDRS)" -I"$(STDHDRS)" -o "$(<)" "$(>)" -} - -actions updated together piecemeal vacpp.archive -{ - ar $(ARFLAGS) ru "$(<)" "$(>)" -} diff --git a/jam-files/boost-build/tools/whale.jam b/jam-files/boost-build/tools/whale.jam deleted file mode 100644 index 9335ff0c..00000000 --- a/jam-files/boost-build/tools/whale.jam +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (C) Vladimir Prus 2002-2005. - -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# This module implements support for Whale/Dolphin/WD parser/lexer tools. -# See http://www.cs.queensu.ca/home/okhotin/whale/ for details. -# -# There are three interesting target types: -# - WHL (the parser sources), that are converted to CPP and H -# - DLP (the lexer sources), that are converted to CPP and H -# - WD (combined parser/lexer sources), that are converted to WHL + DLP - -import type ; -import generators ; -import path ; -import "class" : new ; -import errors ; - -rule init ( path # path to the Whale/Dolphin/WD binaries - ) -{ - if $(.configured) && $(.path) != $(path) - { - errors.user-error "Attempt to reconfigure Whale support" : - "Previously configured with path \"$(.path:E=<empty>)\"" : - "Now configuring with path \"$(path:E=<empty>)\"" ; - - } - .configured = true ; - .path = $(path) ; - - .whale = [ path.join $(path) whale ] ; - .dolphin = [ path.join $(path) dolphin ] ; - .wd = [ path.join $(path) wd ] ; -} - - -# Declare the types. -type.register WHL : whl ; -type.register DLP : dlp ; -type.register WHL_LR0 : lr0 ; -type.register WD : wd ; - -# Declare standard generators. -generators.register-standard whale.whale : WHL : CPP H H(%_symbols) ; -generators.register-standard whale.dolphin : DLP : CPP H ; -generators.register-standard whale.wd : WD : WHL(%_parser) DLP(%_lexer) ; - -# The conversions defined above are ambiguous when we generate CPP from WD. -# We can either go via the WHL type, or via the DLP type. -# The following custom generator handles this by running both conversions. - -class wd-to-cpp : generator -{ - rule __init__ ( * : * : * ) - { - generator.__init__ $(1) : $(2) : $(3) ; - } - - rule run ( project name ? : property-set : source * ) - { - if ! $(source[2]) - { - local new-sources ; - if !
[ $(source).type ] in WHL DLP - { - local r1 = [ generators.construct $(project) $(name) - : WHL : $(property-set) : $(source) ] ; - local r2 = [ generators.construct $(project) $(name) - : DLP : $(property-set) : $(source) ] ; - - new-sources = [ sequence.unique $(r1[2-]) $(r2[2-]) ] ; - } - else - { - new-sources = $(source) ; - } - - local result ; - for local i in $(new-sources) - { - local t = [ generators.construct $(project) $(name) : CPP - : $(property-set) : $(i) ] ; - result += $(t[2-]) ; - } - return $(result) ; - } - } - -} - - -generators.override whale.wd-to-cpp : whale.whale ; -generators.override whale.wd-to-cpp : whale.dolphin ; - - -generators.register [ new wd-to-cpp whale.wd-to-cpp : : CPP ] ; - - -actions whale -{ - $(.whale) -d $(<[1]:D) $(>) -} - -actions dolphin -{ - $(.dolphin) -d $(<[1]:D) $(>) -} - -actions wd -{ - $(.wd) -d $(<[1]:D) -g $(>) -} - diff --git a/jam-files/boost-build/tools/xlf.jam b/jam-files/boost-build/tools/xlf.jam deleted file mode 100644 index e7fcc608..00000000 --- a/jam-files/boost-build/tools/xlf.jam +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2004 Toon Knapen -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# -# toolset configuration for the IBM Fortran compiler (xlf) -# - -import toolset : flags ; -import feature ; -import fortran ; - -rule init ( version ? : command * : options * ) -{ -} - -# Declare flags and action for compilation -flags xlf OPTIONS <optimization>off : -O0 ; -flags xlf OPTIONS <optimization>speed : -O3 ; -flags xlf OPTIONS <optimization>space : -Os ; - -flags xlf OPTIONS <debug-symbols>on : -g ; -flags xlf OPTIONS <profiling>on : -pg ; - -flags xlf DEFINES <define> ; -flags xlf INCLUDES <include> ; - -rule compile-fortran -{ -} - -actions compile-fortran -{ - xlf $(OPTIONS) -I$(INCLUDES) -c -o "$(<)" "$(>)" -} - -generators.register-fortran-compiler xlf.compile-fortran : FORTRAN : OBJ ; diff --git a/jam-files/boost-build/tools/xsltproc-config.jam b/jam-files/boost-build/tools/xsltproc-config.jam deleted file mode 100644 index de54a2eb..00000000 --- a/jam-files/boost-build/tools/xsltproc-config.jam +++ /dev/null @@ -1,37 +0,0 @@ -#~ Copyright 2005 Rene Rivera. -#~ Distributed under the Boost Software License, Version 1.0. -#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Automatic configuration for Python tools and librries. To use, just import this module. - -import os ; -import toolset : using ; - -if [ os.name ] = NT -{ - local xsltproc-path = [ GLOB [ modules.peek : PATH ] "C:\\Boost\\bin" : xsltproc\.exe ] ; - xsltproc-path = $(xsltproc-path[1]) ; - - if $(xsltproc-path) - { - if --debug-configuration in [ modules.peek : ARGV ] - { - ECHO "notice:" using xsltproc ":" $(xsltproc-path) ; - } - using xsltproc : $(xsltproc-path) ; - } -} -else -{ - local xsltproc-path = [ GLOB [ modules.peek : PATH ] : xsltproc ] ; - xsltproc-path = $(xsltproc-path[1]) ; - - if $(xsltproc-path) - { - if --debug-configuration in [ modules.peek : ARGV ] - { - ECHO "notice:" using xsltproc ":" $(xsltproc-path) ; - } - using xsltproc : $(xsltproc-path) ; - } -} diff --git a/jam-files/boost-build/tools/xsltproc.jam b/jam-files/boost-build/tools/xsltproc.jam deleted file mode 100644 index 96f5170b..00000000 --- a/jam-files/boost-build/tools/xsltproc.jam +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright (C) 2003 Doug Gregor. 
Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -# This module defines rules to apply an XSLT stylesheet to an XML file using the -# xsltproc driver, part of libxslt. -# -# Note: except for 'init', this modules does not provide any rules for end -# users. - -import feature ; -import regex ; -import sequence ; -import common ; -import os ; -import modules ; -import path ; -import errors ; - -feature.feature xsl:param : : free ; -feature.feature xsl:path : : free ; -feature.feature catalog : : free ; - - -# Initialize xsltproc support. The parameters are: -# xsltproc: The xsltproc executable -# -rule init ( xsltproc ? ) -{ - if $(xsltproc) - { - modify-config ; - .xsltproc = $(xsltproc) ; - check-xsltproc ; - } -} - -rule freeze-config ( ) -{ - if ! $(.config-frozen) - { - .config-frozen = true ; - .xsltproc ?= [ modules.peek : XSLTPROC ] ; - .xsltproc ?= xsltproc ; - check-xsltproc ; - .is-cygwin = [ .is-cygwin $(.xsltproc) ] ; - } -} - -rule modify-config -{ - if $(.config-frozen) - { - errors.user-error "xsltproc: Cannot change xsltproc command after it has been used." ; - } -} - -rule check-xsltproc ( ) -{ - if $(.xsltproc) - { - local status = [ SHELL "\"$(.xsltproc)\" -V" : no-output : exit-status ] ; - if $(status[2]) != "0" - { - errors.user-error "xsltproc: Could not run \"$(.xsltproc)\" -V." ; - } - } -} - -# Returns a non-empty string if a cygwin xsltproc binary was specified. -rule is-cygwin ( ) -{ - freeze-config ; - return $(.is-cygwin) ; -} - -rule .is-cygwin ( xsltproc ) -{ - if [ os.on-windows ] - { - local file = [ path.make [ modules.binding $(__name__) ] ] ; - local dir = [ path.native - [ path.join [ path.parent $(file) ] xsltproc ] ] ; - if [ os.name ] = CYGWIN - { - dir = $(dir:W) ; - } - local command = - "\"$(xsltproc)\" \"$(dir)\\test.xsl\" \"$(dir)\\test.xml\" 2>&1" ; - local status = [ SHELL $(command) : no-output : exit-status ] ; - if $(status[2]) != "0" - { - return true ; - } - } -} - -rule compute-xslt-flags ( target : properties * ) -{ - local flags ; - - # Raw flags. - flags += [ feature.get-values <flags> : $(properties) ] ; - - # Translate <xsl:param> into command line flags. - for local param in [ feature.get-values <xsl:param> : $(properties) ] - { - local namevalue = [ regex.split $(param) "=" ] ; - flags += --stringparam $(namevalue[1]) \"$(namevalue[2])\" ; - } - - # Translate <xsl:path>. - for local path in [ feature.get-values <xsl:path> : $(properties) ] - { - flags += --path \"$(path:G=)\" ; - } - - # Take care of implicit dependencies. - local other-deps ; - for local dep in [ feature.get-values <implicit-dependency> : $(properties) ] - { - other-deps += [ $(dep:G=).creating-subvariant ] ; - } - - local implicit-target-directories ; - for local dep in [ sequence.unique $(other-deps) ] - { - implicit-target-directories += [ $(dep).all-target-directories ] ; - } - - for local dir in $(implicit-target-directories) - { - flags += --path \"$(dir:T)\" ; - } - - return $(flags) ; -} - - -local rule .xsltproc ( target : source stylesheet : properties * : dirname ? 
: action ) -{ - freeze-config ; - STYLESHEET on $(target) = $(stylesheet) ; - FLAGS on $(target) += [ compute-xslt-flags $(target) : $(properties) ] ; - NAME on $(target) = $(.xsltproc) ; - - for local catalog in [ feature.get-values <catalog> : $(properties) ] - { - CATALOG = [ common.variable-setting-command XML_CATALOG_FILES : $(catalog:T) ] ; - } - - if [ os.on-windows ] && ! [ is-cygwin ] - { - action = $(action).windows ; - } - - $(action) $(target) : $(source) ; -} - - -rule xslt ( target : source stylesheet : properties * ) -{ - return [ .xsltproc $(target) : $(source) $(stylesheet) : $(properties) : : xslt-xsltproc ] ; -} - - -rule xslt-dir ( target : source stylesheet : properties * : dirname ) -{ - return [ .xsltproc $(target) : $(source) $(stylesheet) : $(properties) : $(dirname) : xslt-xsltproc-dir ] ; -} - -actions xslt-xsltproc.windows -{ - $(CATALOG) "$(NAME:E=xsltproc)" $(FLAGS) --xinclude -o "$(<)" "$(STYLESHEET:W)" "$(>:W)" -} - - -actions xslt-xsltproc bind STYLESHEET -{ - $(CATALOG) "$(NAME:E=xsltproc)" $(FLAGS) --xinclude -o "$(<)" "$(STYLESHEET:T)" "$(>:T)" -} - - -actions xslt-xsltproc-dir.windows bind STYLESHEET -{ - $(CATALOG) "$(NAME:E=xsltproc)" $(FLAGS) --xinclude -o "$(<:D)/" "$(STYLESHEET:W)" "$(>:W)" -} - - -actions xslt-xsltproc-dir bind STYLESHEET -{ - $(CATALOG) "$(NAME:E=xsltproc)" $(FLAGS) --xinclude -o "$(<:D)/" "$(STYLESHEET:T)" "$(>:T)" -} diff --git a/jam-files/boost-build/tools/xsltproc/included.xsl b/jam-files/boost-build/tools/xsltproc/included.xsl deleted file mode 100644 index ef86394a..00000000 --- a/jam-files/boost-build/tools/xsltproc/included.xsl +++ /dev/null @@ -1,11 +0,0 @@ -<?xml version="1.0" encoding="utf-8"?> -<!-- - Copyright (c) 2010 Steven Watanabe - - Distributed under the Boost Software License, Version 1.0. - (See accompanying file LICENSE_1_0.txt or copy at - http://www.boost.org/LICENSE_1_0.txt) - --> -<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" - version="1.0"> -</xsl:stylesheet> diff --git a/jam-files/boost-build/tools/xsltproc/test.xml b/jam-files/boost-build/tools/xsltproc/test.xml deleted file mode 100644 index 57c8ba18..00000000 --- a/jam-files/boost-build/tools/xsltproc/test.xml +++ /dev/null @@ -1,2 +0,0 @@ -<?xml version="1.0" encoding="utf-8"?> -<root/> diff --git a/jam-files/boost-build/tools/xsltproc/test.xsl b/jam-files/boost-build/tools/xsltproc/test.xsl deleted file mode 100644 index a142c91d..00000000 --- a/jam-files/boost-build/tools/xsltproc/test.xsl +++ /dev/null @@ -1,12 +0,0 @@ -<?xml version="1.0" encoding="utf-8"?> -<!-- - Copyright (c) 2010 Steven Watanabe - - Distributed under the Boost Software License, Version 1.0. - (See accompanying file LICENSE_1_0.txt or copy at - http://www.boost.org/LICENSE_1_0.txt) - --> -<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" - version="1.0"> - <xsl:include href="included.xsl"/> -</xsl:stylesheet> diff --git a/jam-files/boost-build/tools/zlib.jam b/jam-files/boost-build/tools/zlib.jam deleted file mode 100644 index f9138fd5..00000000 --- a/jam-files/boost-build/tools/zlib.jam +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) 2010 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. 
(See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -# Supports the zlib library -# -# After 'using zlib', the following targets are available: -# -# /zlib//zlib -- The zlib library - - -# In addition to its direct purpose of supporting zlib, this module also -# serves as a canonical example of how third-party configuration works -# in Boost.Build. The operation is as follows: -# -# - For each 'using zlib : condition ... : ...' we create a target alternative -# for zlib, with the specified condition. -# - There's one target alternative for 'zlib' with no specific condition -# properties. -# -# Two invocations of 'using zlib' with the same condition but different -# properties are not permitted, e.g.: -# -# using zlib : condition <target-os>windows : include foo ; -# using zlib : condition <target-os>windows : include bar ; -# -# is in error. One exception is the empty condition: 'using' without any -# parameters is overridable. That is: -# -# using zlib ; -# using zlib : include foo ; -# -# is OK; the first 'using' is then ignored. Likewise if the order of the statements -# is reversed. -# -# When the 'zlib' target is built, a target alternative is selected as usual for -# Boost.Build. The selected alternative is a custom target class, which: -# -# - calls ac.find-include-path to find the header path. If an explicit path is provided -# in 'using', only that path is checked, and if no header is found there, an error -# is emitted. Otherwise, we check a directory specified using the ZLIB_INCLUDE -# environment variable, and failing that, standard directories. -# [TODO: document sysroot handling] -# - calls ac.find-library to find the library, in an identical fashion. -# - -import project ; -import ac ; -import errors ; -import "class" : new ; -import targets ; - -project.initialize $(__name__) ; -project = [ project.current ] ; -project zlib ; - -header = zlib.h ; -names = z zlib zll zdll ; - -.default-alternative = [ new ac-library zlib : $(project) ] ; -$(.default-alternative).set-header $(header) ; -$(.default-alternative).set-default-names $(names) ; -targets.main-target-alternative $(.default-alternative) ; - -rule init ( * : * ) -{ - if ! $(condition) - { - # Special case the no-condition case so that 'using' without parameters - # can mix with more specific 'using'. - $(.default-alternative).reconfigure $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ; - } - else - { - # FIXME: consider if we should allow overriding definitions for a given - # condition -- e.g. project-config.jam might want to override whatever is - # in user-config.jam. - local mt = [ new ac-library zlib : $(project) - : $(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ] ; - $(mt).set-header $(header) ; - $(mt).set-default-names $(names) ; - targets.main-target-alternative $(mt) ; - } -} - - - - - - diff --git a/jam-files/boost-build/user-config.jam b/jam-files/boost-build/user-config.jam deleted file mode 100644 index fbbf13fd..00000000 --- a/jam-files/boost-build/user-config.jam +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2003, 2005 Douglas Gregor -# Copyright 2004 John Maddock -# Copyright 2002, 2003, 2004, 2007 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# This file is used to configure your Boost.Build installation.
You can modify -# this file in place, or you can place it in a permanent location so that it -# does not get overwritten should you get a new version of Boost.Build. See: -# -# http://www.boost.org/boost-build2/doc/html/bbv2/overview/configuration.html -# -# for documentation about possible permanent locations. - -# This file specifies which toolsets (C++ compilers), libraries, and other -# tools are available. Often, you should be able to just uncomment existing -# example lines and adjust them to taste. The complete list of supported tools, -# and configuration instructions can be found at: -# -# http://boost.org/boost-build2/doc/html/bbv2/reference/tools.html -# - -# This file uses Jam language syntax to describe available tools. Mostly, -# there are 'using' lines, that contain the name of the used tools, and -# parameters to pass to those tools -- where paremeters are separated by -# semicolons. Important syntax notes: -# -# - Both ':' and ';' must be separated from other tokens by whitespace -# - The '\' symbol is a quote character, so when specifying Windows paths you -# should use '/' or '\\' instead. -# -# More details about the syntax can be found at: -# -# http://boost.org/boost-build2/doc/html/bbv2/advanced.html#bbv2.advanced.jam_language -# - -# ------------------ -# GCC configuration. -# ------------------ - -# Configure gcc (default version). -# using gcc ; - -# Configure specific gcc version, giving alternative name to use. -# using gcc : 3.2 : g++-3.2 ; - - -# ------------------- -# MSVC configuration. -# ------------------- - -# Configure msvc (default version, searched for in standard locations and PATH). -# using msvc ; - -# Configure specific msvc version (searched for in standard locations and PATH). -# using msvc : 8.0 ; - - -# ---------------------- -# Borland configuration. -# ---------------------- -# using borland ; - - -# ---------------------- -# STLPort configuration. -# ---------------------- - -# Configure specifying location of STLPort headers. Libraries must be either -# not needed or available to the compiler by default. -# using stlport : : /usr/include/stlport ; - -# Configure specifying location of both headers and libraries explicitly. -# using stlport : : /usr/include/stlport /usr/lib ; - - -# ----------------- -# QT configuration. -# ----------------- - -# Configure assuming QTDIR gives the installation prefix. -# using qt ; - -# Configure with an explicit installation prefix. -# using qt : /usr/opt/qt ; - -# --------------------- -# Python configuration. -# --------------------- - -# Configure specific Python version. 
-# using python : 3.1 : /usr/bin/python3 : /usr/include/python3.1 : /usr/lib ; diff --git a/jam-files/boost-build/util/__init__.py b/jam-files/boost-build/util/__init__.py deleted file mode 100644 index f80fe70e..00000000 --- a/jam-files/boost-build/util/__init__.py +++ /dev/null @@ -1,136 +0,0 @@ - -import bjam -import re -import types - -# Decorator that specifies the bjam-side prototype for a Python function -def bjam_signature(s): - - def wrap(f): - f.bjam_signature = s - return f - - return wrap - -def metatarget(f): - - f.bjam_signature = (["name"], ["sources", "*"], ["requirements", "*"], - ["default_build", "*"], ["usage_requirements", "*"]) - return f - -class cached(object): - - def __init__(self, function): - self.function = function - self.cache = {} - - def __call__(self, *args): - try: - return self.cache[args] - except KeyError: - v = self.function(*args) - self.cache[args] = v - return v - - def __get__(self, instance, type): - return types.MethodType(self, instance, type) - -def unquote(s): - if s and s[0] == '"' and s[-1] == '"': - return s[1:-1] - else: - return s - -_extract_jamfile_and_rule = re.compile("(Jamfile<.*>)%(.*)") - -def qualify_jam_action(action_name, context_module): - - if action_name.startswith("###"): - # Callable exported from Python. Don't touch - return action_name - elif _extract_jamfile_and_rule.match(action_name): - # Rule is already in indirect format - return action_name - else: - ix = action_name.find('.') - if ix != -1 and action_name[:ix] == context_module: - return context_module + '%' + action_name[ix+1:] - - return context_module + '%' + action_name - - -def set_jam_action(name, *args): - - m = _extract_jamfile_and_rule.match(name) - if m: - args = ("set-update-action-in-module", m.group(1), m.group(2)) + args - else: - args = ("set-update-action", name) + args - - return bjam.call(*args) - - -def call_jam_function(name, *args): - - m = _extract_jamfile_and_rule.match(name) - if m: - args = ("call-in-module", m.group(1), m.group(2)) + args - return bjam.call(*args) - else: - return bjam.call(*((name,) + args)) - -__value_id = 0 -__python_to_jam = {} -__jam_to_python = {} - -def value_to_jam(value, methods=False): - """Makes a token to refer to a Python value inside Jam language code. - - The token is merely a string that can be passed around in Jam code and - eventually passed back. For example, we might want to pass a PropertySet - instance to a tag function and it might eventually call back - to virtual_target.add_suffix_and_prefix, passing the same instance. - - For values that are classes, we'll also make class methods callable - from Jam. - - Note that this is necessary to make a bit more of the existing Jamfiles work. - This trick should not be used too much, or else the performance benefits of - the Python port will be eaten. - """ - - global __value_id - - r = __python_to_jam.get(value, None) - if r: - return r - - exported_name = '###_' + str(__value_id) - __value_id = __value_id + 1 - __python_to_jam[value] = exported_name - __jam_to_python[exported_name] = value - - if methods and type(value) == types.InstanceType: - for field_name in dir(value): - field = getattr(value, field_name) - if callable(field) and not field_name.startswith("__"): - bjam.import_rule("", exported_name + "."
+ field_name, field) - - return exported_name - -def record_jam_to_value_mapping(jam_value, python_value): - __jam_to_python[jam_value] = python_value - -def jam_to_value_maybe(jam_value): - - if type(jam_value) == type(""): - return __jam_to_python.get(jam_value, jam_value) - else: - return jam_value - -def stem(filename): - i = filename.find('.') - if i != -1: - return filename[0:i] - else: - return filename diff --git a/jam-files/boost-build/util/assert.jam b/jam-files/boost-build/util/assert.jam deleted file mode 100644 index abedad52..00000000 --- a/jam-files/boost-build/util/assert.jam +++ /dev/null @@ -1,336 +0,0 @@ -# Copyright 2001, 2002, 2003 Dave Abrahams -# Copyright 2006 Rene Rivera -# Copyright 2002, 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import errors ; -import modules ; - - -################################################################################ -# -# Private implementation details. -# -################################################################################ - -# Rule added as a replacement for the regular Jam = operator but which does not -# ignore trailing empty string elements. -# -local rule exact-equal-test ( lhs * : rhs * ) -{ - local lhs_extended = $(lhs) xxx ; - local rhs_extended = $(rhs) xxx ; - if $(lhs_extended) = $(rhs_extended) - { - return true ; - } -} - - -# Two lists are considered set-equal if they contain the same elements, ignoring -# duplicates and ordering. -# -local rule set-equal-test ( set1 * : set2 * ) -{ - if ( $(set1) in $(set2) ) && ( $(set2) in $(set1) ) - { - return true ; - } -} - - -################################################################################ -# -# Public interface. -# -################################################################################ - -# Assert the equality of A and B, ignoring trailing empty string elements. -# -rule equal ( a * : b * ) -{ - if $(a) != $(b) - { - errors.error-skip-frames 3 assertion failure: \"$(a)\" "==" \"$(b)\" - (ignoring trailing empty strings) ; - } -} - - -# Assert that the result of calling RULE-NAME on the given arguments has a false -# logical value (is either an empty list or all empty strings). -# -rule false ( rule-name args * : * ) -{ - local result ; - module [ CALLER_MODULE ] - { - modules.poke assert : result : [ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) - : $(7) : $(8) : $(9) ] ; - } - - if $(result) - { - errors.error-skip-frames 3 assertion failure: Expected false result from - "[" $(rule-name) [ errors.lol->list $(args) : $(2) : $(3) : $(4) : - $(5) : $(6) : $(7) : $(8) : $(9) ] "]" : Got: "[" \"$(result)\" "]" ; - } -} - - -# Assert that ELEMENT is present in LIST. -# -rule "in" ( element : list * ) -{ - if ! $(element) in $(list) - { - errors.error-skip-frames 3 assertion failure: Expected \"$(element)\" in - "[" \"$(list)\" "]" ; - } -} - - -# Assert the inequality of A and B, ignoring trailing empty string elements. -# -rule not-equal ( a * : b * ) -{ - if $(a) = $(b) - { - errors.error-skip-frames 3 assertion failure: \"$(a)\" "!=" \"$(b)\" - (ignoring trailing empty strings) ; - } -} - - -# Assert that ELEMENT is not present in LIST. -# -rule not-in ( element : list * ) -{ - if $(element) in $(list) - { - errors.error-skip-frames 3 assertion failure: Did not expect - \"$(element)\" in "[" \"$(list)\" "]" ; - } -} - - -# Assert the inequality of A and B as sets. 
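The not-set-equal rule that follows builds on the set-equal-test helper defined at the top of this file: two lists are set-equal when each is contained in the other, so duplicates and ordering are ignored. A minimal Python rendering of that containment check (the function name is ours, for illustration):

    def set_equal(a, b):
        # Every element of a must appear in b and vice versa;
        # duplicates and ordering are deliberately ignored.
        return all(x in b for x in a) and all(y in a for y in b)

    assert set_equal(["a", "b", "c", "a"], ["c", "b", "a"])
    assert not set_equal(["a", "b"], ["a", "b", "d"])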
-# -rule not-set-equal ( a * : b * ) -{ - if [ set-equal-test $(a) : $(b) ] - { - errors.error-skip-frames 3 assertion failure: Expected "[" \"$(a)\" "]" - and "[" \"$(b)\" "]" to not be equal as sets ; - } -} - - -# Assert that A and B are not exactly equal, not ignoring trailing empty string -# elements. -# -rule not-exact-equal ( a * : b * ) -{ - if [ exact-equal-test $(a) : $(b) ] - { - errors.error-skip-frames 3 assertion failure: \"$(a)\" "!=" \"$(b)\" ; - } -} - - -# Assert that EXPECTED is the result of calling RULE-NAME with the given -# arguments. -# -rule result ( expected * : rule-name args * : * ) -{ - local result ; - module [ CALLER_MODULE ] - { - modules.poke assert : result : [ $(2) : $(3) : $(4) : $(5) : $(6) : $(7) - : $(8) : $(9) ] ; - } - - if ! [ exact-equal-test $(result) : $(expected) ] - { - errors.error-skip-frames 3 assertion failure: "[" $(rule-name) [ - errors.lol->list $(args) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : - $(9) ] "]" : Expected: "[" \"$(expected)\" "]" : Got: "[" - \"$(result)\" "]" ; - } -} - - -# Assert that EXPECTED is set-equal (i.e. duplicates and ordering are ignored) -# to the result of calling RULE-NAME with the given arguments. Note that rules -# called this way may accept at most 8 parameters. -# -rule result-set-equal ( expected * : rule-name args * : * ) -{ - local result ; - module [ CALLER_MODULE ] - { - modules.poke assert : result : [ $(2) : $(3) : $(4) : $(5) : $(6) : $(7) - : $(8) : $(9) ] ; - } - - if ! [ set-equal-test $(result) : $(expected) ] - { - errors.error-skip-frames 3 assertion failure: "[" $(rule-name) [ - errors.lol->list $(args) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : - $(9) ] "]" : Expected: "[" \"$(expected)\" "]" : Got: "[" - \"$(result)\" "]" ; - } -} - - -# Assert the equality of A and B as sets. -# -rule set-equal ( a * : b * ) -{ - if ! [ set-equal-test $(a) : $(b) ] - { - errors.error-skip-frames 3 assertion failure: Expected "[" \"$(a)\" "]" - and "[" \"$(b)\" "]" to be equal as sets ; - } -} - - -# Assert that the result of calling RULE-NAME on the given arguments has a true -# logical value (is neither an empty list nor all empty strings). -# -rule true ( rule-name args * : * ) -{ - local result ; - module [ CALLER_MODULE ] - { - modules.poke assert : result : [ $(1) : $(2) : $(3) : $(4) : $(5) : $(6) - : $(7) : $(8) : $(9) ] ; - } - - if ! $(result) - { - errors.error-skip-frames 3 assertion failure: Expected true result from - "[" $(rule-name) [ errors.lol->list $(args) : $(2) : $(3) : $(4) : - $(5) : $(6) : $(7) : $(8) : $(9) ] "]" ; - } -} - - -# Assert the exact equality of A and B, not ignoring trailing empty string -# elements. -# -rule exact-equal ( a * : b * ) -{ - if ! [ exact-equal-test $(a) : $(b) ] - { - errors.error-skip-frames 3 assertion failure: \"$(a)\" "==" \"$(b)\" ; - } -} - - -# Assert that the given variable is not an empty list. -# -rule variable-not-empty ( name ) -{ - local value = [ modules.peek [ CALLER_MODULE ] : $(name) ] ; - if ! $(value)-is-not-empty - { - errors.error-skip-frames 3 assertion failure: Expected variable - \"$(name)\" not to be an empty list ; - } -} - - -rule __test__ ( ) -{ - # Helper rule used to avoid test duplication related to different list - # equality test rules. - # - local rule run-equality-test ( equality-assert : ignore-trailing-empty-strings ? ) - { - local not-equality-assert = not-$(equality-assert) ; - - # When the given equality test is expected to ignore trailing empty - # strings some of the test results should be inverted. 
- local not-equality-assert-i = not-$(equality-assert) ; - if $(ignore-trailing-empty-strings) - { - not-equality-assert-i = $(equality-assert) ; - } - - $(equality-assert) : ; - $(equality-assert) "" "" : "" "" ; - $(not-equality-assert-i) : "" "" ; - $(equality-assert) x : x ; - $(not-equality-assert) : x ; - $(not-equality-assert) "" : x ; - $(not-equality-assert) "" "" : x ; - $(not-equality-assert-i) x : x "" ; - $(equality-assert) x "" : x "" ; - $(not-equality-assert) x : "" x ; - $(equality-assert) "" x : "" x ; - - $(equality-assert) 1 2 3 : 1 2 3 ; - $(not-equality-assert) 1 2 3 : 3 2 1 ; - $(not-equality-assert) 1 2 3 : 1 5 3 ; - $(not-equality-assert) 1 2 3 : 1 "" 3 ; - $(not-equality-assert) 1 2 3 : 1 1 2 3 ; - $(not-equality-assert) 1 2 3 : 1 2 2 3 ; - $(not-equality-assert) 1 2 3 : 5 6 7 ; - - # Extra variables used here just to make sure Boost Jam or Boost Build - # do not handle lists with empty strings differently depending on - # whether they are literals or stored in variables. - - local empty = ; - local empty-strings = "" "" ; - local x-empty-strings = x "" "" ; - local empty-strings-x = "" "" x ; - - $(equality-assert) : $(empty) ; - $(not-equality-assert-i) "" : $(empty) ; - $(not-equality-assert-i) "" "" : $(empty) ; - $(not-equality-assert-i) : $(empty-strings) ; - $(not-equality-assert-i) "" : $(empty-strings) ; - $(equality-assert) "" "" : $(empty-strings) ; - $(equality-assert) $(empty) : $(empty) ; - $(equality-assert) $(empty-strings) : $(empty-strings) ; - $(not-equality-assert-i) $(empty) : $(empty-strings) ; - $(equality-assert) $(x-empty-strings) : $(x-empty-strings) ; - $(equality-assert) $(empty-strings-x) : $(empty-strings-x) ; - $(not-equality-assert) $(empty-strings-x) : $(x-empty-strings) ; - $(not-equality-assert-i) x : $(x-empty-strings) ; - $(not-equality-assert) x : $(empty-strings-x) ; - $(not-equality-assert-i) x : $(x-empty-strings) ; - $(not-equality-assert-i) x "" : $(x-empty-strings) ; - $(equality-assert) x "" "" : $(x-empty-strings) ; - $(not-equality-assert) x : $(empty-strings-x) ; - $(not-equality-assert) "" x : $(empty-strings-x) ; - $(equality-assert) "" "" x : $(empty-strings-x) ; - } - - - # --------------- - # Equality tests. - # --------------- - - run-equality-test equal : ignore-trailing-empty-strings ; - run-equality-test exact-equal ; - - - # ------------------------- - # assert.set-equal() tests. - # ------------------------- - - set-equal : ; - not-set-equal "" "" : ; - set-equal "" "" : "" ; - set-equal "" "" : "" "" ; - set-equal a b c : a b c ; - set-equal a b c : b c a ; - set-equal a b c a : a b c ; - set-equal a b c : a b c a ; - not-set-equal a b c : a b c d ; - not-set-equal a b c d : a b c ; -} diff --git a/jam-files/boost-build/util/container.jam b/jam-files/boost-build/util/container.jam deleted file mode 100644 index dd496393..00000000 --- a/jam-files/boost-build/util/container.jam +++ /dev/null @@ -1,339 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2002, 2003 Rene Rivera -# Copyright 2002, 2003, 2004 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Various container classes. - -# Base for container objects. This lets us construct recursive structures. That -# is containers with containers in them, specifically so we can tell literal -# values from node values. -# -class node -{ - rule __init__ ( - value ? # Optional value to set node to initially. 
- ) - { - self.value = $(value) ; - } - - # Set the value of this node, passing nothing will clear it. - # - rule set ( value * ) - { - self.value = $(value) ; - } - - # Get the value of this node. - # - rule get ( ) - { - return $(self.value) ; - } -} - - -# A simple vector. Interface mimics the C++ std::vector and std::list, with the -# exception that indices are one (1) based to follow Jam standard. -# -# TODO: Possibly add assertion checks. -# -class vector : node -{ - import numbers ; - import utility ; - import sequence ; - - rule __init__ ( - values * # Initial contents of vector. - ) - { - node.__init__ ; - self.value = $(values) ; - } - - # Get the value of the first element. - # - rule front ( ) - { - return $(self.value[1]) ; - } - - # Get the value of the last element. - # - rule back ( ) - { - return $(self.value[-1]) ; - } - - # Get the value of the element at the given index, one based. Access to - # elements of recursive structures is supported directly. Specifying - # additional index values recursively accesses the elements as containers. - # For example: [ $(v).at 1 : 2 ] would retrieve the second element of our - # first element, assuming the first element is a container. - # - rule at ( - index # The element index, one based. - : * # Additional indices to access recursively. - ) - { - local r = $(self.value[$(index)]) ; - if $(2) - { - r = [ $(r).at $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ] ; - } - return $(r) ; - } - - # Get the value contained in the given element. This has the same - # functionality and interface as "at" but in addition gets the value of the - # referenced element, assuming it is a "node". - # - rule get-at ( - index # The element index, one based. - : * # Additional indices to access recursively. - ) - { - local r = $(self.value[$(index)]) ; - if $(2) - { - r = [ $(r).at $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ] ; - } - return [ $(r).get ] ; - } - - # Insert the given value into the front of the vector pushing the rest of - # the elements back. - # - rule push-front ( - value # Value to become first element. - ) - { - self.value = $(value) $(self.value) ; - } - - # Remove the front element from the vector. Does not return the value. No - # effect if vector is empty. - # - rule pop-front ( ) - { - self.value = $(self.value[2-]) ; - } - - # Add the given value at the end of the vector. - # - rule push-back ( - value # Value to become back element. - ) - { - self.value += $(value) ; - } - - # Remove the back element from the vector. Does not return the value. No - # effect if vector is empty. - # - rule pop-back ( ) - { - self.value = $(self.value[1--2]) ; - } - - # Insert the given value at the given index, one based. The values at and to - # the right of the index are pushed back to make room for the new value. - # If the index is passed the end of the vector the element is added to the - # end. - # - rule insert ( - index # The index to insert at, one based. - : value # The value to insert. - ) - { - local left = $(self.value[1-$(index)]) ; - local right = $(self.value[$(index)-]) ; - if $(right)-is-not-empty - { - left = $(left[1--2]) ; - } - self.value = $(left) $(value) $(right) ; - } - - # Remove one or more elements from the vector. The range is inclusive, and - # not specifying an end is equivalent to the [start, start] range. - # - rule erase ( - start # Index of first element to remove. - end ? # Optional, index of last element to remove. 
- ) - { - end ?= $(start) ; - local left = $(self.value[1-$(start)]) ; - left = $(left[1--2]) ; - local right = $(self.value[$(end)-]) ; - right = $(right[2-]) ; - self.value = $(left) $(right) ; - } - - # Remove all elements from the vector. - # - rule clear ( ) - { - self.value = ; - } - - # The number of elements in the vector. - # - rule size ( ) - { - return [ sequence.length $(self.value) ] ; - } - - # Returns "true" if there are NO elements in the vector, empty otherwise. - # - rule empty ( ) - { - if ! $(self.value)-is-not-empty - { - return true ; - } - } - - # Returns the textual representation of content. - # - rule str ( ) - { - return "[" [ sequence.transform utility.str : $(self.value) ] "]" ; - } - - # Sorts the vector inplace, calling 'utility.less' for comparisons. - # - rule sort ( ) - { - self.value = [ sequence.insertion-sort $(self.value) : utility.less ] ; - } - - # Returns true if content is equal to the content of other vector. Uses - # 'utility.equal' for comparison. - # - rule equal ( another ) - { - local mismatch ; - local size = [ size ] ; - if $(size) = [ $(another).size ] - { - for local i in [ numbers.range 1 $(size) ] - { - if ! [ utility.equal [ at $(i) ] [ $(another).at $(i) ] ] - { - mismatch = true ; - } - } - } - else - { - mismatch = true ; - } - - if ! $(mismatch) - { - return true ; - } - } -} - - -rule __test__ ( ) -{ - import assert ; - import "class" : new ; - - local v1 = [ new vector ] ; - assert.true $(v1).equal $(v1) ; - assert.true $(v1).empty ; - assert.result 0 : $(v1).size ; - assert.result "[" "]" : $(v1).str ; - $(v1).push-back b ; - $(v1).push-front a ; - assert.result "[" a b "]" : $(v1).str ; - assert.result a : $(v1).front ; - assert.result b : $(v1).back ; - $(v1).insert 2 : d ; - $(v1).insert 2 : c ; - $(v1).insert 4 : f ; - $(v1).insert 4 : e ; - $(v1).pop-back ; - assert.result 5 : $(v1).size ; - assert.result d : $(v1).at 3 ; - $(v1).pop-front ; - assert.result c : $(v1).front ; - assert.false $(v1).empty ; - $(v1).erase 3 4 ; - assert.result 2 : $(v1).size ; - - local v2 = [ new vector q w e r t y ] ; - assert.result 6 : $(v2).size ; - $(v1).push-back $(v2) ; - assert.result 3 : $(v1).size ; - local v2-alias = [ $(v1).back ] ; - assert.result e : $(v2-alias).at 3 ; - $(v1).clear ; - assert.true $(v1).empty ; - assert.false $(v2-alias).empty ; - $(v2).pop-back ; - assert.result t : $(v2-alias).back ; - - local v3 = [ new vector ] ; - $(v3).push-back [ new vector 1 2 3 4 5 ] ; - $(v3).push-back [ new vector a b c ] ; - assert.result "[" "[" 1 2 3 4 5 "]" "[" a b c "]" "]" : $(v3).str ; - $(v3).push-back [ new vector [ new vector x y z ] [ new vector 7 8 9 ] ] ; - assert.result 1 : $(v3).at 1 : 1 ; - assert.result b : $(v3).at 2 : 2 ; - assert.result a b c : $(v3).get-at 2 ; - assert.result 7 8 9 : $(v3).get-at 3 : 2 ; - - local v4 = [ new vector 4 3 6 ] ; - $(v4).sort ; - assert.result 3 4 6 : $(v4).get ; - assert.false $(v4).equal $(v3) ; - - local v5 = [ new vector 3 4 6 ] ; - assert.true $(v4).equal $(v5) ; - # Check that vectors of different sizes are considered non-equal. 
- $(v5).pop-back ; - assert.false $(v4).equal $(v5) ; - - local v6 = [ new vector [ new vector 1 2 3 ] ] ; - assert.true $(v6).equal [ new vector [ new vector 1 2 3 ] ] ; - - local v7 = [ new vector 111 222 333 ] ; - assert.true $(v7).equal $(v7) ; - $(v7).insert 4 : 444 ; - assert.result 111 222 333 444 : $(v7).get ; - $(v7).insert 999 : xxx ; - assert.result 111 222 333 444 xxx : $(v7).get ; - - local v8 = [ new vector "" "" "" ] ; - assert.true $(v8).equal $(v8) ; - assert.false $(v8).empty ; - assert.result 3 : $(v8).size ; - assert.result "" : $(v8).at 1 ; - assert.result "" : $(v8).at 2 ; - assert.result "" : $(v8).at 3 ; - assert.result : $(v8).at 4 ; - $(v8).insert 2 : 222 ; - assert.result 4 : $(v8).size ; - assert.result "" 222 "" "" : $(v8).get ; - $(v8).insert 999 : "" ; - assert.result 5 : $(v8).size ; - assert.result "" 222 "" "" "" : $(v8).get ; - $(v8).insert 999 : xxx ; - assert.result 6 : $(v8).size ; - assert.result "" 222 "" "" "" xxx : $(v8).get ; - - # Regression test for a bug causing vector.equal to compare only the first - # and the last element in the given vectors. - local v9 = [ new vector 111 xxx 222 ] ; - local v10 = [ new vector 111 yyy 222 ] ; - assert.false $(v9).equal $(v10) ; -} diff --git a/jam-files/boost-build/util/doc.jam b/jam-files/boost-build/util/doc.jam deleted file mode 100644 index a7515588..00000000 --- a/jam-files/boost-build/util/doc.jam +++ /dev/null @@ -1,997 +0,0 @@ -# Copyright 2002, 2005 Dave Abrahams -# Copyright 2002, 2003, 2006 Rene Rivera -# Copyright 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Documentation system, handles --help requests. -# It defines rules that attach documentation to modules, rules, and variables. -# Collects and generates documentation for the various parts of the build -# system. The documentation is collected from comments integrated into the code. - -import modules ; -import print ; -import set ; -import container ; -import "class" ; -import sequence ; -import path ; - - -# The type of output to generate. -# "console" is formated text echoed to the console (the default); -# "text" is formated text appended to the output file; -# "html" is HTML output to the file. -# -help-output = console ; - - -# The file to output documentation to when generating "text" or "html" help. -# This is without extension as the extension is determined by the type of -# output. -# -help-output-file = help ; - -# Whether to include local rules in help output. -# -.option.show-locals ?= ; - -# When showing documentation for a module, whether to also generate -# automatically the detailed docs for each item in the module. -# -.option.detailed ?= ; - -# Generate debug output as the help is generated and modules are parsed. -# -.option.debug ?= ; - -# Enable or disable a documentation option. -# -local rule set-option ( - option # The option name. - : value ? # Enabled (non-empty), or disabled (empty) -) -{ - .option.$(option) = $(value) ; -} - - -# Set the type of output. -# -local rule set-output ( type ) -{ - help-output = $(type) ; -} - - -# Set the output to a file. -# -local rule set-output-file ( file ) -{ - help-output-file = $(file) ; -} - - -# Extracts the brief comment from a complete comment. The brief comment is the -# first sentence. -# -local rule brief-comment ( - docs * # The comment documentation. -) -{ - local d = $(docs:J=" ") ; - local p = [ MATCH ".*([.])$" : $(d) ] ; - if ! 
$(p) { d = $(d)"." ; } - d = $(d)" " ; - local m = [ MATCH "^([^.]+[.])(.*)" : $(d) ] ; - local brief = $(m[1]) ; - while $(m[2]) && [ MATCH "^([^ ])" : $(m[2]) ] - { - m = [ MATCH "^([^.]+[.])(.*)" : $(m[2]) ] ; - brief += $(m[1]) ; - } - return $(brief:J="") ; -} - - -# Specifies the documentation for the current module. -# -local rule set-module-doc ( - module-name ? # The name of the module to document. - : docs * # The documentation for the module. -) -{ - module-name ?= * ; - - $(module-name).brief = [ brief-comment $(docs) ] ; - $(module-name).docs = $(docs) ; - - if ! $(module-name) in $(documented-modules) - { - documented-modules += $(module-name) ; - } -} - - -# Specifies the documentation for the current module. -# -local rule set-module-copyright ( - module-name ? # The name of the module to document. - : copyright * # The copyright for the module. -) -{ - module-name ?= * ; - - $(module-name).copy-brief = [ brief-comment $(copyright) ] ; - $(module-name).copy-docs = $(docs) ; - - if ! $(module-name) in $(documented-modules) - { - documented-modules += $(module-name) ; - } -} - - -# Specifies the documentation for a rule in the current module. If called in the -# global module, this documents a global rule. -# -local rule set-rule-doc ( - name # The name of the rule. - module-name ? # The name of the module to document. - is-local ? # Whether the rule is local to the module. - : docs * # The documentation for the rule. -) -{ - module-name ?= * ; - - $(module-name).$(name).brief = [ brief-comment $(docs) ] ; - $(module-name).$(name).docs = $(docs) ; - $(module-name).$(name).is-local = $(is-local) ; - - if ! $(name) in $($(module-name).rules) - { - $(module-name).rules += $(name) ; - } -} - - -# Specify a class, will turn a rule into a class. -# -local rule set-class-doc ( - name # The name of the class. - module-name ? # The name of the module to document. - : super-name ? # The super class name. -) -{ - module-name ?= * ; - - $(module-name).$(name).is-class = true ; - $(module-name).$(name).super-name = $(super-name) ; - $(module-name).$(name).class-rules = - [ MATCH "^($(name)[.].*)" : $($(module-name).rules) ] ; - $(module-name).$($(module-name).$(name).class-rules).is-class-rule = true ; - - $(module-name).classes += $(name) ; - $(module-name).class-rules += $($(module-name).$(name).class-rules) ; - $(module-name).rules = - [ set.difference $($(module-name).rules) : - $(name) $($(module-name).$(name).class-rules) ] ; -} - - -# Set the argument call signature of a rule. -# -local rule set-rule-arguments-signature ( - name # The name of the rule. - module-name ? # The name of the module to document. - : signature * # The arguments signature. -) -{ - module-name ?= * ; - - $(module-name).$(name).signature = $(signature) ; -} - - -# Specifies the documentation for an argument of a rule. -# -local rule set-argument-doc ( - name # The name of the argument. - qualifier # Argument syntax qualifier, "*", "+", etc. - rule-name # The name of the rule. - module-name ? # THe optional name of the module. - : docs * # The documentation. -) -{ - module-name ?= * ; - - $(module-name).$(rule-name).args.$(name).qualifier = $(qualifier) ; - $(module-name).$(rule-name).args.$(name).docs = $(docs) ; - - if ! $(name) in $($(module-name).$(rule-name).args) - { - $(module-name).$(rule-name).args += $(name) ; - } -} - - -# Specifies the documentation for a variable in the current module. If called in -# the global module, the global variable is documented. 
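The `brief-comment` rule above reduces a comment block to its first sentence, treating a period as a sentence end only when it is followed by whitespace, so dotted tokens such as version numbers do not split the brief early. A rough Python equivalent (`brief_comment` is an illustrative name, not Boost.Build API):

```python
import re

def brief_comment(docs):
    """Sketch of doc.jam's brief-comment: join the comment lines and
    return the first sentence, where a '.' only ends the sentence if
    followed by whitespace (so '3.14' does not split)."""
    d = " ".join(docs)
    if not d.endswith("."):
        d += "."
    d += " "                               # guarantee a closing space
    m = re.match(r"([^.]+\.)(.*)", d, re.S)
    brief, rest = m.group(1), m.group(2)
    # Keep consuming while the '.' was not followed by a space.
    while rest and not rest[0].isspace():
        m = re.match(r"([^.]+\.)(.*)", rest, re.S)
        if not m:
            break
        brief += m.group(1)
        rest = m.group(2)
    return brief

print(brief_comment(["Version 3.14 is out.", "More detail follows."]))
# -> 'Version 3.14 is out.'
```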
-# -local rule set-variable-doc ( - name # The name of the variable. - default # The default value. - initial # The initial value. - module-name ? # The name of the module to document. - : docs * # The documentation for the variable. -) -{ - module-name ?= * ; - - $(module-name).$(name).brief = [ brief-comment $(docs) ] ; - $(module-name).$(name).default = $(default) ; - $(module-name).$(name).initial = $(initial) ; - $(module-name).$(name).docs = $(docs) ; - - if ! $(name) in $($(module-name).variables) - { - $(module-name).variables += $(name) ; - } -} - - -# Generates a general description of the documentation and help system. -# -local rule print-help-top ( ) -{ - print.section "General command line usage" ; - - print.text " bjam [options] [properties] [targets] - - Options, properties and targets can be specified in any order. - " ; - - print.section "Important Options" ; - - print.list-start ; - print.list-item "--clean Remove targets instead of building" ; - print.list-item "-a Rebuild everything" ; - print.list-item "-n Don't execute the commands, only print them" ; - print.list-item "-d+2 Show commands as they are executed" ; - print.list-item "-d0 Supress all informational messages" ; - print.list-item "-q Stop at first error" ; - print.list-item "--debug-configuration Diagnose configuration" ; - print.list-item "--debug-building Report which targets are built with what properties" ; - print.list-item "--debug-generator Diagnose generator search/execution" ; - print.list-end ; - - print.section "Further Help" - The following options can be used to obtain additional documentation. - ; - - print.list-start ; - print.list-item "--help-options Print more obscure command line options." ; - print.list-item "--help-internal Boost.Build implementation details." ; - print.list-item "--help-doc-options Implementation details doc formatting." ; - print.list-end ; -} - - -# Generate Jam/Boost.Jam command usage information. -# -local rule print-help-usage ( ) -{ - print.section "Boost.Jam Usage" - "bjam [ options... ] targets..." - ; - print.list-start ; - print.list-item -a; - Build all targets, even if they are current. ; - print.list-item -fx; - Read '"x"' as the Jamfile for building instead of searching for the - Boost.Build system. ; - print.list-item -jx; - Run up to '"x"' commands concurrently. ; - print.list-item -n; - Do not execute build commands. Instead print out the commands as they - would be executed if building. ; - print.list-item -ox; - Output the used build commands to file '"x"'. ; - print.list-item -q; - Quit as soon as a build failure is encountered. Without this option - Boost.Jam will continue building as many targets as it can. - print.list-item -sx=y; - Sets a Jam variable '"x"' to the value '"y"', overriding any value that - variable would have from the environment. ; - print.list-item -tx; - Rebuild the target '"x"', even if it is up-to-date. ; - print.list-item -v; - Display the version of bjam. ; - print.list-item --x; - Any option not explicitly handled by Boost.Jam remains available to - build scripts using the '"ARGV"' variable. ; - print.list-item -dn; - Enables output of diagnostic messages. The debug level '"n"' and all - below it are enabled by this option. ; - print.list-item -d+n; - Enables output of diagnostic messages. Only the output for debug level - '"n"' is enabled. ; - print.list-end ; - print.section "Debug Levels" - Each debug level shows a different set of information. Usually with - higher levels producing more verbose information. 
The following levels - are supported: ; - print.list-start ; - print.list-item 0; - Turn off all diagnostic output. Only errors are reported. ; - print.list-item 1; - Show the actions taken for building targets, as they are executed. ; - print.list-item 2; - Show "quiet" actions and display all action text, as they are executed. ; - print.list-item 3; - Show dependency analysis, and target/source timestamps/paths. ; - print.list-item 4; - Show arguments of shell invocations. ; - print.list-item 5; - Show rule invocations and variable expansions. ; - print.list-item 6; - Show directory/header file/archive scans, and attempts at binding to targets. ; - print.list-item 7; - Show variable settings. ; - print.list-item 8; - Show variable fetches, variable expansions, and evaluation of '"if"' expressions. ; - print.list-item 9; - Show variable manipulation, scanner tokens, and memory usage. ; - print.list-item 10; - Show execution times for rules. ; - print.list-item 11; - Show parsing progress of Jamfiles. ; - print.list-item 12; - Show graph for target dependencies. ; - print.list-item 13; - Show changes in target status (fate). ; - print.list-end ; -} - - -# Generates description of options controlling the help system. This -# automatically reads the options as all variables in the doc module of the form -# ".option.*". -# -local rule print-help-options ( - module-name # The doc module. -) -{ - print.section "Help Options" - These are all the options available for enabling or disabling to control - the help system in various ways. Options can be enabled or disabled with - '"--help-enable-<option>"', and "'--help-disable-<option>'" - respectively. - ; - local options-to-list = [ MATCH ^[.]option[.](.*) : $($(module-name).variables) ] ; - if $(options-to-list) - { - print.list-start ; - for local option in [ sequence.insertion-sort $(options-to-list) ] - { - local def = disabled ; - if $($(module-name)..option.$(option).default) != "(empty)" - { - def = enabled ; - } - print.list-item $(option): $($(module-name)..option.$(option).docs) - Default is $(def). ; - } - print.list-end ; - } -} - - -# Generate brief documentation for all the known items in the section for a -# module. Possible sections are: "rules", and "variables". -# -local rule print-help-module-section ( - module # The module name. - section # rules or variables. - : section-head # The title of the section. - section-description * # The detailed description of the section. -) -{ - if $($(module).$(section)) - { - print.section $(section-head) $(section-description) ; - print.list-start ; - for local item in [ sequence.insertion-sort $($(module).$(section)) ] - { - local show = ; - if ! $($(module).$(item).is-local) - { - show = yes ; - } - if $(.option.show-locals) - { - show = yes ; - } - if $(show) - { - print.list-item $(item): $($(module).$(item).brief) ; - } - } - print.list-end ; - } -} - - -# Generate documentation for all possible modules. We attempt to list all known -# modules together with a brief description of each. -# -local rule print-help-all ( - ignored # Usually the module name, but is ignored here. -) -{ - print.section "Modules" - "These are all the known modules. Use --help <module> to get more" - "detailed information." - ; - if $(documented-modules) - { - print.list-start ; - for local module-name in [ sequence.insertion-sort $(documented-modules) ] - { - # The brief docs for each module. 
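`print-help-options` above is self-describing: it discovers the help system's own switches by matching every documented variable named `.option.<name>` in the doc module, reporting each as disabled unless a non-`"(empty)"` default was recorded. A small Python sketch of that discovery step; the variable data is illustrative, not the real doc-module state:

```python
import re

# Each entry mimics a documented '.option.<name>' variable:
# name -> (recorded default, brief docs).
variables = {
    ".option.show-locals": ("(empty)", "Include local rules in help output."),
    ".option.detailed":    ("(empty)", "Also detail each item in a module."),
    ".option.debug":       ("(empty)", "Trace parsing as help is generated."),
}

for name in sorted(variables):
    m = re.match(r"\.option\.(.+)", name)
    if not m:
        continue                 # not a help-system option variable
    default, docs = variables[name]
    state = "disabled" if default == "(empty)" else "enabled"
    opt = m.group(1)
    print(f"{opt}: {docs} Toggle with --help-enable-{opt} / "
          f"--help-disable-{opt}. Default is {state}.")
```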
- print.list-item $(module-name): $($(module-name).brief) ; - } - print.list-end ; - } - # The documentation for each module when details are requested. - if $(documented-modules) && $(.option.detailed) - { - for local module-name in [ sequence.insertion-sort $(documented-modules) ] - { - # The brief docs for each module. - print-help-module $(module-name) ; - } - } -} - - -# Generate documentation for a module. Basic information about the module is -# generated. -# -local rule print-help-module ( - module-name # The module to generate docs for. -) -{ - # Print the docs. - print.section "Module '$(module-name)'" $($(module-name).docs) ; - - # Print out the documented classes. - print-help-module-section $(module-name) classes : "Module '$(module-name)' classes" - Use --help $(module-name).<class-name> to get more information. ; - - # Print out the documented rules. - print-help-module-section $(module-name) rules : "Module '$(module-name)' rules" - Use --help $(module-name).<rule-name> to get more information. ; - - # Print out the documented variables. - print-help-module-section $(module-name) variables : "Module '$(module-name)' variables" - Use --help $(module-name).<variable-name> to get more information. ; - - # Print out all the same information but indetailed form. - if $(.option.detailed) - { - print-help-classes $(module-name) ; - print-help-rules $(module-name) ; - print-help-variables $(module-name) ; - } -} - - -# Generate documentation for a set of rules in a module. -# -local rule print-help-rules ( - module-name # Module of the rules. - : name * # Optional list of rules to describe. -) -{ - name ?= $($(module-name).rules) ; - if [ set.intersection $(name) : $($(module-name).rules) $($(module-name).class-rules) ] - { - # Print out the given rules. - for local rule-name in [ sequence.insertion-sort $(name) ] - { - if $(.option.show-locals) || ! $($(module-name).$(rule-name).is-local) - { - local signature = $($(module-name).$(rule-name).signature:J=" ") ; - signature ?= "" ; - print.section "Rule '$(module-name).$(rule-name) ( $(signature) )'" - $($(module-name).$(rule-name).docs) ; - if $($(module-name).$(rule-name).args) - { - print.list-start ; - for local arg-name in $($(module-name).$(rule-name).args) - { - print.list-item $(arg-name): $($(module-name).$(rule-name).args.$(arg-name).docs) ; - } - print.list-end ; - } - } - } - } -} - - -# Generate documentation for a set of classes in a module. -# -local rule print-help-classes ( - module-name # Module of the classes. - : name * # Optional list of classes to describe. -) -{ - name ?= $($(module-name).classes) ; - if [ set.intersection $(name) : $($(module-name).classes) ] - { - # Print out the given classes. - for local class-name in [ sequence.insertion-sort $(name) ] - { - if $(.option.show-locals) || ! $($(module-name).$(class-name).is-local) - { - local signature = $($(module-name).$(class-name).signature:J=" ") ; - signature ?= "" ; - print.section "Class '$(module-name).$(class-name) ( $(signature) )'" - $($(module-name).$(class-name).docs) - "Inherits from '"$($(module-name).$(class-name).super-name)"'." ; - if $($(module-name).$(class-name).args) - { - print.list-start ; - for local arg-name in $($(module-name).$(class-name).args) - { - print.list-item $(arg-name): $($(module-name).$(class-name).args.$(arg-name).docs) ; - } - print.list-end ; - } - } - - # Print out the documented rules of the class. 
- print-help-module-section $(module-name) $(class-name).class-rules : "Class '$(module-name).$(class-name)' rules" - Use --help $(module-name).<rule-name> to get more information. ; - - # Print out all the rules if details are requested. - if $(.option.detailed) - { - print-help-rules $(module-name) : $($(module-name).$(class-name).class-rules) ; - } - } - } -} - - -# Generate documentation for a set of variables in a module. -# -local rule print-help-variables ( - module-name ? # Module of the variables. - : name * # Optional list of variables to describe. -) -{ - name ?= $($(module-name).variables) ; - if [ set.intersection $(name) : $($(module-name).variables) ] - { - # Print out the given variables. - for local variable-name in [ sequence.insertion-sort $(name) ] - { - print.section "Variable '$(module-name).$(variable-name)'" $($(module-name).$(variable-name).docs) ; - if $($(module-name).$(variable-name).default) || - $($(module-name).$(variable-name).initial) - { - print.list-start ; - if $($(module-name).$(variable-name).default) - { - print.list-item "default value:" '$($(module-name).$(variable-name).default:J=" ")' ; - } - if $($(module-name).$(variable-name).initial) - { - print.list-item "initial value:" '$($(module-name).$(variable-name).initial:J=" ")' ; - } - print.list-end ; - } - } - } -} - - -# Generate documentation for a project. -# -local rule print-help-project ( - unused ? - : jamfile * # The project Jamfile. -) -{ - if $(jamfile<$(jamfile)>.docs) - { - # Print the docs. - print.section "Project-specific help" - Project has jamfile at $(jamfile) ; - - print.lines $(jamfile<$(jamfile)>.docs) "" ; - } -} - - -# Generate documentation for a config file. -# -local rule print-help-config ( - unused ? - : type # The type of configuration file user or site. - config-file # The configuration Jamfile. -) -{ - if $(jamfile<$(config-file)>.docs) - { - # Print the docs. - print.section "Configuration help" - Configuration file at $(config-file) ; - - print.lines $(jamfile<$(config-file)>.docs) "" ; - } -} - - -ws = " " ; - -# Extract the text from a block of comments. -# -local rule extract-comment ( - var # The name of the variable to extract from. -) -{ - local comment = ; - local line = $($(var)[1]) ; - local l = [ MATCH "^[$(ws)]*(#)(.*)$" : $(line) ] ; - while $(l[1]) && $($(var)) - { - if $(l[2]) { comment += [ MATCH "^[$(ws)]?(.*)$" : $(l[2]) ] ; } - else { comment += "" ; } - $(var) = $($(var)[2-]) ; - line = $($(var)[1]) ; - l = [ MATCH "^[$(ws)]*(#)(.*)$" : $(line) ] ; - } - return $(comment) ; -} - - -# Extract s single line of Jam syntax, ignoring any comments. -# -local rule extract-syntax ( - var # The name of the variable to extract from. -) -{ - local syntax = ; - local line = $($(var)[1]) ; - while ! $(syntax) && ! [ MATCH "^[$(ws)]*(#)" : $(line) ] && $($(var)) - { - local m = [ MATCH "^[$(ws)]*(.*)$" : $(line) ] ; - if $(m) && ! $(m) = "" - { - syntax = $(m) ; - } - $(var) = $($(var)[2-]) ; - line = $($(var)[1]) ; - } - return $(syntax) ; -} - - -# Extract the next token, this is either a single Jam construct or a comment as -# a single token. -# -local rule extract-token ( - var # The name of the variable to extract from. -) -{ - local parts = ; - while ! $(parts) - { - parts = [ MATCH "^[$(ws)]*([^$(ws)]+)[$(ws)]*(.*)" : $($(var)[1]) ] ; - if ! 
$(parts) - { - $(var) = $($(var)[2-]) ; - } - } - local token = ; - if [ MATCH "^(#)" : $(parts[1]) ] - { - token = $(parts:J=" ") ; - $(var) = $($(var)[2-]) ; - } - else - { - token = $(parts[1]) ; - $(var) = $(parts[2-]:J=" ") $($(var)[2-]) ; - } - return $(token) ; -} - - -# Scan for a rule declaration as the next item in the variable. -# -local rule scan-rule ( - syntax ? # The first part of the text which contains the rule declaration. - : var # The name of the variable to extract from. -) -{ - local rule-parts = - [ MATCH "^[$(ws)]*(rule|local[$(ws)]*rule)[$(ws)]+([^$(ws)]+)[$(ws)]*(.*)" : $(syntax:J=" ") ] ; - if $(rule-parts[1]) - { - # Mark as doc for rule. - local rule-name = $(rule-parts[2]) ; - if $(scope-name) - { - rule-name = $(scope-name).$(rule-name) ; - } - local is-local = [ MATCH "^(local).*" : $(rule-parts[1]) ] ; - if $(comment-block) - { - set-rule-doc $(rule-name) $(module-name) $(is-local) : $(comment-block) ; - } - # Parse args of rule. - $(var) = $(rule-parts[3-]) $($(var)) ; - set-rule-arguments-signature $(rule-name) $(module-name) : [ scan-rule-arguments $(var) ] ; - # Scan within this rules scope. - local scope-level = [ extract-token $(var) ] ; - local scope-name = $(rule-name) ; - while $(scope-level) - { - local comment-block = [ extract-comment $(var) ] ; - local syntax-block = [ extract-syntax $(var) ] ; - if [ scan-rule $(syntax-block) : $(var) ] - { - } - else if [ MATCH "^(\\{)" : $(syntax-block) ] - { - scope-level += "{" ; - } - else if [ MATCH "^[^\\}]*([\\}])[$(ws)]*$" : $(syntax-block) ] - { - scope-level = $(scope-level[2-]) ; - } - } - - return true ; - } -} - - -# Scan the arguments of a rule. -# -local rule scan-rule-arguments ( - var # The name of the variable to extract from. -) -{ - local arg-syntax = ; - local token = [ extract-token $(var) ] ; - while $(token) != "(" && $(token) != "{" - { - token = [ extract-token $(var) ] ; - } - if $(token) != "{" - { - token = [ extract-token $(var) ] ; - } - local arg-signature = ; - while $(token) != ")" && $(token) != "{" - { - local arg-name = ; - local arg-qualifier = " " ; - local arg-doc = ; - if $(token) = ":" - { - arg-signature += $(token) ; - token = [ extract-token $(var) ] ; - } - arg-name = $(token) ; - arg-signature += $(token) ; - token = [ extract-token $(var) ] ; - if [ MATCH "^([\\*\\+\\?])" : $(token) ] - { - arg-qualifier = $(token) ; - arg-signature += $(token) ; - token = [ extract-token $(var) ] ; - } - if $(token) = ":" - { - arg-signature += $(token) ; - token = [ extract-token $(var) ] ; - } - if [ MATCH "^(#)" : $(token) ] - { - $(var) = $(token) $($(var)) ; - arg-doc = [ extract-comment $(var) ] ; - token = [ extract-token $(var) ] ; - } - set-argument-doc $(arg-name) $(arg-qualifier) $(rule-name) $(module-name) : $(arg-doc) ; - } - while $(token) != "{" - { - token = [ extract-token $(var) ] ; - } - $(var) = "{" $($(var)) ; - arg-signature ?= "" ; - return $(arg-signature) ; -} - - -# Scan for a variable declaration. -# -local rule scan-variable ( - syntax ? # The first part of the text which contains the variable declaration. - : var # The name of the variable to extract from. 
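The scanner rules above alternate between two extractions: `extract-comment` pops a leading run of `#` lines, and `extract-syntax` pops lines until it finds real Jam syntax, stopping at the next comment; `scan-rule` and `scan-variable` (continued below) then pattern-match the syntax and attach the saved comment as documentation. (Note the deleted `scan-variable` declares `local initial-valie`, evidently a typo for `initial-value`.) A compact Python sketch of the two extraction steps, with hypothetical function names:

```python
import re

WS = r"[ \t]"

def extract_comment(lines):
    """Pop a leading run of '#' comment lines and return their text."""
    comment = []
    while lines:
        m = re.match(rf"^{WS}*#(.*)$", lines[0])
        if not m:
            break
        comment.append(m.group(1).strip())
        lines.pop(0)
    return comment

def extract_syntax(lines):
    """Pop lines until the first non-empty line of Jam syntax,
    stopping (without consuming) at the next comment."""
    while lines and not re.match(rf"^{WS}*#", lines[0]):
        line = lines.pop(0).strip()
        if line:
            return line
    return None

source = [
    "# Returns the sum of a and b.",
    "rule add ( a b )",
    "{",
]
docs = extract_comment(source)    # ['Returns the sum of a and b.']
syntax = extract_syntax(source)   # 'rule add ( a b )'
m = re.match(r"(local\s+)?rule\s+(\S+)", syntax)
print(f"documented rule '{m.group(2)}': {docs[0]}")
```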
-) -{ - # [1] = name, [2] = value(s) - local var-parts = - [ MATCH "^[$(ws)]*([^$(ws)]+)[$(ws)]+([\\?\\=]*)[$(ws)]+([^\\;]*)\\;" : $(syntax) ] ; - if $(var-parts) - { - local value = [ MATCH "^(.*)[ ]$" : $(var-parts[3-]:J=" ") ] ; - local default-value = "" ; - local initial-valie = "" ; - if $(var-parts[2]) = "?=" - { - default-value = $(value) ; - default-value ?= "(empty)" ; - } - else - { - initial-value = $(value) ; - initial-value ?= "(empty)" ; - } - if $(comment-block) - { - set-variable-doc $(var-parts[1]) $(default-value) $(initial-value) $(module-name) : $(comment-block) ; - } - return true ; - } -} - - -# Scan a class declaration. -# -local rule scan-class ( - syntax ? # The syntax text for the class declaration. -) -{ - # [1] = class?, [2] = name, [3] = superclass - local class-parts = - [ MATCH "^[$(ws)]*([^$(ws)]+)[$(ws)]+([^$(ws)]+)[$(ws)]+:*[$(ws)]*([^$(ws);]*)" : $(syntax) ] ; - if $(class-parts[1]) = "class" || $(class-parts[1]) = "class.class" - { - set-class-doc $(class-parts[2]) $(module-name) : $(class-parts[3]) ; - } -} - - -# Scan a module file for documentation comments. This also invokes any actions -# assigned to the module. The actions are the rules that do the actual output of -# the documentation. This rule is invoked as the header scan rule for the module -# file. -# -rule scan-module ( - target # The module file. - : text * # The text in the file, one item per line. - : action * # Rule to call to output docs for the module. -) -{ - if $(.option.debug) { ECHO "HELP:" scanning module target '$(target)' ; } - local module-name = $(target:B) ; - local module-documented = ; - local comment-block = ; - local syntax-block = ; - # This is a hack because we can not get the line of a file if it happens to - # not have a new-line termination. - text += "}" ; - while $(text) - { - comment-block = [ extract-comment text ] ; - syntax-block = [ extract-syntax text ] ; - if $(.option.debug) - { - ECHO "HELP:" comment block; '$(comment-block)' ; - ECHO "HELP:" syntax block; '$(syntax-block)' ; - } - if [ scan-rule $(syntax-block) : text ] { } - else if [ scan-variable $(syntax-block) : text ] { } - else if [ scan-class $(syntax-block) ] { } - else if [ MATCH .*([cC]opyright).* : $(comment-block:J=" ") ] - { - # mark as the copy for the module. - set-module-copyright $(module-name) : $(comment-block) ; - } - else if $(action[1]) in "print-help-project" "print-help-config" - && ! $(jamfile<$(target)>.docs) - { - # special module docs for the project jamfile. - jamfile<$(target)>.docs = $(comment-block) ; - } - else if ! $(module-documented) - { - # document the module. - set-module-doc $(module-name) : $(comment-block) ; - module-documented = true ; - } - } - if $(action) - { - $(action[1]) $(module-name) : $(action[2-]) ; - } -} - - -# Import scan-module to global scope, so that it is available during header -# scanning phase. -# -IMPORT $(__name__) : scan-module : : doc.scan-module ; - - -# Read in a file using the SHELL builtin and return the individual lines as -# would be done for header scanning. -# -local rule read-file ( - file # The file to read in. -) -{ - file = [ path.native [ path.root [ path.make $(file) ] [ path.pwd ] ] ] ; - if ! 
$(.file<$(file)>.lines) - { - local content ; - switch [ modules.peek : OS ] - { - case NT : - content = [ SHELL "TYPE \"$(file)\"" ] ; - - case * : - content = [ SHELL "cat \"$(file)\"" ] ; - } - local lines ; - local nl = " -" ; - local << = "([^$(nl)]*)[$(nl)](.*)" ; - local line+ = [ MATCH "$(<<)" : "$(content)" ] ; - while $(line+) - { - lines += $(line+[1]) ; - line+ = [ MATCH "$(<<)" : "$(line+[2])" ] ; - } - .file<$(file)>.lines = $(lines) ; - } - return $(.file<$(file)>.lines) ; -} - - -# Add a scan action to perform to generate the help documentation. The action -# rule is passed the name of the module as the first argument. The second -# argument(s) are optional and passed directly as specified here. -# -local rule do-scan ( - modules + # The modules to scan and perform the action on. - : action * # The action rule, plus the secondary arguments to pass to the action rule. -) -{ - if $(help-output) = text - { - print.output $(help-output-file).txt plain ; - ALWAYS $(help-output-file).txt ; - DEPENDS all : $(help-output-file).txt ; - } - if $(help-output) = html - { - print.output $(help-output-file).html html ; - ALWAYS $(help-output-file).html ; - DEPENDS all : $(help-output-file).html ; - } - for local module-file in $(modules[1--2]) - { - scan-module $(module-file) : [ read-file $(module-file) ] ; - } - scan-module $(modules[-1]) : [ read-file $(modules[-1]) ] : $(action) ; -} diff --git a/jam-files/boost-build/util/indirect.jam b/jam-files/boost-build/util/indirect.jam deleted file mode 100644 index ec63f192..00000000 --- a/jam-files/boost-build/util/indirect.jam +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2003 Dave Abrahams -# Copyright 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import modules ; -import numbers ; - - -# The pattern that indirect rules must match: module%rule -.pattern = ^([^%]*)%([^%]+)$ ; - - -# -# Type checking rules. -# -local rule indirect-rule ( x ) -{ - if ! [ MATCH $(.pattern) : $(x) ] - { - return "expected a string of the form module%rule, but got \""$(x)"\" for argument" ; - } -} - - -# Make an indirect rule which calls the given rule. If context is supplied it is -# expected to be the module in which to invoke the rule by the 'call' rule -# below. Otherwise, the rule will be invoked in the module of this rule's -# caller. -# -rule make ( rulename bound-args * : context ? ) -{ - context ?= [ CALLER_MODULE ] ; - context ?= "" ; - return $(context)%$(rulename) $(bound-args) ; -} - - -# Make an indirect rule which calls the given rule. 'rulename' may be a -# qualified rule; if so it is returned unchanged. Otherwise, if frames is not -# supplied, the result will be invoked (by 'call', below) in the module of the -# caller. Otherwise, frames > 1 specifies additional call frames to back up in -# order to find the module context. -# -rule make-qualified ( rulename bound-args * : frames ? ) -{ - if [ MATCH $(.pattern) : $(rulename) ] - { - return $(rulename) $(bound-args) ; - } - else - { - frames ?= 1 ; - # If the rule name includes a Jamfile module, grab it. - local module-context = [ MATCH ^(Jamfile<[^>]*>)\\..* : $(rulename) ] ; - - if ! $(module-context) - { - # Take the first dot-separated element as module name. This disallows - # module names with dots, but allows rule names with dots. 
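The `read-file` rule above works around classic Jam's lack of a file-reading builtin: it shells out (`TYPE` on NT, `cat` elsewhere), then peels the captured text apart one newline match at a time, caching the result per file. The same idea in Python, where both pieces exist natively; the module-level dict mirrors the `.file<...>.lines` variables:

```python
import subprocess
import sys

_cache = {}   # path -> list of lines, like Jam's .file<...>.lines

def read_file(path):
    """Sketch of doc.jam's read-file: shell out ('TYPE' on Windows,
    'cat' elsewhere) and split the captured output into lines. Python
    could simply use open(); this mirrors the Jam rule's structure."""
    if path not in _cache:
        if sys.platform == "win32":
            cmd = ["cmd", "/c", "type", path]   # TYPE is a cmd builtin
        else:
            cmd = ["cat", path]
        out = subprocess.run(cmd, capture_output=True, text=True).stdout
        _cache[path] = out.splitlines()
    return _cache[path]
```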
- module-context = [ MATCH ^([^.]*)\\..* : $(rulename) ] ; - } - module-context ?= [ CALLER_MODULE $(frames) ] ; - return [ make $(rulename) $(bound-args) : $(module-context) ] ; - } -} - - -# Returns the module name in which the given indirect rule will be invoked. -# -rule get-module ( [indirect-rule] x ) -{ - local m = [ MATCH $(.pattern) : $(x) ] ; - if ! $(m[1]) - { - m = ; - } - return $(m[1]) ; -} - - -# Returns the rulename that will be called when x is invoked. -# -rule get-rule ( [indirect-rule] x ) -{ - local m = [ MATCH $(.pattern) : $(x) ] ; - return $(m[2]) ; -} - - -# Invoke the given indirect-rule. -# -rule call ( [indirect-rule] r args * : * ) -{ - return [ modules.call-in [ get-module $(r) ] : [ get-rule $(r) ] $(args) - : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ] ; -} - - -rule __test__ -{ - import assert ; - - rule foo-barr! ( x ) - { - assert.equal $(x) : x ; - } - - assert.equal [ get-rule [ make foo-barr! ] ] : foo-barr! ; - assert.equal [ get-module [ make foo-barr! ] ] : [ CALLER_MODULE ] ; - - call [ make foo-barr! ] x ; - call [ make foo-barr! x ] ; - call [ make foo-barr! : [ CALLER_MODULE ] ] x ; -} diff --git a/jam-files/boost-build/util/indirect.py b/jam-files/boost-build/util/indirect.py deleted file mode 100644 index 78fa8994..00000000 --- a/jam-files/boost-build/util/indirect.py +++ /dev/null @@ -1,15 +0,0 @@ -# Status: minimally ported. This module is not supposed to be used much -# with Boost.Build/Python. -# -# Copyright 2003 Dave Abrahams -# Copyright 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -from b2.util import call_jam_function, bjam_signature - -def call(*args): - a1 = args[0] - name = a1[0] - a1tail = a1[1:] - call_jam_function(name, *((a1tail,) + args[1:])) diff --git a/jam-files/boost-build/util/logger.py b/jam-files/boost-build/util/logger.py deleted file mode 100644 index de652129..00000000 --- a/jam-files/boost-build/util/logger.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright Pedro Ferreira 2005. Distributed under the Boost -# Software License, Version 1.0. (See accompanying -# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -import sys - -class NullLogger: - def __init__ (self): - self.indent_ = '' - - def log (self, source_name, *args): - if self.on () and self.interesting (source_name): - self.do_log (self.indent_) - for i in args: - self.do_log (i) - self.do_log ('\n') - - def increase_indent (self): - if self.on (): - self.indent_ += ' ' - - def decrease_indent (self): - if self.on () and len (self.indent_) > 4: - self.indent_ = self.indent_ [-4:] - - def do_log (self, *args): - pass - - def interesting (self, source_name): - return False - - def on (self): - return True - -class TextLogger (NullLogger): - def __init__ (self): - NullLogger.__init__ (self) - - def do_log (self, arg): - sys.stdout.write (str (arg)) - - def interesting (self, source_name): - return True - - def on (self): - return True diff --git a/jam-files/boost-build/util/numbers.jam b/jam-files/boost-build/util/numbers.jam deleted file mode 100644 index 665347d3..00000000 --- a/jam-files/boost-build/util/numbers.jam +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright 2001, 2002 Dave Abrahams -# Copyright 2002, 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. 
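indirect.jam's core trick, shown above, is to pack a module and rule name into a single `module%rule` string (plus optional bound arguments) that can be stored in variables and split back apart at call time. A Python sketch of the same dispatch scheme; the registry dict stands in for Jam's module system, and all names are illustrative:

```python
import re

_PATTERN = re.compile(r"^([^%]*)%([^%]+)$")   # module%rule
_registry = {}                                # (module, rule) -> callable

def define(module, rule, fn):
    _registry[(module, rule)] = fn

def make(rule, *bound, module=""):
    """Build an indirect reference: the packed name plus bound args."""
    return (f"{module}%{rule}",) + bound

def call(indirect, *args):
    """Split the packed reference and dispatch, prepending bound args."""
    ref, *bound = indirect
    m = _PATTERN.match(ref)
    if not m:
        raise ValueError(f"expected module%rule, got {ref!r}")
    return _registry[(m.group(1), m.group(2))](*bound, *args)

define("math", "add", lambda a, b: a + b)
r = make("add", 1, module="math")   # -> ('math%add', 1)
print(call(r, 2))                   # -> 3
```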
-# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import errors ; - - -rule trim-leading-zeroes ( value ) -{ - return [ CALC $(value) + 0 ] ; -} - - -rule check ( numbers * ) -{ - for local n in $(numbers) - { - switch $(n) - { - case *[^0-9]* : - errors.error $(n) "in" $(numbers) : is not a number ; - } - } -} - - -rule increment ( number ) -{ - return [ CALC $(number) + 1 ] ; -} - - -rule decrement ( number ) -{ - return [ CALC $(number) - 1 ] ; -} - - -rule range ( start finish ? : step ? ) -{ - if ! $(finish) - { - finish = $(start) ; - start = 1 ; - } - step ?= 1 ; - - check $(start) $(finish) $(step) ; - - if $(finish) != 0 - { - local result ; - while [ less $(start) $(finish) ] || $(start) = $(finish) - { - result += $(start) ; - start = [ CALC $(start) + $(step) ] ; - } - return $(result) ; - } -} - - -rule less ( n1 n2 ) -{ - switch [ CALC $(n2) - $(n1) ] - { - case [1-9]* : return true ; - } -} - - -rule log10 ( number ) -{ - switch $(number) - { - case *[^0-9]* : errors.error $(number) is not a number ; - case 0 : errors.error can't take log of zero ; - case [1-9] : return 0 ; - case [1-9]? : return 1 ; - case [1-9]?? : return 2 ; - case [1-9]??? : return 3 ; - case [1-9]???? : return 4 ; - case [1-9]????? : return 5 ; - case [1-9]?????? : return 6 ; - case [1-9]??????? : return 7 ; - case [1-9]???????? : return 8 ; - case [1-9]????????? : return 9 ; - case * : - { - import sequence ; - import string ; - local chars = [ string.chars $(number) ] ; - while $(chars[1]) = 0 - { - chars = $(chars[2-]) ; - } - if ! $(chars) - { - errors.error can't take log of zero ; - } - else - { - return [ decrement [ sequence.length $(chars) ] ] ; - } - } - } -} - - -rule __test__ ( ) -{ - import assert ; - - assert.result 1 : increment 0 ; - assert.result 2 : increment 1 ; - assert.result 1 : decrement 2 ; - assert.result 0 : decrement 1 ; - assert.result 50 : increment 49 ; - assert.result 49 : decrement 50 ; - assert.result 99 : increment 98 ; - assert.result 99 : decrement 100 ; - assert.result 100 : increment 99 ; - assert.result 999 : decrement 1000 ; - assert.result 1000 : increment 999 ; - - assert.result 1 2 3 : range 3 ; - assert.result 1 2 3 4 5 6 7 8 9 10 11 12 : range 12 ; - assert.result 3 4 5 6 7 8 9 10 11 : range 3 11 ; - assert.result : range 0 ; - assert.result 1 4 7 10 : range 10 : 3 ; - assert.result 2 4 6 8 10 : range 2 10 : 2 ; - assert.result 25 50 75 100 : range 25 100 : 25 ; - - assert.result 0 : trim-leading-zeroes 0 ; - assert.result 1234 : trim-leading-zeroes 1234 ; - assert.result 123456 : trim-leading-zeroes 0000123456 ; - assert.result 1000123456 : trim-leading-zeroes 1000123456 ; - assert.result 10000 : trim-leading-zeroes 10000 ; - assert.result 10000 : trim-leading-zeroes 00010000 ; - - assert.true less 1 2 ; - assert.true less 1 12 ; - assert.true less 1 21 ; - assert.true less 005 217 ; - assert.false less 0 0 ; - assert.false less 03 3 ; - assert.false less 3 03 ; - assert.true less 005 217 ; - assert.true less 0005 217 ; - assert.true less 5 00217 ; - - # TEMPORARY disabled, because nested "try"/"catch" do not work and I do no - # have the time to fix that right now. - if $(0) - { - try ; - { - decrement 0 ; - } - catch can't decrement zero! 
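numbers.jam treats numbers as strings throughout, so its `log10` above is really a significant-digit count: the switch cases return one less than the digit count, and the fallback strips leading zeros before counting. Equivalently, as a sketch in Python:

```python
def jam_log10(number: str) -> int:
    """Sketch of numbers.jam's log10: the integer part of log10 is
    (count of significant digits - 1), computed on the string just
    as the Jam switch cases do."""
    if not number.isdigit():
        raise ValueError(f"{number} is not a number")
    digits = number.lstrip("0")    # the fallback case for leading zeros
    if not digits:
        raise ValueError("can't take log of zero")
    return len(digits) - 1

assert jam_log10("9") == 0
assert jam_log10("00125") == 2
assert jam_log10("12345678901") == 10
```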
; - - try ; - { - check foo ; - } - catch : not a number ; - - try ; - { - increment foo ; - } - catch : not a number ; - - try ; - { - log10 0 ; - } - catch can't take log of zero ; - - try ; - { - log10 000 ; - } - catch can't take log of zero ; - - } - - assert.result 0 : log10 1 ; - assert.result 0 : log10 9 ; - assert.result 1 : log10 10 ; - assert.result 1 : log10 99 ; - assert.result 2 : log10 100 ; - assert.result 2 : log10 101 ; - assert.result 2 : log10 125 ; - assert.result 2 : log10 999 ; - assert.result 3 : log10 1000 ; - assert.result 10 : log10 12345678901 ; - - for local x in [ range 75 110 : 5 ] - { - for local y in [ range $(x) 111 : 3 ] - { - if $(x) != $(y) - { - assert.true less $(x) $(y) ; - } - } - } - - for local x in [ range 90 110 : 2 ] - { - for local y in [ range 80 $(x) : 4 ] - { - assert.false less $(x) $(y) ; - } - } -} diff --git a/jam-files/boost-build/util/option.jam b/jam-files/boost-build/util/option.jam deleted file mode 100644 index f6dc3752..00000000 --- a/jam-files/boost-build/util/option.jam +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) 2005 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import modules ; - -# Set a value for a named option, to be used when not overridden on the command -# line. -rule set ( name : value ? ) -{ - .option.$(name) = $(value) ; -} - -rule get ( name : default-value ? : implied-value ? ) -{ - local m = [ MATCH --$(name)=(.*) : [ modules.peek : ARGV ] ] ; - if $(m) - { - return $(m[1]) ; - } - else - { - m = [ MATCH (--$(name)) : [ modules.peek : ARGV ] ] ; - if $(m) && $(implied-value) - { - return $(implied-value) ; - } - else if $(.option.$(name)) - { - return $(.option.$(name)) ; - } - else - { - return $(default-value) ; - } - } -} - - -# Check command-line args as soon as possible. For each option try to load -# module named after option. Is that succeeds, invoke 'process' rule in the -# module. The rule may return "true" to indicate that the regular build process -# should not be attempted. -# -# Options take the general form of: --<name>[=<value>] [<value>] -# -rule process ( ) -{ - local ARGV = [ modules.peek : ARGV ] ; - local BOOST_BUILD_PATH = [ modules.peek : BOOST_BUILD_PATH ] ; - - local dont-build ; - local args = $(ARGV) ; - while $(args) - { - local arg = [ MATCH ^--(.*) : $(args[1]) ] ; - while $(args[2-]) && ! $(arg) - { - args = $(args[2-]) ; - arg = [ MATCH ^--(.*) : $(args[1]) ] ; - } - args = $(args[2-]) ; - - if $(arg) - { - local split = [ MATCH ^(([^-=]+)[^=]*)(=?)(.*)$ : $(arg) ] ; - local full-name = $(split[1]) ; - local prefix = $(split[2]) ; - local values ; - - if $(split[3]) - { - values = $(split[4]) ; - } - if $(args) && ! [ MATCH ^(--).* : $(args[1]) ] - { - values += $(args[1]) ; - args = $(args[2-]) ; - } - - # Jook in options subdirectories of BOOST_BUILD_PATH for modules - # matching the full option name and then its prefix. - local plugin-dir = options ; - local option-files = [ GLOB $(plugin-dir:D=$(BOOST_BUILD_PATH)) : - $(full-name).jam $(prefix).jam ] ; - - if $(option-files) - { - # Load the file into a module named for the option. - local f = $(option-files[1]) ; - local module-name = --$(f:D=:S=) ; - modules.load $(module-name) : $(f:D=) : $(f:D) ; - - # If there is a process rule, call it with the full option name - # and its value (if any). If there was no "=" in the option, the - # value will be empty. 
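The `option.get` rule above resolves a value in a fixed order: an explicit `--name=value` on the command line wins, then a bare `--name` (returning the caller's implied value), then anything stored via `option.set`, then the supplied default. (The Python port in option.py below differs in one detail: it takes the last `--name=value` match, where the Jam rule takes the first.) A sketch of that lookup order:

```python
import re
import sys

_options = {}   # values stored via the 'set' half of the module

def set_option(name, value=None):
    _options[name] = value

def get_option(name, default=None, implied=None, argv=None):
    """Sketch of option.jam's get: --name=value, then bare --name
    with an implied value, then the stored value, then the default."""
    argv = sys.argv[1:] if argv is None else argv
    for arg in argv:
        m = re.match(rf"--{re.escape(name)}=(.*)", arg)
        if m:
            return m.group(1)      # first match wins, as in the Jam rule
    if implied is not None and f"--{name}" in argv:
        return implied
    if _options.get(name) is not None:
        return _options[name]
    return default

argv = ["--with-boost=/opt/boost", "--debug-configuration"]
print(get_option("with-boost", argv=argv))                        # /opt/boost
print(get_option("debug-configuration", implied="on", argv=argv)) # on
print(get_option("jobs", default="1", argv=argv))                 # 1
```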
- if process in [ RULENAMES $(module-name) ] - { - dont-build += [ modules.call-in $(module-name) : process - --$(full-name) : $(values) ] ; - } - } - } - } - - return $(dont-build) ; -} diff --git a/jam-files/boost-build/util/option.py b/jam-files/boost-build/util/option.py deleted file mode 100644 index 47d6abdf..00000000 --- a/jam-files/boost-build/util/option.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2005-2010 Vladimir Prus. -# -# Use, modification and distribution is subject to the Boost Software -# License Version 1.0. (See accompanying file LICENSE_1_0.txt or -# http://www.boost.org/LICENSE_1_0.txt) - -import sys -import re -import b2.util.regex - -options = {} - -# Set a value for a named option, to be used when not overridden on the command -# line. -def set(name, value=None): - - global options - - options[name] = value - -def get(name, default_value=None, implied_value=None): - - global options - - matches = b2.util.regex.transform(sys.argv, "--" + re.escape(name) + "=(.*)") - if matches: - return matches[-1] - else: - m = b2.util.regex.transform(sys.argv, "--(" + re.escape(name) + ")") - if m and implied_value: - return implied_value - elif options.has_key(name) and options[name] != None: - return options[name] - else: - return default_value diff --git a/jam-files/boost-build/util/order.jam b/jam-files/boost-build/util/order.jam deleted file mode 100644 index a74fc8c8..00000000 --- a/jam-files/boost-build/util/order.jam +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright (C) 2003 Vladimir Prus -# Use, modification, and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy -# at http://www.boost.org/LICENSE_1_0.txt) - -# This module defines a class which allows to order arbitrary object with -# regard to arbitrary binary relation. -# -# The primary use case is the gcc toolset, which is sensitive to library order: -# if library 'a' uses symbols from library 'b', then 'a' must be present before -# 'b' on the linker's command line. -# -# This requirement can be lifted for gcc with GNU ld, but for gcc with Solaris -# LD (and for Solaris toolset as well), the order always matters. -# -# So, we need to store order requirements and then order libraries according to -# them. It is not possible to use the dependency graph as order requirements. -# What we need is a "use symbols" relationship while dependency graph provides -# the "needs to be updated" relationship. -# -# For example:: -# lib a : a.cpp b; -# lib b ; -# -# For static linking, library 'a' need not depend on 'b'. However, it should -# still come before 'b' on the command line. - -class order -{ - rule __init__ ( ) - { - } - - # Adds the constraint that 'first' should preceede 'second'. - rule add-pair ( first second ) - { - .constraits += $(first)--$(second) ; - } - NATIVE_RULE class@order : add-pair ; - - # Given a list of objects, reorder them so that the constraints specified by - # 'add-pair' are satisfied. - # - # The algorithm was adopted from an awk script by Nikita Youshchenko - # (yoush at cs dot msu dot su) - rule order ( objects * ) - { - # The algorithm used is the same is standard transitive closure, except - # that we're not keeping in-degree for all vertices, but rather removing - # edges. - local result ; - if $(objects) - { - local constraints = [ eliminate-unused-constraits $(objects) ] ; - - # Find some library that nobody depends upon and add it to the - # 'result' array. 
- local obj ; - while $(objects) - { - local new_objects ; - while $(objects) - { - obj = $(objects[1]) ; - if [ has-no-dependents $(obj) : $(constraints) ] - { - # Emulate break ; - new_objects += $(objects[2-]) ; - objects = ; - } - else - { - new_objects += $(obj) ; - obj = ; - objects = $(objects[2-]) ; - } - } - - if ! $(obj) - { - errors.error "Circular order dependencies" ; - } - # No problem with placing first. - result += $(obj) ; - # Remove all contraints where 'obj' comes first, since they are - # already satisfied. - constraints = [ remove-satisfied $(constraints) : $(obj) ] ; - - # Add the remaining objects for further processing on the next - # iteration - objects = $(new_objects) ; - } - - } - return $(result) ; - } - NATIVE_RULE class@order : order ; - - # Eliminate constraints which mention objects not in 'objects'. In - # graph-theory terms, this is finding a subgraph induced by ordered - # vertices. - rule eliminate-unused-constraits ( objects * ) - { - local result ; - for local c in $(.constraints) - { - local m = [ MATCH (.*)--(.*) : $(c) ] ; - if $(m[1]) in $(objects) && $(m[2]) in $(objects) - { - result += $(c) ; - } - } - return $(result) ; - } - - # Returns true if there's no constraint in 'constaraints' where 'obj' comes - # second. - rule has-no-dependents ( obj : constraints * ) - { - local failed ; - while $(constraints) && ! $(failed) - { - local c = $(constraints[1]) ; - local m = [ MATCH (.*)--(.*) : $(c) ] ; - if $(m[2]) = $(obj) - { - failed = true ; - } - constraints = $(constraints[2-]) ; - } - if ! $(failed) - { - return true ; - } - } - - rule remove-satisfied ( constraints * : obj ) - { - local result ; - for local c in $(constraints) - { - local m = [ MATCH (.*)--(.*) : $(c) ] ; - if $(m[1]) != $(obj) - { - result += $(c) ; - } - } - return $(result) ; - } -} - - -rule __test__ ( ) -{ - import "class" : new ; - import assert ; - - c1 = [ new order ] ; - $(c1).add-pair l1 l2 ; - - assert.result l1 l2 : $(c1).order l1 l2 ; - assert.result l1 l2 : $(c1).order l2 l1 ; - - $(c1).add-pair l2 l3 ; - assert.result l1 l2 : $(c1).order l2 l1 ; - $(c1).add-pair x l2 ; - assert.result l1 l2 : $(c1).order l2 l1 ; - assert.result l1 l2 l3 : $(c1).order l2 l3 l1 ; -} diff --git a/jam-files/boost-build/util/order.py b/jam-files/boost-build/util/order.py deleted file mode 100644 index 4e67b3f1..00000000 --- a/jam-files/boost-build/util/order.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (C) 2003 Vladimir Prus -# Use, modification, and distribution is subject to the Boost Software -# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy -# at http://www.boost.org/LICENSE_1_0.txt) - -class Order: - """Allows ordering arbitrary objects with regard to arbitrary binary relation. - - The primary use case is the gcc toolset, which is sensitive to - library order: if library 'a' uses symbols from library 'b', - then 'a' must be present before 'b' on the linker's command line. - - This requirement can be lifted for gcc with GNU ld, but for gcc with - Solaris LD (and for Solaris toolset as well), the order always matters. - - So, we need to store order requirements and then order libraries - according to them. It it not possible to use dependency graph as - order requirements. What we need is "use symbols" relationship - while dependency graph provides "needs to be updated" relationship. - - For example:: - lib a : a.cpp b; - lib b ; - - For static linking, the 'a' library need not depend on 'b'. 
However, it - still should come before 'b' on the command line. - """ - - def __init__ (self): - self.constraints_ = [] - - def add_pair (self, first, second): - """ Adds the constraint that 'first' should precede 'second'. - """ - self.constraints_.append ((first, second)) - - def order (self, objects): - """ Given a list of objects, reorder them so that the constains specified - by 'add_pair' are satisfied. - - The algorithm was adopted from an awk script by Nikita Youshchenko - (yoush at cs dot msu dot su) - """ - # The algorithm used is the same is standard transitive closure, - # except that we're not keeping in-degree for all vertices, but - # rather removing edges. - result = [] - - if not objects: - return result - - constraints = self.__eliminate_unused_constraits (objects) - - # Find some library that nobody depends upon and add it to - # the 'result' array. - obj = None - while objects: - new_objects = [] - while objects: - obj = objects [0] - - if self.__has_no_dependents (obj, constraints): - # Emulate break ; - new_objects.extend (objects [1:]) - objects = [] - - else: - new_objects.append (obj) - obj = None - objects = objects [1:] - - if not obj: - raise BaseException ("Circular order dependencies") - - # No problem with placing first. - result.append (obj) - - # Remove all containts where 'obj' comes first, - # since they are already satisfied. - constraints = self.__remove_satisfied (constraints, obj) - - # Add the remaining objects for further processing - # on the next iteration - objects = new_objects - - return result - - def __eliminate_unused_constraits (self, objects): - """ Eliminate constraints which mention objects not in 'objects'. - In graph-theory terms, this is finding subgraph induced by - ordered vertices. - """ - result = [] - for c in self.constraints_: - if c [0] in objects and c [1] in objects: - result.append (c) - - return result - - def __has_no_dependents (self, obj, constraints): - """ Returns true if there's no constraint in 'constraints' where - 'obj' comes second. - """ - failed = False - while constraints and not failed: - c = constraints [0] - - if c [1] == obj: - failed = True - - constraints = constraints [1:] - - return not failed - - def __remove_satisfied (self, constraints, obj): - result = [] - for c in constraints: - if c [0] != obj: - result.append (c) - - return result diff --git a/jam-files/boost-build/util/os.jam b/jam-files/boost-build/util/os.jam deleted file mode 100644 index daef27f7..00000000 --- a/jam-files/boost-build/util/os.jam +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2001, 2002, 2003, 2005 Dave Abrahams -# Copyright 2006 Rene Rivera -# Copyright 2003, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import modules ; -import string ; - - -# Return the value(s) of the given environment variable(s) at the time bjam was -# invoked. -rule environ ( variable-names + ) -{ - return [ modules.peek .ENVIRON : $(variable-names) ] ; -} - -.name = [ modules.peek : OS ] ; -.platform = [ modules.peek : OSPLAT ] ; -.version = [ modules.peek : OSVER ] ; - - -local rule constant ( c : os ? ) -{ - os ?= $(.name) ; - # First look for a platform-specific name, then the general value. - local variables = .$(c)-$(os) .$(c) ; - local result = $($(variables)) ; - return $(result[1]) ; -} - -rule get-constant ( os ? ) -{ - # Find the name of the constant being accessed, which is equal to the name - # used to invoke us. 
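os.jam's `constant` rule resolves each constant by trying a platform-specific variable (`.name-OS`) before the generic default (`.name`), which is how `path-separator` becomes `;` only on NT. A table-driven Python sketch of that two-step lookup; the entries echo per-platform values defined just below:

```python
# Constant table: (name, os) -> value; os=None holds the default.
_constants = {
    ("shared-library-path-variable", None):     "LD_LIBRARY_PATH",
    ("shared-library-path-variable", "NT"):     "PATH",
    ("shared-library-path-variable", "MACOSX"): "DYLD_LIBRARY_PATH",
    ("shared-library-path-variable", "AIX"):    "LIBPATH",
    ("path-separator", None):                   ":",
    ("path-separator", "NT"):                   ";",
    ("executable-suffix", None):                "",
    ("executable-suffix", "NT"):                ".exe",
}

def constant(name, os=None):
    """First look for a platform-specific value, then the default."""
    if (name, os) in _constants:
        return _constants[(name, os)]
    return _constants[(name, None)]

print(constant("path-separator", "NT"))      # ;
print(constant("path-separator", "MACOSX"))  # : (falls back to default)
```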
- local bt = [ BACKTRACE 1 ] ; - local rulename = [ MATCH ([^.]*)$ : $(bt[4]) ] ; - return [ constant $(rulename) : $(os) ] ; -} - - -# export all the common constants -.constants = name platform version shared-library-path-variable path-separator executable-path-variable executable-suffix ; -for local constant in $(.constants) -{ - IMPORT $(__name__) : get-constant : $(__name__) : $(constant) ; -} -EXPORT $(__name__) : $(.constants) ; - -.executable-path-variable-NT = PATH ; -# On Windows the case and capitalization of PATH is not always predictable, so -# let's find out what variable name was really set. -if $(.name) = NT -{ - for local n in [ VARNAMES .ENVIRON ] - { - if $(n:L) = path - { - .executable-path-variable-NT = $(n) ; - } - } -} - -# Specific constants for various platforms. There's no need to define any -# constant whose value would be the same as the default, below. -.shared-library-path-variable-NT = $(.executable-path-variable-NT) ; -.path-separator-NT = ";" ; -.expand-variable-prefix-NT = % ; -.expand-variable-suffix-NT = % ; -.executable-suffix-NT = .exe ; - -.shared-library-path-variable-CYGWIN = PATH ; - -.shared-library-path-variable-MACOSX = DYLD_LIBRARY_PATH ; - -.shared-library-path-variable-AIX = LIBPATH ; - -# Default constants -.shared-library-path-variable = LD_LIBRARY_PATH ; -.path-separator = ":" ; -.expand-variable-prefix = $ ; -.expand-variable-suffix = "" ; -.executable-path-variable = PATH ; -.executable-suffix = "" ; - - -# Return a list of the directories in the PATH. Yes, that information is (sort -# of) available in the global module, but jam code can change those values, and -# it isn't always clear what case/capitalization to use when looking. This rule -# is a more reliable way to get there. -rule executable-path ( ) -{ - return [ string.words [ environ [ constant executable-path-variable ] ] - : [ constant path-separator ] ] ; -} - - -# Initialize the list of home directories for the current user depending on the -# OS. -if $(.name) = NT -{ - local home = [ environ HOMEDRIVE HOMEPATH ] ; - .home-directories = $(home[1])$(home[2]) [ environ HOME ] [ environ USERPROFILE ] ; -} -else -{ - .home-directories = [ environ HOME ] ; -} - - -# Can't use 'constant' mechanism because it only returns 1-element values. -rule home-directories ( ) -{ - return $(.home-directories) ; -} - - -# Return the string needed to represent the expansion of the named shell -# variable. -rule expand-variable ( variable ) -{ - local prefix = [ constant expand-variable-prefix ] ; - local suffix = [ constant expand-variable-suffix ] ; - return $(prefix)$(variable)$(suffix) ; -} - - -# Returns true if running on windows, whether in cygwin or not. -rule on-windows ( ) -{ - local result ; - if [ modules.peek : NT ] - { - result = true ; - } - else if [ modules.peek : UNIX ] - { - switch [ modules.peek : JAMUNAME ] - { - case CYGWIN* : - { - result = true ; - } - } - } - return $(result) ; -} - - -if ! [ on-windows ] -{ - .on-unix = 1 ; -} - - -rule on-unix -{ - return $(.on-unix) ; -} - - -rule __test__ -{ - import assert ; - if ! ( --quiet in [ modules.peek : ARGV ] ) - { - ECHO os: name= [ name ] ; - ECHO os: version= [ version ] ; - } - assert.true name ; -} diff --git a/jam-files/boost-build/util/os_j.py b/jam-files/boost-build/util/os_j.py deleted file mode 100644 index f44cca62..00000000 --- a/jam-files/boost-build/util/os_j.py +++ /dev/null @@ -1,19 +0,0 @@ -# Status: stub, just enough to make tests work. -# -# Named os_j to avoid conflicts with standard 'os'. 
See -# project.py:import for special-casing. -# -# Copyright 2001, 2002, 2003, 2005 Dave Abrahams -# Copyright 2006 Rene Rivera -# Copyright 2003, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import bjam - -__OS = bjam.call("peek", [], "OS")[0] - -# Return Jam's name of OS to prevent existing code from burning -# when faced with Python naming -def name(): - return __OS diff --git a/jam-files/boost-build/util/path.jam b/jam-files/boost-build/util/path.jam deleted file mode 100644 index ea26b816..00000000 --- a/jam-files/boost-build/util/path.jam +++ /dev/null @@ -1,934 +0,0 @@ -# Copyright Vladimir Prus 2002-2006. -# Copyright Dave Abrahams 2003-2004. -# Copyright Rene Rivera 2003-2006. -# -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at -# http://www.boost.org/LICENSE_1_0.txt) - -# Performs various path manipulations. Paths are always in a 'normalized' -# representation. In it, a path may be either: -# -# - '.', or -# -# - ['/'] [ ( '..' '/' )* (token '/')* token ] -# -# In plain english, path can be rooted, '..' elements are allowed only at the -# beginning, and it never ends in slash, except for path consisting of slash -# only. - -import errors ; -import modules ; -import regex ; -import sequence ; -import set ; -import version ; - - -os = [ modules.peek : OS ] ; -if [ modules.peek : UNIX ] -{ - local uname = [ modules.peek : JAMUNAME ] ; - switch $(uname) - { - case CYGWIN* : os = CYGWIN ; - case * : os = UNIX ; - } -} - - -# Converts the native path into normalized form. -# -rule make ( native ) -{ - return [ make-$(os) $(native) ] ; -} - - -# Builds native representation of the path. -# -rule native ( path ) -{ - return [ native-$(os) $(path) ] ; -} - - -# Tests if a path is rooted. -# -rule is-rooted ( path ) -{ - return [ MATCH "^(/)" : $(path) ] ; -} - - -# Tests if a path has a parent. -# -rule has-parent ( path ) -{ - if $(path) != / - { - return 1 ; - } - else - { - return ; - } -} - - -# Returns the path without any directory components. -# -rule basename ( path ) -{ - return [ MATCH "([^/]+)$" : $(path) ] ; -} - - -# Returns parent directory of the path. If no parent exists, error is issued. -# -rule parent ( path ) -{ - if [ has-parent $(path) ] - { - if $(path) = . - { - return .. ; - } - else - { - # Strip everything at the end of path up to and including the last - # slash. - local result = [ regex.match "((.*)/)?([^/]+)" : $(path) : 2 3 ] ; - - # Did we strip what we shouldn't? - if $(result[2]) = ".." - { - return $(path)/.. ; - } - else - { - if ! $(result[1]) - { - if [ is-rooted $(path) ] - { - result = / ; - } - else - { - result = . ; - } - } - return $(result[1]) ; - } - } - } - else - { - errors.error "Path '$(path)' has no parent" ; - } -} - - -# Returns path2 such that "[ join path path2 ] = .". The path may not contain -# ".." element or be rooted. -# -rule reverse ( path ) -{ - if $(path) = . - { - return $(path) ; - } - else - { - local tokens = [ regex.split $(path) "/" ] ; - local tokens2 ; - for local i in $(tokens) - { - tokens2 += .. ; - } - return [ sequence.join $(tokens2) : "/" ] ; - } -} - - -# Concatenates the passed path elements. Generates an error if any element other -# than the first one is rooted. Skips any empty or undefined path elements. -# -rule join ( elements + ) -{ - if ! 
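path.jam's `parent` above has two subtleties on its normalized paths: a basename of `..` is never stripped (normalized paths only carry `..` at the front, so the parent of `..` is `../..`), and stripping the only element yields `/` or `.` depending on whether the path is rooted. `reverse` simply emits one `..` per element. A sketch of both, under the module's normalized-path convention:

```python
def parent(path):
    """Sketch of path.jam's parent: strip the basename, except that a
    basename of '..' is never stripped away."""
    if path == "/":
        raise ValueError(f"Path '{path}' has no parent")
    if path == ".":
        return ".."
    head, _, tail = path.rpartition("/")
    if tail == "..":
        return path + "/.."            # parent of '..' is '../..'
    if not head:
        return "/" if path.startswith("/") else "."
    return head

def reverse(path):
    """path.jam's reverse: a path p2 such that join(path, p2) == '.'.
    The input may not be rooted or contain '..' elements."""
    if path == ".":
        return path
    return "/".join(".." for _ in path.split("/"))

assert parent("a/b/c") == "a/b"
assert parent("a") == "."
assert parent("..") == "../.."
assert reverse("a/b") == "../.."
```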
$(elements[2-]) - { - return $(elements[1]) ; - } - else - { - for local e in $(elements[2-]) - { - if [ is-rooted $(e) ] - { - errors.error only the first element may be rooted ; - } - } - if [ version.check-jam-version 3 1 17 ] - { - return [ NORMALIZE_PATH "$(elements)" ] ; - } - else - { - # Boost Jam prior to version 3.1.17 had problems with its - # NORMALIZE_PATH rule in case you passed it a leading backslash - # instead of a slash, in some cases when you sent it an empty - # initial path element and possibly some others. At least some of - # those cases were being hit and relied upon when calling this rule - # from the path.make-NT rule. - if ! $(elements[1]) && $(elements[2]) - { - return [ NORMALIZE_PATH "/" "$(elements[2-])" ] ; - } - else - { - return [ NORMALIZE_PATH "$(elements)" ] ; - } - } - } -} - - -# If 'path' is relative, it is rooted at 'root'. Otherwise, it is unchanged. -# -rule root ( path root ) -{ - if [ is-rooted $(path) ] - { - return $(path) ; - } - else - { - return [ join $(root) $(path) ] ; - } -} - - -# Returns the current working directory. -# -rule pwd ( ) -{ - if ! $(.pwd) - { - .pwd = [ make [ PWD ] ] ; - } - return $(.pwd) ; -} - - -# Returns the list of files matching the given pattern in the specified -# directory. Both directories and patterns are supplied as portable paths. Each -# pattern should be non-absolute path, and can't contain "." or ".." elements. -# Each slash separated element of pattern can contain the following special -# characters: -# - '?', which match any character -# - '*', which matches arbitrary number of characters. -# A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3 if and -# only if e1 matches p1, e2 matches p2 and so on. -# -# For example: -# [ glob . : *.cpp ] -# [ glob . : */build/Jamfile ] -# -rule glob ( dirs * : patterns + : exclude-patterns * ) -{ - local result ; - local real-patterns ; - local real-exclude-patterns ; - for local d in $(dirs) - { - for local p in $(patterns) - { - local pattern = [ path.root $(p) $(d) ] ; - real-patterns += [ path.native $(pattern) ] ; - } - - for local p in $(exclude-patterns) - { - local pattern = [ path.root $(p) $(d) ] ; - real-exclude-patterns += [ path.native $(pattern) ] ; - } - } - - local inc = [ GLOB-RECURSIVELY $(real-patterns) ] ; - inc = [ sequence.transform NORMALIZE_PATH : $(inc) ] ; - local exc = [ GLOB-RECURSIVELY $(real-exclude-patterns) ] ; - exc = [ sequence.transform NORMALIZE_PATH : $(exc) ] ; - - return [ sequence.transform path.make : [ set.difference $(inc) : $(exc) ] ] - ; -} - - -# Recursive version of GLOB. Builds the glob of files while also searching in -# the subdirectories of the given roots. An optional set of exclusion patterns -# will filter out the matching entries from the result. The exclusions also -# apply to the subdirectory scanning, such that directories that match the -# exclusion patterns will not be searched. -# -rule glob-tree ( roots * : patterns + : exclude-patterns * ) -{ - return [ sequence.transform path.make : [ .glob-tree [ sequence.transform - path.native : $(roots) ] : $(patterns) : $(exclude-patterns) ] ] ; -} - - -local rule .glob-tree ( roots * : patterns * : exclude-patterns * ) -{ - local excluded ; - if $(exclude-patterns) - { - excluded = [ GLOB $(roots) : $(exclude-patterns) ] ; - } - local result = [ set.difference [ GLOB $(roots) : $(patterns) ] : - $(excluded) ] ; - local subdirs ; - for local d in [ set.difference [ GLOB $(roots) : * ] : $(excluded) ] - { - if ! ( $(d:D=) in . .. ) && ! 
[ CHECK_IF_FILE $(d) ] - { - subdirs += $(d) ; - } - } - if $(subdirs) - { - result += [ .glob-tree $(subdirs) : $(patterns) : $(exclude-patterns) ] - ; - } - return $(result) ; -} - - -# Returns true is the specified file exists. -# -rule exists ( file ) -{ - return [ path.glob $(file:D) : $(file:D=) ] ; -} -NATIVE_RULE path : exists ; - - -# Find out the absolute name of path and returns the list of all the parents, -# starting with the immediate one. Parents are returned as relative names. If -# 'upper_limit' is specified, directories above it will be pruned. -# -rule all-parents ( path : upper_limit ? : cwd ? ) -{ - cwd ?= [ pwd ] ; - local path_ele = [ regex.split [ root $(path) $(cwd) ] "/" ] ; - - if ! $(upper_limit) - { - upper_limit = / ; - } - local upper_ele = [ regex.split [ root $(upper_limit) $(cwd) ] "/" ] ; - - # Leave only elements in 'path_ele' below 'upper_ele'. - while $(path_ele) && ( $(upper_ele[1]) = $(path_ele[1]) ) - { - upper_ele = $(upper_ele[2-]) ; - path_ele = $(path_ele[2-]) ; - } - - # Have all upper elements been removed ? - if $(upper_ele) - { - errors.error "$(upper_limit) is not prefix of $(path)" ; - } - - # Create the relative paths to parents, number of elements in 'path_ele'. - local result ; - for local i in $(path_ele) - { - path = [ parent $(path) ] ; - result += $(path) ; - } - return $(result) ; -} - - -# Search for 'pattern' in parent directories of 'dir', up till and including -# 'upper_limit', if it is specified, or till the filesystem root otherwise. -# -rule glob-in-parents ( dir : patterns + : upper-limit ? ) -{ - local result ; - local parent-dirs = [ all-parents $(dir) : $(upper-limit) ] ; - - while $(parent-dirs) && ! $(result) - { - result = [ glob $(parent-dirs[1]) : $(patterns) ] ; - parent-dirs = $(parent-dirs[2-]) ; - } - return $(result) ; -} - - -# Assuming 'child' is a subdirectory of 'parent', return the relative path from -# 'parent' to 'child'. -# -rule relative ( child parent : no-error ? ) -{ - local not-a-child ; - if $(parent) = "." - { - return $(child) ; - } - else - { - local split1 = [ regex.split $(parent) / ] ; - local split2 = [ regex.split $(child) / ] ; - - while $(split1) - { - if $(split1[1]) = $(split2[1]) - { - split1 = $(split1[2-]) ; - split2 = $(split2[2-]) ; - } - else - { - not-a-child = true ; - split1 = ; - } - } - if $(split2) - { - if $(not-a-child) - { - if $(no-error) - { - return not-a-child ; - } - else - { - errors.error $(child) is not a subdir of $(parent) ; - } - } - else - { - return [ join $(split2) ] ; - } - } - else - { - return "." ; - } - } -} - - -# Returns the minimal path to path2 that is relative path1. -# -rule relative-to ( path1 path2 ) -{ - local root_1 = [ regex.split [ reverse $(path1) ] / ] ; - local split1 = [ regex.split $(path1) / ] ; - local split2 = [ regex.split $(path2) / ] ; - - while $(split1) && $(root_1) - { - if $(split1[1]) = $(split2[1]) - { - root_1 = $(root_1[2-]) ; - split1 = $(split1[2-]) ; - split2 = $(split2[2-]) ; - } - else - { - split1 = ; - } - } - return [ join . $(root_1) $(split2) ] ; -} - - -# Returns the list of paths which are used by the operating system for looking -# up programs. -# -rule programs-path ( ) -{ - local result ; - local raw = [ modules.peek : PATH Path path ] ; - for local p in $(raw) - { - if $(p) - { - result += [ path.make $(p) ] ; - } - } - return $(result) ; -} - -rule makedirs ( path ) -{ - local result = true ; - local native = [ native $(path) ] ; - if ! 
[ exists $(native) ] - { - if [ makedirs [ parent $(path) ] ] - { - if ! [ MAKEDIR $(native) ] - { - errors.error "Could not create directory '$(path)'" ; - result = ; - } - } - } - return $(result) ; -} - -# Converts native Windows paths into our internal canonic path representation. -# Supports 'invalid' paths containing multiple successive path separator -# characters. -# -# TODO: Check and if needed add support for Windows 'X:file' path format where -# the file is located in the current folder on drive X. -# -rule make-NT ( native ) -{ - local result ; - - if [ version.check-jam-version 3 1 17 ] - { - result = [ NORMALIZE_PATH $(native) ] ; - } - else - { - # This old implementation is really fragile due to a not so clear way - # NORMALIZE_PATH rule worked in Boost.Jam versions prior to 3.1.17. E.g. - # path.join would mostly ignore empty path elements but would root the - # joined path in case the initial two path elements were empty or some - # similar accidental wierdness. - result = [ path.join [ regex.split $(native) "[/\\]" ] ] ; - } - - # We need to add an extra '/' in front in case this is a rooted Windows path - # starting with a drive letter and not a path separator character since the - # builtin NORMALIZE_PATH rule has no knowledge of this leading drive letter - # and treats it as a regular folder name. - if [ regex.match "(^.:)" : $(native) ] - { - result = /$(result) ; - } - - return $(result) ; -} - - -rule native-NT ( path ) -{ - local result ; - if [ is-rooted $(path) ] && ! [ regex.match "^/(.:)" : $(path) ] - { - result = $(path) ; - } - else - { - result = [ MATCH "^/?(.*)" : $(path) ] ; - } - result = [ sequence.join [ regex.split $(result) "/" ] : "\\" ] ; - return $(result) ; -} - - -rule make-UNIX ( native ) -{ - # VP: I have no idea now 'native' can be empty here! But it can! - if ! $(native) - { - errors.error "Empty path passed to 'make-UNIX'" ; - } - else - { - return [ NORMALIZE_PATH $(native:T) ] ; - } -} - - -rule native-UNIX ( path ) -{ - return $(path) ; -} - - -rule make-CYGWIN ( path ) -{ - return [ make-NT $(path) ] ; -} - - -rule native-CYGWIN ( path ) -{ - local result = $(path) ; - if [ regex.match "(^/.:)" : $(path) ] # Windows absolute path. - { - result = [ MATCH "^/?(.*)" : $(path) ] ; # Remove leading '/'. - } - return [ native-UNIX $(result) ] ; -} - - -# split-path-VMS: splits input native path into device dir file (each part is -# optional). -# -# example: -# -# dev:[dir]file.c => dev: [dir] file.c -# -rule split-path-VMS ( native ) -{ - local matches = [ MATCH ([a-zA-Z0-9_-]+:)?(\\[[^\]]*\\])?(.*)?$ : $(native) ] ; - local device = $(matches[1]) ; - local dir = $(matches[2]) ; - local file = $(matches[3]) ; - - return $(device) $(dir) $(file) ; -} - - -# Converts a native VMS path into a portable path spec. -# -# Does not handle current-device absolute paths such as "[dir]File.c" as it is -# not clear how to represent them in the portable path notation. -# -# Adds a trailing dot (".") to the file part if no extension is present (helps -# when converting it back into native path). 
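
The make-VMS rule that follows builds on the split-path-VMS rule defined above. A rough Python equivalent of the splitting step, for illustration only (the regular expression is taken from the jam rule; the helper name is ours, and the test strings mirror the jam __test__ cases further below):

    import re

    def split_path_vms(native):
        # Optional device ("dev:"), optional directory ("[dir]"); the rest is the file.
        m = re.match(r"([a-zA-Z0-9_-]+:)?(\[[^\]]*\])?(.*)$", native)
        device, directory, fname = (g or "" for g in m.groups())
        return device, directory, fname

    assert split_path_vms("disk:[dir]file.c") == ("disk:", "[dir]", "file.c")
    assert split_path_vms("[dir]file") == ("", "[dir]", "file")
    assert split_path_vms("disk:") == ("disk:", "", "")
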
-# -rule make-VMS ( native ) -{ - if [ MATCH ^(\\[[a-zA-Z0-9]) : $(native) ] - { - errors.error "Can't handle default-device absolute paths: " $(native) ; - } - - local parts = [ split-path-VMS $(native) ] ; - local device = $(parts[1]) ; - local dir = $(parts[2]) ; - local file = $(parts[3]) ; - local elems ; - - if $(device) - { - # - # rooted - # - elems = /$(device) ; - } - - if $(dir) = "[]" - { - # - # Special case: current directory - # - elems = $(elems) "." ; - } - else if $(dir) - { - dir = [ regex.replace $(dir) "\\[|\\]" "" ] ; - local dir_parts = [ regex.split $(dir) \\. ] ; - - if $(dir_parts[1]) = "" - { - # - # Relative path - # - dir_parts = $(dir_parts[2--1]) ; - } - - # - # replace "parent-directory" parts (- => ..) - # - dir_parts = [ regex.replace-list $(dir_parts) : - : .. ] ; - - elems = $(elems) $(dir_parts) ; - } - - if $(file) - { - if ! [ MATCH (\\.) : $(file) ] - { - # - # Always add "." to end of non-extension file. - # - file = $(file). ; - } - elems = $(elems) $(file) ; - } - - local portable = [ path.join $(elems) ] ; - - return $(portable) ; -} - - -# Converts a portable path spec into a native VMS path. -# -# Relies on having at least one dot (".") included in the file name to be able -# to differentiate it from the directory part. -# -rule native-VMS ( path ) -{ - local device = "" ; - local dir = $(path) ; - local file = "" ; - local native ; - local split ; - - # - # Has device ? - # - if [ is-rooted $(dir) ] - { - split = [ MATCH ^/([^:]+:)/?(.*) : $(dir) ] ; - device = $(split[1]) ; - dir = $(split[2]) ; - } - - # - # Has file ? - # - # This is no exact science, just guess work: - # - # If the last part of the current path spec - # includes some chars, followed by a dot, - # optionally followed by more chars - - # then it is a file (keep your fingers crossed). - # - split = [ regex.split $(dir) / ] ; - local maybe_file = $(split[-1]) ; - - if [ MATCH ^([^.]+\\..*) : $(maybe_file) ] - { - file = $(maybe_file) ; - dir = [ sequence.join $(split[1--2]) : / ] ; - } - - # - # Has dir spec ? - # - if $(dir) = "." - { - dir = "[]" ; - } - else if $(dir) - { - dir = [ regex.replace $(dir) \\.\\. - ] ; - dir = [ regex.replace $(dir) / . ] ; - - if $(device) = "" - { - # - # Relative directory - # - dir = "."$(dir) ; - } - dir = "["$(dir)"]" ; - } - - native = [ sequence.join $(device) $(dir) $(file) ] ; - - return $(native) ; -} - - -rule __test__ ( ) -{ - import assert ; - import errors : try catch ; - - assert.true is-rooted "/" ; - assert.true is-rooted "/foo" ; - assert.true is-rooted "/foo/bar" ; - assert.result : is-rooted "." ; - assert.result : is-rooted "foo" ; - assert.result : is-rooted "foo/bar" ; - - assert.true has-parent "foo" ; - assert.true has-parent "foo/bar" ; - assert.true has-parent "." ; - assert.result : has-parent "/" ; - - assert.result "." : basename "." ; - assert.result ".." : basename ".." ; - assert.result "foo" : basename "foo" ; - assert.result "foo" : basename "bar/foo" ; - assert.result "foo" : basename "gaz/bar/foo" ; - assert.result "foo" : basename "/gaz/bar/foo" ; - - assert.result "." : parent "foo" ; - assert.result "/" : parent "/foo" ; - assert.result "foo/bar" : parent "foo/bar/giz" ; - assert.result ".." : parent "." ; - assert.result ".." : parent "../foo" ; - assert.result "../../foo" : parent "../../foo/bar" ; - - assert.result "." : reverse "." ; - assert.result ".." : reverse "foo" ; - assert.result "../../.." 
: reverse "foo/bar/giz" ; - - assert.result "foo" : join "foo" ; - assert.result "/foo" : join "/" "foo" ; - assert.result "foo/bar" : join "foo" "bar" ; - assert.result "foo/bar" : join "foo/giz" "../bar" ; - assert.result "foo/giz" : join "foo/bar/baz" "../../giz" ; - assert.result ".." : join "." ".." ; - assert.result ".." : join "foo" "../.." ; - assert.result "../.." : join "../foo" "../.." ; - assert.result "/foo" : join "/bar" "../foo" ; - assert.result "foo/giz" : join "foo/giz" "." ; - assert.result "." : join lib2 ".." ; - assert.result "/" : join "/a" ".." ; - - assert.result /a/b : join /a/b/c .. ; - - assert.result "foo/bar/giz" : join "foo" "bar" "giz" ; - assert.result "giz" : join "foo" ".." "giz" ; - assert.result "foo/giz" : join "foo" "." "giz" ; - - try ; - { - join "a" "/b" ; - } - catch only first element may be rooted ; - - local CWD = "/home/ghost/build" ; - assert.result : all-parents . : . : $(CWD) ; - assert.result . .. ../.. ../../.. : all-parents "Jamfile" : "" : $(CWD) ; - assert.result foo . .. ../.. ../../.. : all-parents "foo/Jamfile" : "" : $(CWD) ; - assert.result ../Work .. ../.. ../../.. : all-parents "../Work/Jamfile" : "" : $(CWD) ; - - local CWD = "/home/ghost" ; - assert.result . .. : all-parents "Jamfile" : "/home" : $(CWD) ; - assert.result . : all-parents "Jamfile" : "/home/ghost" : $(CWD) ; - - assert.result "c/d" : relative "a/b/c/d" "a/b" ; - assert.result "foo" : relative "foo" "." ; - - local save-os = [ modules.peek path : os ] ; - modules.poke path : os : NT ; - - assert.result "foo/bar/giz" : make "foo/bar/giz" ; - assert.result "foo/bar/giz" : make "foo\\bar\\giz" ; - assert.result "foo" : make "foo/" ; - assert.result "foo" : make "foo\\" ; - assert.result "foo" : make "foo/." ; - assert.result "foo" : make "foo/bar/.." ; - assert.result "foo" : make "foo/bar/../" ; - assert.result "foo" : make "foo/bar/..\\" ; - assert.result "foo/bar" : make "foo/././././bar" ; - assert.result "/foo" : make "\\foo" ; - assert.result "/D:/My Documents" : make "D:\\My Documents" ; - assert.result "/c:/boost/tools/build/new/project.jam" : make "c:\\boost\\tools\\build\\test\\..\\new\\project.jam" ; - - # Test processing 'invalid' paths containing multiple successive path - # separators. - assert.result "foo" : make "foo//" ; - assert.result "foo" : make "foo///" ; - assert.result "foo" : make "foo\\\\" ; - assert.result "foo" : make "foo\\\\\\" ; - assert.result "/foo" : make "//foo" ; - assert.result "/foo" : make "///foo" ; - assert.result "/foo" : make "\\\\foo" ; - assert.result "/foo" : make "\\\\\\foo" ; - assert.result "/foo" : make "\\/\\/foo" ; - assert.result "foo/bar" : make "foo//\\//\\\\bar//\\//\\\\\\//\\//\\\\" ; - assert.result "foo" : make "foo/bar//.." ; - assert.result "foo/bar" : make "foo/bar/giz//.." ; - assert.result "foo/giz" : make "foo//\\//\\\\bar///\\\\//\\\\////\\/..///giz\\//\\\\\\//\\//\\\\" ; - assert.result "../../../foo" : make "..///.//..///.//..////foo///" ; - - # Test processing 'invalid' rooted paths with too many '..' path elements - # that would place them before the root. - assert.result : make "/.." ; - assert.result : make "/../" ; - assert.result : make "/../." ; - assert.result : make "/.././" ; - assert.result : make "/foo/../bar/giz/.././././../../." ; - assert.result : make "/foo/../bar/giz/.././././../.././" ; - assert.result : make "//foo/../bar/giz/.././././../../." ; - assert.result : make "//foo/../bar/giz/.././././../.././" ; - assert.result : make "\\\\foo/../bar/giz/.././././../../." 
; - assert.result : make "\\\\foo/../bar/giz/.././././../.././" ; - assert.result : make "/..///.//..///.//..////foo///" ; - - assert.result "foo\\bar\\giz" : native "foo/bar/giz" ; - assert.result "foo" : native "foo" ; - assert.result "\\foo" : native "/foo" ; - assert.result "D:\\My Documents\\Work" : native "/D:/My Documents/Work" ; - - modules.poke path : os : UNIX ; - - assert.result "foo/bar/giz" : make "foo/bar/giz" ; - assert.result "/sub1" : make "/sub1/." ; - assert.result "/sub1" : make "/sub1/sub2/.." ; - assert.result "sub1" : make "sub1/." ; - assert.result "sub1" : make "sub1/sub2/.." ; - assert.result "/foo/bar" : native "/foo/bar" ; - - modules.poke path : os : VMS ; - - # - # Don't really need to poke os before these - # - assert.result "disk:" "[dir]" "file" : split-path-VMS "disk:[dir]file" ; - assert.result "disk:" "[dir]" "" : split-path-VMS "disk:[dir]" ; - assert.result "disk:" "" "" : split-path-VMS "disk:" ; - assert.result "disk:" "" "file" : split-path-VMS "disk:file" ; - assert.result "" "[dir]" "file" : split-path-VMS "[dir]file" ; - assert.result "" "[dir]" "" : split-path-VMS "[dir]" ; - assert.result "" "" "file" : split-path-VMS "file" ; - assert.result "" "" "" : split-path-VMS "" ; - - # - # Special case: current directory - # - assert.result "" "[]" "" : split-path-VMS "[]" ; - assert.result "disk:" "[]" "" : split-path-VMS "disk:[]" ; - assert.result "" "[]" "file" : split-path-VMS "[]file" ; - assert.result "disk:" "[]" "file" : split-path-VMS "disk:[]file" ; - - # - # Make portable paths - # - assert.result "/disk:" : make "disk:" ; - assert.result "foo/bar/giz" : make "[.foo.bar.giz]" ; - assert.result "foo" : make "[.foo]" ; - assert.result "foo" : make "[.foo.bar.-]" ; - assert.result ".." : make "[.-]" ; - assert.result ".." : make "[-]" ; - assert.result "." : make "[]" ; - assert.result "giz.h" : make "giz.h" ; - assert.result "foo/bar/giz.h" : make "[.foo.bar]giz.h" ; - assert.result "/disk:/my_docs" : make "disk:[my_docs]" ; - assert.result "/disk:/boost/tools/build/new/project.jam" : make "disk:[boost.tools.build.test.-.new]project.jam" ; - - # - # Special case (adds '.' to end of file w/o extension to - # disambiguate from directory in portable path spec). - # - assert.result "Jamfile." : make "Jamfile" ; - assert.result "dir/Jamfile." : make "[.dir]Jamfile" ; - assert.result "/disk:/dir/Jamfile." : make "disk:[dir]Jamfile" ; - - # - # Make native paths - # - assert.result "disk:" : native "/disk:" ; - assert.result "[.foo.bar.giz]" : native "foo/bar/giz" ; - assert.result "[.foo]" : native "foo" ; - assert.result "[.-]" : native ".." ; - assert.result "[.foo.-]" : native "foo/.." ; - assert.result "[]" : native "." ; - assert.result "disk:[my_docs.work]" : native "/disk:/my_docs/work" ; - assert.result "giz.h" : native "giz.h" ; - assert.result "disk:Jamfile." : native "/disk:Jamfile." ; - assert.result "disk:[my_docs.work]Jamfile." : native "/disk:/my_docs/work/Jamfile." ; - - modules.poke path : os : $(save-os) ; -} diff --git a/jam-files/boost-build/util/path.py b/jam-files/boost-build/util/path.py deleted file mode 100644 index 222b96bf..00000000 --- a/jam-files/boost-build/util/path.py +++ /dev/null @@ -1,904 +0,0 @@ -# Status: this module is ported on demand by however needs something -# from it. Functionality that is not needed by Python port will -# be dropped. - -# Copyright (C) Vladimir Prus 2002. 
Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -# Performs various path manipulations. Path are always in a 'normilized' -# representation. In it, a path may be either: -# -# - '.', or -# -# - ['/'] [ ( '..' '/' )* (token '/')* token ] -# -# In plain english, path can be rooted, '..' elements are allowed only -# at the beginning, and it never ends in slash, except for path consisting -# of slash only. - -import os.path -from utility import to_seq -from glob import glob as builtin_glob - -from b2.util import bjam_signature - -@bjam_signature((["path", "root"],)) -def root (path, root): - """ If 'path' is relative, it is rooted at 'root'. Otherwise, it's unchanged. - """ - if os.path.isabs (path): - return path - else: - return os.path.join (root, path) - -@bjam_signature((["native"],)) -def make (native): - """ Converts the native path into normalized form. - """ - # TODO: make os selection here. - return make_UNIX (native) - -def make_UNIX (native): - - # VP: I have no idea now 'native' can be empty here! But it can! - assert (native) - - return os.path.normpath (native) - -@bjam_signature((["path"],)) -def native (path): - """ Builds a native representation of the path. - """ - # TODO: make os selection here. - return native_UNIX (path) - -def native_UNIX (path): - return path - - -def pwd (): - """ Returns the current working directory. - # TODO: is it a good idea to use the current dir? Some use-cases - may not allow us to depend on the current dir. - """ - return make (os.getcwd ()) - -def is_rooted (path): - """ Tests if a path is rooted. - """ - return path and path [0] == '/' - - -################################################################### -# Still to port. -# Original lines are prefixed with "# " -# -# # Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and -# # distribute this software is granted provided this copyright notice appears in -# # all copies. This software is provided "as is" without express or implied -# # warranty, and with no claim as to its suitability for any purpose. -# -# # Performs various path manipulations. Path are always in a 'normilized' -# # representation. In it, a path may be either: -# # -# # - '.', or -# # -# # - ['/'] [ ( '..' '/' )* (token '/')* token ] -# # -# # In plain english, path can be rooted, '..' elements are allowed only -# # at the beginning, and it never ends in slash, except for path consisting -# # of slash only. -# -# import modules ; -# import sequence ; -# import regex ; -# import errors : error ; -# -# -# os = [ modules.peek : OS ] ; -# if [ modules.peek : UNIX ] -# { -# local uname = [ modules.peek : JAMUNAME ] ; -# switch $(uname) -# { -# case CYGWIN* : -# os = CYGWIN ; -# -# case * : -# os = UNIX ; -# } -# } -# -# # -# # Tests if a path is rooted. -# # -# rule is-rooted ( path ) -# { -# return [ MATCH "^(/)" : $(path) ] ; -# } -# -# # -# # Tests if a path has a parent. -# # -# rule has-parent ( path ) -# { -# if $(path) != / { -# return 1 ; -# } else { -# return ; -# } -# } -# -# # -# # Returns the path without any directory components. -# # -# rule basename ( path ) -# { -# return [ MATCH "([^/]+)$" : $(path) ] ; -# } -# -# # -# # Returns parent directory of the path. If no parent exists, error is issued. 
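
The parent rule documented just above (its body follows in the commented-out jam code) can be transcribed to Python under the same normalized-path convention: '..' elements only at the front, no trailing slash. This transcription is ours and is not part of the original port:

    import re

    def parent(path):
        # Mirrors path.jam's 'parent'; input is a normalized path.
        if path == "/":
            raise ValueError("Path '/' has no parent")
        if path == ".":
            return ".."
        m = re.match(r"((.*)/)?([^/]+)$", path)
        head, last = m.group(2), m.group(3)
        if last == "..":              # already pointing upward: go up once more
            return path + "/.."
        if m.group(1) is None:        # single element such as "foo"
            return "."
        return head if head else "/"  # head == "" means the path was "/<name>"

    assert parent("foo/bar/giz") == "foo/bar"
    assert parent("/foo") == "/"
    assert parent("../foo") == ".."
    assert parent(".") == ".."
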
-# # -# rule parent ( path ) -# { -# if [ has-parent $(path) ] { -# -# if $(path) = . { -# return .. ; -# } else { -# -# # Strip everything at the end of path up to and including -# # the last slash -# local result = [ regex.match "((.*)/)?([^/]+)" : $(path) : 2 3 ] ; -# -# # Did we strip what we shouldn't? -# if $(result[2]) = ".." { -# return $(path)/.. ; -# } else { -# if ! $(result[1]) { -# if [ is-rooted $(path) ] { -# result = / ; -# } else { -# result = . ; -# } -# } -# return $(result[1]) ; -# } -# } -# } else { -# error "Path '$(path)' has no parent" ; -# } -# } -# -# # -# # Returns path2 such that "[ join path path2 ] = .". -# # The path may not contain ".." element or be rooted. -# # -# rule reverse ( path ) -# { -# if $(path) = . -# { -# return $(path) ; -# } -# else -# { -# local tokens = [ regex.split $(path) "/" ] ; -# local tokens2 ; -# for local i in $(tokens) { -# tokens2 += .. ; -# } -# return [ sequence.join $(tokens2) : "/" ] ; -# } -# } -# -# # -# # Auxillary rule: does all the semantic of 'join', except for error cheching. -# # The error checking is separated because this rule is recursive, and I don't -# # like the idea of checking the same input over and over. -# # -# local rule join-imp ( elements + ) -# { -# return [ NORMALIZE_PATH $(elements:J="/") ] ; -# } -# -# # -# # Contanenates the passed path elements. Generates an error if -# # any element other than the first one is rooted. -# # -# rule join ( elements + ) -# { -# if ! $(elements[2]) -# { -# return $(elements[1]) ; -# } -# else -# { -# for local e in $(elements[2-]) -# { -# if [ is-rooted $(e) ] -# { -# error only first element may be rooted ; -# } -# } -# return [ join-imp $(elements) ] ; -# } -# } - - -def glob (dirs, patterns): - """ Returns the list of files matching the given pattern in the - specified directory. Both directories and patterns are - supplied as portable paths. Each pattern should be non-absolute - path, and can't contain "." or ".." elements. Each slash separated - element of pattern can contain the following special characters: - - '?', which match any character - - '*', which matches arbitrary number of characters. - A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3 - if and only if e1 matches p1, e2 matches p2 and so on. - - For example: - [ glob . : *.cpp ] - [ glob . : */build/Jamfile ] - """ -# { -# local result ; -# if $(patterns:D) -# { -# # When a pattern has a directory element, we first glob for -# # directory, and then glob for file name is the found directories. -# for local p in $(patterns) -# { -# # First glob for directory part. -# local globbed-dirs = [ glob $(dirs) : $(p:D) ] ; -# result += [ glob $(globbed-dirs) : $(p:D="") ] ; -# } -# } -# else -# { -# # When a pattern has not directory, we glob directly. -# # Take care of special ".." value. The "GLOB" rule simply ignores -# # the ".." element (and ".") element in directory listings. This is -# # needed so that -# # -# # [ glob libs/*/Jamfile ] -# # -# # don't return -# # -# # libs/../Jamfile (which is the same as ./Jamfile) -# # -# # On the other hand, when ".." is explicitly present in the pattern -# # we need to return it. -# # -# for local dir in $(dirs) -# { -# for local p in $(patterns) -# { -# if $(p) != ".." -# { -# result += [ sequence.transform make -# : [ GLOB [ native $(dir) ] : $(p) ] ] ; -# } -# else -# { -# result += [ path.join $(dir) .. ] ; -# } -# } -# } -# } -# return $(result) ; -# } -# - -# TODO: (PF) I replaced the code above by this. 
I think it should work but needs to be tested. - result = [] - dirs = to_seq (dirs) - patterns = to_seq (patterns) - - splitdirs = [] - for dir in dirs: - splitdirs += dir.split (os.pathsep) - - for dir in splitdirs: - for pattern in patterns: - p = os.path.join (dir, pattern) - import glob - result.extend (glob.glob (p)) - return result - -# -# Find out the absolute name of path and returns the list of all the parents, -# starting with the immediate one. Parents are returned as relative names. -# If 'upper_limit' is specified, directories above it will be pruned. -# -def all_parents(path, upper_limit=None, cwd=None): - - if not cwd: - cwd = os.getcwd() - - path_abs = os.path.join(cwd, path) - - if upper_limit: - upper_limit = os.path.join(cwd, upper_limit) - - result = [] - while path_abs and path_abs != upper_limit: - (head, tail) = os.path.split(path) - path = os.path.join(path, "..") - result.append(path) - path_abs = head - - if upper_limit and path_abs != upper_limit: - raise BaseException("'%s' is not a prefix of '%s'" % (upper_limit, path)) - - return result - -# Search for 'pattern' in parent directories of 'dir', up till and including -# 'upper_limit', if it is specified, or till the filesystem root otherwise. -# -def glob_in_parents(dir, patterns, upper_limit=None): - - result = [] - parent_dirs = all_parents(dir, upper_limit) - - for p in parent_dirs: - result = glob(p, patterns) - if result: break - - return result - -# -# # -# # Assuming 'child' is a subdirectory of 'parent', return the relative -# # path from 'parent' to 'child' -# # -# rule relative ( child parent ) -# { -# if $(parent) = "." -# { -# return $(child) ; -# } -# else -# { -# local split1 = [ regex.split $(parent) / ] ; -# local split2 = [ regex.split $(child) / ] ; -# -# while $(split1) -# { -# if $(split1[1]) = $(split2[1]) -# { -# split1 = $(split1[2-]) ; -# split2 = $(split2[2-]) ; -# } -# else -# { -# errors.error $(child) is not a subdir of $(parent) ; -# } -# } -# return [ join $(split2) ] ; -# } -# } -# -# # Returns the minimal path to path2 that is relative path1. -# # -# rule relative-to ( path1 path2 ) -# { -# local root_1 = [ regex.split [ reverse $(path1) ] / ] ; -# local split1 = [ regex.split $(path1) / ] ; -# local split2 = [ regex.split $(path2) / ] ; -# -# while $(split1) && $(root_1) -# { -# if $(split1[1]) = $(split2[1]) -# { -# root_1 = $(root_1[2-]) ; -# split1 = $(split1[2-]) ; -# split2 = $(split2[2-]) ; -# } -# else -# { -# split1 = ; -# } -# } -# return [ join . $(root_1) $(split2) ] ; -# } - -# Returns the list of paths which are used by the operating system -# for looking up programs -def programs_path (): - raw = [] - names = ['PATH', 'Path', 'path'] - - for name in names: - raw.append(os.environ.get (name, '')) - - result = [] - for elem in raw: - if elem: - for p in elem.split(os.path.pathsep): - result.append(make(p)) - - return result - -# rule make-NT ( native ) -# { -# local tokens = [ regex.split $(native) "[/\\]" ] ; -# local result ; -# -# # Handle paths ending with slashes -# if $(tokens[-1]) = "" -# { -# tokens = $(tokens[1--2]) ; # discard the empty element -# } -# -# result = [ path.join $(tokens) ] ; -# -# if [ regex.match "(^.:)" : $(native) ] -# { -# result = /$(result) ; -# } -# -# if $(native) = "" -# { -# result = "." 
; -# } -# -# return $(result) ; -# } -# -# rule native-NT ( path ) -# { -# local result = [ MATCH "^/?(.*)" : $(path) ] ; -# result = [ sequence.join [ regex.split $(result) "/" ] : "\\" ] ; -# return $(result) ; -# } -# -# rule make-CYGWIN ( path ) -# { -# return [ make-NT $(path) ] ; -# } -# -# rule native-CYGWIN ( path ) -# { -# local result = $(path) ; -# if [ regex.match "(^/.:)" : $(path) ] # win absolute -# { -# result = [ MATCH "^/?(.*)" : $(path) ] ; # remove leading '/' -# } -# return [ native-UNIX $(result) ] ; -# } -# -# # -# # split-VMS: splits input native path into -# # device dir file (each part is optional), -# # example: -# # -# # dev:[dir]file.c => dev: [dir] file.c -# # -# rule split-path-VMS ( native ) -# { -# local matches = [ MATCH ([a-zA-Z0-9_-]+:)?(\\[[^\]]*\\])?(.*)?$ : $(native) ] ; -# local device = $(matches[1]) ; -# local dir = $(matches[2]) ; -# local file = $(matches[3]) ; -# -# return $(device) $(dir) $(file) ; -# } -# -# # -# # Converts a native VMS path into a portable path spec. -# # -# # Does not handle current-device absolute paths such -# # as "[dir]File.c" as it is not clear how to represent -# # them in the portable path notation. -# # -# # Adds a trailing dot (".") to the file part if no extension -# # is present (helps when converting it back into native path). -# # -# rule make-VMS ( native ) -# { -# if [ MATCH ^(\\[[a-zA-Z0-9]) : $(native) ] -# { -# errors.error "Can't handle default-device absolute paths: " $(native) ; -# } -# -# local parts = [ split-path-VMS $(native) ] ; -# local device = $(parts[1]) ; -# local dir = $(parts[2]) ; -# local file = $(parts[3]) ; -# local elems ; -# -# if $(device) -# { -# # -# # rooted -# # -# elems = /$(device) ; -# } -# -# if $(dir) = "[]" -# { -# # -# # Special case: current directory -# # -# elems = $(elems) "." ; -# } -# else if $(dir) -# { -# dir = [ regex.replace $(dir) "\\[|\\]" "" ] ; -# local dir_parts = [ regex.split $(dir) \\. ] ; -# -# if $(dir_parts[1]) = "" -# { -# # -# # Relative path -# # -# dir_parts = $(dir_parts[2--1]) ; -# } -# -# # -# # replace "parent-directory" parts (- => ..) -# # -# dir_parts = [ regex.replace-list $(dir_parts) : - : .. ] ; -# -# elems = $(elems) $(dir_parts) ; -# } -# -# if $(file) -# { -# if ! [ MATCH (\\.) : $(file) ] -# { -# # -# # Always add "." to end of non-extension file -# # -# file = $(file). ; -# } -# elems = $(elems) $(file) ; -# } -# -# local portable = [ path.join $(elems) ] ; -# -# return $(portable) ; -# } -# -# # -# # Converts a portable path spec into a native VMS path. -# # -# # Relies on having at least one dot (".") included in the file -# # name to be able to differentiate it ftom the directory part. -# # -# rule native-VMS ( path ) -# { -# local device = "" ; -# local dir = $(path) ; -# local file = "" ; -# local native ; -# local split ; -# -# # -# # Has device ? -# # -# if [ is-rooted $(dir) ] -# { -# split = [ MATCH ^/([^:]+:)/?(.*) : $(dir) ] ; -# device = $(split[1]) ; -# dir = $(split[2]) ; -# } -# -# # -# # Has file ? -# # -# # This is no exact science, just guess work: -# # -# # If the last part of the current path spec -# # includes some chars, followed by a dot, -# # optionally followed by more chars - -# # then it is a file (keep your fingers crossed). -# # -# split = [ regex.split $(dir) / ] ; -# local maybe_file = $(split[-1]) ; -# -# if [ MATCH ^([^.]+\\..*) : $(maybe_file) ] -# { -# file = $(maybe_file) ; -# dir = [ sequence.join $(split[1--2]) : / ] ; -# } -# -# # -# # Has dir spec ? -# # -# if $(dir) = "." 
-# { -# dir = "[]" ; -# } -# else if $(dir) -# { -# dir = [ regex.replace $(dir) \\.\\. - ] ; -# dir = [ regex.replace $(dir) / . ] ; -# -# if $(device) = "" -# { -# # -# # Relative directory -# # -# dir = "."$(dir) ; -# } -# dir = "["$(dir)"]" ; -# } -# -# native = [ sequence.join $(device) $(dir) $(file) ] ; -# -# return $(native) ; -# } -# -# -# rule __test__ ( ) { -# -# import assert ; -# import errors : try catch ; -# -# assert.true is-rooted "/" ; -# assert.true is-rooted "/foo" ; -# assert.true is-rooted "/foo/bar" ; -# assert.result : is-rooted "." ; -# assert.result : is-rooted "foo" ; -# assert.result : is-rooted "foo/bar" ; -# -# assert.true has-parent "foo" ; -# assert.true has-parent "foo/bar" ; -# assert.true has-parent "." ; -# assert.result : has-parent "/" ; -# -# assert.result "." : basename "." ; -# assert.result ".." : basename ".." ; -# assert.result "foo" : basename "foo" ; -# assert.result "foo" : basename "bar/foo" ; -# assert.result "foo" : basename "gaz/bar/foo" ; -# assert.result "foo" : basename "/gaz/bar/foo" ; -# -# assert.result "." : parent "foo" ; -# assert.result "/" : parent "/foo" ; -# assert.result "foo/bar" : parent "foo/bar/giz" ; -# assert.result ".." : parent "." ; -# assert.result ".." : parent "../foo" ; -# assert.result "../../foo" : parent "../../foo/bar" ; -# -# -# assert.result "." : reverse "." ; -# assert.result ".." : reverse "foo" ; -# assert.result "../../.." : reverse "foo/bar/giz" ; -# -# assert.result "foo" : join "foo" ; -# assert.result "/foo" : join "/" "foo" ; -# assert.result "foo/bar" : join "foo" "bar" ; -# assert.result "foo/bar" : join "foo/giz" "../bar" ; -# assert.result "foo/giz" : join "foo/bar/baz" "../../giz" ; -# assert.result ".." : join "." ".." ; -# assert.result ".." : join "foo" "../.." ; -# assert.result "../.." : join "../foo" "../.." ; -# assert.result "/foo" : join "/bar" "../foo" ; -# assert.result "foo/giz" : join "foo/giz" "." ; -# assert.result "." : join lib2 ".." ; -# assert.result "/" : join "/a" ".." ; -# -# assert.result /a/b : join /a/b/c .. ; -# -# assert.result "foo/bar/giz" : join "foo" "bar" "giz" ; -# assert.result "giz" : join "foo" ".." "giz" ; -# assert.result "foo/giz" : join "foo" "." "giz" ; -# -# try ; -# { -# join "a" "/b" ; -# } -# catch only first element may be rooted ; -# -# local CWD = "/home/ghost/build" ; -# assert.result : all-parents . : . : $(CWD) ; -# assert.result . .. ../.. ../../.. : all-parents "Jamfile" : "" : $(CWD) ; -# assert.result foo . .. ../.. ../../.. : all-parents "foo/Jamfile" : "" : $(CWD) ; -# assert.result ../Work .. ../.. ../../.. : all-parents "../Work/Jamfile" : "" : $(CWD) ; -# -# local CWD = "/home/ghost" ; -# assert.result . .. : all-parents "Jamfile" : "/home" : $(CWD) ; -# assert.result . : all-parents "Jamfile" : "/home/ghost" : $(CWD) ; -# -# assert.result "c/d" : relative "a/b/c/d" "a/b" ; -# assert.result "foo" : relative "foo" "." ; -# -# local save-os = [ modules.peek path : os ] ; -# modules.poke path : os : NT ; -# -# assert.result "foo/bar/giz" : make "foo/bar/giz" ; -# assert.result "foo/bar/giz" : make "foo\\bar\\giz" ; -# assert.result "foo" : make "foo/." ; -# assert.result "foo" : make "foo/bar/.." 
; -# assert.result "/D:/My Documents" : make "D:\\My Documents" ; -# assert.result "/c:/boost/tools/build/new/project.jam" : make "c:\\boost\\tools\\build\\test\\..\\new\\project.jam" ; -# -# assert.result "foo\\bar\\giz" : native "foo/bar/giz" ; -# assert.result "foo" : native "foo" ; -# assert.result "D:\\My Documents\\Work" : native "/D:/My Documents/Work" ; -# -# modules.poke path : os : UNIX ; -# -# assert.result "foo/bar/giz" : make "foo/bar/giz" ; -# assert.result "/sub1" : make "/sub1/." ; -# assert.result "/sub1" : make "/sub1/sub2/.." ; -# assert.result "sub1" : make "sub1/." ; -# assert.result "sub1" : make "sub1/sub2/.." ; -# assert.result "/foo/bar" : native "/foo/bar" ; -# -# modules.poke path : os : VMS ; -# -# # -# # Don't really need to poke os before these -# # -# assert.result "disk:" "[dir]" "file" : split-path-VMS "disk:[dir]file" ; -# assert.result "disk:" "[dir]" "" : split-path-VMS "disk:[dir]" ; -# assert.result "disk:" "" "" : split-path-VMS "disk:" ; -# assert.result "disk:" "" "file" : split-path-VMS "disk:file" ; -# assert.result "" "[dir]" "file" : split-path-VMS "[dir]file" ; -# assert.result "" "[dir]" "" : split-path-VMS "[dir]" ; -# assert.result "" "" "file" : split-path-VMS "file" ; -# assert.result "" "" "" : split-path-VMS "" ; -# -# # -# # Special case: current directory -# # -# assert.result "" "[]" "" : split-path-VMS "[]" ; -# assert.result "disk:" "[]" "" : split-path-VMS "disk:[]" ; -# assert.result "" "[]" "file" : split-path-VMS "[]file" ; -# assert.result "disk:" "[]" "file" : split-path-VMS "disk:[]file" ; -# -# # -# # Make portable paths -# # -# assert.result "/disk:" : make "disk:" ; -# assert.result "foo/bar/giz" : make "[.foo.bar.giz]" ; -# assert.result "foo" : make "[.foo]" ; -# assert.result "foo" : make "[.foo.bar.-]" ; -# assert.result ".." : make "[.-]" ; -# assert.result ".." : make "[-]" ; -# assert.result "." : make "[]" ; -# assert.result "giz.h" : make "giz.h" ; -# assert.result "foo/bar/giz.h" : make "[.foo.bar]giz.h" ; -# assert.result "/disk:/my_docs" : make "disk:[my_docs]" ; -# assert.result "/disk:/boost/tools/build/new/project.jam" : make "disk:[boost.tools.build.test.-.new]project.jam" ; -# -# # -# # Special case (adds '.' to end of file w/o extension to -# # disambiguate from directory in portable path spec). -# # -# assert.result "Jamfile." : make "Jamfile" ; -# assert.result "dir/Jamfile." : make "[.dir]Jamfile" ; -# assert.result "/disk:/dir/Jamfile." : make "disk:[dir]Jamfile" ; -# -# # -# # Make native paths -# # -# assert.result "disk:" : native "/disk:" ; -# assert.result "[.foo.bar.giz]" : native "foo/bar/giz" ; -# assert.result "[.foo]" : native "foo" ; -# assert.result "[.-]" : native ".." ; -# assert.result "[.foo.-]" : native "foo/.." ; -# assert.result "[]" : native "." ; -# assert.result "disk:[my_docs.work]" : native "/disk:/my_docs/work" ; -# assert.result "giz.h" : native "giz.h" ; -# assert.result "disk:Jamfile." : native "/disk:Jamfile." ; -# assert.result "disk:[my_docs.work]Jamfile." : native "/disk:/my_docs/work/Jamfile." ; -# -# modules.poke path : os : $(save-os) ; -# -# } - -# - - -#def glob(dir, patterns): -# result = [] -# for pattern in patterns: -# result.extend(builtin_glob(os.path.join(dir, pattern))) -# return result - -def glob(dirs, patterns, exclude_patterns=None): - """Returns the list of files matching the given pattern in the - specified directory. Both directories and patterns are - supplied as portable paths. Each pattern should be non-absolute - path, and can't contain '.' 
or '..' elements. Each slash separated - element of pattern can contain the following special characters: - - '?', which match any character - - '*', which matches arbitrary number of characters. - A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3 - if and only if e1 matches p1, e2 matches p2 and so on. - For example: - [ glob . : *.cpp ] - [ glob . : */build/Jamfile ] - """ - - assert(isinstance(patterns, list)) - assert(isinstance(dirs, list)) - - if not exclude_patterns: - exclude_patterns = [] - else: - assert(isinstance(exclude_patterns, list)) - - real_patterns = [os.path.join(d, p) for p in patterns for d in dirs] - real_exclude_patterns = [os.path.join(d, p) for p in exclude_patterns - for d in dirs] - - inc = [os.path.normpath(name) for p in real_patterns - for name in builtin_glob(p)] - exc = [os.path.normpath(name) for p in real_exclude_patterns - for name in builtin_glob(p)] - return [x for x in inc if x not in exc] - -def glob_tree(roots, patterns, exclude_patterns=None): - """Recursive version of GLOB. Builds the glob of files while - also searching in the subdirectories of the given roots. An - optional set of exclusion patterns will filter out the - matching entries from the result. The exclusions also apply - to the subdirectory scanning, such that directories that - match the exclusion patterns will not be searched.""" - - if not exclude_patterns: - exclude_patterns = [] - - result = glob(roots, patterns, exclude_patterns) - subdirs = [s for s in glob(roots, ["*"]) if s != "." and s != ".." and os.path.isdir(s)] - if subdirs: - result.extend(glob_tree(subdirs, patterns, exclude_patterns)) - - return result - -def glob_in_parents(dir, patterns, upper_limit=None): - """Recursive version of GLOB which glob sall parent directories - of dir until the first match is found. Returns an empty result if no match - is found""" - - assert(isinstance(dir, str)) - assert(isinstance(patterns, list)) - - result = [] - - absolute_dir = os.path.join(os.getcwd(), dir) - absolute_dir = os.path.normpath(absolute_dir) - while absolute_dir: - new_dir = os.path.split(absolute_dir)[0] - if new_dir == absolute_dir: - break - result = glob([new_dir], patterns) - if result: - break - absolute_dir = new_dir - - return result - - -# The relpath functionality is written by -# Cimarron Taylor -def split(p, rest=[]): - (h,t) = os.path.split(p) - if len(h) < 1: return [t]+rest - if len(t) < 1: return [h]+rest - return split(h,[t]+rest) - -def commonpath(l1, l2, common=[]): - if len(l1) < 1: return (common, l1, l2) - if len(l2) < 1: return (common, l1, l2) - if l1[0] != l2[0]: return (common, l1, l2) - return commonpath(l1[1:], l2[1:], common+[l1[0]]) - -def relpath(p1, p2): - (common,l1,l2) = commonpath(split(p1), split(p2)) - p = [] - if len(l1) > 0: - p = [ '../' * len(l1) ] - p = p + l2 - if p: - return os.path.join( *p ) - else: - return "." diff --git a/jam-files/boost-build/util/print.jam b/jam-files/boost-build/util/print.jam deleted file mode 100644 index 708d21ab..00000000 --- a/jam-files/boost-build/util/print.jam +++ /dev/null @@ -1,488 +0,0 @@ -# Copyright 2003 Douglas Gregor -# Copyright 2002, 2003, 2005 Rene Rivera -# Copyright 2002, 2003, 2004, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Utilities for generating format independent output. Using these -# will help in generation of documentation in at minimum plain/console -# and html. 
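
print.jam, being removed here, renders the same logical document either as plain text or as escaped HTML, selected by the current output type. A toy Python sketch of that dispatch (function and names are ours; only the general idea is taken from the jam module):

    import html

    def render_section(name, description, output_type="plain"):
        # Same logical content, two renderings, as in print.jam's 'section'.
        if output_type == "plain":
            return name + ":\n\n" + "\n".join(description)
        if output_type == "html":
            body = "<br>".join(html.escape(line) for line in description)
            return "<h3>%s</h3> <p>%s</p>" % (html.escape(name), body)
        raise ValueError("unknown output type: " + output_type)

    print(render_section("Usage", ["bjam [options] target"], "html"))
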
- -import modules ; -import numbers ; -import string ; -import regex ; -import "class" ; -import scanner ; -import path ; - -# The current output target. Defaults to console. -output-target = console ; - -# The current output type. Defaults to plain. Other possible values are "html". -output-type = plain ; - -# Whitespace. -.whitespace = [ string.whitespace ] ; - - -# Set the target and type of output to generate. This sets both the destination -# output and the type of docs to generate to that output. The target can be -# either a file or "console" for echoing to the console. If the type of output -# is not specified it defaults to plain text. -# -rule output ( - target # The target file or device; file or "console". - type ? # The type of output; "plain" or "html". -) -{ - type ?= plain ; - if $(output-target) != $(target) - { - output-target = $(target) ; - output-type = $(type) ; - if $(output-type) = html - { - text - "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">" - "<html>" - "<head>" - "</head>" - "<body link=\"#0000ff\" vlink=\"#800080\">" - : true - : prefix ; - text - "</body>" - "</html>" - : - : suffix ; - } - } -} - - -# Generate a section with a description. The type of output can be controlled by -# the value of the 'output-type' variable. -# -rule section ( - name # The name of the section. - description * # A number of description lines. -) -{ - if $(output-type) = plain - { - lines [ split-at-words $(name): ] ; - lines ; - } - else if $(output-type) = html - { - name = [ escape-html $(name) ] ; - text <h3>$(name)</h3> <p> ; - } - local pre = ; - while $(description) - { - local paragraph = ; - while $(description) && [ string.is-whitespace $(description[1]) ] { description = $(description[2-]) ; } - if $(pre) - { - while $(description) && ( - $(pre) = " $(description[1])" || - ( $(pre) < [ string.chars [ MATCH "^([$(.whitespace)]*)" : " $(description[1])" ] ] ) - ) - { paragraph += $(description[1]) ; description = $(description[2-]) ; } - while [ string.is-whitespace $(paragraph[-1]) ] { paragraph = $(paragraph[1--2]) ; } - pre = ; - if $(output-type) = plain - { - lines $(paragraph) "" : " " " " ; - } - else if $(output-type) = html - { - text <blockquote> ; - lines $(paragraph) ; - text </blockquote> ; - } - } - else - { - while $(description) && ! [ string.is-whitespace $(description[1]) ] - { paragraph += $(description[1]) ; description = $(description[2-]) ; } - if $(paragraph[1]) = :: && ! $(paragraph[2]) - { - pre = " " ; - } - if $(paragraph[1]) = :: - { - if $(output-type) = plain - { - lines $(paragraph[2-]) "" : " " " " ; - lines ; - } - else if $(output-type) = html - { - text <blockquote> ; - lines $(paragraph[2-]) ; - text </blockquote> ; - } - } - else - { - local p = [ MATCH "(.*)(::)$" : $(paragraph[-1]) ] ; - local pws = [ MATCH "([ ]*)$" : $(p[1]) ] ; - p = [ MATCH "(.*)($(pws))($(p[2]))$" : $(paragraph[-1]) ] ; - if $(p[3]) = :: - { - pre = [ string.chars [ MATCH "^([$(.whitespace)]*)" : " $(p[1])" ] ] ; - if ! 
$(p[2]) || $(p[2]) = "" { paragraph = $(paragraph[1--2]) $(p[1]): ; } - else { paragraph = $(paragraph[1--2]) $(p[1]) ; } - if $(output-type) = plain - { - lines [ split-at-words " " $(paragraph) ] : " " " " ; - lines ; - } - else if $(output-type) = html - { - text </p> <p> [ escape-html $(paragraph) ] ; - } - } - else - { - if $(output-type) = plain - { - lines [ split-at-words " " $(paragraph) ] : " " " " ; - lines ; - } - else if $(output-type) = html - { - text </p> <p> [ escape-html $(paragraph) ] ; - } - } - } - } - } - if $(output-type) = html - { - text </p> ; - } -} - - -# Generate the start of a list of items. The type of output can be controlled by -# the value of the 'output-type' variable. -# -rule list-start ( ) -{ - if $(output-type) = plain - { - } - else if $(output-type) = html - { - text <ul> ; - } -} - - -# Generate an item in a list. The type of output can be controlled by the value -# of the 'output-type' variable. -# -rule list-item ( - item + # The item to list. -) -{ - if $(output-type) = plain - { - lines [ split-at-words "*" $(item) ] : " " " " ; - } - else if $(output-type) = html - { - text <li> [ escape-html $(item) ] </li> ; - } -} - - -# Generate the end of a list of items. The type of output can be controlled by -# the value of the 'output-type' variable. -# -rule list-end ( ) -{ - if $(output-type) = plain - { - lines ; - } - else if $(output-type) = html - { - text </ul> ; - } -} - - -# Split the given text into separate lines, word-wrapping to a margin. The -# default margin is 78 characters. -# -rule split-at-words ( - text + # The text to split. - : margin ? # An optional margin, default is 78. -) -{ - local lines = ; - text = [ string.words $(text:J=" ") ] ; - text = $(text:J=" ") ; - margin ?= 78 ; - local char-match-1 = ".?" ; - local char-match = "" ; - while $(margin) != 0 - { - char-match = $(char-match)$(char-match-1) ; - margin = [ numbers.decrement $(margin) ] ; - } - while $(text) - { - local s = "" ; - local t = "" ; - # divide s into the first X characters and the rest - s = [ MATCH "^($(char-match))(.*)" : $(text) ] ; - - if $(s[2]) - { - # split the first half at a space - t = [ MATCH "^(.*)[\\ ]([^\\ ]*)$" : $(s[1]) ] ; - } - else - { - t = $(s) ; - } - - if ! $(t[2]) - { - t += "" ; - } - - text = $(t[2])$(s[2]) ; - lines += $(t[1]) ; - } - return $(lines) ; -} - - -# Generate a set of fixed lines. Each single item passed in is output on a -# separate line. For console this just echos each line, but for html this will -# split them with <br>. -# -rule lines ( - text * # The lines of text. - : indent ? # Optional indentation prepended to each line after the first one. - outdent ? # Optional indentation to prepend to the first line. -) -{ - text ?= "" ; - indent ?= "" ; - outdent ?= "" ; - if $(output-type) = plain - { - text $(outdent)$(text[1]) $(indent)$(text[2-]) ; - } - else if $(output-type) = html - { - local indent-chars = [ string.chars $(indent) ] ; - indent = "" ; - for local c in $(indent-chars) - { - if $(c) = " " { c = " " ; } - else if $(c) = " " { c = " " ; } - indent = $(indent)$(c) ; - } - local html-text = [ escape-html $(text) : " " ] ; - text $(html-text[1])<br> $(indent)$(html-text[2-])<br> ; - } -} - - -# Output text directly to the current target. When doing output to a file, one -# can indicate if the text should be output to "prefix" it, as the "body" -# (default), or "suffix" of the file. This is independant of the actual -# execution order of the text rule. 
This rule invokes a singular action, one -# action only once, which does the build of the file. Therefore actions on the -# target outside of this rule will happen entirely before and/or after all -# output using this rule. -# -rule text ( - strings * # The strings of text to output. - : overwrite ? # true to overwrite the output (if it is a file) - : prefix-body-suffix ? # Indication to output prefix, body, or suffix (for a file). -) -{ - prefix-body-suffix ?= body ; - if $(output-target) = console - { - if ! $(strings) - { - ECHO ; - } - else - { - for local s in $(strings) - { - ECHO $(s) ; - } - } - } - if ! $($(output-target).did-action) - { - $(output-target).did-action = yes ; - $(output-target).text-prefix = ; - $(output-target).text-body = ; - $(output-target).text-suffix = ; - - nl on $(output-target) = " -" ; - text-redirect on $(output-target) = ">>" ; - if $(overwrite) - { - text-redirect on $(output-target) = ">" ; - } - text-content on $(output-target) = ; - - text-action $(output-target) ; - - if $(overwrite) && $(output-target) != console - { - check-for-update $(output-target) ; - } - } - $(output-target).text-$(prefix-body-suffix) += $(strings) ; - text-content on $(output-target) = - $($(output-target).text-prefix) - $($(output-target).text-body) - $($(output-target).text-suffix) ; -} - - -# Outputs the text to the current targets, after word-wrapping it. -# -rule wrapped-text ( text + ) -{ - local lines = [ split-at-words $(text) ] ; - text $(lines) ; -} - - -# Escapes text into html/xml printable equivalents. Does not know about tags and -# therefore tags fed into this will also be escaped. Currently escapes space, -# "<", ">", and "&". -# -rule escape-html ( - text + # The text to escape. - : space ? # What to replace spaces with, defaults to " ". -) -{ - local html-text = ; - while $(text) - { - local html = $(text[1]) ; - text = $(text[2-]) ; - html = [ regex.replace $(html) "&" "&" ] ; - html = [ regex.replace $(html) "<" "<" ] ; - html = [ regex.replace $(html) ">" ">" ] ; - if $(space) - { - html = [ regex.replace $(html) " " "$(space)" ] ; - } - html-text += $(html) ; - } - return $(html-text) ; -} - - -# Outputs the text strings collected by the text rule to the output file. -# -actions quietly text-action -{ - @($(STDOUT):E=$(text-content:J=$(nl))) $(text-redirect) "$(<)" -} - - -rule get-scanner ( ) -{ - if ! $(.scanner) - { - .scanner = [ class.new print-scanner ] ; - } - return $(.scanner) ; -} - - -# The following code to update print targets when their contents -# change is a horrible hack. It basically creates a target which -# binds to this file (print.jam) and installs a scanner on it -# which reads the target and compares its contents to the new -# contents that we're writing. 
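
Stripped of the scanner machinery, the idea behind the check-for-update hack described above is simply "write the file only when its contents would actually change", so that targets depending on it are not rebuilt needlessly. A self-contained sketch (ours, not the jam mechanism itself):

    import os

    def write_if_changed(path, new_content):
        # Skip the write when the on-disk contents already match.
        if os.path.exists(path):
            with open(path) as f:
                if f.read() == new_content:
                    return False   # up to date; dependents stay untouched
        with open(path, "w") as f:
            f.write(new_content)
        return True                # file (re)written; dependents rebuild
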
-# -rule check-for-update ( target ) -{ - local scanner = [ get-scanner ] ; - local file = [ path.native [ modules.binding $(__name__) ] ] ; - local g = [ MATCH <(.*)> : $(target:G) ] ; - local dependency-target = $(__file__:G=$(g:E=)-$(target:G=)-$(scanner)) ; - DEPENDS $(target) : $(dependency-target) ; - SEARCH on $(dependency-target) = $(file:D) ; - ISFILE $(dependency-target) ; - NOUPDATE $(dependency-target) ; - base on $(dependency-target) = $(target) ; - scanner.install $(scanner) : $(dependency-target) none ; - return $(dependency-target) ; -} - - -class print-scanner : scanner -{ - import path ; - import os ; - - rule pattern ( ) - { - return "(One match...)" ; - } - - rule process ( target : matches * : binding ) - { - local base = [ on $(target) return $(base) ] ; - local nl = [ on $(base) return $(nl) ] ; - local text-content = [ on $(base) return $(text-content) ] ; - local dir = [ on $(base) return $(LOCATE) ] ; - if $(dir) - { - dir = [ path.make $(dir) ] ; - } - local file = [ path.native [ path.join $(dir) $(base:G=) ] ] ; - local actual-content ; - if [ os.name ] = NT - { - actual-content = [ SHELL "type \"$(file)\" 2>nul" ] ; - } - else - { - actual-content = [ SHELL "cat \"$(file)\" 2>/dev/null" ] ; - } - if $(text-content:J=$(nl)) != $(actual-content) - { - ALWAYS $(base) ; - } - } -} - - -rule __test__ ( ) -{ - import assert ; - - assert.result one two three : split-at-words one two three : 5 ; - assert.result "one two" three : split-at-words one two three : 8 ; - assert.result "one two" three : split-at-words one two three : 9 ; - assert.result "one two three" : split-at-words one two three ; - - # VP, 2004-12-03 The following test fails for some reason, so commenting it - # out. - #assert.result "one two three" "&<>" : - # escape-html "one two three" "&<>" ; -} diff --git a/jam-files/boost-build/util/regex.jam b/jam-files/boost-build/util/regex.jam deleted file mode 100644 index 234c36f6..00000000 --- a/jam-files/boost-build/util/regex.jam +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2001, 2002 Dave Abrahams -# Copyright 2003 Douglas Gregor -# Copyright 2003 Rene Rivera -# Copyright 2002, 2003, 2004, 2005 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# -# Returns a list of the following substrings: -# 1) from beginning till the first occurrence of 'separator' or till the end, -# 2) between each occurrence of 'separator' and the next occurrence, -# 3) from the last occurrence of 'separator' till the end. -# If no separator is present, the result will contain only one element. -# - -rule split ( string separator ) -{ - local result ; - local s = $(string) ; - - # Break pieaces off 's' until it has no separators left. - local match = 1 ; - while $(match) - { - match = [ MATCH ^(.*)($(separator))(.*) : $(s) ] ; - if $(match) - { - match += "" ; # in case 3rd item was empty - works around MATCH bug - result = $(match[3]) $(result) ; - s = $(match[1]) ; - } - } - # Combine the remaining part at the beginning, which does not have - # separators, with the pieces broken off. Note that the rule's signature - # does not allow the initial s to be empty. - return $(s) $(result) ; -} - - -# Returns the concatenated results of Applying regex.split to every element of -# the list using the separator pattern. 
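
The split rule defined above keeps empty leading and trailing fields, which is exactly the behaviour of Python's re.split. These asserts mirror the jam __test__ cases that appear further below:

    import re

    assert re.split("/", "a/b/c") == ["a", "b", "c"]
    assert re.split("/", "/a/b/c") == ["", "a", "b", "c"]
    assert re.split("/", "/a//b/c/") == ["", "a", "", "b", "c", ""]
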
-# -rule split-list ( list * : separator ) -{ - local result ; - for s in $(list) - { - result += [ split $(s) $(separator) ] ; - } - return $(result) ; -} - - -# Match string against pattern, and return the elements indicated by indices. -# -rule match ( pattern : string : indices * ) -{ - indices ?= 1 2 3 4 5 6 7 8 9 ; - local x = [ MATCH $(pattern) : $(string) ] ; - return $(x[$(indices)]) ; -} - - -# Matches all elements of 'list' agains the 'pattern' and returns a list of -# elements indicated by indices of all successful matches. If 'indices' is -# omitted returns a list of first paranthethised groups of all successful -# matches. -# -rule transform ( list * : pattern : indices * ) -{ - indices ?= 1 ; - local result ; - for local e in $(list) - { - local m = [ MATCH $(pattern) : $(e) ] ; - if $(m) - { - result += $(m[$(indices)]) ; - } - } - return $(result) ; -} - -NATIVE_RULE regex : transform ; - - -# Escapes all of the characters in symbols using the escape symbol escape-symbol -# for the given string, and returns the escaped string. -# -rule escape ( string : symbols : escape-symbol ) -{ - local result = "" ; - local m = 1 ; - while $(m) - { - m = [ MATCH ^([^$(symbols)]*)([$(symbols)])(.*) : $(string) ] ; - if $(m) - { - m += "" ; # Supposedly a bug fix; borrowed from regex.split - result = "$(result)$(m[1])$(escape-symbol)$(m[2])" ; - string = $(m[3]) ; - } - } - string ?= "" ; - result = "$(result)$(string)" ; - return $(result) ; -} - - -# Replaces occurrences of a match string in a given string and returns the new -# string. The match string can be a regex expression. -# -rule replace ( - string # The string to modify. - match # The characters to replace. - replacement # The string to replace with. - ) -{ - local result = "" ; - local parts = 1 ; - while $(parts) - { - parts = [ MATCH ^(.*)($(match))(.*) : $(string) ] ; - if $(parts) - { - parts += "" ; - result = "$(replacement)$(parts[3])$(result)" ; - string = $(parts[1]) ; - } - } - string ?= "" ; - result = "$(string)$(result)" ; - return $(result) ; -} - - -# Replaces occurrences of a match string in a given list of strings and returns -# a list of new strings. The match string can be a regex expression. -# -# list - the list of strings to modify. -# match - the search expression. -# replacement - the string to replace with. -# -rule replace-list ( list * : match : replacement ) -{ - local result ; - for local e in $(list) - { - result += [ replace $(e) $(match) $(replacement) ] ; - } - return $(result) ; -} - - -rule __test__ ( ) -{ - import assert ; - - assert.result a b c : split "a/b/c" / ; - assert.result "" a b c : split "/a/b/c" / ; - assert.result "" "" a b c : split "//a/b/c" / ; - assert.result "" a "" b c : split "/a//b/c" / ; - assert.result "" a "" b c "" : split "/a//b/c/" / ; - assert.result "" a "" b c "" "" : split "/a//b/c//" / ; - - assert.result a c b d - : match (.)(.)(.)(.) : abcd : 1 3 2 4 ; - - assert.result a b c d - : match (.)(.)(.)(.) 
: abcd ; - - assert.result ababab cddc - : match ((ab)*)([cd]+) : abababcddc : 1 3 ; - - assert.result a.h c.h - : transform <a.h> \"b.h\" <c.h> : <(.*)> ; - - assert.result a.h b.h c.h - : transform <a.h> \"b.h\" <c.h> : <([^>]*)>|\"([^\"]*)\" : 1 2 ; - - assert.result "^<?xml version=\"1.0\"^>" - : escape "<?xml version=\"1.0\">" : "&|()<>^" : "^" ; - - assert.result "<?xml version=\\\"1.0\\\">" - : escape "<?xml version=\"1.0\">" : "\\\"" : "\\" ; - - assert.result "string string " : replace "string string " " " " " ; - assert.result " string string" : replace " string string" " " " " ; - assert.result "string string" : replace "string string" " " " " ; - assert.result "-" : replace "&" "&" "-" ; - - assert.result "-" "a-b" : replace-list "&" "a&b" : "&" : "-" ; -} diff --git a/jam-files/boost-build/util/regex.py b/jam-files/boost-build/util/regex.py deleted file mode 100644 index 29e26ecf..00000000 --- a/jam-files/boost-build/util/regex.py +++ /dev/null @@ -1,25 +0,0 @@ -# (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -import re - -def transform (list, pattern, indices = [1]): - """ Matches all elements of 'list' agains the 'pattern' - and returns a list of the elements indicated by indices of - all successfull matches. If 'indices' is omitted returns - a list of first paranthethised groups of all successfull - matches. - """ - result = [] - - for e in list: - m = re.match (pattern, e) - - if m: - for i in indices: - result.append (m.group (i)) - - return result - diff --git a/jam-files/boost-build/util/sequence.jam b/jam-files/boost-build/util/sequence.jam deleted file mode 100644 index 73919a65..00000000 --- a/jam-files/boost-build/util/sequence.jam +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright 2001, 2002, 2003 Dave Abrahams -# Copyright 2006 Rene Rivera -# Copyright 2002, 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import assert ; -import numbers ; -import modules ; - - -# Note that algorithms in this module execute largely in the caller's module -# namespace, so that local rules can be used as function objects. Also note that -# most predicates can be multi-element lists. In that case, all but the first -# element are prepended to the first argument which is passed to the rule named -# by the first element. - - -# Return the elements e of $(sequence) for which [ $(predicate) e ] has a -# non-null value. -# -rule filter ( predicate + : sequence * ) -{ - local caller = [ CALLER_MODULE ] ; - local result ; - - for local e in $(sequence) - { - if [ modules.call-in $(caller) : $(predicate) $(e) ] - { - result += $(e) ; - } - } - return $(result) ; -} - - -# Return a new sequence consisting of [ $(function) $(e) ] for each element e of -# $(sequence). -# -rule transform ( function + : sequence * ) -{ - local caller = [ CALLER_MODULE ] ; - local result ; - - for local e in $(sequence) - { - result += [ modules.call-in $(caller) : $(function) $(e) ] ; - } - return $(result) ; -} - - -rule reverse ( s * ) -{ - local r ; - for local x in $(s) - { - r = $(x) $(r) ; - } - return $(r) ; -} - - -rule less ( a b ) -{ - if $(a) < $(b) - { - return true ; - } -} - - -# Insertion-sort s using the BinaryPredicate ordered. 
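The rule that follows is a plain insertion sort driven by a binary predicate. For comparison, the same algorithm in Python (a sketch, not part of the deleted sources):

    def insertion_sort(seq, ordered=lambda a, b: a < b):
        # Insert each element into the sorted prefix, scanning past entries
        # the predicate orders before it -- the same head/tail walk as the
        # Jam rule performs on list tails.
        result = []
        for x in seq:
            i = 0
            while i < len(result) and ordered(result[i], x):
                i += 1
            result.insert(i, x)
        return result

    assert insertion_sort([9, 6, 5, 3, 8, 7, 1, 2, 4]) == [1, 2, 3, 4, 5, 6, 7, 8, 9]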
-# -rule insertion-sort ( s * : ordered * ) -{ - if ! $(ordered) - { - return [ SORT $(s) ] ; - } - else - { - local caller = [ CALLER_MODULE ] ; - ordered ?= sequence.less ; - local result = $(s[1]) ; - if $(ordered) = sequence.less - { - local head tail ; - for local x in $(s[2-]) - { - head = ; - tail = $(result) ; - while $(tail) && ( $(tail[1]) < $(x) ) - { - head += $(tail[1]) ; - tail = $(tail[2-]) ; - } - result = $(head) $(x) $(tail) ; - } - } - else - { - for local x in $(s[2-]) - { - local head tail ; - tail = $(result) ; - while $(tail) && [ modules.call-in $(caller) : $(ordered) $(tail[1]) $(x) ] - { - head += $(tail[1]) ; - tail = $(tail[2-]) ; - } - result = $(head) $(x) $(tail) ; - } - } - - return $(result) ; - } -} - - -# Merge two ordered sequences using the BinaryPredicate ordered. -# -rule merge ( s1 * : s2 * : ordered * ) -{ - ordered ?= sequence.less ; - local result__ ; - local caller = [ CALLER_MODULE ] ; - - while $(s1) && $(s2) - { - if [ modules.call-in $(caller) : $(ordered) $(s1[1]) $(s2[1]) ] - { - result__ += $(s1[1]) ; - s1 = $(s1[2-]) ; - } - else if [ modules.call-in $(caller) : $(ordered) $(s2[1]) $(s1[1]) ] - { - result__ += $(s2[1]) ; - s2 = $(s2[2-]) ; - } - else - { - s2 = $(s2[2-]) ; - } - - } - result__ += $(s1) ; - result__ += $(s2) ; - - return $(result__) ; -} - - -# Join the elements of s into one long string. If joint is supplied, it is used -# as a separator. -# -rule join ( s * : joint ? ) -{ - joint ?= "" ; - return $(s:J=$(joint)) ; -} - - -# Find the length of any sequence. -# -rule length ( s * ) -{ - local result = 0 ; - for local i in $(s) - { - result = [ CALC $(result) + 1 ] ; - } - return $(result) ; -} - - -rule unique ( list * : stable ? ) -{ - local result ; - local prev ; - if $(stable) - { - for local f in $(list) - { - if ! $(f) in $(result) - { - result += $(f) ; - } - } - } - else - { - for local i in [ SORT $(list) ] - { - if $(i) != $(prev) - { - result += $(i) ; - } - prev = $(i) ; - } - } - return $(result) ; -} - - -# Returns the maximum number in 'elements'. Uses 'ordered' for comparisons or -# 'numbers.less' if none is provided. -# -rule max-element ( elements + : ordered ? ) -{ - ordered ?= numbers.less ; - - local max = $(elements[1]) ; - for local e in $(elements[2-]) - { - if [ $(ordered) $(max) $(e) ] - { - max = $(e) ; - } - } - return $(max) ; -} - - -# Returns all of 'elements' for which corresponding element in parallel list -# 'rank' is equal to the maximum value in 'rank'. -# -rule select-highest-ranked ( elements * : ranks * ) -{ - if $(elements) - { - local max-rank = [ max-element $(ranks) ] ; - local result ; - while $(elements) - { - if $(ranks[1]) = $(max-rank) - { - result += $(elements[1]) ; - } - elements = $(elements[2-]) ; - ranks = $(ranks[2-]) ; - } - return $(result) ; - } -} -NATIVE_RULE sequence : select-highest-ranked ; - - -rule __test__ ( ) -{ - # Use a unique module so we can test the use of local rules. - module sequence.__test__ - { - import assert ; - import sequence ; - - local rule is-even ( n ) - { - if $(n) in 0 2 4 6 8 - { - return true ; - } - } - - assert.result 4 6 4 2 8 : sequence.filter is-even : 1 4 6 3 4 7 2 3 8 ; - - # Test that argument binding works. 
- local rule is-equal-test ( x y ) - { - if $(x) = $(y) - { - return true ; - } - } - - assert.result 3 3 3 : sequence.filter is-equal-test 3 : 1 2 3 4 3 5 3 5 7 ; - - local rule append-x ( n ) - { - return $(n)x ; - } - - assert.result 1x 2x 3x : sequence.transform append-x : 1 2 3 ; - - local rule repeat2 ( x ) - { - return $(x) $(x) ; - } - - assert.result 1 1 2 2 3 3 : sequence.transform repeat2 : 1 2 3 ; - - local rule test-greater ( a b ) - { - if $(a) > $(b) - { - return true ; - } - } - assert.result 1 2 3 4 5 6 7 8 9 : sequence.insertion-sort 9 6 5 3 8 7 1 2 4 ; - assert.result 9 8 7 6 5 4 3 2 1 : sequence.insertion-sort 9 6 5 3 8 7 1 2 4 : test-greater ; - assert.result 1 2 3 4 5 6 : sequence.merge 1 3 5 : 2 4 6 ; - assert.result 6 5 4 3 2 1 : sequence.merge 5 3 1 : 6 4 2 : test-greater ; - assert.result 1 2 3 : sequence.merge 1 2 3 : ; - assert.result 1 : sequence.merge 1 : 1 ; - - assert.result foo-bar-baz : sequence.join foo bar baz : - ; - assert.result substandard : sequence.join sub stan dard ; - assert.result 3.0.1 : sequence.join 3.0.1 : - ; - - assert.result 0 : sequence.length ; - assert.result 3 : sequence.length a b c ; - assert.result 17 : sequence.length 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 ; - - assert.result 1 : sequence.length a ; - assert.result 10 : sequence.length a b c d e f g h i j ; - assert.result 11 : sequence.length a b c d e f g h i j k ; - assert.result 12 : sequence.length a b c d e f g h i j k l ; - - local p2 = x ; - for local i in 1 2 3 4 5 6 7 8 - { - p2 = $(p2) $(p2) ; - } - assert.result 256 : sequence.length $(p2) ; - - assert.result 1 2 3 4 5 : sequence.unique 1 2 3 2 4 3 3 5 5 5 ; - - assert.result 5 : sequence.max-element 1 3 5 0 4 ; - - assert.result e-3 h-3 : sequence.select-highest-ranked e-1 e-3 h-3 m-2 : 1 3 3 2 ; - - assert.result 7 6 5 4 3 2 1 : sequence.reverse 1 2 3 4 5 6 7 ; - } -} diff --git a/jam-files/boost-build/util/sequence.py b/jam-files/boost-build/util/sequence.py deleted file mode 100644 index 1d32efd2..00000000 --- a/jam-files/boost-build/util/sequence.py +++ /dev/null @@ -1,50 +0,0 @@ -# (C) Copyright David Abrahams 2002. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -import operator - -def unique (values, stable=False): - if stable: - s = set() - r = [] - for v in values: - if not v in s: - r.append(v) - s.add(v) - return r - else: - return list(set(values)) - -def max_element (elements, ordered = None): - """ Returns the maximum number in 'elements'. Uses 'ordered' for comparisons, - or '<' is none is provided. - """ - if not ordered: ordered = operator.lt - - max = elements [0] - for e in elements [1:]: - if ordered (max, e): - max = e - - return max - -def select_highest_ranked (elements, ranks): - """ Returns all of 'elements' for which corresponding element in parallel - list 'rank' is equal to the maximum value in 'rank'. 
- """ - if not elements: - return [] - - max_rank = max_element (ranks) - - result = [] - while elements: - if ranks [0] == max_rank: - result.append (elements [0]) - - elements = elements [1:] - ranks = ranks [1:] - - return result diff --git a/jam-files/boost-build/util/set.jam b/jam-files/boost-build/util/set.jam deleted file mode 100644 index fc179134..00000000 --- a/jam-files/boost-build/util/set.jam +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2001, 2002 Dave Abrahams -# Copyright 2003 Vladimir Prus -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -class set -{ - rule __init__ ( ) - { - } - - rule add ( elements * ) - { - for local e in $(elements) - { - if ! $($(e)) - { - $(e) = 1 ; - self.result += $(e) ; - } - } - } - - rule contains ( element ) - { - return $($(element)) ; - } - - rule list ( ) - { - return $(self.result) ; - } -} - - - -# Returns the elements of set1 that are not in set2. -# -rule difference ( set1 * : set2 * ) -{ - local result = ; - for local element in $(set1) - { - if ! ( $(element) in $(set2) ) - { - result += $(element) ; - } - } - return $(result) ; -} - -NATIVE_RULE set : difference ; - - -# Removes all the items appearing in both set1 & set2. -# -rule intersection ( set1 * : set2 * ) -{ - local result ; - for local v in $(set1) - { - if $(v) in $(set2) - { - result += $(v) ; - } - } - return $(result) ; -} - - -# Returns whether set1 & set2 contain the same elements. Note that this ignores -# any element ordering differences as well as any element duplication. -# -rule equal ( set1 * : set2 * ) -{ - if $(set1) in $(set2) && ( $(set2) in $(set1) ) - { - return true ; - } -} - - -rule __test__ ( ) -{ - import assert ; - - assert.result 0 1 4 6 8 9 : difference 0 1 2 3 4 5 6 7 8 9 : 2 3 5 7 ; - assert.result 2 5 7 : intersection 0 1 2 4 5 6 7 8 9 : 2 3 5 7 ; - - assert.true equal : ; - assert.true equal 1 1 2 3 : 3 2 2 1 ; - assert.false equal 2 3 : 3 2 2 1 ; -} diff --git a/jam-files/boost-build/util/set.py b/jam-files/boost-build/util/set.py deleted file mode 100644 index dc7cf328..00000000 --- a/jam-files/boost-build/util/set.py +++ /dev/null @@ -1,42 +0,0 @@ -# (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -from utility import to_seq - -def difference (b, a): - """ Returns the elements of B that are not in A. - """ - result = [] - for element in b: - if not element in a: - result.append (element) - - return result - -def intersection (set1, set2): - """ Removes from set1 any items which don't appear in set2 and returns the result. - """ - result = [] - for v in set1: - if v in set2: - result.append (v) - return result - -def contains (small, large): - """ Returns true iff all elements of 'small' exist in 'large'. - """ - small = to_seq (small) - large = to_seq (large) - - for s in small: - if not s in large: - return False - return True - -def equal (a, b): - """ Returns True iff 'a' contains the same elements as 'b', irrespective of their order. - # TODO: Python 2.4 has a proper set class. 
- """ - return contains (a, b) and contains (b, a) diff --git a/jam-files/boost-build/util/string.jam b/jam-files/boost-build/util/string.jam deleted file mode 100644 index a39ed119..00000000 --- a/jam-files/boost-build/util/string.jam +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2002 Dave Abrahams -# Copyright 2002, 2003 Rene Rivera -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import regex ; - - -# Characters considered whitespace, as a list. -.whitespace-chars = " " " " " -" ; - -# Characters considered whitespace, as a single string. -.whitespace = $(.whitespace-chars:J="") ; - - -# Returns the canonical set of whitespace characters, as a list. -# -rule whitespace-chars ( ) -{ - return $(.whitespace-chars) ; -} - - -# Returns the canonical set of whitespace characters, as a single string. -# -rule whitespace ( ) -{ - return $(.whitespace) ; -} - - -# Splits the given string into a list of strings composed of each character of -# the string in sequence. -# -rule chars ( - string # The string to split. - ) -{ - local result ; - while $(string) - { - local s = [ MATCH (.?)(.?)(.?)(.?)(.?)(.?)(.?)(.?)(.*) : $(string) ] ; - string = $(s[9]) ; - result += $(s[1-8]) ; - } - - # Trim off empty strings. - while $(result[1]) && ! $(result[-1]) - { - result = $(result[1--2]) ; - } - - return $(result) ; -} - - -# Apply a set of standard transformations to string to produce an abbreviation -# no more than 5 characters long. -# -rule abbreviate ( string ) -{ - local r = $(.abbreviated-$(string)) ; - if $(r) - { - return $(r) ; - } - # Anything less than 4 characters gets no abbreviation. - else if ! [ MATCH (....) : $(string) ] - { - .abbreviated-$(string) = $(string) ; - return $(string) ; - } - else - { - # Separate the initial letter in case it's a vowel. - local s1 = [ MATCH ^(.)(.*) : $(string) ] ; - - # Drop trailing "ing". - local s2 = [ MATCH ^(.*)ing$ : $(s1[2]) ] ; - s2 ?= $(s1[2]) ; - - # Reduce all doubled characters to one. - local last = "" ; - for local c in [ chars $(s2) ] - { - if $(c) != $(last) - { - r += $(c) ; - last = $(c) ; - } - } - s2 = $(r:J="") ; - - # Chop all vowels out of the remainder. - s2 = [ regex.replace $(s2) [AEIOUaeiou] "" ] ; - - # Shorten remaining consonants to 4 characters. - s2 = [ MATCH ^(.?.?.?.?) : $(s2) ] ; - - # Glue the initial character back on to the front. - s2 = $(s1[1])$(s2) ; - - .abbreviated-$(string) = $(s2) ; - return $(s2) ; - } -} - - -# Concatenates the given strings, inserting the given separator between each -# string. -# -rule join ( - strings * # The strings to join. - : separator ? # The optional separator. - ) -{ - separator ?= "" ; - return $(strings:J=$(separator)) ; -} - - -# Split a string into whitespace separated words. -# -rule words ( - string # The string to split. - : whitespace * # Optional, characters to consider as whitespace. - ) -{ - whitespace = $(whitespace:J="") ; - whitespace ?= $(.whitespace) ; - local w = ; - while $(string) - { - string = [ MATCH "^[$(whitespace)]*([^$(whitespace)]*)(.*)" : $(string) ] ; - if $(string[1]) && $(string[1]) != "" - { - w += $(string[1]) ; - } - string = $(string[2]) ; - } - return $(w) ; -} - - -# Check that the given string is composed entirely of whitespace. -# -rule is-whitespace ( - string ? # The string to test. - ) -{ - if ! 
$(string) { return true ; } - else if $(string) = "" { return true ; } - else if [ MATCH "^([$(.whitespace)]+)$" : $(string) ] { return true ; } - else { return ; } -} - -rule __test__ ( ) -{ - import assert ; - assert.result a b c : chars abc ; - - assert.result rntm : abbreviate runtime ; - assert.result ovrld : abbreviate overload ; - assert.result dbg : abbreviate debugging ; - assert.result async : abbreviate asynchronous ; - assert.result pop : abbreviate pop ; - assert.result aaa : abbreviate aaa ; - assert.result qck : abbreviate quack ; - assert.result sttc : abbreviate static ; - - # Check boundary cases. - assert.result a : chars a ; - assert.result : chars "" ; - assert.result a b c d e f g h : chars abcdefgh ; - assert.result a b c d e f g h i : chars abcdefghi ; - assert.result a b c d e f g h i j : chars abcdefghij ; - assert.result a b c d e f g h i j k : chars abcdefghijk ; - - assert.result a//b/c/d : join a "" b c d : / ; - assert.result abcd : join a "" b c d ; - - assert.result a b c : words "a b c" ; - - assert.true is-whitespace " " ; - assert.false is-whitespace " a b c " ; - assert.true is-whitespace "" ; - assert.true is-whitespace ; -} diff --git a/jam-files/boost-build/util/utility.jam b/jam-files/boost-build/util/utility.jam deleted file mode 100644 index c46747f5..00000000 --- a/jam-files/boost-build/util/utility.jam +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright 2001, 2002 Dave Abrahams -# Copyright 2002, 2003, 2004, 2005 Vladimir Prus -# Copyright 2008 Jurko Gospodnetic -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -import "class" : is-instance ; -import errors ; - - -# For all elements of 'list' which do not already have 'suffix', add 'suffix'. -# -rule apply-default-suffix ( suffix : list * ) -{ - local result ; - for local i in $(list) - { - if $(i:S) = $(suffix) - { - result += $(i) ; - } - else - { - result += $(i)$(suffix) ; - } - } - return $(result) ; -} - - -# If 'name' contains a dot, returns the part before the last dot. If 'name' -# contains no dot, returns it unmodified. -# -rule basename ( name ) -{ - if $(name:S) - { - name = $(name:B) ; - } - return $(name) ; -} - - -# Return the file of the caller of the rule that called caller-file. -# -rule caller-file ( ) -{ - local bt = [ BACKTRACE ] ; - return $(bt[9]) ; -} - - -# Tests if 'a' is equal to 'b'. If 'a' is a class instance, calls its 'equal' -# method. Uses ordinary jam's comparison otherwise. -# -rule equal ( a b ) -{ - if [ is-instance $(a) ] - { - return [ $(a).equal $(b) ] ; - } - else - { - if $(a) = $(b) - { - return true ; - } - } -} - - -# Tests if 'a' is less than 'b'. If 'a' is a class instance, calls its 'less' -# method. Uses ordinary jam's comparison otherwise. -# -rule less ( a b ) -{ - if [ is-instance $(a) ] - { - return [ $(a).less $(b) ] ; - } - else - { - if $(a) < $(b) - { - return true ; - } - } -} - - -# Returns the textual representation of argument. If it is a class instance, -# class its 'str' method. Otherwise, returns the argument. -# -rule str ( value ) -{ - if [ is-instance $(value) ] - { - return [ $(value).str ] ; - } - else - { - return $(value) ; - } -} - - -# Accepts a list of gristed values and returns them ungristed. Reports an error -# in case any of the passed parameters is not gristed, i.e. surrounded in angle -# brackets < and >. 
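Grist is Boost.Build's convention of wrapping a value in single angle brackets; ungrist, defined next (and ported to Python in utility.py further down in this patch), strips exactly one layer and rejects anything else. The core check reduces to one anchored regex; a sketch mirroring both versions:

    import re

    def ungrist(name):
        # Strip exactly one layer of surrounding angle brackets.
        m = re.match(r"^<(.*)>$", name)
        if not m:
            raise ValueError("%s is not of the form <.*>" % name)
        return m.group(1)

    assert ungrist("<foo>") == "foo"
    assert ungrist("<<foo>>") == "<foo>"  # only the outermost layer comes off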
-# -rule ungrist ( names * ) -{ - local result ; - for local name in $(names) - { - local stripped = [ MATCH ^<(.*)>$ : $(name) ] ; - if ! $(stripped) - { - errors.error "in ungrist $(names) : $(name) is not of the form <.*>" ; - } - result += $(stripped) ; - } - return $(result) ; -} - - -# If the passed value is quoted, unquotes it. Otherwise returns the value -# unchanged. -# -rule unquote ( value ? ) -{ - local match-result = [ MATCH ^(\")(.*)(\")$ : $(value) ] ; - if $(match-result) - { - return $(match-result[2]) ; - } - else - { - return $(value) ; - } -} - - -rule __test__ ( ) -{ - import assert ; - import "class" : new ; - import errors : try catch ; - - assert.result 123 : str 123 ; - - class test-class__ - { - rule __init__ ( ) { } - rule str ( ) { return "str-test-class" ; } - rule less ( a ) { return "yes, of course!" ; } - rule equal ( a ) { return "not sure" ; } - } - - assert.result "str-test-class" : str [ new test-class__ ] ; - assert.true less 1 2 ; - assert.false less 2 1 ; - assert.result "yes, of course!" : less [ new test-class__ ] 1 ; - assert.true equal 1 1 ; - assert.false equal 1 2 ; - assert.result "not sure" : equal [ new test-class__ ] 1 ; - - assert.result foo.lib foo.lib : apply-default-suffix .lib : foo.lib foo.lib - ; - - assert.result foo : basename foo ; - assert.result foo : basename foo.so ; - assert.result foo.so : basename foo.so.1 ; - - assert.result : unquote ; - assert.result "" : unquote "" ; - assert.result foo : unquote foo ; - assert.result \"foo : unquote \"foo ; - assert.result foo\" : unquote foo\" ; - assert.result foo : unquote \"foo\" ; - assert.result \"foo\" : unquote \"\"foo\"\" ; - - assert.result : ungrist ; - assert.result foo : ungrist <foo> ; - assert.result <foo> : ungrist <<foo>> ; - assert.result foo bar : ungrist <foo> <bar> ; - - try ; - { - ungrist "" ; - } - catch "in ungrist : is not of the form <.*>" ; - - try ; - { - ungrist <> ; - } - catch "in ungrist <> : <> is not of the form <.*>" ; - - try ; - { - ungrist foo ; - } - catch "in ungrist foo : foo is not of the form <.*>" ; - - try ; - { - ungrist <foo ; - } - catch "in ungrist <foo : <foo is not of the form <.*>" ; - - try ; - { - ungrist foo> ; - } - catch "in ungrist foo> : foo> is not of the form <.*>" ; - - try ; - { - ungrist foo bar ; - } - catch "in ungrist foo : foo is not of the form <.*>" ; - - try ; - { - ungrist foo <bar> ; - } - catch "in ungrist foo : foo is not of the form <.*>" ; - - try ; - { - ungrist <foo> bar ; - } - catch "in ungrist bar : bar is not of the form <.*>" ; -} diff --git a/jam-files/boost-build/util/utility.py b/jam-files/boost-build/util/utility.py deleted file mode 100644 index afea765b..00000000 --- a/jam-files/boost-build/util/utility.py +++ /dev/null @@ -1,155 +0,0 @@ -# (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and -# distribute this software is granted provided this copyright notice appears in -# all copies. This software is provided "as is" without express or implied -# warranty, and with no claim as to its suitability for any purpose. - -""" Utility functions to add/remove/get grists. - Grists are string enclosed in angle brackets (<>) that are used as prefixes. See Jam for more information. -""" - -import re -import os -import bjam -from b2.exceptions import * - -__re_grist_and_value = re.compile (r'(<[^>]*>)(.*)') -__re_grist_content = re.compile ('^<(.*)>$') -__re_backslash = re.compile (r'\\') - -def to_seq (value): - """ If value is a sequence, returns it. 
- If it is a string, returns a sequence with value as its sole element. - """ - if not value: - return [] - - if isinstance (value, str): - return [value] - - else: - return value - -def replace_references_by_objects (manager, refs): - objs = [] - for r in refs: - objs.append (manager.get_object (r)) - return objs - -def add_grist (features): - """ Transform a string by bracketing it with "<>". If already bracketed, does nothing. - features: one string or a sequence of strings - return: the gristed string, if features is a string, or a sequence of gristed strings, if features is a sequence - """ - - def grist_one (feature): - if feature [0] != '<' and feature [len (feature) - 1] != '>': - return '<' + feature + '>' - else: - return feature - - if isinstance (features, str): - return grist_one (features) - else: - return [ grist_one (feature) for feature in features ] - -def replace_grist (features, new_grist): - """ Replaces the grist of a string by a new one. - Returns the string with the new grist. - """ - def replace_grist_one (name, new_grist): - split = __re_grist_and_value.match (name) - if not split: - return new_grist + name - else: - return new_grist + split.group (2) - - if isinstance (features, str): - return replace_grist_one (features, new_grist) - else: - return [ replace_grist_one (feature, new_grist) for feature in features ] - -def get_value (property): - """ Gets the value of a property, that is, the part following the grist, if any. - """ - return replace_grist (property, '') - -def get_grist (value): - """ Returns the grist of a string. - If value is a sequence, does it for every value and returns the result as a sequence. - """ - def get_grist_one (name): - split = __re_grist_and_value.match (name) - if not split: - return '' - else: - return split.group (1) - - if isinstance (value, str): - return get_grist_one (value) - else: - return [ get_grist_one (v) for v in value ] - -def ungrist (value): - """ Returns the value without grist. - If value is a sequence, does it for every value and returns the result as a sequence. - """ - def ungrist_one (value): - stripped = __re_grist_content.match (value) - if not stripped: - raise BaseException ("in ungrist: '%s' is not of the form <.*>" % value) - - return stripped.group (1) - - if isinstance (value, str): - return ungrist_one (value) - else: - return [ ungrist_one (v) for v in value ] - -def replace_suffix (name, new_suffix): - """ Replaces the suffix of name by new_suffix. - If no suffix exists, the new one is added. - """ - split = os.path.splitext (name) - return split [0] + new_suffix - -def forward_slashes (s): - """ Converts all backslashes to forward slashes. - """ - return __re_backslash.sub ('/', s) - - -def split_action_id (id): - """ Splits an id in the toolset and specific rule parts. E.g. - 'gcc.compile.c++' returns ('gcc', 'compile.c++') - """ - split = id.split ('.', 1) - toolset = split [0] - name = '' - if len (split) > 1: - name = split [1] - return (toolset, name) - -def os_name (): - result = bjam.variable("OS") - assert(len(result) == 1) - return result[0] - -def platform (): - return bjam.variable("OSPLAT") - -def os_version (): - return bjam.variable("OSVER") - -def on_windows (): - """ Returns true if running on windows, whether in cygwin or not. 
- """ - if bjam.variable("NT"): - return True - - elif bjam.variable("UNIX"): - - uname = bjam.variable("JAMUNAME") - if uname and uname[0].startswith("CYGWIN"): - return True - - return False diff --git a/jam-files/engine/Jambase b/jam-files/engine/Jambase deleted file mode 100644 index 94f8fbde..00000000 --- a/jam-files/engine/Jambase +++ /dev/null @@ -1,2473 +0,0 @@ -# -# /+\ -# +\ Copyright 1993, 2000 Christopher Seiwald. -# \+/ -# -# This file is part of Jam - see jam.c for Copyright information. -# - -# This file is ALSO: -# Copyright 2001-2004 David Abrahams. -# Copyright 2002-2004 Rene Rivera. -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -if $(NT) -{ - SLASH ?= \\ ; -} -SLASH ?= / ; - - -# Glob for patterns in the directories starting from the given start directory, -# up to and including the root of the file-system. We stop globbing as soon as -# we find at least one match. -# -rule find-to-root ( dir : patterns + ) -{ - local globs = [ GLOB $(dir) : $(patterns) ] ; - while ! $(globs) && $(dir:P) != $(dir) - { - dir = $(dir:P) ; - globs = [ GLOB $(dir) : $(patterns) ] ; - } - return $(globs) ; -} - - -# This global will hold the location of the user's boost-build.jam file. -.boost-build-file = ; - -# This global will hold the location of the build system bootstrap file. -.bootstrap-file = ; - -# Remember the value of $(BOOST_BUILD_PATH) supplied to us by the user. -BOOST_BUILD_PATH.user-value = $(BOOST_BUILD_PATH) ; - -# On Unix only, when BOOST_BUILD_PATH is not supplied by the user, set it to a -# sensible default value. This allows Boost.Build to work without any -# environment variables, which is good in itself and also required by the Debian -# Policy. -if ! $(BOOST_BUILD_PATH) && $(UNIX) -{ - BOOST_BUILD_PATH = /usr/share/boost-build ; -} - - -rule _poke ( module-name ? : variables + : value * ) -{ - module $(<) - { - $(>) = $(3) ; - } -} - - -# This rule can be invoked from an optional user's boost-build.jam file to both -# indicate where to find the build system files, and to load them. The path -# indicated is relative to the location of the boost-build.jam file. -# -rule boost-build ( dir ? ) -{ - if $(.bootstrap-file) - { - ECHO "Error: Illegal attempt to re-bootstrap the build system by invoking" ; - ECHO ; - ECHO " 'boost-build" $(dir) ";'" ; - ECHO ; - EXIT "Please consult the documentation at 'http://www.boost.org'." ; - } - - # Add the given directory to the path so we can find the build system. If - # dir is empty, has no effect. - BOOST_BUILD_PATH = $(dir:R=$(.boost-build-file:D)) $(BOOST_BUILD_PATH) ; - - # We might have just modified the *global* value of BOOST_BUILD_PATH. The - # code that loads the rest of Boost.Build, in particular the site-config.jam - # and user-config.jam configuration files uses os.environ, so we need to - # update the value there. - _poke .ENVIRON : BOOST_BUILD_PATH : $(BOOST_BUILD_PATH) ; - - # Try to find the build system bootstrap file 'bootstrap.jam'. - local bootstrap-file = [ GLOB $(BOOST_BUILD_PATH) : bootstrap.jam ] ; - .bootstrap-file = $(bootstrap-file[1]) ; - - # There is no bootstrap.jam we can find, exit with an error. - if ! $(.bootstrap-file) - { - ECHO "Unable to load Boost.Build: could not find build system." 
; - ECHO --------------------------------------------------------- ; - ECHO "$(.boost-build-file) attempted to load the build system by invoking" ; - ECHO ; - ECHO " 'boost-build" $(dir) ";'" ; - ECHO ; - ECHO "but we were unable to find \"bootstrap.jam\" in the specified directory" ; - ECHO "or in BOOST_BUILD_PATH (searching "$(BOOST_BUILD_PATH:J=", ")")." ; - ECHO ; - EXIT "Please consult the documentation at 'http://www.boost.org'." ; - } - - if [ MATCH .*(--debug-configuration).* : $(ARGV) ] - { - ECHO "notice: loading Boost.Build from" - [ NORMALIZE_PATH $(.bootstrap-file:D) ] ; - } - - # Load the build system, now that we know where to start from. - include $(.bootstrap-file) ; -} - - -if [ MATCH .*(b2).* : $(ARGV[1]:BL) ] - || [ MATCH .*(bjam).* : $(ARGV[1]:BL) ] - || $(BOOST_ROOT) # A temporary measure so Jam works with Boost.Build v1. -{ - # We attempt to load "boost-build.jam" by searching from the current - # invocation directory up to the root of the file-system. - # - # boost-build.jam is expected to invoke the "boost-build" rule to load the - # Boost.Build files. - - local search-path = $(BOOST_BUILD_PATH) $(BOOST_ROOT) ; - local self = [ SELF_PATH ] ; - local boost-build-relative = ../../share/boost-build ; - local self-based-path = [ NORMALIZE_PATH $(boost-build-relative:R=$(self)) ] ; - - local boost-build-files = - [ find-to-root [ PWD ] : boost-build.jam ] - [ GLOB $(self-based-path) : boost-build.jam ] - # Another temporary measure so Jam works with Boost.Build v1. - [ GLOB $(search-path) : boost-build.jam ] ; - - .boost-build-file = $(boost-build-files[1]) ; - - # There is no boost-build.jam we can find, exit with an error, and - # information. - if ! $(.boost-build-file) - { - ECHO "Unable to load Boost.Build: could not find \"boost-build.jam\"" ; - ECHO --------------------------------------------------------------- ; - - if ! [ MATCH .*(bjam).* : $(ARGV[1]:BL) ] - { - ECHO "BOOST_ROOT must be set, either in the environment, or " ; - ECHO "on the command-line with -sBOOST_ROOT=..., to the root" ; - ECHO "of the boost installation." ; - ECHO ; - } - - ECHO "Attempted search from" [ PWD ] "up to the root" ; - ECHO "at" $(self-based-path) ; - ECHO "and in these directories from BOOST_BUILD_PATH and BOOST_ROOT: "$(search-path:J=", ")"." ; - EXIT "Please consult the documentation at 'http://www.boost.org'." ; - } - - if [ MATCH .*(--debug-configuration).* : $(ARGV) ] - { - ECHO "notice: found boost-build.jam at" - [ NORMALIZE_PATH $(.boost-build-file) ] ; - } - - # Now load the boost-build.jam to get the build system loaded. This - # incidentaly loads the users jamfile and attempts to build targets. - # - # We also set it up so we can tell whether we are loading the new V2 system - # or the the old V1 system. - include $(.boost-build-file) ; - - # Check that, at minimum, the bootstrap file was found. - if ! $(.bootstrap-file) - { - ECHO "Unable to load Boost.Build" ; - ECHO -------------------------- ; - ECHO "\"$(.boost-build-file)\" was found by searching from" [ PWD ] "up to the root" ; - ECHO "and in these directories from BOOST_BUILD_PATH and BOOST_ROOT: "$(search-path:J=", ")"." ; - ECHO ; - ECHO "However, it failed to call the \"boost-build\" rule to indicate" ; - ECHO "the location of the build system." ; - ECHO ; - EXIT "Please consult the documentation at 'http://www.boost.org'." ; - } -} -else -{ - -# -# JAMBASE - jam 2.3 ruleset providing make(1)-like functionality -# -# Supports UNIX, NT, and VMS. 
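The bootstrap logic above hinges on one small search: starting from the invocation directory, walk parent directories up to the filesystem root and stop at the first level where boost-build.jam exists. In Python terms, a sketch with hypothetical names:

    import os

    def find_to_root(start_dir, names):
        # Walk from start_dir up to the filesystem root, returning the
        # matches found at the first directory level where any of 'names'
        # exist; an empty list means the search hit the root.
        d = os.path.abspath(start_dir)
        while True:
            hits = [os.path.join(d, n) for n in names
                    if os.path.exists(os.path.join(d, n))]
            if hits:
                return hits
            parent = os.path.dirname(d)
            if parent == d:  # reached the root without a match
                return []
            d = parent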
-# -# 12/27/93 (seiwald) - purturb library sources with SOURCE_GRIST -# 04/18/94 (seiwald) - use '?=' when setting OS specific vars -# 04/21/94 (seiwald) - do RmTemps together -# 05/05/94 (seiwald) - all supported C compilers support -o: relegate -# RELOCATE as an option; set Ranlib to "" to disable it -# 06/01/94 (seiwald) - new 'actions existing' to do existing sources -# 08/25/94 (seiwald) - new ObjectCcFlags rule to append to per-target CCFLAGS -# 08/29/94 (seiwald) - new ObjectHdrs rule to append to per-target HDRS -# 09/19/94 (seiwald) - LinkLibraries and Undefs now append -# - Rule names downshifted. -# 10/06/94 (seiwald) - Dumb yyacc stuff moved into Jamfile. -# 10/14/94 (seiwald) - (Crude) support for .s, .C, .cc, .cpp, and .f files. -# 01/08/95 (seiwald) - Shell now handled with awk, not sed -# 01/09/95 (seiwald) - Install* now take dest directory as target -# 01/10/95 (seiwald) - All entries sorted. -# 01/10/95 (seiwald) - NT support moved in, with LauraW's help. -# 01/10/95 (seiwald) - VMS support moved in. -# 02/06/95 (seiwald) - ObjectC++Flags and SubDirC++Flags added. -# 02/07/95 (seiwald) - Iron out when HDRSEARCH uses "" or SEARCH_SOURCE. -# 02/08/95 (seiwald) - SubDir works on VMS. -# 02/14/95 (seiwald) - MkDir and entourage. -# 04/30/95 (seiwald) - Use install -c flag so that it copies, not moves. -# 07/10/95 (taylor) - Support for Microsoft C++. -# 11/21/96 (peterk) - Support for BeOS -# 07/19/99 (sickel) - Support for Mac OS X Server (and maybe client) -# 02/18/00 (belmonte)- Support for Cygwin. - -# Special targets defined in this file: -# -# all - parent of first, shell, files, lib, exe -# first - first dependency of 'all', for potential initialization -# shell - parent of all Shell targets -# files - parent of all File targets -# lib - parent of all Library targets -# exe - parent of all Main targets -# dirs - parent of all MkDir targets -# clean - removes all Shell, File, Library, and Main targets -# uninstall - removes all Install targets -# - -# Rules defined by this file: -# -# as obj.o : source.s ; .s -> .o -# Bulk dir : files ; populate directory with many files -# Cc obj.o : source.c ; .c -> .o -# C++ obj.o : source.cc ; .cc -> .o -# Clean clean : sources ; remove sources with 'jam clean' -# File dest : source ; copy file -# Fortran obj.o : source.f ; .f -> .o -# GenFile source.c : program args ; make custom file -# Hardlink target : source ; make link from source to target -# HdrRule source : headers ; handle #includes -# InstallInto dir : sources ; install any files -# InstallBin dir : sources ; install binaries -# InstallLib dir : sources ; install files -# InstallFile dir : sources ; install files -# InstallMan dir : sources ; install man pages -# InstallShell dir : sources ; install shell scripts -# Lex source.c : source.l ; .l -> .c -# Library lib : source ; archive library from compiled sources -# LibraryFromObjects lib : objects ; archive library from objects -# LinkLibraries images : libraries ; bag libraries onto Mains -# Main image : source ; link executable from compiled sources -# MainFromObjects image : objects ; link executable from objects -# MkDir dir ; make a directory, if not there -# Object object : source ; compile object from source -# ObjectCcFlags source : flags ; add compiler flags for object -# ObjectC++Flags source : flags ; add compiler flags for object -# ObjectHdrs source : dirs ; add include directories for object -# Objects sources ; compile sources -# RmTemps target : sources ; remove temp sources after target made -# Setuid 
images ; mark executables Setuid -# SubDir TOP d1 d2 ... ; start a subdirectory Jamfile -# SubDirCcFlags flags ; add compiler flags until next SubDir -# SubDirC++Flags flags ; add compiler flags until next SubDir -# SubDirHdrs dirs ; add include dirs until next SubDir -# SubInclude TOP d1 d2 ... ; include a subdirectory Jamfile -# Shell exe : source ; make a shell executable -# Undefines images : symbols ; save undef's for linking -# UserObject object : source ; handle unknown suffixes for Object -# Yacc source.c : source.y ; .y -> .c -# -# Utility rules that have no side effects (not supported): -# -# FAppendSuffix f1 f2 ... : $(SUF) ; return $(<) with suffixes -# FConcat value ... ; return contatenated values -# FDirName d1 d2 ... ; return path from root to dir -# FGrist d1 d2 ... ; return d1!d2!... -# FGristFiles value ; return $(value:G=$(SOURCE_GRIST)) -# FGristSourceFiles value ; return $(value:G=$(SOURCE_GRIST)) -# FRelPath d1 : d2 ; return rel path from d1 to d2 -# FSubDir d1 d2 ... ; return path to root -# - - -# Brief review of the jam language: -# -# Statements: -# rule RULE - statements to process a rule -# actions RULE - system commands to carry out target update -# -# Modifiers on actions: -# together - multiple instances of same rule on target get executed -# once with their sources ($(>)) concatenated -# updated - refers to updated sources ($(>)) only -# ignore - ignore return status of command -# quietly - don't trace its execution unless verbose -# piecemeal - iterate command each time with a small subset of $(>) -# existing - refers to currently existing sources ($(>)) only -# bind vars - subject to binding before expanding in actions -# -# Special rules: -# ALWAYS - always build a target -# DEPENDS - builds the dependency graph -# ECHO - blurt out targets on stdout -# EXIT - blurt out targets and exit -# INCLUDES - marks sources as headers for target (a codependency) -# NOCARE - don't panic if the target can't be built -# NOUPDATE - create the target if needed but never update it -# NOTFILE - ignore the timestamp of the target (it's not a file) -# TEMPORARY - target need not be present if sources haven't changed -# -# Special variables set by jam: -# $(<) - targets of a rule (to the left of the :) -# $(>) - sources of a rule (to the right of the :) -# $(xxx) - true on xxx (UNIX, VMS, NT, OS2, MAC) -# $(OS) - name of OS - varies wildly -# $(JAMVERSION) - version number (2.3) -# -# Special variables used by jam: -# SEARCH - where to find something (used during binding and actions) -# LOCATE - where to plop something not found with SEARCH -# HDRRULE - rule to call to handle include files -# HDRSCAN - egrep regex to extract include files -# -# Special targets: -# all - default if none given on command line -# - -# Initialize variables -# - -# -# OS specific variable settings -# -if $(NT) -{ - # the list of supported toolsets on Windows NT and Windows 95/98 - # - local SUPPORTED_TOOLSETS = "BORLANDC" "VC7" "VISUALC" "VISUALC16" "INTELC" "WATCOM" - "MINGW" "LCC" ; - - # this variable holds the current toolset - # - TOOLSET = "" ; - - # if the JAM_TOOLSET environment variable is defined, check that it is - # one of our supported values - # - if $(JAM_TOOLSET) - { - local t ; - - for t in $(SUPPORTED_TOOLSETS) - { - $(t) = $($(t):J=" ") ; # reconstitute paths with spaces in them - if $(t) = $(JAM_TOOLSET) { TOOLSET = $(t) ; } - } - - if ! 
$(TOOLSET) - { - ECHO "The JAM_TOOLSET environment variable is defined but its value" ; - ECHO "is invalid, please use one of the following:" ; - ECHO ; - - for t in $(SUPPORTED_TOOLSETS) { ECHO " " $(t) ; } - EXIT ; - } - } - - # if TOOLSET is empty, we'll try to detect the toolset from other - # environment variables to remain backwards compatible with Jam 2.3 - # - if ! $(TOOLSET) - { - if $(BCCROOT) - { - TOOLSET = BORLANDC ; - BORLANDC = $(BCCROOT:J=" ") ; - } - else if $(MSVC) - { - TOOLSET = VISUALC16 ; - VISUALC16 = $(MSVC:J=" ") ; - } - else if $(MSVCNT) - { - TOOLSET = VISUALC ; - VISUALC = $(MSVCNT:J=" ") ; - } - else if $(MSVCDir) - { - TOOLSET = VISUALC ; - VISUALC = $(MSVCDir:J=" ") ; - } - else if $(MINGW) - { - TOOLSET = MINGW ; - } - else - { - ECHO "Jam cannot be run because, either:" ; - ECHO " a. You didn't set BOOST_ROOT to indicate the root of your" ; - ECHO " Boost installation." ; - ECHO " b. You are trying to use stock Jam but didn't indicate which" ; - ECHO " compilation toolset to use. To do so, follow these simple" ; - ECHO " instructions:" ; - ECHO ; - ECHO " - define one of the following environment variable, with the" ; - ECHO " appropriate value according to this list:" ; - ECHO ; - ECHO " Variable Toolset Description" ; - ECHO ; - ECHO " BORLANDC Borland C++ BC++ install path" ; - ECHO " VISUALC Microsoft Visual C++ VC++ install path" ; - ECHO " VISUALC16 Microsoft Visual C++ 16 bit VC++ 16 bit install" ; - ECHO " INTELC Intel C/C++ IC++ install path" ; - ECHO " WATCOM Watcom C/C++ Watcom install path" ; - ECHO " MINGW MinGW (gcc) MinGW install path" ; - ECHO " LCC Win32-LCC LCC-Win32 install path" ; - ECHO ; - ECHO " - define the JAM_TOOLSET environment variable with the *name*" ; - ECHO " of the toolset variable you want to use." 
; - ECHO ; - ECHO " e.g.: set VISUALC=C:\\Visual6" ; - ECHO " set JAM_TOOLSET=VISUALC" ; - EXIT ; - } - } - - CP ?= copy ; - RM ?= del /f/q ; - SLASH ?= \\ ; - SUFLIB ?= .lib ; - SUFOBJ ?= .obj ; - SUFEXE ?= .exe ; - - if $(TOOLSET) = BORLANDC - { - ECHO "Compiler is Borland C++" ; - - AR ?= tlib /C /P64 ; - CC ?= bcc32 ; - CCFLAGS ?= -q -y -d -v -w-par -w-ccc -w-rch -w-pro -w-aus ; - C++ ?= bcc32 ; - C++FLAGS ?= -q -y -d -v -w-par -w-ccc -w-rch -w-pro -w-aus -P ; - LINK ?= $(CC) ; - LINKFLAGS ?= $(CCFLAGS) ; - STDLIBPATH ?= $(BORLANDC)\\lib ; - STDHDRS ?= $(BORLANDC)\\include ; - NOARSCAN ?= true ; - } - else if $(TOOLSET) = VISUALC16 - { - ECHO "Compiler is Microsoft Visual C++ 16 bit" ; - - AR ?= lib /nologo ; - CC ?= cl /nologo ; - CCFLAGS ?= /D \"WIN\" ; - C++ ?= $(CC) ; - C++FLAGS ?= $(CCFLAGS) ; - LINK ?= $(CC) ; - LINKFLAGS ?= $(CCFLAGS) ; - LINKLIBS ?= - \"$(VISUALC16)\\lib\\mlibce.lib\" - \"$(VISUALC16)\\lib\\oldnames.lib\" - ; - LINKLIBS ?= ; - NOARSCAN ?= true ; - OPTIM ?= "" ; - STDHDRS ?= $(VISUALC16)\\include ; - UNDEFFLAG ?= "/u _" ; - } - else if $(TOOLSET) = VISUALC - { - ECHO "Compiler is Microsoft Visual C++" ; - - AR ?= lib ; - AS ?= masm386 ; - CC ?= cl /nologo ; - CCFLAGS ?= "" ; - C++ ?= $(CC) ; - C++FLAGS ?= $(CCFLAGS) ; - LINK ?= link /nologo ; - LINKFLAGS ?= "" ; - LINKLIBS ?= \"$(VISUALC)\\lib\\advapi32.lib\" - # $(VISUALC)\\lib\\libc.lib - # $(VISUALC)\\lib\\oldnames.lib - \"$(VISUALC)\\lib\\gdi32.lib\" - \"$(VISUALC)\\lib\\user32.lib\" - \"$(VISUALC)\\lib\\kernel32.lib\" ; - OPTIM ?= "" ; - STDHDRS ?= $(VISUALC)\\include ; - UNDEFFLAG ?= "/u _" ; - } - else if $(TOOLSET) = VC7 - { - ECHO "Compiler is Microsoft Visual C++ .NET" ; - - AR ?= lib ; - AS ?= masm386 ; - CC ?= cl /nologo ; - CCFLAGS ?= "" ; - C++ ?= $(CC) ; - C++FLAGS ?= $(CCFLAGS) ; - LINK ?= link /nologo ; - LINKFLAGS ?= "" ; - LINKLIBS ?= \"$(VISUALC)\\PlatformSDK\\lib\\advapi32.lib\" - # $(VISUALC)\\lib\\libc.lib - # $(VISUALC)\\lib\\oldnames.lib - \"$(VISUALC)\\PlatformSDK\\lib\\gdi32.lib\" - \"$(VISUALC)\\PlatformSDK\\lib\\user32.lib\" - \"$(VISUALC)\\PlatformSDK\\lib\\kernel32.lib\" ; - OPTIM ?= "" ; - STDHDRS ?= \"$(VISUALC)\\include\" - \"$(VISUALC)\\PlatformSDK\\include\" ; - UNDEFFLAG ?= "/u _" ; - } - else if $(TOOLSET) = INTELC - { - ECHO "Compiler is Intel C/C++" ; - - if ! $(VISUALC) - { - ECHO "As a special exception, when using the Intel C++ compiler, you need" ; - ECHO "to define the VISUALC environment variable to indicate the location" ; - ECHO "of your Visual C++ installation. Aborting.." ; - EXIT ; - } - - AR ?= lib ; - AS ?= masm386 ; - CC ?= icl /nologo ; - CCFLAGS ?= "" ; - C++ ?= $(CC) ; - C++FLAGS ?= $(CCFLAGS) ; - LINK ?= link /nologo ; - LINKFLAGS ?= "" ; - LINKLIBS ?= $(VISUALC)\\lib\\advapi32.lib - # $(VISUALC)\\lib\\libc.lib - # $(VISUALC)\\lib\\oldnames.lib - $(VISUALC)\\lib\\kernel32.lib - ; - OPTIM ?= "" ; - STDHDRS ?= $(INTELC)\include $(VISUALC)\\include ; - UNDEFFLAG ?= "/u _" ; - } - else if $(TOOLSET) = WATCOM - { - ECHO "Compiler is Watcom C/C++" ; - - AR ?= wlib ; - CC ?= wcc386 ; - CCFLAGS ?= /zq /DWIN32 /I$(WATCOM)\\h ; # zq=quiet - C++ ?= wpp386 ; - C++FLAGS ?= $(CCFLAGS) ; - CP ?= copy ; - DOT ?= . ; - DOTDOT ?= .. 
; - LINK ?= wcl386 ; - LINKFLAGS ?= /zq ; # zq=quiet - LINKLIBS ?= ; - MV ?= move ; - NOARSCAN ?= true ; - OPTIM ?= ; - RM ?= del /f ; - SLASH ?= \\ ; - STDHDRS ?= $(WATCOM)\\h $(WATCOM)\\h\\nt ; - SUFEXE ?= .exe ; - SUFLIB ?= .lib ; - SUFOBJ ?= .obj ; - UNDEFFLAG ?= "/u _" ; - } - else if $(TOOLSET) = MINGW - { - ECHO "Compiler is GCC with Mingw" ; - - AR ?= ar -ru ; - CC ?= gcc ; - CCFLAGS ?= "" ; - C++ ?= $(CC) ; - C++FLAGS ?= $(CCFLAGS) ; - LINK ?= $(CC) ; - LINKFLAGS ?= "" ; - LINKLIBS ?= "" ; - OPTIM ?= ; - SUFOBJ = .o ; - SUFLIB = .a ; - SLASH = / ; -# NOARSCAN ?= true ; - } - else if $(TOOLSET) = LCC - { - ECHO "Compiler is Win32-LCC" ; - - AR ?= lcclib ; - CC ?= lcc ; - CCFLAGS ?= "" ; - C++ ?= $(CC) ; - C++FLAGS ?= $(CCFLAGS) ; - LINK ?= lcclnk ; - LINKFLAGS ?= "" ; - LINKLIBS ?= "" ; - OPTIM ?= ; - NOARSCAN = true ; - } - else - { -# -# XXX: We need better comments here !! -# - EXIT On NT, set BCCROOT, MSVCNT, MINGW or MSVC to the root of the - Borland or Microsoft directories. ; - } - -} -else if $(OS2) -{ - # the list of supported toolsets on Windows NT and Windows 95/98 - # - local SUPPORTED_TOOLSETS = "EMX" "WATCOM" ; - - # this variable holds the current toolset - # - TOOLSET = "" ; - - # if the JAM_TOOLSET environment variable is defined, check that it is - # one of our supported values - # - if $(JAM_TOOLSET) - { - local t ; - - for t in $(SUPPORTED_TOOLSETS) - { - $(t) = $($(t):J=" ") ; # reconstitute paths with spaces in them - if $(t) = $(JAM_TOOLSET) { TOOLSET = $(t) ; } - } - - if ! $(TOOLSET) - { - ECHO "The JAM_TOOLSET environment variable is defined but its value" ; - ECHO "is invalid, please use one of the following:" ; - ECHO ; - - for t in $(SUPPORTED_TOOLSETS) { ECHO " " $(t) ; } - EXIT ; - } - } - - # if TOOLSET is empty, we'll try to detect the toolset from other - # environment variables to remain backwards compatible with Jam 2.3 - # - if ! $(TOOLSET) - { - if $(watcom) - { - WATCOM = $(watcom:J=" ") ; - TOOLSET = WATCOM ; - } - else - { - ECHO "Jam cannot be run because you didn't indicate which compilation toolset" ; - ECHO "to use. To do so, follow these simple instructions:" ; - ECHO ; - ECHO " - define one of the following environment variable, with the" ; - ECHO " appropriate value according to this list:" ; - ECHO ; - ECHO " Variable Toolset Description" ; - ECHO ; - ECHO " WATCOM Watcom C/C++ Watcom install path" ; - ECHO " EMX EMX (gcc) EMX install path" ; - ECHO " VISUALAGE IBM Visual Age C/C++ VisualAge install path" ; - ECHO ; - ECHO " - define the JAM_TOOLSET environment variable with the *name*" ; - ECHO " of the toolset variable you want to use." ; - ECHO ; - ECHO " e.g.: set WATCOM=C:\WATCOM" ; - ECHO " set JAM_TOOLSET=WATCOM" ; - ECHO ; - EXIT ; - } - } - - RM = del /f ; - CP = copy ; - MV ?= move ; - DOT ?= . ; - DOTDOT ?= .. 
; - SUFLIB ?= .lib ; - SUFOBJ ?= .obj ; - SUFEXE ?= .exe ; - - if $(TOOLSET) = WATCOM - { - AR ?= wlib ; - BINDIR ?= \\os2\\apps ; - CC ?= wcc386 ; - CCFLAGS ?= /zq /DOS2 /I$(WATCOM)\\h ; # zq=quiet - C++ ?= wpp386 ; - C++FLAGS ?= $(CCFLAGS) ; - LINK ?= wcl386 ; - LINKFLAGS ?= /zq ; # zq=quiet - LINKLIBS ?= ; - NOARSCAN ?= true ; - OPTIM ?= ; - SLASH ?= \\ ; - STDHDRS ?= $(WATCOM)\\h ; - UNDEFFLAG ?= "/u _" ; - } - else if $(TOOLSET) = EMX - { - ECHO "Compiler is GCC-EMX" ; - AR ?= ar -ru ; - CC ?= gcc ; - CCFLAGS ?= "" ; - C++ ?= $(CC) ; - C++FLAGS ?= $(CCFLAGS) ; - LINK ?= $(CC) ; - LINKFLAGS ?= "" ; - LINKLIBS ?= "" ; - OPTIM ?= ; - SUFOBJ = .o ; - SUFLIB = .a ; - UNDEFFLAG ?= "-U" ; - SLASH = / ; -# NOARSCAN ?= true ; - } - else - { - # should never happen - EXIT "Sorry, but the $(JAM_TOOLSET) toolset isn't supported for now" ; - } -} -else if $(VMS) -{ - C++ ?= cxx ; - C++FLAGS ?= ; - CC ?= cc ; - CCFLAGS ?= ; - CHMOD ?= set file/prot= ; - CP ?= copy/replace ; - CRELIB ?= true ; - DOT ?= [] ; - DOTDOT ?= [-] ; - EXEMODE ?= (w:e) ; - FILEMODE ?= (w:r) ; - HDRS ?= ; - LINK ?= link ; - LINKFLAGS ?= "" ; - LINKLIBS ?= ; - MKDIR ?= create/dir ; - MV ?= rename ; - OPTIM ?= "" ; - RM ?= delete ; - RUNVMS ?= mcr ; - SHELLMODE ?= (w:er) ; - SLASH ?= . ; - STDHDRS ?= decc$library_include ; - SUFEXE ?= .exe ; - SUFLIB ?= .olb ; - SUFOBJ ?= .obj ; - - switch $(OS) - { - case OPENVMS : CCFLAGS ?= /stand=vaxc ; - case VMS : LINKLIBS ?= sys$library:vaxcrtl.olb/lib ; - } -} -else if $(MAC) -{ - local OPT ; - - CW ?= "{CW}" ; - - MACHDRS ?= - "$(UMACHDRS):Universal:Interfaces:CIncludes" - "$(CW):MSL:MSL_C:MSL_Common:Include" - "$(CW):MSL:MSL_C:MSL_MacOS:Include" ; - - MACLIBS ?= - "$(CW):MacOS Support:Universal:Libraries:StubLibraries:Interfacelib" - "$(CW):MacOS Support:Universal:Libraries:StubLibraries:Mathlib" ; - - MPWLIBS ?= - "$(CW):MacOS Support:Libraries:Runtime:Runtime PPC:MSL MPWCRuntime.lib" - "$(CW):MSL:MSL_C:MSL_MacOS:Lib:PPC:MSL C.PPC MPW.Lib" ; - - MPWNLLIBS ?= - "$(CW):MacOS Support:Libraries:Runtime:Runtime PPC:MSL MPWCRuntime.lib" - "$(CW):MSL:MSL_C:MSL_MacOS:Lib:PPC:MSL C.PPC MPW(NL).Lib" ; - - SIOUXHDRS ?= ; - - SIOUXLIBS ?= - "$(CW):MacOS Support:Libraries:Runtime:Runtime PPC:MSL RuntimePPC.lib" - "$(CW):MSL:MSL_C:MSL_MacOS:Lib:PPC:MSL SIOUX.PPC.Lib" - "$(CW):MSL:MSL_C:MSL_MacOS:Lib:PPC:MSL C.PPC.Lib" ; - - C++ ?= mwcppc ; - C++FLAGS ?= -w off -nomapcr ; - CC ?= mwcppc ; - CCFLAGS ?= -w off -nomapcr ; - CP ?= duplicate -y ; - DOT ?= ":" ; - DOTDOT ?= "::" ; - HDRS ?= $(MACHDRS) $(MPWHDRS) ; - LINK ?= mwlinkppc ; - LINKFLAGS ?= -mpwtool -warn ; - LINKLIBS ?= $(MACLIBS) $(MPWLIBS) ; - MKDIR ?= newfolder ; - MV ?= rename -y ; - NOARSCAN ?= true ; - OPTIM ?= ; - RM ?= delete -y ; - SLASH ?= ":" ; - STDHDRS ?= ; - SUFLIB ?= .lib ; - SUFOBJ ?= .o ; -} -else if $(OS) = BEOS && $(METROWERKS) -{ - AR ?= mwld -xml -o ; - BINDIR ?= /boot/apps ; - CC ?= mwcc ; - CCFLAGS ?= -nosyspath ; - C++ ?= $(CC) ; - C++FLAGS ?= -nosyspath ; - FORTRAN ?= "" ; - LIBDIR ?= /boot/develop/libraries ; - LINK ?= mwld ; - LINKFLAGS ?= "" ; - MANDIR ?= /boot/documentation/"Shell Tools"/HTML ; - NOARSCAN ?= true ; - STDHDRS ?= /boot/develop/headers/posix ; -} -else if $(OS) = BEOS -{ - BINDIR ?= /boot/apps ; - CC ?= gcc ; - C++ ?= $(CC) ; - FORTRAN ?= "" ; - LIBDIR ?= /boot/develop/libraries ; - LINK ?= gcc ; - LINKLIBS ?= -lnet ; - NOARSCAN ?= true ; - STDHDRS ?= /boot/develop/headers/posix ; -} -else if $(UNIX) -{ - switch $(OS) - { - case AIX : - LINKLIBS ?= -lbsd ; - - case AMIGA : - CC ?= gcc ; - YACC ?= 
"bison -y" ; - - case CYGWIN : - CC ?= gcc ; - CCFLAGS += -D__cygwin__ ; - LEX ?= flex ; - RANLIB ?= "" ; - SUFEXE ?= .exe ; - YACC ?= "bison -y" ; - - case DGUX : - RANLIB ?= "" ; - RELOCATE ?= true ; - - case HPUX : - YACC = ; - CFLAGS += -Ae ; - CCFLAGS += -Ae ; - RANLIB ?= "" ; - - case INTERIX : - CC ?= gcc ; - RANLIB ?= "" ; - - case IRIX : - RANLIB ?= "" ; - - case MPEIX : - CC ?= gcc ; - C++ ?= gcc ; - CCFLAGS += -D_POSIX_SOURCE ; - HDRS += /usr/include ; - RANLIB ?= "" ; - NOARSCAN ?= true ; - NOARUPDATE ?= true ; - - case MVS : - RANLIB ?= "" ; - - case NEXT : - AR ?= libtool -o ; - RANLIB ?= "" ; - - case MACOSX : - AR ?= libtool -o ; - C++ ?= c++ ; - MANDIR ?= /usr/local/share/man ; - RANLIB ?= "" ; - - case NCR : - RANLIB ?= "" ; - - case PTX : - RANLIB ?= "" ; - - case QNX : - AR ?= wlib ; - CC ?= cc ; - CCFLAGS ?= -Q ; # quiet - C++ ?= $(CC) ; - C++FLAGS ?= -Q ; # quiet - LINK ?= $(CC) ; - LINKFLAGS ?= -Q ; # quiet - NOARSCAN ?= true ; - RANLIB ?= "" ; - - case SCO : - RANLIB ?= "" ; - RELOCATE ?= true ; - - case SINIX : - RANLIB ?= "" ; - - case SOLARIS : - RANLIB ?= "" ; - AR ?= "/usr/ccs/bin/ar ru" ; - - case UNICOS : - NOARSCAN ?= true ; - OPTIM ?= -O0 ; - - case UNIXWARE : - RANLIB ?= "" ; - RELOCATE ?= true ; - } - - # UNIX defaults - - CCFLAGS ?= ; - C++FLAGS ?= $(CCFLAGS) ; - CHMOD ?= chmod ; - CHGRP ?= chgrp ; - CHOWN ?= chown ; - LEX ?= lex ; - LINKFLAGS ?= $(CCFLAGS) ; - LINKLIBS ?= ; - OPTIM ?= -O ; - RANLIB ?= ranlib ; - YACC ?= yacc ; - YACCFILES ?= y.tab ; - YACCFLAGS ?= -d ; -} - -# -# General defaults; a lot like UNIX -# - - AR ?= ar ru ; - AS ?= as ; - ASFLAGS ?= ; - AWK ?= awk ; - BINDIR ?= /usr/local/bin ; - C++ ?= cc ; - C++FLAGS ?= ; - CC ?= cc ; - CCFLAGS ?= ; - CP ?= cp -f ; - CRELIB ?= ; - DOT ?= . ; - DOTDOT ?= .. ; - EXEMODE ?= 711 ; - FILEMODE ?= 644 ; - FORTRAN ?= f77 ; - FORTRANFLAGS ?= ; - HDRS ?= ; - INSTALLGRIST ?= installed ; - JAMFILE ?= Jamfile ; - JAMRULES ?= Jamrules ; - LEX ?= ; - LIBDIR ?= /usr/local/lib ; - LINK ?= $(CC) ; - LINKFLAGS ?= ; - LINKLIBS ?= ; - LN ?= ln ; - MANDIR ?= /usr/local/man ; - MKDIR ?= mkdir ; - MV ?= mv -f ; - OPTIM ?= ; - RCP ?= rcp ; - RM ?= rm -f ; - RSH ?= rsh ; - SED ?= sed ; - SHELLHEADER ?= "#!/bin/sh" ; - SHELLMODE ?= 755 ; - SLASH ?= / ; - STDHDRS ?= /usr/include ; - SUFEXE ?= "" ; - SUFLIB ?= .a ; - SUFOBJ ?= .o ; - UNDEFFLAG ?= "-u _" ; - YACC ?= ; - YACCFILES ?= ; - YACCFLAGS ?= ; - - HDRPATTERN = - "^[ ]*#[ ]*include[ ]*[<\"]([^\">]*)[\">].*$" ; - - OSFULL = $(OS)$(OSVER)$(OSPLAT) $(OS)$(OSPLAT) $(OS)$(OSVER) $(OS) ; - - -# -# Base dependencies - first for "bootstrap" kinds of rules -# - -DEPENDS all : shell files lib exe obj ; -DEPENDS all shell files lib exe obj : first ; -NOTFILE all first shell files lib exe obj dirs clean uninstall ; -ALWAYS clean uninstall ; - -# -# Rules -# - -rule As -{ - DEPENDS $(<) : $(>) ; - ASFLAGS on $(<) += $(ASFLAGS) $(SUBDIRASFLAGS) ; -} - -rule Bulk -{ - local i ; - - for i in $(>) - { - File $(i:D=$(<)) : $(i) ; - } -} - -rule Cc -{ - local _h ; - - DEPENDS $(<) : $(>) ; - - # Just to clarify here: this sets the per-target CCFLAGS to - # be the current value of (global) CCFLAGS and SUBDIRCCFLAGS. 
- - CCFLAGS on $(<) += $(CCFLAGS) $(SUBDIRCCFLAGS) ; - - # If the compiler's -o flag doesn't work, relocate the .o - - if $(RELOCATE) - { - CcMv $(<) : $(>) ; - } - - _h = $(SEARCH_SOURCE) $(HDRS) $(SUBDIRHDRS) ; - - if $(VMS) && $(_h) - { - SLASHINC on $(<) = "/inc=(" $(_h[1]) ,$(_h[2-]) ")" ; - } - else if $(MAC) && $(_h) - { - local _i _j ; - _j = $(_h[1]) ; - for _i in $(_h[2-]) - { - _j = $(_j),$(_i) ; - } - MACINC on $(<) = \"$(_j)\" ; - } -} - -rule C++ -{ - local _h ; - - DEPENDS $(<) : $(>) ; - C++FLAGS on $(<) += $(C++FLAGS) $(SUBDIRC++FLAGS) ; - - if $(RELOCATE) - { - CcMv $(<) : $(>) ; - } - - _h = $(SEARCH_SOURCE) $(HDRS) $(SUBDIRHDRS) ; - - if $(VMS) && $(_h) - { - SLASHINC on $(<) = "/inc=(" $(_h[1]) ,$(_h[2-]) ")" ; - } - else if $(MAC) && $(_h) - { - local _i _j ; - _j = $(_h[1]) ; - for _i in $(_h[2-]) - { - _j = $(_j),$(_i) ; - } - MACINC on $(<) = \"$(_j)\" ; - } -} - -rule Chmod -{ - if $(CHMOD) { Chmod1 $(<) ; } -} - -rule File -{ - DEPENDS files : $(<) ; - DEPENDS $(<) : $(>) ; - SEARCH on $(>) = $(SEARCH_SOURCE) ; - MODE on $(<) = $(FILEMODE) ; - Chmod $(<) ; -} - -rule Fortran -{ - DEPENDS $(<) : $(>) ; -} - -rule GenFile -{ - local _t = [ FGristSourceFiles $(<) ] ; - local _s = [ FAppendSuffix $(>[1]) : $(SUFEXE) ] ; - Depends $(_t) : $(_s) $(>[2-]) ; - GenFile1 $(_t) : $(_s) $(>[2-]) ; - Clean clean : $(_t) ; -} - -rule GenFile1 -{ - MakeLocate $(<) : $(LOCATE_SOURCE) ; - SEARCH on $(>) = $(SEARCH_SOURCE) ; -} - -rule HardLink -{ - DEPENDS files : $(<) ; - DEPENDS $(<) : $(>) ; - SEARCH on $(>) = $(SEARCH_SOURCE) ; -} - -rule HdrMacroFile -{ - # HdrMacroFile file ; - # - # this rule is used to indicate that a given file contains definitions - # for filename macros (e.g. "#define MYFILE_H <myfile.h>") that can - # later be used in #include statements in the rest of the source - # - # theses files must be parsed before any make is tried.. - # - HDRMACRO $(<) ; -} - -rule HdrRule -{ - # HdrRule source : headers ; - - # N.B. This rule is called during binding, potentially after - # the fate of many targets has been determined, and must be - # used with caution: don't add dependencies to unrelated - # targets, and don't set variables on $(<). 
- - # Tell Jam that anything depending on $(<) also depends on $(>), - # set SEARCH so Jam can find the headers, but then say we don't - # care if we can't actually find the headers (they may have been - # within ifdefs), - - local s ; - - if $(HDRGRIST) - { - s = $(>:G=$(HDRGRIST)) ; - } else { - s = $(>) ; - } - - INCLUDES $(<) : $(s) ; - SEARCH on $(s) = $(HDRSEARCH) ; - NOCARE $(s) ; - - # Propagate on $(<) to $(>) - - HDRSEARCH on $(s) = $(HDRSEARCH) ; - HDRSCAN on $(s) = $(HDRSCAN) ; - HDRRULE on $(s) = $(HDRRULE) ; - HDRGRIST on $(s) = $(HDRGRIST) ; -} - -rule InstallInto -{ - # InstallInto dir : sources ; - - local i t ; - - t = $(>:G=$(INSTALLGRIST)) ; - - # Arrange for jam install - # Arrange for jam uninstall - # sources are in SEARCH_SOURCE - # targets are in dir - - Depends install : $(t) ; - Clean uninstall : $(t) ; - SEARCH on $(>) = $(SEARCH_SOURCE) ; - MakeLocate $(t) : $(<) ; - - # For each source, make gristed target name - # and Install, Chmod, Chown, and Chgrp - - for i in $(>) - { - local tt = $(i:G=$(INSTALLGRIST)) ; - - Depends $(tt) : $(i) ; - Install $(tt) : $(i) ; - Chmod $(tt) ; - - if $(OWNER) && $(CHOWN) - { - Chown $(tt) ; - OWNER on $(tt) = $(OWNER) ; - } - - if $(GROUP) && $(CHGRP) - { - Chgrp $(tt) ; - GROUP on $(tt) = $(GROUP) ; - } - } -} - -rule InstallBin -{ - local _t = [ FAppendSuffix $(>) : $(SUFEXE) ] ; - - InstallInto $(<) : $(_t) ; - MODE on $(_t:G=installed) = $(EXEMODE) ; -} - -rule InstallFile -{ - InstallInto $(<) : $(>) ; - MODE on $(>:G=installed) = $(FILEMODE) ; -} - -rule InstallLib -{ - InstallInto $(<) : $(>) ; - MODE on $(>:G=installed) = $(FILEMODE) ; -} - -rule InstallMan -{ - # Really this just strips the . from the suffix - - local i s d ; - - for i in $(>) - { - switch $(i:S) - { - case .1 : s = 1 ; case .2 : s = 2 ; case .3 : s = 3 ; - case .4 : s = 4 ; case .5 : s = 5 ; case .6 : s = 6 ; - case .7 : s = 7 ; case .8 : s = 8 ; case .l : s = l ; - case .n : s = n ; case .man : s = 1 ; - } - - d = man$(s) ; - - InstallInto $(d:R=$(<)) : $(i) ; - } - - MODE on $(>:G=installed) = $(FILEMODE) ; -} - -rule InstallShell -{ - InstallInto $(<) : $(>) ; - MODE on $(>:G=installed) = $(SHELLMODE) ; -} - -rule Lex -{ - LexMv $(<) : $(>) ; - DEPENDS $(<) : $(>) ; - MakeLocate $(<) : $(LOCATE_SOURCE) ; - Clean clean : $(<) ; -} - -rule Library -{ - LibraryFromObjects $(<) : $(>:S=$(SUFOBJ)) ; - Objects $(>) ; -} - -rule LibraryFromObjects -{ - local _i _l _s ; - - # Add grist to file names - - _s = [ FGristFiles $(>) ] ; - _l = $(<:S=$(SUFLIB)) ; - - # library depends on its member objects - - if $(KEEPOBJS) - { - DEPENDS obj : $(_s) ; - } - else - { - DEPENDS lib : $(_l) ; - } - - # Set LOCATE for the library and its contents. The bound - # value shows up as $(NEEDLIBS) on the Link actions. - # For compatibility, we only do this if the library doesn't - # already have a path. - - if ! $(_l:D) - { - MakeLocate $(_l) $(_l)($(_s:BS)) : $(LOCATE_TARGET) ; - } - - if $(NOARSCAN) - { - # If we can't scan the library to timestamp its contents, - # we have to just make the library depend directly on the - # on-disk object files. - - DEPENDS $(_l) : $(_s) ; - } - else - { - # If we can scan the library, we make the library depend - # on its members and each member depend on the on-disk - # object file. 
- - DEPENDS $(_l) : $(_l)($(_s:BS)) ; - - for _i in $(_s) - { - DEPENDS $(_l)($(_i:BS)) : $(_i) ; - } - } - - Clean clean : $(_l) ; - - if $(CRELIB) { CreLib $(_l) : $(_s[1]) ; } - - Archive $(_l) : $(_s) ; - - if $(RANLIB) { Ranlib $(_l) ; } - - # If we can't scan the library, we have to leave the .o's around. - - if ! ( $(NOARSCAN) || $(KEEPOBJS) ) { RmTemps $(_l) : $(_s) ; } -} - -rule Link -{ - MODE on $(<) = $(EXEMODE) ; - Chmod $(<) ; -} - -rule LinkLibraries -{ - # make library dependencies of target - # set NEEDLIBS variable used by 'actions Main' - - local _t = [ FAppendSuffix $(<) : $(SUFEXE) ] ; - - DEPENDS $(_t) : $(>:S=$(SUFLIB)) ; - NEEDLIBS on $(_t) += $(>:S=$(SUFLIB)) ; -} - -rule Main -{ - MainFromObjects $(<) : $(>:S=$(SUFOBJ)) ; - Objects $(>) ; -} - -rule MainFromObjects -{ - local _s _t ; - - # Add grist to file names - # Add suffix to exe - - _s = [ FGristFiles $(>) ] ; - _t = [ FAppendSuffix $(<) : $(SUFEXE) ] ; - - if $(_t) != $(<) - { - DEPENDS $(<) : $(_t) ; - NOTFILE $(<) ; - } - - # make compiled sources a dependency of target - - DEPENDS exe : $(_t) ; - DEPENDS $(_t) : $(_s) ; - MakeLocate $(_t) : $(LOCATE_TARGET) ; - - Clean clean : $(_t) ; - - Link $(_t) : $(_s) ; -} - -rule MakeLocate -{ - if $(>) - { - LOCATE on $(<) = $(>) ; - Depends $(<) : $(>[1]) ; - MkDir $(>[1]) ; - } -} - -rule MkDir -{ - # If dir exists, don't update it - # Do this even for $(DOT). - - NOUPDATE $(<) ; - - if $(<) != $(DOT) && ! $($(<)-mkdir) - { - local s ; - - # Cheesy gate to prevent multiple invocations on same dir - # MkDir1 has the actions - # Arrange for jam dirs - - $(<)-mkdir = true ; - MkDir1 $(<) ; - Depends dirs : $(<) ; - - # Recursively make parent directories. - # $(<:P) = $(<)'s parent, & we recurse until root - - s = $(<:P) ; - - if $(NT) - { - switch $(s) - { - case *: : s = ; - case *:\\ : s = ; - } - } - - if $(s) && $(s) != $(<) - { - Depends $(<) : $(s) ; - MkDir $(s) ; - } - else if $(s) - { - NOTFILE $(s) ; - } - - } -} - -rule Object -{ - local h ; - - # locate object and search for source, if wanted - - Clean clean : $(<) ; - - MakeLocate $(<) : $(LOCATE_TARGET) ; - SEARCH on $(>) = $(SEARCH_SOURCE) ; - - # Save HDRS for -I$(HDRS) on compile. - # We shouldn't need -I$(SEARCH_SOURCE) as cc can find headers - # in the .c file's directory, but generated .c files (from - # yacc, lex, etc) are located in $(LOCATE_TARGET), possibly - # different from $(SEARCH_SOURCE). - - HDRS on $(<) = $(SEARCH_SOURCE) $(HDRS) $(SUBDIRHDRS) ; - - # handle #includes for source: Jam scans for headers with - # the regexp pattern $(HDRSCAN) and then invokes $(HDRRULE) - # with the scanned file as the target and the found headers - # as the sources. HDRSEARCH is the value of SEARCH used for - # the found header files. Finally, if jam must deal with - # header files of the same name in different directories, - # they can be distinguished with HDRGRIST. - - # $(h) is where cc first looks for #include "foo.h" files. - # If the source file is in a distant directory, look there. - # Else, look in "" (the current directory). 
- - if $(SEARCH_SOURCE) - { - h = $(SEARCH_SOURCE) ; - } - else - { - h = "" ; - } - - HDRRULE on $(>) = HdrRule ; - HDRSCAN on $(>) = $(HDRPATTERN) ; - HDRSEARCH on $(>) = $(HDRS) $(SUBDIRHDRS) $(h) $(STDHDRS) ; - HDRGRIST on $(>) = $(HDRGRIST) ; - - # if source is not .c, generate .c with specific rule - - switch $(>:S) - { - case .asm : As $(<) : $(>) ; - case .c : Cc $(<) : $(>) ; - case .C : C++ $(<) : $(>) ; - case .cc : C++ $(<) : $(>) ; - case .cpp : C++ $(<) : $(>) ; - case .f : Fortran $(<) : $(>) ; - case .l : Cc $(<) : $(<:S=.c) ; - Lex $(<:S=.c) : $(>) ; - case .s : As $(<) : $(>) ; - case .y : Cc $(<) : $(<:S=.c) ; - Yacc $(<:S=.c) : $(>) ; - case * : UserObject $(<) : $(>) ; - } -} - - -rule ObjectCcFlags -{ - CCFLAGS on [ FGristFiles $(<:S=$(SUFOBJ)) ] += $(>) ; -} - -rule ObjectC++Flags -{ - C++FLAGS on [ FGristFiles $(<:S=$(SUFOBJ)) ] += $(>) ; -} - -rule ObjectHdrs -{ - HDRS on [ FGristFiles $(<:S=$(SUFOBJ)) ] += $(>) ; -} - -rule Objects -{ - local _i ; - - for _i in [ FGristFiles $(<) ] - { - Object $(_i:S=$(SUFOBJ)) : $(_i) ; - DEPENDS obj : $(_i:S=$(SUFOBJ)) ; - } -} - -rule RmTemps -{ - TEMPORARY $(>) ; -} - -rule Setuid -{ - MODE on [ FAppendSuffix $(<) : $(SUFEXE) ] = 4711 ; -} - -rule Shell -{ - DEPENDS shell : $(<) ; - DEPENDS $(<) : $(>) ; - SEARCH on $(>) = $(SEARCH_SOURCE) ; - MODE on $(<) = $(SHELLMODE) ; - Clean clean : $(<) ; - Chmod $(<) ; -} - -rule SubDir -{ - local _r _s ; - - # - # SubDir TOP d1 [ ... ] - # - # This introduces a Jamfile that is part of a project tree - # rooted at $(TOP). It (only once) includes the project-specific - # rules file $(TOP)/Jamrules and then sets search & locate stuff. - # - # If the variable $(TOPRULES) is set (where TOP is the first arg - # to SubDir), that file is included instead of $(TOP)/Jamrules. - # - # d1 ... are the directory elements that lead to this directory - # from $(TOP). We construct the system dependent path from these - # directory elements in order to set search & locate stuff. - # - - if ! $($(<[1])) - { - if ! $(<[1]) - { - EXIT SubDir syntax error ; - } - - $(<[1]) = [ FSubDir $(<[2-]) ] ; - } - - # - # If $(TOP)/Jamrules hasn't been included, do so. - # - - if ! $($(<[1])-included) - { - # Gated entry. - - $(<[1])-included = TRUE ; - - # File is $(TOPRULES) or $(TOP)/Jamrules. - - _r = $($(<[1])RULES) ; - - if ! $(_r) - { - _r = $(JAMRULES:R=$($(<[1]))) ; - } - - # Include it. - - include $(_r) ; - } - - # Get path to current directory from root using SubDir. - # Save dir tokens for other potential uses. - - _s = [ FDirName $(<[2-]) ] ; - SUBDIR = $(_s:R=$($(<[1]))) ; - SUBDIR_TOKENS = $(<[2-]) ; - - # Now set up SEARCH_SOURCE, LOCATE_TARGET, SOURCE_GRIST - # These can be reset if needed. For example, if the source - # directory should not hold object files, LOCATE_TARGET can - # subsequently be redefined. - - SEARCH_SOURCE = $(SUBDIR) ; - LOCATE_SOURCE = $(ALL_LOCATE_TARGET) $(SUBDIR) ; - LOCATE_TARGET = $(ALL_LOCATE_TARGET) $(SUBDIR) ; - SOURCE_GRIST = [ FGrist $(<[2-]) ] ; - - # Reset per-directory ccflags, hdrs - - SUBDIRCCFLAGS = ; - SUBDIRC++FLAGS = ; - SUBDIRHDRS = ; -} - -rule SubDirCcFlags -{ - SUBDIRCCFLAGS += $(<) ; -} - -rule SubDirC++Flags -{ - SUBDIRC++FLAGS += $(<) ; -} - -rule SubDirHdrs -{ - SUBDIRHDRS += $(<) ; -} - -rule SubInclude -{ - local _s ; - - # That's - # SubInclude TOP d1 [ d2 [ d3 [ d4 ] ] ] - # - # to include a subdirectory's Jamfile. - - if ! 
$($(<[1])) - { - EXIT Top level of source tree has not been set with $(<[1]) ; - } - - _s = [ FDirName $(<[2-]) ] ; - - include $(JAMFILE:D=$(_s):R=$($(<[1]))) ; -} - -rule Undefines -{ - UNDEFS on [ FAppendSuffix $(<) : $(SUFEXE) ] += $(UNDEFFLAG)$(>) ; -} - -rule UserObject -{ - EXIT "Unknown suffix on" $(>) "- see UserObject rule in Jamfile(5)." ; -} - -rule Yacc -{ - local _h ; - - _h = $(<:BS=.h) ; - - # Some places don't have a yacc. - - MakeLocate $(<) $(_h) : $(LOCATE_SOURCE) ; - - if $(YACC) - { - DEPENDS $(<) $(_h) : $(>) ; - Yacc1 $(<) $(_h) : $(>) ; - YaccMv $(<) $(_h) : $(>) ; - Clean clean : $(<) $(_h) ; - } - - # Make sure someone includes $(_h) else it will be a deadly independent - # target. - INCLUDES $(<) : $(_h) ; -} - -# -# Utility rules; no side effects on these. -# - -rule FGrist -{ - # Turn individual elements in $(<) into grist. - - local _g _i ; - - _g = $(<[1]) ; - - for _i in $(<[2-]) - { - _g = $(_g)!$(_i) ; - } - - return $(_g) ; -} - -rule FGristFiles -{ - if ! $(SOURCE_GRIST) - { - return $(<) ; - } - else - { - return $(<:G=$(SOURCE_GRIST)) ; - } -} - -rule FGristSourceFiles -{ - # Produce source file name name with grist in it, - # if SOURCE_GRIST is set. - - # Leave header files alone, because they have a global - # visibility. - - if ! $(SOURCE_GRIST) - { - return $(<) ; - } - else - { - local _i _o ; - - for _i in $(<) - { - switch $(_i) - { - case *.h : _o += $(_i) ; - case * : _o += $(_i:G=$(SOURCE_GRIST)) ; - } - } - - return $(_o) ; - } -} - -rule FConcat -{ - # Puts the variables together, removing spaces. - - local _t _r ; - - $(_r) = $(<[1]) ; - - for _t in $(<[2-]) - { - $(_r) = $(_r)$(_t) ; - } - - return $(_r) ; -} - -rule FSubDir -{ - local _i _d ; - - # If $(>) is the path to the current directory, compute the - # path (using ../../ etc) back to that root directory. - # Sets result in $(<) - - if ! $(<[1]) - { - _d = $(DOT) ; - } - else - { - _d = $(DOTDOT) ; - - for _i in $(<[2-]) - { - _d = $(_d:R=$(DOTDOT)) ; - } - } - - return $(_d) ; -} - -rule FDirName -{ - local _s _i ; - - # Turn individual elements in $(<) into a usable path. - - if ! $(<) - { - _s = $(DOT) ; - } - else if $(VMS) - { - # This handles the following cases: - # a -> [.a] - # a b c -> [.a.b.c] - # x: -> x: - # x: a -> x:[a] - # x:[a] b -> x:[a.b] - - switch $(<[1]) - { - case *:* : _s = $(<[1]) ; - case \\[*\\] : _s = $(<[1]) ; - case * : _s = [.$(<[1])] ; - } - - for _i in [.$(<[2-])] - { - _s = $(_i:R=$(_s)) ; - } - } - else if $(MAC) - { - _s = $(DOT) ; - - for _i in $(<) - { - _s = $(_i:R=$(_s)) ; - } - } - else - { - _s = $(<[1]) ; - - for _i in $(<[2-]) - { - _s = $(_i:R=$(_s)) ; - } - } - - return $(_s) ; -} - - -rule _makeCommon -{ - # strip common initial elements - - if $($(<)[1]) && $($(<)[1]) = $($(>)[1]) - { - $(<) = $($(<)[2-]) ; - $(>) = $($(>)[2-]) ; - _makeCommon $(<) : $(>) ; - } -} - - -rule FRelPath -{ - local _l _r ; - - # first strip off common parts - - _l = $(<) ; - _r = $(>) ; - - _makeCommon _l : _r ; - - # now make path to root and path down - - _l = [ FSubDir $(_l) ] ; - _r = [ FDirName $(_r) ] ; - - # Concatenate and save - - # XXX This should be better - - if $(_r) = $(DOT) { - return $(_l) ; - } else { - return $(_r:R=$(_l)) ; - } -} - -rule FAppendSuffix -{ - # E.g., "FAppendSuffix yacc lex foo.bat : $(SUFEXE) ;" - # returns (yacc,lex,foo.bat) on Unix and - # (yacc.exe,lex.exe,foo.bat) on NT. 
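# [Annotation, not part of the deleted Jambase: worked examples for the
# path/grist helpers above; the token lists are hypothetical.
#     [ FGrist src util ]                   ->  src!util
#     [ FSubDir src util ]                  ->  ../..    (path back to the root)
#     [ FRelPath src util : src app ]       ->  ../app   (common "src" stripped)
#     [ FAppendSuffix yacc foo.bat : .exe ] ->  yacc.exe foo.bat
# ]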
- - if $(>) - { - local _i _o ; - - for _i in $(<) - { - if $(_i:S) - { - _o += $(_i) ; - } - else - { - _o += $(_i:S=$(>)) ; - } - } - return $(_o) ; - } - else - { - return $(<) ; - } -} - -rule unmakeDir -{ - if $(>[1]:D) && $(>[1]:D) != $(>[1]) && $(>[1]:D) != \\\\ - { - unmakeDir $(<) : $(>[1]:D) $(>[1]:BS) $(>[2-]) ; - } - else - { - $(<) = $(>) ; - } -} - - -rule FConvertToSlashes -{ - local _d, _s, _i ; - - unmakeDir _d : $(<) ; - - _s = $(_d[1]) ; - for _i in $(_d[2-]) - { - _s = $(_s)/$(_i) ; - } - return $(_s) ; -} - - -# -# Actions -# - -# -# First the defaults -# - -actions updated together piecemeal Archive -{ - $(AR) $(<) $(>) -} - -actions As -{ - $(AS) $(ASFLAGS) -I$(HDRS) -o $(<) $(>) -} - -actions C++ -{ - $(C++) -c $(C++FLAGS) $(OPTIM) -I$(HDRS) -o $(<) $(>) -} - -actions Cc -{ - $(CC) -c $(CCFLAGS) $(OPTIM) -I$(HDRS) -o $(<) $(>) -} - -actions Chgrp -{ - $(CHGRP) $(GROUP) $(<) -} - -actions Chmod1 -{ - $(CHMOD) $(MODE) $(<) -} - -actions Chown -{ - $(CHOWN) $(OWNER) $(<) -} - -actions piecemeal together existing Clean -{ - $(RM) $(>) -} - -actions File -{ - $(CP) $(>) $(<) -} - -actions GenFile1 -{ - $(>[1]) $(<) $(>[2-]) -} - -actions Fortran -{ - $(FORTRAN) $(FORTRANFLAGS) -o $(<) $(>) -} - -actions HardLink -{ - $(RM) $(<) && $(LN) $(>) $(<) -} - -actions Install -{ - $(CP) $(>) $(<) -} - -actions Lex -{ - $(LEX) $(>) -} - -actions LexMv -{ - $(MV) lex.yy.c $(<) -} - -actions Link bind NEEDLIBS -{ - $(LINK) $(LINKFLAGS) -o $(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS) -} - -actions MkDir1 -{ - $(MKDIR) $(<) -} - -actions together Ranlib -{ - $(RANLIB) $(<) -} - -actions quietly updated piecemeal together RmTemps -{ - $(RM) $(>) -} - -actions Shell -{ - $(AWK) ' - NR == 1 { print "$(SHELLHEADER)" } - NR == 1 && /^[#:]/ { next } - /^##/ { next } - { print } - ' < $(>) > $(<) -} - -actions Yacc1 -{ - $(YACC) $(YACCFLAGS) $(>) -} - -actions YaccMv -{ - $(MV) $(YACCFILES).c $(<[1]) - $(MV) $(YACCFILES).h $(<[2]) -} - -# -# RELOCATE - for compilers with broken -o flags -# - -if $(RELOCATE) -{ - actions C++ - { - $(C++) -c $(C++FLAGS) $(OPTIM) -I$(HDRS) $(>) - } - - actions Cc - { - $(CC) -c $(CCFLAGS) $(OPTIM) -I$(HDRS) $(>) - } - - actions ignore CcMv - { - [ $(<) != $(>:BS=$(SUFOBJ)) ] && $(MV) $(>:BS=$(SUFOBJ)) $(<) - } -} - -# -# NOARUPDATE - can't update an archive -# - -if $(NOARUPDATE) -{ - actions Archive - { - $(AR) $(<) $(>) - } -} - -# -# NT specific actions -# - -if $(NT) -{ - if $(TOOLSET) = VISUALC || $(TOOLSET) = VC7 || $(TOOLSET) = INTELC - { - actions updated together piecemeal Archive - { - if exist $(<) set _$(<:B)_=$(<) - $(AR) /out:$(<) %_$(<:B)_% $(>) - } - - actions As - { - $(AS) /Ml /p /v /w2 $(>) $(<) ,nul,nul; - } - - actions Cc - { - $(CC) /c $(CCFLAGS) $(OPTIM) /Fo$(<) /I$(HDRS) /I$(STDHDRS) $(>) - } - - actions C++ - { - $(C++) /c $(C++FLAGS) $(OPTIM) /Fo$(<) /I$(HDRS) /I$(STDHDRS) /Tp$(>) - } - - actions Link bind NEEDLIBS - { - $(LINK) $(LINKFLAGS) /out:$(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS) - } - } - else if $(TOOLSET) = VISUALC16 - { - actions updated together piecemeal Archive - { - $(AR) $(<) -+$(>) - } - - actions Cc - { - $(CC) /c $(CCFLAGS) $(OPTIM) /Fo$(<) /I$(HDRS) $(>) - } - - actions C++ - { - $(C++) /c $(C++FLAGS) $(OPTIM) /Fo$(<) /I$(HDRS) /Tp$(>) - } - - actions Link bind NEEDLIBS - { - $(LINK) $(LINKFLAGS) /out:$(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS) - } - } - else if $(TOOLSET) = BORLANDC - { - actions updated together piecemeal Archive - { - $(AR) $(<) -+$(>) - } - - actions Link bind NEEDLIBS - { - $(LINK) -e$(<) 
$(LINKFLAGS) $(UNDEFS) -L$(LINKLIBS) $(NEEDLIBS) $(>) - } - - actions C++ - { - $(C++) -c $(C++FLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>) - } - - actions Cc - { - $(CC) -c $(CCFLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>) - } - - } - else if $(TOOLSET) = MINGW - { - actions together piecemeal Archive - { - $(AR) $(<) $(>:T) - } - - actions Cc - { - $(CC) -c $(CCFLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>) - } - - actions C++ - { - $(C++) -c $(C++FLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>) - } - } - else if $(TOOLSET) = WATCOM - { - actions together piecemeal Archive - { - $(AR) $(<) +-$(>) - } - - actions Cc - { - $(CC) $(CCFLAGS) $(OPTIM) /Fo=$(<) /I$(HDRS) $(>) - } - - actions C++ - { - $(C++) $(C++FLAGS) $(OPTIM) /Fo=$(<) /I$(HDRS) $(>) - } - - actions Link bind NEEDLIBS - { - $(LINK) $(LINKFLAGS) /Fe=$(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS) - } - - actions Shell - { - $(CP) $(>) $(<) - } - } - else if $(TOOLSET) = LCC - { - actions together piecemeal Archive - { - $(AR) /out:$(<) $(>) - } - - actions Cc - { - $(CC) $(CCFLAGS) $(OPTIM) -Fo$(<) -I$(HDRS) $(>) - } - - actions Link bind NEEDLIBS - { - $(LINK) $(LINKFLAGS) -o $(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS) - } - - actions Shell - { - $(CP) $(>) $(<) - } - } -} - -# -# OS2 specific actions -# - -else if $(OS2) -{ - if $(TOOLSET) = WATCOM - { - actions together piecemeal Archive - { - $(AR) $(<) +-$(>) - } - - actions Cc - { - $(CC) $(CCFLAGS) $(OPTIM) /Fo=$(<) /I$(HDRS) $(>) - } - - actions C++ - { - $(C++) $(C++FLAGS) $(OPTIM) /Fo=$(<) /I$(HDRS) $(>) - } - - actions Link bind NEEDLIBS - { - $(LINK) $(LINKFLAGS) /Fe=$(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS) - } - - actions Shell - { - $(CP) $(>) $(<) - } - } - else if $(TOOLSET) = EMX - { - actions together piecemeal Archive - { - $(AR) $(<) $(>:T) - } - - actions Cc - { - $(CC) -c $(CCFLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>) - } - - actions C++ - { - $(C++) -c $(C++FLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>) - } - } -} - -# -# VMS specific actions -# - -else if $(VMS) -{ - actions updated together piecemeal Archive - { - lib/replace $(<) $(>[1]) ,$(>[2-]) - } - - actions Cc - { - $(CC)/obj=$(<) $(CCFLAGS) $(OPTIM) $(SLASHINC) $(>) - } - - actions C++ - { - $(C++)/obj=$(<) $(C++FLAGS) $(OPTIM) $(SLASHINC) $(>) - } - - actions piecemeal together existing Clean - { - $(RM) $(>[1]);* ,$(>[2-]);* - } - - actions together quietly CreLib - { - if f$search("$(<)") .eqs. "" then lib/create $(<) - } - - actions GenFile1 - { - mcr $(>[1]) $(<) $(>[2-]) - } - - actions Link bind NEEDLIBS - { - $(LINK)/exe=$(<) $(LINKFLAGS) $(>[1]) ,$(>[2-]) ,$(NEEDLIBS)/lib ,$(LINKLIBS) - } - - actions quietly updated piecemeal together RmTemps - { - $(RM) $(>[1]);* ,$(>[2-]);* - } - - actions Shell - { - $(CP) $(>) $(<) - } -} - -# -# Mac specifc actions -# - -else if $(MAC) -{ - actions together Archive - { - $(LINK) -library -o $(<) $(>) - } - - actions Cc - { - set -e MWCincludes $(MACINC) - $(CC) -o $(<) $(CCFLAGS) $(OPTIM) $(>) - } - - actions C++ - { - set -e MWCincludes $(MACINC) - $(CC) -o $(<) $(C++FLAGS) $(OPTIM) $(>) - } - - actions Link bind NEEDLIBS - { - $(LINK) -o $(<) $(LINKFLAGS) $(>) $(NEEDLIBS) "$(LINKLIBS)" - } -} - -# -# Backwards compatibility with jam 1, where rules were uppercased. 
-# - -rule BULK { Bulk $(<) : $(>) ; } -rule FILE { File $(<) : $(>) ; } -rule HDRRULE { HdrRule $(<) : $(>) ; } -rule INSTALL { Install $(<) : $(>) ; } -rule LIBRARY { Library $(<) : $(>) ; } -rule LIBS { LinkLibraries $(<) : $(>) ; } -rule LINK { Link $(<) : $(>) ; } -rule MAIN { Main $(<) : $(>) ; } -rule SETUID { Setuid $(<) ; } -rule SHELL { Shell $(<) : $(>) ; } -rule UNDEFINES { Undefines $(<) : $(>) ; } - -# Old INSTALL* didn't take dest directory. - -rule INSTALLBIN { InstallBin $(BINDIR) : $(<) ; } -rule INSTALLLIB { InstallLib $(LIBDIR) : $(<) ; } -rule INSTALLMAN { InstallMan $(MANDIR) : $(<) ; } - -# Compatibility with jam 2.2. - -rule addDirName { $(<) += [ FDirName $(>) ] ; } -rule makeDirName { $(<) = [ FDirName $(>) ] ; } -rule makeGristedName { $(<) = [ FGristSourceFiles $(>) ] ; } -rule makeRelPath { $(<[1]) = [ FRelPath $(<[2-]) : $(>) ] ; } -rule makeSuffixed { $(<[1]) = [ FAppendSuffix $(>) : $(<[2]) ] ; } - -# -# Now include the user's Jamfile. -# - -{ - if $(JAMFILE) { include $(JAMFILE) ; } -} - -} diff --git a/jam-files/engine/boost-jam.spec b/jam-files/engine/boost-jam.spec deleted file mode 100644 index bc572fc9..00000000 --- a/jam-files/engine/boost-jam.spec +++ /dev/null @@ -1,64 +0,0 @@ -Name: boost-jam -Version: 3.1.19 -Summary: Build tool -Release: 1 -Source: %{name}-%{version}.tgz - -License: Boost Software License, Version 1.0 -Group: Development/Tools -URL: http://www.boost.org -Packager: Rene Rivera <grafik@redshift-software.com> -BuildRoot: /var/tmp/%{name}-%{version}.root - -%description -Boost Jam is a build tool based on FTJam, which in turn is based on -Perforce Jam. It contains significant improvements made to facilitate -its use in the Boost Build System, but should be backward compatible -with Perforce Jam. - -Authors: - Perforce Jam : Cristopher Seiwald - FT Jam : David Turner - Boost Jam : David Abrahams - -Copyright: - /+\ - +\ Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - \+/ - License is hereby granted to use this software and distribute it - freely, as long as this copyright notice is retained and modifications - are clearly marked. - ALL WARRANTIES ARE HEREBY DISCLAIMED. - -Also: - Copyright 2001-2006 David Abrahams. - Copyright 2002-2006 Rene Rivera. - Copyright 2003-2006 Vladimir Prus. - - Distributed under the Boost Software License, Version 1.0. - (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -%prep -%setup -n %{name}-%{version} - -%build -LOCATE_TARGET=bin ./build.sh $BOOST_JAM_TOOLSET - -%install -rm -rf $RPM_BUILD_ROOT -mkdir -p $RPM_BUILD_ROOT%{_bindir} -mkdir -p $RPM_BUILD_ROOT%{_docdir}/%{name}-%{version} -install -m 755 bin/bjam $RPM_BUILD_ROOT%{_bindir}/bjam-%{version} -ln -sf bjam-%{version} $RPM_BUILD_ROOT%{_bindir}/bjam -cp -R *.html *.png *.css LICENSE*.txt images jam $RPM_BUILD_ROOT%{_docdir}/%{name}-%{version} - -find $RPM_BUILD_ROOT -name CVS -type d -exec rm -r {} \; - -%files -%defattr(-,root,root) -%attr(755,root,root) /usr/bin/* -%doc %{_docdir}/%{name}-%{version} - - -%clean -rm -rf $RPM_BUILD_ROOT diff --git a/jam-files/engine/boost-no-inspect b/jam-files/engine/boost-no-inspect deleted file mode 100644 index 8a06f3a7..00000000 --- a/jam-files/engine/boost-no-inspect +++ /dev/null @@ -1 +0,0 @@ -this really out of our hands, so tell inspect to ignore directory
\ No newline at end of file diff --git a/jam-files/engine/build.bat b/jam-files/engine/build.bat deleted file mode 100644 index f927b769..00000000 --- a/jam-files/engine/build.bat +++ /dev/null @@ -1,532 +0,0 @@ -@ECHO OFF - -REM ~ Copyright 2002-2007 Rene Rivera. -REM ~ Distributed under the Boost Software License, Version 1.0. -REM ~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -setlocal -goto Start - - -:Set_Error -color 00 -goto :eof - - -:Clear_Error -ver >nul -goto :eof - - -:Error_Print -REM Output an error message and set the errorlevel to indicate failure. -setlocal -ECHO ### -ECHO ### %1 -ECHO ### -ECHO ### You can specify the toolset as the argument, i.e.: -ECHO ### .\build.bat msvc -ECHO ### -ECHO ### Toolsets supported by this script are: borland, como, gcc, gcc-nocygwin, -ECHO ### intel-win32, metrowerks, mingw, msvc, vc7, vc8, vc9, vc10 -ECHO ### -call :Set_Error -endlocal -goto :eof - - -:Test_Path -REM Tests for the given file(executable) presence in the directories in the PATH -REM environment variable. Additionaly sets FOUND_PATH to the path of the -REM found file. -call :Clear_Error -setlocal -set test=%~$PATH:1 -endlocal -if not errorlevel 1 set FOUND_PATH=%~dp$PATH:1 -goto :eof - - -:Test_Option -REM Tests whether the given string is in the form of an option: "--*" -call :Clear_Error -setlocal -set test=%1 -if not defined test ( - call :Set_Error - goto Test_Option_End -) -set test=###%test%### -set test=%test:"###=% -set test=%test:###"=% -set test=%test:###=% -if not "-" == "%test:~1,1%" call :Set_Error -:Test_Option_End -endlocal -goto :eof - - -:Test_Empty -REM Tests whether the given string is not empty -call :Clear_Error -setlocal -set test=%1 -if not defined test ( - call :Clear_Error - goto Test_Empty_End -) -set test=###%test%### -set test=%test:"###=% -set test=%test:###"=% -set test=%test:###=% -if not "" == "%test%" call :Set_Error -:Test_Empty_End -endlocal -goto :eof - - -:Call_If_Exists -if EXIST %1 call %* -goto :eof - - -:Guess_Toolset -REM Try and guess the toolset to bootstrap the build with... -REM Sets BOOST_JAM_TOOLSET to the first found toolset. -REM May also set BOOST_JAM_TOOLSET_ROOT to the -REM location of the found toolset. 
- -call :Clear_Error -call :Test_Empty %ProgramFiles% -if not errorlevel 1 set ProgramFiles=C:\Program Files - -call :Clear_Error -if NOT "_%VS100COMNTOOLS%_" == "__" ( - set "BOOST_JAM_TOOLSET=vc10" - set "BOOST_JAM_TOOLSET_ROOT=%VS100COMNTOOLS%..\..\VC\" - goto :eof) -call :Clear_Error -if EXIST "%ProgramFiles%\Microsoft Visual Studio 10.0\VC\VCVARSALL.BAT" ( - set "BOOST_JAM_TOOLSET=vc10" - set "BOOST_JAM_TOOLSET_ROOT=%ProgramFiles%\Microsoft Visual Studio 10.0\VC\" - goto :eof) -call :Clear_Error -if NOT "_%VS90COMNTOOLS%_" == "__" ( - set "BOOST_JAM_TOOLSET=vc9" - set "BOOST_JAM_TOOLSET_ROOT=%VS90COMNTOOLS%..\..\VC\" - goto :eof) -call :Clear_Error -if EXIST "%ProgramFiles%\Microsoft Visual Studio 9.0\VC\VCVARSALL.BAT" ( - set "BOOST_JAM_TOOLSET=vc9" - set "BOOST_JAM_TOOLSET_ROOT=%ProgramFiles%\Microsoft Visual Studio 9.0\VC\" - goto :eof) -call :Clear_Error -if NOT "_%VS80COMNTOOLS%_" == "__" ( - set "BOOST_JAM_TOOLSET=vc8" - set "BOOST_JAM_TOOLSET_ROOT=%VS80COMNTOOLS%..\..\VC\" - goto :eof) -call :Clear_Error -if EXIST "%ProgramFiles%\Microsoft Visual Studio 8\VC\VCVARSALL.BAT" ( - set "BOOST_JAM_TOOLSET=vc8" - set "BOOST_JAM_TOOLSET_ROOT=%ProgramFiles%\Microsoft Visual Studio 8\VC\" - goto :eof) -call :Clear_Error -if NOT "_%VS71COMNTOOLS%_" == "__" ( - set "BOOST_JAM_TOOLSET=vc7" - set "BOOST_JAM_TOOLSET_ROOT=%VS71COMNTOOLS%\..\..\VC7\" - goto :eof) -call :Clear_Error -if NOT "_%VCINSTALLDIR%_" == "__" ( - REM %VCINSTALLDIR% is also set for VC9 (and probably VC8) - set "BOOST_JAM_TOOLSET=vc7" - set "BOOST_JAM_TOOLSET_ROOT=%VCINSTALLDIR%\VC7\" - goto :eof) -call :Clear_Error -if EXIST "%ProgramFiles%\Microsoft Visual Studio .NET 2003\VC7\bin\VCVARS32.BAT" ( - set "BOOST_JAM_TOOLSET=vc7" - set "BOOST_JAM_TOOLSET_ROOT=%ProgramFiles%\Microsoft Visual Studio .NET 2003\VC7\" - goto :eof) -call :Clear_Error -if EXIST "%ProgramFiles%\Microsoft Visual Studio .NET\VC7\bin\VCVARS32.BAT" ( - set "BOOST_JAM_TOOLSET=vc7" - set "BOOST_JAM_TOOLSET_ROOT=%ProgramFiles%\Microsoft Visual Studio .NET\VC7\" - goto :eof) -call :Clear_Error -if NOT "_%MSVCDir%_" == "__" ( - set "BOOST_JAM_TOOLSET=msvc" - set "BOOST_JAM_TOOLSET_ROOT=%MSVCDir%\" - goto :eof) -call :Clear_Error -if EXIST "%ProgramFiles%\Microsoft Visual Studio\VC98\bin\VCVARS32.BAT" ( - set "BOOST_JAM_TOOLSET=msvc" - set "BOOST_JAM_TOOLSET_ROOT=%ProgramFiles%\Microsoft Visual Studio\VC98\" - goto :eof) -call :Clear_Error -if EXIST "%ProgramFiles%\Microsoft Visual C++\VC98\bin\VCVARS32.BAT" ( - set "BOOST_JAM_TOOLSET=msvc" - set "BOOST_JAM_TOOLSET_ROOT=%ProgramFiles%\Microsoft Visual C++\VC98\" - goto :eof) -call :Clear_Error -call :Test_Path cl.exe -if not errorlevel 1 ( - set "BOOST_JAM_TOOLSET=msvc" - set "BOOST_JAM_TOOLSET_ROOT=%FOUND_PATH%..\" - goto :eof) -call :Clear_Error -call :Test_Path vcvars32.bat -if not errorlevel 1 ( - set "BOOST_JAM_TOOLSET=msvc" - call "%FOUND_PATH%VCVARS32.BAT" - set "BOOST_JAM_TOOLSET_ROOT=%MSVCDir%\" - goto :eof) -call :Clear_Error -if EXIST "C:\Borland\BCC55\Bin\bcc32.exe" ( - set "BOOST_JAM_TOOLSET=borland" - set "BOOST_JAM_TOOLSET_ROOT=C:\Borland\BCC55\" - goto :eof) -call :Clear_Error -call :Test_Path bcc32.exe -if not errorlevel 1 ( - set "BOOST_JAM_TOOLSET=borland" - set "BOOST_JAM_TOOLSET_ROOT=%FOUND_PATH%..\" - goto :eof) -call :Clear_Error -call :Test_Path icl.exe -if not errorlevel 1 ( - set "BOOST_JAM_TOOLSET=intel-win32" - set "BOOST_JAM_TOOLSET_ROOT=%FOUND_PATH%..\" - goto :eof) -call :Clear_Error -if EXIST "C:\MinGW\bin\gcc.exe" ( - set "BOOST_JAM_TOOLSET=mingw" - set 
"BOOST_JAM_TOOLSET_ROOT=C:\MinGW\" - goto :eof) -call :Clear_Error -if NOT "_%CWFolder%_" == "__" ( - set "BOOST_JAM_TOOLSET=metrowerks" - set "BOOST_JAM_TOOLSET_ROOT=%CWFolder%\" - goto :eof ) -call :Clear_Error -call :Test_Path mwcc.exe -if not errorlevel 1 ( - set "BOOST_JAM_TOOLSET=metrowerks" - set "BOOST_JAM_TOOLSET_ROOT=%FOUND_PATH%..\..\" - goto :eof) -call :Clear_Error -call :Error_Print "Could not find a suitable toolset." -goto :eof - - -:Guess_Yacc -REM Tries to find bison or yacc in common places so we can build the grammar. -call :Clear_Error -call :Test_Path yacc.exe -if not errorlevel 1 ( - set "YACC=yacc -d" - goto :eof) -call :Clear_Error -call :Test_Path bison.exe -if not errorlevel 1 ( - set "YACC=bison -d --yacc" - goto :eof) -call :Clear_Error -if EXIST "C:\Program Files\GnuWin32\bin\bison.exe" ( - set "YACC=C:\Program Files\GnuWin32\bin\bison.exe" -d --yacc - goto :eof) -call :Clear_Error -call :Error_Print "Could not find Yacc to build the Jam grammar." -goto :eof - - -:Start -set BOOST_JAM_TOOLSET= -set BOOST_JAM_ARGS= - -REM If no arguments guess the toolset; -REM or if first argument is an option guess the toolset; -REM otherwise the argument is the toolset to use. -call :Clear_Error -call :Test_Empty %1 -if not errorlevel 1 ( - call :Guess_Toolset - if not errorlevel 1 ( goto Setup_Toolset ) else ( goto Finish ) -) - -call :Clear_Error -call :Test_Option %1 -if not errorlevel 1 ( - call :Guess_Toolset - if not errorlevel 1 ( goto Setup_Toolset ) else ( goto Finish ) -) - -call :Clear_Error -set BOOST_JAM_TOOLSET=%1 -shift -goto Setup_Toolset - - -:Setup_Toolset -REM Setup the toolset command and options. This bit of code -REM needs to be flexible enough to handle both when -REM the toolset was guessed at and found, or when the toolset -REM was indicated in the command arguments. -REM NOTE: The strange multiple "if ?? == _toolset_" tests are that way -REM because in BAT variables are subsituted only once during a single -REM command. A complete "if ... ( commands ) else ( commands )" -REM is a single command, even though it's in multiple lines here. 
-:Setup_Args -call :Clear_Error -call :Test_Empty %1 -if not errorlevel 1 goto Config_Toolset -call :Clear_Error -call :Test_Option %1 -if errorlevel 1 ( - set BOOST_JAM_ARGS=%BOOST_JAM_ARGS% %1 - shift - goto Setup_Args -) -:Config_Toolset -if NOT "_%BOOST_JAM_TOOLSET%_" == "_metrowerks_" goto Skip_METROWERKS -if NOT "_%CWFolder%_" == "__" ( - set "BOOST_JAM_TOOLSET_ROOT=%CWFolder%\" - ) -set "PATH=%BOOST_JAM_TOOLSET_ROOT%Other Metrowerks Tools\Command Line Tools;%PATH%" -set "BOOST_JAM_CC=mwcc -runtime ss -cwd include -DNT -lkernel32.lib -ladvapi32.lib -luser32.lib" -set "BOOST_JAM_OPT_JAM=-o bootstrap\jam0.exe" -set "BOOST_JAM_OPT_MKJAMBASE=-o bootstrap\mkjambase0.exe" -set "BOOST_JAM_OPT_YYACC=-o bootstrap\yyacc0.exe" -set "_known_=1" -:Skip_METROWERKS -if NOT "_%BOOST_JAM_TOOLSET%_" == "_msvc_" goto Skip_MSVC -if NOT "_%MSVCDir%_" == "__" ( - set "BOOST_JAM_TOOLSET_ROOT=%MSVCDir%\" - ) -call :Call_If_Exists "%BOOST_JAM_TOOLSET_ROOT%bin\VCVARS32.BAT" -if not "_%BOOST_JAM_TOOLSET_ROOT%_" == "__" ( - set "PATH=%BOOST_JAM_TOOLSET_ROOT%bin;%PATH%" - ) -set "BOOST_JAM_CC=cl /nologo /GZ /Zi /MLd /Fobootstrap/ /Fdbootstrap/ -DNT -DYYDEBUG kernel32.lib advapi32.lib user32.lib" -set "BOOST_JAM_OPT_JAM=/Febootstrap\jam0" -set "BOOST_JAM_OPT_MKJAMBASE=/Febootstrap\mkjambase0" -set "BOOST_JAM_OPT_YYACC=/Febootstrap\yyacc0" -set "_known_=1" -:Skip_MSVC -if NOT "_%BOOST_JAM_TOOLSET%_" == "_vc7_" goto Skip_VC7 -if NOT "_%VS71COMNTOOLS%_" == "__" ( - set "BOOST_JAM_TOOLSET_ROOT=%VS71COMNTOOLS%..\..\VC7\" - ) -if "_%VCINSTALLDIR%_" == "__" call :Call_If_Exists "%BOOST_JAM_TOOLSET_ROOT%bin\VCVARS32.BAT" -if NOT "_%BOOST_JAM_TOOLSET_ROOT%_" == "__" ( - if "_%VCINSTALLDIR%_" == "__" ( - set "PATH=%BOOST_JAM_TOOLSET_ROOT%bin;%PATH%" - ) ) -set "BOOST_JAM_CC=cl /nologo /GZ /Zi /MLd /Fobootstrap/ /Fdbootstrap/ -DNT -DYYDEBUG kernel32.lib advapi32.lib user32.lib" -set "BOOST_JAM_OPT_JAM=/Febootstrap\jam0" -set "BOOST_JAM_OPT_MKJAMBASE=/Febootstrap\mkjambase0" -set "BOOST_JAM_OPT_YYACC=/Febootstrap\yyacc0" -set "_known_=1" -:Skip_VC7 -if NOT "_%BOOST_JAM_TOOLSET%_" == "_vc8_" goto Skip_VC8 -if NOT "_%VS80COMNTOOLS%_" == "__" ( - set "BOOST_JAM_TOOLSET_ROOT=%VS80COMNTOOLS%..\..\VC\" - ) -if "_%VCINSTALLDIR%_" == "__" call :Call_If_Exists "%BOOST_JAM_TOOLSET_ROOT%VCVARSALL.BAT" %BOOST_JAM_ARGS% -if NOT "_%BOOST_JAM_TOOLSET_ROOT%_" == "__" ( - if "_%VCINSTALLDIR%_" == "__" ( - set "PATH=%BOOST_JAM_TOOLSET_ROOT%bin;%PATH%" - ) ) -set "BOOST_JAM_CC=cl /nologo /RTC1 /Zi /MTd /Fobootstrap/ /Fdbootstrap/ -DNT -DYYDEBUG -wd4996 kernel32.lib advapi32.lib user32.lib" -set "BOOST_JAM_OPT_JAM=/Febootstrap\jam0" -set "BOOST_JAM_OPT_MKJAMBASE=/Febootstrap\mkjambase0" -set "BOOST_JAM_OPT_YYACC=/Febootstrap\yyacc0" -set "_known_=1" -:Skip_VC8 -if NOT "_%BOOST_JAM_TOOLSET%_" == "_vc9_" goto Skip_VC9 -if NOT "_%VS90COMNTOOLS%_" == "__" ( - set "BOOST_JAM_TOOLSET_ROOT=%VS90COMNTOOLS%..\..\VC\" - ) -if "_%VCINSTALLDIR%_" == "__" call :Call_If_Exists "%BOOST_JAM_TOOLSET_ROOT%VCVARSALL.BAT" %BOOST_JAM_ARGS% -if NOT "_%BOOST_JAM_TOOLSET_ROOT%_" == "__" ( - if "_%VCINSTALLDIR%_" == "__" ( - set "PATH=%BOOST_JAM_TOOLSET_ROOT%bin;%PATH%" - ) ) -set "BOOST_JAM_CC=cl /nologo /RTC1 /Zi /MTd /Fobootstrap/ /Fdbootstrap/ -DNT -DYYDEBUG -wd4996 kernel32.lib advapi32.lib user32.lib" -set "BOOST_JAM_OPT_JAM=/Febootstrap\jam0" -set "BOOST_JAM_OPT_MKJAMBASE=/Febootstrap\mkjambase0" -set "BOOST_JAM_OPT_YYACC=/Febootstrap\yyacc0" -set "_known_=1" -:Skip_VC9 -if NOT "_%BOOST_JAM_TOOLSET%_" == "_vc10_" goto Skip_VC10 -if NOT "_%VS100COMNTOOLS%_" == 
"__" ( - set "BOOST_JAM_TOOLSET_ROOT=%VS100COMNTOOLS%..\..\VC\" - ) -if "_%VCINSTALLDIR%_" == "__" call :Call_If_Exists "%BOOST_JAM_TOOLSET_ROOT%VCVARSALL.BAT" %BOOST_JAM_ARGS% -if NOT "_%BOOST_JAM_TOOLSET_ROOT%_" == "__" ( - if "_%VCINSTALLDIR%_" == "__" ( - set "PATH=%BOOST_JAM_TOOLSET_ROOT%bin;%PATH%" - ) ) -set "BOOST_JAM_CC=cl /nologo /RTC1 /Zi /MTd /Fobootstrap/ /Fdbootstrap/ -DNT -DYYDEBUG -wd4996 kernel32.lib advapi32.lib user32.lib" -set "BOOST_JAM_OPT_JAM=/Febootstrap\jam0" -set "BOOST_JAM_OPT_MKJAMBASE=/Febootstrap\mkjambase0" -set "BOOST_JAM_OPT_YYACC=/Febootstrap\yyacc0" -set "_known_=1" -:Skip_VC10 -if NOT "_%BOOST_JAM_TOOLSET%_" == "_borland_" goto Skip_BORLAND -if "_%BOOST_JAM_TOOLSET_ROOT%_" == "__" ( - call :Test_Path bcc32.exe ) -if "_%BOOST_JAM_TOOLSET_ROOT%_" == "__" ( - if not errorlevel 1 ( - set "BOOST_JAM_TOOLSET_ROOT=%FOUND_PATH%..\" - ) ) -if not "_%BOOST_JAM_TOOLSET_ROOT%_" == "__" ( - set "PATH=%BOOST_JAM_TOOLSET_ROOT%Bin;%PATH%" - ) -set "BOOST_JAM_CC=bcc32 -WC -w- -q -I%BOOST_JAM_TOOLSET_ROOT%Include -L%BOOST_JAM_TOOLSET_ROOT%Lib /DNT -nbootstrap" -set "BOOST_JAM_OPT_JAM=-ejam0" -set "BOOST_JAM_OPT_MKJAMBASE=-emkjambasejam0" -set "BOOST_JAM_OPT_YYACC=-eyyacc0" -set "_known_=1" -:Skip_BORLAND -if NOT "_%BOOST_JAM_TOOLSET%_" == "_como_" goto Skip_COMO -set "BOOST_JAM_CC=como -DNT" -set "BOOST_JAM_OPT_JAM=-o bootstrap\jam0.exe" -set "BOOST_JAM_OPT_MKJAMBASE=-o bootstrap\mkjambase0.exe" -set "BOOST_JAM_OPT_YYACC=-o bootstrap\yyacc0.exe" -set "_known_=1" -:Skip_COMO -if NOT "_%BOOST_JAM_TOOLSET%_" == "_gcc_" goto Skip_GCC -set "BOOST_JAM_CC=gcc -DNT" -set "BOOST_JAM_OPT_JAM=-o bootstrap\jam0.exe" -set "BOOST_JAM_OPT_MKJAMBASE=-o bootstrap\mkjambase0.exe" -set "BOOST_JAM_OPT_YYACC=-o bootstrap\yyacc0.exe" -set "_known_=1" -:Skip_GCC -if NOT "_%BOOST_JAM_TOOLSET%_" == "_gcc-nocygwin_" goto Skip_GCC_NOCYGWIN -set "BOOST_JAM_CC=gcc -DNT -mno-cygwin" -set "BOOST_JAM_OPT_JAM=-o bootstrap\jam0.exe" -set "BOOST_JAM_OPT_MKJAMBASE=-o bootstrap\mkjambase0.exe" -set "BOOST_JAM_OPT_YYACC=-o bootstrap\yyacc0.exe" -set "_known_=1" -:Skip_GCC_NOCYGWIN -if NOT "_%BOOST_JAM_TOOLSET%_" == "_intel-win32_" goto Skip_INTEL_WIN32 -set "BOOST_JAM_CC=icl -DNT /nologo kernel32.lib advapi32.lib user32.lib" -set "BOOST_JAM_OPT_JAM=/Febootstrap\jam0" -set "BOOST_JAM_OPT_MKJAMBASE=/Febootstrap\mkjambase0" -set "BOOST_JAM_OPT_YYACC=/Febootstrap\yyacc0" -set "_known_=1" -:Skip_INTEL_WIN32 -if NOT "_%BOOST_JAM_TOOLSET%_" == "_mingw_" goto Skip_MINGW -if not "_%BOOST_JAM_TOOLSET_ROOT%_" == "__" ( - set "PATH=%BOOST_JAM_TOOLSET_ROOT%bin;%PATH%" - ) -set "BOOST_JAM_CC=gcc -DNT" -set "BOOST_JAM_OPT_JAM=-o bootstrap\jam0.exe" -set "BOOST_JAM_OPT_MKJAMBASE=-o bootstrap\mkjambase0.exe" -set "BOOST_JAM_OPT_YYACC=-o bootstrap\yyacc0.exe" -set "_known_=1" -:Skip_MINGW -call :Clear_Error -if "_%_known_%_" == "__" ( - call :Error_Print "Unknown toolset: %BOOST_JAM_TOOLSET%" -) -if errorlevel 1 goto Finish - -echo ### -echo ### Using '%BOOST_JAM_TOOLSET%' toolset. 
-echo ### - -set YYACC_SOURCES=yyacc.c -set MKJAMBASE_SOURCES=mkjambase.c -set BJAM_SOURCES= -set BJAM_SOURCES=%BJAM_SOURCES% command.c compile.c debug.c execnt.c expand.c filent.c glob.c hash.c -set BJAM_SOURCES=%BJAM_SOURCES% hdrmacro.c headers.c jam.c jambase.c jamgram.c lists.c make.c make1.c -set BJAM_SOURCES=%BJAM_SOURCES% newstr.c option.c output.c parse.c pathunix.c regexp.c -set BJAM_SOURCES=%BJAM_SOURCES% rules.c scan.c search.c subst.c timestamp.c variable.c modules.c -set BJAM_SOURCES=%BJAM_SOURCES% strings.c filesys.c builtins.c md5.c pwd.c class.c w32_getreg.c native.c -set BJAM_SOURCES=%BJAM_SOURCES% modules/set.c modules/path.c modules/regex.c -set BJAM_SOURCES=%BJAM_SOURCES% modules/property-set.c modules/sequence.c modules/order.c - -set BJAM_UPDATE= -:Check_Update -call :Test_Empty %1 -if not errorlevel 1 goto Check_Update_End -call :Clear_Error -setlocal -set test=%1 -set test=###%test%### -set test=%test:"###=% -set test=%test:###"=% -set test=%test:###=% -if "%test%" == "--update" set BJAM_UPDATE=update -endlocal -shift -if not "_%BJAM_UPDATE%_" == "_update_" goto Check_Update -:Check_Update_End -if "_%BJAM_UPDATE%_" == "_update_" ( - if not exist ".\bootstrap\jam0.exe" ( - set BJAM_UPDATE= - ) -) - -@echo ON -@if "_%BJAM_UPDATE%_" == "_update_" goto Skip_Bootstrap -if exist bootstrap rd /S /Q bootstrap -md bootstrap -@if not exist jamgram.y goto Bootstrap_GrammarPrep -@if not exist jamgramtab.h goto Bootstrap_GrammarPrep -@goto Skip_GrammarPrep -:Bootstrap_GrammarPrep -%BOOST_JAM_CC% %BOOST_JAM_OPT_YYACC% %YYACC_SOURCES% -@if not exist ".\bootstrap\yyacc0.exe" goto Skip_GrammarPrep -.\bootstrap\yyacc0 jamgram.y jamgramtab.h jamgram.yy -:Skip_GrammarPrep -@if not exist jamgram.c goto Bootstrap_GrammarBuild -@if not exist jamgram.h goto Bootstrap_GrammarBuild -@goto Skip_GrammarBuild -:Bootstrap_GrammarBuild -@echo OFF -if "_%YACC%_" == "__" ( - call :Guess_Yacc -) -if errorlevel 1 goto Finish -@echo ON -%YACC% jamgram.y -@if errorlevel 1 goto Finish -del /f jamgram.c -rename y.tab.c jamgram.c -del /f jamgram.h -rename y.tab.h jamgram.h -:Skip_GrammarBuild -@echo ON -@if exist jambase.c goto Skip_Jambase -%BOOST_JAM_CC% %BOOST_JAM_OPT_MKJAMBASE% %MKJAMBASE_SOURCES% -@if not exist ".\bootstrap\mkjambase0.exe" goto Skip_Jambase -.\bootstrap\mkjambase0 jambase.c Jambase -:Skip_Jambase -%BOOST_JAM_CC% %BOOST_JAM_OPT_JAM% %BJAM_SOURCES% -:Skip_Bootstrap -@if not exist ".\bootstrap\jam0.exe" goto Skip_Jam -@if "_%BJAM_UPDATE%_" == "_update_" goto Skip_Clean -.\bootstrap\jam0 -f build.jam --toolset=%BOOST_JAM_TOOLSET% "--toolset-root=%BOOST_JAM_TOOLSET_ROOT% " clean -:Skip_Clean -@set args=%* -@echo OFF -:Set_Args -setlocal -call :Test_Empty %args% -if not errorlevel 1 goto Set_Args_End -set test=###%args:~0,2%### -set test=%test:"###=% -set test=%test:###"=% -set test=%test:###=% -set test=%test:~0,1% -if "-" == "%test%" goto Set_Args_End -endlocal -set args=%args:~1% -goto Set_Args -:Set_Args_End -@echo ON -.\bootstrap\jam0 -f build.jam --toolset=%BOOST_JAM_TOOLSET% "--toolset-root=%BOOST_JAM_TOOLSET_ROOT% " %args% -:Skip_Jam - -:Finish diff --git a/jam-files/engine/build.jam b/jam-files/engine/build.jam deleted file mode 100644 index 266b07a1..00000000 --- a/jam-files/engine/build.jam +++ /dev/null @@ -1,1070 +0,0 @@ -#~ Copyright 2002-2007 Rene Rivera. -#~ Distributed under the Boost Software License, Version 1.0. -#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Clean env vars of any "extra" empty values. 
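# [Annotation, not part of the deleted build.jam: illustrative only. If the
# shell exported CFLAGS as "-O2" "" "-g", the loop below rebuilds it as
# "-O2" "-g", so the empty element cannot turn into a stray argument on the
# compile command lines. ]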
-for local v in ARGV CC CFLAGS LIBS -{ - local values ; - for local x in $($(v)) - { - if $(x) != "" - { - values += $(x) ; - } - } - $(v) = $(values) ; -} - -# Platform related specifics. -if $(OS) = NT { rule .path { return "$(<:J=\\)" ; } ./ = "/" ; } -else if $(OS) = OS2 { rule .path { return "$(<:J=\\)" ; } ./ = "/" ; } -else if $(OS) = VMS { rule .path { return "[.$(<:J=/)]" ; } } -else if $(OS) = MAC { rule .path { return ":$(<:J=\:)" ; } } -else { rule .path { return "$(<:J=/)" ; } } -if $(OS) = VMS { . = "_" ; } -else { . = "." ; } -./ ?= "" ; - -# Info about what we are building. -_VERSION_ = 3 1 19 ; -NAME = boost-jam ; -VERSION = $(_VERSION_:J=$(.)) ; -RELEASE = 1 ; -LICENSE = LICENSE_1_0 ; - -# Generate development debug binaries? -if --debug in $(ARGV) -{ - debug = true ; -} - -if --profile in $(ARGV) -{ - profile = true ; -} - -# Attempt to generate and/or build the grammar? -if --grammar in $(ARGV) -{ - grammar = true ; -} - -# Do we need to add a default build type argument? -if ! ( --release in $(ARGV) ) && - ! ( --debug in $(ARGV) ) && - ! ( --profile in $(ARGV) ) -{ - ARGV += --release ; -} - -# Enable, and configure, Python hooks. -with-python = ; -python-location = [ MATCH --with-python=(.*) : $(ARGV) ] ; -if $(python-location) -{ - with-python = true ; -} -if $(with-python) -{ - if $(OS) = NT - { - --python-include = [ .path $(python-location) include ] ; - --python-lib = ; - for local v in 26 25 24 23 22 - { - --python-lib ?= - [ GLOB [ .path $(python-location) libs ] : "python$(v).lib" ] - [ GLOB $(python-location) [ .path $(python-location) libs ] - $(Path) $(PATH) $(path) : "python$(v).dll" ] - ; - if ! $(--python-lib[2]) - { - --python-lib = ; - } - } - --python-lib = $(--python-lib[1]) ; - } - else if $(OS) = MACOSX - { - --python-include = [ .path $(python-location) Headers ] ; - --python-lib = $(python-location) Python ; - } - else - { - --python-include = ; - --python-lib = ; - for local v in 2.6 2.5 2.4 2.3 2.2 - { - local inc = [ GLOB [ .path $(python-location) include ] : python$(v) ] ; - local lib = [ GLOB [ .path $(python-location) lib ] : libpython$(v)* ] ; - if $(inc) && $(lib) - { - --python-include ?= $(inc) ; - --python-lib ?= $(lib[1]:D) python$(v) ; - } - } - } -} - -# Boehm GC? -if --gc in $(ARGV) -{ - --boehm-gc = true ; -} -if $(--boehm-gc) -{ - --extra-include += [ .path [ PWD ] "boehm_gc" "include" ] ; -} - -# Duma? -if --duma in $(ARGV) -{ - --duma = true ; -} -if $(--duma) -{ - --extra-include += [ .path [ PWD ] "duma" ] ; -} - -# An explicit root for the toolset? (trim spaces) -toolset-root = [ MATCH --toolset-root=(.*) : $(ARGV) ] ; -{ - local t = [ MATCH "[ ]*(.*)" : $(toolset-root:J=" ") ] ; - toolset-root = ; - while $(t) - { - t = [ MATCH "([^ ]+)([ ]*)(.*)" : $(t) ] ; - toolset-root += $(t[1]) ; - if $(t[3]) { toolset-root += $(t[2]) ; } - t = $(t[3]) ; - } - toolset-root = $(toolset-root:J="") ; -} - -# Configure the implemented toolsets. These are minimal -# commands and options to compile the full Jam. When -# adding new toolsets make sure to add them to the -# "known" list also. - -rule toolset ( name command .type ? : opt.out + : opt.define * : flags * : linklibs * ) -{ - .type ?= "" ; - tool.$(name)$(.type).cc ?= $(command) ; - tool.$(name)$(.type).opt.out ?= $(opt.out) ; - tool.$(name)$(.type).opt.define ?= $(opt.define) ; - tool.$(name)$(.type).flags ?= $(flags) ; - tool.$(name)$(.type).linklibs ?= $(linklibs) ; - if ! 
$(name) in $(toolsets) { toolsets += $(name) ; } -} - -rule if-os ( os + : yes-opt * : no-opt * ) - { if $(os) in $(OS) { return $(yes-opt) ; } else { return $(no-opt) ; } } - -rule opt ( type : yes-opt * : no-opt * ) - { if $(type) in $(ARGV) { return $(yes-opt) ; } else { return $(no-opt) ; } } - -## HP-UX aCC compiler -toolset acc cc : "-o " : -D - : -Ae - [ opt --release : -s -O3 ] - [ opt --debug : -g -pg ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Borland C++ 5.5.x -toolset borland bcc32 : -e -n : /D - : -WC -w- -q "-I$(toolset-root)Include" "-L$(toolset-root)Lib" - [ opt --release : -O2 -vi -w-inl ] - [ opt --debug : -v -Od -vi- ] - -I$(--python-include) -I$(--extra-include) - : $(--python-lib[1]) ; -## Generic Unix cc -if ! $(CC) { CC = cc ; } -toolset cc $(CC) : "-o " : -D - : $(CFLAGS) - [ opt --release : -s -O ] - [ opt --debug : -g ] - -I$(--python-include) -I$(--extra-include) - : $(LIBS) -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Comeau C/C++ 4.x -toolset como como : "-o " : -D - : --c - [ opt --release : --inlining ] - [ opt --debug : --no_inlining ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Clang Linux 2.8+ -toolset clang clang : "-o " : -D - : -Wno-unused -Wno-format - [ opt --release : -Os ] - [ opt --debug : -g -O0 -fno-inline ] - [ opt --profile : -finline-functions -g ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## MacOSX Darwin, using GCC 2.9.x, 3.x -toolset darwin cc : "-o " : -D - : - [ opt --release : -Wl,-x -O3 -finline-functions ] - [ opt --debug : -g -O0 -fno-inline -pg ] - [ opt --profile : -Wl,-x -O3 -finline-functions -g -pg ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## GCC 2.x, 3.x, 4.x -toolset gcc gcc : "-o " : -D - : -pedantic -fno-strict-aliasing - [ opt --release : [ opt --symbols : -g : -s ] -O3 ] - [ opt --debug : -g -O0 -fno-inline ] - -I$(--python-include) -I$(--extra-include) -Wno-long-long - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## GCC 2.x, 3.x on CYGWIN but without cygwin1.dll -toolset gcc-nocygwin gcc : "-o " : -D - : -s -O3 -mno-cygwin - [ opt --release : -finline-functions ] - [ opt --debug : -s -O3 -fno-inline -pg ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Intel C/C++ for Darwin -toolset intel-darwin icc : "-o " : -D - : - [ opt --release : -O3 ] - [ opt --debug : -g -O0 -p ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Intel C/C++ for Linux -toolset intel-linux icc : "-o " : -D - : - [ opt --release : -Xlinker -s -O3 ] - [ opt --debug : -g -O0 -p ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Intel C/C++ for Win32 -toolset intel-win32 icl : /Fe : -D - : /nologo - [ opt --release : /MT /O2 /Ob2 /Gy /GF /GA /GB ] - [ opt --debug : /MTd /DEBUG /Z7 /Od /Ob0 ] - -I$(--python-include) -I$(--extra-include) - : kernel32.lib advapi32.lib user32.lib $(--python-lib[1]) ; -## KCC ? 
-toolset kcc KCC : "-o " : -D - : - [ opt --release : -s +K2 ] - [ opt --debug : -g +K0 ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Borland Kylix -toolset kylix bc++ : -o : -D - : -tC -q - [ opt --release : -O2 -vi -w-inl ] - [ opt --debug : -v -Od -vi- ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Metrowerks CodeWarrior 8.x -{ - # Even though CW can compile all files at once, it crashes if it tries in the bjam case. - local mwcc = ; if $(OS) = NT { mwcc = mwcc ; } else { mwcc = mwc$(OSPLAT:L) ; } - mwcc ?= mwcc ; - toolset metrowerks $(mwcc) : "-o " : -D - : -c -lang c -subsystem console -cwd include - [ opt --release : -runtime ss -opt full -inline all ] - [ opt --debug : -runtime ssd -opt none -inline off ] - -I$(--python-include) -I$(--extra-include) ; - toolset metrowerks $(mwcc) .link : "-o " : - : -subsystem console -lkernel32.lib -ladvapi32.lib -luser32.lib - [ opt --release : -runtime ss ] - [ opt --debug : -runtime ssd ] - : $(--python-lib[1]) ; -} -## MINGW GCC -toolset mingw gcc : "-o " : -D - : - [ opt --release : -s -O3 -finline-functions ] - [ opt --debug : -g -O0 -fno-inline -pg ] - -I$(--python-include) -I$(--extra-include) - : $(--python-lib[2]) ; -## MIPS Pro -toolset mipspro cc : "-o " : -D - : - [ opt --release : -s -O3 -g0 -INLINE:none ] - [ opt --debug : -g -O0 -INLINE ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Microsoft Visual Studio C++ 6.x -toolset msvc cl : /Fe /Fe /Fd /Fo : -D - : /nologo - [ opt --release : /ML /O2 /Ob2 /Gy /GF /GA /GB ] - [ opt --debug : /MLd /DEBUG /Z7 /Od /Ob0 ] - -I$(--python-include) -I$(--extra-include) - : kernel32.lib advapi32.lib user32.lib $(--python-lib[1]) ; -## QNX 6.x GCC 3.x/2.95.3 -toolset qcc qcc : "-o " : -D - : -Wc,-pedantic -Wc,-fno-strict-aliasing - [ opt --release : [ opt --symbols : -g ] -O3 -Wc,-finline-functions ] - [ opt --debug : -g -O0 -Wc,-fno-inline ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Qlogic Pathscale 2.4 -toolset pathscale pathcc : "-o " : -D - : - [ opt --release : -s -Ofast -O3 ] - [ opt --debug : -g ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Portland Group Pgi 6.2 -toolset pgi pgcc : "-o " : -D - : - [ opt --release : -s -O3 ] - [ opt --debug : -g ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Sun Workshop 6 C++ -toolset sun cc : "-o " : -D - : - [ opt --release : -s -fast -xO4 ] - [ opt --debug : -g ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Sun Workshop 6 C++ (old alias) -toolset sunpro cc : "-o " : -D - : - [ opt --release : -s -fast -xO4 ] - [ opt --debug : -g ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## Compaq Alpha CXX -toolset tru64cxx cc : "-o " : -D - : - [ opt --release : -s -O5 -inline speed ] - [ opt --debug : -g -O0 -pg ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) ; -## IBM VisualAge C++ -toolset vacpp xlc : "-o " : -D - : - [ opt --release : -s -O3 -qstrict -qinline ] - [ opt --debug : -g -qNOOPTimize -qnoinline -pg ] - -I$(--python-include) -I$(--extra-include) - : -L$(--python-lib[1]) -l$(--python-lib[2]) [ if-os AIX : -bmaxdata:0x40000000 ] ; -## Microsoft Visual C++ .NET 7.x -toolset vc7 cl : /Fe /Fe /Fd /Fo : 
-D - : /nologo - [ opt --release : /ML /O2 /Ob2 /Gy /GF /GA /GB ] - [ opt --debug : /MLd /DEBUG /Z7 /Od /Ob0 ] - -I$(--python-include) -I$(--extra-include) - : kernel32.lib advapi32.lib user32.lib $(--python-lib[1]) ; -## Microsoft Visual C++ 2005 -toolset vc8 cl : /Fe /Fe /Fd /Fo : -D - : /nologo - [ opt --release : /MT /O2 /Ob2 /Gy /GF /GA /wd4996 ] - [ opt --debug : /MTd /DEBUG /Z7 /Od /Ob0 /wd4996 ] - -I$(--python-include) -I$(--extra-include) - : kernel32.lib advapi32.lib user32.lib $(--python-lib[1]) ; -## Microsoft Visual C++ 2008 -toolset vc9 cl : /Fe /Fe /Fd /Fo : -D - : /nologo - [ opt --release : /MT /O2 /Ob2 /Gy /GF /GA /wd4996 ] - [ opt --debug : /MTd /DEBUG /Z7 /Od /Ob0 /wd4996 ] - -I$(--python-include) -I$(--extra-include) - : kernel32.lib advapi32.lib user32.lib $(--python-lib[1]) ; -## Microsoft Visual C++ 2010 -toolset vc10 cl : /Fe /Fe /Fd /Fo : -D - : /nologo - [ opt --release : /MT /O2 /Ob2 /Gy /GF /GA /wd4996 ] - [ opt --debug : /MTd /DEBUG /Z7 /Od /Ob0 /wd4996 ] - -I$(--python-include) -I$(--extra-include) - : kernel32.lib advapi32.lib user32.lib $(--python-lib[1]) ; -## VMS/OpenVMS DEC C -toolset vmsdecc cc : /OBJECT= : "/DEFINES=(" "," ")" - : /STANDARD=VAXC /PREFIX_LIBRARY_ENTRIES=ALL_ENTRIES - [ opt --release : /OPTIMIZE /NODEBUG ] - [ opt --debug : /NOOPTIMIZE /DEBUG ] - ; -toolset vmsdecc link .link : /EXECUTABLE= : - : /NOMAP - [ opt --release : /NODEBUG ] - [ opt --debug : /DEBUG ] - ; - -# First set the build commands and options according to the -# preset toolset. -toolset = [ MATCH --toolset=(.*) : $(ARGV) ] ; -if ! $(toolset) -{ - # For some reason, the following test does not catch empty toolset. - ECHO "###" ; - ECHO "###" No toolset specified. Please use --toolset option. ; - ECHO "###" ; - ECHO "###" Known toolsets are: $(toolsets:J=", ") ; - EXIT "###" ; -} -if ! $(toolset) in $(toolsets) -{ - ECHO "###" ; - ECHO "###" Unknown toolset: $(toolset) ; - ECHO "###" ; - ECHO "###" Known toolsets are: $(toolsets:J=", ") ; - EXIT "###" ; -} ---cc = $(tool.$(toolset).cc) ; -if $(tool.$(toolset).opt.out[2]) -{ - if $(tool.$(toolset).opt.out[1]) = $(tool.$(toolset).opt.out[2]) - { - --out = $(tool.$(toolset).opt.out[1]) ; - --dir = $(tool.$(toolset).opt.out[3-]) ; - } - else - { - --bin = $(tool.$(toolset).opt.out[1]) ; - --dir = $(tool.$(toolset).opt.out[2-]) ; - } -} -else -{ - --out = $(tool.$(toolset).opt.out) ; -} ---def = $(tool.$(toolset).opt.define) ; ---flags = $(tool.$(toolset).flags) ; ---defs = $(tool.$(toolset).defines) ; ---libs = $(tool.$(toolset).linklibs) ; -if $(tool.$(toolset).link.cc) -{ - --link = $(tool.$(toolset).link.cc) ; - if $(tool.$(toolset).link.opt.out[2]) - { - if $(tool.$(toolset).link.opt.out[1]) = $(tool.$(toolset).link.opt.out[2]) - { - --link-out = $(tool.$(toolset).link.opt.out[1]) ; - --link-dir = $(tool.$(toolset).link.opt.out[3-]) ; - } - else - { - --link-bin = $(tool.$(toolset).link.opt.out[1]) ; - --link-dir = $(tool.$(toolset).link.opt.out[2-]) ; - } - } - else - { - --link-out = $(tool.$(toolset).link.opt.out) ; - } - --link-def = $(tool.$(toolset).link.opt.define) ; - --link-flags = $(tool.$(toolset).link.flags) ; - --link-defs = $(tool.$(toolset).link.defines) ; - --link-libs = $(tool.$(toolset).link.linklibs) ; -} - -# Put executables in platform-specific subdirectory. 
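# [Annotation, not part of the deleted build.jam: with the defaults below,
# binaries land in a per-platform directory, e.g. bin.linuxx86 on 32-bit
# Linux, bin.ntx86 on Windows, bin_vms on VMS; --debug builds get a further
# suffix, e.g. bin.linuxx86.debug. ]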
-locate-target = $(LOCATE_TARGET) ; -if $(OS) = VMS -{ - locate-target ?= bin$(.)vms ; - platform = vms ; -} -else if $(OS) = MAC -{ - locate-target ?= bin$(.)$(OS:L)$(OSPLAT:L) ; - platform = $(OS:L)$(OSPLAT:L) ; -} -else if $(OSPLAT) -{ - locate-target ?= bin$(.)$(OS:L)$(OSPLAT:L) ; - platform = $(OS:L)$(OSPLAT:L) ; -} -else -{ - locate-target ?= bin$(.)$(OS:L) ; - platform = $(OS:L) ; -} -if $(debug) -{ - locate-target = [ .path $(locate-target)$(.)debug ] ; -} -if $(profile) -{ - locate-target = [ .path $(locate-target)$(.)profile ] ; -} -else -{ - locate-target = [ .path $(locate-target) ] ; -} - -if --show-locate-target in $(ARGV) -{ - ECHO $(locate-target) ; -} - -# We have some different files for UNIX, VMS, and NT. -jam.source = - command.c compile.c debug.c expand.c glob.c - hash.c hcache.c headers.c hdrmacro.c - jam.c jambase.c jamgram.c - lists.c make.c make1.c mem.c newstr.c - option.c output.c parse.c regexp.c rules.c - scan.c search.c subst.c w32_getreg.c - timestamp.c variable.c modules.c strings.c filesys.c - builtins.c pwd.c class.c native.c md5.c modules/set.c - modules/path.c modules/regex.c modules/property-set.c - modules/sequence.c modules/order.c - ; -if $(OS) = NT -{ - jam.source += execnt.c filent.c pathunix.c ; -} -else if $(OS) = OS2 -{ - jam.source += execunix.c fileos2.c pathunix.c ; -} -else if $(OS) = VMS -{ - jam.source += execvms.c filevms.c pathvms.c ; -} -else if $(OS) = MAC -{ - jam.source += execmac.c filemac.c pathmac.c ; -} -else -{ - jam.source += execunix.c fileunix.c pathunix.c ; -} - -# Debug assertions, or not. -if ! $(debug) || --noassert in $(ARGV) -{ - --defs += NDEBUG ; -} - -# Enable some optional features. ---defs += OPT_HEADER_CACHE_EXT ; ---defs += OPT_GRAPH_DEBUG_EXT ; ---defs += OPT_SEMAPHORE ; ---defs += OPT_AT_FILES ; ---defs += OPT_DEBUG_PROFILE ; - -# Bug fixes ---defs += OPT_FIX_TARGET_VARIABLES_EXT ; -#~ --defs += OPT_NO_EXTERNAL_VARIABLE_SPLIT ; - -# Improvements ---defs += OPT_IMPROVED_PATIENCE_EXT ; - -# Use Boehm GC memory allocator? -if $(--boehm-gc) -{ - --defs += OPT_BOEHM_GC ; - if $(debug) - { - --defs += GC_DEBUG ; - } -} - -if $(--duma) -{ - --defs += OPT_DUMA ; -} - -if ( $(OS) = NT ) && ! NT in $(--defs) -{ - --defs += NT ; -} -if $(OS) = VMS -{ - --defs += VMS ; -} ---defs += YYSTACKSIZE=5000 ; - -if $(with-python) -{ - --defs += HAVE_PYTHON ; -} - -if $(debug) -{ - --defs += BJAM_NEWSTR_NO_ALLOCATE ; -} - - -# The basic symbolic targets... -NOTFILE all clean dist ; -ALWAYS clean ; - -# Utility rules and actions... -rule .clean -{ - [DELETE] clean : $(<) ; -} -if $(OS) = NT { actions piecemeal together existing [DELETE] { - del /F /Q "$(>)" -} } -if $(UNIX) = true { actions piecemeal together existing [DELETE] { - rm -f "$(>)" -} } -if $(OS) = VMS { actions piecemeal together existing [DELETE] { - DELETE $(>[--2]:J=";*, ") $(>[-1]);* -} } -if $(OS) = NT { - --chmod+w = "attrib -r " ; -} -if $(UNIX) = true { - --chmod+w = "chmod +w " ; -} -if $(OS) = VMS { - --chmod+w = "SET FILE/PROT=(S:RWED) " ; -} - -rule .mkdir -{ - NOUPDATE $(<) ; - if $(<:P) { DEPENDS $(<) : $(<:P) ; .mkdir $(<:P) ; } - if ! 
$(md<$(<)>) { [MKDIR] $(<) ; md<$(<)> = - ; } -} -if $(OS) = NT { actions [MKDIR] { - md "$(<)" -} } -if $(UNIX) = true { actions [MKDIR] { - mkdir "$(<)" -} } -if $(OS) = VMS { actions [MKDIR] { - CREATE/DIR $(<J=", ") -} } - -rule .exe -{ - local exe = $(<) ; - if $(OS) = NT || ( $(UNIX) = true && $(OS) = CYGWIN ) || $(OS) = VMS { exe = $(exe:S=.exe) ; } - LOCATE on $(exe) = $(locate-target) ; - DEPENDS all : $(exe) ; - .mkdir $(locate-target) ; - if $(--link) - { - local objs = ; - for local s in $(>) - { - # Translate any subdir elements into a simple file name. - local o = [ MATCH "([^/]+)[/]?(.+)" : $(s) ] ; - o = $(o:J=_) ; - o = $(o:S=.o) ; - objs += $(o) ; - LOCATE on $(o) = $(locate-target) ; - DEPENDS $(exe) : $(o) ; - DEPENDS $(o) : $(s) ; - DEPENDS $(o) : $(locate-target) ; - [COMPILE] $(o) : $(s) ; - .clean $(o) ; - } - DEPENDS $(exe) : $(objs) ; - DEPENDS $(exe) : $(locate-target) ; - [COMPILE.LINK] $(exe) : $(objs) ; - .clean $(exe) ; - } - else - { - DEPENDS $(exe) : $(>) ; - DEPENDS $(exe) : $(locate-target) ; - [COMPILE] $(exe) : $(>) ; - .clean $(exe) ; - } - return $(exe) ; -} -if ! $(--def[2]) { actions [COMPILE] { - "$(--cc)" "$(--bin)$(<:D=)" "$(--dir)$(<:D)$(./)" $(--out)$(<) "$(--def)$(--defs)" "$(--flags)" "$(--libs)" "$(>)" -} } -else { actions [COMPILE] { - "$(--cc)" "$(--bin)$(<:D=)" "$(--dir)$(<:D)$(./)" $(--out)$(<) "$(--def[1])$(--defs:J=$(--def[2]))$(--def[3])" "$(--flags)" "$(--libs)" "$(>)" -} } -if $(OS) = VMS { actions [COMPILE.LINK] { - "$(--link)" $(--link-bin)$(<:D=) $(--link-dir)$(<:D)$(./) $(--link-out)$(<) $(--link-def)$(--link-defs) $(--link-flags) "$(--link-libs)" $(>J=", ") -} } -else { actions [COMPILE.LINK] { - "$(--link)" "$(--link-bin)$(<:D=)" "$(--link-dir)$(<:D)$(./)" "$(--link-out)$(<)" "$(--link-def)$(--link-defs)" "$(--link-flags)" "$(--link-libs)" "$(>)" -} } - -rule .link -{ - DEPENDS all : $(<) ; - DEPENDS $(<) : $(>) ; - [LINK] $(<) : $(>) ; - .clean $(<) ; -} -if $(OS) = NT { actions [LINK] { - copy "$(>)" "$(<)" -} } -if $(UNIX) = true { actions [LINK] { - ln -fs "$(>)" "$(<)" -} } -if $(OS) = VMS { actions [LINK] { - COPY/REPLACE $(>) $(<) -} } - -rule .copy -{ - DEPENDS all : $(<) ; - DEPENDS $(<) : $(>) ; - [COPY] $(<) : $(>) ; - .clean $(<) ; -} - -# Will be redefined later. -actions [COPY] -{ -} - - -rule .move -{ - DEPENDS $(<) : $(>) ; - [MOVE] $(<) : $(>) ; -} -if $(OS) = NT { actions [MOVE] { - del /f "$(<)" - rename "$(>)" "$(<)" -} } -if $(UNIX) = true { actions [MOVE] { - mv -f "$(>)" "$(<)" -} } -if $(OS) = VMS { actions [MOVE] { - RENAME "$(>)" "$(<)" -} } - -# Generate the grammar tokens table, and the real yacc grammar. -rule .yyacc -{ - local exe = [ .exe yyacc : yyacc.c ] ; - NOUPDATE $(exe) ; - DEPENDS $(<) : $(exe) $(>) ; - LEAVES $(<) ; - yyacc.exe on $(<) = $(exe:R=$(locate-target)) ; - [YYACC] $(<) : $(>) ; -} -actions [YYACC] { - $(--chmod+w)$(<[1]) - $(--chmod+w)$(<[2]) - "$(yyacc.exe)" "$(<)" "$(>)" -} -if $(grammar) -{ - .yyacc jamgram.y jamgramtab.h : jamgram.yy ; -} -else if $(debug) -{ - .exe yyacc : yyacc.c ; -} - -# How to build the grammar. -if $(OS) = NT -{ - SUFEXE = .exe ; - # try some other likely spellings... 
- PATH ?= $(Path) ; - PATH ?= $(path) ; -} -SUFEXE ?= "" ; - -yacc ?= [ GLOB $(PATH) : yacc$(SUFEXE) ] ; -yacc ?= [ GLOB $(PATH) : bison$(SUFEXE) ] ; -yacc ?= [ GLOB "$(ProgramFiles:J= )\\GnuWin32\\bin" "C:\\Program Files\\GnuWin32\\bin" : bison$(SUFEXE) ] ; -yacc = $(yacc[1]) ; -switch $(yacc:D=:S=) -{ - case bison : yacc += -d --yacc ; - case yacc : yacc += -d ; -} -if $(debug) && $(yacc) -{ - yacc += -t -v ; -} -yacc += $(YACCFLAGS) ; - -rule .yacc -{ - DEPENDS $(<) : $(>) ; - LEAVES $(<) ; - [YACC] $(<) : $(>) ; -} -if $(OS) = NT { actions [YACC] { - "$(yacc)" "$(>)" - if not errorlevel 1 ( - del /f "$(<[1])" - rename y.tab$(<[1]:S) "$(<[1])" - del /f $(<[2]) - rename y.tab$(<[2]:S) "$(<[2])" - ) else set _error_ = -} } -if $(UNIX) = true { actions [YACC] { - if ` "$(yacc)" "$(>)" ` ; then - mv -f y.tab$(<[1]:S) "$(<[1])" - mv -f y.tab$(<[2]:S) "$(<[2])" - else - exit 1 - fi -} } -if $(OS) = VMS { actions [YACC] { - IF "$(yacc)" $(>) - THEN - RENAME y_tab$(<[1]:S) $(<[1]) - RENAME y_tab$(<[2]:S) $(<[2]) - ENDIF -} } -if $(grammar) && ! $(yacc) -{ - EXIT "Could not find the 'yacc' tool, and therefore can not build the grammar." ; -} -if $(grammar) && $(yacc) -{ - .yacc jamgram.c jamgram.h : jamgram.y ; -} - -# How to build the compiled in jambase. -rule .mkjambase -{ - local exe = [ .exe mkjambase : mkjambase.c ] ; - DEPENDS $(<) : $(exe) $(>) ; - LEAVES $(<) ; - mkjambase.exe on $(<) = $(exe:R=$(locate-target)) ; - [MKJAMBASE] $(<) : $(>) ; -} -actions [MKJAMBASE] { - $(--chmod+w)$(<) - $(mkjambase.exe) "$(<)" "$(>)" -} -if $(debug) -{ - .mkjambase jambase.c : Jambase ; -} - -# How to build Jam. -rule .jam -{ - $(>).exe = [ .exe $(>) : $(jam.source) ] ; - DEPENDS all : $($(>).exe) ; - - # Make a copy under the old name. - $(<).exe = $(<:S=$($(>).exe:S)) ; - LOCATE on $($(<).exe) = $(locate-target) ; - .copy $($(<).exe) : $($(>).exe) ; - DEPENDS all : $($(<).exe) ; -} -.jam bjam : b2 ; - - -# Scan sources for header dependencies. -# WARNING: Yes those are *REAL TABS* below. DO NOT CHANGE, -# under any circumstances, to spaces!! And the tabs -# indenting this are so that if someone is in the mood to -# replace tabs they hit this comment, and hopefully notice -# their error. -rule .scan -{ - HDRRULE on $(<:D=) = .hdr.scan ; - HDRSCAN on $(<:D=) = "^[ ]*#[ ]*include[ ]*([<\"][^\">]*[\">]).*$" ; -} -rule .hdr.scan -{ - local hdrs = [ GLOB . : $(>:D=) ] ; - INCLUDES $(<:D=) : $(hdrs:D=) ; - HDRRULE on $(>:D=) = .hdr.scan ; - HDRSCAN on $(>:D=) = "^[ ]*#[ ]*include[ ]*([<\"][^\">]*[\">]).*$" ; -} -.scan [ GLOB . : *.c ] ; - -# Distribution making from here on out. Assumes that -# the docs are already built as html at ../doc/html. If -# they aren't, then the docs are not included in the dist -# archive. -dist.license = - [ GLOB . : $(LICENSE).txt ] - ; -dist.license = $(dist.license:D=) - [ GLOB [ .path .. .. .. ] : $(LICENSE).txt ] - [ GLOB [ .path .. boost ] : $(LICENSE).txt ] ; -dist.docs = - [ GLOB . : *.png *.css *.html ] - ; -dist.docs = $(dist.docs:D=) - [ GLOB [ .path images ] : *.png ] - [ GLOB [ .path jam ] : *.html ] - ; -dist.source = - [ GLOB . 
: *.c *.h ] - ; -dist.source = $(dist.source:D=) - $(dist.license[1]) - $(dist.docs) - build.jam build.bat build.sh build_vms.com - Jambase - jamgram.y jamgram.yy - [ .path modules set.c ] - [ .path modules path.c ] - [ .path modules regex.c ] - [ .path modules property-set.c ] - [ .path modules sequence.c ] - [ .path modules order.c ] - [ GLOB [ .path boehm_gc ] : * ] - [ GLOB [ .path boehm_gc include ] : * ] - [ GLOB [ .path boehm_gc include private ] : * ] - [ GLOB [ .path boehm_gc cord ] : * ] - [ GLOB [ .path boehm_gc Mac_files ] : * ] - [ GLOB [ .path boehm_gc tests ] : * ] - [ GLOB [ .path boehm_gc doc ] : * ] - ; -dist.bin = - bjam - ; -dist.bin = - $(dist.license[1]) - $(dist.bin:S=$(bjam.exe:S)) - ; - -if $(OS) = NT -{ - zip ?= [ GLOB "$(ProgramFiles:J= )\\7-ZIP" "C:\\Program Files\\7-ZIP" : "7z.exe" ] ; - zip ?= [ GLOB "$(ProgramFiles:J= )\\7-ZIP" "C:\\Program Files\\7-ZIP" : "7zn.exe" ] ; - zip ?= [ GLOB $(PATH) : zip.exe ] ; - zip ?= zip ; - zip = $(zip[1]) ; - switch $(zip:D=:S=) - { - case 7z* : zip += a -r -tzip -mx=9 ; - case zip : zip += -9r ; - } - actions piecemeal [PACK] { - "$(zip)" "$(<)" "$(>)" - } - actions piecemeal [ZIP] { - "$(zip)" "$(<)" "$(>)" - } - actions piecemeal [COPY] { - copy /Y "$(>)" "$(<)" >NUL: - } -} -if $(UNIX) = true -{ - tar ?= [ GLOB $(PATH) : star bsdtar tar ] ; - tar = $(tar[1]) ; - switch $(tar:D=:S=) - { - case star : tar += -c artype=pax -D -d -to-stdout ; - case * : tar += -c -f - ; - } - actions [PACK] { - "$(tar)" "$(>)" | gzip -c9 > "$(<)" - } - #~ actions [PACK] { - #~ tar cf "$(<:S=.tar)" "$(>)" - #~ } - actions [ZIP] { - gzip -c9 "$(>)" > "$(<)" - } - actions [COPY] { - cp -Rpf "$(>)" "$(<)" - } -} - -# The single binary, compressed. -rule .binary -{ - local zip = ; - if $(OS) = NT { zip = $($(<).exe:S=.zip) ; } - if $(UNIX) = true { zip = $($(<).exe:S=.tgz) ; } - zip = $(zip:S=)-$(VERSION)-$(RELEASE)-$(platform)$(zip:S) ; - DEPENDS $(zip) : $($(<).exe) ; - DEPENDS dist : $(zip) ; - #~ LOCATE on $(zip) = $(locate-target) ; - if $(OS) = NT { [ZIP] $(zip) : $($(<).exe) ; } - if $(UNIX) = true { [PACK] $(zip) : $($(<).exe) ; } - .clean $(zip) ; -} - -# Package some file. -rule .package ( dst-dir : src-files + ) -{ - local dst-files ; - local src-files-actual ; - for local src-path in $(src-files) - { - if ! [ GLOB $(src-path:P) : $(src-path:B) ] || [ CHECK_IF_FILE $(src-path) ] - { - local src-subdir = $(src-path:D) ; - local src-file = $(src-path) ; - while $(src-subdir:D) { src-subdir = $(src-subdir:D) ; } - if $(src-subdir) = ".." - { - src-file = $(src-file:D=) ; - } - dst-files += $(src-file:R=$(dst-dir)) ; - src-files-actual += $(src-path) ; - } - } - - local pack = ; - if $(OS) = NT { pack = $(dst-dir).zip ; } - if $(UNIX) = true { pack = $(dst-dir).tgz ; } - - DEPENDS dist : $(pack) ; - DEPENDS $(pack) : $(dst-files) ; - - local dst-files-queue = $(dst-files) ; - for local src-path in $(src-files-actual) - { - local dst-file = $(dst-files-queue[1]) ; - dst-files-queue = $(dst-files-queue[2-]) ; - DEPENDS $(dst-file) : $(src-path) $(dst-file:D) ; - .mkdir $(dst-file:D) ; - - [COPY] $(dst-file) : $(src-path) ; - .clean $(dst-file) ; - } - - [PACK] $(pack) : $(dst-files) ; - .clean $(pack) ; -} - -# RPM distro file. 
-rpm-tool = [ GLOB $(PATH) : "rpmbuild" ] ; -rpm-tool ?= [ GLOB $(PATH) : "rpm" ] ; -rpm-tool = $(rpm-tool[1]) ; -rule .rpm ( name : source ) -{ - local rpm-arch = ; - switch $(OSPLAT) - { - case X86 : rpm-arch ?= i386 ; - case PPC : rpm-arch ?= ppc ; - case AXP : rpm-arch ?= alpha ; - # no guarantee for these: - case IA64 : rpm-arch ?= ia64 ; - case ARM : rpm-arch ?= arm ; - case SPARC : rpm-arch ?= sparc ; - case * : rpm-arch ?= other ; - } - local target = $(name)-rpm ; - NOTFILE $(target) ; - DEPENDS dist : $(target) ; - DEPENDS $(target) : $(name).$(rpm-arch).rpm $(name).src.rpm ; - DEPENDS $(name).$(rpm-arch).rpm : $(source) ; - DEPENDS $(name).src.rpm : $(name).$(rpm-arch).rpm ; - docs on $(target) = $(dist.docs:J=" ") ; - arch on $(target) = $(rpm-arch) ; - if $(rpm-arch) = ppc { target-opt on $(target) = --target= ; } - else { target-opt on $(target) = "--target " ; } - [RPM] $(target) : $(source) ; - .clean $(name).$(rpm-arch).rpm $(name).src.rpm ; -} -actions [RPM] { - set -e - export BOOST_JAM_TOOLSET="$(toolset)" - $(rpm-tool) -ta $(target-opt)$(arch) $(>) | tee rpm.out - cp `grep -e '^Wrote:' rpm.out | sed 's/^Wrote: //'` . - rm -f rpm.out -} - -# The distribution targets. Don't bother with the targets if -# distribution build not requested. -if dist in $(ARGV) -{ - #~ .binary bjam ; - .package $(NAME)-$(VERSION) : $(dist.source) ; - .package $(NAME)-$(VERSION)-$(RELEASE)-$(platform) : $(dist.bin) ; - if $(rpm-tool) - { - #~ .rpm $(NAME)-$(VERSION)-$(RELEASE) : $(NAME)-$(VERSION).tgz ; - } -} diff --git a/jam-files/engine/build.sh b/jam-files/engine/build.sh deleted file mode 100755 index f1fb806d..00000000 --- a/jam-files/engine/build.sh +++ /dev/null @@ -1,303 +0,0 @@ -#!/bin/sh - -#~ Copyright 2002-2005 Rene Rivera. -#~ Distributed under the Boost Software License, Version 1.0. -#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - -# Reset the toolset. -BOOST_JAM_TOOLSET= - -# Run a command, and echo before doing so. Also checks the exit -# status and quits if there was an error. -echo_run () -{ - echo "$@" - $@ - r=$? - if test $r -ne 0 ; then - exit $r - fi -} - -# Print an error message, and exit with a status of 1. -error_exit () -{ - echo "###" - echo "###" "$@" - echo "###" - echo "### You can specify the toolset as the argument, i.e.:" - echo "### ./build.sh gcc" - echo "###" - echo "### Toolsets supported by this script are:" - echo "### acc, como, darwin, gcc, intel-darwin, intel-linux, kcc, kylix," - echo "### mipspro, mingw(msys), pathscale, pgi, qcc, sun, sunpro, tru64cxx, vacpp" - echo "###" - echo "### A special toolset, cc, is available which is used as a fallback" - echo "### when a more specific toolset is not found and the cc command is" - echo "### detected. The 'cc' toolset will use the CC, CFLAGS, and LIBS" - echo "### environment variables, if present." - echo "###" - exit 1 -} - -# Check that a command is in the PATH. -test_path () -{ - if `command -v command 1>/dev/null 2>/dev/null`; then - command -v $1 1>/dev/null 2>/dev/null - else - hash $1 1>/dev/null 2>/dev/null - fi -} - -# Check that the OS name, as returned by "uname", is as given. -test_uname () -{ - if test_path uname; then - test `uname` = $* - fi -} - -# Try and guess the toolset to bootstrap the build with...
-Guess_Toolset () -{ - if test -r /mingw/bin/gcc ; then - BOOST_JAM_TOOLSET=mingw - BOOST_JAM_TOOLSET_ROOT=/mingw/ - elif test_uname Darwin ; then BOOST_JAM_TOOLSET=darwin - elif test_uname IRIX ; then BOOST_JAM_TOOLSET=mipspro - elif test_uname IRIX64 ; then BOOST_JAM_TOOLSET=mipspro - elif test_uname OSF1 ; then BOOST_JAM_TOOLSET=tru64cxx - elif test_uname QNX && test_path qcc ; then BOOST_JAM_TOOLSET=qcc - elif test_path gcc ; then BOOST_JAM_TOOLSET=gcc - elif test_path icc ; then BOOST_JAM_TOOLSET=intel-linux - elif test -r /opt/intel/cc/9.0/bin/iccvars.sh ; then - BOOST_JAM_TOOLSET=intel-linux - BOOST_JAM_TOOLSET_ROOT=/opt/intel/cc/9.0 - elif test -r /opt/intel_cc_80/bin/iccvars.sh ; then - BOOST_JAM_TOOLSET=intel-linux - BOOST_JAM_TOOLSET_ROOT=/opt/intel_cc_80 - elif test -r /opt/intel/compiler70/ia32/bin/iccvars.sh ; then - BOOST_JAM_TOOLSET=intel-linux - BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler70/ia32/ - elif test -r /opt/intel/compiler60/ia32/bin/iccvars.sh ; then - BOOST_JAM_TOOLSET=intel-linux - BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler60/ia32/ - elif test -r /opt/intel/compiler50/ia32/bin/iccvars.sh ; then - BOOST_JAM_TOOLSET=intel-linux - BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler50/ia32/ - elif test_path pgcc ; then BOOST_JAM_TOOLSET=pgi - elif test_path pathcc ; then BOOST_JAM_TOOLSET=pathscale - elif test_path xlc ; then BOOST_JAM_TOOLSET=vacpp - elif test_path como ; then BOOST_JAM_TOOLSET=como - elif test_path KCC ; then BOOST_JAM_TOOLSET=kcc - elif test_path bc++ ; then BOOST_JAM_TOOLSET=kylix - elif test_path aCC ; then BOOST_JAM_TOOLSET=acc - elif test_uname HP-UX ; then BOOST_JAM_TOOLSET=acc - elif test -r /opt/SUNWspro/bin/cc ; then - BOOST_JAM_TOOLSET=sunpro - BOOST_JAM_TOOLSET_ROOT=/opt/SUNWspro/ - # Test for "cc" as the default fallback. - elif test_path $CC ; then BOOST_JAM_TOOLSET=cc - elif test_path cc ; then - BOOST_JAM_TOOLSET=cc - CC=cc - fi - if test "$BOOST_JAM_TOOLSET" = "" ; then - error_exit "Could not find a suitable toolset." - fi -} - -# The one option we support in the invocation -# is the name of the toolset to force building -# with. -case "$1" in - --guess-toolset) Guess_Toolset ; echo "$BOOST_JAM_TOOLSET" ; exit 1 ;; - -*) Guess_Toolset ;; - ?*) BOOST_JAM_TOOLSET=$1 ; shift ;; - *) Guess_Toolset ;; -esac -BOOST_JAM_OPT_JAM="-o bootstrap/jam0" -BOOST_JAM_OPT_MKJAMBASE="-o bootstrap/mkjambase0" -BOOST_JAM_OPT_YYACC="-o bootstrap/yyacc0" -case $BOOST_JAM_TOOLSET in - mingw) - if test -r ${BOOST_JAM_TOOLSET_ROOT}bin/gcc ; then - export PATH=${BOOST_JAM_TOOLSET_ROOT}bin:$PATH - fi - BOOST_JAM_CC="gcc -DNT" - ;; - - gcc) - BOOST_JAM_CC=gcc - ;; - - darwin) - BOOST_JAM_CC=cc - ;; - - intel-darwin) - BOOST_JAM_CC=icc - ;; - - intel-linux) - if test -r /opt/intel/cc/9.0/bin/iccvars.sh ; then - BOOST_JAM_TOOLSET_ROOT=/opt/intel/cc/9.0/ - elif test -r /opt/intel_cc_80/bin/iccvars.sh ; then - BOOST_JAM_TOOLSET_ROOT=/opt/intel_cc_80/ - elif test -r /opt/intel/compiler70/ia32/bin/iccvars.sh ; then - BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler70/ia32/ - elif test -r /opt/intel/compiler60/ia32/bin/iccvars.sh ; then - BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler60/ia32/ - elif test -r /opt/intel/compiler50/ia32/bin/iccvars.sh ; then - BOOST_JAM_TOOLSET_ROOT=/opt/intel/compiler50/ia32/ - fi - if test -r ${BOOST_JAM_TOOLSET_ROOT}bin/iccvars.sh ; then - # iccvars doesn't change LD_RUN_PATH. We adjust LD_RUN_PATH - # here in order not to have to rely on ld.so.conf knowing the - # icc library directory. 
We do this before running iccvars.sh - # in order to allow a user to add modifications to LD_RUN_PATH - # in iccvars.sh. - if test -z "${LD_RUN_PATH}"; then - LD_RUN_PATH="${BOOST_JAM_TOOLSET_ROOT}lib" - else - LD_RUN_PATH="${BOOST_JAM_TOOLSET_ROOT}lib:${LD_RUN_PATH}" - fi - export LD_RUN_PATH - . ${BOOST_JAM_TOOLSET_ROOT}bin/iccvars.sh - fi - BOOST_JAM_CC=icc - ;; - - vacpp) - BOOST_JAM_CC=xlc - ;; - - como) - BOOST_JAM_CC="como --c" - ;; - - kcc) - BOOST_JAM_CC=KCC - ;; - - kylix) - BOOST_JAM_CC=bc++ - ;; - - mipspro) - BOOST_JAM_CC=cc - ;; - - pathscale) - BOOST_JAM_CC=pathcc - ;; - - pgi) - BOOST_JAM_CC=pgcc - ;; - - sun*) - if test -z "${BOOST_JAM_TOOLSET_ROOT}" -a -r /opt/SUNWspro/bin/cc ; then - BOOST_JAM_TOOLSET_ROOT=/opt/SUNWspro/ - fi - if test -r "${BOOST_JAM_TOOLSET_ROOT}bin/cc" ; then - PATH=${BOOST_JAM_TOOLSET_ROOT}bin:${PATH} - export PATH - fi - BOOST_JAM_CC=cc - ;; - - clang*) - BOOST_JAM_CC="clang -Wno-unused -Wno-format" - BOOST_JAM_TOOLSET=clang - ;; - - tru64cxx) - BOOST_JAM_CC=cc - ;; - - acc) - BOOST_JAM_CC="cc -Ae" - ;; - - cc) - if test -z "$CC" ; then CC=cc ; fi - BOOST_JAM_CC=$CC - BOOST_JAM_OPT_JAM="$BOOST_JAM_OPT_JAM $CFLAGS $LIBS" - BOOST_JAM_OPT_MKJAMBASE="$BOOST_JAM_OPT_MKJAMBASE $CFLAGS $LIBS" - BOOST_JAM_OPT_YYACC="$BOOST_JAM_OPT_YYACC $CFLAGS $LIBS" - ;; - - qcc) - BOOST_JAM_CC=qcc - ;; - - *) - error_exit "Unknown toolset: $BOOST_JAM_TOOLSET" - ;; -esac - -echo "###" -echo "### Using '$BOOST_JAM_TOOLSET' toolset." -echo "###" - -YYACC_SOURCES="yyacc.c" -MKJAMBASE_SOURCES="mkjambase.c" -BJAM_SOURCES="\ - command.c compile.c debug.c expand.c glob.c hash.c\ - hdrmacro.c headers.c jam.c jambase.c jamgram.c lists.c make.c make1.c\ - newstr.c option.c output.c parse.c pathunix.c pathvms.c regexp.c\ - rules.c scan.c search.c subst.c timestamp.c variable.c modules.c\ - strings.c filesys.c builtins.c pwd.c class.c native.c md5.c w32_getreg.c\ - modules/set.c modules/path.c modules/regex.c modules/property-set.c\ - modules/sequence.c modules/order.c" -case $BOOST_JAM_TOOLSET in - mingw) - BJAM_SOURCES="${BJAM_SOURCES} execnt.c filent.c" - ;; - - *) - BJAM_SOURCES="${BJAM_SOURCES} execunix.c fileunix.c" - ;; -esac - -BJAM_UPDATE= -if test "$1" = "--update" -o "$2" = "--update" -o "$3" = "--update" -o "$4" = "--update" ; then - BJAM_UPDATE="update" -fi -if test "${BJAM_UPDATE}" = "update" -a ! -x "./bootstrap/jam0" ; then - BJAM_UPDATE= -fi - -if test "${BJAM_UPDATE}" != "update" ; then - echo_run rm -rf bootstrap - echo_run mkdir bootstrap - if test ! -r jamgram.y -o ! -r jamgramtab.h ; then - echo_run ${BOOST_JAM_CC} ${BOOST_JAM_OPT_YYACC} ${YYACC_SOURCES} - if test -x "./bootstrap/yyacc0" ; then - echo_run ./bootstrap/yyacc0 jamgram.y jamgramtab.h jamgram.yy - fi - fi - if test ! -r jamgram.c -o ! -r jamgram.h ; then - if test_path yacc ; then YACC="yacc -d" - elif test_path bison ; then YACC="bison -y -d --yacc" - fi - echo_run $YACC jamgram.y - mv -f y.tab.c jamgram.c - mv -f y.tab.h jamgram.h - fi - if test ! 
-r jambase.c ; then - echo_run ${BOOST_JAM_CC} ${BOOST_JAM_OPT_MKJAMBASE} ${MKJAMBASE_SOURCES} - if test -x "./bootstrap/mkjambase0" ; then - echo_run ./bootstrap/mkjambase0 jambase.c Jambase - fi - fi - echo_run ${BOOST_JAM_CC} ${BOOST_JAM_OPT_JAM} ${BJAM_SOURCES} -fi -if test -x "./bootstrap/jam0" ; then - if test "${BJAM_UPDATE}" != "update" ; then - echo_run ./bootstrap/jam0 -f build.jam --toolset=$BOOST_JAM_TOOLSET "--toolset-root=$BOOST_JAM_TOOLSET_ROOT" clean - fi - echo_run ./bootstrap/jam0 -f build.jam --toolset=$BOOST_JAM_TOOLSET "--toolset-root=$BOOST_JAM_TOOLSET_ROOT" "$@" -fi diff --git a/jam-files/engine/build_vms.com b/jam-files/engine/build_vms.com deleted file mode 100644 index 965b6342..00000000 --- a/jam-files/engine/build_vms.com +++ /dev/null @@ -1,105 +0,0 @@ -$ ! Copyright 2002-2003 Rene Rivera, Johan Nilsson. -$ ! Distributed under the Boost Software License, Version 1.0. -$ ! (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) -$ ! -$ ! bootstrap build script for Jam -$ ! -$ SAY :== WRITE SYS$OUTPUT -$ ! -$ ON WARNING THEN CONTINUE -$ ! -$ IF "" .NES. F$SEARCH("[.bootstrap_vms]*.*") -$ THEN -$ SAY "Cleaning previous bootstrap files..." -$ ! -$ SET FILE/PROTECTION=(S:RWED) [.bootstrap_vms]*.*;* -$ DELETE [.bootstrap_vms]*.*;* -$ ENDIF -$ ! -$ IF "" .NES. F$SEARCH("bootstrap_vms.dir") -$ THEN -$ SAY "Removing previous bootstrap directory..." -$ ! -$ SET FILE/PROT=(S:RWED) bootstrap_vms.dir -$ DELETE bootstrap_vms.dir; -$ ENDIF -$ ! -$ SAY "Creating bootstrap directory..." -$ ! -$ CREATE/DIR [.bootstrap_vms] -$ ! -$ SAY "Building bootstrap jam..." -$ ! -$ CC_FLAGS = "/DEFINE=VMS /STANDARD=VAXC /PREFIX_LIBRARY_ENTRIES=ALL_ENTRIES " -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]builtins.obj builtins.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]command.obj command.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]compile.obj compile.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]execvms.obj execvms.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]expand.obj expand.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]filesys.obj filesys.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]filevms.obj filevms.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]glob.obj glob.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]hash.obj hash.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]hdrmacro.obj hdrmacro.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]headers.obj headers.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]jam.obj jam.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]jambase.obj jambase.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]jamgram.obj jamgram.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]lists.obj lists.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]make.obj make.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]make1.obj make1.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]modules.obj modules.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]newstr.obj newstr.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]option.obj option.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]parse.obj parse.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]pathvms.obj pathvms.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]pwd.obj pwd.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]regexp.obj regexp.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]rules.obj rules.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]scan.obj scan.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]search.obj search.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]strings.obj strings.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]subst.obj subst.c -$ cc 'CC_FLAGS /OBJECT=[.bootstrap_vms]timestamp.obj timestamp.c -$ cc 'CC_FLAGS
/OBJECT=[.bootstrap_vms]variable.obj variable.c -$ link - - /EXECUTABLE=[.bootstrap_vms]jam0.exe - - [.bootstrap_vms]builtins.obj, - - [.bootstrap_vms]command.obj, - - [.bootstrap_vms]compile.obj, - - [.bootstrap_vms]execvms.obj, - - [.bootstrap_vms]expand.obj, - - [.bootstrap_vms]filesys.obj, - - [.bootstrap_vms]filevms.obj, - - [.bootstrap_vms]glob.obj, - - [.bootstrap_vms]hash.obj, - - [.bootstrap_vms]hdrmacro.obj, - - [.bootstrap_vms]headers.obj, - - [.bootstrap_vms]jam.obj, - - [.bootstrap_vms]jambase.obj, - - [.bootstrap_vms]jamgram.obj, - - [.bootstrap_vms]lists.obj, - - [.bootstrap_vms]make.obj, - - [.bootstrap_vms]make1.obj, - - [.bootstrap_vms]modules.obj, - - [.bootstrap_vms]newstr.obj, - - [.bootstrap_vms]option.obj, - - [.bootstrap_vms]parse.obj, - - [.bootstrap_vms]pathvms.obj, - - [.bootstrap_vms]pwd.obj, - - [.bootstrap_vms]regexp.obj, - - [.bootstrap_vms]rules.obj, - - [.bootstrap_vms]scan.obj, - - [.bootstrap_vms]search.obj, - - [.bootstrap_vms]strings.obj, - - [.bootstrap_vms]subst.obj, - - [.bootstrap_vms]timestamp.obj, - - [.bootstrap_vms]variable.obj -$ ! -$ SAY "Cleaning any previous build..." -$ ! -$ MCR [.bootstrap_vms]jam0.exe -f build.jam --toolset=vmsdecc clean -$ ! -$ SAY "Building Boost.Jam..." -$ ! -$ MCR [.bootstrap_vms]jam0.exe -f build.jam --toolset=vmsdecc diff --git a/jam-files/engine/builtins.c b/jam-files/engine/builtins.c deleted file mode 100644 index b28a484e..00000000 --- a/jam-files/engine/builtins.c +++ /dev/null @@ -1,2310 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -#include "jam.h" - -#include "lists.h" -#include "parse.h" -#include "builtins.h" -#include "rules.h" -#include "filesys.h" -#include "newstr.h" -#include "regexp.h" -#include "frames.h" -#include "hash.h" -#include "strings.h" -#include "pwd.h" -#include "pathsys.h" -#include "make.h" -#include "hdrmacro.h" -#include "compile.h" -#include "native.h" -#include "variable.h" -#include "timestamp.h" -#include "md5.h" -#include <ctype.h> - -#if defined(USE_EXECUNIX) -# include <sys/types.h> -# include <sys/wait.h> -#else -/* - NT does not have wait() and associated macros; it uses the return value - of system() instead. Status code groups are documented at - http://msdn.microsoft.com/en-gb/library/ff565436.aspx -*/ -# define WIFEXITED(w) (((w) & 0XFFFFFF00) == 0) -# define WEXITSTATUS(w)(w) -#endif - -/* - * builtins.c - builtin jam rules - * - * External routines: - * - * load_builtin() - define builtin rules - * - * Internal routines: - * - * builtin_depends() - DEPENDS/INCLUDES rule. - * builtin_echo() - ECHO rule. - * builtin_exit() - EXIT rule. - * builtin_flags() - NOCARE, NOTFILE, TEMPORARY rule. - * builtin_glob() - GLOB rule. - * builtin_match() - MATCH rule.
- * - * 01/10/01 (seiwald) - split from compile.c - */ - - -/* - * compile_builtin() - define builtin rules - */ - -#define P0 (PARSE *)0 -#define C0 (char *)0 - -#if defined( OS_NT ) || defined( OS_CYGWIN ) - LIST * builtin_system_registry ( PARSE *, FRAME * ); - LIST * builtin_system_registry_names( PARSE *, FRAME * ); -#endif - -int glob( char * s, char * c ); - -void backtrace ( FRAME * ); -void backtrace_line ( FRAME * ); -void print_source_line( PARSE * ); - - -RULE * bind_builtin( char * name, LIST * (* f)( PARSE *, FRAME * ), int flags, char * * args ) -{ - argument_list* arg_list = 0; - - if ( args ) - { - arg_list = args_new(); - lol_build( arg_list->data, args ); - } - - return new_rule_body( root_module(), name, arg_list, - parse_make( f, P0, P0, P0, C0, C0, flags ), 1 ); -} - - -RULE * duplicate_rule( char * name, RULE * other ) -{ - return import_rule( other, root_module(), name ); -} - - -void load_builtins() -{ - duplicate_rule( "Always", - bind_builtin( "ALWAYS", - builtin_flags, T_FLAG_TOUCHED, 0 ) ); - - duplicate_rule( "Depends", - bind_builtin( "DEPENDS", - builtin_depends, 0, 0 ) ); - - duplicate_rule( "echo", - duplicate_rule( "Echo", - bind_builtin( "ECHO", - builtin_echo, 0, 0 ) ) ); - - { - char * args[] = { "message", "*", ":", "result-value", "?", 0 }; - duplicate_rule( "exit", - duplicate_rule( "Exit", - bind_builtin( "EXIT", - builtin_exit, 0, args ) ) ); - } - - { - char * args[] = { "directories", "*", ":", "patterns", "*", ":", "case-insensitive", "?", 0 }; - duplicate_rule( "Glob", - bind_builtin( "GLOB", builtin_glob, 0, args ) ); - } - - { - char * args[] = { "patterns", "*", 0 }; - bind_builtin( "GLOB-RECURSIVELY", - builtin_glob_recursive, 0, args ); - } - - duplicate_rule( "Includes", - bind_builtin( "INCLUDES", - builtin_depends, 1, 0 ) ); - - { - char * args[] = { "targets", "*", ":", "targets-to-rebuild", "*", 0 }; - bind_builtin( "REBUILDS", - builtin_rebuilds, 0, args ); - } - - duplicate_rule( "Leaves", - bind_builtin( "LEAVES", - builtin_flags, T_FLAG_LEAVES, 0 ) ); - - duplicate_rule( "Match", - bind_builtin( "MATCH", - builtin_match, 0, 0 ) ); - - { - /* The argument list must be 0-terminated and actually passed on. */ - char * args[] = { "string", ":", "delimiters", 0 }; - bind_builtin( "SPLIT_BY_CHARACTERS", - builtin_split_by_characters, 0, args ); - } - - duplicate_rule( "NoCare", - bind_builtin( "NOCARE", - builtin_flags, T_FLAG_NOCARE, 0 ) ); - - duplicate_rule( "NOTIME", - duplicate_rule( "NotFile", - bind_builtin( "NOTFILE", - builtin_flags, T_FLAG_NOTFILE, 0 ) ) ); - - duplicate_rule( "NoUpdate", - bind_builtin( "NOUPDATE", - builtin_flags, T_FLAG_NOUPDATE, 0 ) ); - - duplicate_rule( "Temporary", - bind_builtin( "TEMPORARY", - builtin_flags, T_FLAG_TEMP, 0 ) ); - - bind_builtin( "ISFILE", - builtin_flags, T_FLAG_ISFILE, 0 ); - - duplicate_rule( "HdrMacro", - bind_builtin( "HDRMACRO", - builtin_hdrmacro, 0, 0 ) ); - - /* FAIL_EXPECTED is used to indicate that the result of a target build - * action should be inverted (ok <=> fail); this can be useful when - * performing test runs from Jamfiles.
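- * - * As an illustrative sketch only (the target, action, and source names here - * are invented, not taken from any real Jamfile), a test that is supposed to - * fail could be declared as: - * - *   FAIL_EXPECTED t ; - *   actions run-must-fail { "$(>)" --option-known-to-be-rejected } - *   run-must-fail t : prog ; - * - * so that a non-zero exit status from the action counts as success for t.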
- */ - bind_builtin( "FAIL_EXPECTED", - builtin_flags, T_FLAG_FAIL_EXPECTED, 0 ); - - bind_builtin( "RMOLD", - builtin_flags, T_FLAG_RMOLD, 0 ); - - { - char * args[] = { "targets", "*", 0 }; - bind_builtin( "UPDATE", - builtin_update, 0, args ); - } - - { - char * args[] = { "targets", "*", - ":", "log", "?", - ":", "ignore-minus-n", "?", - ":", "ignore-minus-q", "?", 0 }; - bind_builtin( "UPDATE_NOW", - builtin_update_now, 0, args ); - } - - { - char * args[] = { "string", "pattern", "replacements", "+", 0 }; - duplicate_rule( "subst", - bind_builtin( "SUBST", - builtin_subst, 0, args ) ); - } - - { - char * args[] = { "module", "?", 0 }; - bind_builtin( "RULENAMES", - builtin_rulenames, 0, args ); - } - - - { - char * args[] = { "module", "?", 0 }; - bind_builtin( "VARNAMES", - builtin_varnames, 0, args ); - } - - { - char * args[] = { "module", "?", 0 }; - bind_builtin( "DELETE_MODULE", - builtin_delete_module, 0, args ); - } - - { - char * args[] = { "source_module", "?", - ":", "source_rules", "*", - ":", "target_module", "?", - ":", "target_rules", "*", - ":", "localize", "?", 0 }; - bind_builtin( "IMPORT", - builtin_import, 0, args ); - } - - { - char * args[] = { "module", "?", ":", "rules", "*", 0 }; - bind_builtin( "EXPORT", - builtin_export, 0, args ); - } - - { - char * args[] = { "levels", "?", 0 }; - bind_builtin( "CALLER_MODULE", - builtin_caller_module, 0, args ); - } - - { - char * args[] = { "levels", "?", 0 }; - bind_builtin( "BACKTRACE", - builtin_backtrace, 0, args ); - } - - { - char * args[] = { 0 }; - bind_builtin( "PWD", - builtin_pwd, 0, args ); - } - - { - char * args[] = { "target", "*", ":", "path", "*", 0 }; - bind_builtin( "SEARCH_FOR_TARGET", - builtin_search_for_target, 0, args ); - } - - { - char * args[] = { "modules_to_import", "+", ":", "target_module", "?", 0 }; - bind_builtin( "IMPORT_MODULE", - builtin_import_module, 0, args ); - } - - { - char * args[] = { "module", "?", 0 }; - bind_builtin( "IMPORTED_MODULES", - builtin_imported_modules, 0, args ); - } - - { - char * args[] = { "instance_module", ":", "class_module", 0 }; - bind_builtin( "INSTANCE", - builtin_instance, 0, args ); - } - - { - char * args[] = { "sequence", "*", 0 }; - bind_builtin( "SORT", - builtin_sort, 0, args ); - } - - { - char * args[] = { "path_parts", "*", 0 }; - bind_builtin( "NORMALIZE_PATH", - builtin_normalize_path, 0, args ); - } - - { - char * args[] = { "args", "*", 0 }; - bind_builtin( "CALC", - builtin_calc, 0, args ); - } - - { - char * args[] = { "module", ":", "rule", 0 }; - bind_builtin( "NATIVE_RULE", - builtin_native_rule, 0, args ); - } - - { - char * args[] = { "module", ":", "rule", ":", "version", 0 }; - bind_builtin( "HAS_NATIVE_RULE", - builtin_has_native_rule, 0, args ); - } - - { - char * args[] = { "module", "*", 0 }; - bind_builtin( "USER_MODULE", - builtin_user_module, 0, args ); - } - - { - char * args[] = { 0 }; - bind_builtin( "NEAREST_USER_LOCATION", - builtin_nearest_user_location, 0, args ); - } - - { - char * args[] = { "file", 0 }; - bind_builtin( "CHECK_IF_FILE", - builtin_check_if_file, 0, args ); - } - -#ifdef HAVE_PYTHON - { - char * args[] = { "python-module", ":", "function", ":", - "jam-module", ":", "rule-name", 0 }; - bind_builtin( "PYTHON_IMPORT_RULE", - builtin_python_import_rule, 0, args ); - } -#endif - -# if defined( OS_NT ) || defined( OS_CYGWIN ) - { - char * args[] = { "key_path", ":", "data", "?", 0 }; - bind_builtin( "W32_GETREG", - builtin_system_registry, 0, args ); - } - - { - char * args[] = { "key_path", ":", 
"result-type", 0 }; - bind_builtin( "W32_GETREGNAMES", - builtin_system_registry_names, 0, args ); - } -# endif - - { - char * args[] = { "command", ":", "*", 0 }; - duplicate_rule( "SHELL", - bind_builtin( "COMMAND", - builtin_shell, 0, args ) ); - } - - { - char * args[] = { "string", 0 }; - bind_builtin( "MD5", - builtin_md5, 0, args ) ; - } - - { - char * args[] = { "name", ":", "mode", 0 }; - bind_builtin( "FILE_OPEN", - builtin_file_open, 0, args ); - } - - { - char * args[] = { "string", ":", "width", 0 }; - bind_builtin( "PAD", - builtin_pad, 0, args ); - } - - { - char * args[] = { "targets", "*", 0 }; - bind_builtin( "PRECIOUS", - builtin_precious, 0, args ); - } - - { - char * args [] = { 0 }; - bind_builtin( "SELF_PATH", builtin_self_path, 0, args ); - } - - { - char * args [] = { "path", 0 }; - bind_builtin( "MAKEDIR", builtin_makedir, 0, args ); - } - - /* Initialize builtin modules. */ - init_set(); - init_path(); - init_regex(); - init_property_set(); - init_sequence(); - init_order(); -} - - -/* - * builtin_calc() - CALC rule. - * - * The CALC rule performs simple mathematical operations on two arguments. - */ - -LIST * builtin_calc( PARSE * parse, FRAME * frame ) -{ - LIST * arg = lol_get( frame->args, 0 ); - - LIST * result = 0; - long lhs_value; - long rhs_value; - long result_value; - char buffer [ 16 ]; - char const * lhs; - char const * op; - char const * rhs; - - if ( arg == 0 ) return L0; - lhs = arg->string; - - arg = list_next( arg ); - if ( arg == 0 ) return L0; - op = arg->string; - - arg = list_next( arg ); - if ( arg == 0 ) return L0; - rhs = arg->string; - - lhs_value = atoi( lhs ); - rhs_value = atoi( rhs ); - - if ( strcmp( "+", op ) == 0 ) - { - result_value = lhs_value + rhs_value; - } - else if ( strcmp( "-", op ) == 0 ) - { - result_value = lhs_value - rhs_value; - } - else - { - return L0; - } - - sprintf( buffer, "%ld", result_value ); - result = list_new( result, newstr( buffer ) ); - return result; -} - - -/* - * builtin_depends() - DEPENDS/INCLUDES rule. - * - * The DEPENDS/INCLUDES builtin rule appends each of the listed sources on the - * dependency/includes list of each of the listed targets. It binds both the - * targets and sources as TARGETs. - */ - -LIST * builtin_depends( PARSE * parse, FRAME * frame ) -{ - LIST * targets = lol_get( frame->args, 0 ); - LIST * sources = lol_get( frame->args, 1 ); - LIST * l; - - for ( l = targets; l; l = list_next( l ) ) - { - TARGET * t = bindtarget( l->string ); - - /* If doing INCLUDES, switch to the TARGET's include */ - /* TARGET, creating it if needed. The internal include */ - /* TARGET shares the name of its parent. */ - - if ( parse->num ) - { - if ( !t->includes ) - { - t->includes = copytarget( t ); - t->includes->original_target = t; - } - t = t->includes; - } - - t->depends = targetlist( t->depends, sources ); - } - - /* Enter reverse links */ - for ( l = sources; l; l = list_next( l ) ) - { - TARGET * s = bindtarget( l->string ); - s->dependants = targetlist( s->dependants, targets ); - } - - return L0; -} - - -/* - * builtin_rebuilds() - REBUILDS rule. - * - * The REBUILDS builtin rule appends each of the listed rebuild-targets in its - * 2nd argument on the rebuilds list of each of the listed targets in its first - * argument. 
- */ - -LIST * builtin_rebuilds( PARSE * parse, FRAME * frame ) -{ - LIST * targets = lol_get( frame->args, 0 ); - LIST * rebuilds = lol_get( frame->args, 1 ); - LIST * l; - - for ( l = targets; l; l = list_next( l ) ) - { - TARGET * t = bindtarget( l->string ); - t->rebuilds = targetlist( t->rebuilds, rebuilds ); - } - - return L0; -} - - -/* - * builtin_echo() - ECHO rule. - * - * The ECHO builtin rule echoes the targets to the user. No other actions are - * taken. - */ - -LIST * builtin_echo( PARSE * parse, FRAME * frame ) -{ - list_print( lol_get( frame->args, 0 ) ); - printf( "\n" ); - fflush( stdout ); - return L0; -} - - -/* - * builtin_exit() - EXIT rule. - * - * The EXIT builtin rule echoes the targets to the user and exits the program - * with a failure status. - */ - -LIST * builtin_exit( PARSE * parse, FRAME * frame ) -{ - list_print( lol_get( frame->args, 0 ) ); - printf( "\n" ); - if ( lol_get( frame->args, 1 ) ) - { - exit( atoi( lol_get( frame->args, 1 )->string ) ); - } - else - { - exit( EXITBAD ); /* yeech */ - } - return L0; -} - - -/* - * builtin_flags() - NOCARE, NOTFILE, TEMPORARY rule. - * - * Builtin_flags() marks the target with the appropriate flag, for use by make0(). - * It binds each target as a TARGET. - */ - -LIST * builtin_flags( PARSE * parse, FRAME * frame ) -{ - LIST * l = lol_get( frame->args, 0 ); - for ( ; l; l = list_next( l ) ) - bindtarget( l->string )->flags |= parse->num; - return L0; -} - - -/* - * builtin_globbing() - GLOB rule. - */ - -struct globbing -{ - LIST * patterns; - LIST * results; - LIST * case_insensitive; -}; - - -static void downcase_inplace( char * p ) -{ - for ( ; *p; ++p ) - *p = tolower( *p ); -} - - -static void builtin_glob_back -( - void * closure, - char * file, - int status, - time_t time -) -{ - PROFILE_ENTER( BUILTIN_GLOB_BACK ); - - struct globbing * globbing = (struct globbing *)closure; - LIST * l; - PATHNAME f; - string buf[ 1 ]; - - /* Null out directory for matching. We wish we had file_dirscan() pass up a - * PATHNAME. - */ - path_parse( file, &f ); - f.f_dir.len = 0; - - /* For globbing, we unconditionally ignore current and parent directory - * items. Since these items always exist, there is no reason why a caller of - * GLOB would want to see them. We could also change file_dirscan(), but - * then paths with embedded "." and ".." would not work anywhere. - */ - if ( !strcmp( f.f_base.ptr, "." ) || !strcmp( f.f_base.ptr, ".."
) ) - { - PROFILE_EXIT( BUILTIN_GLOB_BACK ); - return; - } - - string_new( buf ); - path_build( &f, buf, 0 ); - - if ( globbing->case_insensitive ) - downcase_inplace( buf->value ); - - for ( l = globbing->patterns; l; l = l->next ) - { - if ( !glob( l->string, buf->value ) ) - { - globbing->results = list_new( globbing->results, newstr( file ) ); - break; - } - } - - string_free( buf ); - - PROFILE_EXIT( BUILTIN_GLOB_BACK ); -} - - -static LIST * downcase_list( LIST * in ) -{ - LIST * result = 0; - - string s[ 1 ]; - string_new( s ); - - while ( in ) - { - string_copy( s, in->string ); - downcase_inplace( s->value ); - result = list_append( result, list_new( 0, newstr( s->value ) ) ); - in = in->next; - } - - string_free( s ); - return result; -} - - -LIST * builtin_glob( PARSE * parse, FRAME * frame ) -{ - LIST * l = lol_get( frame->args, 0 ); - LIST * r = lol_get( frame->args, 1 ); - - struct globbing globbing; - - globbing.results = L0; - globbing.patterns = r; - - globbing.case_insensitive -# if defined( OS_NT ) || defined( OS_CYGWIN ) - = l; /* Always case-insensitive if any files can be found. */ -# else - = lol_get( frame->args, 2 ); -# endif - - if ( globbing.case_insensitive ) - globbing.patterns = downcase_list( r ); - - for ( ; l; l = list_next( l ) ) - file_dirscan( l->string, builtin_glob_back, &globbing ); - - if ( globbing.case_insensitive ) - list_free( globbing.patterns ); - - return globbing.results; -} - - -static int has_wildcards( char const * str ) -{ - size_t const index = strcspn( str, "[]*?" ); - return str[ index ] == '\0' ? 0 : 1; -} - - -/* - * If 'file' exists, append 'file' to 'list'. Returns 'list'. - */ - -static LIST * append_if_exists( LIST * list, char * file ) -{ - time_t time; - timestamp( file, &time ); - return time > 0 - ? list_new( list, newstr( file ) ) - : list; -} - - -LIST * glob1( char * dirname, char * pattern ) -{ - LIST * plist = list_new( L0, pattern ); - struct globbing globbing; - - globbing.results = L0; - globbing.patterns = plist; - - globbing.case_insensitive -# if defined( OS_NT ) || defined( OS_CYGWIN ) - = plist; /* always case-insensitive if any files can be found */ -# else - = L0; -# endif - - if ( globbing.case_insensitive ) - globbing.patterns = downcase_list( plist ); - - file_dirscan( dirname, builtin_glob_back, &globbing ); - - if ( globbing.case_insensitive ) - list_free( globbing.patterns ); - - list_free( plist ); - - return globbing.results; -} - - -LIST * glob_recursive( char * pattern ) -{ - LIST * result = L0; - - /* Check if there's metacharacters in pattern */ - if ( !has_wildcards( pattern ) ) - { - /* No metacharacters. Check if the path exists. */ - result = append_if_exists(result, pattern); - } - else - { - /* Have metacharacters in the pattern. Split into dir/name. */ - PATHNAME path[ 1 ]; - path_parse( pattern, path ); - - if ( path->f_dir.ptr ) - { - LIST * dirs = L0; - string dirname[ 1 ]; - string basename[ 1 ]; - string_new( dirname ); - string_new( basename ); - - string_append_range( dirname, path->f_dir.ptr, - path->f_dir.ptr + path->f_dir.len ); - - path->f_grist.ptr = 0; - path->f_grist.len = 0; - path->f_dir.ptr = 0; - path->f_dir.len = 0; - path_build( path, basename, 0 ); - - dirs = has_wildcards( dirname->value ) - ? 
glob_recursive( dirname->value ) - : list_new( dirs, dirname->value ); - - if ( has_wildcards( basename->value ) ) - { - for ( ; dirs; dirs = dirs->next ) - result = list_append( result, glob1( dirs->string, - basename->value ) ); - } - else - { - string file_string[ 1 ]; - string_new( file_string ); - - /* No wildcard in basename. */ - for ( ; dirs; dirs = dirs->next ) - { - path->f_dir.ptr = dirs->string; - path->f_dir.len = strlen( dirs->string ); - path_build( path, file_string, 0 ); - - result = append_if_exists( result, file_string->value ); - - string_truncate( file_string, 0 ); - } - - string_free( file_string ); - } - - string_free( dirname ); - string_free( basename ); - } - else - { - /** No directory, just a pattern. */ - result = list_append( result, glob1( ".", pattern ) ); - } - } - - return result; -} - - -LIST * builtin_glob_recursive( PARSE * parse, FRAME * frame ) -{ - LIST * result = L0; - LIST * l = lol_get( frame->args, 0 ); - for ( ; l; l = l->next ) - result = list_append( result, glob_recursive( l->string ) ); - return result; -} - - -/* - * builtin_match() - MATCH rule, regexp matching. - */ - -LIST * builtin_match( PARSE * parse, FRAME * frame ) -{ - LIST * l; - LIST * r; - LIST * result = 0; - - string buf[ 1 ]; - string_new( buf ); - - /* For each pattern */ - - for ( l = lol_get( frame->args, 0 ); l; l = l->next ) - { - /* Result is cached and intentionally never freed. */ - regexp * re = regex_compile( l->string ); - - /* For each string to match against. */ - for ( r = lol_get( frame->args, 1 ); r; r = r->next ) - { - if ( regexec( re, r->string ) ) - { - int i; - int top; - - /* Find highest parameter */ - - for ( top = NSUBEXP; top-- > 1; ) - if ( re->startp[ top ] ) - break; - - /* And add all parameters up to highest onto list. */ - /* Must have parameters to have results! */ - for ( i = 1; i <= top; ++i ) - { - string_append_range( buf, re->startp[ i ], re->endp[ i ] ); - result = list_new( result, newstr( buf->value ) ); - string_truncate( buf, 0 ); - } - } - } - } - - string_free( buf ); - return result; -} - -LIST * builtin_split_by_characters( PARSE * parse, FRAME * frame ) -{ - LIST * l1 = lol_get( frame->args, 0 ); - LIST * l2 = lol_get( frame->args, 1 ); - - LIST * result = 0; - - char* s = strdup (l1->string); - char* delimiters = l2->string; - char* t; - - t = strtok (s, delimiters); - while (t) - { - result = list_new(result, newstr(t)); - t = strtok (NULL, delimiters); - } - - free (s); - - return result; -} - -LIST * builtin_hdrmacro( PARSE * parse, FRAME * frame ) -{ - LIST * l = lol_get( frame->args, 0 ); - - for ( ; l; l = list_next( l ) ) - { - TARGET * t = bindtarget( l->string ); - - /* Scan file for header filename macro definitions. */ - if ( DEBUG_HEADER ) - printf( "scanning '%s' for header file macro definitions\n", - l->string ); - - macro_headers( t ); - } - - return L0; -} - - -/* - * builtin_rulenames() - RULENAMES ( MODULE ? ). - * - * Returns a list of the non-local rule names in the given MODULE. If MODULE is - * not supplied, returns the list of rule names in the global module. - */ - -static void add_rule_name( void * r_, void * result_ ) -{ - RULE * r = (RULE *)r_; - LIST * * result = (LIST * *)result_; - if ( r->exported ) - *result = list_new( *result, copystr( r->name ) ); -} - - -LIST * builtin_rulenames( PARSE * parse, FRAME * frame ) -{ - LIST * arg0 = lol_get( frame->args, 0 ); - LIST * result = L0; - module_t * source_module = bindmodule( arg0 ? 
arg0->string : 0 ); - - if ( source_module->rules ) - hashenumerate( source_module->rules, add_rule_name, &result ); - return result; -} - - -/* - * builtin_varnames() - VARNAMES ( MODULE ? ). - * - * Returns a list of the variable names in the given MODULE. If MODULE is not - * supplied, returns the list of variable names in the global module. - */ - -/* helper function for builtin_varnames(), below. Used with hashenumerate, will - * prepend the key of each element to the list - */ -static void add_hash_key( void * np, void * result_ ) -{ - LIST * * result = (LIST * *)result_; - *result = list_new( *result, copystr( *(char * *)np ) ); -} - - -static struct hash * get_running_module_vars() -{ - struct hash * dummy; - struct hash * vars = NULL; - /* Get the global variables pointer (that of the currently running module). - */ - var_hash_swap( &vars ); - dummy = vars; - /* Put the global variables pointer in its right place. */ - var_hash_swap( &dummy ); - return vars; -} - - -LIST * builtin_varnames( PARSE * parse, FRAME * frame ) -{ - LIST * arg0 = lol_get( frame->args, 0 ); - LIST * result = L0; - module_t * source_module = bindmodule( arg0 ? arg0->string : 0 ); - - /* The running module _always_ has its 'variables' member set to NULL due to - * the way enter_module() and var_hash_swap() work. - */ - struct hash * vars = source_module == frame->module - ? get_running_module_vars() - : source_module->variables; - - if ( vars ) - hashenumerate( vars, add_hash_key, &result ); - return result; -} - - -/* - * builtin_delete_module() - MODULE ?. - * - * Clears all rules and variables from the given module. - */ - -LIST * builtin_delete_module( PARSE * parse, FRAME * frame ) -{ - LIST * arg0 = lol_get( frame->args, 0 ); - LIST * result = L0; - module_t * source_module = bindmodule( arg0 ? arg0->string : 0 ); - delete_module( source_module ); - return result; -} - - -static void unknown_rule( FRAME * frame, char * key, char * module_name, char * rule_name ) -{ - backtrace_line( frame->prev ); - printf( "%s error: rule \"%s\" unknown in module \"%s\"\n", key, rule_name, module_name ); - backtrace( frame->prev ); - exit( 1 ); -} - - -/* - * builtin_import() - IMPORT - * ( - * SOURCE_MODULE ? : - * SOURCE_RULES * : - * TARGET_MODULE ? : - * TARGET_RULES * : - * LOCALIZE ? - * ) - * - * The IMPORT rule imports rules from the SOURCE_MODULE into the TARGET_MODULE - * as local rules. If either SOURCE_MODULE or TARGET_MODULE is not supplied, it - * refers to the global module. SOURCE_RULES specifies which rules from the - * SOURCE_MODULE to import; TARGET_RULES specifies the names to give those rules - * in TARGET_MODULE. If SOURCE_RULES contains a name which doesn't correspond to - * a rule in SOURCE_MODULE, or if it contains a different number of items than - * TARGET_RULES, an error is issued. If LOCALIZE is specified, the rules will be - * executed in TARGET_MODULE, with corresponding access to its module local - * variables. - */ - -LIST * builtin_import( PARSE * parse, FRAME * frame ) -{ - LIST * source_module_list = lol_get( frame->args, 0 ); - LIST * source_rules = lol_get( frame->args, 1 ); - LIST * target_module_list = lol_get( frame->args, 2 ); - LIST * target_rules = lol_get( frame->args, 3 ); - LIST * localize = lol_get( frame->args, 4 ); - - module_t * target_module = - bindmodule( target_module_list ? target_module_list->string : 0 ); - module_t * source_module = - bindmodule( source_module_list ? 
source_module_list->string : 0 ); - - LIST * source_name; - LIST * target_name; - - for ( source_name = source_rules, target_name = target_rules; - source_name && target_name; - source_name = list_next( source_name ), - target_name = list_next( target_name ) ) - { - RULE r_; - RULE * r = &r_; - RULE * imported; - r_.name = source_name->string; - - if ( !source_module->rules || - !hashcheck( source_module->rules, (HASHDATA * *)&r ) ) - unknown_rule( frame, "IMPORT", source_module->name, r_.name ); - - imported = import_rule( r, target_module, target_name->string ); - if ( localize ) - imported->module = target_module; - /* This rule is really part of some other module. Just refer to it here, - * but do not let it out. - */ - imported->exported = 0; - } - - if ( source_name || target_name ) - { - backtrace_line( frame->prev ); - printf( "import error: length of source and target rule name lists don't match!\n" ); - printf( " source: " ); - list_print( source_rules ); - printf( "\n target: " ); - list_print( target_rules ); - printf( "\n" ); - backtrace( frame->prev ); - exit( 1 ); - } - - return L0; -} - - -/* - * builtin_export() - EXPORT ( MODULE ? : RULES * ). - * - * The EXPORT rule marks RULES from MODULE as non-local (and thus - * exportable). If an element of RULES does not name a rule in MODULE, an error - * is issued. - */ - -LIST * builtin_export( PARSE * parse, FRAME * frame ) -{ - LIST * module_list = lol_get( frame->args, 0 ); - LIST * rules = lol_get( frame->args, 1 ); - module_t * m = bindmodule( module_list ? module_list->string : 0 ); - - for ( ; rules; rules = list_next( rules ) ) - { - RULE r_; - RULE * r = &r_; - r_.name = rules->string; - - if ( !m->rules || !hashcheck( m->rules, (HASHDATA * *)&r ) ) - unknown_rule( frame, "EXPORT", m->name, r_.name ); - - r->exported = 1; - } - return L0; -} - - -/* - * get_source_line() - Retrieve the file and line number that should be - * indicated for a given procedure in debug output or an error backtrace. - */ - -static void get_source_line( PARSE * procedure, char * * file, int * line ) -{ - if ( procedure ) - { - char * f = procedure->file; - int l = procedure->line; - if ( !strcmp( f, "+" ) ) - { - f = "jambase.c"; - l += 3; - } - *file = f; - *line = l; - } - else - { - *file = "(builtin)"; - *line = -1; - } -} - - -void print_source_line( PARSE * p ) -{ - char * file; - int line; - - get_source_line( p, &file, &line ); - if ( line < 0 ) - printf( "(builtin):" ); - else - printf( "%s:%d:", file, line ); -} - - -/* - * backtrace_line() - print a single line of error backtrace for the given - * frame. - */ - -void backtrace_line( FRAME * frame ) -{ - if ( frame == 0 ) - { - printf( "(no frame):" ); - } - else - { - print_source_line( frame->procedure ); - printf( " in %s\n", frame->rulename ); - } -} - - -/* - * backtrace() - Print the entire backtrace from the given frame to the Jambase - * which invoked it. - */ - -void backtrace( FRAME * frame ) -{ - if ( !frame ) return; - while ( ( frame = frame->prev ) ) - backtrace_line( frame ); -} - - -/* - * builtin_backtrace() - A Jam version of the backtrace function, taking no - * arguments and returning a list of quadruples: FILENAME LINE MODULE RULENAME, - * describing each frame. Note that the module name is always followed by a - * period. - */ - -LIST * builtin_backtrace( PARSE * parse, FRAME * frame ) -{ - LIST * levels_arg = lol_get( frame->args, 0 ); - int levels = levels_arg ?
atoi( levels_arg->string ) : ( (unsigned int)(-1) >> 1 ) ; - - LIST * result = L0; - for ( ; ( frame = frame->prev ) && levels ; --levels ) - { - char * file; - int line; - char buf[32]; - get_source_line( frame->procedure, &file, &line ); - sprintf( buf, "%d", line ); - result = list_new( result, newstr( file ) ); - result = list_new( result, newstr( buf ) ); - result = list_new( result, newstr( frame->module->name ) ); - result = list_new( result, newstr( frame->rulename ) ); - } - return result; -} - - -/* - * builtin_caller_module() - CALLER_MODULE ( levels ? ) - * - * If levels is not supplied, returns the name of the module of the rule which - * called the one calling this one. If levels is supplied, it is interpreted as - * an integer specifying a number of additional levels of call stack to traverse - * in order to locate the module in question. If no such module exists, returns - * the empty list. Also returns the empty list when the module in question is - * the global module. This rule is needed for implementing module import - * behavior. - */ - -LIST * builtin_caller_module( PARSE * parse, FRAME * frame ) -{ - LIST * levels_arg = lol_get( frame->args, 0 ); - int levels = levels_arg ? atoi( levels_arg->string ) : 0 ; - - int i; - for ( i = 0; ( i < levels + 2 ) && frame->prev; ++i ) - frame = frame->prev; - - if ( frame->module == root_module() ) - return L0; - - { - LIST * result; - string name; - string_copy( &name, frame->module->name ); - string_pop_back( &name ); - result = list_new( L0, newstr(name.value) ); - string_free( &name ); - return result; - } -} - - -/* - * Return the current working directory. - * - * Usage: pwd = [ PWD ] ; - */ - -LIST * builtin_pwd( PARSE * parse, FRAME * frame ) -{ - return pwd(); -} - - -/* - * Adds targets to the list of targets that jam will attempt to update. - */ - -LIST * builtin_update( PARSE * parse, FRAME * frame ) -{ - LIST * result = list_copy( L0, targets_to_update() ); - LIST * arg1 = lol_get( frame->args, 0 ); - clear_targets_to_update(); - for ( ; arg1; arg1 = list_next( arg1 ) ) - mark_target_for_updating( newstr( arg1->string ) ); - return result; -} - -extern int anyhow; -int last_update_now_status; - -/* Takes a list of target names as first argument, and immediately - updates them. - Second parameter, if specified, is the descriptor (converted to a string) - of a log file where all build output is redirected. - Third parameter, if non-empty, specifies that the -n option should have - no effect -- that is, all out-of-date targets should be rebuilt. -*/ -LIST * builtin_update_now( PARSE * parse, FRAME * frame ) -{ - LIST * targets = lol_get( frame->args, 0 ); - LIST * log = lol_get( frame->args, 1 ); - LIST * force = lol_get (frame->args, 2); - LIST * continue_ = lol_get(frame->args, 3); - int status = 0; - int original_stdout; - int original_stderr; - int n; - int targets_count; - const char** targets2; - int i; - int original_noexec; - int original_quitquick; - - - if (log) - { - int fd = atoi(log->string); - /* Redirect stdout and stderr temporarily to the log file.
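- The descriptor is expected to be one the caller opened itself, for - example with the FILE_OPEN builtin bound above (an illustrative sketch; - the target names are invented): - -   local log = [ FILE_OPEN build.log : "w" ] ; -   ok = [ UPDATE_NOW t1 t2 : $(log) ] ; -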
-        original_stdout = dup (0);
-        original_stderr = dup (1);
-        dup2 (fd, 0);
-        dup2 (fd, 1);
-    }
-
-    if (force)
-    {
-        original_noexec = globs.noexec;
-        globs.noexec = 0;
-        original_quitquick = globs.quitquick;
-        globs.quitquick = 0;
-    }
-
-    if (continue_)
-    {
-        original_quitquick = globs.quitquick;
-        globs.quitquick = 0;
-    }
-
-    targets_count = list_length( targets );
-    targets2 = (const char * *)BJAM_MALLOC( targets_count * sizeof( char * ) );
-    for (i = 0 ; targets; targets = list_next( targets ) )
-        targets2[ i++ ] = targets->string;
-    status |= make( targets_count, targets2, anyhow);
-    free( targets );
-
-    if (force)
-    {
-        globs.noexec = original_noexec;
-        globs.quitquick = original_quitquick;
-    }
-
-    if (continue_)
-    {
-        globs.quitquick = original_quitquick;
-    }
-
-    if (log)
-    {
-        /* Flush whatever stdio might have buffered, while descriptors
-           0 and 1 still refer to the log file. */
-        fflush (stdout);
-        fflush (stderr);
-        dup2 (original_stdout, 0);
-        dup2 (original_stderr, 1);
-        close (original_stdout);
-        close (original_stderr);
-    }
-
-    last_update_now_status = status;
-
-    if (status == 0)
-        return list_new (L0, newstr ("ok"));
-    else
-        return L0;
-}
-
-LIST * builtin_search_for_target( PARSE * parse, FRAME * frame )
-{
-    LIST * arg1 = lol_get( frame->args, 0 );
-    LIST * arg2 = lol_get( frame->args, 1 );
-    TARGET * t = search_for_target( arg1->string, arg2 );
-    return list_new( L0, t->name );
-}
-
-
-LIST * builtin_import_module( PARSE * parse, FRAME * frame )
-{
-    LIST * arg1 = lol_get( frame->args, 0 );
-    LIST * arg2 = lol_get( frame->args, 1 );
-    module_t * m = arg2 ? bindmodule( arg2->string ) : root_module();
-    import_module( arg1, m );
-    return L0;
-}
-
-
-LIST * builtin_imported_modules( PARSE * parse, FRAME * frame )
-{
-    LIST * arg0 = lol_get( frame->args, 0 );
-    return imported_modules( bindmodule( arg0 ? arg0->string : 0 ) );
-}
-
-
-LIST * builtin_instance( PARSE * parse, FRAME * frame )
-{
-    LIST * arg1 = lol_get( frame->args, 0 );
-    LIST * arg2 = lol_get( frame->args, 1 );
-    module_t * const instance = bindmodule( arg1->string );
-    module_t * const class_module = bindmodule( arg2->string );
-    instance->class_module = class_module;
-    return L0;
-}
-
-
-LIST * builtin_sort( PARSE * parse, FRAME * frame )
-{
-    LIST * arg1 = lol_get( frame->args, 0 );
-    return list_sort( arg1 );
-}
-
-
-LIST * builtin_normalize_path( PARSE * parse, FRAME * frame )
-{
-    LIST * arg = lol_get( frame->args, 0 );
-
-    /* First, we iterate over all '/'-separated elements, starting from the end
-     * of string. If we see a '..', we remove the previous path element. If we
-     * see '.', we remove it. The removal is done by overwriting data using '\1'
-     * in the string. After the whole string has been processed, we do a second
-     * pass, removing all the entered '\1' characters.
-     */
-
-    string in[ 1 ];
-    string out[ 1 ];
-    /* Last character of the part of string still to be processed. */
-    char * end;
-    /* Working pointer. */
-    char * current;
-    /* Number of '..' elements seen and not processed yet. */
-    int dotdots = 0;
-    int rooted = 0;
-    char * result = 0;
-
-    /* Make a copy of input: we should not change it. Prepend a '/' before it as
-     * a guard for the algorithm later on and remember whether it was originally
-     * rooted or not. */
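/* Editor's illustration (hypothetical input, not part of the deleted file):
 * normalizing "a/./b/../c//" proceeds as follows. With the '/' guard
 * prepended, the working buffer is "/a/./b/../c//". The marking pass
 * overwrites the "/.", "/b/..", trailing and duplicate '/' elements,
 * leaving "/a\1\1\1\1\1\1\1/c\1\1", and the sweeping pass then yields
 * "/a/c". Since the input was not rooted, the leading '/' is dropped and
 * the result is "a/c".
 */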
-    string_new( in );
-    string_push_back( in, '/' );
-    for ( ; arg; arg = list_next( arg ) )
-    {
-        if ( arg->string[ 0 ] != '\0' )
-        {
-            if ( in->size == 1 )
-                rooted = ( ( arg->string[ 0 ] == '/' ) ||
-                           ( arg->string[ 0 ] == '\\' ) );
-            else
-                string_append( in, "/" );
-            string_append( in, arg->string );
-        }
-    }
-
-    /* Convert \ into /. On Windows, paths using / and \ are equivalent, and we
-     * want this function to obtain a canonical representation.
-     */
-    for ( current = in->value, end = in->value + in->size;
-          current < end; ++current )
-        if ( *current == '\\' )
-            *current = '/';
-
-    /* Now we remove any extra path elements by overwriting them with '\1'
-     * characters and count how many more unused '..' path elements there are
-     * remaining. Note that each remaining path element will always start with
-     * a '/' character.
-     */
-    for ( end = in->value + in->size - 1; end >= in->value; )
-    {
-        /* Set 'current' to the next occurrence of '/', which always exists. */
-        for ( current = end; *current != '/'; --current );
-
-        if ( current == end )
-        {
-            /* Found a trailing or duplicate '/'. Remove it. */
-            *current = '\1';
-        }
-        else if ( ( end - current == 1 ) && ( *(current + 1) == '.' ) )
-        {
-            /* Found '/.'. Remove them all. */
-            *current = '\1';
-            *(current + 1) = '\1';
-        }
-        else if ( ( end - current == 2 ) && ( *(current + 1) == '.' ) && ( *(current + 2) == '.' ) )
-        {
-            /* Found '/..'. Remove them all. */
-            *current = '\1';
-            *(current + 1) = '\1';
-            *(current + 2) = '\1';
-            ++dotdots;
-        }
-        else if ( dotdots )
-        {
-            memset( current, '\1', end - current + 1 );
-            --dotdots;
-        }
-        end = current - 1;
-    }
-
-    string_new( out );
-
-    /* Now we know that we need to add exactly dotdots '..' path elements to the
-     * front and that our string is either empty or has a '/' as its first
-     * significant character. If we have any dotdots remaining then the passed
-     * path must not have been rooted or else it is invalid and we return an
-     * empty list.
-     */
-    if ( dotdots )
-    {
-        if ( rooted ) return L0;
-        do
-            string_append( out, "/.." );
-        while ( --dotdots );
-    }
-
-    /* Now we actually remove all the path characters marked for removal. */
-    for ( current = in->value; *current; ++current )
-        if ( *current != '\1' )
-            string_push_back( out, *current );
-
-    /* Here we know that our string contains no '\1' characters and is either
-     * empty or has a '/' as its initial character. If the original path was not
-     * rooted and we have a non-empty path we need to drop the initial '/'. If
-     * the original path was rooted and we have an empty path we need to add
-     * back the '/'.
-     */
-    result = newstr( out->size ? out->value + !rooted : ( rooted ? "/" : "."
) ); - - string_free( out ); - string_free( in ); - - return list_new( 0, result ); -} - - -LIST * builtin_native_rule( PARSE * parse, FRAME * frame ) -{ - LIST * module_name = lol_get( frame->args, 0 ); - LIST * rule_name = lol_get( frame->args, 1 ); - - module_t * module = bindmodule( module_name->string ); - - native_rule_t n; - native_rule_t * np = &n; - n.name = rule_name->string; - if ( module->native_rules && hashcheck( module->native_rules, (HASHDATA * *)&np ) ) - { - new_rule_body( module, np->name, np->arguments, np->procedure, 1 ); - } - else - { - backtrace_line( frame->prev ); - printf( "error: no native rule \"%s\" defined in module \"%s\"\n", - n.name, module->name ); - backtrace( frame->prev ); - exit( 1 ); - } - return L0; -} - - -LIST * builtin_has_native_rule( PARSE * parse, FRAME * frame ) -{ - LIST * module_name = lol_get( frame->args, 0 ); - LIST * rule_name = lol_get( frame->args, 1 ); - LIST * version = lol_get( frame->args, 2 ); - - module_t * module = bindmodule( module_name->string ); - - native_rule_t n; - native_rule_t * np = &n; - n.name = rule_name->string; - if ( module->native_rules && hashcheck( module->native_rules, (HASHDATA * *)&np ) ) - { - int expected_version = atoi( version->string ); - if ( np->version == expected_version ) - return list_new( 0, newstr( "true" ) ); - } - return L0; -} - - -LIST * builtin_user_module( PARSE * parse, FRAME * frame ) -{ - LIST * module_name = lol_get( frame->args, 0 ); - for ( ; module_name; module_name = module_name->next ) - { - module_t * m = bindmodule( module_name->string ); - m->user_module = 1; - } - return L0; -} - - -LIST * builtin_nearest_user_location( PARSE * parse, FRAME * frame ) -{ - FRAME * nearest_user_frame = - frame->module->user_module ? frame : frame->prev_user; - if ( !nearest_user_frame ) - return L0; - - { - LIST * result = 0; - char * file; - int line; - char buf[32]; - - get_source_line( nearest_user_frame->procedure, &file, &line ); - sprintf( buf, "%d", line ); - result = list_new( result, newstr( file ) ); - result = list_new( result, newstr( buf ) ); - return result; - } -} - - -LIST * builtin_check_if_file( PARSE * parse, FRAME * frame ) -{ - LIST * name = lol_get( frame->args, 0 ); - return file_is_file( name->string ) == 1 - ? 
list_new( 0, newstr( "true" ) ) - : L0 ; -} - - -LIST * builtin_md5( PARSE * parse, FRAME * frame ) -{ - LIST * l = lol_get( frame->args, 0 ); - char* s = l->string; - - md5_state_t state; - md5_byte_t digest[16]; - char hex_output[16*2 + 1]; - - int di; - - md5_init(&state); - md5_append(&state, (const md5_byte_t *)s, strlen(s)); - md5_finish(&state, digest); - - for (di = 0; di < 16; ++di) - sprintf(hex_output + di * 2, "%02x", digest[di]); - - return list_new (0, newstr(hex_output)); -} - -LIST *builtin_file_open( PARSE *parse, FRAME *frame ) -{ - char* name = lol_get(frame->args, 0)->string; - char* mode = lol_get(frame->args, 1)->string; - int fd; - char buffer[sizeof("4294967295")]; - - if (strcmp(mode, "w") == 0) - { - fd = open(name, O_WRONLY|O_CREAT|O_TRUNC, 0666); - } - else - { - fd = open(name, O_RDONLY); - } - - if (fd != -1) - { - sprintf(buffer, "%d", fd); - return list_new(L0, newstr(buffer)); - } - else - { - return L0; - } -} - -LIST *builtin_pad( PARSE *parse, FRAME *frame ) -{ - char *string = lol_get(frame->args, 0)->string; - char *width_s = lol_get(frame->args, 1)->string; - - int current = strlen (string); - int desired = atoi(width_s); - if (current >= desired) - return list_new (L0, string); - else - { - char *buffer = malloc (desired + 1); - int i; - LIST *result; - - strcpy (buffer, string); - for (i = current; i < desired; ++i) - buffer[i] = ' '; - buffer[desired] = '\0'; - result = list_new (L0, newstr (buffer)); - free (buffer); - return result; - } -} - -LIST *builtin_precious( PARSE *parse, FRAME *frame ) -{ - LIST* targets = lol_get(frame->args, 0); - - for ( ; targets; targets = list_next( targets ) ) - { - TARGET* t = bindtarget (targets->string); - t->flags |= T_FLAG_PRECIOUS; - } - - return L0; -} - -LIST *builtin_self_path( PARSE *parse, FRAME *frame ) -{ - extern char *saved_argv0; - char *p = executable_path (saved_argv0); - if (p) - { - LIST* result = list_new (0, newstr (p)); - free(p); - return result; - } - else - { - return L0; - } -} - -LIST *builtin_makedir( PARSE *parse, FRAME *frame ) -{ - LIST *path = lol_get(frame->args, 0); - - if (file_mkdir(path->string) == 0) - { - LIST *result = list_new (0, newstr(path->string)); - return result; - } - else - { - return L0; - } -} - -#ifdef HAVE_PYTHON - -LIST * builtin_python_import_rule( PARSE * parse, FRAME * frame ) -{ - static int first_time = 1; - char * python_module = lol_get( frame->args, 0 )->string; - char * python_function = lol_get( frame->args, 1 )->string; - char * jam_module = lol_get( frame->args, 2 )->string; - char * jam_rule = lol_get( frame->args, 3 )->string; - - PyObject * pName; - PyObject * pModule; - PyObject * pDict; - PyObject * pFunc; - - if ( first_time ) - { - /* At the first invocation, we add the value of the global - * EXTRA_PYTHONPATH to the sys.path Python variable. 
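         *
         * Editor's note: for each directory D in EXTRA_PYTHONPATH, the loop
         * below builds and runs, via PyRun_SimpleString(), the equivalent of:
         *
         *     import sys
         *     sys.path.append("D")
         *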
-        */
-        LIST * extra = 0;
-        module_t * outer_module = frame->module;
-
-        first_time = 0;
-
-        if ( outer_module != root_module() )
-        {
-            exit_module( outer_module );
-            enter_module( root_module() );
-        }
-
-        extra = var_get( "EXTRA_PYTHONPATH" );
-
-        if ( outer_module != root_module() )
-        {
-            exit_module( root_module() );
-            enter_module( outer_module );
-        }
-
-        for ( ; extra; extra = extra->next )
-        {
-            string buf[ 1 ];
-            string_new( buf );
-            string_append( buf, "import sys\nsys.path.append(\"" );
-            string_append( buf, extra->string );
-            string_append( buf, "\")\n" );
-            PyRun_SimpleString( buf->value );
-            string_free( buf );
-        }
-    }
-
-    pName = PyString_FromString( python_module );
-    pModule = PyImport_Import( pName );
-    Py_DECREF( pName );
-
-    if ( pModule != NULL )
-    {
-        pDict = PyModule_GetDict( pModule );
-        pFunc = PyDict_GetItemString( pDict, python_function );
-
-        if ( pFunc && PyCallable_Check( pFunc ) )
-        {
-            module_t * m = bindmodule( jam_module );
-            RULE * r = bindrule( jam_rule, m );
-
-            /* Make pFunc owned. */
-            Py_INCREF( pFunc );
-
-            r->python_function = pFunc;
-        }
-        else
-        {
-            if ( PyErr_Occurred() )
-                PyErr_Print();
-            fprintf( stderr, "Cannot find function \"%s\"\n", python_function );
-        }
-        Py_DECREF( pModule );
-    }
-    else
-    {
-        PyErr_Print();
-        fprintf( stderr, "Failed to load \"%s\"\n", python_module );
-    }
-    return L0;
-
-}
-
-#endif
-
-void lol_build( LOL * lol, char * * elements )
-{
-    LIST * l = L0;
-    lol_init( lol );
-
-    while ( elements && *elements )
-    {
-        if ( !strcmp( *elements, ":" ) )
-        {
-            lol_add( lol, l );
-            l = L0 ;
-        }
-        else
-        {
-            l = list_new( l, newstr( *elements ) );
-        }
-        ++elements;
-    }
-
-    if ( l != L0 )
-        lol_add( lol, l );
-}
-
-
-#ifdef HAVE_PYTHON
-
-/*
- * Calls the bjam rule specified by name passed in 'args'. The name is looked up
- * in the context of bjam's 'python_interface' module. Returns the list of
- * strings returned by the rule.
- */
-
-PyObject* bjam_call( PyObject * self, PyObject * args )
-{
-    FRAME  inner[ 1 ];
-    LIST * result;
-    PARSE * p;
-    char * rulename;
-
-    /* Build up the list of arg lists. */
-    frame_init( inner );
-    inner->prev = 0;
-    inner->prev_user = 0;
-    inner->module = bindmodule( "python_interface" );
-    inner->procedure = 0;
-
-    /* Extract the rule name and arguments from 'args'. */
-
-    /* PyTuple_GetItem returns borrowed reference. */
-    rulename = PyString_AsString( PyTuple_GetItem( args, 0 ) );
-    {
-        int i = 1;
-        int size = PyTuple_Size( args );
-        for ( ; i < size; ++i )
-        {
-            PyObject * a = PyTuple_GetItem( args, i );
-            if ( PyString_Check( a ) )
-            {
-                lol_add( inner->args, list_new( 0, newstr(
-                    PyString_AsString( a ) ) ) );
-            }
-            else if ( PySequence_Check( a ) )
-            {
-                LIST * l = 0;
-                int s = PySequence_Size( a );
-                int i = 0;
-                for ( ; i < s; ++i )
-                {
-                    /* PySequence_GetItem returns new reference. */
-                    PyObject * e = PySequence_GetItem( a, i );
-                    char * s = PyString_AsString( e );
-                    if ( !s )
-                    {
-                        printf( "Invalid parameter type passed from Python\n" );
-                        exit( 1 );
-                    }
-                    l = list_new( l, newstr( s ) );
-                    Py_DECREF( e );
-                }
-                lol_add( inner->args, l );
-            }
-        }
-    }
-
-    result = evaluate_rule( rulename, inner );
-
-    frame_free( inner );
-
-    /* Convert the bjam list into a Python list result. */
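/* Editor's sketch (hypothetical helper, not part of the deleted file): the
 * same conversion step in isolation. PyList_SetItem steals the reference
 * returned by PyString_FromString, so no separate Py_DECREF is needed for
 * the items (Python 2 C API, as used throughout this file).
 */
static PyObject * strings_to_pylist( char * * strings, int n )
{
    int i;
    PyObject * list = PyList_New( n );
    for ( i = 0; i < n; ++i )
        PyList_SetItem( list, i, PyString_FromString( strings[ i ] ) );
    return list;
}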
-    {
-        PyObject * pyResult = PyList_New( list_length( result ) );
-        int i = 0;
-        while ( result )
-        {
-            PyList_SetItem( pyResult, i, PyString_FromString( result->string ) );
-            result = list_next( result );
-            i += 1;
-        }
-        list_free( result );
-        return pyResult;
-    }
-}
-
-
-/*
- * Accepts four arguments:
- * - module name
- * - rule name,
- * - Python callable.
- * - (optional) bjam language function signature.
- * Creates a bjam rule with the specified name in the specified module, which will
- * invoke the Python callable.
- */
-
-PyObject * bjam_import_rule( PyObject * self, PyObject * args )
-{
-    char * module;
-    char * rule;
-    PyObject * func;
-    PyObject * bjam_signature = NULL;
-    module_t * m;
-    RULE * r;
-
-    if ( !PyArg_ParseTuple( args, "ssO|O:import_rule",
-                            &module, &rule, &func, &bjam_signature ) )
-        return NULL;
-
-    if ( !PyCallable_Check( func ) )
-    {
-        PyErr_SetString( PyExc_RuntimeError,
-                         "Non-callable object passed to bjam.import_rule" );
-        return NULL;
-    }
-
-    m = bindmodule( *module ? module : 0 );
-    r = bindrule( rule, m );
-
-    /* Make pFunc owned. */
-    Py_INCREF( func );
-
-    r->python_function = func;
-    r->arguments = 0;
-
-    if (bjam_signature)
-    {
-        argument_list * arg_list = args_new();
-        Py_ssize_t i;
-
-        Py_ssize_t s = PySequence_Size (bjam_signature);
-        for (i = 0; i < s; ++i)
-        {
-            PyObject* v = PySequence_GetItem (bjam_signature, i);
-            lol_add(arg_list->data, list_from_python (v));
-            Py_DECREF(v);
-        }
-        r->arguments = arg_list;
-    }
-
-    Py_INCREF( Py_None );
-    return Py_None;
-}
-
-
-/*
- * Accepts four arguments:
- * - an action name
- * - an action body
- * - a list of variables that will be bound inside the action
- * - integer flags.
- * Defines an action on the bjam side.
- */
-
-PyObject * bjam_define_action( PyObject * self, PyObject * args )
-{
-    char * name;
-    char * body;
-    module_t * m;
-    PyObject * bindlist_python;
-    int flags;
-    LIST * bindlist = L0;
-    int n;
-    int i;
-
-    if ( !PyArg_ParseTuple( args, "ssO!i:define_action", &name, &body,
-                            &PyList_Type, &bindlist_python, &flags ) )
-        return NULL;
-
-    n = PyList_Size( bindlist_python );
-    for ( i = 0; i < n; ++i )
-    {
-        PyObject * next = PyList_GetItem( bindlist_python, i );
-        if ( !PyString_Check( next ) )
-        {
-            PyErr_SetString( PyExc_RuntimeError,
-                             "bind list has non-string type" );
-            return NULL;
-        }
-        bindlist = list_new( bindlist, PyString_AsString( next ) );
-    }
-
-    new_rule_actions( root_module(), name, newstr( body ), bindlist, flags );
-
-    Py_INCREF( Py_None );
-    return Py_None;
-}
-
-
-/*
- * Returns the value of a variable in the root Jam module.
- */
-
-PyObject * bjam_variable( PyObject * self, PyObject * args )
-{
-    char * name;
-    LIST * value;
-    PyObject * result;
-    int i;
-
-    if ( !PyArg_ParseTuple( args, "s", &name ) )
-        return NULL;
-
-    enter_module( root_module() );
-    value = var_get( name );
-    exit_module( root_module() );
-
-    result = PyList_New( list_length( value ) );
-    for ( i = 0; value; value = list_next( value ), ++i )
-        PyList_SetItem( result, i, PyString_FromString( value->string ) );
-
-    return result;
-}
-
-
-PyObject * bjam_backtrace( PyObject * self, PyObject * args )
-{
-    PyObject * result = PyList_New( 0 );
-    struct frame * f = frame_before_python_call;
-
-    for ( ; f = f->prev; )
-    {
-        PyObject * tuple = PyTuple_New( 4 );
-        char * file;
-        int line;
-        char buf[ 32 ];
-
-        get_source_line( f->procedure, &file, &line );
-        sprintf( buf, "%d", line );
-
-        /* PyTuple_SetItem steals reference.
*/ - PyTuple_SetItem( tuple, 0, PyString_FromString( file ) ); - PyTuple_SetItem( tuple, 1, PyString_FromString( buf ) ); - PyTuple_SetItem( tuple, 2, PyString_FromString( f->module->name ) ); - PyTuple_SetItem( tuple, 3, PyString_FromString( f->rulename ) ); - - PyList_Append( result, tuple ); - Py_DECREF( tuple ); - } - return result; -} - -PyObject * bjam_caller( PyObject * self, PyObject * args ) -{ - PyObject *result = PyString_FromString( - frame_before_python_call->prev->module->name); - return result; -} - -#endif /* #ifdef HAVE_PYTHON */ - - -#ifdef HAVE_POPEN - -#if defined(_MSC_VER) || defined(__BORLANDC__) - #define popen windows_popen_wrapper - #define pclose _pclose - - /* - * This wrapper is a workaround for a funny _popen() feature on Windows - * where it eats external quotes in some cases. The bug seems to be related - * to the quote stripping functionality used by the Windows cmd.exe - * interpreter when its /S is not specified. - * - * Cleaned up quote from the cmd.exe help screen as displayed on Windows XP - * SP3: - * - * 1. If all of the following conditions are met, then quote characters on - * the command line are preserved: - * - * - no /S switch - * - exactly two quote characters - * - no special characters between the two quote characters, where - * special is one of: &<>()@^| - * - there are one or more whitespace characters between the two quote - * characters - * - the string between the two quote characters is the name of an - * executable file. - * - * 2. Otherwise, old behavior is to see if the first character is a quote - * character and if so, strip the leading character and remove the last - * quote character on the command line, preserving any text after the - * last quote character. - * - * This causes some commands containing quotes not to be executed correctly. - * For example: - * - * "\Long folder name\aaa.exe" --name="Jurko" --no-surname - * - * would get its outermost quotes stripped and would be executed as: - * - * \Long folder name\aaa.exe" --name="Jurko --no-surname - * - * which would report an error about '\Long' not being a valid command. - * - * cmd.exe help seems to indicate it would be enough to add an extra space - * character in front of the command to avoid this but this does not work, - * most likely due to the shell first stripping all leading whitespace - * characters from the command. - * - * Solution implemented here is to quote the whole command in case it - * contains any quote characters. Note thought this will not work correctly - * should Windows ever 'fix' this feature. - * (03.06.2008.) 
(Jurko) - */ - static FILE * windows_popen_wrapper( char * command, char * mode ) - { - int extra_command_quotes_needed = ( strchr( command, '"' ) != 0 ); - string quoted_command; - FILE * result; - - if ( extra_command_quotes_needed ) - { - string_new( "ed_command ); - string_append( "ed_command, "\"" ); - string_append( "ed_command, command ); - string_append( "ed_command, "\"" ); - command = quoted_command.value; - } - - result = _popen( command, "r" ); - - if ( extra_command_quotes_needed ) - string_free( "ed_command ); - - return result; - } -#endif - - -static char * rtrim(char *s) -{ - char *p = s; - while(*p) ++p; - for(--p; p >= s && isspace(*p); *p-- = 0); - return s; -} - -LIST * builtin_shell( PARSE * parse, FRAME * frame ) -{ - LIST * command = lol_get( frame->args, 0 ); - LIST * result = 0; - string s; - int ret; - char buffer[ 1024 ]; - FILE * p = NULL; - int exit_status = -1; - int exit_status_opt = 0; - int no_output_opt = 0; - int strip_eol_opt = 0; - - /* Process the variable args options. */ - { - int a = 1; - LIST * arg = lol_get( frame->args, a ); - while ( arg ) - { - if ( strcmp( "exit-status", arg->string ) == 0 ) - { - exit_status_opt = 1; - } - else if ( strcmp( "no-output", arg->string ) == 0 ) - { - no_output_opt = 1; - } - else if ( strcmp("strip-eol", arg->string) == 0 ) - { - strip_eol_opt = 1; - } - arg = lol_get( frame->args, ++a ); - } - } - - /* The following fflush() call seems to be indicated as a workaround for a - * popen() bug on POSIX implementations related to synhronizing input - * stream positions for the called and the calling process. - */ - fflush( NULL ); - - p = popen( command->string, "r" ); - if ( p == NULL ) - return L0; - - string_new( &s ); - - while ( ( ret = fread( buffer, sizeof( char ), sizeof( buffer ) - 1, p ) ) > 0 ) - { - buffer[ret] = 0; - if ( !no_output_opt ) - { - if ( strip_eol_opt ) - rtrim(buffer); - string_append( &s, buffer ); - } - } - - exit_status = pclose( p ); - - /* The command output is returned first. */ - result = list_new( L0, newstr( s.value ) ); - string_free( &s ); - - /* The command exit result next. */ - if ( exit_status_opt ) - { - if ( WIFEXITED(exit_status) ) - exit_status = WEXITSTATUS(exit_status); - else - exit_status = -1; - sprintf( buffer, "%d", exit_status ); - result = list_new( result, newstr( buffer ) ); - } - - return result; -} - -#else /* #ifdef HAVE_POPEN */ - -LIST * builtin_shell( PARSE * parse, FRAME * frame ) -{ - return L0; -} - -#endif /* #ifdef HAVE_POPEN */ diff --git a/jam-files/engine/builtins.h b/jam-files/engine/builtins.h deleted file mode 100644 index 5fed07c9..00000000 --- a/jam-files/engine/builtins.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. 
- */
-
-#ifndef JAM_BUILTINS_H
-# define JAM_BUILTINS_H
-
-# include "frames.h"
-
-/*
- * builtins.h - compile parsed jam statements
- */
-
-void load_builtins();
-void init_set();
-void init_path();
-void init_regex();
-void init_property_set();
-void init_sequence();
-void init_order();
-
-LIST *builtin_calc( PARSE *parse, FRAME *args );
-LIST *builtin_depends( PARSE *parse, FRAME *args );
-LIST *builtin_rebuilds( PARSE *parse, FRAME *args );
-LIST *builtin_echo( PARSE *parse, FRAME *args );
-LIST *builtin_exit( PARSE *parse, FRAME *args );
-LIST *builtin_flags( PARSE *parse, FRAME *args );
-LIST *builtin_glob( PARSE *parse, FRAME *args );
-LIST *builtin_glob_recursive( PARSE *parse, FRAME *frame );
-LIST *builtin_subst( PARSE *parse, FRAME *args );
-LIST *builtin_match( PARSE *parse, FRAME *args );
-LIST *builtin_split_by_characters( PARSE *parse, FRAME *args );
-LIST *builtin_hdrmacro( PARSE *parse, FRAME *args );
-LIST *builtin_rulenames( PARSE *parse, FRAME *args );
-LIST *builtin_varnames( PARSE *parse, FRAME *args );
-LIST *builtin_delete_module( PARSE *parse, FRAME *args );
-LIST *builtin_import( PARSE *parse, FRAME *args );
-LIST *builtin_export( PARSE *parse, FRAME *args );
-LIST *builtin_caller_module( PARSE *parse, FRAME *args );
-LIST *builtin_backtrace( PARSE *parse, FRAME *args );
-LIST *builtin_pwd( PARSE *parse, FRAME *args );
-LIST *builtin_update( PARSE *parse, FRAME *args );
-LIST *builtin_update_now( PARSE *parse, FRAME *args );
-LIST *builtin_search_for_target( PARSE *parse, FRAME *args );
-LIST *builtin_import_module( PARSE *parse, FRAME *args );
-LIST *builtin_imported_modules( PARSE *parse, FRAME *frame );
-LIST *builtin_instance( PARSE *parse, FRAME *frame );
-LIST *builtin_sort( PARSE *parse, FRAME *frame );
-LIST *builtin_normalize_path( PARSE *parse, FRAME *frame );
-LIST *builtin_native_rule( PARSE *parse, FRAME *frame );
-LIST *builtin_has_native_rule( PARSE *parse, FRAME *frame );
-LIST *builtin_user_module( PARSE *parse, FRAME *frame );
-LIST *builtin_nearest_user_location( PARSE *parse, FRAME *frame );
-LIST *builtin_check_if_file( PARSE *parse, FRAME *frame );
-LIST *builtin_python_import_rule( PARSE *parse, FRAME *frame );
-LIST *builtin_shell( PARSE *parse, FRAME *frame );
-LIST *builtin_md5( PARSE *parse, FRAME *frame );
-LIST *builtin_file_open( PARSE *parse, FRAME *frame );
-LIST *builtin_pad( PARSE *parse, FRAME *frame );
-LIST *builtin_precious( PARSE *parse, FRAME *frame );
-LIST *builtin_self_path( PARSE *parse, FRAME *frame );
-LIST *builtin_makedir( PARSE *parse, FRAME *frame );
-
-void backtrace( FRAME *frame );
-extern int last_update_now_status;
-
-#endif
diff --git a/jam-files/engine/bump_version.py b/jam-files/engine/bump_version.py
deleted file mode 100644
index 9423c4c7..00000000
--- a/jam-files/engine/bump_version.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/python

-# This script is used to bump the version of bjam. It takes a single argument, e.g.
-#
-#   ./bump_version.py 3.1.9
-#
-# and updates all necessary files. For the time being, it assumes the presence
-# of a 'perl' executable and the Debian-specific 'dch' executable.
-#
-
-
-import os
-import os.path
-import re
-import string
-import sys
-
-srcdir = os.path.abspath(os.path.dirname(__file__ ))
-docdir = os.path.abspath(os.path.join(srcdir,"..","doc"))
-
-def edit(file,replacements):
-    print " '%s'..."
%(file) - text = open(file,'r').read() - while len(replacements) > 0: - #~ print " '%s' ==> '%s'" % (replacements[0],replacements[1]) - text = re.compile(replacements[0],re.M).subn(replacements[1],text)[0] - replacements = replacements[2:] - #~ print text - open(file,'w').write(text) - -def make_edits(version): - edit(os.path.join(srcdir,"boost-jam.spec"), [ - '^Version:.*$','Version: %s' % string.join(version, "."), - ]) - - edit(os.path.join(srcdir,"build.jam"), [ - '^_VERSION_ = .* ;$','_VERSION_ = %s %s %s ;' % (version[0], version[1], version[2]), - ]) - - edit(os.path.join(docdir,"bjam.qbk"), [ - '\[version.*\]','[version: %s]' % string.join(version, '.'), - '\[def :version:.*\]','[def :version: %s]' % string.join(version, '.'), - ]) - - edit(os.path.join(srcdir,"patchlevel.h"), [ - '^#define VERSION_MAJOR .*$', - '#define VERSION_MAJOR %s' % (version[0]), - '^#define VERSION_MINOR .*$', - '#define VERSION_MINOR %s' % (version[1]), - '^#define VERSION_PATCH .*$', - '#define VERSION_PATCH %s' % (version[2]), - '^#define VERSION_MAJOR_SYM .*$', - '#define VERSION_MAJOR_SYM "0%s"' % (version[0]), - '^#define VERSION_MINOR_SYM .*$', - '#define VERSION_MINOR_SYM "%s"' % (version[1]), - '^#define VERSION_PATCH_SYM .*$', - '#define VERSION_PATCH_SYM "%s"' % (version[2]), - '^#define VERSION .*$', - '#define VERSION "%s"' % string.join(version, '.'), - '^#define JAMVERSYM .*$', - '#define JAMVERSYM "JAMVERSION=%s.%s"' % (version[0],version[1]), - ]) - -def main(): - - if len(sys.argv) < 2: - print "Expect new version as argument" - sys.exit(1) - - version = string.split(sys.argv[1], ".") - print "Setting version to", version - make_edits(version) - -if __name__ == '__main__': - main() - -#~ Copyright 2006 Rene Rivera. -#~ Copyright 2005-2006 Vladimir Prus. -#~ Distributed under the Boost Software License, Version 1.0. -#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) diff --git a/jam-files/engine/class.c b/jam-files/engine/class.c deleted file mode 100644 index ff4ec568..00000000 --- a/jam-files/engine/class.c +++ /dev/null @@ -1,141 +0,0 @@ -/* Copyright Vladimir Prus 2003. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#include "class.h" -#include "strings.h" -#include "variable.h" -#include "frames.h" -#include "rules.h" -#include "newstr.h" - -#include "hash.h" - - -static struct hash * classes = 0; - - -static void check_defined( LIST * class_names ) -{ - for ( ; class_names; class_names = class_names->next ) - { - char * * p = &class_names->string; - if ( !hashcheck( classes, (HASHDATA * *)&p ) ) - { - printf( "Class %s is not defined\n", class_names->string ); - abort(); - } - } -} - - -static char * class_module_name( char * declared_name ) -{ - string name[ 1 ]; - char * result; - - string_new( name ); - string_append( name, "class@" ); - string_append( name, declared_name ); - - result = newstr( name->value ); - string_free( name ); - - return result; -} - - -struct import_base_data -{ - char * base_name; - module_t * base_module; - module_t * class_module; -}; - - -static void import_base_rule( void * r_, void * d_ ) -{ - RULE * r = (RULE *)r_; - RULE * ir1; - RULE * ir2; - struct import_base_data * d = (struct import_base_data *)d_; - string qualified_name[ 1 ]; - - string_new ( qualified_name ); - string_append ( qualified_name, d->base_name ); - string_push_back( qualified_name, '.' 
); - string_append ( qualified_name, r->name ); - - ir1 = import_rule( r, d->class_module, r->name ); - ir2 = import_rule( r, d->class_module, qualified_name->value ); - - /* Copy 'exported' flag. */ - ir1->exported = ir2->exported = r->exported; - - /* If we are importing a class method, localize it. */ - if ( ( r->module == d->base_module ) || ( r->module->class_module && - ( r->module->class_module == d->base_module ) ) ) - ir1->module = ir2->module = d->class_module; - - string_free( qualified_name ); -} - - -/* - * For each exported rule 'n', declared in class module for base, imports that - * rule in 'class' as 'n' and as 'base.n'. Imported rules are localized and - * marked as exported. - */ - -static void import_base_rules( module_t * class, char * base ) -{ - module_t * base_module = bindmodule( class_module_name( base ) ); - struct import_base_data d; - d.base_name = base; - d.base_module = base_module; - d.class_module = class; - - if ( base_module->rules ) - hashenumerate( base_module->rules, import_base_rule, &d ); - - import_module( imported_modules( base_module ), class ); -} - - -char * make_class_module( LIST * xname, LIST * bases, FRAME * frame ) -{ - char * name = class_module_name( xname->string ); - char * * pp = &xname->string; - module_t * class_module = 0; - module_t * outer_module = frame->module; - - if ( !classes ) - classes = hashinit( sizeof( char * ), "classes" ); - - if ( hashcheck( classes, (HASHDATA * *)&pp ) ) - { - printf( "Class %s already defined\n", xname->string ); - abort(); - } - else - { - hashenter( classes, (HASHDATA * *)&pp ); - } - check_defined( bases ); - - class_module = bindmodule( name ); - - exit_module( outer_module ); - enter_module( class_module ); - - var_set( "__name__", xname, VAR_SET ); - var_set( "__bases__", bases, VAR_SET ); - - exit_module( class_module ); - enter_module( outer_module ); - - for ( ; bases; bases = bases->next ) - import_base_rules( class_module, bases->string ); - - return name; -} diff --git a/jam-files/engine/class.h b/jam-files/engine/class.h deleted file mode 100644 index f7faeff6..00000000 --- a/jam-files/engine/class.h +++ /dev/null @@ -1,13 +0,0 @@ -/* Copyright Vladimir Prus 2003. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#ifndef CLASS_H_VP_2003_08_01 -#define CLASS_H_VP_2003_08_01 - -#include "lists.h" -#include "frames.h" - -char* make_class_module(LIST* xname, LIST* bases, FRAME* frame); - -#endif diff --git a/jam-files/engine/command.c b/jam-files/engine/command.c deleted file mode 100644 index d2ea0681..00000000 --- a/jam-files/engine/command.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. 
- * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -/* - * command.c - maintain lists of commands - */ - -#include "jam.h" - -#include "lists.h" -#include "parse.h" -#include "variable.h" -#include "rules.h" - -#include "command.h" -#include <limits.h> -#include <string.h> - - -/* - * cmd_new() - return a new CMD or 0 if too many args - */ - -CMD * cmd_new( RULE * rule, LIST * targets, LIST * sources, LIST * shell ) -{ - CMD * cmd = (CMD *)BJAM_MALLOC( sizeof( CMD ) ); - /* Lift line-length limitation entirely when JAMSHELL is just "%". */ - int no_limit = ( shell && !strcmp(shell->string,"%") && !list_next(shell) ); - int max_line = MAXLINE; - int allocated = -1; - - cmd->rule = rule; - cmd->shell = shell; - cmd->next = 0; - - lol_init( &cmd->args ); - lol_add( &cmd->args, targets ); - lol_add( &cmd->args, sources ); - cmd->buf = 0; - - do - { - BJAM_FREE( cmd->buf ); /* free any buffer from previous iteration */ - - cmd->buf = (char*)BJAM_MALLOC_ATOMIC( max_line + 1 ); - - if ( cmd->buf == 0 ) - break; - - allocated = var_string( rule->actions->command, cmd->buf, max_line, &cmd->args ); - - max_line = max_line * 2; - } - while ( ( allocated < 0 ) && ( max_line < INT_MAX / 2 ) ); - - if ( !no_limit ) - { - /* Bail if the result will not fit in MAXLINE. */ - char * s = cmd->buf; - while ( *s ) - { - size_t l = strcspn( s, "\n" ); - - if ( l > MAXLINE ) - { - /* We do not free targets/sources/shell if bailing. */ - cmd_free( cmd ); - return 0; - } - - s += l; - if ( *s ) - ++s; - } - } - - return cmd; -} - - -/* - * cmd_free() - free a CMD - */ - -void cmd_free( CMD * cmd ) -{ - lol_free( &cmd->args ); - list_free( cmd->shell ); - BJAM_FREE( cmd->buf ); - BJAM_FREE( (char *)cmd ); -} diff --git a/jam-files/engine/command.h b/jam-files/engine/command.h deleted file mode 100644 index ddd38e68..00000000 --- a/jam-files/engine/command.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 1994 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * command.h - the CMD structure and routines to manipulate them - * - * Both ACTION and CMD contain a rule, targets, and sources. An - * ACTION describes a rule to be applied to the given targets and - * sources; a CMD is what actually gets executed by the shell. The - * differences are due to: - * - * ACTIONS must be combined if 'actions together' is given. - * ACTIONS must be split if 'actions piecemeal' is given. - * ACTIONS must have current sources omitted for 'actions updated'. - * - * The CMD datatype holds a single command that is to be executed - * against a target, and they can chain together to represent the - * full collection of commands used to update a target. - * - * Structures: - * - * CMD - an action, ready to be formatted into a buffer and executed. - * - * External routines: - * - * cmd_new() - return a new CMD or 0 if too many args. - * cmd_free() - delete CMD and its parts. - * cmd_next() - walk the CMD chain. - */ - - -/* - * CMD - an action, ready to be formatted into a buffer and executed. 
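- */

/* Editor's sketch (hypothetical, not part of the deleted file): the same
 * grow-and-retry idiom cmd_new() uses above, written against vsnprintf.
 * The buffer is doubled until the formatted result fits.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char * format_alloc( const char * fmt, ... )
{
    size_t cap = 128;
    char * buf = 0;
    for ( ; ; )
    {
        va_list ap;
        int n;
        char * grown = realloc( buf, cap );
        if ( !grown ) { free( buf ); return 0; }
        buf = grown;
        va_start( ap, fmt );
        n = vsnprintf( buf, cap, fmt, ap );
        va_end( ap );
        if ( n >= 0 && (size_t)n < cap )
            return buf;    /* formatted result fits, terminator included */
        cap *= 2;          /* truncated: double the buffer and retry */
    }
}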
-
-typedef struct _cmd CMD;
-
-struct _cmd
-{
-    CMD * next;
-    CMD * tail;    /* valid only in head */
-    RULE * rule;   /* rule->actions contains shell script */
-    LIST * shell;  /* $(SHELL) value */
-    LOL args;      /* LISTs for $(<), $(>) */
-    char * buf;    /* actual commands */
-};
-
-CMD * cmd_new
-(
-    RULE * rule,     /* rule (referenced) */
-    LIST * targets,  /* $(<) (freed) */
-    LIST * sources,  /* $(>) (freed) */
-    LIST * shell     /* $(SHELL) (freed) */
-);
-
-void cmd_free( CMD * );
-
-#define cmd_next( c ) ( ( c )->next )
diff --git a/jam-files/engine/compile.c b/jam-files/engine/compile.c
deleted file mode 100644
index 2c049aae..00000000
--- a/jam-files/engine/compile.c
+++ /dev/null
@@ -1,1424 +0,0 @@
-/*
- * Copyright 1993, 2000 Christopher Seiwald.
- *
- * This file is part of Jam - see jam.c for Copyright information.
- */
-
-/* This file is ALSO:
- * Copyright 2001-2004 David Abrahams.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
- */
-
-# include "jam.h"
-
-# include "lists.h"
-# include "parse.h"
-# include "compile.h"
-# include "variable.h"
-# include "expand.h"
-# include "rules.h"
-# include "newstr.h"
-# include "make.h"
-# include "search.h"
-# include "hdrmacro.h"
-# include "hash.h"
-# include "modules.h"
-# include "strings.h"
-# include "builtins.h"
-# include "class.h"
-
-# include <assert.h>
-# include <string.h>
-# include <stdarg.h>
-
-/*
- * compile.c - compile parsed jam statements
- *
- * External routines:
- *
- *  compile_append() - append list results of two statements
- *  compile_eval() - evaluate if to determine which leg to compile
- *  compile_foreach() - compile the "for x in y" statement
- *  compile_if() - compile 'if' rule
- *  compile_while() - compile 'while' rule
- *  compile_include() - support for 'include' - call include() on file
- *  compile_list() - expand and return a list
- *  compile_local() - declare (and set) local variables
- *  compile_null() - do nothing -- a stub for parsing
- *  compile_on() - run rule under influence of on-target variables
- *  compile_rule() - compile a single user defined rule
- *  compile_rules() - compile a chain of rules
- *  compile_set() - compile the "set variable" statement
- *  compile_setcomp() - support for `rule` - save parse tree
- *  compile_setexec() - support for `actions` - save execution string
- *  compile_settings() - compile the "on =" (set variable on exec) statement
- *  compile_switch() - compile 'switch' rule
- *
- * Internal routines:
- *
- *  debug_compile() - printf with indent to show rule expansion.
- *  evaluate_rule() - execute a rule invocation
- *
- *  builtin_depends() - DEPENDS/INCLUDES rule
- *  builtin_echo() - ECHO rule
- *  builtin_exit() - EXIT rule
- *  builtin_flags() - NOCARE, NOTFILE, TEMPORARY rule
- *
- * 02/03/94 (seiwald) - Changed trace output to read "setting" instead of
- *          the awkward sounding "settings".
- * 04/12/94 (seiwald) - Combined build_depends() with build_includes().
- * 04/12/94 (seiwald) - actionlist() now just appends a single action.
- * 04/13/94 (seiwald) - added shorthand L0 for null list pointer
- * 05/13/94 (seiwald) - include files are now bound as targets, and thus
- *          can make use of $(SEARCH)
- * 06/01/94 (seiwald) - new 'actions existing' does existing sources
- * 08/23/94 (seiwald) - Support for '+=' (append to variable)
- * 12/20/94 (seiwald) - NOTIME renamed NOTFILE.
- * 01/22/95 (seiwald) - Exit rule.
- * 02/02/95 (seiwald) - Always rule; LEAVES rule.
- * 02/14/95 (seiwald) - NoUpdate rule. - * 09/11/00 (seiwald) - new evaluate_rule() for headers(). - * 09/11/00 (seiwald) - compile_xxx() now return LIST *. - * New compile_append() and compile_list() in - * support of building lists here, rather than - * in jamgram.yy. - * 01/10/00 (seiwald) - built-ins split out to builtin.c. - */ - -static void debug_compile( int which, char *s, FRAME* frame ); -int glob( char *s, char *c ); -/* Internal functions from builtins.c */ -void backtrace( FRAME *frame ); -void backtrace_line( FRAME *frame ); -void print_source_line( PARSE* p ); - -struct frame * frame_before_python_call; - -void frame_init( FRAME* frame ) -{ - frame->prev = 0; - frame->prev_user = 0; - lol_init(frame->args); - frame->module = root_module(); - frame->rulename = "module scope"; - frame->procedure = 0; -} - - -void frame_free( FRAME* frame ) -{ - lol_free( frame->args ); -} - - -/* - * compile_append() - append list results of two statements - * - * parse->left more compile_append() by left-recursion - * parse->right single rule - */ - -LIST * compile_append( PARSE * parse, FRAME * frame ) -{ - /* Append right to left. */ - return list_append( - parse_evaluate( parse->left, frame ), - parse_evaluate( parse->right, frame ) ); -} - - -/* - * compile_eval() - evaluate if to determine which leg to compile - * - * Returns: - * list if expression true - compile 'then' clause - * L0 if expression false - compile 'else' clause - */ - -static int lcmp( LIST * t, LIST * s ) -{ - int status = 0; - - while ( !status && ( t || s ) ) - { - char *st = t ? t->string : ""; - char *ss = s ? s->string : ""; - - status = strcmp( st, ss ); - - t = t ? list_next( t ) : t; - s = s ? list_next( s ) : s; - } - - return status; -} - -LIST * compile_eval( PARSE * parse, FRAME * frame ) -{ - LIST * ll; - LIST * lr; - LIST * s; - LIST * t; - int status = 0; - - /* Short circuit lr eval for &&, ||, and 'in'. */ - - ll = parse_evaluate( parse->left, frame ); - lr = 0; - - switch ( parse->num ) - { - case EXPR_AND: - case EXPR_IN : if ( ll ) goto eval; break; - case EXPR_OR : if ( !ll ) goto eval; break; - default: eval: lr = parse_evaluate( parse->right, frame ); - } - - /* Now eval. */ - switch ( parse->num ) - { - case EXPR_NOT: if ( !ll ) status = 1; break; - case EXPR_AND: if ( ll && lr ) status = 1; break; - case EXPR_OR : if ( ll || lr ) status = 1; break; - - case EXPR_IN: - /* "a in b": make sure each of ll is equal to something in lr. */ - for ( t = ll; t; t = list_next( t ) ) - { - for ( s = lr; s; s = list_next( s ) ) - if ( !strcmp( t->string, s->string ) ) - break; - if ( !s ) break; - } - /* No more ll? Success. */ - if ( !t ) status = 1; - break; - - case EXPR_EXISTS: if ( lcmp( ll, L0 ) != 0 ) status = 1; break; - case EXPR_EQUALS: if ( lcmp( ll, lr ) == 0 ) status = 1; break; - case EXPR_NOTEQ : if ( lcmp( ll, lr ) != 0 ) status = 1; break; - case EXPR_LESS : if ( lcmp( ll, lr ) < 0 ) status = 1; break; - case EXPR_LESSEQ: if ( lcmp( ll, lr ) <= 0 ) status = 1; break; - case EXPR_MORE : if ( lcmp( ll, lr ) > 0 ) status = 1; break; - case EXPR_MOREEQ: if ( lcmp( ll, lr ) >= 0 ) status = 1; break; - } - - if ( DEBUG_IF ) - { - debug_compile( 0, "if", frame ); - list_print( ll ); - printf( "(%d) ", status ); - list_print( lr ); - printf( "\n" ); - } - - /* Find something to return. */ - /* In odd circumstances (like "" = "") */ - /* we'll have to return a new string. 
*/ - - if ( !status ) t = 0; - else if ( ll ) t = ll, ll = 0; - else if ( lr ) t = lr, lr = 0; - else t = list_new( L0, newstr( "1" ) ); - - if ( ll ) list_free( ll ); - if ( lr ) list_free( lr ); - return t; -} - - -/* - * compile_foreach() - compile the "for x in y" statement - * - * Compile_foreach() resets the given variable name to each specified - * value, executing the commands enclosed in braces for each iteration. - * - * parse->string index variable - * parse->left variable values - * parse->right rule to compile - */ - -LIST * compile_foreach( PARSE * parse, FRAME * frame ) -{ - LIST * nv = parse_evaluate( parse->left, frame ); - LIST * l; - SETTINGS * s = 0; - - if ( parse->num ) - { - s = addsettings( s, VAR_SET, parse->string, L0 ); - pushsettings( s ); - } - - /* Call var_set to reset $(parse->string) for each val. */ - - for ( l = nv; l; l = list_next( l ) ) - { - LIST * val = list_new( L0, copystr( l->string ) ); - var_set( parse->string, val, VAR_SET ); - list_free( parse_evaluate( parse->right, frame ) ); - } - - if ( parse->num ) - { - popsettings( s ); - freesettings( s ); - } - - list_free( nv ); - - return L0; -} - -/* - * compile_if() - compile 'if' rule - * - * parse->left condition tree - * parse->right then tree - * parse->third else tree - */ - -LIST * compile_if( PARSE * p, FRAME * frame ) -{ - LIST * l = parse_evaluate( p->left, frame ); - if ( l ) - { - list_free( l ); - return parse_evaluate( p->right, frame ); - } - return parse_evaluate( p->third, frame ); -} - - -LIST * compile_while( PARSE * p, FRAME * frame ) -{ - LIST * r = 0; - LIST * l; - while ( ( l = parse_evaluate( p->left, frame ) ) ) - { - list_free( l ); - if ( r ) list_free( r ); - r = parse_evaluate( p->right, frame ); - } - return r; -} - - -/* - * compile_include() - support for 'include' - call include() on file - * - * parse->left list of files to include (can only do 1) - */ - -LIST * compile_include( PARSE * parse, FRAME * frame ) -{ - LIST * nt = parse_evaluate( parse->left, frame ); - - if ( DEBUG_COMPILE ) - { - debug_compile( 0, "include", frame); - list_print( nt ); - printf( "\n" ); - } - - if ( nt ) - { - TARGET * t = bindtarget( nt->string ); - - /* DWA 2001/10/22 - Perforce Jam cleared the arguments here, which - * prevents an included file from being treated as part of the body of a - * rule. I did not see any reason to do that, so I lifted the - * restriction. - */ - - /* Bind the include file under the influence of */ - /* "on-target" variables. Though they are targets, */ - /* include files are not built with make(). */ - - pushsettings( t->settings ); - /* We don't expect that file to be included is generated by some - action. Therefore, pass 0 as third argument. - If the name resolves to directory, let it error out. */ - t->boundname = search( t->name, &t->time, 0, 0 ); - popsettings( t->settings ); - - parse_file( t->boundname, frame ); - } - - list_free( nt ); - - return L0; -} - -static LIST* evaluate_in_module ( char* module_name, PARSE * p, FRAME* frame) -{ - LIST* result; - - module_t* outer_module = frame->module; - frame->module = module_name ? 
bindmodule( module_name ) : root_module(); - - if ( outer_module != frame->module ) - { - exit_module( outer_module ); - enter_module( frame->module ); - } - - result = parse_evaluate( p, frame ); - - if ( outer_module != frame->module ) - { - exit_module( frame->module ); - enter_module( outer_module ); - frame->module = outer_module; - } - - return result; -} - - -LIST * compile_module( PARSE * p, FRAME * frame ) -{ - /* Here we are entering a module declaration block. */ - LIST * module_name = parse_evaluate( p->left, frame ); - LIST * result = evaluate_in_module( module_name ? module_name->string : 0, - p->right, frame ); - list_free( module_name ); - return result; -} - - -LIST * compile_class( PARSE * p, FRAME * frame ) -{ - /** Todo: check for empty class name. - Check for class redeclaration. */ - - char * class_module = 0; - - LIST * name = parse_evaluate( p->left->right, frame ); - LIST * bases = 0; - - if ( p->left->left ) - bases = parse_evaluate( p->left->left->right, frame ); - - class_module = make_class_module( name, bases, frame ); - evaluate_in_module( class_module, p->right, frame ); - - return L0; -} - - -/* - * compile_list() - expand and return a list. - * - * parse->string - character string to expand. - */ - -LIST * compile_list( PARSE * parse, FRAME * frame ) -{ - /* s is a copyable string */ - char * s = parse->string; - return var_expand( L0, s, s + strlen( s ), frame->args, 1 ); -} - - -/* - * compile_local() - declare (and set) local variables. - * - * parse->left list of variables - * parse->right list of values - * parse->third rules to execute - */ - -LIST * compile_local( PARSE * parse, FRAME * frame ) -{ - LIST * l; - SETTINGS * s = 0; - LIST * nt = parse_evaluate( parse->left, frame ); - LIST * ns = parse_evaluate( parse->right, frame ); - LIST * result; - - if ( DEBUG_COMPILE ) - { - debug_compile( 0, "local", frame ); - list_print( nt ); - printf( " = " ); - list_print( ns ); - printf( "\n" ); - } - - /* Initial value is ns. */ - for ( l = nt; l; l = list_next( l ) ) - s = addsettings( s, VAR_SET, l->string, list_copy( (LIST *)0, ns ) ); - - list_free( ns ); - list_free( nt ); - - /* Note that callees of the current context get this "local" variable, - * making it not so much local as layered. - */ - - pushsettings( s ); - result = parse_evaluate( parse->third, frame ); - popsettings( s ); - - freesettings( s ); - - return result; -} - - -/* - * compile_null() - do nothing -- a stub for parsing. - */ - -LIST * compile_null( PARSE * parse, FRAME * frame ) -{ - return L0; -} - - -/* - * compile_on() - run rule under influence of on-target variables - * - * parse->left list of files to include (can only do 1). - * parse->right rule to run. - * - * EXPERIMENTAL! - */ - -LIST * compile_on( PARSE * parse, FRAME * frame ) -{ - LIST * nt = parse_evaluate( parse->left, frame ); - LIST * result = 0; - - if ( DEBUG_COMPILE ) - { - debug_compile( 0, "on", frame ); - list_print( nt ); - printf( "\n" ); - } - - if ( nt ) - { - TARGET * t = bindtarget( nt->string ); - pushsettings( t->settings ); - result = parse_evaluate( parse->right, frame ); - popsettings( t->settings ); - } - - list_free( nt ); - - return result; -} - - -/* - * compile_rule() - compile a single user defined rule. - * - * parse->string name of user defined rule. - * parse->left parameters (list of lists) to rule, recursing left. - * - * Wrapped around evaluate_rule() so that headers() can share it. 
- */ - -LIST * compile_rule( PARSE * parse, FRAME * frame ) -{ - FRAME inner[ 1 ]; - LIST * result; - PARSE * p; - - /* Build up the list of arg lists. */ - frame_init( inner ); - inner->prev = frame; - inner->prev_user = frame->module->user_module ? frame : frame->prev_user; - inner->module = frame->module; /* This gets fixed up in evaluate_rule(), below. */ - inner->procedure = parse; - /* Special-case LOL of length 1 where the first list is totally empty. - This is created when calling functions with no parameters, due to - the way jam grammar is written. This is OK when one jam function - calls another, but really not good when Jam function calls Python. */ - if ( parse->left->left == NULL && parse->left->right->func == compile_null) - ; - else - for ( p = parse->left; p; p = p->left ) - lol_add( inner->args, parse_evaluate( p->right, frame ) ); - - /* And invoke the rule. */ - result = evaluate_rule( parse->string, inner ); - frame_free( inner ); - return result; -} - - -static void argument_error( char * message, RULE * rule, FRAME * frame, LIST* arg ) -{ - LOL * actual = frame->args; - assert( frame->procedure != 0 ); - backtrace_line( frame->prev ); - printf( "*** argument error\n* rule %s ( ", frame->rulename ); - lol_print( rule->arguments->data ); - printf( " )\n* called with: ( " ); - lol_print( actual ); - printf( " )\n* %s %s\n", message, arg ? arg->string : "" ); - print_source_line( rule->procedure ); - printf( "see definition of rule '%s' being called\n", rule->name ); - backtrace( frame->prev ); - exit( 1 ); -} - - -/* Define delimiters for type check elements in argument lists (and return type - * specifications, eventually). - */ -# define TYPE_OPEN_DELIM '[' -# define TYPE_CLOSE_DELIM ']' - -/* - * is_type_name() - true iff the given string represents a type check - * specification. - */ - -static int is_type_name( char * s ) -{ - return ( s[ 0 ] == TYPE_OPEN_DELIM ) && - ( s[ strlen( s ) - 1 ] == TYPE_CLOSE_DELIM ); -} - - -/* - * arg_modifier - if the next element of formal is a single character, return - * that; return 0 otherwise. Used to extract "*+?" modifiers * from argument - * lists. - */ - -static char arg_modifier( LIST * formal ) -{ - if ( formal->next ) - { - char * next = formal->next->string; - if ( next && ( next[ 0 ] != 0 ) && ( next[ 1 ] == 0 ) ) - return next[ 0 ]; - } - return 0; -} - - -/* - * type_check() - checks that each element of values satisfies the requirements - * of type_name. - * - * caller - the frame of the rule calling the rule whose arguments are - * being checked - * - * called - the rule being called - * - * arg_name - a list element containing the name of the argument being - * checked - */ - -static void type_check -( - char * type_name, - LIST * values, - FRAME * caller, - RULE * called, - LIST * arg_name -) -{ - static module_t * typecheck = 0; - - /* If nothing to check, bail now. */ - if ( !values || !type_name ) - return; - - if ( !typecheck ) - typecheck = bindmodule( ".typecheck" ); - - /* If the checking rule can not be found, also bail. */ - { - RULE checker_, *checker = &checker_; - - checker->name = type_name; - if ( !typecheck->rules || !hashcheck( typecheck->rules, (HASHDATA * *)&checker ) ) - return; - } - - exit_module( caller->module ); - - while ( values != 0 ) - { - LIST *error; - FRAME frame[1]; - frame_init( frame ); - frame->module = typecheck; - frame->prev = caller; - frame->prev_user = caller->module->user_module ? 
caller : caller->prev_user;
-
-        enter_module( typecheck );
-        /* Prepare the argument list */
-        lol_add( frame->args, list_new( L0, values->string ) );
-        error = evaluate_rule( type_name, frame );
-
-        exit_module( typecheck );
-
-        if ( error )
-            argument_error( error->string, called, caller, arg_name );
-
-        frame_free( frame );
-        values = values->next;
-    }
-
-    enter_module( caller->module );
-}
-
-/*
- * collect_arguments() - local argument checking and collection
- */
-static SETTINGS *
-collect_arguments( RULE* rule, FRAME* frame )
-{
-    SETTINGS *locals = 0;
-
-    LOL * all_actual = frame->args;
-    LOL * all_formal = rule->arguments ? rule->arguments->data : 0;
-    if ( all_formal )  /* Nothing to set; nothing to check */
-    {
-        int max = all_formal->count > all_actual->count
-            ? all_formal->count
-            : all_actual->count;
-
-        int n;
-        for ( n = 0; n < max ; ++n )
-        {
-            LIST *actual = lol_get( all_actual, n );
-            char *type_name = 0;
-
-            LIST *formal;
-            for ( formal = lol_get( all_formal, n ); formal; formal = formal->next )
-            {
-                char* name = formal->string;
-
-                if ( is_type_name(name) )
-                {
-                    if ( type_name )
-                        argument_error( "missing argument name before type name:", rule, frame, formal );
-
-                    if ( !formal->next )
-                        argument_error( "missing argument name after type name:", rule, frame, formal );
-
-                    type_name = formal->string;
-                }
-                else
-                {
-                    LIST* value = 0;
-                    char modifier;
-                    LIST* arg_name = formal;  /* hold the argument name for type checking */
-                    int multiple = 0;
-
-                    /* Stop now if a variable number of arguments is specified */
-                    if ( name[0] == '*' && name[1] == 0 )
-                        return locals;
-
-                    modifier = arg_modifier( formal );
-
-                    if ( !actual && modifier != '?' && modifier != '*' )
-                        argument_error( "missing argument", rule, frame, formal );
-
-                    switch ( modifier )
-                    {
-                        case '+':
-                        case '*':
-                            value = list_copy( 0, actual );
-                            multiple = 1;
-                            actual = 0;
-                            /* skip an extra element for the modifier */
-                            formal = formal->next;
-                            break;
-                        case '?':
-                            /* skip an extra element for the modifier */
-                            formal = formal->next;
-                            /* fall through */
-                        default:
-                            if ( actual )  /* in case actual is missing */
-                            {
-                                value = list_new( 0, actual->string );
-                                actual = actual->next;
-                            }
-                    }
-
-                    locals = addsettings(locals, VAR_SET, name, value);
-                    locals->multiple = multiple;
-                    type_check( type_name, value, frame, rule, arg_name );
-                    type_name = 0;
-                }
-            }
-
-            if ( actual )
-            {
-                argument_error( "extra argument", rule, frame, actual );
-            }
-        }
-    }
-    return locals;
-}
-
-RULE *
-enter_rule( char *rulename, module_t *target_module );
-
-#ifdef HAVE_PYTHON
-
-static int python_instance_number = 0;
-
-
-/* Given a Python object, return a string to use in Jam
-   code instead of said object.
-   If the object is a string, use the string value.
-   If the object implements the __jam_repr__ method, use that.
-   Otherwise return 0.
-
-   The result value is newstr-ed. */
-char *python_to_string(PyObject* value)
-{
-    if (PyString_Check(value))
-    {
-        return newstr(PyString_AsString(value));
-    }
-    else
-    {
-        /* See if this is an instance that defines a special __jam_repr__
-           method.
*/ - if (PyInstance_Check(value) - && PyObject_HasAttrString(value, "__jam_repr__")) - { - PyObject* repr = PyObject_GetAttrString(value, "__jam_repr__"); - if (repr) - { - PyObject* arguments2 = PyTuple_New(0); - PyObject* value2 = PyObject_Call(repr, arguments2, 0); - Py_DECREF(repr); - Py_DECREF(arguments2); - if (PyString_Check(value2)) - { - return newstr(PyString_AsString(value2)); - } - Py_DECREF(value2); - } - } - return 0; - } -} - -static LIST* -call_python_function(RULE* r, FRAME* frame) -{ - LIST * result = 0; - PyObject * arguments = 0; - PyObject * kw = NULL; - int i ; - PyObject * py_result; - - if (r->arguments) - { - SETTINGS * args; - - arguments = PyTuple_New(0); - kw = PyDict_New(); - - for (args = collect_arguments(r, frame); args; args = args->next) - { - PyObject *key = PyString_FromString(args->symbol); - PyObject *value = 0; - if (args->multiple) - value = list_to_python(args->value); - else { - if (args->value) - value = PyString_FromString(args->value->string); - } - - if (value) - PyDict_SetItem(kw, key, value); - Py_DECREF(key); - Py_XDECREF(value); - } - } - else - { - arguments = PyTuple_New( frame->args->count ); - for ( i = 0; i < frame->args->count; ++i ) - { - PyObject * arg = PyList_New(0); - LIST* l = lol_get( frame->args, i); - - for ( ; l; l = l->next ) - { - PyObject * v = PyString_FromString(l->string); - PyList_Append( arg, v ); - Py_DECREF(v); - } - /* Steals reference to 'arg' */ - PyTuple_SetItem( arguments, i, arg ); - } - } - - frame_before_python_call = frame; - py_result = PyObject_Call( r->python_function, arguments, kw ); - Py_DECREF(arguments); - Py_XDECREF(kw); - if ( py_result != NULL ) - { - if ( PyList_Check( py_result ) ) - { - int size = PyList_Size( py_result ); - int i; - for ( i = 0; i < size; ++i ) - { - PyObject * item = PyList_GetItem( py_result, i ); - char *s = python_to_string (item); - if (!s) { - fprintf( stderr, "Non-string object returned by Python call.\n" ); - } else { - result = list_new (result, s); - } - } - } - else if ( py_result == Py_None ) - { - result = L0; - } - else - { - char *s = python_to_string(py_result); - if (s) - result = list_new(0, s); - else - /* We have tried all we could. Return empty list. There are - cases, e.g. feature.feature function that should return - value for the benefit of Python code and which also can be - called by Jam code, where no sensible value can be - returned. We cannot even emit a warning, since there will - be a pile of them. */ - result = L0; - } - - Py_DECREF( py_result ); - } - else - { - PyErr_Print(); - fprintf(stderr,"Call failed\n"); - } - - return result; -} - - -module_t * python_module() -{ - static module_t * python = 0; - if ( !python ) - python = bindmodule("__python__"); - return python; -} - -#endif - - -/* - * evaluate_rule() - execute a rule invocation. 
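 *
 * Editor's note (illustrative): a Jamfile statement such as
 *
 *     DEPENDS a : b ;
 *
 * arrives here with rulename "DEPENDS" and frame->args holding the two
 * argument lists { "a" } and { "b" }.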
- */ - -LIST * -evaluate_rule( - char * rulename, - FRAME * frame ) -{ - LIST * result = L0; - RULE * rule; - profile_frame prof[1]; - module_t * prev_module = frame->module; - - LIST * l; - { - LOL arg_context_, * arg_context = &arg_context_; - if ( !frame->prev ) - lol_init(arg_context); - else - arg_context = frame->prev->args; - l = var_expand( L0, rulename, rulename+strlen(rulename), arg_context, 0 ); - } - - if ( !l ) - { - backtrace_line( frame->prev ); - printf( "warning: rulename %s expands to empty string\n", rulename ); - backtrace( frame->prev ); - return result; - } - - rulename = l->string; - rule = bindrule( l->string, frame->module ); - -#ifdef HAVE_PYTHON - if ( rule->python_function ) - { - /* The below messing with modules is due to the way modules are - * implemented in Jam. Suppose we are in module M1 now. The global - * variable map actually holds 'M1' variables, and M1->variables hold - * global variables. - * - * If we call Python right away, Python calls back Jam and then Jam - * does 'module M1 { }' then Jam will try to swap the current global - * variables with M1->variables. The result will be that global - * variables map will hold global variables, and any variable settings - * we do will go to the global module, not M1. - * - * By restoring basic state, where the global variable map holds global - * variable, we make sure any future 'module M1' entry will work OK. - */ - - LIST * result; - module_t * m = python_module(); - - frame->module = m; - - exit_module( prev_module ); - enter_module( m ); - - result = call_python_function( rule, frame ); - - exit_module( m ); - enter_module ( prev_module ); - - return result; - } -#endif - - /* Drop the rule name. */ - l = list_pop_front( l ); - - /* Tack the rest of the expansion onto the front of the first argument. */ - frame->args->list[0] = list_append( l, lol_get( frame->args, 0 ) ); - - if ( DEBUG_COMPILE ) - { - /* Try hard to indicate in which module the rule is going to execute. */ - if ( rule->module != frame->module - && rule->procedure != 0 && strcmp( rulename, rule->procedure->rulename ) ) - { - char buf[256] = ""; - strncat( buf, rule->module->name, sizeof( buf ) - 1 ); - strncat( buf, rule->name, sizeof( buf ) - 1 ); - debug_compile( 1, buf, frame ); - } - else - { - debug_compile( 1, rulename, frame ); - } - - lol_print( frame->args ); - printf( "\n" ); - } - - if ( rule->procedure && rule->module != prev_module ) - { - /* Propagate current module to nested rule invocations. */ - frame->module = rule->module; - - /* Swap variables. */ - exit_module( prev_module ); - enter_module( rule->module ); - } - - /* Record current rule name in frame. */ - if ( rule->procedure ) - { - frame->rulename = rulename; - /* And enter record profile info. */ - if ( DEBUG_PROFILE ) - profile_enter( rule->procedure->rulename, prof ); - } - - /* Check traditional targets $(<) and sources $(>). */ - if ( !rule->actions && !rule->procedure ) - { - backtrace_line( frame->prev ); - printf( "rule %s unknown in module %s\n", rule->name, frame->module->name ); - backtrace( frame->prev ); - exit( 1 ); - } - - /* If this rule will be executed for updating the targets then construct the - * action for make(). - */ - if ( rule->actions ) - { - TARGETS * t; - ACTION * action; - - /* The action is associated with this instance of this rule. 
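Each enter_module()/exit_module() pair in evaluate_rule() above follows a strict save-swap/restore discipline; getting one leg wrong leaves the global variable map attached to the wrong module, which is exactly the hazard the long Python-related comment earlier describes. The pattern, reduced to a sketch built from the engine calls used above (error paths omitted):

    module_t * prev = frame->module;

    frame->module = rule->module;
    exit_module( prev );            /* park the caller's variables    */
    enter_module( rule->module );   /* swap in the callee's variables */

    /* ... evaluate the rule body ... */

    exit_module( rule->module );    /* restore in reverse order */
    enter_module( prev );
    frame->module = prev;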
*/ - action = (ACTION *)BJAM_MALLOC( sizeof( ACTION ) ); - memset( (char *)action, '\0', sizeof( *action ) ); - - action->rule = rule; - action->targets = targetlist( (TARGETS *)0, lol_get( frame->args, 0 ) ); - action->sources = targetlist( (TARGETS *)0, lol_get( frame->args, 1 ) ); - - /* If we have a group of targets all being built using the same action - * then we must not allow any of them to be used as sources unless they - * had all already been built in the first place or their joined action - * has had a chance to finish its work and build all of them anew. - * - * Without this it might be possible, in case of a multi-process build, - * for their action, triggered by buiding one of the targets, to still - * be running when another target in the group reports as done in order - * to avoid triggering the same action again and gets used prematurely. - * - * As a quick-fix to achieve this effect we make all the targets list - * each other as 'included targets'. More precisely, we mark the first - * listed target as including all the other targets in the list and vice - * versa. This makes anyone depending on any of those targets implicitly - * depend on all of them, thus making sure none of those targets can be - * used as sources until all of them have been built. Note that direct - * dependencies could not have been used due to the 'circular - * dependency' issue. - * - * TODO: Although the current implementation solves the problem of one - * of the targets getting used before its action completes its work it - * also forces the action to run whenever any of the targets in the - * group is not up to date even though some of them might not actually - * be used by the targets being built. We should see how we can - * correctly recognize such cases and use that to avoid running the - * action if possible and not rebuild targets not actually depending on - * targets that are not up to date. - * - * TODO: Using the 'include' feature might have side-effects due to - * interaction with the actual 'inclusion scanning' system. This should - * be checked. - */ - if ( action->targets ) - { - TARGET * t0 = action->targets->target; - for ( t = action->targets->next; t; t = t->next ) - { - target_include( t->target, t0 ); - target_include( t0, t->target ); - } - } - - /* Append this action to the actions of each target. */ - for ( t = action->targets; t; t = t->next ) - t->target->actions = actionlist( t->target->actions, action ); - } - - /* Now recursively compile any parse tree associated with this rule. - * parse_refer()/parse_free() call pair added to ensure rule not freed - * during use. - */ - if ( rule->procedure ) - { - SETTINGS * local_args = collect_arguments( rule, frame ); - PARSE * parse = rule->procedure; - parse_refer( parse ); - - pushsettings( local_args ); - result = parse_evaluate( parse, frame ); - popsettings( local_args ); - freesettings( local_args ); - - parse_free( parse ); - } - - if ( frame->module != prev_module ) - { - exit_module( frame->module ); - enter_module( prev_module ); - } - - if ( DEBUG_PROFILE && rule->procedure ) - profile_exit( prof ); - - if ( DEBUG_COMPILE ) - debug_compile( -1, 0, frame); - - return result; -} - - -/* - * Call the given rule with the specified parameters. The parameters should be - * of type LIST* and end with a NULL pointer. This differs from 'evaluate_rule' - * in that frame for the called rule is prepared inside 'call_rule'. 
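The 'included targets' quick-fix described above is easiest to see on a concrete case. For a single action producing three targets, with T0, T1 and T2 standing for their TARGET pointers, the loop issues these calls, so a dependency on any one of the three transitively waits for all of them:

    /* Illustration of the marking loop for targets T0 T1 T2: */
    target_include( T1, T0 );  target_include( T0, T1 );
    target_include( T2, T0 );  target_include( T0, T2 );
    /* => waiting on T1 or T2 implies waiting on T0, and waiting on
     *    T0 implies waiting on T1 and T2 as well. */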
- * - * This function is useful when a builtin rule (in C) wants to call another rule - * which might be implemented in Jam. - */ - -LIST * call_rule( char * rulename, FRAME * caller_frame, ... ) -{ - va_list va; - LIST * result; - - FRAME inner[1]; - frame_init( inner ); - inner->prev = caller_frame; - inner->prev_user = caller_frame->module->user_module ? - caller_frame : caller_frame->prev_user; - inner->module = caller_frame->module; - inner->procedure = 0; - - va_start( va, caller_frame ); - for ( ; ; ) - { - LIST * l = va_arg( va, LIST* ); - if ( !l ) - break; - lol_add( inner->args, l ); - } - va_end( va ); - - result = evaluate_rule( rulename, inner ); - - frame_free( inner ); - - return result; -} - - -/* - * compile_rules() - compile a chain of rules - * - * parse->left single rule - * parse->right more compile_rules() by right-recursion - */ - -LIST * compile_rules( PARSE * parse, FRAME * frame ) -{ - /* Ignore result from first statement; return the 2nd. */ - /* Optimize recursion on the right by looping. */ - do list_free( parse_evaluate( parse->left, frame ) ); - while ( ( parse = parse->right )->func == compile_rules ); - return parse_evaluate( parse, frame ); -} - - -/* - * assign_var_mode() - convert ASSIGN_XXX compilation flag into corresponding - * VAR_XXX variable set flag. - */ - -static int assign_var_mode( int parsenum, char const * * tracetext ) -{ - char const * trace; - int setflag; - switch ( parsenum ) - { - case ASSIGN_SET : setflag = VAR_SET ; trace = "=" ; break; - case ASSIGN_APPEND : setflag = VAR_APPEND ; trace = "+="; break; - case ASSIGN_DEFAULT: setflag = VAR_DEFAULT; trace = "?="; break; - default: setflag = VAR_SET ; trace = "" ; break; - } - if ( tracetext ) - *tracetext = trace ; - return setflag; -} - -/* - * compile_set() - compile the "set variable" statement - * - * parse->left variable names - * parse->right variable values - * parse->num ASSIGN_SET/APPEND/DEFAULT - */ - -LIST * compile_set( PARSE * parse, FRAME * frame ) -{ - LIST * nt = parse_evaluate( parse->left, frame ); - LIST * ns = parse_evaluate( parse->right, frame ); - LIST * l; - char const * trace; - int setflag = assign_var_mode( parse->num, &trace ); - - if ( DEBUG_COMPILE ) - { - debug_compile( 0, "set", frame ); - list_print( nt ); - printf( " %s ", trace ); - list_print( ns ); - printf( "\n" ); - } - - /* Call var_set to set variable. var_set keeps ns, so need to copy it. */ - for ( l = nt; l; l = list_next( l ) ) - var_set( l->string, list_copy( L0, ns ), setflag ); - list_free( nt ); - return ns; -} - - -/* - * compile_setcomp() - support for `rule` - save parse tree. - * - * parse->string rule name - * parse->left rules for rule - * parse->right optional list-of-lists describing arguments - */ - -LIST * compile_setcomp( PARSE * parse, FRAME * frame ) -{ - argument_list * arg_list = 0; - - /* Create new LOL describing argument requirements if supplied. */ - if ( parse->right ) - { - PARSE * p; - arg_list = args_new(); - for ( p = parse->right; p; p = p->left ) - lol_add( arg_list->data, parse_evaluate( p->right, frame ) ); - } - - new_rule_body( frame->module, parse->string, arg_list, parse->left, !parse->num ); - return L0; -} - - -/* - * compile_setexec() - support for `actions` - save execution string. - * - * parse->string rule name - * parse->string1 OS command string - * parse->num flags - * parse->left `bind` variables - * - * Note that the parse flags (as defined in compile.h) are transferred directly - * to the rule flags (as defined in rules.h). 
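A usage sketch for call_rule() as documented above; the rule name is hypothetical, each argument is a LIST * consumed by the callee, and the sequence must be closed with a NULL sentinel:

    /* From a C builtin: invoke a (possibly Jam-defined) rule with two
     * single-element argument lists. */
    LIST * r = call_rule( "my-rule", frame,
                          list_new( L0, newstr( "first" ) ),   /* $(1) */
                          list_new( L0, newstr( "second" ) ),  /* $(2) */
                          NULL );
    list_free( r );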
- */ - -LIST * compile_setexec( PARSE * parse, FRAME * frame ) -{ - LIST * bindlist = parse_evaluate( parse->left, frame ); - new_rule_actions( frame->module, parse->string, parse->string1, bindlist, parse->num ); - return L0; -} - - -/* - * compile_settings() - compile the "on =" (set variable on exec) statement. - * - * parse->left variable names - * parse->right target name - * parse->third variable value - * parse->num ASSIGN_SET/APPEND - */ - -LIST * compile_settings( PARSE * parse, FRAME * frame ) -{ - LIST * nt = parse_evaluate( parse->left, frame ); - LIST * ns = parse_evaluate( parse->third, frame ); - LIST * targets = parse_evaluate( parse->right, frame ); - LIST * ts; - char const * trace; - int setflag = assign_var_mode( parse->num, &trace ); - - if ( DEBUG_COMPILE ) - { - debug_compile( 0, "set", frame ); - list_print( nt ); - printf( " on " ); - list_print( targets ); - printf( " %s ", trace ); - list_print( ns ); - printf( "\n" ); - } - - /* Call addsettings() to save variable setting. addsettings() keeps ns, so - * need to copy it. Pass append flag to addsettings(). - */ - for ( ts = targets; ts; ts = list_next( ts ) ) - { - TARGET * t = bindtarget( ts->string ); - LIST * l; - - for ( l = nt; l; l = list_next( l ) ) - t->settings = addsettings( t->settings, setflag, l->string, - list_copy( (LIST *)0, ns ) ); - } - - list_free( nt ); - list_free( targets ); - return ns; -} - - -/* - * compile_switch() - compile 'switch' rule. - * - * parse->left switch value (only 1st used) - * parse->right cases - * - * cases->left 1st case - * cases->right next cases - * - * case->string argument to match - * case->left parse tree to execute - */ - -LIST * compile_switch( PARSE * parse, FRAME * frame ) -{ - LIST * nt = parse_evaluate( parse->left, frame ); - LIST * result = 0; - - if ( DEBUG_COMPILE ) - { - debug_compile( 0, "switch", frame ); - list_print( nt ); - printf( "\n" ); - } - - /* Step through cases. */ - for ( parse = parse->right; parse; parse = parse->right ) - { - if ( !glob( parse->left->string, nt ? nt->string : "" ) ) - { - /* Get & exec parse tree for this case. */ - parse = parse->left->left; - result = parse_evaluate( parse, frame ); - break; - } - } - - list_free( nt ); - return result; -} - - -/* - * debug_compile() - printf with indent to show rule expansion. - */ - -static void debug_compile( int which, char * s, FRAME * frame ) -{ - static int level = 0; - static char indent[36] = ">>>>|>>>>|>>>>|>>>>|>>>>|>>>>|>>>>|"; - - if ( which >= 0 ) - { - int i; - - print_source_line( frame->procedure ); - - i = ( level + 1 ) * 2; - while ( i > 35 ) - { - fputs( indent, stdout ); - i -= 35; - } - - printf( "%*.*s ", i, i, indent ); - } - - if ( s ) - printf( "%s ", s ); - - level += which; -} diff --git a/jam-files/engine/compile.h b/jam-files/engine/compile.h deleted file mode 100644 index 7d5191f0..00000000 --- a/jam-files/engine/compile.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 1993, 2000 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. 
- * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -#ifndef COMPILE_DWA20011022_H -# define COMPILE_DWA20011022_H - -# include "frames.h" -# include "parse.h" -# include "regexp.h" - -/* - * compile.h - compile parsed jam statements - */ - -void compile_builtins(); - -LIST *compile_append( PARSE *parse, FRAME *frame ); -LIST *compile_foreach( PARSE *parse, FRAME *frame ); -LIST *compile_if( PARSE *parse, FRAME *frame ); -LIST *compile_eval( PARSE *parse, FRAME *args ); -LIST *compile_include( PARSE *parse, FRAME *frame ); -LIST *compile_list( PARSE *parse, FRAME *frame ); -LIST *compile_local( PARSE *parse, FRAME *frame ); -LIST *compile_module( PARSE *parse, FRAME *frame ); -LIST *compile_class( PARSE *parse, FRAME *frame ); -LIST *compile_null( PARSE *parse, FRAME *frame ); -LIST *compile_on( PARSE *parse, FRAME *frame ); -LIST *compile_rule( PARSE *parse, FRAME *frame ); -LIST *compile_rules( PARSE *parse, FRAME *frame ); -LIST *compile_set( PARSE *parse, FRAME *frame ); -LIST *compile_setcomp( PARSE *parse, FRAME *frame ); -LIST *compile_setexec( PARSE *parse, FRAME *frame ); -LIST *compile_settings( PARSE *parse, FRAME *frame ); -LIST *compile_switch( PARSE *parse, FRAME *frame ); -LIST *compile_while( PARSE *parse, FRAME *frame ); - -LIST *evaluate_rule( char *rulename, FRAME *frame ); -LIST *call_rule( char *rulename, FRAME* caller_frame, ...); - -regexp* regex_compile( const char* pattern ); - -/* Flags for compile_set(), etc */ - -# define ASSIGN_SET 0x00 /* = assign variable */ -# define ASSIGN_APPEND 0x01 /* += append variable */ -# define ASSIGN_DEFAULT 0x02 /* set only if unset */ - -/* Flags for compile_setexec() */ - -# define EXEC_UPDATED 0x01 /* executes updated */ -# define EXEC_TOGETHER 0x02 /* executes together */ -# define EXEC_IGNORE 0x04 /* executes ignore */ -# define EXEC_QUIETLY 0x08 /* executes quietly */ -# define EXEC_PIECEMEAL 0x10 /* executes piecemeal */ -# define EXEC_EXISTING 0x20 /* executes existing */ - -/* Conditions for compile_if() */ - -# define EXPR_NOT 0 /* ! cond */ -# define EXPR_AND 1 /* cond && cond */ -# define EXPR_OR 2 /* cond || cond */ - -# define EXPR_EXISTS 3 /* arg */ -# define EXPR_EQUALS 4 /* arg = arg */ -# define EXPR_NOTEQ 5 /* arg != arg */ -# define EXPR_LESS 6 /* arg < arg */ -# define EXPR_LESSEQ 7 /* arg <= arg */ -# define EXPR_MORE 8 /* arg > arg */ -# define EXPR_MOREEQ 9 /* arg >= arg */ -# define EXPR_IN 10 /* arg in arg */ - -#endif - diff --git a/jam-files/engine/debian/changelog b/jam-files/engine/debian/changelog deleted file mode 100644 index 29084289..00000000 --- a/jam-files/engine/debian/changelog +++ /dev/null @@ -1,72 +0,0 @@ -bjam (3.1.12-1) unstable; urgency=low - - * New upstream release. - - -- Rene Rivera <grafik@redshift-software.com> Sat, 01 Oct 2005 00:00:00 +0000 - -bjam (3.1.11-1) unstable; urgency=low - - * New upstream release. - - -- Rene Rivera <grafik@redshift-software.com> Sat, 30 Apr 2005 00:00:00 +0000 - -bjam (3.1.10-1) unstable; urgency=low - - * New upstream release. - - -- Rene Rivera <grafik@redshift-software.com> Tue, 1 Jun 2004 05:42:35 +0000 - -bjam (3.1.9-2) unstable; urgency=low - - * Use default value of BOOST_BUILD_PATH is not is set in environment. - - -- Vladimir Prus <ghost@zigzag.lvk.cs.msu.su> Wed, 17 Dec 2003 16:44:35 +0300 - -bjam (3.1.9-1) unstable; urgency=low - - * Implement NATIVE_FILE builtin and several native rules. 
- - -- Vladimir Prus <ghost@zigzag.lvk.cs.msu.su> Thu, 11 Dec 2003 13:15:26 +0300 - -bjam (3.1.8-1) unstable; urgency=low - - * New upstream release. - - -- Vladimir Prus <ghost@zigzag.lvk.cs.msu.su> Tue, 4 Nov 2003 20:50:43 +0300 - -bjam (3.1.7-1) unstable; urgency=low - - * New upstream release. - - -- Vladimir Prus <ghost@zigzag.lvk.cs.msu.su> Thu, 11 Sep 2003 10:45:44 +0400 - -bjam (3.1.6-1) unstable; urgency=low - - * New upstream release. - - -- Vladimir Prus <ghost@zigzag.lvk.cs.msu.su> Tue, 1 Jul 2003 09:12:18 +0400 - -bjam (3.1.5-1) unstable; urgency=low - - * New upstream release. - - -- Vladimir Prus <ghost@zigzag.lvk.cs.msu.su> Mon, 19 May 2003 14:05:13 +0400 - -bjam (3.1.3-2) unstable; urgency=low - - * Changed Debian package to be similar to Jam's package. - - -- Vladimir Prus <ghost@cs.msu.su> Thu, 10 Oct 2002 18:43:26 +0400 - -bjam (3.1.3-1) unstable; urgency=low - - * New upstream release. - - -- Vladimir Prus <ghost@zigzag.lvk.cs.msu.su> Fri, 4 Oct 2002 18:16:54 +0400 - -bjam (3.1.2-1) unstable; urgency=low - - * Initial Release. - - -- Vladimir Prus <ghost@cs.msu.su> Wed, 14 Aug 2002 14:08:00 +0400 - diff --git a/jam-files/engine/debian/control b/jam-files/engine/debian/control deleted file mode 100644 index c7f15193..00000000 --- a/jam-files/engine/debian/control +++ /dev/null @@ -1,16 +0,0 @@ -Source: bjam -Section: devel -Priority: optional -Maintainer: Vladimir Prus <ghost@cs.msu.su> -Build-Depends: debhelper (>> 3.0.0), docbook-to-man, bison -Standards-Version: 3.5.2 - -Package: bjam -Architecture: any -Depends: ${shlibs:Depends} -Description: Build tool - Boost.Jam is a portable build tool with its own interpreted language, which - allows to implement rather complex logic in a readable way and without - resorting to external programs. It is a descendant of Jam/MR tool modified to - suit the needs of Boost.Build. In particular, modules and rule parameters - were added, as well as several new builtins. diff --git a/jam-files/engine/debian/copyright b/jam-files/engine/debian/copyright deleted file mode 100644 index f72e4e3a..00000000 --- a/jam-files/engine/debian/copyright +++ /dev/null @@ -1,25 +0,0 @@ -This package was debianized by Vladimir Prus <ghost@cs.msu.su> on -Wed, 17 July 2002, 19:27:00 +0400. - -Copyright: - - /+\ - +\ Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - \+/ - - This is Release 2.4 of Jam/MR, a make-like program. - - License is hereby granted to use this software and distribute it - freely, as long as this copyright notice is retained and modifications - are clearly marked. - - ALL WARRANTIES ARE HEREBY DISCLAIMED. - -Some portions are also: - - Copyright 2001-2006 David Abrahams. - Copyright 2002-2006 Rene Rivera. - Copyright 2003-2006 Vladimir Prus. - - Distributed under the Boost Software License, Version 1.0. - (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) diff --git a/jam-files/engine/debian/jam.man.sgml b/jam-files/engine/debian/jam.man.sgml deleted file mode 100644 index ee21d4d8..00000000 --- a/jam-files/engine/debian/jam.man.sgml +++ /dev/null @@ -1,236 +0,0 @@ -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V4.1//EN" [ - -<!-- Process this file with docbook-to-man to generate an nroff manual - page: `docbook-to-man manpage.sgml > manpage.1'. You may view - the manual page with: `docbook-to-man manpage.sgml | nroff -man | - less'. 
A typical entry in a Makefile or Makefile.am is: - -manpage.1: manpage.sgml - docbook-to-man $< > $@ - --> - - <!ENTITY dhfirstname "<firstname>Yann</firstname>"> - <!ENTITY dhsurname "<surname>Dirson</surname>"> - <!-- Please adjust the date whenever revising the manpage. --> - <!ENTITY dhdate "<date>mai 23, 2001</date>"> - <!ENTITY dhemail "<email>dirson@debian.org</email>"> - <!ENTITY dhusername "Yann Dirson"> - <!ENTITY dhpackage "jam"> - - <!ENTITY debian "<productname>Debian GNU/Linux</productname>"> - <!ENTITY gnu "<acronym>GNU</acronym>"> -]> - -<refentry> - <refentryinfo> - <address> - &dhemail; - </address> - <author> - &dhfirstname; - &dhsurname; - </author> - <copyright> - <year>2001</year> - <holder>&dhusername;</holder> - </copyright> - &dhdate; - </refentryinfo> - - <refmeta> - <refentrytitle>JAM</refentrytitle> - <manvolnum>1</manvolnum> - </refmeta> - - <refnamediv> - <refname>Jam/MR</refname> - <refpurpose>Make(1) Redux</refpurpose> - </refnamediv> - - <refsynopsisdiv> - <cmdsynopsis> - <command>jam</command> - - <arg><option>-a</option></arg> - <arg><option>-n</option></arg> - <arg><option>-v</option></arg> - - <arg><option>-d <replaceable/debug/</option></arg> - <arg><option>-f <replaceable/jambase/</option></arg> - <arg><option>-j <replaceable/jobs/</option></arg> - <arg><option>-o <replaceable/actionsfile/</option></arg> - <arg><option>-s <replaceable/var/=<replaceable/value/</option></arg> - <arg><option>-t <replaceable/target/</option></arg> - - <arg repeat><option><replaceable/target/</option></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <refsect1> - <title>DESCRIPTION</title> - - <para>Jam is a program construction tool, like make(1).</para> - - <para>Jam recursively builds target files from source files, using - dependency information and updating actions expressed in the - Jambase file, which is written in jam's own interpreted language. - The default Jambase is compiled into jam and provides a - boilerplate for common use, relying on a user-provide file - "Jamfile" to enumerate actual targets and sources.</para> - </refsect1> - - <refsect1> - <title>OPTIONS</title> - - <variablelist> - <varlistentry> - <term><option/-a/</term> - <listitem> - <para>Build all targets anyway, even if they are up-to-date.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-d <replaceable/n/</option></term> - <listitem> - <para>Enable cummulative debugging levels from 1 to - <replaceable/n/. 
Interesting values are: - - <glosslist> - <glossentry><glossterm/1/ <glossdef><simpara/Show - actions (the default)/</glossdef></glossentry> - - <glossentry><glossterm/2/ <glossdef><simpara/Show - "quiet" actions and display all action - text/</glossdef></glossentry> - - <glossentry><glossterm/3/ <glossdef><simpara>Show - dependency analysis, and target/source - timestamps/paths</simpara></glossdef></glossentry> - - <glossentry><glossterm/4/ <glossdef><simpara/Show shell - arguments/</glossdef></glossentry> - - <glossentry><glossterm/5/ <glossdef><simpara/Show rule - invocations and variable - expansions/</glossdef></glossentry> - - <glossentry><glossterm/6/ <glossdef><simpara>Show - directory/header file/archive - scans</simpara></glossdef></glossentry> - - <glossentry><glossterm/7/ <glossdef><simpara/Show - variable settings/</glossdef></glossentry> - - <glossentry><glossterm/8/ <glossdef><simpara/Show - variable fetches/</glossdef></glossentry> - - <glossentry><glossterm/9/ <glossdef><simpara/Show - variable manipulation, scanner - tokens/</glossdef></glossentry> - </glosslist> - </para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-d +<replaceable/n/</option></term> - <listitem> - <para>Enable debugging level <replaceable/n/.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option/-d 0/</term> - <listitem> - <para>Turn off all debugging levels. Only errors are not - suppressed.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-f <replaceable/jambase/</option></term> - <listitem> - <para>Read <replaceable/jambase/ instead of using the - built-in Jambase. Only one <option/-f/ flag is permitted, - but the <replaceable/jambase/ may explicitly include other - files.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-j <replaceable/n/</option></term> - <listitem> - <para>Run up to <replaceable/n/ shell commands concurrently - (UNIX and NT only). The default is 1.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option/-n/</term> - <listitem> - <para>Don't actually execute the updating actions, but do - everything else. This changes the debug level default to - <option/-d2/.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-o <replaceable/file/</option></term> - <listitem> - <para>Write the updating actions to the specified file - instead of running them (or outputting them, as on the - Mac).</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-s <replaceable/var/=<replaceable/value/</option></term> - <listitem> - <para>Set the variable <replaceable/var/ to - <replaceable/value/, overriding both internal variables and - variables imported from the environment. 
</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-t <replaceable/target/</option></term> - <listitem> - <para>Rebuild <replaceable/target/ and everything that - depends on it, even if it is up-to-date.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option/-v/</term> - <listitem> - <para>Print the version of jam and exit.</para> - </listitem> - </varlistentry> - - </variablelist> - </refsect1> - - <refsect1> - <title>SEE ALSO</title> - - <para>Jam is documented fully in HTML pages available on Debian - systems from - <filename>/usr/share/doc/jam/Jam.html</filename>.</para> - </refsect1> - - <refsect1> - <title>AUTHOR</title> - - <para>This manual page was created by &dhusername; &dhemail; from - the <filename/Jam.html/ documentation, for the &debian; system - (but may be used by others).</para> - </refsect1> -</refentry> - -<!-- Keep this comment at the end of the file -Local variables: -sgml-omittag:t -sgml-shorttag:t -End: ---> diff --git a/jam-files/engine/debian/rules b/jam-files/engine/debian/rules deleted file mode 100755 index 756052a3..00000000 --- a/jam-files/engine/debian/rules +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/make -f -# Sample debian/rules that uses debhelper. -# GNU copyright 1997 to 1999 by Joey Hess. -# GNU copyright 2001 by Yann Dirson. - -# This is the debian/rules file for packages jam and ftjam -# It should be usable with both packages without any change - -# Uncomment this to turn on verbose mode. -#export DH_VERBOSE=1 - -# This is the debhelper compatability version to use. -export DH_COMPAT=3 - -topdir=$(shell pwd) - -jam=bjam -binname=bjam - -build: build-stamp -build-stamp: debian/jam.1 - dh_testdir - - ./build.sh - - touch build-stamp - -%.1: %.man.sgml - /usr/bin/docbook-to-man $< > $@ - -clean: - dh_testdir - dh_testroot - rm -f build-stamp - rm -rf bin.* - rm -f jam0 debian/jam.1 - dh_clean - -install: build - dh_testdir - dh_testroot - dh_clean -k - dh_installdirs - - install -d ${topdir}/debian/${jam}/usr/bin - install -m755 bin.linuxx86/bjam ${topdir}/debian/${jam}/usr/bin/ - install -d ${topdir}/debian/${jam}/usr/share/man/man1/ - install -m644 debian/jam.1 ${topdir}/debian/${jam}/usr/share/man/man1/${binname}.1 - - -# Build architecture-independent files here. -binary-indep: build install -# We have nothing to do by default. - -# Build architecture-dependent files here. -binary-arch: build install - dh_testdir - dh_testroot - dh_installdocs README RELNOTES Jambase *.html -# dh_installemacsen -# dh_undocumented - dh_installchangelogs - dh_strip - dh_compress - dh_fixperms - dh_installdeb - dh_shlibdeps - dh_gencontrol - dh_md5sums - dh_builddeb - -binary: binary-indep binary-arch -.PHONY: build clean binary-indep binary-arch binary install configure diff --git a/jam-files/engine/debug.c b/jam-files/engine/debug.c deleted file mode 100644 index 7290555a..00000000 --- a/jam-files/engine/debug.c +++ /dev/null @@ -1,132 +0,0 @@ -/* - Copyright Rene Rivera 2005. - Distributed under the Boost Software License, Version 1.0. 
- (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) -*/ - -#include "jam.h" - -#include "hash.h" - -#include <time.h> -#include <assert.h> - - -static profile_frame * profile_stack = 0; -static struct hash * profile_hash = 0; -static profile_info profile_other = { "[OTHER]", 0, 0, 0, 0, 0 }; -static profile_info profile_total = { "[TOTAL]", 0, 0, 0, 0, 0 }; - - -profile_frame * profile_init( char * rulename, profile_frame * frame ) -{ - if ( DEBUG_PROFILE ) profile_enter( rulename, frame ); - return frame; -} - - -void profile_enter( char * rulename, profile_frame * frame ) -{ - if ( DEBUG_PROFILE ) - { - clock_t start = clock(); - profile_info info; - profile_info * p = &info; - - if ( !rulename ) p = &profile_other; - - if ( !profile_hash && rulename ) - profile_hash = hashinit( sizeof( profile_info ), "profile" ); - - info.name = rulename; - - if ( rulename && hashenter( profile_hash, (HASHDATA * *)&p ) ) - p->cumulative = p->net = p->num_entries = p->stack_count = p->memory = 0; - - ++p->num_entries; - ++p->stack_count; - - frame->info = p; - - frame->caller = profile_stack; - profile_stack = frame; - - frame->entry_time = clock(); - frame->overhead = 0; - frame->subrules = 0; - - /* caller pays for the time it takes to play with the hash table */ - if ( frame->caller ) - frame->caller->overhead += frame->entry_time - start; - } -} - - -void profile_memory( long mem ) -{ - if ( DEBUG_PROFILE ) - if ( profile_stack && profile_stack->info ) - profile_stack->info->memory += mem; -} - - -void profile_exit( profile_frame * frame ) -{ - if ( DEBUG_PROFILE ) - { - /* Cumulative time for this call. */ - clock_t t = clock() - frame->entry_time - frame->overhead; - /* If this rule is already present on the stack, don't add the time for - * this instance. - */ - if ( frame->info->stack_count == 1 ) - frame->info->cumulative += t; - /* Net time does not depend on presense of the same rule in call stack. - */ - frame->info->net += t - frame->subrules; - - if ( frame->caller ) - { - /* Caller's cumulative time must account for this overhead. */ - frame->caller->overhead += frame->overhead; - frame->caller->subrules += t; - } - /* Pop this stack frame. */ - --frame->info->stack_count; - profile_stack = frame->caller; - } -} - - -static void dump_profile_entry( void * p_, void * ignored ) -{ - profile_info * p = (profile_info *)p_; - unsigned long mem_each = ( p->memory / ( p->num_entries ? p->num_entries : 1 ) ); - double cumulative = p->cumulative; - double net = p->net; - double q = p->net; - q /= ( p->num_entries ? p->num_entries : 1 ); - cumulative /= CLOCKS_PER_SEC; - net /= CLOCKS_PER_SEC; - q /= CLOCKS_PER_SEC; - if ( !ignored ) - { - profile_total.cumulative += p->net; - profile_total.memory += p->memory; - } - printf( "%10ld %12.6f %12.6f %12.8f %10ld %10ld %s\n", p->num_entries, - cumulative, net, q, p->memory, mem_each, p->name ); -} - - -void profile_dump() -{ - if ( profile_hash ) - { - printf( "%10s %12s %12s %12s %10s %10s %s\n", "--count--", "--gross--", - "--net--", "--each--", "--mem--", "--each--", "--name--" ); - hashenumerate( profile_hash, dump_profile_entry, 0 ); - dump_profile_entry( &profile_other, 0 ); - dump_profile_entry( &profile_total, (void *)1 ); - } -} diff --git a/jam-files/engine/debug.h b/jam-files/engine/debug.h deleted file mode 100644 index 115a8873..00000000 --- a/jam-files/engine/debug.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - Copyright Rene Rivera 2005. - Distributed under the Boost Software License, Version 1.0. 
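The cumulative/net split maintained by profile_exit() above deserves a worked example. Suppose a rule R runs for 5 seconds, 2 of which are spent inside subrules, and R appears only once on the call stack:

    /* On profile_exit() for R (illustration of the arithmetic above):
     *   info->cumulative += 5;       added only while stack_count == 1
     *   info->net        += 5 - 2;   time spent in subrules excluded
     * A recursive re-entry of R contributes nothing further to
     * cumulative, because its stack_count is then greater than 1. */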
- (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) -*/ -#ifndef BJAM_DEBUG_H -#define BJAM_DEBUG_H - -#include "jam.h" -#include <time.h> - - -struct profile_info -{ - /* name of rule being called */ - char* name; - /* cumulative time spent in rule */ - clock_t cumulative; - /* time spent in rule proper */ - clock_t net; - /* number of time rule was entered */ - unsigned long num_entries; - /* number of the times this function is present in stack */ - unsigned long stack_count; - /* bytes of memory allocated by the call */ - unsigned long memory; -}; -typedef struct profile_info profile_info; - -struct profile_frame -{ - /* permanent storage where data accumulates */ - profile_info* info; - /* overhead for profiling in this call */ - clock_t overhead; - /* time of last entry to rule */ - clock_t entry_time; - /* stack frame of caller */ - struct profile_frame* caller; - /* time spent in subrules */ - clock_t subrules; -}; -typedef struct profile_frame profile_frame; - -profile_frame * profile_init( char * rulename, profile_frame * frame ); -void profile_enter( char* rulename, profile_frame * frame ); -void profile_memory( long mem ); -void profile_exit( profile_frame * frame ); -void profile_dump(); - -#define PROFILE_ENTER( scope ) profile_frame PROF_ ## scope, *PROF_ ## scope ## _p = profile_init( #scope, &PROF_ ## scope ) -#define PROFILE_EXIT( scope ) profile_exit( PROF_ ## scope ## _p ) - -#endif diff --git a/jam-files/engine/execcmd.h b/jam-files/engine/execcmd.h deleted file mode 100644 index 67f2b839..00000000 --- a/jam-files/engine/execcmd.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * execcmd.h - execute a shell script. - * - * Defines the interface to be implemented in platform specific implementation - * modules. - * - * 05/04/94 (seiwald) - async multiprocess interface - */ - -#ifndef EXECCMD_H -#define EXECCMD_H - -#include <time.h> - -typedef struct timing_info -{ - double system; - double user; - time_t start; - time_t end; -} timing_info; - -void exec_cmd -( - char * string, - void (* func)( void * closure, int status, timing_info *, char *, char * ), - void * closure, - LIST * shell, - char * action, - char * target -); - -int exec_wait(); - -#define EXEC_CMD_OK 0 -#define EXEC_CMD_FAIL 1 -#define EXEC_CMD_INTR 2 - -#endif diff --git a/jam-files/engine/execmac.c b/jam-files/engine/execmac.c deleted file mode 100644 index 2ddddedd..00000000 --- a/jam-files/engine/execmac.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -#include "jam.h" -#include "lists.h" -#include "execcmd.h" -#include <errno.h> - -#ifdef OS_MAC - -/* - * execunix.c - execute a shell script on UNIX - * - * If $(JAMSHELL) is defined, uses that to formulate execvp(). - * The default is: - * - * /bin/sh -c % - * - * Each word must be an individual element in a jam variable value. - * - * In $(JAMSHELL), % expands to the command string and ! expands to - * the slot number (starting at 1) for multiprocess (-j) invocations. - * If $(JAMSHELL) doesn't include a %, it is tacked on as the last - * argument. - * - * Don't just set JAMSHELL to /bin/sh - it won't work! - * - * External routines: - * exec_cmd() - launch an async command execution. - * exec_wait() - wait and drive at most one execution completion. 
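exec_cmd() as declared in execcmd.h above is callback-driven: the caller supplies a completion function and then pumps exec_wait(). A minimal sketch of a caller (the callback name and command are hypothetical; passing 0 for the shell list selects the platform's default shell):

    /* Completion callback matching the execcmd.h signature above. */
    static void done( void * closure, int status, timing_info * time,
                      char * invoked, char * output )
    {
        if ( status != EXEC_CMD_OK )
            printf( "command failed (%d): %s\n", status, invoked );
    }

    /* ... */
    exec_cmd( "echo hello", done, 0, (LIST *)0, "action", "target" );
    while ( exec_wait() )
        ;  /* drive i/o and completions until no commands remain */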
- * - * Internal routines: - * onintr() - bump intr to note command interruption. - * - * 04/08/94 (seiwald) - Coherent/386 support added. - * 05/04/94 (seiwald) - async multiprocess interface - * 01/22/95 (seiwald) - $(JAMSHELL) support - */ - - -/* - * exec_cmd() - launch an async command execution. - */ - -void exec_cmd -( - char * string, - void (* func)( void * closure, int status, timing_info *, char *, char * ), - void * closure, - LIST * shell -) -{ - printf( "%s", string ); - (*func)( closure, EXEC_CMD_OK ); -} - -/* - * exec_wait() - wait and drive at most one execution completion. - */ - -int exec_wait() -{ - return 0; -} - -#endif /* OS_MAC */ diff --git a/jam-files/engine/execnt.c b/jam-files/engine/execnt.c deleted file mode 100644 index 76420451..00000000 --- a/jam-files/engine/execnt.c +++ /dev/null @@ -1,1296 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Copyright 2007 Rene Rivera. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -#include "jam.h" -#include "lists.h" -#include "execcmd.h" -#include "pathsys.h" -#include "string.h" -#include "output.h" -#include <errno.h> -#include <assert.h> -#include <ctype.h> -#include <time.h> -#include <math.h> - -#ifdef USE_EXECNT - -#define WIN32_LEAN_AND_MEAN -#include <windows.h> -#include <process.h> -#include <tlhelp32.h> - -/* - * execnt.c - execute a shell command on Windows NT - * - * If $(JAMSHELL) is defined, uses that to formulate execvp()/spawnvp(). - * The default is: - * - * /bin/sh -c % [ on UNIX/AmigaOS ] - * cmd.exe /c % [ on Windows NT ] - * - * Each word must be an individual element in a jam variable value. - * - * In $(JAMSHELL), % expands to the command string and ! expands to - * the slot number (starting at 1) for multiprocess (-j) invocations. - * If $(JAMSHELL) doesn't include a %, it is tacked on as the last - * argument. - * - * Don't just set JAMSHELL to /bin/sh or cmd.exe - it won't work! - * - * External routines: - * exec_cmd() - launch an async command execution. - * exec_wait() - wait and drive at most one execution completion. - * - * Internal routines: - * onintr() - bump intr to note command interruption. - * - * 04/08/94 (seiwald) - Coherent/386 support added. - * 05/04/94 (seiwald) - async multiprocess interface - * 01/22/95 (seiwald) - $(JAMSHELL) support - * 06/02/97 (gsar) - full async multiprocess support for Win32 - */ - -/* get the maximum command line length according to the OS */ -int maxline(); - -/* delete and argv list */ -static void free_argv(char**); -/* Convert a command string into arguments for spawnvp. 
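The JAMSHELL convention documented above ('%' expands to the command text, '!' to the one-based slot number, and the command is appended if no '%' is present) is implemented by the argv loop further down in exec_cmd(). An illustration, assuming JAMSHELL = sh -c % ! and a command sitting in slot 2:

    /* argv as formulated by the substitution loop (illustration): */
    argv[ 0 ] = "sh";
    argv[ 1 ] = "-c";
    argv[ 2 ] = command;   /* '%' -> the command string    */
    argv[ 3 ] = "3";       /* '!' -> slot + 1, here 2 + 1  */
    argv[ 4 ] = 0;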
*/ -static char** string_to_args(const char*); -/* bump intr to note command interruption */ -static void onintr(int); -/* If the command is suitable for execution via spawnvp */ -long can_spawn(char*); -/* Add two 64-bit unsigned numbers, h1l1 and h2l2 */ -static FILETIME add_64( - unsigned long h1, unsigned long l1, - unsigned long h2, unsigned long l2); -static FILETIME add_FILETIME(FILETIME t1, FILETIME t2); -static FILETIME negate_FILETIME(FILETIME t); -/* Convert a FILETIME to a number of seconds */ -static double filetime_seconds(FILETIME t); -/* record the timing info for the process */ -static void record_times(HANDLE, timing_info*); -/* calc the current running time of an *active* process */ -static double running_time(HANDLE); -/* */ -DWORD get_process_id(HANDLE); -/* terminate the given process, after terminating all its children */ -static void kill_process_tree(DWORD, HANDLE); -/* waits for a command to complete or for the given timeout, whichever is first */ -static int try_wait(int timeoutMillis); -/* reads any pending output for running commands */ -static void read_output(); -/* checks if a command ran out of time, and kills it */ -static int try_kill_one(); -/* */ -static double creation_time(HANDLE); -/* Recursive check if first process is parent (directly or indirectly) of -the second one. */ -static int is_parent_child(DWORD, DWORD); -/* */ -static void close_alert(HANDLE); -/* close any alerts hanging around */ -static void close_alerts(); - -/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ - -static int intr = 0; -static int cmdsrunning = 0; -static void (* istat)( int ); - - -/* The list of commands we run. */ -static struct -{ - string action; /* buffer to hold action */ - string target; /* buffer to hold target */ - string command; /* buffer to hold command being invoked */ - - /* Temporary batch file used to execute the action when needed. */ - char * tempfile_bat; - - /* Pipes for communicating with the child process. Parent reads from (0), - * child writes to (1). - */ - HANDLE pipe_out[ 2 ]; - HANDLE pipe_err[ 2 ]; - - string buffer_out; /* buffer to hold stdout, if any */ - string buffer_err; /* buffer to hold stderr, if any */ - - PROCESS_INFORMATION pi; /* running process information */ - DWORD exit_code; /* executed command's exit code */ - int exit_reason; /* reason why a command completed */ - - /* Function called when the command completes. */ - void (* func)( void * closure, int status, timing_info *, char *, char * ); - - /* Opaque data passed back to the 'func' callback called when the command - * completes. - */ - void * closure; -} -cmdtab[ MAXJOBS ] = { { 0 } }; - - -/* - * Execution unit tests. - */ - -void execnt_unit_test() -{ -#if !defined( NDEBUG ) - /* vc6 preprocessor is broken, so assert with these strings gets confused. - * Use a table instead. 
- */ - typedef struct test { char * command; int result; } test; - test tests[] = { - { "x", 0 }, - { "x\n ", 0 }, - { "x\ny", 1 }, - { "x\n\n y", 1 }, - { "echo x > foo.bar", 1 }, - { "echo x < foo.bar", 1 }, - { "echo x \">\" foo.bar", 0 }, - { "echo x \"<\" foo.bar", 0 }, - { "echo x \\\">\\\" foo.bar", 1 }, - { "echo x \\\"<\\\" foo.bar", 1 } }; - int i; - for ( i = 0; i < sizeof( tests ) / sizeof( *tests ); ++i ) - assert( !can_spawn( tests[ i ].command ) == tests[ i ].result ); - - { - char * long_command = BJAM_MALLOC_ATOMIC( MAXLINE + 10 ); - assert( long_command != 0 ); - memset( long_command, 'x', MAXLINE + 9 ); - long_command[ MAXLINE + 9 ] = 0; - assert( can_spawn( long_command ) == MAXLINE + 9 ); - BJAM_FREE( long_command ); - } - - { - /* Work around vc6 bug; it doesn't like escaped string - * literals inside assert - */ - char * * argv = string_to_args(" \"g++\" -c -I\"Foobar\"" ); - char const expected[] = "-c -I\"Foobar\""; - - assert( !strcmp( argv[ 0 ], "g++" ) ); - assert( !strcmp( argv[ 1 ], expected ) ); - free_argv( argv ); - } -#endif -} - - -/* - * exec_cmd() - launch an async command execution. - */ - -void exec_cmd -( - char * command, - void (* func)( void * closure, int status, timing_info *, char * invoked_command, char * command_output ), - void * closure, - LIST * shell, - char * action, - char * target -) -{ - int slot; - int raw_cmd = 0 ; - char * argv_static[ MAXARGC + 1 ]; /* +1 for NULL */ - char * * argv = argv_static; - char * p; - char * command_orig = command; - - /* Check to see if we need to hack around the line-length limitation. Look - * for a JAMSHELL setting of "%", indicating that the command should be - * invoked directly. - */ - if ( shell && !strcmp( shell->string, "%" ) && !list_next( shell ) ) - { - raw_cmd = 1; - shell = 0; - } - - /* Find a slot in the running commands table for this one. */ - for ( slot = 0; slot < MAXJOBS; ++slot ) - if ( !cmdtab[ slot ].pi.hProcess ) - break; - if ( slot == MAXJOBS ) - { - printf( "no slots for child!\n" ); - exit( EXITBAD ); - } - - /* Compute the name of a temp batch file, for possible use. */ - if ( !cmdtab[ slot ].tempfile_bat ) - { - char const * tempdir = path_tmpdir(); - DWORD procID = GetCurrentProcessId(); - - /* SVA - allocate 64 bytes extra just to be safe. */ - cmdtab[ slot ].tempfile_bat = BJAM_MALLOC_ATOMIC( strlen( tempdir ) + 64 ); - - sprintf( cmdtab[ slot ].tempfile_bat, "%s\\jam%d-%02d.bat", - tempdir, procID, slot ); - } - - /* Trim leading, -ending- white space */ - while ( *( command + 1 ) && isspace( *command ) ) - ++command; - - /* Write to .BAT file unless the line would be too long and it meets the - * other spawnability criteria. - */ - if ( raw_cmd && ( can_spawn( command ) >= MAXLINE ) ) - { - if ( DEBUG_EXECCMD ) - printf("Executing raw command directly\n"); - } - else - { - FILE * f = 0; - int tries = 0; - raw_cmd = 0; - - /* Write command to bat file. For some reason this open can fail - * intermitently. But doing some retries works. Most likely this is due - * to a previously existing file of the same name that happens to be - * opened by an active virus scanner. Pointed out and fixed by Bronek - * Kozicki. 
- */ - for ( ; !f && ( tries < 4 ); ++tries ) - { - f = fopen( cmdtab[ slot ].tempfile_bat, "w" ); - if ( !f && ( tries < 4 ) ) Sleep( 250 ); - } - if ( !f ) - { - printf( "failed to write command file!\n" ); - exit( EXITBAD ); - } - fputs( command, f ); - fclose( f ); - - command = cmdtab[ slot ].tempfile_bat; - - if ( DEBUG_EXECCMD ) - { - if ( shell ) - printf( "using user-specified shell: %s", shell->string ); - else - printf( "Executing through .bat file\n" ); - } - } - - /* Formulate argv; If shell was defined, be prepared for % and ! subs. - * Otherwise, use stock cmd.exe. - */ - if ( shell ) - { - int i; - char jobno[ 4 ]; - int gotpercent = 0; - - sprintf( jobno, "%d", slot + 1 ); - - for ( i = 0; shell && ( i < MAXARGC ); ++i, shell = list_next( shell ) ) - { - switch ( shell->string[ 0 ] ) - { - case '%': argv[ i ] = command; ++gotpercent; break; - case '!': argv[ i ] = jobno; break; - default : argv[ i ] = shell->string; - } - if ( DEBUG_EXECCMD ) - printf( "argv[%d] = '%s'\n", i, argv[ i ] ); - } - - if ( !gotpercent ) - argv[ i++ ] = command; - - argv[ i ] = 0; - } - else if ( raw_cmd ) - { - argv = string_to_args( command ); - } - else - { - argv[ 0 ] = "cmd.exe"; - argv[ 1 ] = "/Q/C"; /* anything more is non-portable */ - argv[ 2 ] = command; - argv[ 3 ] = 0; - } - - /* Catch interrupts whenever commands are running. */ - if ( !cmdsrunning++ ) - istat = signal( SIGINT, onintr ); - - /* Start the command. */ - { - SECURITY_ATTRIBUTES sa - = { sizeof( SECURITY_ATTRIBUTES ), 0, 0 }; - SECURITY_DESCRIPTOR sd; - STARTUPINFO si - = { sizeof( STARTUPINFO ), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - string cmd; - - /* Init the security data. */ - InitializeSecurityDescriptor( &sd, SECURITY_DESCRIPTOR_REVISION ); - SetSecurityDescriptorDacl( &sd, TRUE, NULL, FALSE ); - sa.lpSecurityDescriptor = &sd; - sa.bInheritHandle = TRUE; - - /* Create the stdout, which is also the merged out + err, pipe. */ - if ( !CreatePipe( &cmdtab[ slot ].pipe_out[ 0 ], - &cmdtab[ slot ].pipe_out[ 1 ], &sa, 0 ) ) - { - perror( "CreatePipe" ); - exit( EXITBAD ); - } - - /* Create the stdout, which is also the merged out+err, pipe. */ - if ( globs.pipe_action == 2 ) - { - if ( !CreatePipe( &cmdtab[ slot ].pipe_err[ 0 ], - &cmdtab[ slot ].pipe_err[ 1 ], &sa, 0 ) ) - { - perror( "CreatePipe" ); - exit( EXITBAD ); - } - } - - /* Set handle inheritance off for the pipe ends the parent reads from. */ - SetHandleInformation( cmdtab[ slot ].pipe_out[ 0 ], HANDLE_FLAG_INHERIT, 0 ); - if ( globs.pipe_action == 2 ) - SetHandleInformation( cmdtab[ slot ].pipe_err[ 0 ], HANDLE_FLAG_INHERIT, 0 ); - - /* Hide the child window, if any. */ - si.dwFlags |= STARTF_USESHOWWINDOW; - si.wShowWindow = SW_HIDE; - - /* Set the child outputs to the pipes. */ - si.dwFlags |= STARTF_USESTDHANDLES; - si.hStdOutput = cmdtab[ slot ].pipe_out[ 1 ]; - if ( globs.pipe_action == 2 ) - { - /* Pipe stderr to the action error output. */ - si.hStdError = cmdtab[ slot ].pipe_err[ 1 ]; - } - else if ( globs.pipe_action == 1 ) - { - /* Pipe stderr to the console error output. */ - si.hStdError = GetStdHandle( STD_ERROR_HANDLE ); - } - else - { - /* Pipe stderr to the action merged output. */ - si.hStdError = cmdtab[ slot ].pipe_out[ 1 ]; - } - - /* Let the child inherit stdin, as some commands assume it's available. */ - si.hStdInput = GetStdHandle(STD_INPUT_HANDLE); - - /* Save the operation for exec_wait() to find. 
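The pipe plumbing above follows the usual Win32 recipe for capturing a child's output: create the pipe with an inheritable security descriptor, then strip inheritance from the end the parent keeps, so only the child's write handle crosses the process boundary. Reduced to a sketch (error handling trimmed):

    SECURITY_ATTRIBUTES sa = { sizeof( SECURITY_ATTRIBUTES ), 0, TRUE };
    HANDLE rd, wr;   /* parent reads rd; child gets wr as hStdOutput */

    CreatePipe( &rd, &wr, &sa, 0 );
    SetHandleInformation( rd, HANDLE_FLAG_INHERIT, 0 );  /* parent-only */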
*/ - cmdtab[ slot ].func = func; - cmdtab[ slot ].closure = closure; - if ( action && target ) - { - string_copy( &cmdtab[ slot ].action, action ); - string_copy( &cmdtab[ slot ].target, target ); - } - else - { - string_free( &cmdtab[ slot ].action ); - string_new ( &cmdtab[ slot ].action ); - string_free( &cmdtab[ slot ].target ); - string_new ( &cmdtab[ slot ].target ); - } - string_copy( &cmdtab[ slot ].command, command_orig ); - - /* Put together the command we run. */ - { - char * * argp = argv; - string_new( &cmd ); - string_copy( &cmd, *(argp++) ); - while ( *argp ) - { - string_push_back( &cmd, ' ' ); - string_append( &cmd, *(argp++) ); - } - } - - /* Create output buffers. */ - string_new( &cmdtab[ slot ].buffer_out ); - string_new( &cmdtab[ slot ].buffer_err ); - - /* Run the command by creating a sub-process for it. */ - if ( - ! CreateProcess( - NULL , /* application name */ - cmd.value , /* command line */ - NULL , /* process attributes */ - NULL , /* thread attributes */ - TRUE , /* inherit handles */ - CREATE_NEW_PROCESS_GROUP, /* create flags */ - NULL , /* env vars, null inherits env */ - NULL , /* current dir, null is our */ - /* current dir */ - &si , /* startup info */ - &cmdtab[ slot ].pi /* child process info, if created */ - ) - ) - { - perror( "CreateProcess" ); - exit( EXITBAD ); - } - - /* Clean up temporary stuff. */ - string_free( &cmd ); - } - - /* Wait until we are under the limit of concurrent commands. Do not trust - * globs.jobs alone. - */ - while ( ( cmdsrunning >= MAXJOBS ) || ( cmdsrunning >= globs.jobs ) ) - if ( !exec_wait() ) - break; - - if ( argv != argv_static ) - free_argv( argv ); -} - - -/* - * exec_wait() - * * wait and drive at most one execution completion. - * * waits for one command to complete, while processing the i/o for all - * ongoing commands. - * - * Returns 0 if called when there were no more commands being executed or 1 - * otherwise. - */ - -int exec_wait() -{ - int i = -1; - - /* Handle naive make1() which does not know if cmds are running. */ - if ( !cmdsrunning ) - return 0; - - /* Wait for a command to complete, while snarfing up any output. */ - do - { - /* Check for a complete command, briefly. */ - i = try_wait(500); - /* Read in the output of all running commands. */ - read_output(); - /* Close out pending debug style dialogs. */ - close_alerts(); - /* Check if a command ran out of time. */ - if ( i < 0 ) i = try_kill_one(); - } - while ( i < 0 ); - - /* We have a command... process it. */ - --cmdsrunning; - { - timing_info time; - int rstat; - - /* The time data for the command. */ - record_times( cmdtab[ i ].pi.hProcess, &time ); - - /* Clear the temp file. */ - if ( cmdtab[ i ].tempfile_bat ) - { - unlink( cmdtab[ i ].tempfile_bat ); - BJAM_FREE( cmdtab[ i ].tempfile_bat ); - cmdtab[ i ].tempfile_bat = NULL; - } - - /* Find out the process exit code. */ - GetExitCodeProcess( cmdtab[ i ].pi.hProcess, &cmdtab[ i ].exit_code ); - - /* The dispossition of the command. */ - if ( intr ) - rstat = EXEC_CMD_INTR; - else if ( cmdtab[ i ].exit_code != 0 ) - rstat = EXEC_CMD_FAIL; - else - rstat = EXEC_CMD_OK; - - /* Output the action block. */ - out_action( - cmdtab[ i ].action.size > 0 ? cmdtab[ i ].action.value : 0, - cmdtab[ i ].target.size > 0 ? cmdtab[ i ].target.value : 0, - cmdtab[ i ].command.size > 0 ? cmdtab[ i ].command.value : 0, - cmdtab[ i ].buffer_out.size > 0 ? cmdtab[ i ].buffer_out.value : 0, - cmdtab[ i ].buffer_err.size > 0 ? 
cmdtab[ i ].buffer_err.value : 0, - cmdtab[ i ].exit_reason ); - - /* Call the callback, may call back to jam rule land. Assume -p0 in - * effect so only pass buffer containing merged output. - */ - (*cmdtab[ i ].func)( - cmdtab[ i ].closure, - rstat, - &time, - cmdtab[ i ].command.value, - cmdtab[ i ].buffer_out.value ); - - /* Clean up the command data, process, etc. */ - string_free( &cmdtab[ i ].action ); string_new( &cmdtab[ i ].action ); - string_free( &cmdtab[ i ].target ); string_new( &cmdtab[ i ].target ); - string_free( &cmdtab[ i ].command ); string_new( &cmdtab[ i ].command ); - if ( cmdtab[ i ].pi.hProcess ) { CloseHandle( cmdtab[ i ].pi.hProcess ); cmdtab[ i ].pi.hProcess = 0; } - if ( cmdtab[ i ].pi.hThread ) { CloseHandle( cmdtab[ i ].pi.hThread ); cmdtab[ i ].pi.hThread = 0; } - if ( cmdtab[ i ].pipe_out[ 0 ] ) { CloseHandle( cmdtab[ i ].pipe_out[ 0 ] ); cmdtab[ i ].pipe_out[ 0 ] = 0; } - if ( cmdtab[ i ].pipe_out[ 1 ] ) { CloseHandle( cmdtab[ i ].pipe_out[ 1 ] ); cmdtab[ i ].pipe_out[ 1 ] = 0; } - if ( cmdtab[ i ].pipe_err[ 0 ] ) { CloseHandle( cmdtab[ i ].pipe_err[ 0 ] ); cmdtab[ i ].pipe_err[ 0 ] = 0; } - if ( cmdtab[ i ].pipe_err[ 1 ] ) { CloseHandle( cmdtab[ i ].pipe_err[ 1 ] ); cmdtab[ i ].pipe_err[ 1 ] = 0; } - string_free( &cmdtab[ i ].buffer_out ); string_new( &cmdtab[ i ].buffer_out ); - string_free( &cmdtab[ i ].buffer_err ); string_new( &cmdtab[ i ].buffer_err ); - cmdtab[ i ].exit_code = 0; - cmdtab[ i ].exit_reason = EXIT_OK; - } - - return 1; -} - - -/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ - -static void free_argv( char * * args ) -{ - BJAM_FREE( args[ 0 ] ); - BJAM_FREE( args ); -} - - -/* - * For more details on Windows cmd.exe shell command-line length limitations see - * the following MSDN article: - * http://support.microsoft.com/default.aspx?scid=kb;en-us;830473 - */ - -int maxline() -{ - OSVERSIONINFO os_info; - os_info.dwOSVersionInfoSize = sizeof( os_info ); - GetVersionEx( &os_info ); - - if ( os_info.dwMajorVersion >= 5 ) return 8191; /* XP > */ - if ( os_info.dwMajorVersion == 4 ) return 2047; /* NT 4.x */ - return 996; /* NT 3.5.1 */ -} - - -/* - * Convert a command string into arguments for spawnvp(). The original code, - * inherited from ftjam, tried to break up every argument on the command-line, - * dealing with quotes, but that is really a waste of time on Win32, at least. - * It turns out that all you need to do is get the raw path to the executable in - * the first argument to spawnvp(), and you can pass all the rest of the - * command-line arguments to spawnvp() in one, un-processed string. - * - * New strategy: break the string in at most one place. - */ - -static char * * string_to_args( char const * string ) -{ - int src_len; - int in_quote; - char * line; - char const * src; - char * dst; - char * * argv; - - /* Drop leading and trailing whitespace if any. */ - while ( isspace( *string ) ) - ++string; - - src_len = strlen( string ); - while ( ( src_len > 0 ) && isspace( string[ src_len - 1 ] ) ) - --src_len; - - /* Copy the input string into a buffer we can modify. */ - line = (char *)BJAM_MALLOC_ATOMIC( src_len + 1 ); - if ( !line ) - return 0; - - /* Allocate the argv array. 
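The one-split strategy described above means only the executable path is actually parsed; everything after it travels as a single unprocessed argument string. The expected result, mirroring the assertion in execnt_unit_test() earlier in this file:

    char * * argv = string_to_args( " \"g++\" -c -I\"Foobar\"" );
    /* argv[ 0 ] == "g++"               quotes stripped from element 0
     * argv[ 1 ] == "-c -I\"Foobar\""   the rest, passed through as-is
     * argv[ 2 ] == 0                   NULL terminator               */
    free_argv( argv );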
- * element 0: stores the path to the executable - * element 1: stores the command-line arguments to the executable - * element 2: NULL terminator - */ - argv = (char * *)BJAM_MALLOC( 3 * sizeof( char * ) ); - if ( !argv ) - { - BJAM_FREE( line ); - return 0; - } - - /* Strip quotes from the first command-line argument and find where it ends. - * Quotes are illegal in Win32 pathnames, so we do not need to worry about - * preserving escaped quotes here. Spaces can not be escaped in Win32, only - * enclosed in quotes, so removing backslash escapes is also a non-issue. - */ - in_quote = 0; - for ( src = string, dst = line ; *src; ++src ) - { - if ( *src == '"' ) - in_quote = !in_quote; - else if ( !in_quote && isspace( *src ) ) - break; - else - *dst++ = *src; - } - *dst++ = 0; - argv[ 0 ] = line; - - /* Skip whitespace in src. */ - while ( isspace( *src ) ) - ++src; - - argv[ 1 ] = dst; - - /* Copy the rest of the arguments verbatim. */ - src_len -= src - string; - - /* Use strncat() because it appends a trailing nul. */ - *dst = 0; - strncat( dst, src, src_len ); - - argv[ 2 ] = 0; - - return argv; -} - - -static void onintr( int disp ) -{ - ++intr; - printf( "...interrupted\n" ); -} - - -/* - * can_spawn() - If the command is suitable for execution via spawnvp(), return - * a number >= the number of characters it would occupy on the command-line. - * Otherwise, return zero. - */ - -long can_spawn( char * command ) -{ - char * p; - char inquote = 0; - - /* Move to the first non-whitespace. */ - command += strspn( command, " \t" ); - - p = command; - - /* Look for newlines and unquoted i/o redirection. */ - do - { - p += strcspn( p, "'\n\"<>|" ); - - switch ( *p ) - { - case '\n': - /* Skip over any following spaces. */ - while ( isspace( *p ) ) - ++p; - /* Must use a .bat file if there is anything significant following - * the newline. - */ - if ( *p ) - return 0; - break; - - case '"': - case '\'': - if ( ( p > command ) && ( p[ -1 ] != '\\' ) ) - { - if ( inquote == *p ) - inquote = 0; - else if ( inquote == 0 ) - inquote = *p; - } - ++p; - break; - - case '<': - case '>': - case '|': - if ( !inquote ) - return 0; - ++p; - break; - } - } - while ( *p ); - - /* Return the number of characters the command will occupy. */ - return p - command; -} - - -/* 64-bit arithmetic helpers. */ - -/* Compute the carry bit from the addition of two 32-bit unsigned numbers. */ -#define add_carry_bit( a, b ) ( (((a) | (b)) >> 31) & (~((a) + (b)) >> 31) & 0x1 ) - -/* Compute the high 32 bits of the addition of two 64-bit unsigned numbers, h1l1 and h2l2. */ -#define add_64_hi( h1, l1, h2, l2 ) ((h1) + (h2) + add_carry_bit(l1, l2)) - - -/* - * Add two 64-bit unsigned numbers, h1l1 and h2l2. - */ - -static FILETIME add_64 -( - unsigned long h1, unsigned long l1, - unsigned long h2, unsigned long l2 -) -{ - FILETIME result; - result.dwLowDateTime = l1 + l2; - result.dwHighDateTime = add_64_hi( h1, l1, h2, l2 ); - return result; -} - - -static FILETIME add_FILETIME( FILETIME t1, FILETIME t2 ) -{ - return add_64( t1.dwHighDateTime, t1.dwLowDateTime, t2.dwHighDateTime, - t2.dwLowDateTime ); -} - - -static FILETIME negate_FILETIME( FILETIME t ) -{ - /* 2s complement negation */ - return add_64( ~t.dwHighDateTime, ~t.dwLowDateTime, 0, 1 ); -} - - -/* - * Convert a FILETIME to a number of seconds. 
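The add_carry_bit() macro above computes the carry of a 32-bit addition without needing a wider integer type. Two spot checks (with <assert.h>; the final & 0x1 makes the result come out the same for these operands whether unsigned long is 32 or 64 bits wide):

    assert( add_carry_bit( 0x80000000ul, 0x80000000ul ) == 1 ); /* wraps */
    assert( add_carry_bit( 0x80000000ul, 0x7FFFFFFFul ) == 0 ); /* sum 0xFFFFFFFF, no wrap */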
- */
-
-static double filetime_seconds( FILETIME t )
-{
-    return t.dwHighDateTime * ( (double)( 1UL << 31 ) * 2.0 * 1.0e-7 ) + t.dwLowDateTime * 1.0e-7;
-}
-
-
-/*
- * What should be a simple conversion turns out to be horribly complicated by
- * the deficiencies of MSVC and the Win32 API.
- */
-
-static time_t filetime_dt( FILETIME t_utc )
-{
-    static int calc_time_diff = 1;
-    static double time_diff;
-    if ( calc_time_diff )
-    {
-        struct tm t0_;
-        FILETIME f0_local;
-        FILETIME f0_;
-        SYSTEMTIME s0_;
-        GetSystemTime( &s0_ );
-        t0_.tm_year = s0_.wYear-1900;
-        t0_.tm_mon = s0_.wMonth-1;
-        t0_.tm_wday = s0_.wDayOfWeek;
-        t0_.tm_mday = s0_.wDay;
-        t0_.tm_hour = s0_.wHour;
-        t0_.tm_min = s0_.wMinute;
-        t0_.tm_sec = s0_.wSecond;
-        t0_.tm_isdst = 0;
-        SystemTimeToFileTime( &s0_, &f0_local );
-        LocalFileTimeToFileTime( &f0_local, &f0_ );
-        time_diff = filetime_seconds( f0_ ) - (double)mktime( &t0_ );
-        calc_time_diff = 0;
-    }
-    return ceil( filetime_seconds( t_utc ) - time_diff );
-}
-
-
-static void record_times( HANDLE process, timing_info * time )
-{
-    FILETIME creation;
-    FILETIME exit;
-    FILETIME kernel;
-    FILETIME user;
-    if ( GetProcessTimes( process, &creation, &exit, &kernel, &user ) )
-    {
-        time->system = filetime_seconds( kernel );
-        time->user = filetime_seconds( user );
-        time->start = filetime_dt ( creation );
-        time->end = filetime_dt ( exit );
-    }
-}
-
-
-#define IO_BUFFER_SIZE ( 16 * 1024 )
-
-static char ioBuffer[ IO_BUFFER_SIZE + 1 ];
-
-
-static void read_pipe
-(
-    HANDLE   in,  /* the pipe to read from */
-    string * out
-)
-{
-    DWORD bytesInBuffer = 0;
-    DWORD bytesAvailable = 0;
-
-    do
-    {
-        /* Check if we have any data to read. */
-        if ( !PeekNamedPipe( in, ioBuffer, IO_BUFFER_SIZE, &bytesInBuffer, &bytesAvailable, NULL ) )
-            bytesAvailable = 0;
-
-        /* Read in the available data. */
-        if ( bytesAvailable > 0 )
-        {
-            /* We only read in the available bytes, to avoid blocking. */
-            if ( ReadFile( in, ioBuffer,
-                bytesAvailable <= IO_BUFFER_SIZE ? bytesAvailable : IO_BUFFER_SIZE,
-                &bytesInBuffer, NULL ) )
-            {
-                if ( bytesInBuffer > 0 )
-                {
-                    /* Clean up some illegal chars. */
-                    int i;
-                    for ( i = 0; i < bytesInBuffer; ++i )
-                    {
-                        if ( ( (unsigned char)ioBuffer[ i ] < 1 ) )
-                            ioBuffer[ i ] = '?';
-                    }
-                    /* Null-terminate. */
-                    ioBuffer[ bytesInBuffer ] = '\0';
-                    /* Append to the output. */
-                    string_append( out, ioBuffer );
-                    /* Subtract what we read in. */
-                    bytesAvailable -= bytesInBuffer;
-                }
-                else
-                {
-                    /* Likely a read error, bail out. */
-                    bytesAvailable = 0;
-                }
-            }
-            else
-            {
-                /* Definitely a read error, bail out. */
-                bytesAvailable = 0;
-            }
-        }
-    }
-    while ( bytesAvailable > 0 );
-}
-
-
-static void read_output()
-{
-    int i;
-    for ( i = 0; i < globs.jobs && i < MAXJOBS; ++i )
-    {
-        /* Read stdout data. */
-        if ( cmdtab[ i ].pipe_out[ 0 ] )
-            read_pipe( cmdtab[ i ].pipe_out[ 0 ], & cmdtab[ i ].buffer_out );
-        /* Read stderr data. */
-        if ( cmdtab[ i ].pipe_err[ 0 ] )
-            read_pipe( cmdtab[ i ].pipe_err[ 0 ], & cmdtab[ i ].buffer_err );
-    }
-}
-
-
-/*
- * Waits for a single child process command to complete, or the timeout,
- * whichever comes first. Returns the index of the completed command in the
- * cmdtab array, or -1.
- */
-
-static int try_wait( int timeoutMillis )
-{
-    int i;
-    int num_active;
-    int wait_api_result;
-    HANDLE active_handles[ MAXJOBS ];
-    int active_procs[ MAXJOBS ];
-
-    /* Prepare a list of all active processes to wait for. */
-    for ( num_active = 0, i = 0; i < globs.jobs; ++i )
-    {
-        if ( cmdtab[ i ].pi.hProcess )
-        {
-            active_handles[ num_active ] = cmdtab[ i ].pi.hProcess;
-            active_procs[ num_active ] = i;
-            ++num_active;
-        }
-    }
-
-    /* Wait for a child to complete, or for our timeout window to expire. */
-    wait_api_result = WaitForMultipleObjects( num_active, active_handles,
-        FALSE, timeoutMillis );
-    if ( ( WAIT_OBJECT_0 <= wait_api_result ) &&
-        ( wait_api_result < WAIT_OBJECT_0 + num_active ) )
-    {
-        /* Terminated process detected - return its index. */
-        return active_procs[ wait_api_result - WAIT_OBJECT_0 ];
-    }
-
-    /* Timeout. */
-    return -1;
-}
-
-
-static int try_kill_one()
-{
-    /* Only need to check if a timeout was specified with the -l option. */
-    if ( globs.timeout > 0 )
-    {
-        int i;
-        for ( i = 0; i < globs.jobs; ++i )
-        {
-            double t = running_time( cmdtab[ i ].pi.hProcess );
-            if ( t > (double)globs.timeout )
-            {
-                /* The job may have left an alert dialog around; try and get
-                 * rid of it before killing.
-                 */
-                close_alert( cmdtab[ i ].pi.hProcess );
-                /* We have a "runaway" job, kill it. */
-                kill_process_tree( 0, cmdtab[ i ].pi.hProcess );
-                /* And return it marked as a timeout. */
-                cmdtab[ i ].exit_reason = EXIT_TIMEOUT;
-                return i;
-            }
-        }
-    }
-    return -1;
-}
-
-
-static void close_alerts()
-{
-    /* We only attempt this every 5 seconds or so, because it is not a cheap
-     * operation, and we will catch the alerts eventually. This check uses
-     * floats as some compilers define CLOCKS_PER_SEC as a float or double.
-     */
-    if ( ( (float)clock() / (float)( CLOCKS_PER_SEC * 5 ) ) < ( 1.0 / 5.0 ) )
-    {
-        int i;
-        for ( i = 0; i < globs.jobs; ++i )
-            close_alert( cmdtab[ i ].pi.hProcess );
-    }
-}
-
-
-/*
- * Calc the current running time of an *active* process.
- */
-
-static double running_time( HANDLE process )
-{
-    FILETIME creation;
-    FILETIME exit;
-    FILETIME kernel;
-    FILETIME user;
-    FILETIME current;
-    if ( GetProcessTimes( process, &creation, &exit, &kernel, &user ) )
-    {
-        /* Compute the elapsed time. */
-        GetSystemTimeAsFileTime( &current );
-        return filetime_seconds( add_FILETIME( current,
-            negate_FILETIME( creation ) ) );
-    }
-    return 0.0;
-}
-
-
-/* It is just stupidly silly that one has to do this. */
-typedef struct PROCESS_BASIC_INFORMATION__
-{
-    LONG ExitStatus;
-    PVOID PebBaseAddress;
-    ULONG AffinityMask;
-    LONG BasePriority;
-    ULONG UniqueProcessId;
-    ULONG InheritedFromUniqueProcessId;
-} PROCESS_BASIC_INFORMATION_;
-typedef LONG (__stdcall * NtQueryInformationProcess__)(
-    HANDLE ProcessHandle,
-    LONG ProcessInformationClass,
-    PVOID ProcessInformation,
-    ULONG ProcessInformationLength,
-    PULONG ReturnLength);
-static NtQueryInformationProcess__ NtQueryInformationProcess_ = NULL;
-static HMODULE NTDLL_ = NULL;
-DWORD get_process_id( HANDLE process )
-{
-    PROCESS_BASIC_INFORMATION_ pinfo;
-    if ( !NtQueryInformationProcess_ )
-    {
-        if ( ! NTDLL_ )
-            NTDLL_ = GetModuleHandleA( "ntdll" );
-        if ( NTDLL_ )
-            NtQueryInformationProcess_
-                = (NtQueryInformationProcess__)GetProcAddress( NTDLL_, "NtQueryInformationProcess" );
-    }
-    if ( NtQueryInformationProcess_ )
-    {
-        LONG r = (*NtQueryInformationProcess_)( process,
-            /* ProcessBasicInformation == */ 0, &pinfo,
-            sizeof( PROCESS_BASIC_INFORMATION_ ), NULL );
-        return pinfo.UniqueProcessId;
-    }
-    return 0;
-}
-
-
-/*
- * Not really optimal, or efficient, but it is easier this way, and it is not
- * like we are going to be killing thousands, or even tens of processes.
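
Worth noting: the ntdll lookup in get_process_id() above predates GetProcessId(), which Windows XP SP1 and later export from kernel32 and which returns the same identifier directly. A minimal sketch:

    #include <stdio.h>
    #include <windows.h>

    int main( void )
    {
        /* GetCurrentProcess() returns a pseudo-handle to this process. */
        HANDLE h = GetCurrentProcess();
        printf( "pid = %lu\n", (unsigned long)GetProcessId( h ) );  /* XP SP1+ */
        return 0;
    }
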
- */ - -static void kill_process_tree( DWORD pid, HANDLE process ) -{ - HANDLE process_snapshot_h = INVALID_HANDLE_VALUE; - if ( !pid ) - pid = get_process_id( process ); - process_snapshot_h = CreateToolhelp32Snapshot( TH32CS_SNAPPROCESS, 0 ); - - if ( INVALID_HANDLE_VALUE != process_snapshot_h ) - { - BOOL ok = TRUE; - PROCESSENTRY32 pinfo; - pinfo.dwSize = sizeof( PROCESSENTRY32 ); - for ( - ok = Process32First( process_snapshot_h, &pinfo ); - ok == TRUE; - ok = Process32Next( process_snapshot_h, &pinfo ) ) - { - if ( pinfo.th32ParentProcessID == pid ) - { - /* Found a child, recurse to kill it and anything else below it. - */ - HANDLE ph = OpenProcess( PROCESS_ALL_ACCESS, FALSE, - pinfo.th32ProcessID ); - if ( NULL != ph ) - { - kill_process_tree( pinfo.th32ProcessID, ph ); - CloseHandle( ph ); - } - } - } - CloseHandle( process_snapshot_h ); - } - /* Now that the children are all dead, kill the root. */ - TerminateProcess( process, -2 ); -} - - -static double creation_time( HANDLE process ) -{ - FILETIME creation; - FILETIME exit; - FILETIME kernel; - FILETIME user; - FILETIME current; - return GetProcessTimes( process, &creation, &exit, &kernel, &user ) - ? filetime_seconds( creation ) - : 0.0; -} - - -/* - * Recursive check if first process is parent (directly or indirectly) of the - * second one. Both processes are passed as process ids, not handles. Special - * return value 2 means that the second process is smss.exe and its parent - * process is System (first argument is ignored). - */ - -static int is_parent_child( DWORD parent, DWORD child ) -{ - HANDLE process_snapshot_h = INVALID_HANDLE_VALUE; - - if ( !child ) - return 0; - if ( parent == child ) - return 1; - - process_snapshot_h = CreateToolhelp32Snapshot( TH32CS_SNAPPROCESS, 0 ); - if ( INVALID_HANDLE_VALUE != process_snapshot_h ) - { - BOOL ok = TRUE; - PROCESSENTRY32 pinfo; - pinfo.dwSize = sizeof( PROCESSENTRY32 ); - for ( - ok = Process32First( process_snapshot_h, &pinfo ); - ok == TRUE; - ok = Process32Next( process_snapshot_h, &pinfo ) ) - { - if ( pinfo.th32ProcessID == child ) - { - /* Unfortunately, process ids are not really unique. There might - * be spurious "parent and child" relationship match between two - * non-related processes if real parent process of a given - * process has exited (while child process kept running as an - * "orphan") and the process id of such parent process has been - * reused by internals of the operating system when creating - * another process. - * - * Thus additional check is needed - process creation time. This - * check may fail (i.e. return 0) for system processes due to - * insufficient privileges, and that is OK. - */ - double tchild = 0.0; - double tparent = 0.0; - HANDLE hchild = OpenProcess( PROCESS_QUERY_INFORMATION, FALSE, pinfo.th32ProcessID ); - CloseHandle( process_snapshot_h ); - - /* csrss.exe may display message box like following: - * xyz.exe - Unable To Locate Component - * This application has failed to start because - * boost_foo-bar.dll was not found. Re-installing the - * application may fix the problem - * This actually happens when starting test process that depends - * on a dynamic library which failed to build. We want to - * automatically close these message boxes even though csrss.exe - * is not our child process. We may depend on the fact that (in - * all current versions of Windows) csrss.exe is directly child - * of the smss.exe process, which in turn is directly child of - * the System process, which always has process id == 4. 
This - * check must be performed before comparison of process creation - * times. - */ - if ( !stricmp( pinfo.szExeFile, "csrss.exe" ) && - ( is_parent_child( parent, pinfo.th32ParentProcessID ) == 2 ) ) - return 1; - if ( !stricmp( pinfo.szExeFile, "smss.exe" ) && - ( pinfo.th32ParentProcessID == 4 ) ) - return 2; - - if ( hchild ) - { - HANDLE hparent = OpenProcess( PROCESS_QUERY_INFORMATION, - FALSE, pinfo.th32ParentProcessID ); - if ( hparent ) - { - tchild = creation_time( hchild ); - tparent = creation_time( hparent ); - CloseHandle( hparent ); - } - CloseHandle( hchild ); - } - - /* Return 0 if one of the following is true: - * 1. we failed to read process creation time - * 2. child was created before alleged parent - */ - if ( ( tchild == 0.0 ) || ( tparent == 0.0 ) || - ( tchild < tparent ) ) - return 0; - - return is_parent_child( parent, pinfo.th32ParentProcessID ) & 1; - } - } - - CloseHandle( process_snapshot_h ); - } - - return 0; -} - -typedef struct PROCESS_HANDLE_ID { HANDLE h; DWORD pid; } PROCESS_HANDLE_ID; - - -/* - * This function is called by the operating system for each topmost window. - */ - -BOOL CALLBACK close_alert_window_enum( HWND hwnd, LPARAM lParam ) -{ - char buf[ 7 ] = { 0 }; - PROCESS_HANDLE_ID p = *( (PROCESS_HANDLE_ID *)lParam ); - DWORD pid = 0; - DWORD tid = 0; - - /* We want to find and close any window that: - * 1. is visible and - * 2. is a dialog and - * 3. is displayed by any of our child processes - */ - if ( !IsWindowVisible( hwnd ) ) - return TRUE; - - if ( !GetClassNameA( hwnd, buf, sizeof( buf ) ) ) - return TRUE; /* Failed to read class name; presume it is not a dialog. */ - - if ( strcmp( buf, "#32770" ) ) - return TRUE; /* Not a dialog */ - - /* GetWindowThreadProcessId() returns 0 on error, otherwise thread id of - * window message pump thread. - */ - tid = GetWindowThreadProcessId( hwnd, &pid ); - - if ( tid && is_parent_child( p.pid, pid ) ) - { - /* Ask really nice. */ - PostMessageA( hwnd, WM_CLOSE, 0, 0 ); - /* Now wait and see if it worked. If not, insist. */ - if ( WaitForSingleObject( p.h, 200 ) == WAIT_TIMEOUT ) - { - PostThreadMessageA( tid, WM_QUIT, 0, 0 ); - WaitForSingleObject( p.h, 300 ); - } - - /* Done, we do not want to check any other window now. */ - return FALSE; - } - - return TRUE; -} - - -static void close_alert( HANDLE process ) -{ - DWORD pid = get_process_id( process ); - /* If process already exited or we just can not get its process id, do not - * go any further. - */ - if ( pid ) - { - PROCESS_HANDLE_ID p; - p.h = process; - p.pid = pid; - EnumWindows( &close_alert_window_enum, (LPARAM)&p ); - } -} - -#endif /* USE_EXECNT */ diff --git a/jam-files/engine/execunix.c b/jam-files/engine/execunix.c deleted file mode 100644 index ef9dba00..00000000 --- a/jam-files/engine/execunix.c +++ /dev/null @@ -1,569 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * Copyright 2007 Noel Belcourt. - * - * This file is part of Jam - see jam.c for Copyright information. 
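
The snapshot walk in kill_process_tree() above kills a tree child-by-child and can race against process-id reuse. A common alternative on Windows 2000 and later, sketched below on the assumption that the child is created after the job and assigned to it, is to confine children to a job object so that closing a single handle kills the whole tree:

    #include <windows.h>

    /* Sketch: a job object configured so the whole process tree dies when
     * the job handle is closed (error handling elided). */
    HANDLE make_kill_on_close_job( void )
    {
        JOBOBJECT_EXTENDED_LIMIT_INFORMATION info;
        HANDLE job = CreateJobObject( NULL, NULL );
        ZeroMemory( &info, sizeof( info ) );
        info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
        SetInformationJobObject( job, JobObjectExtendedLimitInformation,
            &info, sizeof( info ) );
        /* After CreateProcess(), call AssignProcessToJobObject( job, hProcess );
         * closing 'job' then terminates every process in the tree. */
        return job;
    }
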
- */
-
-#include "jam.h"
-#include "lists.h"
-#include "execcmd.h"
-#include "output.h"
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <time.h>
-#include <unistd.h>  /* needed for vfork(), _exit() prototypes */
-#include <sys/resource.h>
-#include <sys/times.h>
-#include <sys/wait.h>
-
-#if defined(sun) || defined(__sun) || defined(linux)
-    #include <wait.h>
-#endif
-
-#ifdef USE_EXECUNIX
-
-#include <sys/times.h>
-
-#if defined(__APPLE__)
-    #define NO_VFORK
-#endif
-
-#ifdef NO_VFORK
-    #define vfork() fork()
-#endif
-
-
-/*
- * execunix.c - execute a shell script on UNIX/WinNT/OS2/AmigaOS
- *
- * If $(JAMSHELL) is defined, uses that to formulate execvp()/spawnvp().
- * The default is:
- *
- *     /bin/sh -c %    [ on UNIX/AmigaOS ]
- *     cmd.exe /c %    [ on OS2/WinNT ]
- *
- * Each word must be an individual element in a jam variable value.
- *
- * In $(JAMSHELL), % expands to the command string and ! expands to the slot
- * number (starting at 1) for multiprocess (-j) invocations. If $(JAMSHELL) does
- * not include a %, it is tacked on as the last argument.
- *
- * Do not just set JAMSHELL to /bin/sh or cmd.exe - it will not work!
- *
- * External routines:
- *     exec_cmd() - launch an async command execution.
- *     exec_wait() - wait and drive at most one execution completion.
- *
- * Internal routines:
- *     onintr() - bump intr to note command interruption.
- *
- * 04/08/94 (seiwald) - Coherent/386 support added.
- * 05/04/94 (seiwald) - async multiprocess interface
- * 01/22/95 (seiwald) - $(JAMSHELL) support
- * 06/02/97 (gsar) - full async multiprocess support for Win32
- */
-
-static clock_t tps = 0;
-static struct timeval tv;
-static int select_timeout = 0;
-static int intr = 0;
-static int cmdsrunning = 0;
-static struct tms old_time;
-
-#define OUT 0
-#define ERR 1
-
-static struct
-{
-    int pid;             /* on win32, a real process handle */
-    int fd[2];           /* file descriptors for stdout and stderr */
-    FILE *stream[2];     /* child's stdout (0) and stderr (1) file stream */
-    clock_t start_time;  /* start time of child process */
-    int exit_reason;     /* termination status */
-    int action_length;   /* length of action string */
-    int target_length;   /* length of target string */
-    char *action;        /* buffer to hold action and target invoked */
-    char *target;        /* buffer to hold action and target invoked */
-    char *command;       /* buffer to hold command being invoked */
-    char *buffer[2];     /* buffer to hold stdout and stderr, if any */
-    void (*func)( void *closure, int status, timing_info*, char *, char * );
-    void *closure;
-    time_t start_dt;     /* start of command timestamp */
-} cmdtab[ MAXJOBS ] = {{0}};
-
-/*
- * onintr() - bump intr to note command interruption
- */
-
-void onintr( int disp )
-{
-    ++intr;
-    printf( "...interrupted\n" );
-}
-
-
-/*
- * exec_cmd() - launch an async command execution.
- */
-
-void exec_cmd
-(
-    char * string,
-    void (*func)( void *closure, int status, timing_info*, char *, char * ),
-    void * closure,
-    LIST * shell,
-    char * action,
-    char * target
-)
-{
-    static int initialized = 0;
-    int out[2];
-    int err[2];
-    int slot;
-    int len;
-    char * argv[ MAXARGC + 1 ];  /* +1 for NULL */
-
-    /* Find a slot in the running commands table for this one. */
-    for ( slot = 0; slot < MAXJOBS; ++slot )
-        if ( !cmdtab[ slot ].pid )
-            break;
-
-    if ( slot == MAXJOBS )
-    {
-        printf( "no slots for child!\n" );
-        exit( EXITBAD );
-    }
-
-    /* Formulate argv. If shell was defined, be prepared for % and ! subs.
-     * Otherwise, use stock /bin/sh on unix or cmd.exe on NT.
- */ - if ( shell ) - { - int i; - char jobno[4]; - int gotpercent = 0; - - sprintf( jobno, "%d", slot + 1 ); - - for ( i = 0; shell && i < MAXARGC; ++i, shell = list_next( shell ) ) - { - switch ( shell->string[0] ) - { - case '%': argv[ i ] = string; ++gotpercent; break; - case '!': argv[ i ] = jobno; break; - default : argv[ i ] = shell->string; - } - if ( DEBUG_EXECCMD ) - printf( "argv[%d] = '%s'\n", i, argv[ i ] ); - } - - if ( !gotpercent ) - argv[ i++ ] = string; - - argv[ i ] = 0; - } - else - { - argv[ 0 ] = "/bin/sh"; - argv[ 1 ] = "-c"; - argv[ 2 ] = string; - argv[ 3 ] = 0; - } - - /* Increment jobs running. */ - ++cmdsrunning; - - /* Save off actual command string. */ - cmdtab[ slot ].command = BJAM_MALLOC_ATOMIC( strlen( string ) + 1 ); - strcpy( cmdtab[ slot ].command, string ); - - /* Initialize only once. */ - if ( !initialized ) - { - times( &old_time ); - initialized = 1; - } - - /* Create pipes from child to parent. */ - { - if ( pipe( out ) < 0 ) - exit( EXITBAD ); - - if ( pipe( err ) < 0 ) - exit( EXITBAD ); - } - - /* Start the command */ - - cmdtab[ slot ].start_dt = time(0); - - if ( 0 < globs.timeout ) - { - /* - * Handle hung processes by manually tracking elapsed time and signal - * process when time limit expires. - */ - struct tms buf; - cmdtab[ slot ].start_time = times( &buf ); - - /* Make a global, only do this once. */ - if ( tps == 0 ) tps = sysconf( _SC_CLK_TCK ); - } - - if ( ( cmdtab[ slot ].pid = vfork() ) == 0 ) - { - int pid = getpid(); - - close( out[0] ); - close( err[0] ); - - dup2( out[1], STDOUT_FILENO ); - - if ( globs.pipe_action == 0 ) - dup2( out[1], STDERR_FILENO ); - else - dup2( err[1], STDERR_FILENO ); - - close( out[1] ); - close( err[1] ); - - /* Make this process a process group leader so that when we kill it, all - * child processes of this process are terminated as well. We use - * killpg(pid, SIGKILL) to kill the process group leader and all its - * children. - */ - if ( 0 < globs.timeout ) - { - struct rlimit r_limit; - r_limit.rlim_cur = globs.timeout; - r_limit.rlim_max = globs.timeout; - setrlimit( RLIMIT_CPU, &r_limit ); - } - setpgid( pid,pid ); - execvp( argv[0], argv ); - perror( "execvp" ); - _exit( 127 ); - } - else if ( cmdtab[ slot ].pid == -1 ) - { - perror( "vfork" ); - exit( EXITBAD ); - } - - setpgid( cmdtab[ slot ].pid, cmdtab[ slot ].pid ); - - /* close write end of pipes */ - close( out[1] ); - close( err[1] ); - - /* set both file descriptors to non-blocking */ - fcntl(out[0], F_SETFL, O_NONBLOCK); - fcntl(err[0], F_SETFL, O_NONBLOCK); - - /* child writes stdout to out[1], parent reads from out[0] */ - cmdtab[ slot ].fd[ OUT ] = out[0]; - cmdtab[ slot ].stream[ OUT ] = fdopen( cmdtab[ slot ].fd[ OUT ], "rb" ); - if ( cmdtab[ slot ].stream[ OUT ] == NULL ) - { - perror( "fdopen" ); - exit( EXITBAD ); - } - - /* child writes stderr to err[1], parent reads from err[0] */ - if (globs.pipe_action == 0) - { - close(err[0]); - } - else - { - cmdtab[ slot ].fd[ ERR ] = err[0]; - cmdtab[ slot ].stream[ ERR ] = fdopen( cmdtab[ slot ].fd[ ERR ], "rb" ); - if ( cmdtab[ slot ].stream[ ERR ] == NULL ) - { - perror( "fdopen" ); - exit( EXITBAD ); - } - } - - /* Ensure enough room for rule and target name. 
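
Stripped of the jam bookkeeping, the child setup above (pipe, vfork, dup2 onto stdout, exec) is the classic POSIX output-capture pattern; a minimal sketch using plain fork():

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main( void )
    {
        int out[ 2 ];
        char buf[ 256 ];
        ssize_t n;
        pid_t pid;

        if ( pipe( out ) < 0 ) { perror( "pipe" ); return 1; }

        pid = fork();
        if ( pid < 0 ) { perror( "fork" ); return 1; }
        if ( pid == 0 )                          /* child */
        {
            close( out[ 0 ] );                   /* close unused read end */
            dup2( out[ 1 ], STDOUT_FILENO );     /* route stdout into the pipe */
            close( out[ 1 ] );
            execlp( "echo", "echo", "hello from child", (char *)0 );
            _exit( 127 );                        /* reached only if exec fails */
        }

        close( out[ 1 ] );                       /* parent: close write end */
        while ( ( n = read( out[ 0 ], buf, sizeof( buf ) - 1 ) ) > 0 )
        {
            buf[ n ] = '\0';
            fputs( buf, stdout );
        }
        close( out[ 0 ] );
        waitpid( pid, NULL, 0 );
        return 0;
    }
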
*/ - if ( action && target ) - { - len = strlen( action ) + 1; - if ( cmdtab[ slot ].action_length < len ) - { - BJAM_FREE( cmdtab[ slot ].action ); - cmdtab[ slot ].action = BJAM_MALLOC_ATOMIC( len ); - cmdtab[ slot ].action_length = len; - } - strcpy( cmdtab[ slot ].action, action ); - len = strlen( target ) + 1; - if ( cmdtab[ slot ].target_length < len ) - { - BJAM_FREE( cmdtab[ slot ].target ); - cmdtab[ slot ].target = BJAM_MALLOC_ATOMIC( len ); - cmdtab[ slot ].target_length = len; - } - strcpy( cmdtab[ slot ].target, target ); - } - else - { - BJAM_FREE( cmdtab[ slot ].action ); - BJAM_FREE( cmdtab[ slot ].target ); - cmdtab[ slot ].action = 0; - cmdtab[ slot ].target = 0; - cmdtab[ slot ].action_length = 0; - cmdtab[ slot ].target_length = 0; - } - - /* Save the operation for exec_wait() to find. */ - cmdtab[ slot ].func = func; - cmdtab[ slot ].closure = closure; - - /* Wait until we are under the limit of concurrent commands. Do not trust - * globs.jobs alone. - */ - while ( ( cmdsrunning >= MAXJOBS ) || ( cmdsrunning >= globs.jobs ) ) - if ( !exec_wait() ) - break; -} - - -/* Returns 1 if file is closed, 0 if descriptor is still live. - * - * i is index into cmdtab - * - * s (stream) indexes: - * - cmdtab[ i ].stream[ s ] - * - cmdtab[ i ].buffer[ s ] - * - cmdtab[ i ].fd [ s ] - */ - -int read_descriptor( int i, int s ) -{ - int ret; - int len; - char buffer[BUFSIZ]; - - while ( 0 < ( ret = fread( buffer, sizeof(char), BUFSIZ-1, cmdtab[ i ].stream[ s ] ) ) ) - { - buffer[ret] = 0; - if ( !cmdtab[ i ].buffer[ s ] ) - { - /* Never been allocated. */ - cmdtab[ i ].buffer[ s ] = (char*)BJAM_MALLOC_ATOMIC( ret + 1 ); - memcpy( cmdtab[ i ].buffer[ s ], buffer, ret + 1 ); - } - else - { - /* Previously allocated. */ - char * tmp = cmdtab[ i ].buffer[ s ]; - len = strlen( tmp ); - cmdtab[ i ].buffer[ s ] = (char*)BJAM_MALLOC_ATOMIC( len + ret + 1 ); - memcpy( cmdtab[ i ].buffer[ s ], tmp, len ); - memcpy( cmdtab[ i ].buffer[ s ] + len, buffer, ret + 1 ); - BJAM_FREE( tmp ); - } - } - - return feof(cmdtab[ i ].stream[ s ]); -} - - -void close_streams( int i, int s ) -{ - /* Close the stream and pipe descriptor. */ - fclose(cmdtab[ i ].stream[ s ]); - cmdtab[ i ].stream[ s ] = 0; - - close(cmdtab[ i ].fd[ s ]); - cmdtab[ i ].fd[ s ] = 0; -} - - -void populate_file_descriptors( int * fmax, fd_set * fds) -{ - int i, fd_max = 0; - struct tms buf; - clock_t current = times( &buf ); - select_timeout = globs.timeout; - - /* Compute max read file descriptor for use in select. */ - FD_ZERO(fds); - for ( i = 0; i < globs.jobs; ++i ) - { - if ( 0 < cmdtab[ i ].fd[ OUT ] ) - { - fd_max = fd_max < cmdtab[ i ].fd[ OUT ] ? cmdtab[ i ].fd[ OUT ] : fd_max; - FD_SET(cmdtab[ i ].fd[ OUT ], fds); - } - if ( globs.pipe_action != 0 ) - { - if (0 < cmdtab[ i ].fd[ ERR ]) - { - fd_max = fd_max < cmdtab[ i ].fd[ ERR ] ? cmdtab[ i ].fd[ ERR ] : fd_max; - FD_SET(cmdtab[ i ].fd[ ERR ], fds); - } - } - - if (globs.timeout && cmdtab[ i ].pid) { - clock_t consumed = (current - cmdtab[ i ].start_time) / tps; - clock_t process_timesout = globs.timeout - consumed; - if (0 < process_timesout && process_timesout < select_timeout) { - select_timeout = process_timesout; - } - if ( globs.timeout <= consumed ) - { - killpg( cmdtab[ i ].pid, SIGKILL ); - cmdtab[ i ].exit_reason = EXIT_TIMEOUT; - } - } - } - *fmax = fd_max; -} - - -/* - * exec_wait() - wait and drive at most one execution completion. 
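
One detail of read_descriptor() above: every chunk triggers a strlen() plus a full copy of everything read so far, so accumulating n bytes of output costs O(n^2). A length-tracking buffer (hypothetical growbuf helper, not jam code; start from a zero-initialized struct) does the same job in amortized linear time:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical length-tracking buffer, shown for contrast. */
    typedef struct { char * data; size_t len; size_t cap; } growbuf;

    static void growbuf_append( growbuf * b, const char * src, size_t n )
    {
        if ( b->len + n + 1 > b->cap )
        {
            size_t cap = b->cap ? b->cap : 256;
            while ( cap < b->len + n + 1 ) cap *= 2;    /* geometric growth */
            b->data = realloc( b->data, cap );          /* error handling elided */
            b->cap = cap;
        }
        memcpy( b->data + b->len, src, n );
        b->len += n;
        b->data[ b->len ] = '\0';                       /* keep NUL-terminated */
    }
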
- */ - -int exec_wait() -{ - int i; - int ret; - int fd_max; - int pid; - int status; - int finished; - int rstat; - timing_info time_info; - fd_set fds; - struct tms new_time; - - /* Handle naive make1() which does not know if commands are running. */ - if ( !cmdsrunning ) - return 0; - - /* Process children that signaled. */ - finished = 0; - while ( !finished && cmdsrunning ) - { - /* Compute max read file descriptor for use in select(). */ - populate_file_descriptors( &fd_max, &fds ); - - if ( 0 < globs.timeout ) - { - /* Force select() to timeout so we can terminate expired processes. - */ - tv.tv_sec = select_timeout; - tv.tv_usec = 0; - - /* select() will wait until: i/o on a descriptor, a signal, or we - * time out. - */ - ret = select( fd_max + 1, &fds, 0, 0, &tv ); - } - else - { - /* select() will wait until i/o on a descriptor or a signal. */ - ret = select( fd_max + 1, &fds, 0, 0, 0 ); - } - - if ( 0 < ret ) - { - for ( i = 0; i < globs.jobs; ++i ) - { - int out = 0; - int err = 0; - if ( FD_ISSET( cmdtab[ i ].fd[ OUT ], &fds ) ) - out = read_descriptor( i, OUT ); - - if ( ( globs.pipe_action != 0 ) && - ( FD_ISSET( cmdtab[ i ].fd[ ERR ], &fds ) ) ) - err = read_descriptor( i, ERR ); - - /* If feof on either descriptor, then we are done. */ - if ( out || err ) - { - /* Close the stream and pipe descriptors. */ - close_streams( i, OUT ); - if ( globs.pipe_action != 0 ) - close_streams( i, ERR ); - - /* Reap the child and release resources. */ - pid = waitpid( cmdtab[ i ].pid, &status, 0 ); - - if ( pid == cmdtab[ i ].pid ) - { - finished = 1; - pid = 0; - cmdtab[ i ].pid = 0; - - /* Set reason for exit if not timed out. */ - if ( WIFEXITED( status ) ) - { - cmdtab[ i ].exit_reason = 0 == WEXITSTATUS( status ) - ? EXIT_OK - : EXIT_FAIL; - } - - /* Print out the rule and target name. */ - out_action( cmdtab[ i ].action, cmdtab[ i ].target, - cmdtab[ i ].command, cmdtab[ i ].buffer[ OUT ], - cmdtab[ i ].buffer[ ERR ], cmdtab[ i ].exit_reason - ); - - times( &new_time ); - - time_info.system = (double)( new_time.tms_cstime - old_time.tms_cstime ) / CLOCKS_PER_SEC; - time_info.user = (double)( new_time.tms_cutime - old_time.tms_cutime ) / CLOCKS_PER_SEC; - time_info.start = cmdtab[ i ].start_dt; - time_info.end = time( 0 ); - - old_time = new_time; - - /* Drive the completion. */ - --cmdsrunning; - - if ( intr ) - rstat = EXEC_CMD_INTR; - else if ( status != 0 ) - rstat = EXEC_CMD_FAIL; - else - rstat = EXEC_CMD_OK; - - /* Assume -p0 in effect so only pass buffer[ 0 ] - * containing merged output. - */ - (*cmdtab[ i ].func)( cmdtab[ i ].closure, rstat, - &time_info, cmdtab[ i ].command, - cmdtab[ i ].buffer[ 0 ] ); - - BJAM_FREE( cmdtab[ i ].buffer[ OUT ] ); - cmdtab[ i ].buffer[ OUT ] = 0; - - BJAM_FREE( cmdtab[ i ].buffer[ ERR ] ); - cmdtab[ i ].buffer[ ERR ] = 0; - - BJAM_FREE( cmdtab[ i ].command ); - cmdtab[ i ].command = 0; - - cmdtab[ i ].func = 0; - cmdtab[ i ].closure = 0; - cmdtab[ i ].start_time = 0; - } - else - { - printf( "unknown pid %d with errno = %d\n", pid, errno ); - exit( EXITBAD ); - } - } - } - } - } - - return 1; -} - -# endif /* USE_EXECUNIX */ diff --git a/jam-files/engine/execvms.c b/jam-files/engine/execvms.c deleted file mode 100644 index 729917d3..00000000 --- a/jam-files/engine/execvms.c +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. 
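
One caveat in exec_wait() above: times() reports tms fields in clock ticks, i.e. sysconf(_SC_CLK_TCK) units (already cached in tps by exec_cmd()), while the code divides by CLOCKS_PER_SEC, which belongs to clock() and need not be the same value. On platforms where the two differ, the reported user/system times come out scaled wrong. A corrected conversion would look like this sketch:

    #include <sys/times.h>
    #include <unistd.h>

    /* Convert a tms field delta to seconds using the tick rate that
     * times() actually reports in. */
    static double tms_delta_seconds( clock_t newer, clock_t older )
    {
        static long ticks_per_second = 0;
        if ( !ticks_per_second )
            ticks_per_second = sysconf( _SC_CLK_TCK );
        return (double)( newer - older ) / (double)ticks_per_second;
    }
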
- */
-
-#include "jam.h"
-#include "lists.h"
-#include "execcmd.h"
-
-#ifdef OS_VMS
-
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#include <iodef.h>
-#include <ssdef.h>
-#include <descrip.h>
-#include <dvidef.h>
-#include <clidef.h>
-
-/*
- * execvms.c - execute a shell script, ala VMS.
- *
- * The approach is this:
- *
- * If the command is a single line, and shorter than WRTLEN (what we believe to
- * be the maximum line length), we just system() it.
- *
- * If the command is multi-line, or longer than WRTLEN, we write the command
- * block to a temp file, splitting long lines (using "-" at the end of the line
- * to indicate continuation), and then source that temp file. We use special
- * logic to make sure we do not continue in the middle of a quoted string.
- *
- * 05/04/94 (seiwald) - async multiprocess interface; noop on VMS
- * 12/20/96 (seiwald) - rewritten to handle multi-line commands well
- * 01/14/96 (seiwald) - do not put -'s between "'s
- */
-
-#define WRTLEN 240
-
-#define MIN( a, b ) ((a) < (b) ? (a) : (b))
-
-/* 1 for the @ and 4 for the .com */
-
-char tempnambuf[ L_tmpnam + 1 + 4 ] = { 0 };
-
-
-void exec_cmd
-(
-    char * string,
-    void (* func)( void * closure, int status, timing_info *, char *, char * ),
-    void * closure,
-    LIST * shell,
-    char * rule_name,
-    char * target
-)
-{
-    char * s;
-    char * e;
-    char * p;
-    int rstat = EXEC_CMD_OK;
-    int status;
-
-    /* See if string is more than one line discounting leading/trailing white
-     * space.
-     */
-    for ( s = string; *s && isspace( *s ); ++s );
-
-    e = p = strchr( s, '\n' );
-
-    while ( p && isspace( *p ) )
-        ++p;
-
-    /* If multi line or long, write to com file. Otherwise, exec directly. */
-    if ( ( p && *p ) || ( e - s > WRTLEN ) )
-    {
-        FILE * f;
-
-        /* Create temp file invocation "@sys$scratch:tempfile.com". */
-        if ( !*tempnambuf )
-        {
-            tempnambuf[0] = '@';
-            (void)tmpnam( tempnambuf + 1 );
-            strcat( tempnambuf, ".com" );
-        }
-
-        /* Open tempfile. */
-        if ( !( f = fopen( tempnambuf + 1, "w" ) ) )
-        {
-            printf( "can't open command file\n" );
-            /* No timing or output is captured on VMS. */
-            (*func)( closure, EXEC_CMD_FAIL, 0, 0, 0 );
-            return;
-        }
-
-        /* For each line of the string. */
-        while ( *string )
-        {
-            char * s = strchr( string, '\n' );
-            int len = s ? s + 1 - string : strlen( string );
-
-            fputc( '$', f );
-
-            /* For each chunk of a line that needs to be split. */
-            while ( len > 0 )
-            {
-                char * q = string;
-                char * qe = string + MIN( len, WRTLEN );
-                char * qq = q;
-                int quote = 0;
-
-                /* Look for matching "s. */
-                for ( ; q < qe; ++q )
-                    if ( ( *q == '"' ) && ( quote = !quote ) )
-                        qq = q;
-
-                /* Back up to opening quote, if in one. */
-                if ( quote )
-                    q = qq;
-
-                fwrite( string, ( q - string ), 1, f );
-
-                len -= ( q - string );
-                string = q;
-
-                if ( len )
-                {
-                    fputc( '-', f );
-                    fputc( '\n', f );
-                }
-            }
-        }
-
-        fclose( f );
-
-        status = system( tempnambuf ) & 0x07;
-
-        unlink( tempnambuf + 1 );
-    }
-    else
-    {
-        /* Execute single line command. Strip trailing newline before execing.
-         */
-        if ( e ) *e = 0;
-        status = system( s ) & 0x07;
-    }
-
-    /* Fail for error or fatal error. OK on OK, warning or info exit. */
-    if ( ( status == 2 ) || ( status == 4 ) )
-        rstat = EXEC_CMD_FAIL;
-
-    /* No timing or output is captured on VMS. */
-    (*func)( closure, rstat, 0, 0, 0 );
-}
-
-
-int exec_wait()
-{
-    return 0;
-}
-
-# endif /* VMS */
diff --git a/jam-files/engine/expand.c b/jam-files/engine/expand.c
deleted file mode 100644
index d8e58827..00000000
--- a/jam-files/engine/expand.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc.
- *
- * This file is part of Jam - see jam.c for Copyright information.
- */
-
-# include "jam.h"
-# include "lists.h"
-# include "variable.h"
-# include "expand.h"
-# include "pathsys.h"
-# include "newstr.h"
-# include <assert.h>
-# include <stdlib.h>
-# include <limits.h>
-
-# ifdef OS_CYGWIN
-# include <sys/cygwin.h>
-# include <windows.h>
-# endif
-
-/*
- * expand.c - expand a buffer, given variable values
- *
- * External routines:
- *
- *     var_expand() - variable-expand input string into list of strings
- *
- * Internal routines:
- *
- *     var_edit_parse() - parse : modifiers into PATHNAME structure.
- *     var_edit_file() - copy input target name to output, modifying filename.
- *     var_edit_shift() - do upshift/downshift mods.
- *
- * 01/25/94 (seiwald) - $(X)$(UNDEF) was expanding like plain $(X)
- * 04/13/94 (seiwald) - added shorthand L0 for null list pointer
- * 01/11/01 (seiwald) - added support for :E=emptyvalue, :J=joinval
- */
-
-typedef struct
-{
-    PATHNAME f;        /* :GDBSMR -- pieces */
-    char parent;       /* :P -- go to parent directory */
-    char filemods;     /* one of the above applied */
-    char downshift;    /* :L -- downshift result */
-    char upshift;      /* :U -- upshift result */
-    char to_slashes;   /* :T -- convert "\" to "/" */
-    char to_windows;   /* :W -- convert cygwin to native paths */
-    PATHPART empty;    /* :E -- default for empties */
-    PATHPART join;     /* :J -- join list with char */
-} VAR_EDITS ;
-
-static void var_edit_parse( char * mods, VAR_EDITS * edits );
-static void var_edit_file ( char * in, string * out, VAR_EDITS * edits );
-static void var_edit_shift( string * out, VAR_EDITS * edits );
-
-#define MAGIC_COLON '\001'
-#define MAGIC_LEFT  '\002'
-#define MAGIC_RIGHT '\003'
-
-
-/*
- * var_expand() - variable-expand input string into list of strings.
- *
- * Would just copy input to output, performing variable expansion, except that
- * since variables can contain multiple values the result of variable expansion
- * may contain multiple values (a list). Properly performs "product" operations
- * that occur in "$(var1)xxx$(var2)" or even "$($(var2))".
- *
- * Returns a newly created list.
- */
-
-LIST * var_expand( LIST * l, char * in, char * end, LOL * lol, int cancopyin )
-{
-    char out_buf[ MAXSYM ];
-    string buf[ 1 ];
-    string out1[ 1 ];    /* temporary buffer */
-    size_t prefix_length;
-    char * out;
-    char * inp = in;
-    char * ov;           /* for temp copy of variable in outbuf */
-    int depth;
-
-    if ( DEBUG_VAREXP )
-        printf( "expand '%.*s'\n", end - in, in );
-
-    /* This gets a lot of cases: $(<) and $(>). */
-    if
-    (
-        ( in[ 0 ] == '$' ) &&
-        ( in[ 1 ] == '(' ) &&
-        ( in[ 3 ] == ')' ) &&
-        ( in[ 4 ] == '\0' )
-    )
-    {
-        switch ( in[ 2 ] )
-        {
-            case '<': return list_copy( l, lol_get( lol, 0 ) );
-            case '>': return list_copy( l, lol_get( lol, 1 ) );
-
-            case '1':
-            case '2':
-            case '3':
-            case '4':
-            case '5':
-            case '6':
-            case '7':
-            case '8':
-            case '9':
-                return list_copy( l, lol_get( lol, in[ 2 ] - '1' ) );
-        }
-    }
-    else if ( in[0] == '$' && in[1] == '(' && in[2] == '1' && in[4] == ')' &&
-        in[5] == '\0') {
-
-        switch( in[3] )
-        {
-            case '0':
-            case '1':
-            case '2':
-            case '3':
-            case '4':
-            case '5':
-            case '6':
-            case '7':
-            case '8':
-            case '9':
-                return list_copy( l, lol_get( lol, in[3]-'0'+10-1 ) );
-        }
-    }
-
-    /* Expand @() files, to single item plus accompanying file. */
-    if ( ( in[ 0 ] == '@' ) && ( in[ 1 ] == '(' ) && ( *( end - 1 ) == ')' ) )
-    {
-        /* We try the expansion until it fits within the prospective output
- */ - char * at_buf = 0; - int at_size = MAXJPATH; - int at_len = 0; - do - { - BJAM_FREE( at_buf ); - at_buf = (char *)BJAM_MALLOC_ATOMIC( at_size + 1 ); - at_len = var_string( in, at_buf, at_size, lol ); - at_size *= 2; - } - while ( ( at_len < 0 ) && ( at_size < INT_MAX / 2 ) ); - /* Return the result as a single item list. */ - if ( at_len > 0 ) - { - LIST * r; - string_copy( buf, at_buf ); - r = list_new( l, newstr( buf->value ) ); - string_free( buf ); - BJAM_FREE( at_buf ); - return r; - } - BJAM_FREE( at_buf ); - } - - /* Just try simple copy of in to out. */ - while ( in < end ) - if ( ( *in++ == '$' ) && ( *in == '(' ) ) - goto expand; - - /* No variables expanded - just add copy of input string to list. */ - - /* 'cancopyin' is an optimization: if the input was already a list item, we - * can use copystr() to put it on the new list. Otherwise, we use the slower - * newstr(). - */ - if ( cancopyin ) - return list_new( l, copystr( inp ) ); - - { - LIST * r; - string_new( buf ); - string_append_range( buf, inp, end ); - r = list_new( l, newstr( buf->value ) ); - string_free( buf ); - return r; - } - -expand: - string_new( buf ); - string_append_range( buf, inp, in - 1 ); /* Copy the part before '$'. */ - /* - * Input so far (ignore blanks): - * - * stuff-in-outbuf $(variable) remainder - * ^ ^ - * in end - * Output so far: - * - * stuff-in-outbuf $ - * ^ ^ - * out_buf out - * - * - * We just copied the $ of $(...), so back up one on the output. We now find - * the matching close paren, copying the variable and modifiers between the - * $( and ) temporarily into out_buf, so that we can replace :'s with - * MAGIC_COLON. This is necessary to avoid being confused by modifier values - * that are variables containing :'s. Ugly. - */ - - depth = 1; - inp = ++in; /* Skip over the '('. */ - - while ( ( in < end ) && depth ) - { - switch ( *in++ ) - { - case '(': ++depth; break; - case ')': --depth; break; - } - } - - /* - * Input so far (ignore blanks): - * - * stuff-in-outbuf $(variable) remainder - * ^ ^ ^ - * inp in end - */ - prefix_length = buf->size; - string_append_range( buf, inp, in - 1 ); - - out = buf->value + prefix_length; - for ( ov = out; ov < buf->value + buf->size; ++ov ) - { - switch ( *ov ) - { - case ':': *ov = MAGIC_COLON; break; - case '[': *ov = MAGIC_LEFT ; break; - case ']': *ov = MAGIC_RIGHT; break; - } - } - - /* - * Input so far (ignore blanks): - * - * stuff-in-outbuf $(variable) remainder - * ^ ^ - * in end - * Output so far: - * - * stuff-in-outbuf variable - * ^ ^ ^ - * out_buf out ov - * - * Later we will overwrite 'variable' in out_buf, but we will be done with - * it by then. 'variable' may be a multi-element list, so may each value for - * '$(variable element)', and so may 'remainder'. Thus we produce a product - * of three lists. - */ - { - LIST * variables = 0; - LIST * remainder = 0; - LIST * vars; - - /* Recursively expand variable name & rest of input. */ - if ( out < ov ) variables = var_expand( L0, out, ov, lol, 0 ); - if ( in < end ) remainder = var_expand( L0, in, end, lol, 0 ); - - /* Now produce the result chain. */ - - /* For each variable name. */ - for ( vars = variables; vars; vars = list_next( vars ) ) - { - LIST * value = 0; - LIST * evalue = 0; - char * colon; - char * bracket; - string variable[1]; - char * varname; - int sub1 = 0; - int sub2 = -1; - VAR_EDITS edits; - - /* Look for a : modifier in the variable name. Must copy into - * varname so we can modify it. 
- */
-            string_copy( variable, vars->string );
-            varname = variable->value;
-
-            if ( ( colon = strchr( varname, MAGIC_COLON ) ) )
-            {
-                string_truncate( variable, colon - varname );
-                var_edit_parse( colon + 1, &edits );
-            }
-
-            /* Look for [x-y] subscripting. sub1 and sub2 are x and y. */
-            if ( ( bracket = strchr( varname, MAGIC_LEFT ) ) )
-            {
-                /* Make all syntax errors in [] subscripting result in the same
-                 * behavior: silently return an empty expansion (by setting sub2
-                 * = 0). Brute force parsing; may get moved into yacc someday.
-                 */
-
-                char * s = bracket + 1;
-
-                string_truncate( variable, bracket - varname );
-
-                do  /* so we can use "break" */
-                {
-                    /* Allow negative indexes. */
-                    if ( !isdigit( *s ) && ( *s != '-' ) )
-                    {
-                        sub2 = 0;
-                        break;
-                    }
-                    sub1 = atoi( s );
-
-                    /* Skip over the first symbol, which is either a digit or dash. */
-                    ++s;
-                    while ( isdigit( *s ) ) ++s;
-
-                    if ( *s == MAGIC_RIGHT )
-                    {
-                        sub2 = sub1;
-                        break;
-                    }
-
-                    if ( *s != '-' )
-                    {
-                        sub2 = 0;
-                        break;
-                    }
-
-                    ++s;
-
-                    if ( *s == MAGIC_RIGHT )
-                    {
-                        sub2 = -1;
-                        break;
-                    }
-
-                    if ( !isdigit( *s ) && ( *s != '-' ) )
-                    {
-                        sub2 = 0;
-                        break;
-                    }
-
-                    /* First, compute the index of the last element. */
-                    sub2 = atoi( s );
-                    while ( isdigit( *++s ) );
-
-                    if ( *s != MAGIC_RIGHT )
-                        sub2 = 0;
-
-                } while ( 0 );
-
-                /* Anything but the end of the string, or the colon introducing
-                 * a modifier, is a syntax error.
-                 */
-                ++s;
-                if ( *s && ( *s != MAGIC_COLON ) )
-                    sub2 = 0;
-
-                *bracket = '\0';
-            }
-
-            /* Get variable value, with special handling for $(<), $(>), $(n).
-             */
-            if ( !varname[1] )
-            {
-                if ( varname[0] == '<' )
-                    value = lol_get( lol, 0 );
-                else if ( varname[0] == '>' )
-                    value = lol_get( lol, 1 );
-                else if ( ( varname[0] >= '1' ) && ( varname[0] <= '9' ) )
-                    value = lol_get( lol, varname[0] - '1' );
-                else if( varname[0] == '1' && varname[1] >= '0' &&
-                    varname[1] <= '9' && !varname[2] )
-                    value = lol_get( lol, varname[1] - '0' + 10 - 1 );
-            }
-
-            if ( !value )
-                value = var_get( varname );
-
-            /* Handle negative indexes: part two. */
-            {
-                int length = list_length( value );
-
-                if ( sub1 < 0 )
-                    sub1 = length + sub1;
-                else
-                    sub1 -= 1;
-
-                if ( sub2 < 0 )
-                    sub2 = length + 1 + sub2 - sub1;
-                else
-                    sub2 -= sub1;
-                /* The "sub2 < 0" test handles the semantic error of sub2 <
-                 * sub1.
-                 */
-                if ( sub2 < 0 )
-                    sub2 = 0;
-            }
-
-            /* The fast path: $(x) - just copy the variable value. This is only
-             * an optimization.
-             */
-            if ( ( out == out_buf ) && !bracket && !colon && ( in == end ) )
-            {
-                string_free( variable );
-                l = list_copy( l, value );
-                continue;
-            }
-
-            /* Handle start subscript. */
-            while ( ( sub1 > 0 ) && value )
-                --sub1, value = list_next( value );
-
-            /* Empty w/ :E=default? */
-            if ( !value && colon && edits.empty.ptr )
-                evalue = value = list_new( L0, newstr( edits.empty.ptr ) );
-
-            /* For each variable value. */
-            string_new( out1 );
-            for ( ; value; value = list_next( value ) )
-            {
-                LIST * rem;
-                size_t postfix_start;
-
-                /* Handle end subscript (length actually).
*/ - - if ( sub2 >= 0 && --sub2 < 0 ) - break; - - string_truncate( buf, prefix_length ); - - /* Apply : mods, if present */ - - if ( colon && edits.filemods ) - var_edit_file( value->string, out1, &edits ); - else - string_append( out1, value->string ); - - if ( colon && ( edits.upshift || edits.downshift || edits.to_slashes || edits.to_windows ) ) - var_edit_shift( out1, &edits ); - - /* Handle :J=joinval */ - /* If we have more values for this var, just keep appending them - * (using the join value) rather than creating separate LIST - * elements. - */ - if ( colon && edits.join.ptr && - ( list_next( value ) || list_next( vars ) ) ) - { - string_append( out1, edits.join.ptr ); - continue; - } - - string_append( buf, out1->value ); - string_free( out1 ); - string_new( out1 ); - - /* If no remainder, append result to output chain. */ - if ( in == end ) - { - l = list_new( l, newstr( buf->value ) ); - continue; - } - - /* For each remainder, append the complete string to the output - * chain. Remember the end of the variable expansion so we can - * just tack on each instance of 'remainder'. - */ - postfix_start = buf->size; - for ( rem = remainder; rem; rem = list_next( rem ) ) - { - string_truncate( buf, postfix_start ); - string_append( buf, rem->string ); - l = list_new( l, newstr( buf->value ) ); - } - } - string_free( out1 ); - - /* Toss used empty. */ - if ( evalue ) - list_free( evalue ); - - string_free( variable ); - } - - /* variables & remainder were gifts from var_expand and must be freed. */ - if ( variables ) list_free( variables ); - if ( remainder ) list_free( remainder ); - - if ( DEBUG_VAREXP ) - { - printf( "expanded to " ); - list_print( l ); - printf( "\n" ); - } - - string_free( buf ); - return l; - } -} - - -/* - * var_edit_parse() - parse : modifiers into PATHNAME structure - * - * The : modifiers in a $(varname:modifier) currently support replacing or - * omitting elements of a filename, and so they are parsed into a PATHNAME - * structure (which contains pointers into the original string). - * - * Modifiers of the form "X=value" replace the component X with the given value. - * Modifiers without the "=value" cause everything but the component X to be - * omitted. X is one of: - * - * G <grist> - * D directory name - * B base name - * S .suffix - * M (member) - * R root directory - prepended to whole path - * - * This routine sets: - * - * f->f_xxx.ptr = 0 - * f->f_xxx.len = 0 - * -> leave the original component xxx - * - * f->f_xxx.ptr = string - * f->f_xxx.len = strlen( string ) - * -> replace component xxx with string - * - * f->f_xxx.ptr = "" - * f->f_xxx.len = 0 - * -> omit component xxx - * - * var_edit_file() below and path_build() obligingly follow this convention. 
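
Concretely, for /home/user/lib/libfoo.a the component selectors documented above give :D = /home/user/lib, :B = libfoo and :S = .a. A toy version for plain Unix paths is sketched below; the real var_edit_file() also handles grist, member and root:

    #include <stdio.h>
    #include <string.h>

    int main( void )
    {
        const char path[] = "/home/user/lib/libfoo.a";
        const char * slash = strrchr( path, '/' );   /* assumed present here */
        const char * dot = strrchr( path, '.' );     /* assumed present here */

        printf( ":D = %.*s\n", (int)( slash - path ), path );          /* /home/user/lib */
        printf( ":B = %.*s\n", (int)( dot - slash - 1 ), slash + 1 );  /* libfoo */
        printf( ":S = %s\n", dot );                                    /* .a */
        return 0;
    }
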
- */ - -static void var_edit_parse( char * mods, VAR_EDITS * edits ) -{ - int havezeroed = 0; - memset( (char *)edits, 0, sizeof( *edits ) ); - - while ( *mods ) - { - char * p; - PATHPART * fp; - - switch ( *mods++ ) - { - case 'L': edits->downshift = 1; continue; - case 'U': edits->upshift = 1; continue; - case 'P': edits->parent = edits->filemods = 1; continue; - case 'E': fp = &edits->empty; goto strval; - case 'J': fp = &edits->join; goto strval; - case 'G': fp = &edits->f.f_grist; goto fileval; - case 'R': fp = &edits->f.f_root; goto fileval; - case 'D': fp = &edits->f.f_dir; goto fileval; - case 'B': fp = &edits->f.f_base; goto fileval; - case 'S': fp = &edits->f.f_suffix; goto fileval; - case 'M': fp = &edits->f.f_member; goto fileval; - case 'T': edits->to_slashes = 1; continue; - case 'W': edits->to_windows = 1; continue; - default: - return; /* Should complain, but so what... */ - } - - fileval: - /* Handle :CHARS, where each char (without a following =) selects a - * particular file path element. On the first such char, we deselect all - * others (by setting ptr = "", len = 0) and for each char we select - * that element (by setting ptr = 0). - */ - edits->filemods = 1; - - if ( *mods != '=' ) - { - if ( !havezeroed++ ) - { - int i; - for ( i = 0; i < 6; ++i ) - { - edits->f.part[ i ].len = 0; - edits->f.part[ i ].ptr = ""; - } - } - - fp->ptr = 0; - continue; - } - - strval: - /* Handle :X=value, or :X */ - if ( *mods != '=' ) - { - fp->ptr = ""; - fp->len = 0; - } - else if ( ( p = strchr( mods, MAGIC_COLON ) ) ) - { - *p = 0; - fp->ptr = ++mods; - fp->len = p - mods; - mods = p + 1; - } - else - { - fp->ptr = ++mods; - fp->len = strlen( mods ); - mods += fp->len; - } - } -} - - -/* - * var_edit_file() - copy input target name to output, modifying filename. - */ - -static void var_edit_file( char * in, string * out, VAR_EDITS * edits ) -{ - PATHNAME pathname; - - /* Parse apart original filename, putting parts into "pathname". */ - path_parse( in, &pathname ); - - /* Replace any pathname with edits->f */ - if ( edits->f.f_grist .ptr ) pathname.f_grist = edits->f.f_grist; - if ( edits->f.f_root .ptr ) pathname.f_root = edits->f.f_root; - if ( edits->f.f_dir .ptr ) pathname.f_dir = edits->f.f_dir; - if ( edits->f.f_base .ptr ) pathname.f_base = edits->f.f_base; - if ( edits->f.f_suffix.ptr ) pathname.f_suffix = edits->f.f_suffix; - if ( edits->f.f_member.ptr ) pathname.f_member = edits->f.f_member; - - /* If requested, modify pathname to point to parent. */ - if ( edits->parent ) - path_parent( &pathname ); - - /* Put filename back together. */ - path_build( &pathname, out, 0 ); -} - - -/* - * var_edit_shift() - do upshift/downshift mods. - */ - -static void var_edit_shift( string * out, VAR_EDITS * edits ) -{ - /* Handle upshifting, downshifting and slash translation now. 
*/ - char * p; - for ( p = out->value; *p; ++p) - { - if ( edits->upshift ) - *p = toupper( *p ); - else if ( edits->downshift ) - *p = tolower( *p ); - if ( edits->to_slashes && ( *p == '\\' ) ) - *p = '/'; -# ifdef OS_CYGWIN - if ( edits->to_windows ) - { - char result[ MAX_PATH + 1 ]; - cygwin_conv_to_win32_path( out->value, result ); - assert( strlen( result ) <= MAX_PATH ); - string_free( out ); - string_copy( out, result ); - } -# endif - } - out->size = p - out->value; -} - - -#ifndef NDEBUG -void var_expand_unit_test() -{ - LOL lol[ 1 ]; - LIST * l; - LIST * l2; - LIST * expected = list_new( list_new( L0, newstr( "axb" ) ), newstr( "ayb" ) ); - LIST * e2; - char axyb[] = "a$(xy)b"; - char azb[] = "a$($(z))b"; - char path[] = "$(p:W)"; - -# ifdef OS_CYGWIN - char cygpath[ 256 ]; - cygwin_conv_to_posix_path( "c:\\foo\\bar", cygpath ); -# else - char cygpath[] = "/cygdrive/c/foo/bar"; -# endif - - lol_init(lol); - var_set( "xy", list_new( list_new( L0, newstr( "x" ) ), newstr( "y" ) ), VAR_SET ); - var_set( "z", list_new( L0, newstr( "xy" ) ), VAR_SET ); - var_set( "p", list_new( L0, newstr( cygpath ) ), VAR_SET ); - - l = var_expand( 0, axyb, axyb + sizeof( axyb ) - 1, lol, 0 ); - for ( l2 = l, e2 = expected; l2 && e2; l2 = list_next( l2 ), e2 = list_next( e2 ) ) - assert( !strcmp( e2->string, l2->string ) ); - assert( l2 == 0 ); - assert( e2 == 0 ); - list_free( l ); - - l = var_expand( 0, azb, azb + sizeof( azb ) - 1, lol, 0 ); - for ( l2 = l, e2 = expected; l2 && e2; l2 = list_next( l2 ), e2 = list_next( e2 ) ) - assert( !strcmp( e2->string, l2->string ) ); - assert( l2 == 0 ); - assert( e2 == 0 ); - list_free( l ); - - l = var_expand( 0, path, path + sizeof( path ) - 1, lol, 0 ); - assert( l != 0 ); - assert( list_next( l ) == 0 ); -# ifdef OS_CYGWIN - /* On some installations of cygwin the drive letter is expanded to other - * case. This has been reported to be the case if cygwin has been installed - * to C:\ as opposed to C:\cygwin. Since case of the drive letter will not - * matter, we allow for both. - */ - assert( !strcmp( l->string, "c:\\foo\\bar" ) || - !strcmp( l->string, "C:\\foo\\bar" ) ); -# else - assert( !strcmp( l->string, cygpath ) ); -# endif - list_free( l ); - list_free( expected ); - lol_free( lol ); -} -#endif diff --git a/jam-files/engine/expand.h b/jam-files/engine/expand.h deleted file mode 100644 index cc25d190..00000000 --- a/jam-files/engine/expand.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * expand.h - expand a buffer, given variable values - */ - -#include "lists.h" - -LIST *var_expand( LIST *l, char *in, char *end, LOL *lol, int cancopyin ); -void var_expand_unit_test(); diff --git a/jam-files/engine/filemac.c b/jam-files/engine/filemac.c deleted file mode 100644 index e69aa648..00000000 --- a/jam-files/engine/filemac.c +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. 
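
The "product" behavior exercised by var_expand_unit_test() above means that with x = 1 2 and y = a b, the expression $(x)$(y) yields the four-element list 1a 1b 2a 2b. A toy illustration of the rule, outside the engine's LIST machinery:

    #include <stdio.h>

    int main( void )
    {
        /* x = 1 2 ; y = a b ; "$(x)$(y)" expands to the cross product. */
        const char * x[] = { "1", "2" };
        const char * y[] = { "a", "b" };
        int i;
        int j;
        for ( i = 0; i < 2; ++i )
            for ( j = 0; j < 2; ++j )
                printf( "%s%s\n", x[ i ], y[ j ] );   /* 1a 1b 2a 2b */
        return 0;
    }
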
- * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" -# include "filesys.h" -# include "pathsys.h" - -# ifdef OS_MAC - -#include <Files.h> -#include <Folders.h> - -# include <:sys:stat.h> - -/* - * filemac.c - manipulate file names and scan directories on macintosh - * - * External routines: - * - * file_dirscan() - scan a directory for files - * file_time() - get timestamp of file, if not done by file_dirscan() - * file_archscan() - scan an archive for files - * - * File_dirscan() and file_archscan() call back a caller provided function - * for each file found. A flag to this callback function lets file_dirscan() - * and file_archscan() indicate that a timestamp is being provided with the - * file. If file_dirscan() or file_archscan() do not provide the file's - * timestamp, interested parties may later call file_time(). - * - * 04/08/94 (seiwald) - Coherent/386 support added. - * 12/19/94 (mikem) - solaris string table insanity support - * 02/14/95 (seiwald) - parse and build /xxx properly - * 05/03/96 (seiwald) - split into pathunix.c - * 11/21/96 (peterk) - BEOS does not have Unix-style archives - */ - - -void CopyC2PStr( char const * cstr, StringPtr pstr ) -{ - int len; - for ( len = 0; *cstr && ( len < 255 ); pstr[ ++len ] = *cstr++ ); - pstr[ 0 ] = len; -} - - -/* - * file_dirscan() - scan a directory for files. - */ - -void file_dirscan( char * dir, scanback func, void * closure ) -{ - PATHNAME f; - string filename[ 1 ]; - unsigned char fullPath[ 512 ]; - - FSSpec spec; - WDPBRec vol; - Str63 volName; - CInfoPBRec lastInfo; - int index = 1; - - /* First enter directory itself. */ - - memset( (char *)&f, '\0', sizeof( f ) ); - - f.f_dir.ptr = dir; - f.f_dir.len = strlen(dir); - - if ( DEBUG_BINDSCAN ) - printf( "scan directory %s\n", dir ); - - /* Special case ":" - enter it */ - - if ( ( f.f_dir.len == 1 ) && ( f.f_dir.ptr[0] == ':' ) ) - (*func)( closure, dir, 0 /* not stat()'ed */, (time_t)0 ); - - /* Now enter contents of directory */ - - vol.ioNamePtr = volName; - - if ( PBHGetVolSync( &vol ) ) - return; - - CopyC2PStr( dir, fullPath ); - - if ( FSMakeFSSpec( vol.ioWDVRefNum, vol.ioWDDirID, fullPath, &spec ) ) - return; - - lastInfo.dirInfo.ioVRefNum = spec.vRefNum; - lastInfo.dirInfo.ioDrDirID = spec.parID; - lastInfo.dirInfo.ioNamePtr = spec.name; - lastInfo.dirInfo.ioFDirIndex = 0; - lastInfo.dirInfo.ioACUser = 0; - - if ( PBGetCatInfoSync( &lastInfo ) ) - return; - - if ( !( lastInfo.dirInfo.ioFlAttrib & 0x10 ) ) - return; - - /* ioDrDirID must be reset each time. */ - spec.parID = lastInfo.dirInfo.ioDrDirID; - - string_new( filename ); - for ( ; ; ) - { - lastInfo.dirInfo.ioVRefNum = spec.vRefNum; - lastInfo.dirInfo.ioDrDirID = spec.parID; - lastInfo.dirInfo.ioNamePtr = fullPath; - lastInfo.dirInfo.ioFDirIndex = index++; - - if ( PBGetCatInfoSync( &lastInfo ) ) - return; - - f.f_base.ptr = (char *)fullPath + 1; - f.f_base.len = *fullPath; - - string_truncate( filename, 0 ); - path_build( &f, filename, 0 ); - (*func)( closure, filename->value, 0 /* not stat()'ed */, (time_t)0 ); - } - string_free( filename ); -} - - -/* - * file_time() - get timestamp of file, if not done by file_dirscan(). 
- */ - -int file_time( char * filename, time_t * time ) -{ - struct stat statbuf; - - if ( stat( filename, &statbuf ) < 0 ) - return -1; - - *time = statbuf.st_mtime; - - return 0; -} - - -int file_is_file( char * filename ) -{ - struct stat statbuf; - if ( stat( filename, &statbuf ) < 0 ) - return -1; - return S_ISREG( statbuf.st_mode ) ? 1 : 0; -} - -int file_mkdir(char *pathname) -{ - return mkdir(pathname, 0766); -} - - -/* - * file_archscan() - scan an archive for files. - */ - -void file_archscan( char * archive, scanback func, void * closure ) -{ -} - - -# endif /* macintosh */ diff --git a/jam-files/engine/filent.c b/jam-files/engine/filent.c deleted file mode 100644 index ab189576..00000000 --- a/jam-files/engine/filent.c +++ /dev/null @@ -1,387 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Copyright 2005 Rene Rivera. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" - -# include "filesys.h" -# include "pathsys.h" -# include "strings.h" -# include "newstr.h" - -# ifdef OS_NT - -# ifdef __BORLANDC__ -# if __BORLANDC__ < 0x550 -# include <dir.h> -# include <dos.h> -# endif -# undef FILENAME /* cpp namespace collision */ -# define _finddata_t ffblk -# endif - -# include <io.h> -# include <sys/stat.h> -# include <ctype.h> -# include <direct.h> - -/* - * filent.c - scan directories and archives on NT - * - * External routines: - * - * file_dirscan() - scan a directory for files - * file_time() - get timestamp of file, if not done by file_dirscan() - * file_archscan() - scan an archive for files - * - * File_dirscan() and file_archscan() call back a caller provided function - * for each file found. A flag to this callback function lets file_dirscan() - * and file_archscan() indicate that a timestamp is being provided with the - * file. If file_dirscan() or file_archscan() do not provide the file's - * timestamp, interested parties may later call file_time(). - * - * 07/10/95 (taylor) Findfirst() returns the first file on NT. - * 05/03/96 (seiwald) split apart into pathnt.c - */ - -/* - * file_dirscan() - scan a directory for files - */ - -void file_dirscan( char * dir, scanback func, void * closure ) -{ - PROFILE_ENTER( FILE_DIRSCAN ); - - file_info_t * d = 0; - - dir = short_path_to_long_path( dir ); - - /* First enter directory itself */ - - d = file_query( dir ); - - if ( !d || !d->is_dir ) - { - PROFILE_EXIT( FILE_DIRSCAN ); - return; - } - - if ( !d->files ) - { - PATHNAME f; - string filespec[ 1 ]; - string filename[ 1 ]; - long handle; - int ret; - struct _finddata_t finfo[ 1 ]; - LIST * files = L0; - int d_length = strlen( d->name ); - - memset( (char *)&f, '\0', sizeof( f ) ); - - f.f_dir.ptr = d->name; - f.f_dir.len = d_length; - - /* Now enter contents of directory */ - - /* Prepare file search specification for the findfirst() API. */ - if ( d_length == 0 ) - string_copy( filespec, ".\\*" ); - else - { - /* - * We can not simply assume the given folder name will never include - * its trailing path separator or otherwise we would not support the - * Windows root folder specified without its drive letter, i.e. '\'. 
- */ - char trailingChar = d->name[ d_length - 1 ] ; - string_copy( filespec, d->name ); - if ( ( trailingChar != '\\' ) && ( trailingChar != '/' ) ) - string_append( filespec, "\\" ); - string_append( filespec, "*" ); - } - - if ( DEBUG_BINDSCAN ) - printf( "scan directory %s\n", dir ); - - #if defined(__BORLANDC__) && __BORLANDC__ < 0x550 - if ( ret = findfirst( filespec->value, finfo, FA_NORMAL | FA_DIREC ) ) - { - string_free( filespec ); - PROFILE_EXIT( FILE_DIRSCAN ); - return; - } - - string_new ( filename ); - while ( !ret ) - { - file_info_t * ff = 0; - - f.f_base.ptr = finfo->ff_name; - f.f_base.len = strlen( finfo->ff_name ); - - string_truncate( filename, 0 ); - path_build( &f, filename ); - - files = list_new( files, newstr(filename->value) ); - ff = file_info( filename->value ); - ff->is_file = finfo->ff_attrib & FA_DIREC ? 0 : 1; - ff->is_dir = finfo->ff_attrib & FA_DIREC ? 1 : 0; - ff->size = finfo->ff_fsize; - ff->time = (finfo->ff_ftime << 16) | finfo->ff_ftime; - - ret = findnext( finfo ); - } - # else - handle = _findfirst( filespec->value, finfo ); - - if ( ret = ( handle < 0L ) ) - { - string_free( filespec ); - PROFILE_EXIT( FILE_DIRSCAN ); - return; - } - - string_new( filename ); - while ( !ret ) - { - file_info_t * ff = 0; - - f.f_base.ptr = finfo->name; - f.f_base.len = strlen( finfo->name ); - - string_truncate( filename, 0 ); - path_build( &f, filename, 0 ); - - files = list_new( files, newstr( filename->value ) ); - ff = file_info( filename->value ); - ff->is_file = finfo->attrib & _A_SUBDIR ? 0 : 1; - ff->is_dir = finfo->attrib & _A_SUBDIR ? 1 : 0; - ff->size = finfo->size; - ff->time = finfo->time_write; - - ret = _findnext( handle, finfo ); - } - - _findclose( handle ); - # endif - string_free( filename ); - string_free( filespec ); - - d->files = files; - } - - /* Special case \ or d:\ : enter it */ - { - unsigned long len = strlen(d->name); - if ( len == 1 && d->name[0] == '\\' ) - (*func)( closure, d->name, 1 /* stat()'ed */, d->time ); - else if ( len == 3 && d->name[1] == ':' ) { - (*func)( closure, d->name, 1 /* stat()'ed */, d->time ); - /* We've just entered 3-letter drive name spelling (with trailing - slash), into the hash table. Now enter two-letter variant, - without trailing slash, so that if we try to check whether - "c:" exists, we hit it. - - Jam core has workarounds for that. Given: - x = c:\whatever\foo ; - p = $(x:D) ; - p2 = $(p:D) ; - There will be no trailing slash in $(p), but there will be one - in $(p2). But, that seems rather fragile. - */ - d->name[2] = 0; - (*func)( closure, d->name, 1 /* stat()'ed */, d->time ); - } - } - - /* Now enter contents of directory */ - if ( d->files ) - { - LIST * files = d->files; - while ( files ) - { - file_info_t * ff = file_info( files->string ); - (*func)( closure, ff->name, 1 /* stat()'ed */, ff->time ); - files = list_next( files ); - } - } - - PROFILE_EXIT( FILE_DIRSCAN ); -} - -file_info_t * file_query( char * filename ) -{ - file_info_t * ff = file_info( filename ); - if ( ! ff->time ) - { - struct stat statbuf; - - if ( stat( *filename ? filename : ".", &statbuf ) < 0 ) - return 0; - - ff->is_file = statbuf.st_mode & S_IFREG ? 1 : 0; - ff->is_dir = statbuf.st_mode & S_IFDIR ? 1 : 0; - ff->size = statbuf.st_size; - ff->time = statbuf.st_mtime ? 
statbuf.st_mtime : 1; - } - return ff; -} - -/* - * file_time() - get timestamp of file, if not done by file_dirscan() - */ - -int -file_time( - char *filename, - time_t *time ) -{ - file_info_t * ff = file_query( filename ); - if ( !ff ) return -1; - *time = ff->time; - return 0; -} - -int file_is_file(char* filename) -{ - file_info_t * ff = file_query( filename ); - if ( !ff ) return -1; - return ff->is_file; -} - -int file_mkdir(char *pathname) -{ - return _mkdir(pathname); -} - -/* - * file_archscan() - scan an archive for files - */ - -/* Straight from SunOS */ - -#define ARMAG "!<arch>\n" -#define SARMAG 8 - -#define ARFMAG "`\n" - -struct ar_hdr { - char ar_name[16]; - char ar_date[12]; - char ar_uid[6]; - char ar_gid[6]; - char ar_mode[8]; - char ar_size[10]; - char ar_fmag[2]; -}; - -# define SARFMAG 2 -# define SARHDR sizeof( struct ar_hdr ) - -void -file_archscan( - char *archive, - scanback func, - void *closure ) -{ - struct ar_hdr ar_hdr; - char *string_table = 0; - char buf[ MAXJPATH ]; - long offset; - int fd; - - if ( ( fd = open( archive, O_RDONLY | O_BINARY, 0 ) ) < 0 ) - return; - - if ( read( fd, buf, SARMAG ) != SARMAG || - strncmp( ARMAG, buf, SARMAG ) ) - { - close( fd ); - return; - } - - offset = SARMAG; - - if ( DEBUG_BINDSCAN ) - printf( "scan archive %s\n", archive ); - - while ( ( read( fd, &ar_hdr, SARHDR ) == SARHDR ) && - !memcmp( ar_hdr.ar_fmag, ARFMAG, SARFMAG ) ) - { - long lar_date; - long lar_size; - char *name = 0; - char *endname; - char *c; - - sscanf( ar_hdr.ar_date, "%ld", &lar_date ); - sscanf( ar_hdr.ar_size, "%ld", &lar_size ); - - lar_size = ( lar_size + 1 ) & ~1; - - if (ar_hdr.ar_name[0] == '/' && ar_hdr.ar_name[1] == '/' ) - { - /* this is the "string table" entry of the symbol table, - ** which holds strings of filenames that are longer than - ** 15 characters (ie. don't fit into a ar_name - */ - - string_table = BJAM_MALLOC_ATOMIC(lar_size+1); - if (read(fd, string_table, lar_size) != lar_size) - printf("error reading string table\n"); - string_table[lar_size] = '\0'; - offset += SARHDR + lar_size; - continue; - } - else if (ar_hdr.ar_name[0] == '/' && ar_hdr.ar_name[1] != ' ') - { - /* Long filenames are recognized by "/nnnn" where nnnn is - ** the offset of the string in the string table represented - ** in ASCII decimals. - */ - - name = string_table + atoi( ar_hdr.ar_name + 1 ); - for ( endname = name; *endname && *endname != '\n'; ++endname) {} - } - else - { - /* normal name */ - name = ar_hdr.ar_name; - endname = name + sizeof( ar_hdr.ar_name ); - } - - /* strip trailing white-space, slashes, and backslashes */ - - while ( endname-- > name ) - if ( !isspace(*endname) && ( *endname != '\\' ) && ( *endname != '/' ) ) - break; - *++endname = 0; - - /* strip leading directory names, an NT specialty */ - - if ( c = strrchr( name, '/' ) ) - name = c + 1; - if ( c = strrchr( name, '\\' ) ) - name = c + 1; - - sprintf( buf, "%s(%.*s)", archive, endname - name, name ); - (*func)( closure, buf, 1 /* time valid */, (time_t)lar_date ); - - offset += SARHDR + lar_size; - lseek( fd, offset, 0 ); - } - - close( fd ); -} - -# endif /* NT */ diff --git a/jam-files/engine/fileos2.c b/jam-files/engine/fileos2.c deleted file mode 100644 index af2373ea..00000000 --- a/jam-files/engine/fileos2.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. 
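Backing up to file_archscan() above: every archive member is introduced by the fixed-width, printable-ASCII ar_hdr just defined. For a hypothetical member the 60 header bytes break down as follows (offsets follow from the struct layout; the example values are invented for illustration):

    offset  field     size  example contents     used as
         0  ar_name     16  "hello.o/        "   member name (trailing spaces
                                                 and slashes stripped later)
        16  ar_date     12  "1334070000  "       sscanf %ld -> member date
        28  ar_uid       6  "1000  "             (unused here)
        34  ar_gid       6  "1000  "             (unused here)
        40  ar_mode      8  "100644  "           (unused here)
        46  ar_size     10  "2048      "         sscanf %ld -> member size
        58  ar_fmag      2  "`\n"                must equal ARFMAG

The member's data bytes follow immediately and are padded to an even length, which is why the scanner rounds with ( lar_size + 1 ) & ~1 before seeking to the next header.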
- * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" -# include "filesys.h" -# include "pathsys.h" - -/* note that we use "fileunix.c" when compiling with EMX on OS/2 */ -# if defined(OS_OS2) && !defined(__EMX__) - -# include <io.h> -# include <dos.h> - -/* - * fileos2.c - scan directories and archives on NT - * - * External routines: - * - * file_dirscan() - scan a directory for files - * file_time() - get timestamp of file, if not done by file_dirscan() - * file_archscan() - scan an archive for files - * - * File_dirscan() and file_archscan() call back a caller provided function - * for each file found. A flag to this callback function lets file_dirscan() - * and file_archscan() indicate that a timestamp is being provided with the - * file. If file_dirscan() or file_archscan() do not provide the file's - * timestamp, interested parties may later call file_time(). - * - * 07/10/95 (taylor) Findfirst() returns the first file on NT. - * 05/03/96 (seiwald) split apart into pathnt.c - * 09/22/00 (seiwald) handle \ and c:\ specially: don't add extra / - */ - -/* - * file_dirscan() - scan a directory for files - */ - -void -file_dirscan( - char *dir, - scanback func, - void *closure ) -{ - PATHNAME f; - string filespec[1]; - long handle; - int ret; - struct _find_t finfo[1]; - - /* First enter directory itself */ - - memset( (char *)&f, '\0', sizeof( f ) ); - - f.f_dir.ptr = dir; - f.f_dir.len = strlen(dir); - - dir = *dir ? dir : "."; - - /* Special case \ or d:\ : enter it */ - string_copy( filespec, dir ); - - if ( f.f_dir.len == 1 && f.f_dir.ptr[0] == '\\' ) - (*func)( closure, dir, 0 /* not stat()'ed */, (time_t)0 ); - else if ( f.f_dir.len == 3 && f.f_dir.ptr[1] == ':' ) - (*func)( closure, dir, 0 /* not stat()'ed */, (time_t)0 ); - else - string_push_back( filespec, '/' ); - - string_push_back( filespec, '*' ); - - /* Now enter contents of directory */ - - if ( DEBUG_BINDSCAN ) - printf( "scan directory %s\n", filespec->value ); - - /* Time info in dos find_t is not very useful. It consists */ - /* of a separate date and time, and putting them together is */ - /* not easy. So we leave that to a later stat() call. */ - - if ( !_dos_findfirst( filespec->value, _A_NORMAL|_A_RDONLY|_A_SUBDIR, finfo ) ) - { - string filename[1]; - string_new( filename ); - do - { - f.f_base.ptr = finfo->name; - f.f_base.len = strlen( finfo->name ); - - string_truncate( filename, 0 ); - path_build( &f, filename, 0 ); - (*func)( closure, filename->value, 0 /* not stat()'ed */, (time_t)0 ); - } - while ( !_dos_findnext( finfo ) ); - string_free( filename ); - } -} - -/* - * file_time() - get timestamp of file, if not done by file_dirscan() - */ - -int -file_time( - char *filename, - time_t *time ) -{ - /* This is called on OS2, not NT. */ - /* NT fills in the time in the dirscan. 
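The "putting them together is not easy" remark in file_dirscan() above refers to the packed FAT date and time words that the DOS find interface reports. For reference, a minimal sketch of that decoding, assuming the standard FAT bit layout; fat_to_time_t is a hypothetical helper, not part of Jam:

    #include <string.h>
    #include <time.h>

    /* Decode DOS/FAT packed date and time words into a Unix time_t.
     * date: bits 15-9 = years since 1980, 8-5 = month 1-12, 4-0 = day.
     * time: bits 15-11 = hours, 10-5 = minutes, 4-0 = seconds / 2.   */
    static time_t fat_to_time_t( unsigned date, unsigned tim )
    {
        struct tm t;
        memset( &t, 0, sizeof( t ) );
        t.tm_year = ( ( date >> 9 ) & 0x7f ) + 80;  /* tm_year counts from 1900 */
        t.tm_mon  = ( ( date >> 5 ) & 0x0f ) - 1;   /* tm_mon counts from 0     */
        t.tm_mday =   date          & 0x1f;
        t.tm_hour = ( tim >> 11 ) & 0x1f;
        t.tm_min  = ( tim >>  5 ) & 0x3f;
        t.tm_sec  = ( tim         & 0x1f ) * 2;     /* stored at 2s resolution  */
        t.tm_isdst = -1;                            /* let mktime() decide DST  */
        return mktime( &t );
    }

Deferring to a later stat() instead, as the code above does, avoids both this arithmetic and the local-time ambiguity that mktime() has to guess at.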
*/ - - struct stat statbuf; - - if ( stat( filename, &statbuf ) < 0 ) - return -1; - - *time = statbuf.st_mtime; - - return 0; -} - -void -file_archscan( - char *archive, - scanback func, - void *closure ) -{ -} - -# endif /* OS2 && !__EMX__ */ - diff --git a/jam-files/engine/filesys.c b/jam-files/engine/filesys.c deleted file mode 100644 index eb62ed40..00000000 --- a/jam-files/engine/filesys.c +++ /dev/null @@ -1,83 +0,0 @@ -# include "jam.h" -# include "pathsys.h" -# include "strings.h" -# include "newstr.h" -# include "filesys.h" -# include "lists.h" - -void file_build1( PATHNAME * f, string * file ) -{ - if ( DEBUG_SEARCH ) - { - printf("build file: "); - if ( f->f_root.len ) - printf( "root = '%.*s' ", f->f_root.len, f->f_root.ptr ); - if ( f->f_dir.len ) - printf( "dir = '%.*s' ", f->f_dir.len, f->f_dir.ptr ); - if ( f->f_base.len ) - printf( "base = '%.*s' ", f->f_base.len, f->f_base.ptr ); - printf( "\n" ); - } - - /* Start with the grist. If the current grist isn't */ - /* surrounded by <>'s, add them. */ - - if ( f->f_grist.len ) - { - if ( f->f_grist.ptr[0] != '<' ) - string_push_back( file, '<' ); - string_append_range( - file, f->f_grist.ptr, f->f_grist.ptr + f->f_grist.len ); - if ( file->value[file->size - 1] != '>' ) - string_push_back( file, '>' ); - } -} - -static struct hash * filecache_hash = 0; -static file_info_t filecache_finfo; - -file_info_t * file_info(char * filename) -{ - file_info_t *finfo = &filecache_finfo; - - if ( !filecache_hash ) - filecache_hash = hashinit( sizeof( file_info_t ), "file_info" ); - - finfo->name = filename; - finfo->is_file = 0; - finfo->is_dir = 0; - finfo->size = 0; - finfo->time = 0; - finfo->files = 0; - if ( hashenter( filecache_hash, (HASHDATA**)&finfo ) ) - { - /* printf( "file_info: %s\n", filename ); */ - finfo->name = newstr( finfo->name ); - } - - return finfo; -} - -static LIST * files_to_remove = L0; - -static void remove_files_atexit(void) -{ - /* we do pop front in case this exit function is called - more than once */ - while ( files_to_remove ) - { - remove( files_to_remove->string ); - files_to_remove = list_pop_front( files_to_remove ); - } -} - -void file_done() -{ - remove_files_atexit(); - hashdone( filecache_hash ); -} - -void file_remove_atexit( const char * path ) -{ - files_to_remove = list_new( files_to_remove, newstr((char*)path) ); -} diff --git a/jam-files/engine/filesys.h b/jam-files/engine/filesys.h deleted file mode 100644 index efc081d1..00000000 --- a/jam-files/engine/filesys.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. 
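One subtlety in filesys.c above is worth spelling out: file_info() initializes time to 0, and the per-OS file_query() implementations (see filent.c above and fileunix.c below) treat time == 0 as "not yet stat()ed", which is why they store st_mtime ? st_mtime : 1 rather than a raw 0 for an existing file. A minimal sketch of the resulting contract (hypothetical caller, assuming a file named Jamfile exists):

    #include <assert.h>
    #include "filesys.h"

    static void query_twice( void )
    {
        /* First call stat()s the file and fills in the cached record. */
        file_info_t * first = file_query( "Jamfile" );

        /* Second call is served from the in-memory hash: same record back,
         * no second stat().  time is never left at 0 for an existing file,
         * because 0 is reserved as the "never stat()ed" marker.           */
        file_info_t * second = file_query( "Jamfile" );

        assert( first && first == second && first->time != 0 );
    }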
- * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -/* - * filesys.h - OS specific file routines - */ - -#ifndef FILESYS_DWA20011025_H -# define FILESYS_DWA20011025_H - -# include "pathsys.h" -#include "hash.h" -#include "lists.h" - -typedef void (*scanback)( void *closure, char *file, int found, time_t t ); - -void file_dirscan( char *dir, scanback func, void *closure ); -void file_archscan( char *arch, scanback func, void *closure ); - -int file_time( char *filename, time_t *time ); - -void file_build1(PATHNAME *f, string* file) ; -int file_is_file(char* filename); -int file_mkdir(char *pathname); - -typedef struct file_info_t file_info_t ; -struct file_info_t -{ - char * name; - short is_file; - short is_dir; - unsigned long size; - time_t time; - LIST * files; -}; - - -/* Creates a pointer to information about file 'filename', creating it as - * necessary. If created, the structure will be default initialized. - */ -file_info_t * file_info( char * filename ); - -/* Returns information about a file, queries the OS if needed. */ -file_info_t * file_query( char * filename ); - -void file_done(); - -/* Marks a path/file to be removed when jam exits. */ -void file_remove_atexit( const char * path ); - -#endif diff --git a/jam-files/engine/fileunix.c b/jam-files/engine/fileunix.c deleted file mode 100644 index 680c3f53..00000000 --- a/jam-files/engine/fileunix.c +++ /dev/null @@ -1,501 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Copyright 2005 Rene Rivera. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" -# include "filesys.h" -# include "strings.h" -# include "pathsys.h" -# include "newstr.h" -# include <stdio.h> -# include <sys/stat.h> - -#if defined(sun) || defined(__sun) || defined(linux) -# include <unistd.h> /* needed for read and close prototype */ -#endif - -# ifdef USE_FILEUNIX - -#if defined(sun) || defined(__sun) -# include <unistd.h> /* needed for read and close prototype */ -#endif - -# if defined( OS_SEQUENT ) || \ - defined( OS_DGUX ) || \ - defined( OS_SCO ) || \ - defined( OS_ISC ) -# define PORTAR 1 -# endif - -# ifdef __EMX__ -# include <sys/types.h> -# include <sys/stat.h> -# endif - -# if defined( OS_RHAPSODY ) || \ - defined( OS_MACOSX ) || \ - defined( OS_NEXT ) -/* need unistd for rhapsody's proper lseek */ -# include <sys/dir.h> -# include <unistd.h> -# define STRUCT_DIRENT struct direct -# else -# include <dirent.h> -# define STRUCT_DIRENT struct dirent -# endif - -# ifdef OS_COHERENT -# include <arcoff.h> -# define HAVE_AR -# endif - -# if defined( OS_MVS ) || \ - defined( OS_INTERIX ) - -#define ARMAG "!<arch>\n" -#define SARMAG 8 -#define ARFMAG "`\n" - -struct ar_hdr /* archive file member header - printable ascii */ -{ - char ar_name[16]; /* file member name - `/' terminated */ - char ar_date[12]; /* file member date - decimal */ - char ar_uid[6]; /* file member user id - decimal */ - char ar_gid[6]; /* file member group id - decimal */ - char ar_mode[8]; /* file member mode - octal */ - char ar_size[10]; /* file member size - decimal */ - char ar_fmag[2]; /* ARFMAG - string to end header */ -}; - -# define HAVE_AR -# endif - -# if defined( OS_QNX ) || \ - defined( OS_BEOS ) || \ - defined( OS_MPEIX ) -# define NO_AR 
-# define HAVE_AR -# endif - -# ifndef HAVE_AR - -# ifdef OS_AIX -/* Define those for AIX to get the definitions for both the small and the - * big variant of the archive file format. */ -# define __AR_SMALL__ -# define __AR_BIG__ -# endif - -# include <ar.h> -# endif - -/* - * fileunix.c - manipulate file names and scan directories on UNIX/AmigaOS - * - * External routines: - * - * file_dirscan() - scan a directory for files - * file_time() - get timestamp of file, if not done by file_dirscan() - * file_archscan() - scan an archive for files - * - * File_dirscan() and file_archscan() call back a caller provided function - * for each file found. A flag to this callback function lets file_dirscan() - * and file_archscan() indicate that a timestamp is being provided with the - * file. If file_dirscan() or file_archscan() do not provide the file's - * timestamp, interested parties may later call file_time(). - * - * 04/08/94 (seiwald) - Coherent/386 support added. - * 12/19/94 (mikem) - solaris string table insanity support - * 02/14/95 (seiwald) - parse and build /xxx properly - * 05/03/96 (seiwald) - split into pathunix.c - * 11/21/96 (peterk) - BEOS does not have Unix-style archives - */ - - -/* - * file_dirscan() - scan a directory for files. - */ - -void file_dirscan( char * dir, scanback func, void * closure ) -{ - PROFILE_ENTER( FILE_DIRSCAN ); - - file_info_t * d = 0; - - d = file_query( dir ); - - if ( !d || !d->is_dir ) - { - PROFILE_EXIT( FILE_DIRSCAN ); - return; - } - - if ( ! d->files ) - { - LIST* files = L0; - PATHNAME f; - DIR *dd; - STRUCT_DIRENT *dirent; - string filename[1]; - - /* First enter directory itself */ - - memset( (char *)&f, '\0', sizeof( f ) ); - - f.f_dir.ptr = dir; - f.f_dir.len = strlen(dir); - - dir = *dir ? dir : "."; - - /* Now enter contents of directory. */ - - if ( !( dd = opendir( dir ) ) ) - { - PROFILE_EXIT( FILE_DIRSCAN ); - return; - } - - if ( DEBUG_BINDSCAN ) - printf( "scan directory %s\n", dir ); - - string_new( filename ); - while ( ( dirent = readdir( dd ) ) ) - { - # ifdef old_sinix - /* Broken structure definition on sinix. */ - f.f_base.ptr = dirent->d_name - 2; - # else - f.f_base.ptr = dirent->d_name; - # endif - f.f_base.len = strlen( f.f_base.ptr ); - - string_truncate( filename, 0 ); - path_build( &f, filename, 0 ); - - files = list_new( files, newstr(filename->value) ); - file_query( filename->value ); - } - string_free( filename ); - - closedir( dd ); - - d->files = files; - } - - /* Special case / : enter it */ - { - unsigned long len = strlen(d->name); - if ( ( len == 1 ) && ( d->name[0] == '/' ) ) - (*func)( closure, d->name, 1 /* stat()'ed */, d->time ); - } - - /* Now enter contents of directory */ - if ( d->files ) - { - LIST * files = d->files; - while ( files ) - { - file_info_t * ff = file_info( files->string ); - (*func)( closure, ff->name, 1 /* stat()'ed */, ff->time ); - files = list_next( files ); - } - } - - PROFILE_EXIT( FILE_DIRSCAN ); -} - - -file_info_t * file_query( char * filename ) -{ - file_info_t * ff = file_info( filename ); - if ( ! ff->time ) - { - struct stat statbuf; - - if ( stat( *filename ? filename : ".", &statbuf ) < 0 ) - return 0; - - ff->is_file = statbuf.st_mode & S_IFREG ? 1 : 0; - ff->is_dir = statbuf.st_mode & S_IFDIR ? 1 : 0; - ff->size = statbuf.st_size; - ff->time = statbuf.st_mtime ? 
statbuf.st_mtime : 1; - } - return ff; -} - -/* - * file_time() - get timestamp of file, if not done by file_dirscan() - */ - -int -file_time( - char *filename, - time_t *time ) -{ - file_info_t * ff = file_query( filename ); - if ( !ff ) return -1; - *time = ff->time; - return 0; -} - -int file_is_file(char* filename) -{ - file_info_t * ff = file_query( filename ); - if ( !ff ) return -1; - return ff->is_file; -} - -int file_mkdir(char* pathname) -{ - return mkdir(pathname, 0766); -} - -/* - * file_archscan() - scan an archive for files - */ - -# ifndef AIAMAG /* God-fearing UNIX */ - -# define SARFMAG 2 -# define SARHDR sizeof( struct ar_hdr ) - -void -file_archscan( - char *archive, - scanback func, - void *closure ) -{ -# ifndef NO_AR - struct ar_hdr ar_hdr; - char buf[ MAXJPATH ]; - long offset; - char *string_table = 0; - int fd; - - if ( ( fd = open( archive, O_RDONLY, 0 ) ) < 0 ) - return; - - if ( read( fd, buf, SARMAG ) != SARMAG || - strncmp( ARMAG, buf, SARMAG ) ) - { - close( fd ); - return; - } - - offset = SARMAG; - - if ( DEBUG_BINDSCAN ) - printf( "scan archive %s\n", archive ); - - while ( ( read( fd, &ar_hdr, SARHDR ) == SARHDR ) - && !( memcmp( ar_hdr.ar_fmag, ARFMAG, SARFMAG ) -#ifdef ARFZMAG - /* OSF also has a compressed format */ - && memcmp( ar_hdr.ar_fmag, ARFZMAG, SARFMAG ) -#endif - ) ) - { - char lar_name_[257]; - char * lar_name = lar_name_ + 1; - long lar_date; - long lar_size; - long lar_offset; - char * c; - char * src; - char * dest; - - strncpy( lar_name, ar_hdr.ar_name, sizeof(ar_hdr.ar_name) ); - - sscanf( ar_hdr.ar_date, "%ld", &lar_date ); - sscanf( ar_hdr.ar_size, "%ld", &lar_size ); - - if (ar_hdr.ar_name[0] == '/') - { - if (ar_hdr.ar_name[1] == '/') - { - /* this is the "string table" entry of the symbol table, - ** which holds strings of filenames that are longer than - ** 15 characters (ie. don't fit into a ar_name - */ - - string_table = (char *)BJAM_MALLOC_ATOMIC(lar_size); - lseek(fd, offset + SARHDR, 0); - if (read(fd, string_table, lar_size) != lar_size) - printf("error reading string table\n"); - } - else if (string_table && ar_hdr.ar_name[1] != ' ') - { - /* Long filenames are recognized by "/nnnn" where nnnn is - ** the offset of the string in the string table represented - ** in ASCII decimals. 
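A worked example of that convention, with all bytes invented for illustration: suppose the archive's string-table member "//" holds

    very_long_object_name.o/\nanother.o/\n

Then a later member whose ar_name field reads "/0" refers to offset 0, resolving to very_long_object_name.o, and one reading "/25" refers to offset 25, resolving to another.o. The copy loop that follows stops at the terminating '/', and the later trimming pass strips any remaining spaces and slashes, leaving the bare member name.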
- */ - dest = lar_name; - lar_offset = atoi(lar_name + 1); - src = &string_table[lar_offset]; - while (*src != '/') - *dest++ = *src++; - *dest = '/'; - } - } - - c = lar_name - 1; - while ( ( *++c != ' ' ) && ( *c != '/' ) ) ; - *c = '\0'; - - if ( DEBUG_BINDSCAN ) - printf( "archive name %s found\n", lar_name ); - - sprintf( buf, "%s(%s)", archive, lar_name ); - - (*func)( closure, buf, 1 /* time valid */, (time_t)lar_date ); - - offset += SARHDR + ( ( lar_size + 1 ) & ~1 ); - lseek( fd, offset, 0 ); - } - - if (string_table) - BJAM_FREE(string_table); - - close( fd ); - -# endif /* NO_AR */ - -} - -# else /* AIAMAG - RS6000 AIX */ - -static void file_archscan_small( - int fd, char const *archive, scanback func, void *closure) -{ - struct fl_hdr fl_hdr; - - struct { - struct ar_hdr hdr; - char pad[ 256 ]; - } ar_hdr ; - - char buf[ MAXJPATH ]; - long offset; - - if ( read( fd, (char *)&fl_hdr, FL_HSZ ) != FL_HSZ) - return; - - sscanf( fl_hdr.fl_fstmoff, "%ld", &offset ); - - if ( DEBUG_BINDSCAN ) - printf( "scan archive %s\n", archive ); - - while ( ( offset > 0 ) - && ( lseek( fd, offset, 0 ) >= 0 ) - && ( read( fd, &ar_hdr, sizeof( ar_hdr ) ) >= sizeof( ar_hdr.hdr ) ) ) - { - long lar_date; - int lar_namlen; - - sscanf( ar_hdr.hdr.ar_namlen, "%d" , &lar_namlen ); - sscanf( ar_hdr.hdr.ar_date , "%ld", &lar_date ); - sscanf( ar_hdr.hdr.ar_nxtmem, "%ld", &offset ); - - if ( !lar_namlen ) - continue; - - ar_hdr.hdr._ar_name.ar_name[ lar_namlen ] = '\0'; - - sprintf( buf, "%s(%s)", archive, ar_hdr.hdr._ar_name.ar_name ); - - (*func)( closure, buf, 1 /* time valid */, (time_t)lar_date ); - } -} - -/* Check for OS version which supports the big variant. */ -#ifdef AR_HSZ_BIG - -static void file_archscan_big( - int fd, char const *archive, scanback func, void *closure) -{ - struct fl_hdr_big fl_hdr; - - struct { - struct ar_hdr_big hdr; - char pad[ 256 ]; - } ar_hdr ; - - char buf[ MAXJPATH ]; - long long offset; - - if ( read( fd, (char *)&fl_hdr, FL_HSZ_BIG) != FL_HSZ_BIG) - return; - - sscanf( fl_hdr.fl_fstmoff, "%lld", &offset ); - - if ( DEBUG_BINDSCAN ) - printf( "scan archive %s\n", archive ); - - while ( ( offset > 0 ) - && ( lseek( fd, offset, 0 ) >= 0 ) - && ( read( fd, &ar_hdr, sizeof( ar_hdr ) ) >= sizeof( ar_hdr.hdr ) ) ) - { - long lar_date; - int lar_namlen; - - sscanf( ar_hdr.hdr.ar_namlen, "%d" , &lar_namlen ); - sscanf( ar_hdr.hdr.ar_date , "%ld" , &lar_date ); - sscanf( ar_hdr.hdr.ar_nxtmem, "%lld", &offset ); - - if ( !lar_namlen ) - continue; - - ar_hdr.hdr._ar_name.ar_name[ lar_namlen ] = '\0'; - - sprintf( buf, "%s(%s)", archive, ar_hdr.hdr._ar_name.ar_name ); - - (*func)( closure, buf, 1 /* time valid */, (time_t)lar_date ); - } - -} - -#endif /* AR_HSZ_BIG */ - -void file_archscan(char *archive, scanback func, void *closure) -{ - int fd; - char fl_magic[SAIAMAG]; - - if (( fd = open(archive, O_RDONLY, 0)) < 0) - return; - - if (read( fd, fl_magic, SAIAMAG) != SAIAMAG - || lseek(fd, 0, SEEK_SET) == -1) - { - close(fd); - return; - } - - if (strncmp(AIAMAG, fl_magic, SAIAMAG) == 0) - { - /* read small variant */ - file_archscan_small(fd, archive, func, closure); - } -#ifdef AR_HSZ_BIG - else if (strncmp(AIAMAGBIG, fl_magic, SAIAMAG) == 0) - { - /* read big variant */ - file_archscan_big(fd, archive, func, closure); - } -#endif - - close( fd ); -} - -# endif /* AIAMAG - RS6000 AIX */ - -# endif /* USE_FILEUNIX */ diff --git a/jam-files/engine/filevms.c b/jam-files/engine/filevms.c deleted file mode 100644 index d2ab2047..00000000 --- a/jam-files/engine/filevms.c +++ 
/dev/null @@ -1,327 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" -# include "filesys.h" -# include "pathsys.h" - -# ifdef OS_VMS - -/* - * filevms.c - scan directories and libaries on VMS - * - * External routines: - * - * file_dirscan() - scan a directory for files - * file_time() - get timestamp of file, if not done by file_dirscan() - * file_archscan() - scan an archive for files - * - * File_dirscan() and file_archscan() call back a caller provided function - * for each file found. A flag to this callback function lets file_dirscan() - * and file_archscan() indicate that a timestamp is being provided with the - * file. If file_dirscan() or file_archscan() do not provide the file's - * timestamp, interested parties may later call file_time(). - * - * 02/09/95 (seiwald) - bungled R=[xxx] - was using directory length! - * 05/03/96 (seiwald) - split into pathvms.c - */ - -# include <rms.h> -# include <iodef.h> -# include <ssdef.h> -# include <string.h> -# include <stdlib.h> -# include <stdio.h> -# include <descrip.h> - -#include <lbrdef.h> -#include <credef.h> -#include <mhddef.h> -#include <lhidef.h> -#include <lib$routines.h> -#include <starlet.h> - -/* Supply missing prototypes for lbr$-routines*/ - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -int lbr$set_module( - void **, - unsigned long *, - struct dsc$descriptor_s *, - unsigned short *, - void * ); - -int lbr$open( void **, - struct dsc$descriptor_s *, - void *, - void *, - void *, - void *, - void * ); - -int lbr$ini_control( - void **, - unsigned long *, - unsigned long *, - void * ); - -int lbr$get_index( - void **, - unsigned long *, - int (*func)( struct dsc$descriptor_s *, unsigned long *), - void * ); - -int lbr$close( - void ** ); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -static void -file_cvttime( - unsigned int *curtime, - time_t *unixtime ) -{ - static const size_t divisor = 10000000; - static unsigned int bastim[2] = { 0x4BEB4000, 0x007C9567 }; /* 1/1/1970 */ - int delta[2], remainder; - - lib$subx( curtime, bastim, delta ); - lib$ediv( &divisor, delta, unixtime, &remainder ); -} - -# define DEFAULT_FILE_SPECIFICATION "[]*.*;0" - -# define min( a,b ) ((a)<(b)?(a):(b)) - -void -file_dirscan( - char *dir, - scanback func, - void *closure ) -{ - - struct FAB xfab; - struct NAM xnam; - struct XABDAT xab; - char esa[256]; - char filename[256]; - string filename2[1]; - char dirname[256]; - register int status; - PATHNAME f; - - memset( (char *)&f, '\0', sizeof( f ) ); - - f.f_root.ptr = dir; - f.f_root.len = strlen( dir ); - - /* get the input file specification - */ - xnam = cc$rms_nam; - xnam.nam$l_esa = esa; - xnam.nam$b_ess = sizeof( esa ) - 1; - xnam.nam$l_rsa = filename; - xnam.nam$b_rss = min( sizeof( filename ) - 1, NAM$C_MAXRSS ); - - xab = cc$rms_xabdat; /* initialize extended attributes */ - xab.xab$b_cod = XAB$C_DAT; /* ask for date */ - xab.xab$l_nxt = NULL; /* terminate XAB chain */ - - xfab = cc$rms_fab; - xfab.fab$l_dna = DEFAULT_FILE_SPECIFICATION; - xfab.fab$b_dns = sizeof( DEFAULT_FILE_SPECIFICATION ) - 1; - xfab.fab$l_fop = FAB$M_NAM; - xfab.fab$l_fna = dir; /* address of file name */ - xfab.fab$b_fns = strlen( dir ); /* length of 
file name */ - xfab.fab$l_nam = &xnam; /* address of NAB block */ - xfab.fab$l_xab = (char *)&xab; /* address of XAB block */ - - - status = sys$parse( &xfab ); - - if ( DEBUG_BINDSCAN ) - printf( "scan directory %s\n", dir ); - - if ( !( status & 1 ) ) - return; - - - - /* Add bogus directory for [000000] */ - - if ( !strcmp( dir, "[000000]" ) ) - { - (*func)( closure, "[000000]", 1 /* time valid */, 1 /* old but true */ ); - } - - /* Add bogus directory for [] */ - - if ( !strcmp( dir, "[]" ) ) - { - (*func)( closure, "[]", 1 /* time valid */, 1 /* old but true */ ); - (*func)( closure, "[-]", 1 /* time valid */, 1 /* old but true */ ); - } - - string_new( filename2 ); - while ( (status = sys$search( &xfab )) & 1 ) - { - char *s; - time_t time; - - /* "I think that might work" - eml */ - - sys$open( &xfab ); - sys$close( &xfab ); - - file_cvttime( (unsigned int *)&xab.xab$q_rdt, &time ); - - filename[xnam.nam$b_rsl] = '\0'; - - /* What we do with the name depends on the suffix: */ - /* .dir is a directory */ - /* .xxx is a file with a suffix */ - /* . is no suffix at all */ - - if ( xnam.nam$b_type == 4 && !strncmp( xnam.nam$l_type, ".DIR", 4 ) ) - { - /* directory */ - sprintf( dirname, "[.%.*s]", xnam.nam$b_name, xnam.nam$l_name ); - f.f_dir.ptr = dirname; - f.f_dir.len = strlen( dirname ); - f.f_base.ptr = 0; - f.f_base.len = 0; - f.f_suffix.ptr = 0; - f.f_suffix.len = 0; - } - else - { - /* normal file with a suffix */ - f.f_dir.ptr = 0; - f.f_dir.len = 0; - f.f_base.ptr = xnam.nam$l_name; - f.f_base.len = xnam.nam$b_name; - f.f_suffix.ptr = xnam.nam$l_type; - f.f_suffix.len = xnam.nam$b_type; - } - - string_truncate( filename2, 0 ); - path_build( &f, filename2, 0 ); - - /* - if ( DEBUG_SEARCH ) - printf("root '%s' base %.*s suf %.*s = %s\n", - dir, - xnam.nam$b_name, xnam.nam$l_name, - xnam.nam$b_type, xnam.nam$l_type, - filename2 ); - */ - - (*func)( closure, filename2->value, 1 /* time valid */, time ); - } - string_free( filename2 ); -} - -int -file_time( - char *filename, - time_t *time ) -{ - /* This should never be called, as all files are */ - /* timestampped in file_dirscan() and file_archscan() */ - return -1; -} - -static char *VMS_archive = 0; -static scanback VMS_func; -static void *VMS_closure; -static void *context; - -static int -file_archmember( - struct dsc$descriptor_s *module, - unsigned long *rfa ) -{ - static struct dsc$descriptor_s bufdsc = - {0, DSC$K_DTYPE_T, DSC$K_CLASS_S, NULL}; - - struct mhddef *mhd; - char filename[128]; - char buf[ MAXJPATH ]; - - int status; - time_t library_date; - - register int i; - register char *p; - - bufdsc.dsc$a_pointer = filename; - bufdsc.dsc$w_length = sizeof( filename ); - status = lbr$set_module( &context, rfa, &bufdsc, - &bufdsc.dsc$w_length, NULL ); - - if ( !(status & 1) ) - return ( 1 ); - - mhd = (struct mhddef *)filename; - - file_cvttime( &mhd->mhd$l_datim, &library_date ); - - for ( i = 0, p = module->dsc$a_pointer; i < module->dsc$w_length; ++i, ++p ) - filename[ i ] = *p; - - filename[ i ] = '\0'; - - sprintf( buf, "%s(%s.obj)", VMS_archive, filename ); - - (*VMS_func)( VMS_closure, buf, 1 /* time valid */, (time_t)library_date ); - - return ( 1 ); -} - - -void file_archscan( char * archive, scanback func, void * closure ) -{ - static struct dsc$descriptor_s library = - {0, DSC$K_DTYPE_T, DSC$K_CLASS_S, NULL}; - - unsigned long lfunc = LBR$C_READ; - unsigned long typ = LBR$C_TYP_UNK; - unsigned long index = 1; - - register int status; - - VMS_archive = archive; - VMS_func = func; - VMS_closure = closure; - - 
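    /* The librarian calls below follow a fixed protocol: lbr$ini_control()
     * readies a context for reading (LBR$C_READ), lbr$open() attaches the
     * library file, lbr$get_index() walks index 1, the module-name index,
     * invoking file_archmember() above once per member, and lbr$close()
     * releases the context.  Member dates arrive as 64-bit VMS time in
     * 100ns ticks; file_cvttime() above turns one into a Unix time_t by
     * subtracting the quadword for 1-Jan-1970 (bastim) and dividing by
     * the 10,000,000 ticks per second (lib$subx / lib$ediv).
     */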
status = lbr$ini_control( &context, &lfunc, &typ, NULL ); - if ( !( status & 1 ) ) - return; - - library.dsc$a_pointer = archive; - library.dsc$w_length = strlen( archive ); - - status = lbr$open( &context, &library, NULL, NULL, NULL, NULL, NULL ); - if ( !( status & 1 ) ) - return; - - (void) lbr$get_index( &context, &index, file_archmember, NULL ); - - (void) lbr$close( &context ); -} - -# endif /* VMS */ diff --git a/jam-files/engine/frames.c b/jam-files/engine/frames.c deleted file mode 100644 index 84889f09..00000000 --- a/jam-files/engine/frames.c +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "frames.h" -# include "lists.h" - -void frame_init( FRAME* frame ) -{ - frame->prev = 0; - lol_init(frame->args); - frame->module = root_module(); - frame->rulename = "module scope"; - frame->procedure = 0; -} - -void frame_free( FRAME* frame ) -{ - lol_free( frame->args ); -} diff --git a/jam-files/engine/frames.h b/jam-files/engine/frames.h deleted file mode 100644 index 693d77fa..00000000 --- a/jam-files/engine/frames.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ -#ifndef FRAMES_DWA20011021_H -#define FRAMES_DWA20011021_H - -#include "lists.h" -#include "modules.h" - -typedef struct _PARSE PARSE; -typedef struct frame FRAME; - -struct frame -{ - FRAME * prev; - /* The nearest enclosing frame for which module->user_module is true. */ - FRAME * prev_user; - LOL args[ 1 ]; - module_t * module; - PARSE * procedure; - char * rulename; -}; - - -/* When call into Python is in progress, this variable points to the bjam frame - * that was current at the moment of call. When the call completes, the variable - * is not defined. Further, if Jam calls Python which calls Jam and so on, this - * variable only keeps the most recent Jam frame. - */ -extern struct frame * frame_before_python_call; - -void frame_init( FRAME * ); /* implemented in compile.c */ -void frame_free( FRAME * ); /* implemented in compile.c */ - -#endif diff --git a/jam-files/engine/glob.c b/jam-files/engine/glob.c deleted file mode 100644 index 527d6c80..00000000 --- a/jam-files/engine/glob.c +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright 1994 Christopher Seiwald. All rights reserved. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * glob.c - match a string against a simple pattern - * - * Understands the following patterns: - * - * * any number of characters - * ? any single character - * [a-z] any single character in the range a-z - * [^a-z] any single character not in the range a-z - * \x match x - * - * External functions: - * - * glob() - match a string against a simple pattern - * - * Internal functions: - * - * globchars() - build a bitlist to check for character group match - */ - -# include "jam.h" - -# define CHECK_BIT( tab, bit ) ( tab[ (bit)/8 ] & (1<<( (bit)%8 )) ) -# define BITLISTSIZE 16 /* bytes used for [chars] in compiled expr */ - -static void globchars( char * s, char * e, char * b ); - - -/* - * glob() - match a string against a simple pattern. - */ - -int glob( char * c, char * s ) -{ - char bitlist[ BITLISTSIZE ]; - char * here; - - for ( ; ; ) - switch ( *c++ ) - { - case '\0': - return *s ? 
-1 : 0; - - case '?': - if ( !*s++ ) - return 1; - break; - - case '[': - /* Scan for matching ]. */ - - here = c; - do if ( !*c++ ) return 1; - while ( ( here == c ) || ( *c != ']' ) ); - ++c; - - /* Build character class bitlist. */ - - globchars( here, c, bitlist ); - - if ( !CHECK_BIT( bitlist, *(unsigned char *)s ) ) - return 1; - ++s; - break; - - case '*': - here = s; - - while ( *s ) - ++s; - - /* Try to match the rest of the pattern in a recursive */ - /* call. If the match fails we'll back up chars, retrying. */ - - while ( s != here ) - { - int r; - - /* A fast path for the last token in a pattern. */ - r = *c ? glob( c, s ) : *s ? -1 : 0; - - if ( !r ) - return 0; - if ( r < 0 ) - return 1; - --s; - } - break; - - case '\\': - /* Force literal match of next char. */ - if ( !*c || ( *s++ != *c++ ) ) - return 1; - break; - - default: - if ( *s++ != c[ -1 ] ) - return 1; - break; - } -} - - -/* - * globchars() - build a bitlist to check for character group match. - */ - -static void globchars( char * s, char * e, char * b ) -{ - int neg = 0; - - memset( b, '\0', BITLISTSIZE ); - - if ( *s == '^' ) - { - ++neg; - ++s; - } - - while ( s < e ) - { - int c; - - if ( ( s + 2 < e ) && ( s[1] == '-' ) ) - { - for ( c = s[0]; c <= s[2]; ++c ) - b[ c/8 ] |= ( 1 << ( c % 8 ) ); - s += 3; - } - else - { - c = *s++; - b[ c/8 ] |= ( 1 << ( c % 8 ) ); - } - } - - if ( neg ) - { - int i; - for ( i = 0; i < BITLISTSIZE; ++i ) - b[ i ] ^= 0377; - } - - /* Do not include \0 in either $[chars] or $[^chars]. */ - b[0] &= 0376; -} diff --git a/jam-files/engine/hash.c b/jam-files/engine/hash.c deleted file mode 100644 index fbd1a899..00000000 --- a/jam-files/engine/hash.c +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -# include "jam.h" -# include "hash.h" -# include "compile.h" -# include <assert.h> - -/* - * hash.c - simple in-memory hashing routines - * - * External routines: - * - * hashinit() - initialize a hash table, returning a handle - * hashitem() - find a record in the table, and optionally enter a new one - * hashdone() - free a hash table, given its handle - * - * Internal routines: - * - * hashrehash() - resize and rebuild hp->tab, the hash table - * - * 4/29/93 - ensure ITEM's are aligned - */ - -/* */ -#define HASH_DEBUG_PROFILE 1 -/* */ - -char *hashsccssid="@(#)hash.c 1.14 () 6/20/88"; - -/* Header attached to all data items entered into a hash table. */ - -struct hashhdr -{ - struct item * next; - unsigned int keyval; /* for quick comparisons */ -}; - -/* This structure overlays the one handed to hashenter(). Its actual size is - * given to hashinit(). 
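Stepping back to glob() above: its return convention is inverted relative to a boolean. 0 means the string matches the pattern and non-zero means it does not, with -1 additionally used internally to cut off '*' backtracking once the pattern is exhausted. A minimal driver (hypothetical, not part of Jam):

    #include <stdio.h>

    extern int glob( char * pattern, char * string );

    int main()
    {
        printf( "%d\n", glob( "*.c", "jam.c" ) );        /* 0: '*' absorbs "jam" */
        printf( "%d\n", glob( "file?.c", "file1.c" ) );  /* 0: '?' matches '1'   */
        printf( "%d\n", glob( "[a-z]*", "Jam" ) );       /* 1: 'J' not in [a-z]  */
        printf( "%d\n", glob( "\\*.c", "x.c" ) );        /* 1: literal '*' only  */
        return 0;
    }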
- */ - -struct hashdata -{ - char * key; - /* rest of user data */ -}; - -typedef struct item -{ - struct hashhdr hdr; - struct hashdata data; -} ITEM ; - -# define MAX_LISTS 32 - -struct hash -{ - /* - * the hash table, just an array of item pointers - */ - struct { - int nel; - ITEM **base; - } tab; - - int bloat; /* tab.nel / items.nel */ - int inel; /* initial number of elements */ - - /* - * the array of records, maintained by these routines - * essentially a microallocator - */ - struct { - int more; /* how many more ITEMs fit in lists[ list ] */ - ITEM *free; /* free list of items */ - char *next; /* where to put more ITEMs in lists[ list ] */ - int datalen; /* length of records in this hash table */ - int size; /* sizeof( ITEM ) + aligned datalen */ - int nel; /* total ITEMs held by all lists[] */ - int list; /* index into lists[] */ - - struct { - int nel; /* total ITEMs held by this list */ - char *base; /* base of ITEMs array */ - } lists[ MAX_LISTS ]; - } items; - - char * name; /* just for hashstats() */ -}; - -static void hashrehash( struct hash *hp ); -static void hashstat( struct hash *hp ); -static void * hash_mem_alloc(size_t datalen, size_t size); -static void hash_mem_free(size_t datalen, void * data); -#ifdef OPT_BOEHM_GC -static void hash_mem_finalizer(char * key, struct hash * hp); -#endif - -static unsigned int jenkins_one_at_a_time_hash(const unsigned char *key) -{ - unsigned int hash = 0; - - while ( *key ) - { - hash += *key++; - hash += (hash << 10); - hash ^= (hash >> 6); - } - hash += (hash << 3); - hash ^= (hash >> 11); - hash += (hash << 15); - - return hash; -} - -/* -static unsigned int knuth_hash(const unsigned char *key) -{ - unsigned int keyval = *key; - while ( *key ) - keyval = keyval * 2147059363 + *key++; - return keyval; -} -*/ - -static unsigned int hash_keyval( const char * key_ ) -{ - /* - return knuth_hash((const unsigned char *)key_); - */ - return jenkins_one_at_a_time_hash((const unsigned char *)key_); -} - -#define hash_bucket(hp,keyval) ((hp)->tab.base + ( (keyval) % (hp)->tab.nel )) - -/* Find the hash item for the given data. Returns pointer to the - item and if given a pointer to the item before the found item. - If it's the first item in a bucket, there is no previous item, - and zero is returned for the previous item instead. -*/ -static ITEM * hash_search( - struct hash *hp, - unsigned int keyval, - const char * keydata, - ITEM * * previous ) -{ - ITEM * i = *hash_bucket(hp,keyval); - ITEM * p = 0; - - for ( ; i; i = i->hdr.next ) - { - if ( ( keyval == i->hdr.keyval ) && - !strcmp( i->data.key, keydata ) ) - { - if (previous) - { - *previous = p; - } - return i; - } - p = i; - } - - return 0; -} - -/* - * hash_free() - remove the given item from the table if it's there. - * Returns 1 if found, 0 otherwise. - * - * NOTE: 2nd argument is HASHDATA*, not HASHDATA** as elsewhere. 
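The calling convention described above, a record whose first field overlays the key, handed around as HASHDATA**, is easiest to see from a caller. A minimal sketch of the enter-or-initialize idiom (hypothetical record type; the same pattern file_info() in filesys.c uses):

    #include "hash.h"
    #include "newstr.h"

    typedef struct seen     /* first field must be the key: overlays HASHDATA */
    {
        char * key;
        int    hits;
    } SEEN;

    static struct hash * seen_hash = 0;

    int count_hit( char * name )
    {
        SEEN rec, * r = &rec;

        if ( !seen_hash )
            seen_hash = hashinit( sizeof( SEEN ), "seen" );

        rec.key = name;
        rec.hits = 0;

        /* hashenter() copies the record on first insertion and, either way,
         * repoints r at the copy that lives inside the table.              */
        if ( hashenter( seen_hash, (HASHDATA * *)&r ) )
            r->key = newstr( r->key );  /* new entry: give the key stable storage */

        return ++r->hits;
    }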
- */ -int -hash_free( - register struct hash *hp, - HASHDATA *data) -{ - ITEM * i = 0; - ITEM * prev = 0; - unsigned int keyval = hash_keyval(data->key); - - i = hash_search( hp, keyval, data->key, &prev ); - if (i) - { - /* mark it free so we skip it during enumeration */ - i->data.key = 0; - /* unlink the record from the hash chain */ - if (prev) prev->hdr.next = i->hdr.next; - else *hash_bucket(hp,keyval) = i->hdr.next; - /* link it into the freelist */ - i->hdr.next = hp->items.free; - hp->items.free = i; - /* we have another item */ - hp->items.more++; - - return 1; - } - return 0; -} - -/* - * hashitem() - find a record in the table, and optionally enter a new one - */ - -int -hashitem( - register struct hash *hp, - HASHDATA **data, - int enter ) -{ - register ITEM *i; - char *b = (*data)->key; - unsigned int keyval = hash_keyval(b); - - #ifdef HASH_DEBUG_PROFILE - profile_frame prof[1]; - if ( DEBUG_PROFILE ) - profile_enter( 0, prof ); - #endif - - if ( enter && !hp->items.more ) - hashrehash( hp ); - - if ( !enter && !hp->items.nel ) - { - #ifdef HASH_DEBUG_PROFILE - if ( DEBUG_PROFILE ) - profile_exit( prof ); - #endif - return 0; - } - - i = hash_search( hp, keyval, (*data)->key, 0 ); - if (i) - { - *data = &i->data; - #ifdef HASH_DEBUG_PROFILE - if ( DEBUG_PROFILE ) profile_exit( prof ); - #endif - return !0; - } - - if ( enter ) - { - ITEM * * base = hash_bucket(hp,keyval); - - /* try to grab one from the free list */ - if ( hp->items.free ) - { - i = hp->items.free; - hp->items.free = i->hdr.next; - assert( i->data.key == 0 ); - } - else - { - i = (ITEM *)hp->items.next; - hp->items.next += hp->items.size; - } - hp->items.more--; - memcpy( (char *)&i->data, (char *)*data, hp->items.datalen ); - i->hdr.keyval = keyval; - i->hdr.next = *base; - *base = i; - *data = &i->data; - #ifdef OPT_BOEHM_GC - if (sizeof(HASHDATA) == hp->items.datalen) - { - GC_REGISTER_FINALIZER(i->data.key,&hash_mem_finalizer,hp,0,0); - } - #endif - } - - #ifdef HASH_DEBUG_PROFILE - if ( DEBUG_PROFILE ) - profile_exit( prof ); - #endif - return 0; -} - -/* - * hashrehash() - resize and rebuild hp->tab, the hash table - */ - -static void hashrehash( register struct hash *hp ) -{ - int i = ++hp->items.list; - hp->items.more = i ? 
2 * hp->items.nel : hp->inel; - hp->items.next = (char *)hash_mem_alloc( hp->items.datalen, hp->items.more * hp->items.size ); - hp->items.free = 0; - - hp->items.lists[i].nel = hp->items.more; - hp->items.lists[i].base = hp->items.next; - hp->items.nel += hp->items.more; - - if ( hp->tab.base ) - hash_mem_free( hp->items.datalen, (char *)hp->tab.base ); - - hp->tab.nel = hp->items.nel * hp->bloat; - hp->tab.base = (ITEM **)hash_mem_alloc( hp->items.datalen, hp->tab.nel * sizeof(ITEM **) ); - - memset( (char *)hp->tab.base, '\0', hp->tab.nel * sizeof( ITEM * ) ); - - for ( i = 0; i < hp->items.list; ++i ) - { - int nel = hp->items.lists[i].nel; - char *next = hp->items.lists[i].base; - - for ( ; nel--; next += hp->items.size ) - { - register ITEM *i = (ITEM *)next; - ITEM **ip = hp->tab.base + i->hdr.keyval % hp->tab.nel; - /* code currently assumes rehashing only when there are no free items */ - assert( i->data.key != 0 ); - - i->hdr.next = *ip; - *ip = i; - } - } -} - -void hashenumerate( struct hash * hp, void (* f)( void *, void * ), void * data ) -{ - int i; - for ( i = 0; i <= hp->items.list; ++i ) - { - char * next = hp->items.lists[i].base; - int nel = hp->items.lists[i].nel; - if ( i == hp->items.list ) - nel -= hp->items.more; - - for ( ; nel--; next += hp->items.size ) - { - ITEM * i = (ITEM *)next; - if ( i->data.key != 0 ) /* DO not enumerate freed items. */ - f( &i->data, data ); - } - } -} - -/* --- */ - -# define ALIGNED(x) ( ( x + sizeof( ITEM ) - 1 ) & ~( sizeof( ITEM ) - 1 ) ) - -/* - * hashinit() - initialize a hash table, returning a handle - */ - -struct hash * -hashinit( - int datalen, - char *name ) -{ - struct hash *hp = (struct hash *)hash_mem_alloc( datalen, sizeof( *hp ) ); - - hp->bloat = 3; - hp->tab.nel = 0; - hp->tab.base = (ITEM **)0; - hp->items.more = 0; - hp->items.free = 0; - hp->items.datalen = datalen; - hp->items.size = sizeof( struct hashhdr ) + ALIGNED( datalen ); - hp->items.list = -1; - hp->items.nel = 0; - hp->inel = 11 /* 47 */; - hp->name = name; - - return hp; -} - -/* - * hashdone() - free a hash table, given its handle - */ - -void -hashdone( struct hash *hp ) -{ - int i; - - if ( !hp ) - return; - - if ( DEBUG_MEM || DEBUG_PROFILE ) - hashstat( hp ); - - if ( hp->tab.base ) - hash_mem_free( hp->items.datalen, (char *)hp->tab.base ); - for ( i = 0; i <= hp->items.list; ++i ) - hash_mem_free( hp->items.datalen, hp->items.lists[i].base ); - hash_mem_free( hp->items.datalen, (char *)hp ); -} - -static void * hash_mem_alloc(size_t datalen, size_t size) -{ - if (sizeof(HASHDATA) == datalen) - { - return BJAM_MALLOC_RAW(size); - } - else - { - return BJAM_MALLOC(size); - } -} - -static void hash_mem_free(size_t datalen, void * data) -{ - if (sizeof(HASHDATA) == datalen) - { - BJAM_FREE_RAW(data); - } - else - { - BJAM_FREE(data); - } -} - -#ifdef OPT_BOEHM_GC -static void hash_mem_finalizer(char * key, struct hash * hp) -{ - HASHDATA d; - d.key = key; - hash_free(hp,&d); -} -#endif - - -/* ---- */ - -static void hashstat( struct hash * hp ) -{ - ITEM * * tab = hp->tab.base; - int nel = hp->tab.nel; - int count = 0; - int sets = 0; - int run = ( tab[ nel - 1 ] != (ITEM *)0 ); - int i; - int here; - - for ( i = nel; i > 0; --i ) - { - if ( ( here = ( *tab++ != (ITEM *)0 ) ) ) - count++; - if ( here && !run ) - sets++; - run = here; - } - - printf( "%s table: %d+%d+%d (%dK+%luK) items+table+hash, %f density\n", - hp->name, - count, - hp->items.nel, - hp->tab.nel, - hp->items.nel * hp->items.size / 1024, - (long unsigned)hp->tab.nel * sizeof( ITEM 
** ) / 1024, - (float)count / (float)sets ); -} diff --git a/jam-files/engine/hash.h b/jam-files/engine/hash.h deleted file mode 100644 index 7195b414..00000000 --- a/jam-files/engine/hash.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * hash.h - simple in-memory hashing routines - */ - -#ifndef BOOST_JAM_HASH_H -#define BOOST_JAM_HASH_H - -typedef struct hashdata HASHDATA; - -struct hash * hashinit ( int datalen, char * name ); -int hashitem ( struct hash * hp, HASHDATA * * data, int enter ); -void hashdone ( struct hash * hp ); -void hashenumerate( struct hash * hp, void (* f)( void *, void * ), void * data ); -int hash_free ( struct hash * hp, HASHDATA * data); - -#define hashenter( hp, data ) ( !hashitem( hp, data, !0 ) ) -#define hashcheck( hp, data ) hashitem( hp, data, 0 ) - -#endif diff --git a/jam-files/engine/hcache.c b/jam-files/engine/hcache.c deleted file mode 100644 index 70bb798c..00000000 --- a/jam-files/engine/hcache.c +++ /dev/null @@ -1,434 +0,0 @@ -/* - * This file has been donated to Jam. - */ - -# include "jam.h" -# include "lists.h" -# include "parse.h" -# include "rules.h" -# include "regexp.h" -# include "headers.h" -# include "newstr.h" -# include "hash.h" -# include "hcache.h" -# include "variable.h" -# include "search.h" - -#ifdef OPT_HEADER_CACHE_EXT - -/* - * Craig W. McPheeters, Alias|Wavefront. - * - * hcache.c hcache.h - handle cacheing of #includes in source files. - * - * Create a cache of files scanned for headers. When starting jam, look for the - * cache file and load it if present. When finished the binding phase, create a - * new header cache. The cache contains files, their timestamps and the header - * files found in their scan. During the binding phase of jam, look in the - * header cache first for the headers contained in a file. If the cache is - * present and valid, use its contents. This results in dramatic speedups with - * large projects (eg. 3min -> 1min startup for one project.) - * - * External routines: - * hcache_init() - read and parse the local .jamdeps file. - * hcache_done() - write a new .jamdeps file. - * hcache() - return list of headers on target. Use cache or do a scan. - * - * The dependency file format is an ASCII file with 1 line per target. Each line - * has the following fields: - * @boundname@ timestamp @file@ @file@ @file@ ... \n - */ - -typedef struct hcachedata HCACHEDATA ; - -struct hcachedata -{ - char * boundname; - time_t time; - LIST * includes; - LIST * hdrscan; /* the HDRSCAN value for this target */ - int age; /* if too old, we'll remove it from cache */ - HCACHEDATA * next; -}; - - -static struct hash * hcachehash = 0; -static HCACHEDATA * hcachelist = 0; - -static int queries = 0; -static int hits = 0; - -#define CACHE_FILE_VERSION "version 4" -#define CACHE_RECORD_HEADER "header" -#define CACHE_RECORD_END "end" - - -/* - * Return the name of the header cache file. May return NULL. - * - * The user sets this by setting the HCACHEFILE variable in a Jamfile. We cache - * the result so the user can not change the cache file during header scanning. - */ - -static char * cache_name( void ) -{ - static char * name = 0; - if ( !name ) - { - LIST * hcachevar = var_get( "HCACHEFILE" ); - - if ( hcachevar ) - { - TARGET * t = bindtarget( hcachevar->string ); - - pushsettings( t->settings ); - /* Do not expect the cache file to be generated, so pass 0 as the - * third argument to search. 
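For orientation, this is what a tiny cache file looks like under the version-4 netstring framing implemented by read_netstring() and write_netstring() below. Every value here is hypothetical, and a single TAB separates each length prefix from its payload:

    9	version 4
    6	header
    10	src/main.c
    10	1334070000
    1	0
    1	1
    5	jam.h
    1	1
    9	hdr-regex

    3	end

One "header" record carries, in order: the bound file name, its timestamp, its age, an include count followed by that many include paths, and an HDRSCAN count followed by the scan patterns ("hdr-regex" stands in for a real HDRSCAN regexp). The writer also emits a bare newline after each record, which the reader's fscanf( " %9lu" ) skips. The line-oriented "@boundname@ timestamp @file@ ..." layout described in the header comment above appears to document an earlier revision of the format than the netstring code actually implements.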
Expect the location to be specified via
-             * LOCATE, so pass 0 as the fourth argument.
-             */
-            t->boundname = search( t->name, &t->time, 0, 0 );
-            popsettings( t->settings );
-
-            if ( hcachevar )
-                name = copystr( t->boundname );
-        }
-    }
-    return name;
-}
-
-
-/*
- * Return the maximum age a cache entry can have before it is purged from the
- * cache.
- */
-
-static int cache_maxage( void )
-{
-    int age = 100;
-    LIST * var = var_get( "HCACHEMAXAGE" );
-    if ( var )
-    {
-        age = atoi( var->string );
-        if ( age < 0 )
-            age = 0;
-    }
-    return age;
-}
-
-
-/*
- * Read a netstring. The caveat is that the string cannot contain ASCII 0. The
- * returned value is as returned by newstr(), so it need not be freed.
- */
-
-char * read_netstring( FILE * f )
-{
-    unsigned long len;
-    static char * buf = NULL;
-    static unsigned long buf_len = 0;
-
-    if ( fscanf( f, " %9lu", &len ) != 1 )
-        return NULL;
-    if ( fgetc( f ) != (int)'\t' )
-        return NULL;
-
-    if ( len > 1024 * 64 )
-        return NULL;  /* sanity check */
-
-    if ( len > buf_len )
-    {
-        unsigned long new_len = buf_len * 2;
-        if ( new_len < len )
-            new_len = len;
-        buf = (char *)BJAM_REALLOC( buf, new_len + 1 );
-        if ( buf )
-            buf_len = new_len;
-    }
-
-    if ( !buf )
-        return NULL;
-
-    if ( fread( buf, 1, len, f ) != len )
-        return NULL;
-    if ( fgetc( f ) != (int)'\n' )
-        return NULL;
-
-    buf[ len ] = 0;
-    return newstr( buf );
-}
-
-
-/*
- * Write a netstring.
- */
-
-void write_netstring( FILE * f, char const * s )
-{
-    if ( !s )
-        s = "";
-    fprintf( f, "%lu\t%s\n", (long unsigned)strlen( s ), s );
-}
-
-
-void hcache_init()
-{
-    HCACHEDATA cachedata;
-    HCACHEDATA * c;
-    FILE * f;
-    char * version;
-    int header_count = 0;
-    char * hcachename;
-
-    hcachehash = hashinit( sizeof( HCACHEDATA ), "hcache" );
-
-    if ( !( hcachename = cache_name() ) )
-        return;
-
-    if ( !( f = fopen( hcachename, "rb" ) ) )
-        return;
-
-    version = read_netstring( f );
-    if ( !version || strcmp( version, CACHE_FILE_VERSION ) )
-    {
-        fclose( f );
-        return;
-    }
-
-    while ( 1 )
-    {
-        char * record_type;
-        char * time_str;
-        char * age_str;
-        char * includes_count_str;
-        char * hdrscan_count_str;
-        int i;
-        int count;
-        LIST * l;
-
-        record_type = read_netstring( f );
-        if ( !record_type )
-        {
-            fprintf( stderr, "invalid %s\n", hcachename );
-            goto bail;
-        }
-        if ( !strcmp( record_type, CACHE_RECORD_END ) )
-            break;
-        if ( strcmp( record_type, CACHE_RECORD_HEADER ) )
-        {
-            fprintf( stderr, "invalid %s with record separator <%s>\n",
-                hcachename, record_type ?
record_type : "<null>" );
-            goto bail;
-        }
-
-        c = &cachedata;
-
-        c->boundname = read_netstring( f );
-        time_str = read_netstring( f );
-        age_str = read_netstring( f );
-        includes_count_str = read_netstring( f );
-
-        if ( !c->boundname || !time_str || !age_str || !includes_count_str )
-        {
-            fprintf( stderr, "invalid %s\n", hcachename );
-            goto bail;
-        }
-
-        c->time = atoi( time_str );
-        c->age = atoi( age_str ) + 1;
-
-        count = atoi( includes_count_str );
-        for ( l = 0, i = 0; i < count; ++i )
-        {
-            char * s = read_netstring( f );
-            if ( !s )
-            {
-                fprintf( stderr, "invalid %s\n", hcachename );
-                goto bail;
-            }
-            l = list_new( l, s );
-        }
-        c->includes = l;
-
-        hdrscan_count_str = read_netstring( f );
-        if ( !hdrscan_count_str )
-        {
-            list_free( c->includes );
-            fprintf( stderr, "invalid %s\n", hcachename );
-            goto bail;
-        }
-
-        count = atoi( hdrscan_count_str );
-        for ( l = 0, i = 0; i < count; ++i )
-        {
-            char * s = read_netstring( f );
-            if ( !s )
-            {
-                fprintf( stderr, "invalid %s\n", hcachename );
-                goto bail;
-            }
-            l = list_new( l, s );
-        }
-        c->hdrscan = l;
-
-        if ( !hashenter( hcachehash, (HASHDATA * *)&c ) )
-        {
-            fprintf( stderr, "can't insert header cache item, bailing on %s\n",
-                hcachename );
-            goto bail;
-        }
-
-        c->next = hcachelist;
-        hcachelist = c;
-
-        ++header_count;
-    }
-
-    if ( DEBUG_HEADER )
-        printf( "hcache read from file %s\n", hcachename );
-
- bail:
-    fclose( f );
-}
-
-
-void hcache_done()
-{
-    FILE * f;
-    HCACHEDATA * c;
-    int header_count = 0;
-    char * hcachename;
-    int maxage;
-
-    if ( !hcachehash )
-        return;
-
-    if ( !( hcachename = cache_name() ) )
-        return;
-
-    if ( !( f = fopen( hcachename, "wb" ) ) )
-        return;
-
-    maxage = cache_maxage();
-
-    /* Print out the version. */
-    write_netstring( f, CACHE_FILE_VERSION );
-
-    c = hcachelist;
-    for ( c = hcachelist; c; c = c->next )
-    {
-        LIST * l;
-        char time_str[ 30 ];
-        char age_str[ 30 ];
-        char includes_count_str[ 30 ];
-        char hdrscan_count_str[ 30 ];
-
-        if ( maxage == 0 )
-            c->age = 0;
-        else if ( c->age > maxage )
-            continue;
-
-        sprintf( includes_count_str, "%lu", (long unsigned) list_length( c->includes ) );
-        sprintf( hdrscan_count_str, "%lu", (long unsigned) list_length( c->hdrscan ) );
-        sprintf( time_str, "%lu", (long unsigned) c->time );
-        sprintf( age_str, "%lu", (long unsigned) c->age );
-
-        write_netstring( f, CACHE_RECORD_HEADER );
-        write_netstring( f, c->boundname );
-        write_netstring( f, time_str );
-        write_netstring( f, age_str );
-        write_netstring( f, includes_count_str );
-        for ( l = c->includes; l; l = list_next( l ) )
-            write_netstring( f, l->string );
-        write_netstring( f, hdrscan_count_str );
-        for ( l = c->hdrscan; l; l = list_next( l ) )
-            write_netstring( f, l->string );
-        fputs( "\n", f );
-        ++header_count;
-    }
-    write_netstring( f, CACHE_RECORD_END );
-
-    if ( DEBUG_HEADER )
-        printf( "hcache written to %s. %d dependencies, %.0f%% hit rate\n",
-            hcachename, header_count, queries ?
100.0 * hits / queries : 0 ); - - fclose ( f ); -} - - -LIST * hcache( TARGET * t, int rec, regexp * re[], LIST * hdrscan ) -{ - HCACHEDATA cachedata; - HCACHEDATA * c = &cachedata; - - LIST * l = 0; - - ++queries; - - c->boundname = t->boundname; - - if (hashcheck (hcachehash, (HASHDATA **) &c)) - { - if (c->time == t->time) - { - LIST *l1 = hdrscan, *l2 = c->hdrscan; - while (l1 && l2) { - if (l1->string != l2->string) { - l1 = NULL; - } else { - l1 = list_next(l1); - l2 = list_next(l2); - } - } - if (l1 || l2) { - if (DEBUG_HEADER) - printf("HDRSCAN out of date in cache for %s\n", - t->boundname); - - printf("HDRSCAN out of date for %s\n", t->boundname); - printf(" real : "); - list_print(hdrscan); - printf("\n cached: "); - list_print(c->hdrscan); - printf("\n"); - - list_free(c->includes); - list_free(c->hdrscan); - c->includes = 0; - c->hdrscan = 0; - } else { - if (DEBUG_HEADER) - printf ("using header cache for %s\n", t->boundname); - c->age = 0; - ++hits; - l = list_copy (0, c->includes); - return l; - } - } else { - if (DEBUG_HEADER) - printf ("header cache out of date for %s\n", t->boundname); - list_free (c->includes); - list_free(c->hdrscan); - c->includes = 0; - c->hdrscan = 0; - } - } else { - if (hashenter (hcachehash, (HASHDATA **)&c)) { - c->boundname = newstr (c->boundname); - c->next = hcachelist; - hcachelist = c; - } - } - - /* 'c' points at the cache entry. Its out of date. */ - - l = headers1 (0, t->boundname, rec, re); - - c->time = t->time; - c->age = 0; - c->includes = list_copy (0, l); - c->hdrscan = list_copy(0, hdrscan); - - return l; -} - -#endif diff --git a/jam-files/engine/hcache.h b/jam-files/engine/hcache.h deleted file mode 100644 index c316e3bc..00000000 --- a/jam-files/engine/hcache.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * This file is not part of Jam - */ - -/* - * hcache.h - handle #includes in source files - */ -#ifndef HCACHE_H -# define HCACHE_H - -# include "regexp.h" -# include "lists.h" - -void hcache_init(void); -void hcache_done(void); -LIST *hcache(TARGET *t, int rec, regexp *re[], LIST *hdrscan); - -#endif diff --git a/jam-files/engine/hdrmacro.c b/jam-files/engine/hdrmacro.c deleted file mode 100644 index 43031d48..00000000 --- a/jam-files/engine/hdrmacro.c +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 1993, 2000 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" -# include "lists.h" -# include "parse.h" -# include "compile.h" -# include "rules.h" -# include "variable.h" -# include "regexp.h" -# include "hdrmacro.h" -# include "hash.h" -# include "newstr.h" -# include "strings.h" - -/* - * hdrmacro.c - handle header files that define macros used in - * #include statements. - * - * we look for lines like "#define MACRO <....>" or '#define MACRO " "' - * in the target file. 
When found, we - * - * we then phony up a rule invocation like: - * - * $(HDRRULE) <target> : <resolved included files> ; - * - * External routines: - * headers1() - scan a target for "#include MACRO" lines and try - * to resolve them when needed - * - * Internal routines: - * headers1() - using regexp, scan a file and build include LIST - * - * 04/13/94 (seiwald) - added shorthand L0 for null list pointer - * 09/10/00 (seiwald) - replaced call to compile_rule with evaluate_rule, - * so that headers() doesn't have to mock up a parse structure - * just to invoke a rule. - */ - -/* this type is used to store a dictionary of file header macros */ -typedef struct header_macro -{ - char * symbol; - char * filename; /* we could maybe use a LIST here ?? */ -} HEADER_MACRO; - -static struct hash * header_macros_hash = 0; - - -/* - * headers() - scan a target for include files and call HDRRULE - */ - -# define MAXINC 10 - -void -macro_headers( TARGET *t ) -{ - static regexp *re = 0; - FILE *f; - char buf[ 1024 ]; - - if ( DEBUG_HEADER ) - printf( "macro header scan for %s\n", t->name ); - - /* this regexp is used to detect lines of the form */ - /* "#define MACRO <....>" or "#define MACRO "....." */ - /* in the header macro files.. */ - if ( re == 0 ) - { - re = regex_compile( - "^[ ]*#[ ]*define[ ]*([A-Za-z][A-Za-z0-9_]*)[ ]*" - "[<\"]([^\">]*)[\">].*$" ); - } - - if ( !( f = fopen( t->boundname, "r" ) ) ) - return; - - while ( fgets( buf, sizeof( buf ), f ) ) - { - HEADER_MACRO var; - HEADER_MACRO *v = &var; - - if ( regexec( re, buf ) && re->startp[1] ) - { - /* we detected a line that looks like "#define MACRO filename */ - re->endp[1][0] = '\0'; - re->endp[2][0] = '\0'; - - if ( DEBUG_HEADER ) - printf( "macro '%s' used to define filename '%s' in '%s'\n", - re->startp[1], re->startp[2], t->boundname ); - - /* add macro definition to hash table */ - if ( !header_macros_hash ) - header_macros_hash = hashinit( sizeof( HEADER_MACRO ), "hdrmacros" ); - - v->symbol = re->startp[1]; - v->filename = 0; - if ( hashenter( header_macros_hash, (HASHDATA **)&v ) ) - { - v->symbol = newstr( re->startp[1] ); /* never freed */ - v->filename = newstr( re->startp[2] ); /* never freed */ - } - /* XXXX: FOR NOW, WE IGNORE MULTIPLE MACRO DEFINITIONS !! */ - /* WE MIGHT AS WELL USE A LIST TO STORE THEM.. */ - } - } - - fclose( f ); -} - - -char * macro_header_get( const char * macro_name ) -{ - HEADER_MACRO var; - HEADER_MACRO * v = &var; - - v->symbol = (char* )macro_name; - - if ( header_macros_hash && hashcheck( header_macros_hash, (HASHDATA **)&v ) ) - { - if ( DEBUG_HEADER ) - printf( "### macro '%s' evaluated to '%s'\n", macro_name, v->filename ); - return v->filename; - } - return 0; -} diff --git a/jam-files/engine/hdrmacro.h b/jam-files/engine/hdrmacro.h deleted file mode 100644 index 08cc1116..00000000 --- a/jam-files/engine/hdrmacro.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * hdrmacro.h - parses header files for #define MACRO <filename> or - * #define MACRO "filename" definitions - */ - -void macro_headers( TARGET *t ); - -char* macro_header_get( const char* macro_name ); diff --git a/jam-files/engine/headers.c b/jam-files/engine/headers.c deleted file mode 100644 index b9d8f637..00000000 --- a/jam-files/engine/headers.c +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Copyright 1993, 2000 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. 
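To make the hdrmacro.c machinery above concrete, consider two hypothetical files (all names invented for illustration):

    /* somedefs.h, a macro-definition header handed to macro_headers(): */
    #define PLATFORM_H  "platform_linux.h"

    /* main.c, an ordinary source file scanned during header binding: */
    #include PLATFORM_H

macro_headers() matches the #define against its regexp and records PLATFORM_H -> platform_linux.h in its hash. When headers1() in headers.c below later scans main.c, the line "#include PLATFORM_H" typically matches no ordinary HDRSCAN pattern, but the re_macros expression captures the symbol, macro_header_get( "PLATFORM_H" ) resolves it, and platform_linux.h is appended to the include list that headers() hands to the $(HDRRULE) invocation.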
- */ -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" -# include "lists.h" -# include "parse.h" -# include "compile.h" -# include "rules.h" -# include "variable.h" -# include "regexp.h" -# include "headers.h" -# include "hdrmacro.h" -# include "newstr.h" - -#ifdef OPT_HEADER_CACHE_EXT -# include "hcache.h" -#endif - -/* - * headers.c - handle #includes in source files - * - * Using regular expressions provided as the variable $(HDRSCAN), - * headers() searches a file for #include files and phonies up a - * rule invocation: - * - * $(HDRRULE) <target> : <include files> ; - * - * External routines: - * headers() - scan a target for include files and call HDRRULE - * - * Internal routines: - * headers1() - using regexp, scan a file and build include LIST - * - * 04/13/94 (seiwald) - added shorthand L0 for null list pointer - * 09/10/00 (seiwald) - replaced call to compile_rule with evaluate_rule, - * so that headers() doesn't have to mock up a parse structure - * just to invoke a rule. - */ - -#ifndef OPT_HEADER_CACHE_EXT -static LIST *headers1( LIST *l, char *file, int rec, regexp *re[]); -#endif - -/* - * headers() - scan a target for include files and call HDRRULE - */ - -# define MAXINC 10 - -void -headers( TARGET *t ) -{ - LIST * hdrscan; - LIST * hdrrule; - #ifndef OPT_HEADER_CACHE_EXT - LIST * headlist = 0; - #endif - regexp * re[ MAXINC ]; - int rec = 0; - - if ( !( hdrscan = var_get( "HDRSCAN" ) ) || - !( hdrrule = var_get( "HDRRULE" ) ) ) - return; - - if ( DEBUG_HEADER ) - printf( "header scan %s\n", t->name ); - - /* Compile all regular expressions in HDRSCAN */ - while ( ( rec < MAXINC ) && hdrscan ) - { - re[ rec++ ] = regex_compile( hdrscan->string ); - hdrscan = list_next( hdrscan ); - } - - /* Doctor up call to HDRRULE rule */ - /* Call headers1() to get LIST of included files. */ - { - FRAME frame[1]; - frame_init( frame ); - lol_add( frame->args, list_new( L0, t->name ) ); -#ifdef OPT_HEADER_CACHE_EXT - lol_add( frame->args, hcache( t, rec, re, hdrscan ) ); -#else - lol_add( frame->args, headers1( headlist, t->boundname, rec, re ) ); -#endif - - if ( lol_get( frame->args, 1 ) ) - { - /* The third argument to HDRRULE is the bound name of - * $(<) */ - lol_add( frame->args, list_new( L0, t->boundname ) ); - - list_free( evaluate_rule( hdrrule->string, frame ) ); - } - - /* Clean up. */ - frame_free( frame ); - } -} - - -/* - * headers1() - using regexp, scan a file and build include LIST. - */ - -#ifdef OPT_HEADER_CACHE_EXT -LIST * -#else -static LIST * -#endif -headers1( - LIST * l, - char * file, - int rec, - regexp * re[] ) -{ - FILE * f; - char buf[ 1024 ]; - int i; - static regexp * re_macros = 0; - -#ifdef OPT_IMPROVED_PATIENCE_EXT - static int count = 0; - ++count; - if ( ((count == 100) || !( count % 1000 )) && DEBUG_MAKE ) - printf("...patience...\n"); -#endif - - /* the following regexp is used to detect cases where a */ - /* file is included through a line line "#include MACRO" */ - if ( re_macros == 0 ) - re_macros = regex_compile( - "^[ ]*#[ ]*include[ ]*([A-Za-z][A-Za-z0-9_]*).*$" ); - - if ( !( f = fopen( file, "r" ) ) ) - return l; - - while ( fgets( buf, sizeof( buf ), f ) ) - { - int size = strlen( buf ); - /* Remove trailing \r and \n, if any. 
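A note on the loop that follows: its two character tests are joined with &&, and since buf[ size - 1 ] can never equal both '\n' and '\r' at once, the loop body can never execute, so trailing line endings are in fact never stripped. Presumably intended:

    /* strip trailing \n and/or \r (note the ||) */
    while ( ( size > 0 ) &&
            ( ( buf[ size - 1 ] == '\n' ) || ( buf[ size - 1 ] == '\r' ) ) )
    {
        buf[ size - 1 ] = '\0';
        --size;
    }

(Separately, headers() above compiles at most MAXINC = 10 of the $(HDRSCAN) patterns and silently ignores any extras.)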
*/ - while ( ( size > 0 ) && - ( buf[ size - 1 ] == '\n' ) && - ( buf[ size - 1 ] == '\r' ) ) - { - buf[ size - 1 ] = '\0'; - --size; - } - - for ( i = 0; i < rec; ++i ) - if ( regexec( re[i], buf ) && re[i]->startp[1] ) - { - re[i]->endp[1][0] = '\0'; - - if ( DEBUG_HEADER ) - printf( "header found: %s\n", re[i]->startp[1] ); - - l = list_new( l, newstr( re[i]->startp[1] ) ); - } - - /* special treatment for #include MACRO */ - if ( regexec( re_macros, buf ) && re_macros->startp[1] ) - { - char* header_filename; - - re_macros->endp[1][0] = '\0'; - - if ( DEBUG_HEADER ) - printf( "macro header found: %s", re_macros->startp[1] ); - - header_filename = macro_header_get( re_macros->startp[1] ); - if ( header_filename ) - { - if ( DEBUG_HEADER ) - printf( " resolved to '%s'\n", header_filename ); - l = list_new( l, newstr( header_filename ) ); - } - else - { - if ( DEBUG_HEADER ) - printf( " ignored !!\n" ); - } - } - } - - fclose( f ); - - return l; -} - - -void regerror( char * s ) -{ - printf( "re error %s\n", s ); -} diff --git a/jam-files/engine/headers.h b/jam-files/engine/headers.h deleted file mode 100644 index 624475fe..00000000 --- a/jam-files/engine/headers.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * headers.h - handle #includes in source files - */ - -void headers( TARGET *t ); - -#ifdef OPT_HEADER_CACHE_EXT -struct regexp; -LIST *headers1( LIST *l, char *file, int rec, struct regexp *re[] ); -#endif diff --git a/jam-files/engine/jam.c b/jam-files/engine/jam.c deleted file mode 100644 index e11d082b..00000000 --- a/jam-files/engine/jam.c +++ /dev/null @@ -1,632 +0,0 @@ -/* - * /+\ - * +\ Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * \+/ - * - * This file is part of jam. - * - * License is hereby granted to use this software and distribute it - * freely, as long as this copyright notice is retained and modifications - * are clearly marked. - * - * ALL WARRANTIES ARE HEREBY DISCLAIMED. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -/* - * jam.c - make redux - * - * See Jam.html for usage information. - * - * These comments document the code. 
- * - * The top half of the code is structured such: - * - * jam - * / | \ - * +---+ | \ - * / | \ - * jamgram option \ - * / | \ \ - * / | \ \ - * / | \ | - * scan | compile make - * | | / | \ / | \ - * | | / | \ / | \ - * | | / | \ / | \ - * jambase parse | rules search make1 - * | | | \ - * | | | \ - * | | | \ - * builtins timestamp command execute - * | - * | - * | - * filesys - * - * - * The support routines are called by all of the above, but themselves - * are layered thus: - * - * variable|expand - * / | | | - * / | | | - * / | | | - * lists | | pathsys - * \ | | - * \ | | - * \ | | - * newstr | - * \ | - * \ | - * \ | - * hash - * - * Roughly, the modules are: - * - * builtins.c - jam's built-in rules - * command.c - maintain lists of commands - * compile.c - compile parsed jam statements - * execunix.c - execute a shell script on UNIX - * execvms.c - execute a shell script, ala VMS - * expand.c - expand a buffer, given variable values - * file*.c - scan directories and archives on * - * hash.c - simple in-memory hashing routines - * hdrmacro.c - handle header file parsing for filename macro definitions - * headers.c - handle #includes in source files - * jambase.c - compilable copy of Jambase - * jamgram.y - jam grammar - * lists.c - maintain lists of strings - * make.c - bring a target up to date, once rules are in place - * make1.c - execute command to bring targets up to date - * newstr.c - string manipulation routines - * option.c - command line option processing - * parse.c - make and destroy parse trees as driven by the parser - * path*.c - manipulate file names on * - * hash.c - simple in-memory hashing routines - * regexp.c - Henry Spencer's regexp - * rules.c - access to RULEs, TARGETs, and ACTIONs - * scan.c - the jam yacc scanner - * search.c - find a target along $(SEARCH) or $(LOCATE) - * timestamp.c - get the timestamp of a file or archive member - * variable.c - handle jam multi-element variables - * - * 05/04/94 (seiwald) - async multiprocess (-j) support - * 02/08/95 (seiwald) - -n implies -d2. - * 02/22/95 (seiwald) - -v for version info. - * 09/11/00 (seiwald) - PATCHLEVEL folded into VERSION. - * 01/10/01 (seiwald) - pathsys.h split from filesys.h - */ - - -#include "jam.h" -#include "option.h" -#include "patchlevel.h" - -/* These get various function declarations. */ -#include "lists.h" -#include "parse.h" -#include "variable.h" -#include "compile.h" -#include "builtins.h" -#include "rules.h" -#include "newstr.h" -#include "scan.h" -#include "timestamp.h" -#include "make.h" -#include "strings.h" -#include "expand.h" -#include "filesys.h" -#include "output.h" - -/* Macintosh is "special" */ -#ifdef OS_MAC - #include <QuickDraw.h> -#endif - -/* And UNIX for this. */ -#ifdef unix - #include <sys/utsname.h> - #include <signal.h> -#endif - -struct globs globs = -{ - 0, /* noexec */ - 1, /* jobs */ - 0, /* quitquick */ - 0, /* newestfirst */ - 0, /* pipes action stdout and stderr merged to action output */ -#ifdef OS_MAC - { 0, 0 }, /* debug - suppress tracing output */ -#else - { 0, 1 }, /* debug ... */ -#endif - 0, /* output commands, not run them */ - 0 /* action timeout */ -}; - -/* Symbols to be defined as true for use in Jambase. 
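Two observations on the surrounding code: othersyms[] on the next line collects the OSMAJOR, OSMINOR, OSPLAT and JAMVERSYM strings defined per platform in jam.h, and main() deliberately feeds it to var_defines() only after the environment has been loaded, so that setting OS in the environment cannot change jam's notion of the current platform. And in the globs initializer above, debug[ 1 ] defaults to 1 everywhere except the Mac; per jam.h, that is DEBUG_MAKE, i.e. printing actions as they are executed is on by default.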
*/ -static char * othersyms[] = { OSMAJOR, OSMINOR, OSPLAT, JAMVERSYM, 0 }; - - -/* Known for sure: - * mac needs arg_enviro - * OS2 needs extern environ - */ - -#ifdef OS_MAC - #define use_environ arg_environ - #ifdef MPW - QDGlobals qd; - #endif -#endif - -/* on Win32-LCC */ -#if defined( OS_NT ) && defined( __LCC__ ) - #define use_environ _environ -#endif - -# if defined( __MWERKS__) - #define use_environ _environ - extern char * * _environ; -#endif - -#ifndef use_environ - #define use_environ environ - #if !defined( __WATCOM__ ) && !defined( OS_OS2 ) && !defined( OS_NT ) - extern char **environ; - #endif -#endif - -#if YYDEBUG != 0 - extern int yydebug; -#endif - -#ifndef NDEBUG -static void run_unit_tests() -{ -#if defined( USE_EXECNT ) - extern void execnt_unit_test(); - execnt_unit_test(); -#endif - string_unit_test(); - var_expand_unit_test(); -} -#endif - -int anyhow = 0; - -#ifdef HAVE_PYTHON - extern PyObject * bjam_call ( PyObject * self, PyObject * args ); - extern PyObject * bjam_import_rule ( PyObject * self, PyObject * args ); - extern PyObject * bjam_define_action( PyObject * self, PyObject * args ); - extern PyObject * bjam_variable ( PyObject * self, PyObject * args ); - extern PyObject * bjam_backtrace ( PyObject * self, PyObject * args ); - extern PyObject * bjam_caller ( PyObject * self, PyObject * args ); -#endif - -char *saved_argv0; - -int main( int argc, char * * argv, char * * arg_environ ) -{ - int n; - char * s; - struct bjam_option optv[N_OPTS]; - char const * all = "all"; - int status; - int arg_c = argc; - char * * arg_v = argv; - char const * progname = argv[0]; - - saved_argv0 = argv[0]; - - BJAM_MEM_INIT(); - -# ifdef OS_MAC - InitGraf(&qd.thePort); -# endif - - --argc; - ++argv; - - if ( getoptions( argc, argv, "-:l:d:j:p:f:gs:t:ano:qv", optv ) < 0 ) - { - printf( "\nusage: %s [ options ] targets...\n\n", progname ); - - printf( "-a Build all targets, even if they are current.\n" ); - printf( "-dx Set the debug level to x (0-9).\n" ); - printf( "-fx Read x instead of Jambase.\n" ); - /* printf( "-g Build from newest sources first.\n" ); */ - printf( "-jx Run up to x shell commands concurrently.\n" ); - printf( "-lx Limit actions to x number of seconds after which they are stopped.\n" ); - printf( "-n Don't actually execute the updating actions.\n" ); - printf( "-ox Write the updating actions to file x.\n" ); - printf( "-px x=0, pipes action stdout and stderr merged into action output.\n" ); - printf( "-q Quit quickly as soon as a target fails.\n" ); - printf( "-sx=y Set variable x=y, overriding environment.\n" ); - printf( "-tx Rebuild x, even if it is up-to-date.\n" ); - printf( "-v Print the version of jam and exit.\n" ); - printf( "--x Option is ignored.\n\n" ); - - exit( EXITBAD ); - } - - /* Version info. */ - if ( ( s = getoptval( optv, 'v', 0 ) ) ) - { - printf( "Boost.Jam " ); - printf( "Version %s. %s.\n", VERSION, OSMINOR ); - printf( " Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. \n" ); - printf( " Copyright 2001 David Turner.\n" ); - printf( " Copyright 2001-2004 David Abrahams.\n" ); - printf( " Copyright 2002-2008 Rene Rivera.\n" ); - printf( " Copyright 2003-2008 Vladimir Prus.\n" ); - - return EXITOK; - } - - /* Pick up interesting options. */ - if ( ( s = getoptval( optv, 'n', 0 ) ) ) - globs.noexec++, globs.debug[2] = 1; - - if ( ( s = getoptval( optv, 'p', 0 ) ) ) - { - /* Undocumented -p3 (acts like both -p1 -p2) means separate pipe action - * stdout and stderr. 
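In other words, -p takes a two-bit flag value: -p0 (the default) merges an action's stdout and stderr into its output, -p1 and -p2 evidently select the two separate-pipe behaviours individually, and -p3 turns on both. The range check just below rejects anything outside 0..3.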
- */ - globs.pipe_action = atoi( s ); - if ( ( 3 < globs.pipe_action ) || ( globs.pipe_action < 0 ) ) - { - printf( - "Invalid pipe descriptor '%d', valid values are -p[0..3].\n", - globs.pipe_action ); - exit( EXITBAD ); - } - } - - if ( ( s = getoptval( optv, 'q', 0 ) ) ) - globs.quitquick = 1; - - if ( ( s = getoptval( optv, 'a', 0 ) ) ) - anyhow++; - - if ( ( s = getoptval( optv, 'j', 0 ) ) ) - { - globs.jobs = atoi( s ); - if (globs.jobs == 0) - { - printf("Invalid value for the '-j' option.\n"); - exit(EXITBAD); - } - } - - if ( ( s = getoptval( optv, 'g', 0 ) ) ) - globs.newestfirst = 1; - - if ( ( s = getoptval( optv, 'l', 0 ) ) ) - globs.timeout = atoi( s ); - - /* Turn on/off debugging */ - for ( n = 0; ( s = getoptval( optv, 'd', n ) ); ++n ) - { - int i; - - /* First -d, turn off defaults. */ - if ( !n ) - for ( i = 0; i < DEBUG_MAX; ++i ) - globs.debug[i] = 0; - - i = atoi( s ); - - if ( ( i < 0 ) || ( i >= DEBUG_MAX ) ) - { - printf( "Invalid debug level '%s'.\n", s ); - continue; - } - - /* n turns on levels 1-n. */ - /* +n turns on level n. */ - if ( *s == '+' ) - globs.debug[i] = 1; - else while ( i ) - globs.debug[i--] = 1; - } - - { - PROFILE_ENTER( MAIN ); - -#ifdef HAVE_PYTHON - { - PROFILE_ENTER( MAIN_PYTHON ); - Py_Initialize(); - { - static PyMethodDef BjamMethods[] = { - {"call", bjam_call, METH_VARARGS, - "Call the specified bjam rule."}, - {"import_rule", bjam_import_rule, METH_VARARGS, - "Imports Python callable to bjam."}, - {"define_action", bjam_define_action, METH_VARARGS, - "Defines a command line action."}, - {"variable", bjam_variable, METH_VARARGS, - "Obtains a variable from bjam's global module."}, - {"backtrace", bjam_backtrace, METH_VARARGS, - "Returns bjam backtrace from the last call into Python."}, - {"caller", bjam_caller, METH_VARARGS, - "Returns the module from which the last call into Python is made."}, - {NULL, NULL, 0, NULL} - }; - - Py_InitModule( "bjam", BjamMethods ); - } - PROFILE_EXIT( MAIN_PYTHON ); - } -#endif - -#ifndef NDEBUG - run_unit_tests(); -#endif -#if YYDEBUG != 0 - if ( DEBUG_PARSE ) - yydebug = 1; -#endif - - /* Set JAMDATE. */ - var_set( "JAMDATE", list_new( L0, outf_time(time(0)) ), VAR_SET ); - - /* Set JAM_VERSION. */ - var_set( "JAM_VERSION", - list_new( list_new( list_new( L0, - newstr( VERSION_MAJOR_SYM ) ), - newstr( VERSION_MINOR_SYM ) ), - newstr( VERSION_PATCH_SYM ) ), - VAR_SET ); - - /* Set JAMUNAME. */ -#ifdef unix - { - struct utsname u; - - if ( uname( &u ) >= 0 ) - { - var_set( "JAMUNAME", - list_new( - list_new( - list_new( - list_new( - list_new( L0, - newstr( u.sysname ) ), - newstr( u.nodename ) ), - newstr( u.release ) ), - newstr( u.version ) ), - newstr( u.machine ) ), VAR_SET ); - } - } -#endif /* unix */ - - /* Load up environment variables. */ - - /* First into the global module, with splitting, for backward - * compatibility. - */ - var_defines( use_environ, 1 ); - - /* Then into .ENVIRON, without splitting. */ - enter_module( bindmodule(".ENVIRON") ); - var_defines( use_environ, 0 ); - exit_module( bindmodule(".ENVIRON") ); - - /* - * Jam defined variables OS & OSPLAT. We load them after environment, so - * that setting OS in environment does not change Jam's notion of the - * current platform. - */ - var_defines( othersyms, 1 ); - - /* Load up variables set on command line. 
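For reference, the -d handling above gives the option these semantics:

    -d2     turn on debug levels 1 and 2
    -d+4    turn on level 4 only, leaving other levels untouched
    (the first -d on the command line first clears the default levels)

The level numbers map onto the DEBUG_* macros in jam.h further down; level 6, for example, enables the DEBUG_HEADER tracing used throughout headers.c and hcache.c above.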
*/ - for ( n = 0; ( s = getoptval( optv, 's', n ) ); ++n ) - { - char *symv[2]; - symv[ 0 ] = s; - symv[ 1 ] = 0; - var_defines( symv, 1 ); - enter_module( bindmodule(".ENVIRON") ); - var_defines( symv, 0 ); - exit_module( bindmodule(".ENVIRON") ); - } - - /* Set the ARGV to reflect the complete list of arguments of invocation. - */ - for ( n = 0; n < arg_c; ++n ) - var_set( "ARGV", list_new( L0, newstr( arg_v[n] ) ), VAR_APPEND ); - - /* Initialize built-in rules. */ - load_builtins(); - - /* Add the targets in the command line to the update list. */ - for ( n = 1; n < arg_c; ++n ) - { - if ( arg_v[ n ][ 0 ] == '-' ) - { - char * f = "-:l:d:j:f:gs:t:ano:qv"; - for ( ; *f; ++f ) if ( *f == arg_v[ n ][ 1 ] ) break; - if ( ( f[ 1 ] == ':' ) && ( arg_v[ n ][ 2 ] == '\0' ) ) ++n; - } - else - { - mark_target_for_updating( arg_v[ n ] ); - } - } - - if (!targets_to_update()) - mark_target_for_updating("all"); - - /* Parse ruleset. */ - { - FRAME frame[ 1 ]; - frame_init( frame ); - for ( n = 0; ( s = getoptval( optv, 'f', n ) ); ++n ) - parse_file( s, frame ); - - if ( !n ) - parse_file( "+", frame ); - } - - status = yyanyerrors(); - - /* Manually touch -t targets. */ - for ( n = 0; ( s = getoptval( optv, 't', n ) ); ++n ) - touch_target( s ); - - /* If an output file is specified, set globs.cmdout to that. */ - if ( ( s = getoptval( optv, 'o', 0 ) ) ) - { - if ( !( globs.cmdout = fopen( s, "w" ) ) ) - { - printf( "Failed to write to '%s'\n", s ); - exit( EXITBAD ); - } - ++globs.noexec; - } - - /* The build system may set the PARALLELISM variable to override -j - options. */ - { - LIST *p = L0; - p = var_get ("PARALLELISM"); - if (p) - { - int j = atoi (p->string); - if (j == -1) - { - printf( "Invalid value of PARALLELISM: %s\n", p->string); - } - else - { - globs.jobs = j; - } - } - } - - /* KEEP_GOING overrides -q option. */ - { - LIST *p = L0; - p = var_get ("KEEP_GOING"); - if (p) - { - int v = atoi (p->string); - if (v == 0) - globs.quitquick = 1; - else - globs.quitquick = 0; - } - } - - /* Now make target. */ - { - PROFILE_ENTER( MAIN_MAKE ); - - LIST * targets = targets_to_update(); - if (targets) - { - int targets_count = list_length( targets ); - const char * * targets2 = (const char * *) - BJAM_MALLOC( targets_count * sizeof( char * ) ); - int n = 0; - for ( ; targets; targets = list_next( targets ) ) - targets2[ n++ ] = targets->string; - status |= make( targets_count, targets2, anyhow ); - free( targets ); - } - else - { - status = last_update_now_status; - } - - PROFILE_EXIT( MAIN_MAKE ); - } - - PROFILE_EXIT( MAIN ); - } - - if ( DEBUG_PROFILE ) - profile_dump(); - - /* Widely scattered cleanup. */ - var_done(); - file_done(); - rules_done(); - stamps_done(); - str_done(); - - /* Close cmdout. */ - if ( globs.cmdout ) - fclose( globs.cmdout ); - -#ifdef HAVE_PYTHON - Py_Finalize(); -#endif - - BJAM_MEM_CLOSE(); - - return status ? 
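/* A note on the ruleset parsing above: when no -f option is given,
 * parse_file( "+", frame ) is called, and the "+" pseudo-filename makes
 * the scanner read the compiled-in Jambase from the jambase[] string
 * array instead of a file (see jambase.c, generated by mkjambase,
 * further down in this diff). */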
EXITBAD : EXITOK; -} - -#if defined(_WIN32) -#include <windows.h> -char *executable_path(char *argv0) { - char buf[1024]; - DWORD ret = GetModuleFileName(NULL, buf, sizeof(buf)); - if (ret == 0 || ret == sizeof(buf)) return NULL; - return strdup (buf); -} -#elif defined(__APPLE__) /* Not tested */ -#include <mach-o/dyld.h> -char *executable_path(char *argv0) { - char buf[1024]; - uint32_t size = sizeof(buf); - int ret = _NSGetExecutablePath(buf, &size); - if (ret != 0) return NULL; - return strdup(buf); -} -#elif defined(sun) || defined(__sun) /* Not tested */ -#include <stdlib.h> - -char *executable_path(char *argv0) { - return strdup(getexecname()); -} -#elif defined(__FreeBSD__) -#include <sys/sysctl.h> -char *executable_path(char *argv0) { - int mib[4]; - mib[0] = CTL_KERN; - mib[1] = KERN_PROC; - mib[2] = KERN_PROC_PATHNAME; - mib[3] = -1; - char buf[1024]; - size_t size = sizeof(buf); - sysctl(mib, 4, buf, &size, NULL, 0); - if (size == 0 || size == sizeof(buf)) return NULL; - return strndup(buf, size); -} -#elif defined(__linux__) -#include <unistd.h> -char *executable_path(char *argv0) { - char buf[1024]; - ssize_t ret = readlink("/proc/self/exe", buf, sizeof(buf)); - if (ret == 0 || ret == sizeof(buf)) return NULL; - return strndup(buf, ret); -} -#else -char *executable_path(char *argv0) { - /* If argv0 is absolute path, assume it's the right absolute path. */ - if (argv0[0] == "/") - return strdup(argv0); - return NULL; -} -#endif diff --git a/jam-files/engine/jam.h b/jam-files/engine/jam.h deleted file mode 100644 index 73a7a04c..00000000 --- a/jam-files/engine/jam.h +++ /dev/null @@ -1,579 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -/* - * jam.h - includes and globals for jam - * - * 04/08/94 (seiwald) - Coherent/386 support added. - * 04/21/94 (seiwald) - DGUX is __DGUX__, not just __DGUX. - * 05/04/94 (seiwald) - new globs.jobs (-j jobs) - * 11/01/94 (wingerd) - let us define path of Jambase at compile time. - * 12/30/94 (wingerd) - changed command buffer size for NT (MS-DOS shell). - * 02/22/95 (seiwald) - Jambase now in /usr/local/lib. - * 04/30/95 (seiwald) - FreeBSD added. Live Free or Die. - * 05/10/95 (seiwald) - SPLITPATH character set up here. - * 08/20/95 (seiwald) - added LINUX. - * 08/21/95 (seiwald) - added NCR. - * 10/23/95 (seiwald) - added SCO. - * 01/03/96 (seiwald) - SINIX (nixdorf) added. - * 03/13/96 (seiwald) - Jambase now compiled in; remove JAMBASE variable. - * 04/29/96 (seiwald) - AIX now has 31 and 42 OSVERs. - * 11/21/96 (peterk) - added BeOS with MW CW mwcc - * 12/21/96 (seiwald) - OSPLAT now defined for NT. - * 07/19/99 (sickel) - Mac OS X Server and Client support added - * 02/18/00 (belmonte)- Support for Cygwin. - * 09/12/00 (seiwald) - OSSYMS split to OSMAJOR/OSMINOR/OSPLAT - * 12/29/00 (seiwald) - OSVER dropped. - */ - -#ifndef JAM_H_VP_2003_08_01 -#define JAM_H_VP_2003_08_01 - -#ifdef HAVE_PYTHON -#include <Python.h> -#endif - -/* Assume popen support is available unless known otherwise. 
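One more jam.c note: in the fallback executable_path() above, `if (argv0[0] == "/")` compares a char against the address of a string literal, which is never the intended test (and draws a compiler warning). What was presumably meant:

    /* if argv0 is an absolute path, assume it is the right one */
    if ( argv0[ 0 ] == '/' )
        return strdup( argv0 );

Similarly, the Linux variant checks `ret == 0`, but readlink() reports failure as -1, so `ret <= 0` would be the safer test.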
*/ -#define HAVE_POPEN 1 - -/* - * VMS, OPENVMS - */ - -#ifdef VMS - -#include <types.h> -#include <file.h> -#include <stat.h> -#include <stdio.h> -#include <ctype.h> -#include <stdlib.h> -#include <signal.h> -#include <string.h> -#include <time.h> -#include <unistd.h> -#include <unixlib.h> - -#define OSMINOR "OS=VMS" -#define OSMAJOR "VMS=true" -#define OS_VMS -#define MAXLINE 1024 /* longest 'together' actions */ -#define SPLITPATH ',' -#define EXITOK 1 -#define EXITBAD 0 -#define DOWNSHIFT_PATHS - -/* This may be inaccurate. */ -#ifndef __DECC -#define OSPLAT "OSPLAT=VAX" -#endif - -#endif - -/* - * Windows NT - */ - -#ifdef NT - -#include <fcntl.h> -#include <stdlib.h> -#include <stdio.h> -#include <ctype.h> -#include <malloc.h> -#ifndef __MWERKS__ - #include <memory.h> -#endif -#include <signal.h> -#include <string.h> -#include <time.h> - -#define OSMAJOR "NT=true" -#define OSMINOR "OS=NT" -#define OS_NT -#define SPLITPATH ';' -/* Windows NT 3.51 only allows 996 chars per line, but we deal with the problem - * in "execnt.c". - */ -#define MAXLINE (maxline()) /* longest 'together' actions */ -#define USE_EXECNT -#define USE_PATHUNIX -#define PATH_DELIM '\\' -#define DOWNSHIFT_PATHS - -/* AS400 cross-compile from NT. */ - -#ifdef AS400 - #undef OSMINOR - #undef OSMAJOR - #define OSMAJOR "AS400=true" - #define OSMINOR "OS=AS400" - #define OS_AS400 -#endif - -/* Metrowerks Standard Library on Windows. */ - -#ifdef __MSL__ - #undef HAVE_POPEN -#endif - -# endif - -/* - * Windows MingW32 - */ - -#ifdef MINGW - -#include <fcntl.h> -#include <stdlib.h> -#include <stdio.h> -#include <ctype.h> -#include <malloc.h> -#include <memory.h> -#include <signal.h> -#include <string.h> -#include <time.h> - -#define OSMAJOR "MINGW=true" -#define OSMINOR "OS=MINGW" -#define OS_NT -#define SPLITPATH ';' -#define MAXLINE 996 /* longest 'together' actions */ -#define USE_EXECUNIX -#define USE_PATHUNIX -#define PATH_DELIM '\\' -#define DOWNSHIFT_PATHS - -#endif - -/* - * OS2 - */ - -#ifdef __OS2__ - -#include <fcntl.h> -#include <stdlib.h> -#include <stdio.h> -#include <ctype.h> -#include <malloc.h> -#include <signal.h> -#include <string.h> -#include <time.h> - -#define OSMAJOR "OS2=true" -#define OSMINOR "OS=OS2" -#define OS_OS2 -#define SPLITPATH ';' -#define MAXLINE 996 /* longest 'together' actions */ -#define USE_EXECUNIX -#define USE_PATHUNIX -#define PATH_DELIM '\\' -#define DOWNSHIFT_PATHS - -#ifdef __EMX__ - #define USE_FILEUNIX -#endif - -#endif - -/* - * Macintosh MPW - */ - -#ifdef macintosh - -#include <time.h> -#include <stdlib.h> -#include <string.h> -#include <stdio.h> - -#define OSMAJOR "MAC=true" -#define OSMINOR "OS=MAC" -#define OS_MAC -#define SPLITPATH ',' - -#endif - -/* - * God fearing UNIX. 
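The section that follows is the catch-all: it is guarded by #ifndef OSMINOR, so it applies only when none of the platform blocks above (VMS, NT, MinGW, OS2, Macintosh) matched. Each UNIX flavour then keys off its compiler-predefined macro to set OSMINOR and an OS_* symbol, plus the occasional quirk flag such as NO_VFORK or a platform-specific MAXLINE.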
- */ - -#ifndef OSMINOR - -#define OSMAJOR "UNIX=true" -#define USE_EXECUNIX -#define USE_FILEUNIX -#define USE_PATHUNIX -#define PATH_DELIM '/' - -#ifdef _AIX - #define unix - #define MAXLINE 23552 /* 24k - 1k, longest 'together' actions */ - #define OSMINOR "OS=AIX" - #define OS_AIX - #define NO_VFORK -#endif -#ifdef AMIGA - #define OSMINOR "OS=AMIGA" - #define OS_AMIGA -#endif -#ifdef __BEOS__ - #define unix - #define OSMINOR "OS=BEOS" - #define OS_BEOS - #define NO_VFORK -#endif -#ifdef __bsdi__ - #define OSMINOR "OS=BSDI" - #define OS_BSDI -#endif -#if defined (COHERENT) && defined (_I386) - #define OSMINOR "OS=COHERENT" - #define OS_COHERENT - #define NO_VFORK -#endif -#if defined(__cygwin__) || defined(__CYGWIN__) - #define OSMINOR "OS=CYGWIN" - #define OS_CYGWIN -#endif -#if defined(__FreeBSD__) && !defined(__DragonFly__) - #define OSMINOR "OS=FREEBSD" - #define OS_FREEBSD -#endif -#ifdef __DragonFly__ - #define OSMINOR "OS=DRAGONFLYBSD" - #define OS_DRAGONFLYBSD -#endif -#ifdef __DGUX__ - #define OSMINOR "OS=DGUX" - #define OS_DGUX -#endif -#ifdef __hpux - #define OSMINOR "OS=HPUX" - #define OS_HPUX -#endif -#ifdef __OPENNT - #define unix - #define OSMINOR "OS=INTERIX" - #define OS_INTERIX - #define NO_VFORK -#endif -#ifdef __sgi - #define OSMINOR "OS=IRIX" - #define OS_IRIX - #define NO_VFORK -#endif -#ifdef __ISC - #define OSMINOR "OS=ISC" - #define OS_ISC - #define NO_VFORK -#endif -#ifdef linux - #define OSMINOR "OS=LINUX" - #define OS_LINUX -#endif -#ifdef __Lynx__ - #define OSMINOR "OS=LYNX" - #define OS_LYNX - #define NO_VFORK - #define unix -#endif -#ifdef __MACHTEN__ - #define OSMINOR "OS=MACHTEN" - #define OS_MACHTEN -#endif -#ifdef mpeix - #define unix - #define OSMINOR "OS=MPEIX" - #define OS_MPEIX - #define NO_VFORK -#endif -#ifdef __MVS__ - #define unix - #define OSMINOR "OS=MVS" - #define OS_MVS -#endif -#ifdef _ATT4 - #define OSMINOR "OS=NCR" - #define OS_NCR -#endif -#ifdef __NetBSD__ - #define unix - #define OSMINOR "OS=NETBSD" - #define OS_NETBSD - #define NO_VFORK -#endif -#ifdef __QNX__ - #define unix - #ifdef __QNXNTO__ - #define OSMINOR "OS=QNXNTO" - #define OS_QNXNTO - #else - #define OSMINOR "OS=QNX" - #define OS_QNX - #define NO_VFORK - #define MAXLINE 996 - #endif -#endif -#ifdef NeXT - #ifdef __APPLE__ - #define OSMINOR "OS=RHAPSODY" - #define OS_RHAPSODY - #else - #define OSMINOR "OS=NEXT" - #define OS_NEXT - #endif -#endif -#ifdef __APPLE__ - #define unix - #define OSMINOR "OS=MACOSX" - #define OS_MACOSX -#endif -#ifdef __osf__ - #ifndef unix - #define unix - #endif - #define OSMINOR "OS=OSF" - #define OS_OSF -#endif -#ifdef _SEQUENT_ - #define OSMINOR "OS=PTX" - #define OS_PTX -#endif -#ifdef M_XENIX - #define OSMINOR "OS=SCO" - #define OS_SCO - #define NO_VFORK -#endif -#ifdef sinix - #define unix - #define OSMINOR "OS=SINIX" - #define OS_SINIX -#endif -#ifdef sun - #if defined(__svr4__) || defined(__SVR4) - #define OSMINOR "OS=SOLARIS" - #define OS_SOLARIS - #else - #define OSMINOR "OS=SUNOS" - #define OS_SUNOS - #endif -#endif -#ifdef ultrix - #define OSMINOR "OS=ULTRIX" - #define OS_ULTRIX -#endif -#ifdef _UNICOS - #define OSMINOR "OS=UNICOS" - #define OS_UNICOS -#endif -#if defined(__USLC__) && !defined(M_XENIX) - #define OSMINOR "OS=UNIXWARE" - #define OS_UNIXWARE -#endif -#ifdef __OpenBSD__ - #define OSMINOR "OS=OPENBSD" - #define OS_OPENBSD - #define unix -#endif -#if defined (__FreeBSD_kernel__) && !defined(__FreeBSD__) - #define OSMINOR "OS=KFREEBSD" - #define OS_KFREEBSD -#endif -#ifndef OSMINOR - #define OSMINOR "OS=UNKNOWN" -#endif - -/* 
All the UNIX includes */ - -#include <sys/types.h> -#include <sys/stat.h> - -#ifndef OS_MPEIX - #include <sys/file.h> -#endif - -#include <fcntl.h> -#include <stdio.h> -#include <ctype.h> -#include <signal.h> -#include <string.h> -#include <time.h> -#include <unistd.h> - -#ifndef OS_QNX - #include <memory.h> -#endif - -#ifndef OS_ULTRIX - #include <stdlib.h> -#endif - -#if !defined( OS_BSDI ) && \ - !defined( OS_FREEBSD ) && \ - !defined( OS_DRAGONFLYBSD ) && \ - !defined( OS_NEXT ) && \ - !defined( OS_MACHTEN ) && \ - !defined( OS_MACOSX ) && \ - !defined( OS_RHAPSODY ) && \ - !defined( OS_MVS ) && \ - !defined( OS_OPENBSD ) - #include <malloc.h> -#endif - -#endif - -/* - * OSPLAT definitions - suppressed when it is a one-of-a-kind. - */ - -#if defined( _M_PPC ) || \ - defined( PPC ) || \ - defined( ppc ) || \ - defined( __powerpc__ ) || \ - defined( __ppc__ ) - #define OSPLAT "OSPLAT=PPC" -#endif - -#if defined( _ALPHA_ ) || \ - defined( __alpha__ ) - #define OSPLAT "OSPLAT=AXP" -#endif - -#if defined( _i386_ ) || \ - defined( __i386__ ) || \ - defined( __i386 ) || \ - defined( _M_IX86 ) - #define OSPLAT "OSPLAT=X86" -#endif - -#if defined( __ia64__ ) || \ - defined( __IA64__ ) || \ - defined( __ia64 ) - #define OSPLAT "OSPLAT=IA64" -#endif - -#if defined( __x86_64__ ) || \ - defined( __amd64__ ) || \ - defined( _M_AMD64 ) - #define OSPLAT "OSPLAT=X86_64" -#endif - - -#if defined( __sparc__ ) || \ - defined( __sparc ) - #define OSPLAT "OSPLAT=SPARC" -#endif - -#ifdef __mips__ - #define OSPLAT "OSPLAT=MIPS" -#endif - -#ifdef __arm__ - #define OSPLAT "OSPLAT=ARM" -#endif - -#ifdef __s390__ - #define OSPLAT "OSPLAT=390" -#endif - -#ifdef __hppa - #define OSPLAT "OSPLAT=PARISC" -#endif - -#ifndef OSPLAT - #define OSPLAT "" -#endif - -/* - * Jam implementation misc. - */ - -#ifndef MAXLINE - #define MAXLINE 102400 /* longest 'together' actions' */ -#endif - -#ifndef EXITOK - #define EXITOK 0 - #define EXITBAD 1 -#endif - -#ifndef SPLITPATH - #define SPLITPATH ':' -#endif - -/* You probably do not need to muck with these. */ - -#define MAXSYM 1024 /* longest symbol in the environment */ -#define MAXJPATH 1024 /* longest filename */ - -#define MAXJOBS 64 /* silently enforced -j limit */ -#define MAXARGC 32 /* words in $(JAMSHELL) */ - -/* Jam private definitions below. */ - -#define DEBUG_MAX 14 - - -struct globs -{ - int noexec; - int jobs; - int quitquick; - int newestfirst; /* build newest sources first */ - int pipe_action; - char debug[ DEBUG_MAX ]; - FILE * cmdout; /* print cmds, not run them */ - long timeout; /* number of seconds to limit actions to, - * default 0 for no limit. 
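For orientation, these fields map straight onto the command line handled in jam.c above: noexec is set by -n, jobs by -j (and overridden by the PARALLELISM variable), quitquick by -q (and overridden by KEEP_GOING), newestfirst by -g, pipe_action by -p, timeout by -l, cmdout by -o, and debug[] by the -d levels.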
- */ - int dart; /* output build and test results formatted for Dart */ -}; - -extern struct globs globs; - -#define DEBUG_MAKE ( globs.debug[ 1 ] ) /* show actions when executed */ -#define DEBUG_MAKEQ ( globs.debug[ 2 ] ) /* show even quiet actions */ -#define DEBUG_EXEC ( globs.debug[ 2 ] ) /* show text of actons */ -#define DEBUG_MAKEPROG ( globs.debug[ 3 ] ) /* show progress of make0 */ -#define DEBUG_BIND ( globs.debug[ 3 ] ) /* show when files bound */ - -#define DEBUG_EXECCMD ( globs.debug[ 4 ] ) /* show execcmds()'s work */ - -#define DEBUG_COMPILE ( globs.debug[ 5 ] ) /* show rule invocations */ - -#define DEBUG_HEADER ( globs.debug[ 6 ] ) /* show result of header scan */ -#define DEBUG_BINDSCAN ( globs.debug[ 6 ] ) /* show result of dir scan */ -#define DEBUG_SEARCH ( globs.debug[ 6 ] ) /* show attempts at binding */ - -#define DEBUG_VARSET ( globs.debug[ 7 ] ) /* show variable settings */ -#define DEBUG_VARGET ( globs.debug[ 8 ] ) /* show variable fetches */ -#define DEBUG_VAREXP ( globs.debug[ 8 ] ) /* show variable expansions */ -#define DEBUG_IF ( globs.debug[ 8 ] ) /* show 'if' calculations */ -#define DEBUG_LISTS ( globs.debug[ 9 ] ) /* show list manipulation */ -#define DEBUG_SCAN ( globs.debug[ 9 ] ) /* show scanner tokens */ -#define DEBUG_MEM ( globs.debug[ 9 ] ) /* show memory use */ - -#define DEBUG_PROFILE ( globs.debug[ 10 ] ) /* dump rule execution times */ -#define DEBUG_PARSE ( globs.debug[ 11 ] ) /* debug parsing */ -#define DEBUG_GRAPH ( globs.debug[ 12 ] ) /* debug dependencies */ -#define DEBUG_FATE ( globs.debug[ 13 ] ) /* show changes to fate in make0() */ - -/* Everyone gets the memory definitions. */ -#include "mem.h" - -/* They also get the profile functions. */ -#include "debug.h" - -#endif diff --git a/jam-files/engine/jambase.c b/jam-files/engine/jambase.c deleted file mode 100644 index b15282bc..00000000 --- a/jam-files/engine/jambase.c +++ /dev/null @@ -1,1691 +0,0 @@ -/* Generated by mkjambase from Jambase */ -char *jambase[] = { -/* Jambase */ -"if $(NT)\n", -"{\n", -"SLASH ?= \\\\ ;\n", -"}\n", -"SLASH ?= / ;\n", -"rule find-to-root ( dir : patterns + )\n", -"{\n", -"local globs = [ GLOB $(dir) : $(patterns) ] ;\n", -"while ! $(globs) && $(dir:P) != $(dir)\n", -"{\n", -"dir = $(dir:P) ;\n", -"globs = [ GLOB $(dir) : $(patterns) ] ;\n", -"}\n", -"return $(globs) ;\n", -"}\n", -".boost-build-file = ;\n", -".bootstrap-file = ;\n", -"BOOST_BUILD_PATH.user-value = $(BOOST_BUILD_PATH) ;\n", -"if ! $(BOOST_BUILD_PATH) && $(UNIX)\n", -"{\n", -"BOOST_BUILD_PATH = /usr/share/boost-build ;\n", -"}\n", -"rule _poke ( module-name ? : variables + : value * )\n", -"{\n", -"module $(<)\n", -"{\n", -"$(>) = $(3) ;\n", -"}\n", -"}\n", -"rule boost-build ( dir ? )\n", -"{\n", -"if $(.bootstrap-file)\n", -"{\n", -"ECHO \"Error: Illegal attempt to re-bootstrap the build system by invoking\" ;\n", -"ECHO ;\n", -"ECHO \" 'boost-build\" $(dir) \";'\" ;\n", -"ECHO ;\n", -"EXIT \"Please consult the documentation at 'http://www.boost.org'.\" ;\n", -"}\n", -"BOOST_BUILD_PATH = $(dir:R=$(.boost-build-file:D)) $(BOOST_BUILD_PATH) ;\n", -"_poke .ENVIRON : BOOST_BUILD_PATH : $(BOOST_BUILD_PATH) ;\n", -"local bootstrap-file = [ GLOB $(BOOST_BUILD_PATH) : bootstrap.jam ] ;\n", -".bootstrap-file = $(bootstrap-file[1]) ;\n", -"if ! 
$(.bootstrap-file)\n", -"{\n", -"ECHO \"Unable to load Boost.Build: could not find build system.\" ;\n", -"ECHO --------------------------------------------------------- ;\n", -"ECHO \"$(.boost-build-file) attempted to load the build system by invoking\" ;\n", -"ECHO ;\n", -"ECHO \" 'boost-build\" $(dir) \";'\" ;\n", -"ECHO ;\n", -"ECHO \"but we were unable to find \\\"bootstrap.jam\\\" in the specified directory\" ;\n", -"ECHO \"or in BOOST_BUILD_PATH (searching \"$(BOOST_BUILD_PATH:J=\", \")\").\" ;\n", -"ECHO ;\n", -"EXIT \"Please consult the documentation at 'http://www.boost.org'.\" ;\n", -"}\n", -"if [ MATCH .*(--debug-configuration).* : $(ARGV) ]\n", -"{\n", -"ECHO \"notice: loading Boost.Build from\"\n", -"[ NORMALIZE_PATH $(.bootstrap-file:D) ] ;\n", -"}\n", -"include $(.bootstrap-file) ;\n", -"}\n", -"if [ MATCH .*(b2).* : $(ARGV[1]:BL) ] \n", -"|| [ MATCH .*(bjam).* : $(ARGV[1]:BL) ]\n", -"|| $(BOOST_ROOT) # A temporary measure so Jam works with Boost.Build v1.\n", -"{\n", -"local search-path = $(BOOST_BUILD_PATH) $(BOOST_ROOT) ;\n", -"local self = [ SELF_PATH ] ;\n", -"local boost-build-relative = ../../share/boost-build ;\n", -"local self-based-path = [ NORMALIZE_PATH $(boost-build-relative:R=$(self)) ] ;\n", -"local boost-build-files =\n", -"[ find-to-root [ PWD ] : boost-build.jam ]\n", -"[ GLOB $(self-based-path) : boost-build.jam ]\n", -"[ GLOB $(search-path) : boost-build.jam ] ;\n", -".boost-build-file = $(boost-build-files[1]) ;\n", -"if ! $(.boost-build-file)\n", -"{\n", -"ECHO \"Unable to load Boost.Build: could not find \\\"boost-build.jam\\\"\" ;\n", -"ECHO --------------------------------------------------------------- ;\n", -"if ! [ MATCH .*(bjam).* : $(ARGV[1]:BL) ]\n", -"{\n", -"ECHO \"BOOST_ROOT must be set, either in the environment, or \" ;\n", -"ECHO \"on the command-line with -sBOOST_ROOT=..., to the root\" ;\n", -"ECHO \"of the boost installation.\" ;\n", -"ECHO ;\n", -"}\n", -"ECHO \"Attempted search from\" [ PWD ] \"up to the root\" ;\n", -"ECHO \"at\" $(self-based-path) ;\n", -"ECHO \"and in these directories from BOOST_BUILD_PATH and BOOST_ROOT: \"$(search-path:J=\", \")\".\" ;\n", -"EXIT \"Please consult the documentation at 'http://www.boost.org'.\" ;\n", -"}\n", -"if [ MATCH .*(--debug-configuration).* : $(ARGV) ]\n", -"{\n", -"ECHO \"notice: found boost-build.jam at\"\n", -"[ NORMALIZE_PATH $(.boost-build-file) ] ;\n", -"}\n", -"include $(.boost-build-file) ;\n", -"if ! $(.bootstrap-file)\n", -"{\n", -"ECHO \"Unable to load Boost.Build\" ;\n", -"ECHO -------------------------- ;\n", -"ECHO \"\\\"$(.boost-build-file)\\\" was found by searching from\" [ PWD ] \"up to the root\" ;\n", -"ECHO \"and in these directories from BOOST_BUILD_PATH and BOOST_ROOT: \"$(search-path:J=\", \")\".\" ;\n", -"ECHO ;\n", -"ECHO \"However, it failed to call the \\\"boost-build\\\" rule to indicate\" ;\n", -"ECHO \"the location of the build system.\" ;\n", -"ECHO ;\n", -"EXIT \"Please consult the documentation at 'http://www.boost.org'.\" ;\n", -"}\n", -"}\n", -"else\n", -"{\n", -"if $(NT)\n", -"{\n", -"local SUPPORTED_TOOLSETS = \"BORLANDC\" \"VC7\" \"VISUALC\" \"VISUALC16\" \"INTELC\" \"WATCOM\"\n", -"\"MINGW\" \"LCC\" ;\n", -"TOOLSET = \"\" ;\n", -"if $(JAM_TOOLSET)\n", -"{\n", -"local t ;\n", -"for t in $(SUPPORTED_TOOLSETS)\n", -"{\n", -"$(t) = $($(t):J=\" \") ; # reconstitute paths with spaces in them\n", -"if $(t) = $(JAM_TOOLSET) { TOOLSET = $(t) ; }\n", -"}\n", -"if ! 
$(TOOLSET)\n", -"{\n", -"ECHO \"The JAM_TOOLSET environment variable is defined but its value\" ;\n", -"ECHO \"is invalid, please use one of the following:\" ;\n", -"ECHO ;\n", -"for t in $(SUPPORTED_TOOLSETS) { ECHO \" \" $(t) ; }\n", -"EXIT ;\n", -"}\n", -"}\n", -"if ! $(TOOLSET)\n", -"{\n", -"if $(BCCROOT)\n", -"{\n", -"TOOLSET = BORLANDC ;\n", -"BORLANDC = $(BCCROOT:J=\" \") ;\n", -"}\n", -"else if $(MSVC)\n", -"{\n", -"TOOLSET = VISUALC16 ;\n", -"VISUALC16 = $(MSVC:J=\" \") ;\n", -"}\n", -"else if $(MSVCNT)\n", -"{\n", -"TOOLSET = VISUALC ;\n", -"VISUALC = $(MSVCNT:J=\" \") ;\n", -"}\n", -"else if $(MSVCDir)\n", -"{\n", -"TOOLSET = VISUALC ;\n", -"VISUALC = $(MSVCDir:J=\" \") ;\n", -"}\n", -"else if $(MINGW)\n", -"{\n", -"TOOLSET = MINGW ;\n", -"}\n", -"else\n", -"{\n", -"ECHO \"Jam cannot be run because, either:\" ;\n", -"ECHO \" a. You didn't set BOOST_ROOT to indicate the root of your\" ;\n", -"ECHO \" Boost installation.\" ;\n", -"ECHO \" b. You are trying to use stock Jam but didn't indicate which\" ;\n", -"ECHO \" compilation toolset to use. To do so, follow these simple\" ;\n", -"ECHO \" instructions:\" ;\n", -"ECHO ;\n", -"ECHO \" - define one of the following environment variable, with the\" ;\n", -"ECHO \" appropriate value according to this list:\" ;\n", -"ECHO ;\n", -"ECHO \" Variable Toolset Description\" ;\n", -"ECHO ;\n", -"ECHO \" BORLANDC Borland C++ BC++ install path\" ;\n", -"ECHO \" VISUALC Microsoft Visual C++ VC++ install path\" ;\n", -"ECHO \" VISUALC16 Microsoft Visual C++ 16 bit VC++ 16 bit install\" ;\n", -"ECHO \" INTELC Intel C/C++ IC++ install path\" ;\n", -"ECHO \" WATCOM Watcom C/C++ Watcom install path\" ;\n", -"ECHO \" MINGW MinGW (gcc) MinGW install path\" ;\n", -"ECHO \" LCC Win32-LCC LCC-Win32 install path\" ;\n", -"ECHO ;\n", -"ECHO \" - define the JAM_TOOLSET environment variable with the *name*\" ;\n", -"ECHO \" of the toolset variable you want to use.\" ;\n", -"ECHO ;\n", -"ECHO \" e.g.: set VISUALC=C:\\\\Visual6\" ;\n", -"ECHO \" set JAM_TOOLSET=VISUALC\" ;\n", -"EXIT ;\n", -"}\n", -"}\n", -"CP ?= copy ;\n", -"RM ?= del /f/q ;\n", -"SLASH ?= \\\\ ;\n", -"SUFLIB ?= .lib ;\n", -"SUFOBJ ?= .obj ;\n", -"SUFEXE ?= .exe ;\n", -"if $(TOOLSET) = BORLANDC\n", -"{\n", -"ECHO \"Compiler is Borland C++\" ;\n", -"AR ?= tlib /C /P64 ;\n", -"CC ?= bcc32 ;\n", -"CCFLAGS ?= -q -y -d -v -w-par -w-ccc -w-rch -w-pro -w-aus ;\n", -"C++ ?= bcc32 ;\n", -"C++FLAGS ?= -q -y -d -v -w-par -w-ccc -w-rch -w-pro -w-aus -P ;\n", -"LINK ?= $(CC) ;\n", -"LINKFLAGS ?= $(CCFLAGS) ;\n", -"STDLIBPATH ?= $(BORLANDC)\\\\lib ;\n", -"STDHDRS ?= $(BORLANDC)\\\\include ;\n", -"NOARSCAN ?= true ;\n", -"}\n", -"else if $(TOOLSET) = VISUALC16\n", -"{\n", -"ECHO \"Compiler is Microsoft Visual C++ 16 bit\" ;\n", -"AR ?= lib /nologo ;\n", -"CC ?= cl /nologo ;\n", -"CCFLAGS ?= /D \\\"WIN\\\" ;\n", -"C++ ?= $(CC) ;\n", -"C++FLAGS ?= $(CCFLAGS) ;\n", -"LINK ?= $(CC) ;\n", -"LINKFLAGS ?= $(CCFLAGS) ;\n", -"LINKLIBS ?=\n", -"\\\"$(VISUALC16)\\\\lib\\\\mlibce.lib\\\"\n", -"\\\"$(VISUALC16)\\\\lib\\\\oldnames.lib\\\"\n", -";\n", -"LINKLIBS ?= ;\n", -"NOARSCAN ?= true ;\n", -"OPTIM ?= \"\" ;\n", -"STDHDRS ?= $(VISUALC16)\\\\include ;\n", -"UNDEFFLAG ?= \"/u _\" ;\n", -"}\n", -"else if $(TOOLSET) = VISUALC\n", -"{\n", -"ECHO \"Compiler is Microsoft Visual C++\" ;\n", -"AR ?= lib ;\n", -"AS ?= masm386 ;\n", -"CC ?= cl /nologo ;\n", -"CCFLAGS ?= \"\" ;\n", -"C++ ?= $(CC) ;\n", -"C++FLAGS ?= $(CCFLAGS) ;\n", -"LINK ?= link /nologo ;\n", -"LINKFLAGS ?= \"\" ;\n", -"LINKLIBS ?= 
\\\"$(VISUALC)\\\\lib\\\\advapi32.lib\\\"\n", -"\\\"$(VISUALC)\\\\lib\\\\gdi32.lib\\\"\n", -"\\\"$(VISUALC)\\\\lib\\\\user32.lib\\\"\n", -"\\\"$(VISUALC)\\\\lib\\\\kernel32.lib\\\" ;\n", -"OPTIM ?= \"\" ;\n", -"STDHDRS ?= $(VISUALC)\\\\include ;\n", -"UNDEFFLAG ?= \"/u _\" ;\n", -"}\n", -"else if $(TOOLSET) = VC7\n", -"{\n", -"ECHO \"Compiler is Microsoft Visual C++ .NET\" ;\n", -"AR ?= lib ;\n", -"AS ?= masm386 ;\n", -"CC ?= cl /nologo ;\n", -"CCFLAGS ?= \"\" ;\n", -"C++ ?= $(CC) ;\n", -"C++FLAGS ?= $(CCFLAGS) ;\n", -"LINK ?= link /nologo ;\n", -"LINKFLAGS ?= \"\" ;\n", -"LINKLIBS ?= \\\"$(VISUALC)\\\\PlatformSDK\\\\lib\\\\advapi32.lib\\\"\n", -"\\\"$(VISUALC)\\\\PlatformSDK\\\\lib\\\\gdi32.lib\\\"\n", -"\\\"$(VISUALC)\\\\PlatformSDK\\\\lib\\\\user32.lib\\\"\n", -"\\\"$(VISUALC)\\\\PlatformSDK\\\\lib\\\\kernel32.lib\\\" ;\n", -"OPTIM ?= \"\" ;\n", -"STDHDRS ?= \\\"$(VISUALC)\\\\include\\\"\n", -"\\\"$(VISUALC)\\\\PlatformSDK\\\\include\\\" ;\n", -"UNDEFFLAG ?= \"/u _\" ;\n", -"}\n", -"else if $(TOOLSET) = INTELC\n", -"{\n", -"ECHO \"Compiler is Intel C/C++\" ;\n", -"if ! $(VISUALC)\n", -"{\n", -"ECHO \"As a special exception, when using the Intel C++ compiler, you need\" ;\n", -"ECHO \"to define the VISUALC environment variable to indicate the location\" ;\n", -"ECHO \"of your Visual C++ installation. Aborting..\" ;\n", -"EXIT ;\n", -"}\n", -"AR ?= lib ;\n", -"AS ?= masm386 ;\n", -"CC ?= icl /nologo ;\n", -"CCFLAGS ?= \"\" ;\n", -"C++ ?= $(CC) ;\n", -"C++FLAGS ?= $(CCFLAGS) ;\n", -"LINK ?= link /nologo ;\n", -"LINKFLAGS ?= \"\" ;\n", -"LINKLIBS ?= $(VISUALC)\\\\lib\\\\advapi32.lib\n", -"$(VISUALC)\\\\lib\\\\kernel32.lib\n", -";\n", -"OPTIM ?= \"\" ;\n", -"STDHDRS ?= $(INTELC)\\include $(VISUALC)\\\\include ;\n", -"UNDEFFLAG ?= \"/u _\" ;\n", -"}\n", -"else if $(TOOLSET) = WATCOM\n", -"{\n", -"ECHO \"Compiler is Watcom C/C++\" ;\n", -"AR ?= wlib ;\n", -"CC ?= wcc386 ;\n", -"CCFLAGS ?= /zq /DWIN32 /I$(WATCOM)\\\\h ; # zq=quiet\n", -"C++ ?= wpp386 ;\n", -"C++FLAGS ?= $(CCFLAGS) ;\n", -"CP ?= copy ;\n", -"DOT ?= . ;\n", -"DOTDOT ?= .. ;\n", -"LINK ?= wcl386 ;\n", -"LINKFLAGS ?= /zq ; # zq=quiet\n", -"LINKLIBS ?= ;\n", -"MV ?= move ;\n", -"NOARSCAN ?= true ;\n", -"OPTIM ?= ;\n", -"RM ?= del /f ;\n", -"SLASH ?= \\\\ ;\n", -"STDHDRS ?= $(WATCOM)\\\\h $(WATCOM)\\\\h\\\\nt ;\n", -"SUFEXE ?= .exe ;\n", -"SUFLIB ?= .lib ;\n", -"SUFOBJ ?= .obj ;\n", -"UNDEFFLAG ?= \"/u _\" ;\n", -"}\n", -"else if $(TOOLSET) = MINGW\n", -"{\n", -"ECHO \"Compiler is GCC with Mingw\" ;\n", -"AR ?= ar -ru ;\n", -"CC ?= gcc ;\n", -"CCFLAGS ?= \"\" ;\n", -"C++ ?= $(CC) ;\n", -"C++FLAGS ?= $(CCFLAGS) ;\n", -"LINK ?= $(CC) ;\n", -"LINKFLAGS ?= \"\" ;\n", -"LINKLIBS ?= \"\" ;\n", -"OPTIM ?= ;\n", -"SUFOBJ = .o ;\n", -"SUFLIB = .a ;\n", -"SLASH = / ;\n", -"}\n", -"else if $(TOOLSET) = LCC\n", -"{\n", -"ECHO \"Compiler is Win32-LCC\" ;\n", -"AR ?= lcclib ;\n", -"CC ?= lcc ;\n", -"CCFLAGS ?= \"\" ;\n", -"C++ ?= $(CC) ;\n", -"C++FLAGS ?= $(CCFLAGS) ;\n", -"LINK ?= lcclnk ;\n", -"LINKFLAGS ?= \"\" ;\n", -"LINKLIBS ?= \"\" ;\n", -"OPTIM ?= ;\n", -"NOARSCAN = true ;\n", -"}\n", -"else\n", -"{\n", -"EXIT On NT, set BCCROOT, MSVCNT, MINGW or MSVC to the root of the\n", -"Borland or Microsoft directories. 
;\n", -"}\n", -"}\n", -"else if $(OS2)\n", -"{\n", -"local SUPPORTED_TOOLSETS = \"EMX\" \"WATCOM\" ;\n", -"TOOLSET = \"\" ;\n", -"if $(JAM_TOOLSET)\n", -"{\n", -"local t ;\n", -"for t in $(SUPPORTED_TOOLSETS)\n", -"{\n", -"$(t) = $($(t):J=\" \") ; # reconstitute paths with spaces in them\n", -"if $(t) = $(JAM_TOOLSET) { TOOLSET = $(t) ; }\n", -"}\n", -"if ! $(TOOLSET)\n", -"{\n", -"ECHO \"The JAM_TOOLSET environment variable is defined but its value\" ;\n", -"ECHO \"is invalid, please use one of the following:\" ;\n", -"ECHO ;\n", -"for t in $(SUPPORTED_TOOLSETS) { ECHO \" \" $(t) ; }\n", -"EXIT ;\n", -"}\n", -"}\n", -"if ! $(TOOLSET)\n", -"{\n", -"if $(watcom)\n", -"{\n", -"WATCOM = $(watcom:J=\" \") ;\n", -"TOOLSET = WATCOM ;\n", -"}\n", -"else\n", -"{\n", -"ECHO \"Jam cannot be run because you didn't indicate which compilation toolset\" ;\n", -"ECHO \"to use. To do so, follow these simple instructions:\" ;\n", -"ECHO ;\n", -"ECHO \" - define one of the following environment variable, with the\" ;\n", -"ECHO \" appropriate value according to this list:\" ;\n", -"ECHO ;\n", -"ECHO \" Variable Toolset Description\" ;\n", -"ECHO ;\n", -"ECHO \" WATCOM Watcom C/C++ Watcom install path\" ;\n", -"ECHO \" EMX EMX (gcc) EMX install path\" ;\n", -"ECHO \" VISUALAGE IBM Visual Age C/C++ VisualAge install path\" ;\n", -"ECHO ;\n", -"ECHO \" - define the JAM_TOOLSET environment variable with the *name*\" ;\n", -"ECHO \" of the toolset variable you want to use.\" ;\n", -"ECHO ;\n", -"ECHO \" e.g.: set WATCOM=C:\\WATCOM\" ;\n", -"ECHO \" set JAM_TOOLSET=WATCOM\" ;\n", -"ECHO ;\n", -"EXIT ;\n", -"}\n", -"}\n", -"RM = del /f ;\n", -"CP = copy ;\n", -"MV ?= move ;\n", -"DOT ?= . ;\n", -"DOTDOT ?= .. ;\n", -"SUFLIB ?= .lib ;\n", -"SUFOBJ ?= .obj ;\n", -"SUFEXE ?= .exe ;\n", -"if $(TOOLSET) = WATCOM\n", -"{\n", -"AR ?= wlib ;\n", -"BINDIR ?= \\\\os2\\\\apps ;\n", -"CC ?= wcc386 ;\n", -"CCFLAGS ?= /zq /DOS2 /I$(WATCOM)\\\\h ; # zq=quiet\n", -"C++ ?= wpp386 ;\n", -"C++FLAGS ?= $(CCFLAGS) ;\n", -"LINK ?= wcl386 ;\n", -"LINKFLAGS ?= /zq ; # zq=quiet\n", -"LINKLIBS ?= ;\n", -"NOARSCAN ?= true ;\n", -"OPTIM ?= ;\n", -"SLASH ?= \\\\ ;\n", -"STDHDRS ?= $(WATCOM)\\\\h ;\n", -"UNDEFFLAG ?= \"/u _\" ;\n", -"}\n", -"else if $(TOOLSET) = EMX\n", -"{\n", -"ECHO \"Compiler is GCC-EMX\" ;\n", -"AR ?= ar -ru ;\n", -"CC ?= gcc ;\n", -"CCFLAGS ?= \"\" ;\n", -"C++ ?= $(CC) ;\n", -"C++FLAGS ?= $(CCFLAGS) ;\n", -"LINK ?= $(CC) ;\n", -"LINKFLAGS ?= \"\" ;\n", -"LINKLIBS ?= \"\" ;\n", -"OPTIM ?= ;\n", -"SUFOBJ = .o ;\n", -"SUFLIB = .a ;\n", -"UNDEFFLAG ?= \"-U\" ;\n", -"SLASH = / ;\n", -"}\n", -"else\n", -"{\n", -"EXIT \"Sorry, but the $(JAM_TOOLSET) toolset isn't supported for now\" ;\n", -"}\n", -"}\n", -"else if $(VMS)\n", -"{\n", -"C++ ?= cxx ;\n", -"C++FLAGS ?= ;\n", -"CC ?= cc ;\n", -"CCFLAGS ?= ;\n", -"CHMOD ?= set file/prot= ;\n", -"CP ?= copy/replace ;\n", -"CRELIB ?= true ;\n", -"DOT ?= [] ;\n", -"DOTDOT ?= [-] ;\n", -"EXEMODE ?= (w:e) ;\n", -"FILEMODE ?= (w:r) ;\n", -"HDRS ?= ;\n", -"LINK ?= link ;\n", -"LINKFLAGS ?= \"\" ;\n", -"LINKLIBS ?= ;\n", -"MKDIR ?= create/dir ;\n", -"MV ?= rename ;\n", -"OPTIM ?= \"\" ;\n", -"RM ?= delete ;\n", -"RUNVMS ?= mcr ;\n", -"SHELLMODE ?= (w:er) ;\n", -"SLASH ?= . 
;\n", -"STDHDRS ?= decc$library_include ;\n", -"SUFEXE ?= .exe ;\n", -"SUFLIB ?= .olb ;\n", -"SUFOBJ ?= .obj ;\n", -"switch $(OS)\n", -"{\n", -"case OPENVMS : CCFLAGS ?= /stand=vaxc ;\n", -"case VMS : LINKLIBS ?= sys$library:vaxcrtl.olb/lib ;\n", -"}\n", -"}\n", -"else if $(MAC)\n", -"{\n", -"local OPT ;\n", -"CW ?= \"{CW}\" ;\n", -"MACHDRS ?=\n", -"\"$(UMACHDRS):Universal:Interfaces:CIncludes\"\n", -"\"$(CW):MSL:MSL_C:MSL_Common:Include\"\n", -"\"$(CW):MSL:MSL_C:MSL_MacOS:Include\" ;\n", -"MACLIBS ?=\n", -"\"$(CW):MacOS Support:Universal:Libraries:StubLibraries:Interfacelib\"\n", -"\"$(CW):MacOS Support:Universal:Libraries:StubLibraries:Mathlib\" ;\n", -"MPWLIBS ?=\n", -"\"$(CW):MacOS Support:Libraries:Runtime:Runtime PPC:MSL MPWCRuntime.lib\"\n", -"\"$(CW):MSL:MSL_C:MSL_MacOS:Lib:PPC:MSL C.PPC MPW.Lib\" ;\n", -"MPWNLLIBS ?=\n", -"\"$(CW):MacOS Support:Libraries:Runtime:Runtime PPC:MSL MPWCRuntime.lib\"\n", -"\"$(CW):MSL:MSL_C:MSL_MacOS:Lib:PPC:MSL C.PPC MPW(NL).Lib\" ;\n", -"SIOUXHDRS ?= ;\n", -"SIOUXLIBS ?=\n", -"\"$(CW):MacOS Support:Libraries:Runtime:Runtime PPC:MSL RuntimePPC.lib\"\n", -"\"$(CW):MSL:MSL_C:MSL_MacOS:Lib:PPC:MSL SIOUX.PPC.Lib\"\n", -"\"$(CW):MSL:MSL_C:MSL_MacOS:Lib:PPC:MSL C.PPC.Lib\" ;\n", -"C++ ?= mwcppc ;\n", -"C++FLAGS ?= -w off -nomapcr ;\n", -"CC ?= mwcppc ;\n", -"CCFLAGS ?= -w off -nomapcr ;\n", -"CP ?= duplicate -y ;\n", -"DOT ?= \":\" ;\n", -"DOTDOT ?= \"::\" ;\n", -"HDRS ?= $(MACHDRS) $(MPWHDRS) ;\n", -"LINK ?= mwlinkppc ;\n", -"LINKFLAGS ?= -mpwtool -warn ;\n", -"LINKLIBS ?= $(MACLIBS) $(MPWLIBS) ;\n", -"MKDIR ?= newfolder ;\n", -"MV ?= rename -y ;\n", -"NOARSCAN ?= true ;\n", -"OPTIM ?= ;\n", -"RM ?= delete -y ;\n", -"SLASH ?= \":\" ;\n", -"STDHDRS ?= ;\n", -"SUFLIB ?= .lib ;\n", -"SUFOBJ ?= .o ;\n", -"}\n", -"else if $(OS) = BEOS && $(METROWERKS)\n", -"{\n", -"AR ?= mwld -xml -o ;\n", -"BINDIR ?= /boot/apps ;\n", -"CC ?= mwcc ;\n", -"CCFLAGS ?= -nosyspath ;\n", -"C++ ?= $(CC) ;\n", -"C++FLAGS ?= -nosyspath ;\n", -"FORTRAN ?= \"\" ;\n", -"LIBDIR ?= /boot/develop/libraries ;\n", -"LINK ?= mwld ;\n", -"LINKFLAGS ?= \"\" ;\n", -"MANDIR ?= /boot/documentation/\"Shell Tools\"/HTML ;\n", -"NOARSCAN ?= true ;\n", -"STDHDRS ?= /boot/develop/headers/posix ;\n", -"}\n", -"else if $(OS) = BEOS\n", -"{\n", -"BINDIR ?= /boot/apps ;\n", -"CC ?= gcc ;\n", -"C++ ?= $(CC) ;\n", -"FORTRAN ?= \"\" ;\n", -"LIBDIR ?= /boot/develop/libraries ;\n", -"LINK ?= gcc ;\n", -"LINKLIBS ?= -lnet ;\n", -"NOARSCAN ?= true ;\n", -"STDHDRS ?= /boot/develop/headers/posix ;\n", -"}\n", -"else if $(UNIX)\n", -"{\n", -"switch $(OS)\n", -"{\n", -"case AIX :\n", -"LINKLIBS ?= -lbsd ;\n", -"case AMIGA :\n", -"CC ?= gcc ;\n", -"YACC ?= \"bison -y\" ;\n", -"case CYGWIN :\n", -"CC ?= gcc ;\n", -"CCFLAGS += -D__cygwin__ ;\n", -"LEX ?= flex ;\n", -"RANLIB ?= \"\" ;\n", -"SUFEXE ?= .exe ;\n", -"YACC ?= \"bison -y\" ;\n", -"case DGUX :\n", -"RANLIB ?= \"\" ;\n", -"RELOCATE ?= true ;\n", -"case HPUX :\n", -"YACC = ;\n", -"CFLAGS += -Ae ;\n", -"CCFLAGS += -Ae ;\n", -"RANLIB ?= \"\" ;\n", -"case INTERIX :\n", -"CC ?= gcc ;\n", -"RANLIB ?= \"\" ;\n", -"case IRIX :\n", -"RANLIB ?= \"\" ;\n", -"case MPEIX :\n", -"CC ?= gcc ;\n", -"C++ ?= gcc ;\n", -"CCFLAGS += -D_POSIX_SOURCE ;\n", -"HDRS += /usr/include ;\n", -"RANLIB ?= \"\" ;\n", -"NOARSCAN ?= true ;\n", -"NOARUPDATE ?= true ;\n", -"case MVS :\n", -"RANLIB ?= \"\" ;\n", -"case NEXT :\n", -"AR ?= libtool -o ;\n", -"RANLIB ?= \"\" ;\n", -"case MACOSX :\n", -"AR ?= libtool -o ;\n", -"C++ ?= c++ ;\n", -"MANDIR ?= /usr/local/share/man ;\n", -"RANLIB ?= \"\" ;\n", 
-"case NCR :\n", -"RANLIB ?= \"\" ;\n", -"case PTX :\n", -"RANLIB ?= \"\" ;\n", -"case QNX :\n", -"AR ?= wlib ;\n", -"CC ?= cc ;\n", -"CCFLAGS ?= -Q ; # quiet\n", -"C++ ?= $(CC) ;\n", -"C++FLAGS ?= -Q ; # quiet\n", -"LINK ?= $(CC) ;\n", -"LINKFLAGS ?= -Q ; # quiet\n", -"NOARSCAN ?= true ;\n", -"RANLIB ?= \"\" ;\n", -"case SCO :\n", -"RANLIB ?= \"\" ;\n", -"RELOCATE ?= true ;\n", -"case SINIX :\n", -"RANLIB ?= \"\" ;\n", -"case SOLARIS :\n", -"RANLIB ?= \"\" ;\n", -"AR ?= \"/usr/ccs/bin/ar ru\" ;\n", -"case UNICOS :\n", -"NOARSCAN ?= true ;\n", -"OPTIM ?= -O0 ;\n", -"case UNIXWARE :\n", -"RANLIB ?= \"\" ;\n", -"RELOCATE ?= true ;\n", -"}\n", -"CCFLAGS ?= ;\n", -"C++FLAGS ?= $(CCFLAGS) ;\n", -"CHMOD ?= chmod ;\n", -"CHGRP ?= chgrp ;\n", -"CHOWN ?= chown ;\n", -"LEX ?= lex ;\n", -"LINKFLAGS ?= $(CCFLAGS) ;\n", -"LINKLIBS ?= ;\n", -"OPTIM ?= -O ;\n", -"RANLIB ?= ranlib ;\n", -"YACC ?= yacc ;\n", -"YACCFILES ?= y.tab ;\n", -"YACCFLAGS ?= -d ;\n", -"}\n", -"AR ?= ar ru ;\n", -"AS ?= as ;\n", -"ASFLAGS ?= ;\n", -"AWK ?= awk ;\n", -"BINDIR ?= /usr/local/bin ;\n", -"C++ ?= cc ;\n", -"C++FLAGS ?= ;\n", -"CC ?= cc ;\n", -"CCFLAGS ?= ;\n", -"CP ?= cp -f ;\n", -"CRELIB ?= ;\n", -"DOT ?= . ;\n", -"DOTDOT ?= .. ;\n", -"EXEMODE ?= 711 ;\n", -"FILEMODE ?= 644 ;\n", -"FORTRAN ?= f77 ;\n", -"FORTRANFLAGS ?= ;\n", -"HDRS ?= ;\n", -"INSTALLGRIST ?= installed ;\n", -"JAMFILE ?= Jamfile ;\n", -"JAMRULES ?= Jamrules ;\n", -"LEX ?= ;\n", -"LIBDIR ?= /usr/local/lib ;\n", -"LINK ?= $(CC) ;\n", -"LINKFLAGS ?= ;\n", -"LINKLIBS ?= ;\n", -"LN ?= ln ;\n", -"MANDIR ?= /usr/local/man ;\n", -"MKDIR ?= mkdir ;\n", -"MV ?= mv -f ;\n", -"OPTIM ?= ;\n", -"RCP ?= rcp ;\n", -"RM ?= rm -f ;\n", -"RSH ?= rsh ;\n", -"SED ?= sed ;\n", -"SHELLHEADER ?= \"#!/bin/sh\" ;\n", -"SHELLMODE ?= 755 ;\n", -"SLASH ?= / ;\n", -"STDHDRS ?= /usr/include ;\n", -"SUFEXE ?= \"\" ;\n", -"SUFLIB ?= .a ;\n", -"SUFOBJ ?= .o ;\n", -"UNDEFFLAG ?= \"-u _\" ;\n", -"YACC ?= ;\n", -"YACCFILES ?= ;\n", -"YACCFLAGS ?= ;\n", -"HDRPATTERN =\n", -"\"^[ ]*#[ ]*include[ ]*[<\\\"]([^\\\">]*)[\\\">].*$\" ;\n", -"OSFULL = $(OS)$(OSVER)$(OSPLAT) $(OS)$(OSPLAT) $(OS)$(OSVER) $(OS) ;\n", -"DEPENDS all : shell files lib exe obj ;\n", -"DEPENDS all shell files lib exe obj : first ;\n", -"NOTFILE all first shell files lib exe obj dirs clean uninstall ;\n", -"ALWAYS clean uninstall ;\n", -"rule As\n", -"{\n", -"DEPENDS $(<) : $(>) ;\n", -"ASFLAGS on $(<) += $(ASFLAGS) $(SUBDIRASFLAGS) ;\n", -"}\n", -"rule Bulk\n", -"{\n", -"local i ;\n", -"for i in $(>)\n", -"{\n", -"File $(i:D=$(<)) : $(i) ;\n", -"}\n", -"}\n", -"rule Cc\n", -"{\n", -"local _h ;\n", -"DEPENDS $(<) : $(>) ;\n", -"CCFLAGS on $(<) += $(CCFLAGS) $(SUBDIRCCFLAGS) ;\n", -"if $(RELOCATE)\n", -"{\n", -"CcMv $(<) : $(>) ;\n", -"}\n", -"_h = $(SEARCH_SOURCE) $(HDRS) $(SUBDIRHDRS) ;\n", -"if $(VMS) && $(_h)\n", -"{\n", -"SLASHINC on $(<) = \"/inc=(\" $(_h[1]) ,$(_h[2-]) \")\" ;\n", -"}\n", -"else if $(MAC) && $(_h)\n", -"{\n", -"local _i _j ;\n", -"_j = $(_h[1]) ;\n", -"for _i in $(_h[2-])\n", -"{\n", -"_j = $(_j),$(_i) ;\n", -"}\n", -"MACINC on $(<) = \\\"$(_j)\\\" ;\n", -"}\n", -"}\n", -"rule C++\n", -"{\n", -"local _h ;\n", -"DEPENDS $(<) : $(>) ;\n", -"C++FLAGS on $(<) += $(C++FLAGS) $(SUBDIRC++FLAGS) ;\n", -"if $(RELOCATE)\n", -"{\n", -"CcMv $(<) : $(>) ;\n", -"}\n", -"_h = $(SEARCH_SOURCE) $(HDRS) $(SUBDIRHDRS) ;\n", -"if $(VMS) && $(_h)\n", -"{\n", -"SLASHINC on $(<) = \"/inc=(\" $(_h[1]) ,$(_h[2-]) \")\" ;\n", -"}\n", -"else if $(MAC) && $(_h)\n", -"{\n", -"local _i _j ;\n", -"_j = $(_h[1]) ;\n", -"for _i in 
$(_h[2-])\n", -"{\n", -"_j = $(_j),$(_i) ;\n", -"}\n", -"MACINC on $(<) = \\\"$(_j)\\\" ;\n", -"}\n", -"}\n", -"rule Chmod\n", -"{\n", -"if $(CHMOD) { Chmod1 $(<) ; }\n", -"}\n", -"rule File\n", -"{\n", -"DEPENDS files : $(<) ;\n", -"DEPENDS $(<) : $(>) ;\n", -"SEARCH on $(>) = $(SEARCH_SOURCE) ;\n", -"MODE on $(<) = $(FILEMODE) ;\n", -"Chmod $(<) ;\n", -"}\n", -"rule Fortran\n", -"{\n", -"DEPENDS $(<) : $(>) ;\n", -"}\n", -"rule GenFile\n", -"{\n", -"local _t = [ FGristSourceFiles $(<) ] ;\n", -"local _s = [ FAppendSuffix $(>[1]) : $(SUFEXE) ] ;\n", -"Depends $(_t) : $(_s) $(>[2-]) ;\n", -"GenFile1 $(_t) : $(_s) $(>[2-]) ;\n", -"Clean clean : $(_t) ;\n", -"}\n", -"rule GenFile1\n", -"{\n", -"MakeLocate $(<) : $(LOCATE_SOURCE) ;\n", -"SEARCH on $(>) = $(SEARCH_SOURCE) ;\n", -"}\n", -"rule HardLink\n", -"{\n", -"DEPENDS files : $(<) ;\n", -"DEPENDS $(<) : $(>) ;\n", -"SEARCH on $(>) = $(SEARCH_SOURCE) ;\n", -"}\n", -"rule HdrMacroFile\n", -"{\n", -"HDRMACRO $(<) ;\n", -"}\n", -"rule HdrRule\n", -"{\n", -"local s ;\n", -"if $(HDRGRIST)\n", -"{\n", -"s = $(>:G=$(HDRGRIST)) ;\n", -"} else {\n", -"s = $(>) ;\n", -"}\n", -"INCLUDES $(<) : $(s) ;\n", -"SEARCH on $(s) = $(HDRSEARCH) ;\n", -"NOCARE $(s) ;\n", -"HDRSEARCH on $(s) = $(HDRSEARCH) ;\n", -"HDRSCAN on $(s) = $(HDRSCAN) ;\n", -"HDRRULE on $(s) = $(HDRRULE) ;\n", -"HDRGRIST on $(s) = $(HDRGRIST) ;\n", -"}\n", -"rule InstallInto\n", -"{\n", -"local i t ;\n", -"t = $(>:G=$(INSTALLGRIST)) ;\n", -"Depends install : $(t) ;\n", -"Clean uninstall : $(t) ;\n", -"SEARCH on $(>) = $(SEARCH_SOURCE) ;\n", -"MakeLocate $(t) : $(<) ;\n", -"for i in $(>)\n", -"{\n", -"local tt = $(i:G=$(INSTALLGRIST)) ;\n", -"Depends $(tt) : $(i) ;\n", -"Install $(tt) : $(i) ;\n", -"Chmod $(tt) ;\n", -"if $(OWNER) && $(CHOWN)\n", -"{\n", -"Chown $(tt) ;\n", -"OWNER on $(tt) = $(OWNER) ;\n", -"}\n", -"if $(GROUP) && $(CHGRP)\n", -"{\n", -"Chgrp $(tt) ;\n", -"GROUP on $(tt) = $(GROUP) ;\n", -"}\n", -"}\n", -"}\n", -"rule InstallBin\n", -"{\n", -"local _t = [ FAppendSuffix $(>) : $(SUFEXE) ] ;\n", -"InstallInto $(<) : $(_t) ;\n", -"MODE on $(_t:G=installed) = $(EXEMODE) ;\n", -"}\n", -"rule InstallFile\n", -"{\n", -"InstallInto $(<) : $(>) ;\n", -"MODE on $(>:G=installed) = $(FILEMODE) ;\n", -"}\n", -"rule InstallLib\n", -"{\n", -"InstallInto $(<) : $(>) ;\n", -"MODE on $(>:G=installed) = $(FILEMODE) ;\n", -"}\n", -"rule InstallMan\n", -"{\n", -"local i s d ;\n", -"for i in $(>)\n", -"{\n", -"switch $(i:S)\n", -"{\n", -"case .1 : s = 1 ; case .2 : s = 2 ; case .3 : s = 3 ;\n", -"case .4 : s = 4 ; case .5 : s = 5 ; case .6 : s = 6 ;\n", -"case .7 : s = 7 ; case .8 : s = 8 ; case .l : s = l ;\n", -"case .n : s = n ; case .man : s = 1 ;\n", -"}\n", -"d = man$(s) ;\n", -"InstallInto $(d:R=$(<)) : $(i) ;\n", -"}\n", -"MODE on $(>:G=installed) = $(FILEMODE) ;\n", -"}\n", -"rule InstallShell\n", -"{\n", -"InstallInto $(<) : $(>) ;\n", -"MODE on $(>:G=installed) = $(SHELLMODE) ;\n", -"}\n", -"rule Lex\n", -"{\n", -"LexMv $(<) : $(>) ;\n", -"DEPENDS $(<) : $(>) ;\n", -"MakeLocate $(<) : $(LOCATE_SOURCE) ;\n", -"Clean clean : $(<) ;\n", -"}\n", -"rule Library\n", -"{\n", -"LibraryFromObjects $(<) : $(>:S=$(SUFOBJ)) ;\n", -"Objects $(>) ;\n", -"}\n", -"rule LibraryFromObjects\n", -"{\n", -"local _i _l _s ;\n", -"_s = [ FGristFiles $(>) ] ;\n", -"_l = $(<:S=$(SUFLIB)) ;\n", -"if $(KEEPOBJS)\n", -"{\n", -"DEPENDS obj : $(_s) ;\n", -"}\n", -"else\n", -"{\n", -"DEPENDS lib : $(_l) ;\n", -"}\n", -"if ! 
$(_l:D)\n", -"{\n", -"MakeLocate $(_l) $(_l)($(_s:BS)) : $(LOCATE_TARGET) ;\n", -"}\n", -"if $(NOARSCAN)\n", -"{\n", -"DEPENDS $(_l) : $(_s) ;\n", -"}\n", -"else\n", -"{\n", -"DEPENDS $(_l) : $(_l)($(_s:BS)) ;\n", -"for _i in $(_s)\n", -"{\n", -"DEPENDS $(_l)($(_i:BS)) : $(_i) ;\n", -"}\n", -"}\n", -"Clean clean : $(_l) ;\n", -"if $(CRELIB) { CreLib $(_l) : $(_s[1]) ; }\n", -"Archive $(_l) : $(_s) ;\n", -"if $(RANLIB) { Ranlib $(_l) ; }\n", -"if ! ( $(NOARSCAN) || $(KEEPOBJS) ) { RmTemps $(_l) : $(_s) ; }\n", -"}\n", -"rule Link\n", -"{\n", -"MODE on $(<) = $(EXEMODE) ;\n", -"Chmod $(<) ;\n", -"}\n", -"rule LinkLibraries\n", -"{\n", -"local _t = [ FAppendSuffix $(<) : $(SUFEXE) ] ;\n", -"DEPENDS $(_t) : $(>:S=$(SUFLIB)) ;\n", -"NEEDLIBS on $(_t) += $(>:S=$(SUFLIB)) ;\n", -"}\n", -"rule Main\n", -"{\n", -"MainFromObjects $(<) : $(>:S=$(SUFOBJ)) ;\n", -"Objects $(>) ;\n", -"}\n", -"rule MainFromObjects\n", -"{\n", -"local _s _t ;\n", -"_s = [ FGristFiles $(>) ] ;\n", -"_t = [ FAppendSuffix $(<) : $(SUFEXE) ] ;\n", -"if $(_t) != $(<)\n", -"{\n", -"DEPENDS $(<) : $(_t) ;\n", -"NOTFILE $(<) ;\n", -"}\n", -"DEPENDS exe : $(_t) ;\n", -"DEPENDS $(_t) : $(_s) ;\n", -"MakeLocate $(_t) : $(LOCATE_TARGET) ;\n", -"Clean clean : $(_t) ;\n", -"Link $(_t) : $(_s) ;\n", -"}\n", -"rule MakeLocate\n", -"{\n", -"if $(>)\n", -"{\n", -"LOCATE on $(<) = $(>) ;\n", -"Depends $(<) : $(>[1]) ;\n", -"MkDir $(>[1]) ;\n", -"}\n", -"}\n", -"rule MkDir\n", -"{\n", -"NOUPDATE $(<) ;\n", -"if $(<) != $(DOT) && ! $($(<)-mkdir)\n", -"{\n", -"local s ;\n", -"$(<)-mkdir = true ;\n", -"MkDir1 $(<) ;\n", -"Depends dirs : $(<) ;\n", -"s = $(<:P) ;\n", -"if $(NT)\n", -"{\n", -"switch $(s)\n", -"{\n", -"case *: : s = ;\n", -"case *:\\\\ : s = ;\n", -"}\n", -"}\n", -"if $(s) && $(s) != $(<)\n", -"{\n", -"Depends $(<) : $(s) ;\n", -"MkDir $(s) ;\n", -"}\n", -"else if $(s)\n", -"{\n", -"NOTFILE $(s) ;\n", -"}\n", -"}\n", -"}\n", -"rule Object\n", -"{\n", -"local h ;\n", -"Clean clean : $(<) ;\n", -"MakeLocate $(<) : $(LOCATE_TARGET) ;\n", -"SEARCH on $(>) = $(SEARCH_SOURCE) ;\n", -"HDRS on $(<) = $(SEARCH_SOURCE) $(HDRS) $(SUBDIRHDRS) ;\n", -"if $(SEARCH_SOURCE)\n", -"{\n", -"h = $(SEARCH_SOURCE) ;\n", -"}\n", -"else\n", -"{\n", -"h = \"\" ;\n", -"}\n", -"HDRRULE on $(>) = HdrRule ;\n", -"HDRSCAN on $(>) = $(HDRPATTERN) ;\n", -"HDRSEARCH on $(>) = $(HDRS) $(SUBDIRHDRS) $(h) $(STDHDRS) ;\n", -"HDRGRIST on $(>) = $(HDRGRIST) ;\n", -"switch $(>:S)\n", -"{\n", -"case .asm : As $(<) : $(>) ;\n", -"case .c : Cc $(<) : $(>) ;\n", -"case .C : C++ $(<) : $(>) ;\n", -"case .cc : C++ $(<) : $(>) ;\n", -"case .cpp : C++ $(<) : $(>) ;\n", -"case .f : Fortran $(<) : $(>) ;\n", -"case .l : Cc $(<) : $(<:S=.c) ;\n", -"Lex $(<:S=.c) : $(>) ;\n", -"case .s : As $(<) : $(>) ;\n", -"case .y : Cc $(<) : $(<:S=.c) ;\n", -"Yacc $(<:S=.c) : $(>) ;\n", -"case * : UserObject $(<) : $(>) ;\n", -"}\n", -"}\n", -"rule ObjectCcFlags\n", -"{\n", -"CCFLAGS on [ FGristFiles $(<:S=$(SUFOBJ)) ] += $(>) ;\n", -"}\n", -"rule ObjectC++Flags\n", -"{\n", -"C++FLAGS on [ FGristFiles $(<:S=$(SUFOBJ)) ] += $(>) ;\n", -"}\n", -"rule ObjectHdrs\n", -"{\n", -"HDRS on [ FGristFiles $(<:S=$(SUFOBJ)) ] += $(>) ;\n", -"}\n", -"rule Objects\n", -"{\n", -"local _i ;\n", -"for _i in [ FGristFiles $(<) ]\n", -"{\n", -"Object $(_i:S=$(SUFOBJ)) : $(_i) ;\n", -"DEPENDS obj : $(_i:S=$(SUFOBJ)) ;\n", -"}\n", -"}\n", -"rule RmTemps\n", -"{\n", -"TEMPORARY $(>) ;\n", -"}\n", -"rule Setuid\n", -"{\n", -"MODE on [ FAppendSuffix $(<) : $(SUFEXE) ] = 4711 ;\n", -"}\n", -"rule Shell\n", -"{\n", 
-"DEPENDS shell : $(<) ;\n", -"DEPENDS $(<) : $(>) ;\n", -"SEARCH on $(>) = $(SEARCH_SOURCE) ;\n", -"MODE on $(<) = $(SHELLMODE) ;\n", -"Clean clean : $(<) ;\n", -"Chmod $(<) ;\n", -"}\n", -"rule SubDir\n", -"{\n", -"local _r _s ;\n", -"if ! $($(<[1]))\n", -"{\n", -"if ! $(<[1])\n", -"{\n", -"EXIT SubDir syntax error ;\n", -"}\n", -"$(<[1]) = [ FSubDir $(<[2-]) ] ;\n", -"}\n", -"if ! $($(<[1])-included)\n", -"{\n", -"$(<[1])-included = TRUE ;\n", -"_r = $($(<[1])RULES) ;\n", -"if ! $(_r)\n", -"{\n", -"_r = $(JAMRULES:R=$($(<[1]))) ;\n", -"}\n", -"include $(_r) ;\n", -"}\n", -"_s = [ FDirName $(<[2-]) ] ;\n", -"SUBDIR = $(_s:R=$($(<[1]))) ;\n", -"SUBDIR_TOKENS = $(<[2-]) ;\n", -"SEARCH_SOURCE = $(SUBDIR) ;\n", -"LOCATE_SOURCE = $(ALL_LOCATE_TARGET) $(SUBDIR) ;\n", -"LOCATE_TARGET = $(ALL_LOCATE_TARGET) $(SUBDIR) ;\n", -"SOURCE_GRIST = [ FGrist $(<[2-]) ] ;\n", -"SUBDIRCCFLAGS = ;\n", -"SUBDIRC++FLAGS = ;\n", -"SUBDIRHDRS = ;\n", -"}\n", -"rule SubDirCcFlags\n", -"{\n", -"SUBDIRCCFLAGS += $(<) ;\n", -"}\n", -"rule SubDirC++Flags\n", -"{\n", -"SUBDIRC++FLAGS += $(<) ;\n", -"}\n", -"rule SubDirHdrs\n", -"{\n", -"SUBDIRHDRS += $(<) ;\n", -"}\n", -"rule SubInclude\n", -"{\n", -"local _s ;\n", -"if ! $($(<[1]))\n", -"{\n", -"EXIT Top level of source tree has not been set with $(<[1]) ;\n", -"}\n", -"_s = [ FDirName $(<[2-]) ] ;\n", -"include $(JAMFILE:D=$(_s):R=$($(<[1]))) ;\n", -"}\n", -"rule Undefines\n", -"{\n", -"UNDEFS on [ FAppendSuffix $(<) : $(SUFEXE) ] += $(UNDEFFLAG)$(>) ;\n", -"}\n", -"rule UserObject\n", -"{\n", -"EXIT \"Unknown suffix on\" $(>) \"- see UserObject rule in Jamfile(5).\" ;\n", -"}\n", -"rule Yacc\n", -"{\n", -"local _h ;\n", -"_h = $(<:BS=.h) ;\n", -"MakeLocate $(<) $(_h) : $(LOCATE_SOURCE) ;\n", -"if $(YACC)\n", -"{\n", -"DEPENDS $(<) $(_h) : $(>) ;\n", -"Yacc1 $(<) $(_h) : $(>) ;\n", -"YaccMv $(<) $(_h) : $(>) ;\n", -"Clean clean : $(<) $(_h) ;\n", -"}\n", -"INCLUDES $(<) : $(_h) ;\n", -"}\n", -"rule FGrist\n", -"{\n", -"local _g _i ;\n", -"_g = $(<[1]) ;\n", -"for _i in $(<[2-])\n", -"{\n", -"_g = $(_g)!$(_i) ;\n", -"}\n", -"return $(_g) ;\n", -"}\n", -"rule FGristFiles\n", -"{\n", -"if ! $(SOURCE_GRIST)\n", -"{\n", -"return $(<) ;\n", -"}\n", -"else\n", -"{\n", -"return $(<:G=$(SOURCE_GRIST)) ;\n", -"}\n", -"}\n", -"rule FGristSourceFiles\n", -"{\n", -"if ! $(SOURCE_GRIST)\n", -"{\n", -"return $(<) ;\n", -"}\n", -"else\n", -"{\n", -"local _i _o ;\n", -"for _i in $(<)\n", -"{\n", -"switch $(_i)\n", -"{\n", -"case *.h : _o += $(_i) ;\n", -"case * : _o += $(_i:G=$(SOURCE_GRIST)) ;\n", -"}\n", -"}\n", -"return $(_o) ;\n", -"}\n", -"}\n", -"rule FConcat\n", -"{\n", -"local _t _r ;\n", -"$(_r) = $(<[1]) ;\n", -"for _t in $(<[2-])\n", -"{\n", -"$(_r) = $(_r)$(_t) ;\n", -"}\n", -"return $(_r) ;\n", -"}\n", -"rule FSubDir\n", -"{\n", -"local _i _d ;\n", -"if ! $(<[1])\n", -"{\n", -"_d = $(DOT) ;\n", -"}\n", -"else\n", -"{\n", -"_d = $(DOTDOT) ;\n", -"for _i in $(<[2-])\n", -"{\n", -"_d = $(_d:R=$(DOTDOT)) ;\n", -"}\n", -"}\n", -"return $(_d) ;\n", -"}\n", -"rule FDirName\n", -"{\n", -"local _s _i ;\n", -"if ! 
$(<)\n", -"{\n", -"_s = $(DOT) ;\n", -"}\n", -"else if $(VMS)\n", -"{\n", -"switch $(<[1])\n", -"{\n", -"case *:* : _s = $(<[1]) ;\n", -"case \\\\[*\\\\] : _s = $(<[1]) ;\n", -"case * : _s = [.$(<[1])] ;\n", -"}\n", -"for _i in [.$(<[2-])]\n", -"{\n", -"_s = $(_i:R=$(_s)) ;\n", -"}\n", -"}\n", -"else if $(MAC)\n", -"{\n", -"_s = $(DOT) ;\n", -"for _i in $(<)\n", -"{\n", -"_s = $(_i:R=$(_s)) ;\n", -"}\n", -"}\n", -"else\n", -"{\n", -"_s = $(<[1]) ;\n", -"for _i in $(<[2-])\n", -"{\n", -"_s = $(_i:R=$(_s)) ;\n", -"}\n", -"}\n", -"return $(_s) ;\n", -"}\n", -"rule _makeCommon\n", -"{\n", -"if $($(<)[1]) && $($(<)[1]) = $($(>)[1])\n", -"{\n", -"$(<) = $($(<)[2-]) ;\n", -"$(>) = $($(>)[2-]) ;\n", -"_makeCommon $(<) : $(>) ;\n", -"}\n", -"}\n", -"rule FRelPath\n", -"{\n", -"local _l _r ;\n", -"_l = $(<) ;\n", -"_r = $(>) ;\n", -"_makeCommon _l : _r ;\n", -"_l = [ FSubDir $(_l) ] ;\n", -"_r = [ FDirName $(_r) ] ;\n", -"if $(_r) = $(DOT) {\n", -"return $(_l) ;\n", -"} else {\n", -"return $(_r:R=$(_l)) ;\n", -"}\n", -"}\n", -"rule FAppendSuffix\n", -"{\n", -"if $(>)\n", -"{\n", -"local _i _o ;\n", -"for _i in $(<)\n", -"{\n", -"if $(_i:S)\n", -"{\n", -"_o += $(_i) ;\n", -"}\n", -"else\n", -"{\n", -"_o += $(_i:S=$(>)) ;\n", -"}\n", -"}\n", -"return $(_o) ;\n", -"}\n", -"else\n", -"{\n", -"return $(<) ;\n", -"}\n", -"}\n", -"rule unmakeDir\n", -"{\n", -"if $(>[1]:D) && $(>[1]:D) != $(>[1]) && $(>[1]:D) != \\\\\\\\\n", -"{\n", -"unmakeDir $(<) : $(>[1]:D) $(>[1]:BS) $(>[2-]) ;\n", -"}\n", -"else\n", -"{\n", -"$(<) = $(>) ;\n", -"}\n", -"}\n", -"rule FConvertToSlashes\n", -"{\n", -"local _d, _s, _i ;\n", -"unmakeDir _d : $(<) ;\n", -"_s = $(_d[1]) ;\n", -"for _i in $(_d[2-])\n", -"{\n", -"_s = $(_s)/$(_i) ;\n", -"}\n", -"return $(_s) ;\n", -"}\n", -"actions updated together piecemeal Archive\n", -"{\n", -"$(AR) $(<) $(>)\n", -"}\n", -"actions As\n", -"{\n", -"$(AS) $(ASFLAGS) -I$(HDRS) -o $(<) $(>)\n", -"}\n", -"actions C++\n", -"{\n", -"$(C++) -c $(C++FLAGS) $(OPTIM) -I$(HDRS) -o $(<) $(>)\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC) -c $(CCFLAGS) $(OPTIM) -I$(HDRS) -o $(<) $(>)\n", -"}\n", -"actions Chgrp\n", -"{\n", -"$(CHGRP) $(GROUP) $(<)\n", -"}\n", -"actions Chmod1\n", -"{\n", -"$(CHMOD) $(MODE) $(<)\n", -"}\n", -"actions Chown\n", -"{\n", -"$(CHOWN) $(OWNER) $(<)\n", -"}\n", -"actions piecemeal together existing Clean\n", -"{\n", -"$(RM) $(>)\n", -"}\n", -"actions File\n", -"{\n", -"$(CP) $(>) $(<)\n", -"}\n", -"actions GenFile1\n", -"{\n", -"$(>[1]) $(<) $(>[2-])\n", -"}\n", -"actions Fortran\n", -"{\n", -"$(FORTRAN) $(FORTRANFLAGS) -o $(<) $(>)\n", -"}\n", -"actions HardLink\n", -"{\n", -"$(RM) $(<) && $(LN) $(>) $(<)\n", -"}\n", -"actions Install\n", -"{\n", -"$(CP) $(>) $(<)\n", -"}\n", -"actions Lex\n", -"{\n", -"$(LEX) $(>)\n", -"}\n", -"actions LexMv\n", -"{\n", -"$(MV) lex.yy.c $(<)\n", -"}\n", -"actions Link bind NEEDLIBS\n", -"{\n", -"$(LINK) $(LINKFLAGS) -o $(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS)\n", -"}\n", -"actions MkDir1\n", -"{\n", -"$(MKDIR) $(<)\n", -"}\n", -"actions together Ranlib\n", -"{\n", -"$(RANLIB) $(<)\n", -"}\n", -"actions quietly updated piecemeal together RmTemps\n", -"{\n", -"$(RM) $(>)\n", -"}\n", -"actions Shell\n", -"{\n", -"$(AWK) '\n", -"NR == 1 { print \"$(SHELLHEADER)\" }\n", -"NR == 1 && /^[#:]/ { next }\n", -"/^##/ { next }\n", -"{ print }\n", -"' < $(>) > $(<)\n", -"}\n", -"actions Yacc1\n", -"{\n", -"$(YACC) $(YACCFLAGS) $(>)\n", -"}\n", -"actions YaccMv\n", -"{\n", -"$(MV) $(YACCFILES).c $(<[1])\n", -"$(MV) $(YACCFILES).h $(<[2])\n", -"}\n", -"if 
$(RELOCATE)\n", -"{\n", -"actions C++\n", -"{\n", -"$(C++) -c $(C++FLAGS) $(OPTIM) -I$(HDRS) $(>)\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC) -c $(CCFLAGS) $(OPTIM) -I$(HDRS) $(>)\n", -"}\n", -"actions ignore CcMv\n", -"{\n", -"[ $(<) != $(>:BS=$(SUFOBJ)) ] && $(MV) $(>:BS=$(SUFOBJ)) $(<)\n", -"}\n", -"}\n", -"if $(NOARUPDATE)\n", -"{\n", -"actions Archive\n", -"{\n", -"$(AR) $(<) $(>)\n", -"}\n", -"}\n", -"if $(NT)\n", -"{\n", -"if $(TOOLSET) = VISUALC || $(TOOLSET) = VC7 || $(TOOLSET) = INTELC\n", -"{\n", -"actions updated together piecemeal Archive\n", -"{\n", -"if exist $(<) set _$(<:B)_=$(<)\n", -"$(AR) /out:$(<) %_$(<:B)_% $(>)\n", -"}\n", -"actions As\n", -"{\n", -"$(AS) /Ml /p /v /w2 $(>) $(<) ,nul,nul;\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC) /c $(CCFLAGS) $(OPTIM) /Fo$(<) /I$(HDRS) /I$(STDHDRS) $(>)\n", -"}\n", -"actions C++\n", -"{\n", -"$(C++) /c $(C++FLAGS) $(OPTIM) /Fo$(<) /I$(HDRS) /I$(STDHDRS) /Tp$(>)\n", -"}\n", -"actions Link bind NEEDLIBS\n", -"{\n", -"$(LINK) $(LINKFLAGS) /out:$(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS)\n", -"}\n", -"}\n", -"else if $(TOOLSET) = VISUALC16\n", -"{\n", -"actions updated together piecemeal Archive\n", -"{\n", -"$(AR) $(<) -+$(>)\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC) /c $(CCFLAGS) $(OPTIM) /Fo$(<) /I$(HDRS) $(>)\n", -"}\n", -"actions C++\n", -"{\n", -"$(C++) /c $(C++FLAGS) $(OPTIM) /Fo$(<) /I$(HDRS) /Tp$(>)\n", -"}\n", -"actions Link bind NEEDLIBS\n", -"{\n", -"$(LINK) $(LINKFLAGS) /out:$(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS)\n", -"}\n", -"}\n", -"else if $(TOOLSET) = BORLANDC\n", -"{\n", -"actions updated together piecemeal Archive\n", -"{\n", -"$(AR) $(<) -+$(>)\n", -"}\n", -"actions Link bind NEEDLIBS\n", -"{\n", -"$(LINK) -e$(<) $(LINKFLAGS) $(UNDEFS) -L$(LINKLIBS) $(NEEDLIBS) $(>)\n", -"}\n", -"actions C++\n", -"{\n", -"$(C++) -c $(C++FLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>)\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC) -c $(CCFLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>)\n", -"}\n", -"}\n", -"else if $(TOOLSET) = MINGW\n", -"{\n", -"actions together piecemeal Archive\n", -"{\n", -"$(AR) $(<) $(>:T)\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC) -c $(CCFLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>)\n", -"}\n", -"actions C++\n", -"{\n", -"$(C++) -c $(C++FLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>)\n", -"}\n", -"}\n", -"else if $(TOOLSET) = WATCOM\n", -"{\n", -"actions together piecemeal Archive\n", -"{\n", -"$(AR) $(<) +-$(>)\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC) $(CCFLAGS) $(OPTIM) /Fo=$(<) /I$(HDRS) $(>)\n", -"}\n", -"actions C++\n", -"{\n", -"$(C++) $(C++FLAGS) $(OPTIM) /Fo=$(<) /I$(HDRS) $(>)\n", -"}\n", -"actions Link bind NEEDLIBS\n", -"{\n", -"$(LINK) $(LINKFLAGS) /Fe=$(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS)\n", -"}\n", -"actions Shell\n", -"{\n", -"$(CP) $(>) $(<)\n", -"}\n", -"}\n", -"else if $(TOOLSET) = LCC\n", -"{\n", -"actions together piecemeal Archive\n", -"{\n", -"$(AR) /out:$(<) $(>)\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC) $(CCFLAGS) $(OPTIM) -Fo$(<) -I$(HDRS) $(>)\n", -"}\n", -"actions Link bind NEEDLIBS\n", -"{\n", -"$(LINK) $(LINKFLAGS) -o $(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS)\n", -"}\n", -"actions Shell\n", -"{\n", -"$(CP) $(>) $(<)\n", -"}\n", -"}\n", -"}\n", -"else if $(OS2)\n", -"{\n", -"if $(TOOLSET) = WATCOM\n", -"{\n", -"actions together piecemeal Archive\n", -"{\n", -"$(AR) $(<) +-$(>)\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC) $(CCFLAGS) $(OPTIM) /Fo=$(<) /I$(HDRS) $(>)\n", -"}\n", -"actions C++\n", -"{\n", -"$(C++) $(C++FLAGS) $(OPTIM) /Fo=$(<) /I$(HDRS) $(>)\n", -"}\n", -"actions Link bind 
NEEDLIBS\n", -"{\n", -"$(LINK) $(LINKFLAGS) /Fe=$(<) $(UNDEFS) $(>) $(NEEDLIBS) $(LINKLIBS)\n", -"}\n", -"actions Shell\n", -"{\n", -"$(CP) $(>) $(<)\n", -"}\n", -"}\n", -"else if $(TOOLSET) = EMX\n", -"{\n", -"actions together piecemeal Archive\n", -"{\n", -"$(AR) $(<) $(>:T)\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC) -c $(CCFLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>)\n", -"}\n", -"actions C++\n", -"{\n", -"$(C++) -c $(C++FLAGS) $(OPTIM) -I$(HDRS) -o$(<) $(>)\n", -"}\n", -"}\n", -"}\n", -"else if $(VMS)\n", -"{\n", -"actions updated together piecemeal Archive\n", -"{\n", -"lib/replace $(<) $(>[1]) ,$(>[2-])\n", -"}\n", -"actions Cc\n", -"{\n", -"$(CC)/obj=$(<) $(CCFLAGS) $(OPTIM) $(SLASHINC) $(>)\n", -"}\n", -"actions C++\n", -"{\n", -"$(C++)/obj=$(<) $(C++FLAGS) $(OPTIM) $(SLASHINC) $(>)\n", -"}\n", -"actions piecemeal together existing Clean\n", -"{\n", -"$(RM) $(>[1]);* ,$(>[2-]);*\n", -"}\n", -"actions together quietly CreLib\n", -"{\n", -"if f$search(\"$(<)\") .eqs. \"\" then lib/create $(<)\n", -"}\n", -"actions GenFile1\n", -"{\n", -"mcr $(>[1]) $(<) $(>[2-])\n", -"}\n", -"actions Link bind NEEDLIBS\n", -"{\n", -"$(LINK)/exe=$(<) $(LINKFLAGS) $(>[1]) ,$(>[2-]) ,$(NEEDLIBS)/lib ,$(LINKLIBS)\n", -"}\n", -"actions quietly updated piecemeal together RmTemps\n", -"{\n", -"$(RM) $(>[1]);* ,$(>[2-]);*\n", -"}\n", -"actions Shell\n", -"{\n", -"$(CP) $(>) $(<)\n", -"}\n", -"}\n", -"else if $(MAC)\n", -"{\n", -"actions together Archive\n", -"{\n", -"$(LINK) -library -o $(<) $(>)\n", -"}\n", -"actions Cc\n", -"{\n", -"set -e MWCincludes $(MACINC)\n", -"$(CC) -o $(<) $(CCFLAGS) $(OPTIM) $(>)\n", -"}\n", -"actions C++\n", -"{\n", -"set -e MWCincludes $(MACINC)\n", -"$(CC) -o $(<) $(C++FLAGS) $(OPTIM) $(>)\n", -"}\n", -"actions Link bind NEEDLIBS\n", -"{\n", -"$(LINK) -o $(<) $(LINKFLAGS) $(>) $(NEEDLIBS) \"$(LINKLIBS)\"\n", -"}\n", -"}\n", -"rule BULK { Bulk $(<) : $(>) ; }\n", -"rule FILE { File $(<) : $(>) ; }\n", -"rule HDRRULE { HdrRule $(<) : $(>) ; }\n", -"rule INSTALL { Install $(<) : $(>) ; }\n", -"rule LIBRARY { Library $(<) : $(>) ; }\n", -"rule LIBS { LinkLibraries $(<) : $(>) ; }\n", -"rule LINK { Link $(<) : $(>) ; }\n", -"rule MAIN { Main $(<) : $(>) ; }\n", -"rule SETUID { Setuid $(<) ; }\n", -"rule SHELL { Shell $(<) : $(>) ; }\n", -"rule UNDEFINES { Undefines $(<) : $(>) ; }\n", -"rule INSTALLBIN { InstallBin $(BINDIR) : $(<) ; }\n", -"rule INSTALLLIB { InstallLib $(LIBDIR) : $(<) ; }\n", -"rule INSTALLMAN { InstallMan $(MANDIR) : $(<) ; }\n", -"rule addDirName { $(<) += [ FDirName $(>) ] ; }\n", -"rule makeDirName { $(<) = [ FDirName $(>) ] ; }\n", -"rule makeGristedName { $(<) = [ FGristSourceFiles $(>) ] ; }\n", -"rule makeRelPath { $(<[1]) = [ FRelPath $(<[2-]) : $(>) ] ; }\n", -"rule makeSuffixed { $(<[1]) = [ FAppendSuffix $(>) : $(<[2]) ] ; }\n", -"{\n", -"if $(JAMFILE) { include $(JAMFILE) ; }\n", -"}\n", -"}\n", -0 }; diff --git a/jam-files/engine/jambase.h b/jam-files/engine/jambase.h deleted file mode 100644 index c05ec792..00000000 --- a/jam-files/engine/jambase.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * jambase.h - declaration for the internal jambase - * - * The file Jambase is turned into a C array of strings in jambase.c - * so that it can be built in to the executable. This is the - * declaration for that array. 
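For context on the jambase.c / jambase.h pair removed here: jam does not read its base rules from disk at runtime. A build step converts the Jambase script into a NULL-terminated C array of strings, one per source line, which is linked into the executable and replayed through the scanner at startup; that is why the deleted jambase.c ends with `0 };`. A minimal compilable sketch of the scheme, with an abbreviated array standing in for the real generated contents:

    #include <stdio.h>

    /* Sketch only: the generated jambase.c holds the entire Jambase,
       one C string per line, closed by a null-pointer terminator. */
    char *jambase[] = {
        "AR ?= ar ru ;\n",
        "CC ?= cc ;\n",
        "if $(JAMFILE) { include $(JAMFILE) ; }\n",
        0
    };

    int main(void)
    {
        char **line;
        /* jam's scanner walks the array like this instead of a file. */
        for (line = jambase; *line; ++line)
            fputs(*line, stdout);
        return 0;
    }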
- */ - -extern char *jambase[]; diff --git a/jam-files/engine/jamgram.c b/jam-files/engine/jamgram.c deleted file mode 100644 index b1fa0835..00000000 --- a/jam-files/engine/jamgram.c +++ /dev/null @@ -1,1830 +0,0 @@ -/* A Bison parser, made by GNU Bison 1.875. */ - -/* Skeleton parser for Yacc-like parsing with Bison, - Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002 Free Software Foundation, Inc. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place - Suite 330, - Boston, MA 02111-1307, USA. */ - -/* As a special exception, when this file is copied by Bison into a - Bison output file, you may use that output file without restriction. - This special exception was added by the Free Software Foundation - in version 1.24 of Bison. */ - -/* Written by Richard Stallman by simplifying the original so called - ``semantic'' parser. */ - -/* All symbols defined below should begin with yy or YY, to avoid - infringing on user name space. This should be done even for local - variables, as they might otherwise be expanded by user macros. - There are some unavoidable exceptions within include files to - define necessary library symbols; they are noted "INFRINGES ON - USER NAME SPACE" below. */ - -/* Identify Bison output. */ -#define YYBISON 1 - -/* Skeleton name. */ -#define YYSKELETON_NAME "yacc.c" - -/* Pure parsers. */ -#define YYPURE 0 - -/* Using locations. */ -#define YYLSP_NEEDED 0 - - - -/* Tokens. */ -#ifndef YYTOKENTYPE -# define YYTOKENTYPE - /* Put the tokens into the symbol table, so that GDB and other debuggers - know about them. 
*/ - enum yytokentype { - _BANG_t = 258, - _BANG_EQUALS_t = 259, - _AMPER_t = 260, - _AMPERAMPER_t = 261, - _LPAREN_t = 262, - _RPAREN_t = 263, - _PLUS_EQUALS_t = 264, - _COLON_t = 265, - _SEMIC_t = 266, - _LANGLE_t = 267, - _LANGLE_EQUALS_t = 268, - _EQUALS_t = 269, - _RANGLE_t = 270, - _RANGLE_EQUALS_t = 271, - _QUESTION_EQUALS_t = 272, - _LBRACKET_t = 273, - _RBRACKET_t = 274, - ACTIONS_t = 275, - BIND_t = 276, - CASE_t = 277, - CLASS_t = 278, - DEFAULT_t = 279, - ELSE_t = 280, - EXISTING_t = 281, - FOR_t = 282, - IF_t = 283, - IGNORE_t = 284, - IN_t = 285, - INCLUDE_t = 286, - LOCAL_t = 287, - MODULE_t = 288, - ON_t = 289, - PIECEMEAL_t = 290, - QUIETLY_t = 291, - RETURN_t = 292, - RULE_t = 293, - SWITCH_t = 294, - TOGETHER_t = 295, - UPDATED_t = 296, - WHILE_t = 297, - _LBRACE_t = 298, - _BAR_t = 299, - _BARBAR_t = 300, - _RBRACE_t = 301, - ARG = 302, - STRING = 303 - }; -#endif -#define _BANG_t 258 -#define _BANG_EQUALS_t 259 -#define _AMPER_t 260 -#define _AMPERAMPER_t 261 -#define _LPAREN_t 262 -#define _RPAREN_t 263 -#define _PLUS_EQUALS_t 264 -#define _COLON_t 265 -#define _SEMIC_t 266 -#define _LANGLE_t 267 -#define _LANGLE_EQUALS_t 268 -#define _EQUALS_t 269 -#define _RANGLE_t 270 -#define _RANGLE_EQUALS_t 271 -#define _QUESTION_EQUALS_t 272 -#define _LBRACKET_t 273 -#define _RBRACKET_t 274 -#define ACTIONS_t 275 -#define BIND_t 276 -#define CASE_t 277 -#define CLASS_t 278 -#define DEFAULT_t 279 -#define ELSE_t 280 -#define EXISTING_t 281 -#define FOR_t 282 -#define IF_t 283 -#define IGNORE_t 284 -#define IN_t 285 -#define INCLUDE_t 286 -#define LOCAL_t 287 -#define MODULE_t 288 -#define ON_t 289 -#define PIECEMEAL_t 290 -#define QUIETLY_t 291 -#define RETURN_t 292 -#define RULE_t 293 -#define SWITCH_t 294 -#define TOGETHER_t 295 -#define UPDATED_t 296 -#define WHILE_t 297 -#define _LBRACE_t 298 -#define _BAR_t 299 -#define _BARBAR_t 300 -#define _RBRACE_t 301 -#define ARG 302 -#define STRING 303 - - - - -/* Copy the first part of user declarations. 
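The token list is emitted twice above, once as an enum (so debuggers see symbolic names) and once as plain #defines (for pre-ANSI consumers); the scanner behind jamgram.y returns these codes from yylex(). A toy illustration of that contract, using real token names from the deleted header but a fabricated input stream:

    #include <stdio.h>

    /* Codes copied from the deleted jamgram.h; they must stay in
       sync with the generated parser tables. */
    enum { _COLON_t = 265, _SEMIC_t = 266, ARG = 302 };

    /* Fabricated stand-in for yylex() scanning "x : y ;". */
    static const int stream[] = { ARG, _COLON_t, ARG, _SEMIC_t, 0 };
    static int pos = 0;
    static int yylex(void) { return stream[pos++]; }

    int main(void)
    {
        int tok;
        while ((tok = yylex()) != 0)
            printf("yylex -> %d\n", tok);
        return 0;
    }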
*/ -#line 96 "jamgram.y" - -#include "jam.h" - -#include "lists.h" -#include "parse.h" -#include "scan.h" -#include "compile.h" -#include "newstr.h" -#include "rules.h" - -# define YYMAXDEPTH 10000 /* for OSF and other less endowed yaccs */ - -# define F0 (LIST *(*)(PARSE *, FRAME *))0 -# define P0 (PARSE *)0 -# define S0 (char *)0 - -# define pappend( l,r ) parse_make( compile_append,l,r,P0,S0,S0,0 ) -# define peval( c,l,r ) parse_make( compile_eval,l,r,P0,S0,S0,c ) -# define pfor( s,l,r,x ) parse_make( compile_foreach,l,r,P0,s,S0,x ) -# define pif( l,r,t ) parse_make( compile_if,l,r,t,S0,S0,0 ) -# define pincl( l ) parse_make( compile_include,l,P0,P0,S0,S0,0 ) -# define plist( s ) parse_make( compile_list,P0,P0,P0,s,S0,0 ) -# define plocal( l,r,t ) parse_make( compile_local,l,r,t,S0,S0,0 ) -# define pmodule( l,r ) parse_make( compile_module,l,r,P0,S0,S0,0 ) -# define pclass( l,r ) parse_make( compile_class,l,r,P0,S0,S0,0 ) -# define pnull() parse_make( compile_null,P0,P0,P0,S0,S0,0 ) -# define pon( l,r ) parse_make( compile_on,l,r,P0,S0,S0,0 ) -# define prule( s,p ) parse_make( compile_rule,p,P0,P0,s,S0,0 ) -# define prules( l,r ) parse_make( compile_rules,l,r,P0,S0,S0,0 ) -# define pset( l,r,a ) parse_make( compile_set,l,r,P0,S0,S0,a ) -# define pset1( l,r,t,a ) parse_make( compile_settings,l,r,t,S0,S0,a ) -# define psetc( s,p,a,l ) parse_make( compile_setcomp,p,a,P0,s,S0,l ) -# define psete( s,l,s1,f ) parse_make( compile_setexec,l,P0,P0,s,s1,f ) -# define pswitch( l,r ) parse_make( compile_switch,l,r,P0,S0,S0,0 ) -# define pwhile( l,r ) parse_make( compile_while,l,r,P0,S0,S0,0 ) - -# define pnode( l,r ) parse_make( F0,l,r,P0,S0,S0,0 ) -# define psnode( s,l ) parse_make( F0,l,P0,P0,s,S0,0 ) - - - -/* Enabling traces. */ -#ifndef YYDEBUG -# define YYDEBUG 0 -#endif - -/* Enabling verbose error messages. */ -#ifdef YYERROR_VERBOSE -# undef YYERROR_VERBOSE -# define YYERROR_VERBOSE 1 -#else -# define YYERROR_VERBOSE 0 -#endif - -#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED) -typedef int YYSTYPE; -# define yystype YYSTYPE /* obsolescent; will be withdrawn */ -# define YYSTYPE_IS_DECLARED 1 -# define YYSTYPE_IS_TRIVIAL 1 -#endif - - - -/* Copy the second part of user declarations. */ - - -/* Line 214 of yacc.c. */ -#line 223 "y.tab.c" - -#if ! defined (yyoverflow) || YYERROR_VERBOSE - -/* The parser invokes alloca or malloc; define the necessary symbols. */ - -# if YYSTACK_USE_ALLOCA -# define YYSTACK_ALLOC alloca -# else -# ifndef YYSTACK_USE_ALLOCA -# if defined (alloca) || defined (_ALLOCA_H) -# define YYSTACK_ALLOC alloca -# else -# ifdef __GNUC__ -# define YYSTACK_ALLOC __builtin_alloca -# endif -# endif -# endif -# endif - -# ifdef YYSTACK_ALLOC - /* Pacify GCC's `empty if-body' warning. */ -# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) -# else -# if defined (__STDC__) || defined (__cplusplus) -# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ -# define YYSIZE_T size_t -# endif -# define YYSTACK_ALLOC malloc -# define YYSTACK_FREE free -# endif -#endif /* ! defined (yyoverflow) || YYERROR_VERBOSE */ - - -#if (! defined (yyoverflow) \ - && (! defined (__cplusplus) \ - || (YYSTYPE_IS_TRIVIAL))) - -/* A type that is properly aligned for any stack member. */ -union yyalloc -{ - short yyss; - YYSTYPE yyvs; - }; - -/* The size of the maximum gap between one aligned stack and the next. */ -# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) - -/* The size of an array large to enough to hold all stacks, each with - N elements. 
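The p*() macros in the prologue above (pappend, peval, pfor, and the rest) all expand to a single constructor, parse_make(), so every grammar action builds one uniform node: a compile callback, up to three children, two strings, and an integer, with F0/P0/S0 as the typed nulls. A compilable sketch of that node scheme; the struct layout is illustrative, not the real parse.h:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for jam's PARSE node; fields mirror the
       seven parse_make() arguments used by the macros above. */
    typedef struct parse PARSE;
    struct parse {
        void  *func;                  /* compile_* callback, F0 if unused */
        PARSE *left, *right, *third;  /* children, P0 if unused */
        char  *string, *string1;      /* literals, S0 if unused */
        int    num;                   /* operator / flag argument */
    };

    static PARSE *parse_make(void *f, PARSE *l, PARSE *r, PARSE *t,
                             char *s, char *s1, int n)
    {
        PARSE *p = malloc(sizeof *p);
        if (!p) abort();
        p->func = f; p->left = l; p->right = r; p->third = t;
        p->string = s; p->string1 = s1; p->num = n;
        return p;
    }

    int main(void)
    {
        /* Equivalent of plist("x"): a leaf node carrying one string. */
        PARSE *leaf = parse_make(0, 0, 0, 0, "x", 0, 0);
        printf("leaf holds: %s\n", leaf->string);
        free(leaf);
        return 0;
    }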
*/ -# define YYSTACK_BYTES(N) \ - ((N) * (sizeof (short) + sizeof (YYSTYPE)) \ - + YYSTACK_GAP_MAXIMUM) - -/* Copy COUNT objects from FROM to TO. The source and destination do - not overlap. */ -# ifndef YYCOPY -# if 1 < __GNUC__ -# define YYCOPY(To, From, Count) \ - __builtin_memcpy (To, From, (Count) * sizeof (*(From))) -# else -# define YYCOPY(To, From, Count) \ - do \ - { \ - register YYSIZE_T yyi; \ - for (yyi = 0; yyi < (Count); yyi++) \ - (To)[yyi] = (From)[yyi]; \ - } \ - while (0) -# endif -# endif - -/* Relocate STACK from its old location to the new one. The - local variables YYSIZE and YYSTACKSIZE give the old and new number of - elements in the stack, and YYPTR gives the new location of the - stack. Advance YYPTR to a properly aligned location for the next - stack. */ -# define YYSTACK_RELOCATE(Stack) \ - do \ - { \ - YYSIZE_T yynewbytes; \ - YYCOPY (&yyptr->Stack, Stack, yysize); \ - Stack = &yyptr->Stack; \ - yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ - yyptr += yynewbytes / sizeof (*yyptr); \ - } \ - while (0) - -#endif - -#if defined (__STDC__) || defined (__cplusplus) - typedef signed char yysigned_char; -#else - typedef short yysigned_char; -#endif - -/* YYFINAL -- State number of the termination state. */ -#define YYFINAL 43 -/* YYLAST -- Last index in YYTABLE. */ -#define YYLAST 261 - -/* YYNTOKENS -- Number of terminals. */ -#define YYNTOKENS 49 -/* YYNNTS -- Number of nonterminals. */ -#define YYNNTS 24 -/* YYNRULES -- Number of rules. */ -#define YYNRULES 75 -/* YYNRULES -- Number of states. */ -#define YYNSTATES 159 - -/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */ -#define YYUNDEFTOK 2 -#define YYMAXUTOK 303 - -#define YYTRANSLATE(YYX) \ - ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) - -/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */ -static const unsigned char yytranslate[] = -{ - 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, - 45, 46, 47, 48 -}; - -#if YYDEBUG -/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in - YYRHS. */ -static const unsigned char yyprhs[] = -{ - 0, 0, 3, 4, 6, 8, 10, 12, 15, 21, - 22, 25, 27, 31, 32, 34, 35, 39, 43, 47, - 52, 59, 63, 72, 78, 84, 90, 96, 102, 110, - 116, 120, 121, 122, 132, 134, 136, 138, 141, 143, - 147, 151, 155, 159, 163, 167, 171, 175, 179, 183, - 187, 190, 194, 195, 198, 203, 205, 209, 211, 212, - 215, 217, 218, 223, 226, 231, 236, 237, 240, 242, - 244, 246, 248, 250, 252, 253 -}; - -/* YYRHS -- A `-1'-separated list of the rules' RHS. 
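YYSTACK_RELOCATE above is the heart of Bison's stack growth: when the fixed-size stack arrays overflow, the live elements are copied into a freshly allocated block and the pointers are rebased. The doubling idiom, reduced to a single stack in plain C (the real macro moves the state and value stacks together through the yyalloc union):

    #include <stdlib.h>
    #include <string.h>

    /* Double a parser stack, preserving `used` live entries; returns
       NULL on exhaustion, the analogue of hitting YYMAXDEPTH. */
    static short *grow_stack(short *old, size_t *cap, size_t used)
    {
        size_t new_cap = *cap * 2;
        short *fresh = malloc(new_cap * sizeof *fresh);
        if (!fresh)
            return NULL;
        memcpy(fresh, old, used * sizeof *fresh);  /* the YYCOPY step */
        free(old);  /* real code frees only stacks it heap-allocated */
        *cap = new_cap;
        return fresh;
    }

    int main(void)
    {
        size_t cap = 4;
        short *stack = calloc(cap, sizeof *stack);
        if (!stack) return 1;
        stack = grow_stack(stack, &cap, 4);
        if (!stack) return 1;
        free(stack);
        return 0;
    }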
*/ -static const yysigned_char yyrhs[] = -{ - 50, 0, -1, -1, 52, -1, 53, -1, 52, -1, - 57, -1, 57, 52, -1, 32, 65, 54, 11, 51, - -1, -1, 14, 65, -1, 53, -1, 7, 64, 8, - -1, -1, 32, -1, -1, 43, 51, 46, -1, 31, - 65, 11, -1, 47, 64, 11, -1, 67, 60, 65, - 11, -1, 67, 34, 65, 60, 65, 11, -1, 37, - 65, 11, -1, 27, 56, 47, 30, 65, 43, 51, - 46, -1, 39, 65, 43, 62, 46, -1, 28, 61, - 43, 51, 46, -1, 33, 65, 43, 51, 46, -1, - 23, 64, 43, 51, 46, -1, 42, 61, 43, 51, - 46, -1, 28, 61, 43, 51, 46, 25, 57, -1, - 56, 38, 47, 55, 57, -1, 34, 67, 57, -1, - -1, -1, 20, 70, 47, 72, 43, 58, 48, 59, - 46, -1, 14, -1, 9, -1, 17, -1, 24, 14, - -1, 67, -1, 61, 14, 61, -1, 61, 4, 61, - -1, 61, 12, 61, -1, 61, 13, 61, -1, 61, - 15, 61, -1, 61, 16, 61, -1, 61, 5, 61, - -1, 61, 6, 61, -1, 61, 44, 61, -1, 61, - 45, 61, -1, 67, 30, 65, -1, 3, 61, -1, - 7, 61, 8, -1, -1, 63, 62, -1, 22, 47, - 10, 51, -1, 65, -1, 65, 10, 64, -1, 66, - -1, -1, 66, 67, -1, 47, -1, -1, 18, 68, - 69, 19, -1, 67, 64, -1, 34, 67, 67, 64, - -1, 34, 67, 37, 65, -1, -1, 70, 71, -1, - 41, -1, 40, -1, 29, -1, 36, -1, 35, -1, - 26, -1, -1, 21, 65, -1 -}; - -/* YYRLINE[YYN] -- source line where rule number YYN was defined. */ -static const unsigned short yyrline[] = -{ - 0, 139, 139, 141, 152, 154, 158, 160, 162, 167, - 170, 172, 176, 179, 182, 185, 188, 190, 192, 194, - 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, - 216, 219, 221, 218, 230, 232, 234, 236, 243, 245, - 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, - 267, 269, 281, 282, 286, 295, 297, 307, 312, 313, - 317, 319, 319, 328, 330, 332, 343, 344, 348, 350, - 352, 354, 356, 358, 368, 369 -}; -#endif - -#if YYDEBUG || YYERROR_VERBOSE -/* YYTNME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. - First, the terminals, then, starting at YYNTOKENS, nonterminals. */ -static const char *const yytname[] = -{ - "$end", "error", "$undefined", "_BANG_t", "_BANG_EQUALS_t", "_AMPER_t", - "_AMPERAMPER_t", "_LPAREN_t", "_RPAREN_t", "_PLUS_EQUALS_t", "_COLON_t", - "_SEMIC_t", "_LANGLE_t", "_LANGLE_EQUALS_t", "_EQUALS_t", "_RANGLE_t", - "_RANGLE_EQUALS_t", "_QUESTION_EQUALS_t", "_LBRACKET_t", "_RBRACKET_t", - "ACTIONS_t", "BIND_t", "CASE_t", "CLASS_t", "DEFAULT_t", "ELSE_t", - "EXISTING_t", "FOR_t", "IF_t", "IGNORE_t", "IN_t", "INCLUDE_t", - "LOCAL_t", "MODULE_t", "ON_t", "PIECEMEAL_t", "QUIETLY_t", "RETURN_t", - "RULE_t", "SWITCH_t", "TOGETHER_t", "UPDATED_t", "WHILE_t", "_LBRACE_t", - "_BAR_t", "_BARBAR_t", "_RBRACE_t", "ARG", "STRING", "$accept", "run", - "block", "rules", "null", "assign_list_opt", "arglist_opt", "local_opt", - "rule", "@1", "@2", "assign", "expr", "cases", "case", "lol", "list", - "listp", "arg", "@3", "func", "eflags", "eflag", "bindlist", 0 -}; -#endif - -# ifdef YYPRINT -/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to - token YYLEX-NUM. */ -static const unsigned short yytoknum[] = -{ - 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, - 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, - 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, - 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, - 295, 296, 297, 298, 299, 300, 301, 302, 303 -}; -# endif - -/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. 
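yyrline and yytname above are debugging metadata only: they map each generated rule and symbol number back to a jamgram.y source line and a printable name, which is what the `#line NNN "jamgram.y"` markers further down refer to. Roughly how the trace code uses them, with abbreviated, fabricated table excerpts:

    #include <stdio.h>

    /* Fabricated excerpts standing in for the generated tables. */
    static const char *const yytname[] = { "$end", "error", "run", "block" };
    static const unsigned short yyrline[] = { 0, 139, 141, 152 };

    /* Approximately what yy_reduce_print() does with these tables. */
    static void trace_reduce(int rule)
    {
        printf("Reducing by rule %d (jamgram.y line %u)\n",
               rule, (unsigned)yyrline[rule]);
    }

    int main(void)
    {
        trace_reduce(2);                      /* -> jamgram.y line 141 */
        printf("symbol 3 is %s\n", yytname[3]);
        return 0;
    }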
*/ -static const unsigned char yyr1[] = -{ - 0, 49, 50, 50, 51, 51, 52, 52, 52, 53, - 54, 54, 55, 55, 56, 56, 57, 57, 57, 57, - 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, - 57, 58, 59, 57, 60, 60, 60, 60, 61, 61, - 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, - 61, 61, 62, 62, 63, 64, 64, 65, 66, 66, - 67, 68, 67, 69, 69, 69, 70, 70, 71, 71, - 71, 71, 71, 71, 72, 72 -}; - -/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */ -static const unsigned char yyr2[] = -{ - 0, 2, 0, 1, 1, 1, 1, 2, 5, 0, - 2, 1, 3, 0, 1, 0, 3, 3, 3, 4, - 6, 3, 8, 5, 5, 5, 5, 5, 7, 5, - 3, 0, 0, 9, 1, 1, 1, 2, 1, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 2, 3, 0, 2, 4, 1, 3, 1, 0, 2, - 1, 0, 4, 2, 4, 4, 0, 2, 1, 1, - 1, 1, 1, 1, 0, 2 -}; - -/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state - STATE-NUM when YYTABLE doesn't specify something else to do. Zero - means the default is an error. */ -static const unsigned char yydefact[] = -{ - 2, 61, 66, 58, 15, 0, 58, 58, 58, 0, - 58, 58, 0, 9, 60, 0, 3, 0, 6, 0, - 0, 0, 0, 55, 57, 14, 0, 0, 0, 60, - 0, 38, 0, 9, 0, 15, 0, 0, 0, 0, - 5, 4, 0, 1, 0, 7, 35, 34, 36, 0, - 58, 58, 0, 58, 0, 73, 70, 72, 71, 69, - 68, 74, 67, 9, 58, 59, 0, 50, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, - 58, 17, 58, 11, 0, 9, 30, 21, 52, 9, - 16, 18, 13, 37, 0, 0, 0, 63, 62, 58, - 0, 0, 56, 58, 51, 40, 45, 46, 41, 42, - 39, 43, 44, 0, 47, 48, 49, 10, 9, 0, - 0, 0, 52, 0, 58, 15, 58, 19, 58, 58, - 75, 31, 26, 0, 24, 8, 25, 0, 23, 53, - 27, 0, 29, 0, 65, 64, 0, 9, 15, 9, - 12, 20, 32, 0, 28, 54, 0, 22, 33 -}; - -/* YYDEFGOTO[NTERM-NUM]. */ -static const short yydefgoto[] = -{ - -1, 15, 39, 40, 41, 84, 125, 17, 18, 146, - 156, 51, 30, 121, 122, 22, 23, 24, 31, 20, - 54, 21, 62, 100 -}; - -/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing - STATE-NUM. */ -#define YYPACT_NINF -48 -static const short yypact[] = -{ - 179, -48, -48, -48, -15, 7, -48, -16, -48, 3, - -48, -48, 7, 179, 1, 27, -48, -9, 179, 19, - -3, 33, -11, 24, 3, -48, -10, 7, 7, -48, - 138, 9, 30, 35, 13, 205, 53, 22, 151, 20, - -48, -48, 56, -48, 23, -48, -48, -48, -48, 61, - -48, -48, 3, -48, 62, -48, -48, -48, -48, -48, - -48, 58, -48, 179, -48, -48, 52, -48, 164, 7, - 7, 7, 7, 7, 7, 7, 7, 179, 7, 7, - -48, -48, -48, -48, 72, 179, -48, -48, 68, 179, - -48, -48, 85, -48, 77, 73, 8, -48, -48, -48, - 50, 57, -48, -48, -48, 45, 93, 93, -48, -48, - 45, -48, -48, 64, 245, 245, -48, -48, 179, 66, - 67, 69, 68, 71, -48, 205, -48, -48, -48, -48, - -48, -48, -48, 70, 79, -48, -48, 109, -48, -48, - -48, 112, -48, 115, -48, -48, 75, 179, 205, 179, - -48, -48, -48, 81, -48, -48, 82, -48, -48 -}; - -/* YYPGOTO[NTERM-NUM]. */ -static const short yypgoto[] = -{ - -48, -48, -47, 5, 104, -48, -48, 136, -27, -48, - -48, 47, 60, 36, -48, -13, -4, -48, 0, -48, - -48, -48, -48, -48 -}; - -/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If - positive, shift that token. If negative, reduce the rule which - number is the opposite. If zero, do what YYDEFACT says. - If YYTABLE_NINF, syntax error. 
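The packed yypact/yytable/yycheck/yydefact machinery described in the comment above encodes every parser decision: yypact[state] gives a base offset into yytable, the lookahead token is added to it, and yycheck guards against collisions between rows that share storage; misses fall back to yydefact's default reduction. A condensed sketch of that lookup with placeholder table contents (not the real jamgram tables):

    #include <stdio.h>

    /* Placeholder tables; a parser generator emits the packed originals. */
    enum { YYLAST = 3, YYPACT_NINF = -99 };
    static const short yypact[]  = { 0, YYPACT_NINF };
    static const short yytable[] = { 0, 0, 0, 5 };
    static const short yycheck[] = { -1, -1, -1, 3 };

    /* >0: shift to that state; <0: reduce by rule -n; 0: take the
       state's default action from yydefact (error paths omitted). */
    static int lookup_action(int state, int token)
    {
        int n = yypact[state];
        if (n == YYPACT_NINF)
            return 0;                 /* state has no per-token entries */
        n += token;
        if (n < 0 || n > YYLAST || yycheck[n] != token)
            return 0;                 /* collision guard: not our row */
        return yytable[n];
    }

    int main(void)
    {
        printf("state 0, token 3 -> %d\n", lookup_action(0, 3)); /* 5: shift */
        printf("state 0, token 2 -> %d\n", lookup_action(0, 2)); /* 0: default */
        return 0;
    }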
*/ -#define YYTABLE_NINF -59 -static const short yytable[] = -{ - 19, 42, 32, 33, 34, 16, 36, 37, 86, 35, - 27, -58, -58, 19, 28, 1, 101, 25, 19, -58, - 53, 1, -14, 45, 65, 1, 1, 43, 46, 44, - 113, 52, 63, 47, 64, 19, 48, 66, 119, 80, - 97, 81, 123, 49, 29, 128, 94, 95, -58, 82, - 29, 102, 96, 50, 29, 29, 85, 72, 73, 55, - 75, 76, 56, 19, 87, 88, 90, 91, 57, 58, - 92, 135, 38, 59, 60, 93, 116, 19, 117, 99, - 61, 98, 103, 118, 127, 19, 46, 67, 68, 19, - 120, 47, 124, 131, 48, 130, 129, 69, 142, 133, - 153, 49, 155, 132, 148, 72, 73, 74, 75, 76, - 134, 141, 136, 147, 137, 138, 145, 140, 19, 149, - 150, 154, 143, 152, 144, 19, 151, 157, 158, 105, - 106, 107, 108, 109, 110, 111, 112, 83, 114, 115, - 26, 126, 69, 70, 71, 0, 0, 19, 19, 19, - 72, 73, 74, 75, 76, 69, 70, 71, 139, 0, - 0, 0, 0, 72, 73, 74, 75, 76, 69, 70, - 71, 0, 104, 0, 0, 0, 72, 73, 74, 75, - 76, 77, 78, 79, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 89, 78, 79, 1, 0, 2, - 0, 0, 3, 0, 0, 0, 4, 5, 78, 79, - 6, 7, 8, 9, 0, 0, 10, -15, 11, 0, - 0, 12, 13, 1, 0, 2, 14, 0, 3, 0, - 0, 0, 4, 5, 0, 0, 6, 25, 8, 9, - 0, 0, 10, 0, 11, 0, 0, 12, 13, 69, - 70, 71, 14, 0, 0, 0, 0, 72, 73, 74, - 75, 76 -}; - -static const short yycheck[] = -{ - 0, 14, 6, 7, 8, 0, 10, 11, 35, 9, - 3, 10, 11, 13, 7, 18, 63, 32, 18, 18, - 20, 18, 38, 18, 24, 18, 18, 0, 9, 38, - 77, 34, 43, 14, 10, 35, 17, 47, 85, 30, - 53, 11, 89, 24, 47, 37, 50, 51, 47, 14, - 47, 64, 52, 34, 47, 47, 43, 12, 13, 26, - 15, 16, 29, 63, 11, 43, 46, 11, 35, 36, - 47, 118, 12, 40, 41, 14, 80, 77, 82, 21, - 47, 19, 30, 11, 11, 85, 9, 27, 28, 89, - 22, 14, 7, 43, 17, 99, 96, 4, 125, 103, - 147, 24, 149, 46, 25, 12, 13, 14, 15, 16, - 46, 124, 46, 43, 47, 46, 129, 46, 118, 10, - 8, 148, 126, 48, 128, 125, 11, 46, 46, 69, - 70, 71, 72, 73, 74, 75, 76, 33, 78, 79, - 4, 94, 4, 5, 6, -1, -1, 147, 148, 149, - 12, 13, 14, 15, 16, 4, 5, 6, 122, -1, - -1, -1, -1, 12, 13, 14, 15, 16, 4, 5, - 6, -1, 8, -1, -1, -1, 12, 13, 14, 15, - 16, 43, 44, 45, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, 43, 44, 45, 18, -1, 20, - -1, -1, 23, -1, -1, -1, 27, 28, 44, 45, - 31, 32, 33, 34, -1, -1, 37, 38, 39, -1, - -1, 42, 43, 18, -1, 20, 47, -1, 23, -1, - -1, -1, 27, 28, -1, -1, 31, 32, 33, 34, - -1, -1, 37, -1, 39, -1, -1, 42, 43, 4, - 5, 6, 47, -1, -1, -1, -1, 12, 13, 14, - 15, 16 -}; - -/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing - symbol of state STATE-NUM. */ -static const unsigned char yystos[] = -{ - 0, 18, 20, 23, 27, 28, 31, 32, 33, 34, - 37, 39, 42, 43, 47, 50, 52, 56, 57, 67, - 68, 70, 64, 65, 66, 32, 56, 3, 7, 47, - 61, 67, 65, 65, 65, 67, 65, 65, 61, 51, - 52, 53, 64, 0, 38, 52, 9, 14, 17, 24, - 34, 60, 34, 67, 69, 26, 29, 35, 36, 40, - 41, 47, 71, 43, 10, 67, 47, 61, 61, 4, - 5, 6, 12, 13, 14, 15, 16, 43, 44, 45, - 30, 11, 14, 53, 54, 43, 57, 11, 43, 43, - 46, 11, 47, 14, 65, 65, 67, 64, 19, 21, - 72, 51, 64, 30, 8, 61, 61, 61, 61, 61, - 61, 61, 61, 51, 61, 61, 65, 65, 11, 51, - 22, 62, 63, 51, 7, 55, 60, 11, 37, 67, - 65, 43, 46, 65, 46, 51, 46, 47, 46, 62, - 46, 64, 57, 65, 65, 64, 58, 43, 25, 10, - 8, 11, 48, 51, 57, 51, 59, 46, 46 -}; - -#if ! defined (YYSIZE_T) && defined (__SIZE_TYPE__) -# define YYSIZE_T __SIZE_TYPE__ -#endif -#if ! defined (YYSIZE_T) && defined (size_t) -# define YYSIZE_T size_t -#endif -#if ! defined (YYSIZE_T) -# if defined (__STDC__) || defined (__cplusplus) -# include <stddef.h> /* INFRINGES ON USER NAME SPACE */ -# define YYSIZE_T size_t -# endif -#endif -#if ! 
defined (YYSIZE_T) -# define YYSIZE_T unsigned int -#endif - -#define yyerrok (yyerrstatus = 0) -#define yyclearin (yychar = YYEMPTY) -#define YYEMPTY (-2) -#define YYEOF 0 - -#define YYACCEPT goto yyacceptlab -#define YYABORT goto yyabortlab -#define YYERROR goto yyerrlab1 - -/* Like YYERROR except do call yyerror. This remains here temporarily - to ease the transition to the new meaning of YYERROR, for GCC. - Once GCC version 2 has supplanted version 1, this can go. */ - -#define YYFAIL goto yyerrlab - -#define YYRECOVERING() (!!yyerrstatus) - -#define YYBACKUP(Token, Value) \ -do \ - if (yychar == YYEMPTY && yylen == 1) \ - { \ - yychar = (Token); \ - yylval = (Value); \ - yytoken = YYTRANSLATE (yychar); \ - YYPOPSTACK; \ - goto yybackup; \ - } \ - else \ - { \ - yyerror ("syntax error: cannot back up");\ - YYERROR; \ - } \ -while (0) - -#define YYTERROR 1 -#define YYERRCODE 256 - -/* YYLLOC_DEFAULT -- Compute the default location (before the actions - are run). */ - -#ifndef YYLLOC_DEFAULT -# define YYLLOC_DEFAULT(Current, Rhs, N) \ - Current.first_line = Rhs[1].first_line; \ - Current.first_column = Rhs[1].first_column; \ - Current.last_line = Rhs[N].last_line; \ - Current.last_column = Rhs[N].last_column; -#endif - -/* YYLEX -- calling `yylex' with the right arguments. */ - -#ifdef YYLEX_PARAM -# define YYLEX yylex (YYLEX_PARAM) -#else -# define YYLEX yylex () -#endif - -/* Enable debugging if requested. */ -#if YYDEBUG - -# ifndef YYFPRINTF -# include <stdio.h> /* INFRINGES ON USER NAME SPACE */ -# define YYFPRINTF fprintf -# endif - -# define YYDPRINTF(Args) \ -do { \ - if (yydebug) \ - YYFPRINTF Args; \ -} while (0) - -# define YYDSYMPRINT(Args) \ -do { \ - if (yydebug) \ - yysymprint Args; \ -} while (0) - -# define YYDSYMPRINTF(Title, Token, Value, Location) \ -do { \ - if (yydebug) \ - { \ - YYFPRINTF (stderr, "%s ", Title); \ - yysymprint (stderr, \ - Token, Value); \ - YYFPRINTF (stderr, "\n"); \ - } \ -} while (0) - -/*------------------------------------------------------------------. -| yy_stack_print -- Print the state stack from its BOTTOM up to its | -| TOP (cinluded). | -`------------------------------------------------------------------*/ - -#if defined (__STDC__) || defined (__cplusplus) -static void -yy_stack_print (short *bottom, short *top) -#else -static void -yy_stack_print (bottom, top) - short *bottom; - short *top; -#endif -{ - YYFPRINTF (stderr, "Stack now"); - for (/* Nothing. */; bottom <= top; ++bottom) - YYFPRINTF (stderr, " %d", *bottom); - YYFPRINTF (stderr, "\n"); -} - -# define YY_STACK_PRINT(Bottom, Top) \ -do { \ - if (yydebug) \ - yy_stack_print ((Bottom), (Top)); \ -} while (0) - - -/*------------------------------------------------. -| Report that the YYRULE is going to be reduced. | -`------------------------------------------------*/ - -#if defined (__STDC__) || defined (__cplusplus) -static void -yy_reduce_print (int yyrule) -#else -static void -yy_reduce_print (yyrule) - int yyrule; -#endif -{ - int yyi; - unsigned int yylineno = yyrline[yyrule]; - YYFPRINTF (stderr, "Reducing stack by rule %d (line %u), ", - yyrule - 1, yylineno); - /* Print the symbols being reduced, and their result. */ - for (yyi = yyprhs[yyrule]; 0 <= yyrhs[yyi]; yyi++) - YYFPRINTF (stderr, "%s ", yytname [yyrhs[yyi]]); - YYFPRINTF (stderr, "-> %s\n", yytname [yyr1[yyrule]]); -} - -# define YY_REDUCE_PRINT(Rule) \ -do { \ - if (yydebug) \ - yy_reduce_print (Rule); \ -} while (0) - -/* Nonzero means print parse trace. 
It is left uninitialized so that - multiple parsers can coexist. */ -int yydebug; -#else /* !YYDEBUG */ -# define YYDPRINTF(Args) -# define YYDSYMPRINT(Args) -# define YYDSYMPRINTF(Title, Token, Value, Location) -# define YY_STACK_PRINT(Bottom, Top) -# define YY_REDUCE_PRINT(Rule) -#endif /* !YYDEBUG */ - - -/* YYINITDEPTH -- initial size of the parser's stacks. */ -#ifndef YYINITDEPTH -# define YYINITDEPTH 200 -#endif - -/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only - if the built-in stack extension method is used). - - Do not make this value too large; the results are undefined if - SIZE_MAX < YYSTACK_BYTES (YYMAXDEPTH) - evaluated with infinite-precision integer arithmetic. */ - -#if YYMAXDEPTH == 0 -# undef YYMAXDEPTH -#endif - -#ifndef YYMAXDEPTH -# define YYMAXDEPTH 10000 -#endif - - - -#if YYERROR_VERBOSE - -# ifndef yystrlen -# if defined (__GLIBC__) && defined (_STRING_H) -# define yystrlen strlen -# else -/* Return the length of YYSTR. */ -static YYSIZE_T -# if defined (__STDC__) || defined (__cplusplus) -yystrlen (const char *yystr) -# else -yystrlen (yystr) - const char *yystr; -# endif -{ - register const char *yys = yystr; - - while (*yys++ != '\0') - continue; - - return yys - yystr - 1; -} -# endif -# endif - -# ifndef yystpcpy -# if defined (__GLIBC__) && defined (_STRING_H) && defined (_GNU_SOURCE) -# define yystpcpy stpcpy -# else -/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in - YYDEST. */ -static char * -# if defined (__STDC__) || defined (__cplusplus) -yystpcpy (char *yydest, const char *yysrc) -# else -yystpcpy (yydest, yysrc) - char *yydest; - const char *yysrc; -# endif -{ - register char *yyd = yydest; - register const char *yys = yysrc; - - while ((*yyd++ = *yys++) != '\0') - continue; - - return yyd - 1; -} -# endif -# endif - -#endif /* !YYERROR_VERBOSE */ - - - -#if YYDEBUG -/*--------------------------------. -| Print this symbol on YYOUTPUT. | -`--------------------------------*/ - -#if defined (__STDC__) || defined (__cplusplus) -static void -yysymprint (FILE *yyoutput, int yytype, YYSTYPE *yyvaluep) -#else -static void -yysymprint (yyoutput, yytype, yyvaluep) - FILE *yyoutput; - int yytype; - YYSTYPE *yyvaluep; -#endif -{ - /* Pacify ``unused variable'' warnings. */ - (void) yyvaluep; - - if (yytype < YYNTOKENS) - { - YYFPRINTF (yyoutput, "token %s (", yytname[yytype]); -# ifdef YYPRINT - YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); -# endif - } - else - YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]); - - switch (yytype) - { - default: - break; - } - YYFPRINTF (yyoutput, ")"); -} - -#endif /* ! YYDEBUG */ -/*-----------------------------------------------. -| Release the memory associated to this symbol. | -`-----------------------------------------------*/ - -#if defined (__STDC__) || defined (__cplusplus) -static void -yydestruct (int yytype, YYSTYPE *yyvaluep) -#else -static void -yydestruct (yytype, yyvaluep) - int yytype; - YYSTYPE *yyvaluep; -#endif -{ - /* Pacify ``unused variable'' warnings. */ - (void) yyvaluep; - - switch (yytype) - { - - default: - break; - } -} - - -/* Prevent warnings from -Wmissing-prototypes. */ - -#ifdef YYPARSE_PARAM -# if defined (__STDC__) || defined (__cplusplus) -int yyparse (void *YYPARSE_PARAM); -# else -int yyparse (); -# endif -#else /* ! YYPARSE_PARAM */ -#if defined (__STDC__) || defined (__cplusplus) -int yyparse (void); -#else -int yyparse (); -#endif -#endif /* ! YYPARSE_PARAM */ - - - -/* The lookahead symbol. 
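The yystrlen/yystpcpy fallbacks above serve only the YYERROR_VERBOSE path: yystpcpy returns the address of the terminator it just wrote, so the verbose-error builder further down can chain "syntax error, unexpected X, expecting Y" fragments without rescanning the buffer. The idiom in isolation (this is the standard stpcpy contract the parser reimplements when glibc's is unavailable):

    #include <stdio.h>

    /* Copy src into dest; return the address of the new terminator. */
    static char *yystpcpy(char *dest, const char *src)
    {
        while ((*dest = *src++) != '\0')
            dest++;
        return dest;
    }

    int main(void)
    {
        char buf[64];
        char *p = yystpcpy(buf, "syntax error, unexpected ");
        p = yystpcpy(p, "STRING");     /* chained append, no strlen walk */
        printf("%s\n", buf);
        return 0;
    }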
*/ -int yychar; - -/* The semantic value of the lookahead symbol. */ -YYSTYPE yylval; - -/* Number of syntax errors so far. */ -int yynerrs; - - - -/*----------. -| yyparse. | -`----------*/ - -#ifdef YYPARSE_PARAM -# if defined (__STDC__) || defined (__cplusplus) -int yyparse (void *YYPARSE_PARAM) -# else -int yyparse (YYPARSE_PARAM) - void *YYPARSE_PARAM; -# endif -#else /* ! YYPARSE_PARAM */ -#if defined (__STDC__) || defined (__cplusplus) -int -yyparse (void) -#else -int -yyparse () - -#endif -#endif -{ - - register int yystate; - register int yyn; - int yyresult; - /* Number of tokens to shift before error messages enabled. */ - int yyerrstatus; - /* Lookahead token as an internal (translated) token number. */ - int yytoken = 0; - - /* Three stacks and their tools: - `yyss': related to states, - `yyvs': related to semantic values, - `yyls': related to locations. - - Refer to the stacks thru separate pointers, to allow yyoverflow - to reallocate them elsewhere. */ - - /* The state stack. */ - short yyssa[YYINITDEPTH]; - short *yyss = yyssa; - register short *yyssp; - - /* The semantic value stack. */ - YYSTYPE yyvsa[YYINITDEPTH]; - YYSTYPE *yyvs = yyvsa; - register YYSTYPE *yyvsp; - - - -#define YYPOPSTACK (yyvsp--, yyssp--) - - YYSIZE_T yystacksize = YYINITDEPTH; - - /* The variables used to return semantic value and location from the - action routines. */ - YYSTYPE yyval; - - - /* When reducing, the number of symbols on the RHS of the reduced - rule. */ - int yylen; - - YYDPRINTF ((stderr, "Starting parse\n")); - - yystate = 0; - yyerrstatus = 0; - yynerrs = 0; - yychar = YYEMPTY; /* Cause a token to be read. */ - - /* Initialize stack pointers. - Waste one element of value and location stack - so that they stay on the same level as the state stack. - The wasted elements are never initialized. */ - - yyssp = yyss; - yyvsp = yyvs; - - goto yysetstate; - -/*------------------------------------------------------------. -| yynewstate -- Push a new state, which is found in yystate. | -`------------------------------------------------------------*/ - yynewstate: - /* In all cases, when you get here, the value and location stacks - have just been pushed. so pushing a state here evens the stacks. - */ - yyssp++; - - yysetstate: - *yyssp = yystate; - - if (yyss + yystacksize - 1 <= yyssp) - { - /* Get the current used size of the three stacks, in elements. */ - YYSIZE_T yysize = yyssp - yyss + 1; - -#ifdef yyoverflow - { - /* Give user a chance to reallocate the stack. Use copies of - these so that the &'s don't force the real ones into - memory. */ - YYSTYPE *yyvs1 = yyvs; - short *yyss1 = yyss; - - - /* Each stack pointer address is followed by the size of the - data in use in that stack, in bytes. This used to be a - conditional around just the two extra args, but that might - be undefined if yyoverflow is a macro. */ - yyoverflow ("parser stack overflow", - &yyss1, yysize * sizeof (*yyssp), - &yyvs1, yysize * sizeof (*yyvsp), - - &yystacksize); - - yyss = yyss1; - yyvs = yyvs1; - } -#else /* no yyoverflow */ -# ifndef YYSTACK_RELOCATE - goto yyoverflowlab; -# else - /* Extend the stack our own way. */ - if (YYMAXDEPTH <= yystacksize) - goto yyoverflowlab; - yystacksize *= 2; - if (YYMAXDEPTH < yystacksize) - yystacksize = YYMAXDEPTH; - - { - short *yyss1 = yyss; - union yyalloc *yyptr = - (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); - if (! 
yyptr) - goto yyoverflowlab; - YYSTACK_RELOCATE (yyss); - YYSTACK_RELOCATE (yyvs); - -# undef YYSTACK_RELOCATE - if (yyss1 != yyssa) - YYSTACK_FREE (yyss1); - } -# endif -#endif /* no yyoverflow */ - - yyssp = yyss + yysize - 1; - yyvsp = yyvs + yysize - 1; - - - YYDPRINTF ((stderr, "Stack size increased to %lu\n", - (unsigned long int) yystacksize)); - - if (yyss + yystacksize - 1 <= yyssp) - YYABORT; - } - - YYDPRINTF ((stderr, "Entering state %d\n", yystate)); - - goto yybackup; - -/*-----------. -| yybackup. | -`-----------*/ -yybackup: - -/* Do appropriate processing given the current state. */ -/* Read a lookahead token if we need one and don't already have one. */ -/* yyresume: */ - - /* First try to decide what to do without reference to lookahead token. */ - - yyn = yypact[yystate]; - if (yyn == YYPACT_NINF) - goto yydefault; - - /* Not known => get a lookahead token if don't already have one. */ - - /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ - if (yychar == YYEMPTY) - { - YYDPRINTF ((stderr, "Reading a token: ")); - yychar = YYLEX; - } - - if (yychar <= YYEOF) - { - yychar = yytoken = YYEOF; - YYDPRINTF ((stderr, "Now at end of input.\n")); - } - else - { - yytoken = YYTRANSLATE (yychar); - YYDSYMPRINTF ("Next token is", yytoken, &yylval, &yylloc); - } - - /* If the proper action on seeing token YYTOKEN is to reduce or to - detect an error, take that action. */ - yyn += yytoken; - if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) - goto yydefault; - yyn = yytable[yyn]; - if (yyn <= 0) - { - if (yyn == 0 || yyn == YYTABLE_NINF) - goto yyerrlab; - yyn = -yyn; - goto yyreduce; - } - - if (yyn == YYFINAL) - YYACCEPT; - - /* Shift the lookahead token. */ - YYDPRINTF ((stderr, "Shifting token %s, ", yytname[yytoken])); - - /* Discard the token being shifted unless it is eof. */ - if (yychar != YYEOF) - yychar = YYEMPTY; - - *++yyvsp = yylval; - - - /* Count tokens shifted since error; after three, turn off error - status. */ - if (yyerrstatus) - yyerrstatus--; - - yystate = yyn; - goto yynewstate; - - -/*-----------------------------------------------------------. -| yydefault -- do the default action for the current state. | -`-----------------------------------------------------------*/ -yydefault: - yyn = yydefact[yystate]; - if (yyn == 0) - goto yyerrlab; - goto yyreduce; - - -/*-----------------------------. -| yyreduce -- Do a reduction. | -`-----------------------------*/ -yyreduce: - /* yyn is the number of a rule to reduce with. */ - yylen = yyr2[yyn]; - - /* If YYLEN is nonzero, implement the default value of the action: - `$$ = $1'. - - Otherwise, the following line sets YYVAL to garbage. - This behavior is undocumented and Bison - users should not rely upon it. Assigning to YYVAL - unconditionally makes the parser a bit smaller, and it avoids a - GCC warning that YYVAL may be used uninitialized. 
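The comment above introduces the implicit `$$ = $1` convention: the `yyval = yyvsp[1-yylen]` assignment that follows preseeds $$ with the first right-hand-side value before a rule's action runs, and afterwards the yylen popped values are replaced by the single result. One reduction step in isolation, using int values to match this parser's fallback YYSTYPE:

    #include <stdio.h>

    typedef int YYSTYPE;   /* matches the fallback typedef above */

    /* Reduce a rule with yylen RHS symbols: preseed $$ = $1, pop the
       RHS values, push the result; returns the new stack top. */
    static YYSTYPE *reduce(YYSTYPE *yyvsp, int yylen)
    {
        YYSTYPE yyval = yyvsp[1 - yylen];   /* implicit $$ = $1 */
        /* ...a rule action may overwrite yyval here... */
        yyvsp -= yylen;
        *++yyvsp = yyval;
        return yyvsp;
    }

    int main(void)
    {
        YYSTYPE stack[8] = { 0, 7, 11, 42 }; /* $1=7 $2=11 $3=42 on top */
        YYSTYPE *top = &stack[3];
        top = reduce(top, 3);                /* pop three, push $$ */
        printf("new top = %d\n", *top);      /* 7: the default $$ = $1 */
        return 0;
    }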
*/ - yyval = yyvsp[1-yylen]; - - - YY_REDUCE_PRINT (yyn); - switch (yyn) - { - case 3: -#line 142 "jamgram.y" - { parse_save( yyvsp[0].parse ); } - break; - - case 4: -#line 153 "jamgram.y" - { yyval.parse = yyvsp[0].parse; } - break; - - case 5: -#line 155 "jamgram.y" - { yyval.parse = yyvsp[0].parse; } - break; - - case 6: -#line 159 "jamgram.y" - { yyval.parse = yyvsp[0].parse; } - break; - - case 7: -#line 161 "jamgram.y" - { yyval.parse = prules( yyvsp[-1].parse, yyvsp[0].parse ); } - break; - - case 8: -#line 163 "jamgram.y" - { yyval.parse = plocal( yyvsp[-3].parse, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 9: -#line 167 "jamgram.y" - { yyval.parse = pnull(); } - break; - - case 10: -#line 171 "jamgram.y" - { yyval.parse = yyvsp[0].parse; yyval.number = ASSIGN_SET; } - break; - - case 11: -#line 173 "jamgram.y" - { yyval.parse = yyvsp[0].parse; yyval.number = ASSIGN_APPEND; } - break; - - case 12: -#line 177 "jamgram.y" - { yyval.parse = yyvsp[-1].parse; } - break; - - case 13: -#line 179 "jamgram.y" - { yyval.parse = P0; } - break; - - case 14: -#line 183 "jamgram.y" - { yyval.number = 1; } - break; - - case 15: -#line 185 "jamgram.y" - { yyval.number = 0; } - break; - - case 16: -#line 189 "jamgram.y" - { yyval.parse = yyvsp[-1].parse; } - break; - - case 17: -#line 191 "jamgram.y" - { yyval.parse = pincl( yyvsp[-1].parse ); } - break; - - case 18: -#line 193 "jamgram.y" - { yyval.parse = prule( yyvsp[-2].string, yyvsp[-1].parse ); } - break; - - case 19: -#line 195 "jamgram.y" - { yyval.parse = pset( yyvsp[-3].parse, yyvsp[-1].parse, yyvsp[-2].number ); } - break; - - case 20: -#line 197 "jamgram.y" - { yyval.parse = pset1( yyvsp[-5].parse, yyvsp[-3].parse, yyvsp[-1].parse, yyvsp[-2].number ); } - break; - - case 21: -#line 199 "jamgram.y" - { yyval.parse = yyvsp[-1].parse; } - break; - - case 22: -#line 201 "jamgram.y" - { yyval.parse = pfor( yyvsp[-5].string, yyvsp[-3].parse, yyvsp[-1].parse, yyvsp[-6].number ); } - break; - - case 23: -#line 203 "jamgram.y" - { yyval.parse = pswitch( yyvsp[-3].parse, yyvsp[-1].parse ); } - break; - - case 24: -#line 205 "jamgram.y" - { yyval.parse = pif( yyvsp[-3].parse, yyvsp[-1].parse, pnull() ); } - break; - - case 25: -#line 207 "jamgram.y" - { yyval.parse = pmodule( yyvsp[-3].parse, yyvsp[-1].parse ); } - break; - - case 26: -#line 209 "jamgram.y" - { yyval.parse = pclass( yyvsp[-3].parse, yyvsp[-1].parse ); } - break; - - case 27: -#line 211 "jamgram.y" - { yyval.parse = pwhile( yyvsp[-3].parse, yyvsp[-1].parse ); } - break; - - case 28: -#line 213 "jamgram.y" - { yyval.parse = pif( yyvsp[-5].parse, yyvsp[-3].parse, yyvsp[0].parse ); } - break; - - case 29: -#line 215 "jamgram.y" - { yyval.parse = psetc( yyvsp[-2].string, yyvsp[0].parse, yyvsp[-1].parse, yyvsp[-4].number ); } - break; - - case 30: -#line 217 "jamgram.y" - { yyval.parse = pon( yyvsp[-1].parse, yyvsp[0].parse ); } - break; - - case 31: -#line 219 "jamgram.y" - { yymode( SCAN_STRING ); } - break; - - case 32: -#line 221 "jamgram.y" - { yymode( SCAN_NORMAL ); } - break; - - case 33: -#line 223 "jamgram.y" - { yyval.parse = psete( yyvsp[-6].string,yyvsp[-5].parse,yyvsp[-2].string,yyvsp[-7].number ); } - break; - - case 34: -#line 231 "jamgram.y" - { yyval.number = ASSIGN_SET; } - break; - - case 35: -#line 233 "jamgram.y" - { yyval.number = ASSIGN_APPEND; } - break; - - case 36: -#line 235 "jamgram.y" - { yyval.number = ASSIGN_DEFAULT; } - break; - - case 37: -#line 237 "jamgram.y" - { yyval.number = ASSIGN_DEFAULT; } - break; - - case 38: -#line 244 "jamgram.y" 
- { yyval.parse = peval( EXPR_EXISTS, yyvsp[0].parse, pnull() ); } - break; - - case 39: -#line 246 "jamgram.y" - { yyval.parse = peval( EXPR_EQUALS, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 40: -#line 248 "jamgram.y" - { yyval.parse = peval( EXPR_NOTEQ, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 41: -#line 250 "jamgram.y" - { yyval.parse = peval( EXPR_LESS, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 42: -#line 252 "jamgram.y" - { yyval.parse = peval( EXPR_LESSEQ, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 43: -#line 254 "jamgram.y" - { yyval.parse = peval( EXPR_MORE, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 44: -#line 256 "jamgram.y" - { yyval.parse = peval( EXPR_MOREEQ, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 45: -#line 258 "jamgram.y" - { yyval.parse = peval( EXPR_AND, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 46: -#line 260 "jamgram.y" - { yyval.parse = peval( EXPR_AND, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 47: -#line 262 "jamgram.y" - { yyval.parse = peval( EXPR_OR, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 48: -#line 264 "jamgram.y" - { yyval.parse = peval( EXPR_OR, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 49: -#line 266 "jamgram.y" - { yyval.parse = peval( EXPR_IN, yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 50: -#line 268 "jamgram.y" - { yyval.parse = peval( EXPR_NOT, yyvsp[0].parse, pnull() ); } - break; - - case 51: -#line 270 "jamgram.y" - { yyval.parse = yyvsp[-1].parse; } - break; - - case 52: -#line 281 "jamgram.y" - { yyval.parse = P0; } - break; - - case 53: -#line 283 "jamgram.y" - { yyval.parse = pnode( yyvsp[-1].parse, yyvsp[0].parse ); } - break; - - case 54: -#line 287 "jamgram.y" - { yyval.parse = psnode( yyvsp[-2].string, yyvsp[0].parse ); } - break; - - case 55: -#line 296 "jamgram.y" - { yyval.parse = pnode( P0, yyvsp[0].parse ); } - break; - - case 56: -#line 298 "jamgram.y" - { yyval.parse = pnode( yyvsp[0].parse, yyvsp[-2].parse ); } - break; - - case 57: -#line 308 "jamgram.y" - { yyval.parse = yyvsp[0].parse; yymode( SCAN_NORMAL ); } - break; - - case 58: -#line 312 "jamgram.y" - { yyval.parse = pnull(); yymode( SCAN_PUNCT ); } - break; - - case 59: -#line 314 "jamgram.y" - { yyval.parse = pappend( yyvsp[-1].parse, yyvsp[0].parse ); } - break; - - case 60: -#line 318 "jamgram.y" - { yyval.parse = plist( yyvsp[0].string ); } - break; - - case 61: -#line 319 "jamgram.y" - { yymode( SCAN_NORMAL ); } - break; - - case 62: -#line 320 "jamgram.y" - { yyval.parse = yyvsp[-1].parse; } - break; - - case 63: -#line 329 "jamgram.y" - { yyval.parse = prule( yyvsp[-1].string, yyvsp[0].parse ); } - break; - - case 64: -#line 331 "jamgram.y" - { yyval.parse = pon( yyvsp[-2].parse, prule( yyvsp[-1].string, yyvsp[0].parse ) ); } - break; - - case 65: -#line 333 "jamgram.y" - { yyval.parse = pon( yyvsp[-2].parse, yyvsp[0].parse ); } - break; - - case 66: -#line 343 "jamgram.y" - { yyval.number = 0; } - break; - - case 67: -#line 345 "jamgram.y" - { yyval.number = yyvsp[-1].number | yyvsp[0].number; } - break; - - case 68: -#line 349 "jamgram.y" - { yyval.number = EXEC_UPDATED; } - break; - - case 69: -#line 351 "jamgram.y" - { yyval.number = EXEC_TOGETHER; } - break; - - case 70: -#line 353 "jamgram.y" - { yyval.number = EXEC_IGNORE; } - break; - - case 71: -#line 355 "jamgram.y" - { yyval.number = EXEC_QUIETLY; } - break; - - case 72: -#line 357 "jamgram.y" - { yyval.number = EXEC_PIECEMEAL; } - break; - - case 73: -#line 359 
"jamgram.y" - { yyval.number = EXEC_EXISTING; } - break; - - case 74: -#line 368 "jamgram.y" - { yyval.parse = pnull(); } - break; - - case 75: -#line 370 "jamgram.y" - { yyval.parse = yyvsp[0].parse; } - break; - - - } - -/* Line 991 of yacc.c. */ -#line 1621 "y.tab.c" - - yyvsp -= yylen; - yyssp -= yylen; - - - YY_STACK_PRINT (yyss, yyssp); - - *++yyvsp = yyval; - - - /* Now `shift' the result of the reduction. Determine what state - that goes to, based on the state we popped back to and the rule - number reduced by. */ - - yyn = yyr1[yyn]; - - yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; - if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) - yystate = yytable[yystate]; - else - yystate = yydefgoto[yyn - YYNTOKENS]; - - goto yynewstate; - - -/*------------------------------------. -| yyerrlab -- here on detecting error | -`------------------------------------*/ -yyerrlab: - /* If not already recovering from an error, report this error. */ - if (!yyerrstatus) - { - ++yynerrs; -#if YYERROR_VERBOSE - yyn = yypact[yystate]; - - if (YYPACT_NINF < yyn && yyn < YYLAST) - { - YYSIZE_T yysize = 0; - int yytype = YYTRANSLATE (yychar); - char *yymsg; - int yyx, yycount; - - yycount = 0; - /* Start YYX at -YYN if negative to avoid negative indexes in - YYCHECK. */ - for (yyx = yyn < 0 ? -yyn : 0; - yyx < (int) (sizeof (yytname) / sizeof (char *)); yyx++) - if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR) - yysize += yystrlen (yytname[yyx]) + 15, yycount++; - yysize += yystrlen ("syntax error, unexpected ") + 1; - yysize += yystrlen (yytname[yytype]); - yymsg = (char *) YYSTACK_ALLOC (yysize); - if (yymsg != 0) - { - char *yyp = yystpcpy (yymsg, "syntax error, unexpected "); - yyp = yystpcpy (yyp, yytname[yytype]); - - if (yycount < 5) - { - yycount = 0; - for (yyx = yyn < 0 ? -yyn : 0; - yyx < (int) (sizeof (yytname) / sizeof (char *)); - yyx++) - if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR) - { - const char *yyq = ! yycount ? ", expecting " : " or "; - yyp = yystpcpy (yyp, yyq); - yyp = yystpcpy (yyp, yytname[yyx]); - yycount++; - } - } - yyerror (yymsg); - YYSTACK_FREE (yymsg); - } - else - yyerror ("syntax error; also virtual memory exhausted"); - } - else -#endif /* YYERROR_VERBOSE */ - yyerror ("syntax error"); - } - - - - if (yyerrstatus == 3) - { - /* If just tried and failed to reuse lookahead token after an - error, discard it. */ - - /* Return failure if at end of input. */ - if (yychar == YYEOF) - { - /* Pop the error token. */ - YYPOPSTACK; - /* Pop the rest of the stack. */ - while (yyss < yyssp) - { - YYDSYMPRINTF ("Error: popping", yystos[*yyssp], yyvsp, yylsp); - yydestruct (yystos[*yyssp], yyvsp); - YYPOPSTACK; - } - YYABORT; - } - - YYDSYMPRINTF ("Error: discarding", yytoken, &yylval, &yylloc); - yydestruct (yytoken, &yylval); - yychar = YYEMPTY; - - } - - /* Else will try to reuse lookahead token after shifting the error - token. */ - goto yyerrlab2; - - -/*----------------------------------------------------. -| yyerrlab1 -- error raised explicitly by an action. | -`----------------------------------------------------*/ -yyerrlab1: - - /* Suppress GCC warning that yyerrlab1 is unused when no action - invokes YYERROR. */ -#if defined (__GNUC_MINOR__) && 2093 <= (__GNUC__ * 1000 + __GNUC_MINOR__) - __attribute__ ((__unused__)) -#endif - - - goto yyerrlab2; - - -/*---------------------------------------------------------------. -| yyerrlab2 -- pop states until the error token can be shifted. 
| -`---------------------------------------------------------------*/ -yyerrlab2: - yyerrstatus = 3; /* Each real token shifted decrements this. */ - - for (;;) - { - yyn = yypact[yystate]; - if (yyn != YYPACT_NINF) - { - yyn += YYTERROR; - if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) - { - yyn = yytable[yyn]; - if (0 < yyn) - break; - } - } - - /* Pop the current state because it cannot handle the error token. */ - if (yyssp == yyss) - YYABORT; - - YYDSYMPRINTF ("Error: popping", yystos[*yyssp], yyvsp, yylsp); - yydestruct (yystos[yystate], yyvsp); - yyvsp--; - yystate = *--yyssp; - - YY_STACK_PRINT (yyss, yyssp); - } - - if (yyn == YYFINAL) - YYACCEPT; - - YYDPRINTF ((stderr, "Shifting error token, ")); - - *++yyvsp = yylval; - - - yystate = yyn; - goto yynewstate; - - -/*-------------------------------------. -| yyacceptlab -- YYACCEPT comes here. | -`-------------------------------------*/ -yyacceptlab: - yyresult = 0; - goto yyreturn; - -/*-----------------------------------. -| yyabortlab -- YYABORT comes here. | -`-----------------------------------*/ -yyabortlab: - yyresult = 1; - goto yyreturn; - -#ifndef yyoverflow -/*----------------------------------------------. -| yyoverflowlab -- parser overflow comes here. | -`----------------------------------------------*/ -yyoverflowlab: - yyerror ("parser stack overflow"); - yyresult = 2; - /* Fall through. */ -#endif - -yyreturn: -#ifndef yyoverflow - if (yyss != yyssa) - YYSTACK_FREE (yyss); -#endif - return yyresult; -} - - - diff --git a/jam-files/engine/jamgram.h b/jam-files/engine/jamgram.h deleted file mode 100644 index 3cb76564..00000000 --- a/jam-files/engine/jamgram.h +++ /dev/null @@ -1,140 +0,0 @@ -/* A Bison parser, made by GNU Bison 1.875. */ - -/* Skeleton parser for Yacc-like parsing with Bison, - Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002 Free Software Foundation, Inc. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place - Suite 330, - Boston, MA 02111-1307, USA. */ - -/* As a special exception, when this file is copied by Bison into a - Bison output file, you may use that output file without restriction. - This special exception was added by the Free Software Foundation - in version 1.24 of Bison. */ - -/* Tokens. */ -#ifndef YYTOKENTYPE -# define YYTOKENTYPE - /* Put the tokens into the symbol table, so that GDB and other debuggers - know about them. 
*/ - enum yytokentype { - _BANG_t = 258, - _BANG_EQUALS_t = 259, - _AMPER_t = 260, - _AMPERAMPER_t = 261, - _LPAREN_t = 262, - _RPAREN_t = 263, - _PLUS_EQUALS_t = 264, - _COLON_t = 265, - _SEMIC_t = 266, - _LANGLE_t = 267, - _LANGLE_EQUALS_t = 268, - _EQUALS_t = 269, - _RANGLE_t = 270, - _RANGLE_EQUALS_t = 271, - _QUESTION_EQUALS_t = 272, - _LBRACKET_t = 273, - _RBRACKET_t = 274, - ACTIONS_t = 275, - BIND_t = 276, - CASE_t = 277, - CLASS_t = 278, - DEFAULT_t = 279, - ELSE_t = 280, - EXISTING_t = 281, - FOR_t = 282, - IF_t = 283, - IGNORE_t = 284, - IN_t = 285, - INCLUDE_t = 286, - LOCAL_t = 287, - MODULE_t = 288, - ON_t = 289, - PIECEMEAL_t = 290, - QUIETLY_t = 291, - RETURN_t = 292, - RULE_t = 293, - SWITCH_t = 294, - TOGETHER_t = 295, - UPDATED_t = 296, - WHILE_t = 297, - _LBRACE_t = 298, - _BAR_t = 299, - _BARBAR_t = 300, - _RBRACE_t = 301, - ARG = 302, - STRING = 303 - }; -#endif -#define _BANG_t 258 -#define _BANG_EQUALS_t 259 -#define _AMPER_t 260 -#define _AMPERAMPER_t 261 -#define _LPAREN_t 262 -#define _RPAREN_t 263 -#define _PLUS_EQUALS_t 264 -#define _COLON_t 265 -#define _SEMIC_t 266 -#define _LANGLE_t 267 -#define _LANGLE_EQUALS_t 268 -#define _EQUALS_t 269 -#define _RANGLE_t 270 -#define _RANGLE_EQUALS_t 271 -#define _QUESTION_EQUALS_t 272 -#define _LBRACKET_t 273 -#define _RBRACKET_t 274 -#define ACTIONS_t 275 -#define BIND_t 276 -#define CASE_t 277 -#define CLASS_t 278 -#define DEFAULT_t 279 -#define ELSE_t 280 -#define EXISTING_t 281 -#define FOR_t 282 -#define IF_t 283 -#define IGNORE_t 284 -#define IN_t 285 -#define INCLUDE_t 286 -#define LOCAL_t 287 -#define MODULE_t 288 -#define ON_t 289 -#define PIECEMEAL_t 290 -#define QUIETLY_t 291 -#define RETURN_t 292 -#define RULE_t 293 -#define SWITCH_t 294 -#define TOGETHER_t 295 -#define UPDATED_t 296 -#define WHILE_t 297 -#define _LBRACE_t 298 -#define _BAR_t 299 -#define _BARBAR_t 300 -#define _RBRACE_t 301 -#define ARG 302 -#define STRING 303 - - - - -#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED) -typedef int YYSTYPE; -# define yystype YYSTYPE /* obsolescent; will be withdrawn */ -# define YYSTYPE_IS_DECLARED 1 -# define YYSTYPE_IS_TRIVIAL 1 -#endif - -extern YYSTYPE yylval; - - - diff --git a/jam-files/engine/jamgram.y b/jam-files/engine/jamgram.y deleted file mode 100644 index c26b1e1b..00000000 --- a/jam-files/engine/jamgram.y +++ /dev/null @@ -1,371 +0,0 @@ -%token _BANG_t -%token _BANG_EQUALS_t -%token _AMPER_t -%token _AMPERAMPER_t -%token _LPAREN_t -%token _RPAREN_t -%token _PLUS_EQUALS_t -%token _COLON_t -%token _SEMIC_t -%token _LANGLE_t -%token _LANGLE_EQUALS_t -%token _EQUALS_t -%token _RANGLE_t -%token _RANGLE_EQUALS_t -%token _QUESTION_EQUALS_t -%token _LBRACKET_t -%token _RBRACKET_t -%token ACTIONS_t -%token BIND_t -%token CASE_t -%token CLASS_t -%token DEFAULT_t -%token ELSE_t -%token EXISTING_t -%token FOR_t -%token IF_t -%token IGNORE_t -%token IN_t -%token INCLUDE_t -%token LOCAL_t -%token MODULE_t -%token ON_t -%token PIECEMEAL_t -%token QUIETLY_t -%token RETURN_t -%token RULE_t -%token SWITCH_t -%token TOGETHER_t -%token UPDATED_t -%token WHILE_t -%token _LBRACE_t -%token _BAR_t -%token _BARBAR_t -%token _RBRACE_t -/* - * Copyright 1993, 2000 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. 
- * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -/* - * jamgram.yy - jam grammar - * - * 04/13/94 (seiwald) - added shorthand L0 for null list pointer - * 06/01/94 (seiwald) - new 'actions existing' does existing sources - * 08/23/94 (seiwald) - Support for '+=' (append to variable) - * 08/31/94 (seiwald) - Allow ?= as alias for "default =". - * 09/15/94 (seiwald) - if conditionals take only single arguments, so - * that 'if foo == bar' gives syntax error (use =). - * 02/11/95 (seiwald) - when scanning arguments to rules, only treat - * punctuation keywords as keywords. All arg lists - * are terminated with punctuation keywords. - * - * 09/11/00 (seiwald) - Support for function calls: - * - * Rules now return lists (LIST *), rather than void. - * - * New "[ rule ]" syntax evals rule into a LIST. - * - * Lists are now generated by compile_list() and - * compile_append(), and any other rule that indirectly - * makes a list, rather than being built directly here, - * so that lists values can contain rule evaluations. - * - * New 'return' rule sets the return value, though - * other statements also may have return values. - * - * 'run' production split from 'block' production so - * that empty blocks can be handled separately. - */ - -%token ARG STRING - -%left _BARBAR_t _BAR_t -%left _AMPERAMPER_t _AMPER_t -%left _EQUALS_t _BANG_EQUALS_t IN_t -%left _LANGLE_t _LANGLE_EQUALS_t _RANGLE_t _RANGLE_EQUALS_t -%left _BANG_t - -%{ -#include "jam.h" - -#include "lists.h" -#include "parse.h" -#include "scan.h" -#include "compile.h" -#include "newstr.h" -#include "rules.h" - -# define YYMAXDEPTH 10000 /* for OSF and other less endowed yaccs */ - -# define F0 (LIST *(*)(PARSE *, FRAME *))0 -# define P0 (PARSE *)0 -# define S0 (char *)0 - -# define pappend( l,r ) parse_make( compile_append,l,r,P0,S0,S0,0 ) -# define peval( c,l,r ) parse_make( compile_eval,l,r,P0,S0,S0,c ) -# define pfor( s,l,r,x ) parse_make( compile_foreach,l,r,P0,s,S0,x ) -# define pif( l,r,t ) parse_make( compile_if,l,r,t,S0,S0,0 ) -# define pincl( l ) parse_make( compile_include,l,P0,P0,S0,S0,0 ) -# define plist( s ) parse_make( compile_list,P0,P0,P0,s,S0,0 ) -# define plocal( l,r,t ) parse_make( compile_local,l,r,t,S0,S0,0 ) -# define pmodule( l,r ) parse_make( compile_module,l,r,P0,S0,S0,0 ) -# define pclass( l,r ) parse_make( compile_class,l,r,P0,S0,S0,0 ) -# define pnull() parse_make( compile_null,P0,P0,P0,S0,S0,0 ) -# define pon( l,r ) parse_make( compile_on,l,r,P0,S0,S0,0 ) -# define prule( s,p ) parse_make( compile_rule,p,P0,P0,s,S0,0 ) -# define prules( l,r ) parse_make( compile_rules,l,r,P0,S0,S0,0 ) -# define pset( l,r,a ) parse_make( compile_set,l,r,P0,S0,S0,a ) -# define pset1( l,r,t,a ) parse_make( compile_settings,l,r,t,S0,S0,a ) -# define psetc( s,p,a,l ) parse_make( compile_setcomp,p,a,P0,s,S0,l ) -# define psete( s,l,s1,f ) parse_make( compile_setexec,l,P0,P0,s,s1,f ) -# define pswitch( l,r ) parse_make( compile_switch,l,r,P0,S0,S0,0 ) -# define pwhile( l,r ) parse_make( compile_while,l,r,P0,S0,S0,0 ) - -# define pnode( l,r ) parse_make( F0,l,r,P0,S0,S0,0 ) -# define psnode( s,l ) parse_make( F0,l,P0,P0,s,S0,0 ) - -%} - -%% - -run : /* empty */ - /* do nothing */ - | rules - { parse_save( $1.parse ); } - ; - -/* - * block - zero or more rules - * rules - one or more rules - * rule - any one of jam's rules - * right-recursive so rules execute in order. 
- */ - -block : null - { $$.parse = $1.parse; } - | rules - { $$.parse = $1.parse; } - ; - -rules : rule - { $$.parse = $1.parse; } - | rule rules - { $$.parse = prules( $1.parse, $2.parse ); } - | LOCAL_t list assign_list_opt _SEMIC_t block - { $$.parse = plocal( $2.parse, $3.parse, $5.parse ); } - ; - -null : /* empty */ - { $$.parse = pnull(); } - ; - -assign_list_opt : _EQUALS_t list - { $$.parse = $2.parse; $$.number = ASSIGN_SET; } - | null - { $$.parse = $1.parse; $$.number = ASSIGN_APPEND; } - ; - -arglist_opt : _LPAREN_t lol _RPAREN_t - { $$.parse = $2.parse; } - | - { $$.parse = P0; } - ; - -local_opt : LOCAL_t - { $$.number = 1; } - | /* empty */ - { $$.number = 0; } - ; - -rule : _LBRACE_t block _RBRACE_t - { $$.parse = $2.parse; } - | INCLUDE_t list _SEMIC_t - { $$.parse = pincl( $2.parse ); } - | ARG lol _SEMIC_t - { $$.parse = prule( $1.string, $2.parse ); } - | arg assign list _SEMIC_t - { $$.parse = pset( $1.parse, $3.parse, $2.number ); } - | arg ON_t list assign list _SEMIC_t - { $$.parse = pset1( $1.parse, $3.parse, $5.parse, $4.number ); } - | RETURN_t list _SEMIC_t - { $$.parse = $2.parse; } - | FOR_t local_opt ARG IN_t list _LBRACE_t block _RBRACE_t - { $$.parse = pfor( $3.string, $5.parse, $7.parse, $2.number ); } - | SWITCH_t list _LBRACE_t cases _RBRACE_t - { $$.parse = pswitch( $2.parse, $4.parse ); } - | IF_t expr _LBRACE_t block _RBRACE_t - { $$.parse = pif( $2.parse, $4.parse, pnull() ); } - | MODULE_t list _LBRACE_t block _RBRACE_t - { $$.parse = pmodule( $2.parse, $4.parse ); } - | CLASS_t lol _LBRACE_t block _RBRACE_t - { $$.parse = pclass( $2.parse, $4.parse ); } - | WHILE_t expr _LBRACE_t block _RBRACE_t - { $$.parse = pwhile( $2.parse, $4.parse ); } - | IF_t expr _LBRACE_t block _RBRACE_t ELSE_t rule - { $$.parse = pif( $2.parse, $4.parse, $7.parse ); } - | local_opt RULE_t ARG arglist_opt rule - { $$.parse = psetc( $3.string, $5.parse, $4.parse, $1.number ); } - | ON_t arg rule - { $$.parse = pon( $2.parse, $3.parse ); } - | ACTIONS_t eflags ARG bindlist _LBRACE_t - { yymode( SCAN_STRING ); } - STRING - { yymode( SCAN_NORMAL ); } - _RBRACE_t - { $$.parse = psete( $3.string,$4.parse,$7.string,$2.number ); } - ; - -/* - * assign - = or += - */ - -assign : _EQUALS_t - { $$.number = ASSIGN_SET; } - | _PLUS_EQUALS_t - { $$.number = ASSIGN_APPEND; } - | _QUESTION_EQUALS_t - { $$.number = ASSIGN_DEFAULT; } - | DEFAULT_t _EQUALS_t - { $$.number = ASSIGN_DEFAULT; } - ; - -/* - * expr - an expression for if - */ -expr : arg - { $$.parse = peval( EXPR_EXISTS, $1.parse, pnull() ); } - | expr _EQUALS_t expr - { $$.parse = peval( EXPR_EQUALS, $1.parse, $3.parse ); } - | expr _BANG_EQUALS_t expr - { $$.parse = peval( EXPR_NOTEQ, $1.parse, $3.parse ); } - | expr _LANGLE_t expr - { $$.parse = peval( EXPR_LESS, $1.parse, $3.parse ); } - | expr _LANGLE_EQUALS_t expr - { $$.parse = peval( EXPR_LESSEQ, $1.parse, $3.parse ); } - | expr _RANGLE_t expr - { $$.parse = peval( EXPR_MORE, $1.parse, $3.parse ); } - | expr _RANGLE_EQUALS_t expr - { $$.parse = peval( EXPR_MOREEQ, $1.parse, $3.parse ); } - | expr _AMPER_t expr - { $$.parse = peval( EXPR_AND, $1.parse, $3.parse ); } - | expr _AMPERAMPER_t expr - { $$.parse = peval( EXPR_AND, $1.parse, $3.parse ); } - | expr _BAR_t expr - { $$.parse = peval( EXPR_OR, $1.parse, $3.parse ); } - | expr _BARBAR_t expr - { $$.parse = peval( EXPR_OR, $1.parse, $3.parse ); } - | arg IN_t list - { $$.parse = peval( EXPR_IN, $1.parse, $3.parse ); } - | _BANG_t expr - { $$.parse = peval( EXPR_NOT, $2.parse, pnull() ); } - | _LPAREN_t expr 
_RPAREN_t - { $$.parse = $2.parse; } - ; - - -/* - * cases - action elements inside a 'switch' - * case - a single action element inside a 'switch' - * right-recursive rule so cases can be examined in order. - */ - -cases : /* empty */ - { $$.parse = P0; } - | case cases - { $$.parse = pnode( $1.parse, $2.parse ); } - ; - -case : CASE_t ARG _COLON_t block - { $$.parse = psnode( $2.string, $4.parse ); } - ; - -/* - * lol - list of lists - * right-recursive rule so that lists can be added in order. - */ - -lol : list - { $$.parse = pnode( P0, $1.parse ); } - | list _COLON_t lol - { $$.parse = pnode( $3.parse, $1.parse ); } - ; - -/* - * list - zero or more args in a LIST - * listp - list (in puncutation only mode) - * arg - one ARG or function call - */ - -list : listp - { $$.parse = $1.parse; yymode( SCAN_NORMAL ); } - ; - -listp : /* empty */ - { $$.parse = pnull(); yymode( SCAN_PUNCT ); } - | listp arg - { $$.parse = pappend( $1.parse, $2.parse ); } - ; - -arg : ARG - { $$.parse = plist( $1.string ); } - | _LBRACKET_t { yymode( SCAN_NORMAL ); } func _RBRACKET_t - { $$.parse = $3.parse; } - ; - -/* - * func - a function call (inside []) - * This needs to be split cleanly out of 'rule' - */ - -func : arg lol - { $$.parse = prule( $1.string, $2.parse ); } - | ON_t arg arg lol - { $$.parse = pon( $2.parse, prule( $3.string, $4.parse ) ); } - | ON_t arg RETURN_t list - { $$.parse = pon( $2.parse, $4.parse ); } - ; - - -/* - * eflags - zero or more modifiers to 'executes' - * eflag - a single modifier to 'executes' - */ - -eflags : /* empty */ - { $$.number = 0; } - | eflags eflag - { $$.number = $1.number | $2.number; } - ; - -eflag : UPDATED_t - { $$.number = EXEC_UPDATED; } - | TOGETHER_t - { $$.number = EXEC_TOGETHER; } - | IGNORE_t - { $$.number = EXEC_IGNORE; } - | QUIETLY_t - { $$.number = EXEC_QUIETLY; } - | PIECEMEAL_t - { $$.number = EXEC_PIECEMEAL; } - | EXISTING_t - { $$.number = EXEC_EXISTING; } - ; - - -/* - * bindlist - list of variable to bind for an action - */ - -bindlist : /* empty */ - { $$.parse = pnull(); } - | BIND_t list - { $$.parse = $2.parse; } - ; diff --git a/jam-files/engine/jamgram.yy b/jam-files/engine/jamgram.yy deleted file mode 100644 index 15243487..00000000 --- a/jam-files/engine/jamgram.yy +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Copyright 1993, 2000 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -/* - * jamgram.yy - jam grammar - * - * 04/13/94 (seiwald) - added shorthand L0 for null list pointer - * 06/01/94 (seiwald) - new 'actions existing' does existing sources - * 08/23/94 (seiwald) - Support for '+=' (append to variable) - * 08/31/94 (seiwald) - Allow ?= as alias for "default =". - * 09/15/94 (seiwald) - if conditionals take only single arguments, so - * that 'if foo == bar' gives syntax error (use =). - * 02/11/95 (seiwald) - when scanning arguments to rules, only treat - * punctuation keywords as keywords. All arg lists - * are terminated with punctuation keywords. - * - * 09/11/00 (seiwald) - Support for function calls: - * - * Rules now return lists (LIST *), rather than void. - * - * New "[ rule ]" syntax evals rule into a LIST. 
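
A minimal sketch of the pattern behind the pX macros in the grammar above (illustrative only — the node shape, eval callbacks, and names here are invented stand-ins for jam's PARSE struct, parse_make(), and the compile_* functions): each semantic action merely records a callback plus operand subtrees; nothing is interpreted until the tree is later evaluated.

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for jam's PARSE: one eval callback plus two children. */
    typedef struct node NODE;
    struct node {
        const char *(*eval)(NODE *);   /* stand-in for compile_if, compile_rules, ... */
        NODE *left, *right;
        const char *text;
    };

    static NODE *node_make(const char *(*eval)(NODE *), NODE *l, NODE *r,
                           const char *text)
    {
        NODE *n = malloc(sizeof *n);
        n->eval = eval; n->left = l; n->right = r; n->text = text;
        return n;
    }

    static const char *eval_leaf(NODE *n) { return n->text; }

    static const char *eval_if(NODE *n)
    {
        /* a real compile_if would evaluate n->left and run n->right on success */
        printf("if (%s) then run <%s>\n",
               n->left->eval(n->left), n->right->eval(n->right));
        return "";
    }

    /* Analogue of the pif(l,r,t) macro above, minus the else branch. */
    #define pif_demo(l, r) node_make(eval_if, (l), (r), 0)

    int main(void)
    {
        NODE *cond = node_make(eval_leaf, 0, 0, "$(X) = 1");
        NODE *body = node_make(eval_leaf, 0, 0, "ECHO yes ;");
        NODE *tree = pif_demo(cond, body);
        tree->eval(tree);   /* prints: if ($(X) = 1) then run <ECHO yes ;> */
        return 0;
    }
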
- * - * Lists are now generated by compile_list() and - * compile_append(), and any other rule that indirectly - * makes a list, rather than being built directly here, - * so that lists values can contain rule evaluations. - * - * New 'return' rule sets the return value, though - * other statements also may have return values. - * - * 'run' production split from 'block' production so - * that empty blocks can be handled separately. - */ - -%token ARG STRING - -%left `||` `|` -%left `&&` `&` -%left `=` `!=` `in` -%left `<` `<=` `>` `>=` -%left `!` - -%{ -#include "jam.h" - -#include "lists.h" -#include "parse.h" -#include "scan.h" -#include "compile.h" -#include "newstr.h" -#include "rules.h" - -# define YYMAXDEPTH 10000 /* for OSF and other less endowed yaccs */ - -# define F0 (LIST *(*)(PARSE *, FRAME *))0 -# define P0 (PARSE *)0 -# define S0 (char *)0 - -# define pappend( l,r ) parse_make( compile_append,l,r,P0,S0,S0,0 ) -# define peval( c,l,r ) parse_make( compile_eval,l,r,P0,S0,S0,c ) -# define pfor( s,l,r,x ) parse_make( compile_foreach,l,r,P0,s,S0,x ) -# define pif( l,r,t ) parse_make( compile_if,l,r,t,S0,S0,0 ) -# define pincl( l ) parse_make( compile_include,l,P0,P0,S0,S0,0 ) -# define plist( s ) parse_make( compile_list,P0,P0,P0,s,S0,0 ) -# define plocal( l,r,t ) parse_make( compile_local,l,r,t,S0,S0,0 ) -# define pmodule( l,r ) parse_make( compile_module,l,r,P0,S0,S0,0 ) -# define pclass( l,r ) parse_make( compile_class,l,r,P0,S0,S0,0 ) -# define pnull() parse_make( compile_null,P0,P0,P0,S0,S0,0 ) -# define pon( l,r ) parse_make( compile_on,l,r,P0,S0,S0,0 ) -# define prule( s,p ) parse_make( compile_rule,p,P0,P0,s,S0,0 ) -# define prules( l,r ) parse_make( compile_rules,l,r,P0,S0,S0,0 ) -# define pset( l,r,a ) parse_make( compile_set,l,r,P0,S0,S0,a ) -# define pset1( l,r,t,a ) parse_make( compile_settings,l,r,t,S0,S0,a ) -# define psetc( s,p,a,l ) parse_make( compile_setcomp,p,a,P0,s,S0,l ) -# define psete( s,l,s1,f ) parse_make( compile_setexec,l,P0,P0,s,s1,f ) -# define pswitch( l,r ) parse_make( compile_switch,l,r,P0,S0,S0,0 ) -# define pwhile( l,r ) parse_make( compile_while,l,r,P0,S0,S0,0 ) - -# define pnode( l,r ) parse_make( F0,l,r,P0,S0,S0,0 ) -# define psnode( s,l ) parse_make( F0,l,P0,P0,s,S0,0 ) - -%} - -%% - -run : /* empty */ - /* do nothing */ - | rules - { parse_save( $1.parse ); } - ; - -/* - * block - zero or more rules - * rules - one or more rules - * rule - any one of jam's rules - * right-recursive so rules execute in order. 
- */ - -block : null - { $$.parse = $1.parse; } - | rules - { $$.parse = $1.parse; } - ; - -rules : rule - { $$.parse = $1.parse; } - | rule rules - { $$.parse = prules( $1.parse, $2.parse ); } - | `local` list assign_list_opt `;` block - { $$.parse = plocal( $2.parse, $3.parse, $5.parse ); } - ; - -null : /* empty */ - { $$.parse = pnull(); } - ; - -assign_list_opt : `=` list - { $$.parse = $2.parse; $$.number = ASSIGN_SET; } - | null - { $$.parse = $1.parse; $$.number = ASSIGN_APPEND; } - ; - -arglist_opt : `(` lol `)` - { $$.parse = $2.parse; } - | - { $$.parse = P0; } - ; - -local_opt : `local` - { $$.number = 1; } - | /* empty */ - { $$.number = 0; } - ; - -rule : `{` block `}` - { $$.parse = $2.parse; } - | `include` list `;` - { $$.parse = pincl( $2.parse ); } - | ARG lol `;` - { $$.parse = prule( $1.string, $2.parse ); } - | arg assign list `;` - { $$.parse = pset( $1.parse, $3.parse, $2.number ); } - | arg `on` list assign list `;` - { $$.parse = pset1( $1.parse, $3.parse, $5.parse, $4.number ); } - | `return` list `;` - { $$.parse = $2.parse; } - | `for` local_opt ARG `in` list `{` block `}` - { $$.parse = pfor( $3.string, $5.parse, $7.parse, $2.number ); } - | `switch` list `{` cases `}` - { $$.parse = pswitch( $2.parse, $4.parse ); } - | `if` expr `{` block `}` - { $$.parse = pif( $2.parse, $4.parse, pnull() ); } - | `module` list `{` block `}` - { $$.parse = pmodule( $2.parse, $4.parse ); } - | `class` lol `{` block `}` - { $$.parse = pclass( $2.parse, $4.parse ); } - | `while` expr `{` block `}` - { $$.parse = pwhile( $2.parse, $4.parse ); } - | `if` expr `{` block `}` `else` rule - { $$.parse = pif( $2.parse, $4.parse, $7.parse ); } - | local_opt `rule` ARG arglist_opt rule - { $$.parse = psetc( $3.string, $5.parse, $4.parse, $1.number ); } - | `on` arg rule - { $$.parse = pon( $2.parse, $3.parse ); } - | `actions` eflags ARG bindlist `{` - { yymode( SCAN_STRING ); } - STRING - { yymode( SCAN_NORMAL ); } - `}` - { $$.parse = psete( $3.string,$4.parse,$7.string,$2.number ); } - ; - -/* - * assign - = or += - */ - -assign : `=` - { $$.number = ASSIGN_SET; } - | `+=` - { $$.number = ASSIGN_APPEND; } - | `?=` - { $$.number = ASSIGN_DEFAULT; } - | `default` `=` - { $$.number = ASSIGN_DEFAULT; } - ; - -/* - * expr - an expression for if - */ -expr : arg - { $$.parse = peval( EXPR_EXISTS, $1.parse, pnull() ); } - | expr `=` expr - { $$.parse = peval( EXPR_EQUALS, $1.parse, $3.parse ); } - | expr `!=` expr - { $$.parse = peval( EXPR_NOTEQ, $1.parse, $3.parse ); } - | expr `<` expr - { $$.parse = peval( EXPR_LESS, $1.parse, $3.parse ); } - | expr `<=` expr - { $$.parse = peval( EXPR_LESSEQ, $1.parse, $3.parse ); } - | expr `>` expr - { $$.parse = peval( EXPR_MORE, $1.parse, $3.parse ); } - | expr `>=` expr - { $$.parse = peval( EXPR_MOREEQ, $1.parse, $3.parse ); } - | expr `&` expr - { $$.parse = peval( EXPR_AND, $1.parse, $3.parse ); } - | expr `&&` expr - { $$.parse = peval( EXPR_AND, $1.parse, $3.parse ); } - | expr `|` expr - { $$.parse = peval( EXPR_OR, $1.parse, $3.parse ); } - | expr `||` expr - { $$.parse = peval( EXPR_OR, $1.parse, $3.parse ); } - | arg `in` list - { $$.parse = peval( EXPR_IN, $1.parse, $3.parse ); } - | `!` expr - { $$.parse = peval( EXPR_NOT, $2.parse, pnull() ); } - | `(` expr `)` - { $$.parse = $2.parse; } - ; - - -/* - * cases - action elements inside a 'switch' - * case - a single action element inside a 'switch' - * right-recursive rule so cases can be examined in order. 
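
To see why the right recursion the comments above insist on preserves source order, consider a minimal stand-in for prules() (illustrative sketch; jam's real PARSE nodes carry compile callbacks rather than a bare next pointer): reducing "rule rules" makes the first statement the head of a chain whose tail is everything after it, so a head-first walk runs statements in the order written.

    #include <stdio.h>

    typedef struct chain { const char *stmt; struct chain *next; } CHAIN;

    /* Analogue of prules(l, r): first statement first, rest of the block second. */
    static CHAIN *prules_demo(CHAIN *l, CHAIN *r) { l->next = r; return l; }

    int main(void)
    {
        CHAIN a = { "ECHO one ;", 0 }, b = { "ECHO two ;", 0 },
              c = { "ECHO three ;", 0 };
        /* Right recursion reduces innermost first: c, then (b c), then (a (b c)). */
        CHAIN *block = prules_demo(&a, prules_demo(&b, &c));
        for (CHAIN *p = block; p; p = p->next)
            printf("%s\n", p->stmt);    /* prints the statements in source order */
        return 0;
    }
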
- */ - -cases : /* empty */ - { $$.parse = P0; } - | case cases - { $$.parse = pnode( $1.parse, $2.parse ); } - ; - -case : `case` ARG `:` block - { $$.parse = psnode( $2.string, $4.parse ); } - ; - -/* - * lol - list of lists - * right-recursive rule so that lists can be added in order. - */ - -lol : list - { $$.parse = pnode( P0, $1.parse ); } - | list `:` lol - { $$.parse = pnode( $3.parse, $1.parse ); } - ; - -/* - * list - zero or more args in a LIST - * listp - list (in puncutation only mode) - * arg - one ARG or function call - */ - -list : listp - { $$.parse = $1.parse; yymode( SCAN_NORMAL ); } - ; - -listp : /* empty */ - { $$.parse = pnull(); yymode( SCAN_PUNCT ); } - | listp arg - { $$.parse = pappend( $1.parse, $2.parse ); } - ; - -arg : ARG - { $$.parse = plist( $1.string ); } - | `[` { yymode( SCAN_NORMAL ); } func `]` - { $$.parse = $3.parse; } - ; - -/* - * func - a function call (inside []) - * This needs to be split cleanly out of 'rule' - */ - -func : arg lol - { $$.parse = prule( $1.string, $2.parse ); } - | `on` arg arg lol - { $$.parse = pon( $2.parse, prule( $3.string, $4.parse ) ); } - | `on` arg `return` list - { $$.parse = pon( $2.parse, $4.parse ); } - ; - - -/* - * eflags - zero or more modifiers to 'executes' - * eflag - a single modifier to 'executes' - */ - -eflags : /* empty */ - { $$.number = 0; } - | eflags eflag - { $$.number = $1.number | $2.number; } - ; - -eflag : `updated` - { $$.number = EXEC_UPDATED; } - | `together` - { $$.number = EXEC_TOGETHER; } - | `ignore` - { $$.number = EXEC_IGNORE; } - | `quietly` - { $$.number = EXEC_QUIETLY; } - | `piecemeal` - { $$.number = EXEC_PIECEMEAL; } - | `existing` - { $$.number = EXEC_EXISTING; } - ; - - -/* - * bindlist - list of variable to bind for an action - */ - -bindlist : /* empty */ - { $$.parse = pnull(); } - | `bind` list - { $$.parse = $2.parse; } - ; - - diff --git a/jam-files/engine/jamgramtab.h b/jam-files/engine/jamgramtab.h deleted file mode 100644 index a0fd43f6..00000000 --- a/jam-files/engine/jamgramtab.h +++ /dev/null @@ -1,44 +0,0 @@ - { "!", _BANG_t }, - { "!=", _BANG_EQUALS_t }, - { "&", _AMPER_t }, - { "&&", _AMPERAMPER_t }, - { "(", _LPAREN_t }, - { ")", _RPAREN_t }, - { "+=", _PLUS_EQUALS_t }, - { ":", _COLON_t }, - { ";", _SEMIC_t }, - { "<", _LANGLE_t }, - { "<=", _LANGLE_EQUALS_t }, - { "=", _EQUALS_t }, - { ">", _RANGLE_t }, - { ">=", _RANGLE_EQUALS_t }, - { "?=", _QUESTION_EQUALS_t }, - { "[", _LBRACKET_t }, - { "]", _RBRACKET_t }, - { "actions", ACTIONS_t }, - { "bind", BIND_t }, - { "case", CASE_t }, - { "class", CLASS_t }, - { "default", DEFAULT_t }, - { "else", ELSE_t }, - { "existing", EXISTING_t }, - { "for", FOR_t }, - { "if", IF_t }, - { "ignore", IGNORE_t }, - { "in", IN_t }, - { "include", INCLUDE_t }, - { "local", LOCAL_t }, - { "module", MODULE_t }, - { "on", ON_t }, - { "piecemeal", PIECEMEAL_t }, - { "quietly", QUIETLY_t }, - { "return", RETURN_t }, - { "rule", RULE_t }, - { "switch", SWITCH_t }, - { "together", TOGETHER_t }, - { "updated", UPDATED_t }, - { "while", WHILE_t }, - { "{", _LBRACE_t }, - { "|", _BAR_t }, - { "||", _BARBAR_t }, - { "}", _RBRACE_t }, diff --git a/jam-files/engine/lists.c b/jam-files/engine/lists.c deleted file mode 100644 index ebabb63e..00000000 --- a/jam-files/engine/lists.c +++ /dev/null @@ -1,339 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. 
- */ - -# include "jam.h" -# include "newstr.h" -# include "lists.h" - -/* - * lists.c - maintain lists of strings - * - * This implementation essentially uses a singly linked list, but - * guarantees that the head element of every list has a valid pointer - * to the tail of the list, so the new elements can efficiently and - * properly be appended to the end of a list. - * - * To avoid massive allocation, list_free() just tacks the whole freed - * chain onto freelist and list_new() looks on freelist first for an - * available list struct. list_free() does not free the strings in the - * chain: it lazily lets list_new() do so. - * - * 08/23/94 (seiwald) - new list_append() - * 09/07/00 (seiwald) - documented lol_*() functions - */ - -static LIST *freelist = 0; /* junkpile for list_free() */ - -/* - * list_append() - append a list onto another one, returning total - */ - -LIST * list_append( LIST * l, LIST * nl ) -{ - if ( !nl ) - { - /* Just return l */ - } - else if ( !l ) - { - l = nl; - } - else - { - /* Graft two non-empty lists. */ - l->tail->next = nl; - l->tail = nl->tail; - } - - return l; -} - -/* - * list_new() - tack a string onto the end of a list of strings - */ - -LIST * list_new( LIST * head, char * string ) -{ - LIST * l; - - if ( DEBUG_LISTS ) - printf( "list > %s <\n", string ); - - /* Get list struct from freelist, if one available. */ - /* Otherwise allocate. */ - /* If from freelist, must free string first */ - - if ( freelist ) - { - l = freelist; - freestr( l->string ); - freelist = freelist->next; - } - else - { - l = (LIST *)BJAM_MALLOC( sizeof( LIST ) ); - } - - /* If first on chain, head points here. */ - /* If adding to chain, tack us on. */ - /* Tail must point to this new, last element. */ - - if ( !head ) head = l; - else head->tail->next = l; - head->tail = l; - l->next = 0; - - l->string = string; - - return head; -} - - -/* - * list_copy() - copy a whole list of strings (nl) onto end of another (l). - */ - -LIST * list_copy( LIST * l, LIST * nl ) -{ - for ( ; nl; nl = list_next( nl ) ) - l = list_new( l, copystr( nl->string ) ); - return l; -} - - -/* - * list_sublist() - copy a subset of a list of strings. - */ - -LIST * list_sublist( LIST * l, int start, int count ) -{ - LIST * nl = 0; - for ( ; l && start--; l = list_next( l ) ); - for ( ; l && count--; l = list_next( l ) ) - nl = list_new( nl, copystr( l->string ) ); - return nl; -} - - -static int str_ptr_compare( void const * va, void const * vb ) -{ - char * a = *( (char * *)va ); - char * b = *( (char * *)vb ); - return strcmp(a, b); -} - - -LIST * list_sort( LIST * l ) -{ - int len; - int ii; - char * * strings; - LIST * listp; - LIST * result = 0; - - if ( !l ) - return L0; - - len = list_length( l ); - strings = (char * *)BJAM_MALLOC( len * sizeof(char*) ); - - listp = l; - for ( ii = 0; ii < len; ++ii ) - { - strings[ ii ] = listp->string; - listp = listp->next; - } - - qsort( strings, len, sizeof( char * ), str_ptr_compare ); - - for ( ii = 0; ii < len; ++ii ) - result = list_append( result, list_new( 0, strings[ ii ] ) ); - - BJAM_FREE( strings ); - - return result; -} - - -/* - * list_free() - free a list of strings - */ - -void list_free( LIST * head ) -{ - /* Just tack onto freelist. 
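
Two invariants in lists.c above are easy to miss: only the head node's tail pointer is meaningful, and freed chains are parked on freelist with their strings released lazily by the next list_new(). A self-contained miniature of the O(1) tail-pointer append (illustrative sketch; jam's real LIST also carries newstr-managed strings and the freelist recycling):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct mlist { struct mlist *next, *tail; const char *s; } MLIST;

    /* Append one cell in O(1): head->tail always names the last cell. */
    static MLIST *mlist_new(MLIST *head, const char *s)
    {
        MLIST *l = malloc(sizeof *l);
        l->s = s; l->next = 0;
        if (!head) head = l;            /* first cell: becomes the head    */
        else head->tail->next = l;      /* otherwise hang off the old tail */
        head->tail = l;                 /* tail is only valid on the head  */
        return head;
    }

    int main(void)
    {
        MLIST *l = 0;
        l = mlist_new(l, "a");
        l = mlist_new(l, "b");
        l = mlist_new(l, "c");
        for (MLIST *p = l; p; p = p->next) printf("%s ", p->s);   /* a b c */
        printf("\n");
        return 0;
    }
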
*/ - if ( head ) - { - head->tail->next = freelist; - freelist = head; - } -} - - -/* - * list_pop_front() - remove the front element from a list of strings - */ - -LIST * list_pop_front( LIST * l ) -{ - LIST * result = l->next; - if ( result ) - { - result->tail = l->tail; - l->next = L0; - l->tail = l; - } - list_free( l ); - return result; -} - - -/* - * list_print() - print a list of strings to stdout - */ - -void list_print( LIST * l ) -{ - LIST * p = 0; - for ( ; l; p = l, l = list_next( l ) ) - if ( p ) - printf( "%s ", p->string ); - if ( p ) - printf( "%s", p->string ); -} - - -/* - * list_length() - return the number of items in the list - */ - -int list_length( LIST * l ) -{ - int n = 0; - for ( ; l; l = list_next( l ), ++n ); - return n; -} - - -int list_in( LIST * l, char * value ) -{ - for ( ; l; l = l->next ) - if ( strcmp( l->string, value ) == 0 ) - return 1; - return 0; -} - - -LIST * list_unique( LIST * sorted_list ) -{ - LIST * result = 0; - LIST * last_added = 0; - - for ( ; sorted_list; sorted_list = sorted_list->next ) - { - if ( !last_added || strcmp( sorted_list->string, last_added->string ) != 0 ) - { - result = list_new( result, sorted_list->string ); - last_added = sorted_list; - } - } - return result; -} - - -/* - * lol_init() - initialize a LOL (list of lists). - */ - -void lol_init( LOL * lol ) -{ - lol->count = 0; -} - - -/* - * lol_add() - append a LIST onto an LOL. - */ - -void lol_add( LOL * lol, LIST * l ) -{ - if ( lol->count < LOL_MAX ) - lol->list[ lol->count++ ] = l; -} - - -/* - * lol_free() - free the LOL and its LISTs. - */ - -void lol_free( LOL * lol ) -{ - int i; - for ( i = 0; i < lol->count; ++i ) - list_free( lol->list[ i ] ); - lol->count = 0; -} - - -/* - * lol_get() - return one of the LISTs in the LOL. - */ - -LIST * lol_get( LOL * lol, int i ) -{ - return i < lol->count ? lol->list[ i ] : 0; -} - - -/* - * lol_print() - debug print LISTS separated by ":". - */ - -void lol_print( LOL * lol ) -{ - int i; - - for ( i = 0; i < lol->count; ++i ) - { - if ( i ) - printf( " : " ); - list_print( lol->list[ i ] ); - } -} - -#ifdef HAVE_PYTHON - -PyObject *list_to_python(LIST *l) -{ - PyObject *result = PyList_New(0); - - for (; l; l = l->next) - { - PyObject* s = PyString_FromString(l->string); - PyList_Append(result, s); - Py_DECREF(s); - } - - return result; -} - -LIST *list_from_python(PyObject *l) -{ - LIST * result = 0; - - Py_ssize_t i, n; - n = PySequence_Size(l); - for (i = 0; i < n; ++i) - { - PyObject *v = PySequence_GetItem(l, i); - result = list_new (result, newstr (PyString_AsString(v))); - Py_DECREF(v); - } - - return result; -} - -#endif diff --git a/jam-files/engine/lists.h b/jam-files/engine/lists.h deleted file mode 100644 index 1dc59827..00000000 --- a/jam-files/engine/lists.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -/* - * lists.h - the LIST structure and routines to manipulate them - * - * The whole of jam relies on lists of strings as a datatype. This - * module, in conjunction with newstr.c, handles these relatively - * efficiently. 
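
A LOL, per the lol_* routines above, is just a fixed array of up to LOL_MAX lists, which is why lol_add() silently drops anything past the limit and lol_get() returns 0 for an out-of-range slot. A hedged miniature of that shape (strings stand in for LIST pointers so the sketch is self-contained):

    #include <stdio.h>

    #define LOL_MAX_DEMO 19   /* mirrors LOL_MAX above */

    typedef struct { int count; const char *list[LOL_MAX_DEMO]; } LOL_DEMO;

    /* Same contract as lol_add()/lol_get(): bounded append, 0 past the end. */
    static void lol_add_demo(LOL_DEMO *lol, const char *l)
    { if (lol->count < LOL_MAX_DEMO) lol->list[lol->count++] = l; }

    static const char *lol_get_demo(LOL_DEMO *lol, int i)
    { return i < lol->count ? lol->list[i] : 0; }

    int main(void)
    {
        LOL_DEMO lol = { 0 };              /* lol_init() likewise zeroes count */
        lol_add_demo(&lol, "sources");
        lol_add_demo(&lol, "targets");
        printf("%s : %s : %s\n",
               lol_get_demo(&lol, 0), lol_get_demo(&lol, 1),
               lol_get_demo(&lol, 2) ? lol_get_demo(&lol, 2) : "(none)");
        return 0;
    }
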
- * - * Structures defined: - * - * LIST - list of strings - * LOL - list of LISTs - * - * External routines: - * - * list_append() - append a list onto another one, returning total - * list_new() - tack a string onto the end of a list of strings - * list_copy() - copy a whole list of strings - * list_sublist() - copy a subset of a list of strings - * list_free() - free a list of strings - * list_print() - print a list of strings to stdout - * list_length() - return the number of items in the list - * - * lol_init() - initialize a LOL (list of lists) - * lol_add() - append a LIST onto an LOL - * lol_free() - free the LOL and its LISTs - * lol_get() - return one of the LISTs in the LOL - * lol_print() - debug print LISTS separated by ":" - * - * 04/13/94 (seiwald) - added shorthand L0 for null list pointer - * 08/23/94 (seiwald) - new list_append() - */ - -#ifndef LISTS_DWA20011022_H -# define LISTS_DWA20011022_H - -#ifdef HAVE_PYTHON -#include <Python.h> -#endif - -/* - * LIST - list of strings - */ - -typedef struct _list LIST; - -struct _list { - LIST *next; - LIST *tail; /* only valid in head node */ - char *string; /* private copy */ -}; - -/* - * LOL - list of LISTs - */ - -typedef struct _lol LOL; - -# define LOL_MAX 19 - -struct _lol { - int count; - LIST *list[ LOL_MAX ]; -}; - -LIST * list_append( LIST *l, LIST *nl ); -LIST * list_copy( LIST *l, LIST *nl ); -void list_free( LIST *head ); -LIST * list_new( LIST *head, char *string ); -void list_print( LIST *l ); -int list_length( LIST *l ); -LIST * list_sublist( LIST *l, int start, int count ); -LIST * list_pop_front( LIST *l ); -LIST * list_sort( LIST *l); -LIST * list_unique( LIST *sorted_list); -int list_in(LIST* l, char* value); - -# define list_next( l ) ((l)->next) - -# define L0 ((LIST *)0) - -void lol_add( LOL *lol, LIST *l ); -void lol_init( LOL *lol ); -void lol_free( LOL *lol ); -LIST * lol_get( LOL *lol, int i ); -void lol_print( LOL *lol ); -void lol_build( LOL* lol, char** elements ); - -#ifdef HAVE_PYTHON - -PyObject *list_to_python(LIST *l); -LIST *list_from_python(PyObject *l); - -#endif - -#endif - diff --git a/jam-files/engine/make.c b/jam-files/engine/make.c deleted file mode 100644 index c871f0be..00000000 --- a/jam-files/engine/make.c +++ /dev/null @@ -1,814 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -/* - * make.c - bring a target up to date, once rules are in place. - * - * This modules controls the execution of rules to bring a target and its - * dependencies up to date. It is invoked after the targets, rules, et. al. - * described in rules.h are created by the interpreting jam files. - * - * This file contains the main make() entry point and the first pass make0(). - * The second pass, make1(), which actually does the command execution, is in - * make1.c. - * - * External routines: - * make() - make a target, given its name - * - * Internal routines: - * make0() - bind and scan everything to make a TARGET - * make0sort() - reorder TARGETS chain by their time (newest to oldest) - * - * 12/26/93 (seiwald) - allow NOTIME targets to be expanded via $(<), $(>). - * 01/04/94 (seiwald) - print all targets, bounded, when tracing commands. 
- * 04/08/94 (seiwald) - progress report now reflects only targets with actions. - * 04/11/94 (seiwald) - Combined deps & headers into deps[2] in TARGET. - * 12/20/94 (seiwald) - NOTIME renamed NOTFILE. - * 12/20/94 (seiwald) - make0() headers after determining fate of target, so - * that headers are not seen as being dependent on - * themselves. - * 01/19/95 (seiwald) - distinguish between CANTFIND/CANTMAKE targets. - * 02/02/95 (seiwald) - propagate leaf source time for new LEAVES rule. - * 02/14/95 (seiwald) - NOUPDATE rule means don't update existing target. - * 08/22/95 (seiwald) - NOUPDATE targets immune to anyhow (-a) flag. - * 09/06/00 (seiwald) - NOCARE affects targets with sources/actions. - * 03/02/01 (seiwald) - reverse NOCARE change. - * 03/14/02 (seiwald) - TEMPORARY targets no longer take on parents age. - * 03/16/02 (seiwald) - support for -g (reorder builds by source time). - */ - -#include "jam.h" - -#include "lists.h" -#include "parse.h" -#include "variable.h" -#include "rules.h" - -#ifdef OPT_HEADER_CACHE_EXT - #include "hcache.h" -#endif - -#include "search.h" -#include "newstr.h" -#include "make.h" -#include "headers.h" -#include "command.h" -#include <assert.h> - -#ifndef max - #define max( a,b ) ((a)>(b)?(a):(b)) -#endif - -static TARGETS * make0sort( TARGETS * c ); - -#ifdef OPT_GRAPH_DEBUG_EXT - static void dependGraphOutput( TARGET * t, int depth ); -#endif - -static const char * target_fate[] = -{ - "init", /* T_FATE_INIT */ - "making", /* T_FATE_MAKING */ - "stable", /* T_FATE_STABLE */ - "newer", /* T_FATE_NEWER */ - "temp", /* T_FATE_ISTMP */ - "touched", /* T_FATE_TOUCHED */ - "rebuild", /* T_FATE_REBUILD */ - "missing", /* T_FATE_MISSING */ - "needtmp", /* T_FATE_NEEDTMP */ - "old", /* T_FATE_OUTDATED */ - "update", /* T_FATE_UPDATE */ - "nofind", /* T_FATE_CANTFIND */ - "nomake" /* T_FATE_CANTMAKE */ -}; - -static const char * target_bind[] = -{ - "unbound", - "missing", - "parents", - "exists", -}; - -# define spaces(x) ( " " + ( x > 20 ? 0 : 20-x ) ) - - -/* - * make() - make a target, given its name. - */ - -int make( int n_targets, char const * * targets, int anyhow ) -{ - int i; - COUNTS counts[ 1 ]; - int status = 0; /* 1 if anything fails */ - -#ifdef OPT_HEADER_CACHE_EXT - hcache_init(); -#endif - - memset( (char *)counts, 0, sizeof( *counts ) ); - - /* First bind all targets with LOCATE_TARGET setting. This is needed to - * correctly handle dependencies to generated headers. - */ - bind_explicitly_located_targets(); - - { - PROFILE_ENTER( MAKE_MAKE0 ); - for ( i = 0; i < n_targets; ++i ) - make0( bindtarget( targets[ i ] ), 0, 0, counts, anyhow ); - PROFILE_EXIT( MAKE_MAKE0 ); - } - -#ifdef OPT_GRAPH_DEBUG_EXT - if ( DEBUG_GRAPH ) - for ( i = 0; i < n_targets; ++i ) - dependGraphOutput( bindtarget( targets[ i ] ), 0 ); -#endif - - if ( DEBUG_MAKE ) - { - if ( counts->targets ) - printf( "...found %d target%s...\n", counts->targets, - counts->targets > 1 ? "s" : "" ); - if ( counts->temp ) - printf( "...using %d temp target%s...\n", counts->temp, - counts->temp > 1 ? "s" : "" ); - if ( counts->updating ) - printf( "...updating %d target%s...\n", counts->updating, - counts->updating > 1 ? "s" : "" ); - if ( counts->cantfind ) - printf( "...can't find %d target%s...\n", counts->cantfind, - counts->cantfind > 1 ? "s" : "" ); - if ( counts->cantmake ) - printf( "...can't make %d target%s...\n", counts->cantmake, - counts->cantmake > 1 ? 
"s" : "" ); - } - -#ifdef OPT_HEADER_CACHE_EXT - hcache_done(); -#endif - - status = counts->cantfind || counts->cantmake; - - { - PROFILE_ENTER( MAKE_MAKE1 ); - for ( i = 0; i < n_targets; ++i ) - status |= make1( bindtarget( targets[ i ] ) ); - PROFILE_EXIT( MAKE_MAKE1 ); - } - - return status; -} - - -/* Force any dependants of t that have already at least begun being visited by - * make0() to be updated. - */ - -static void update_dependants( TARGET * t ) -{ - TARGETS * q; - - for ( q = t->dependants; q; q = q->next ) - { - TARGET * p = q->target; - char fate0 = p->fate; - - /* If we have already at least begun visiting it and we are not already - * rebuilding it for other reasons. - */ - if ( ( fate0 != T_FATE_INIT ) && ( fate0 < T_FATE_BUILD ) ) - { - p->fate = T_FATE_UPDATE; - - if ( DEBUG_FATE ) - { - printf( "fate change %s from %s to %s (as dependant of %s)\n", - p->name, target_fate[ (int) fate0 ], target_fate[ (int) p->fate ], t->name ); - } - - /* If we are done visiting it, go back and make sure its dependants - * get rebuilt. - */ - if ( fate0 > T_FATE_MAKING ) - update_dependants( p ); - } - } -} - - -/* - * Make sure that all of t's rebuilds get rebuilt. - */ - -static void force_rebuilds( TARGET * t ) -{ - TARGETS * d; - for ( d = t->rebuilds; d; d = d->next ) - { - TARGET * r = d->target; - - /* If it is not already being rebuilt for other reasons. */ - if ( r->fate < T_FATE_BUILD ) - { - if ( DEBUG_FATE ) - printf( "fate change %s from %s to %s (by rebuild)\n", - r->name, target_fate[ (int) r->fate ], target_fate[ T_FATE_REBUILD ] ); - - /* Force rebuild it. */ - r->fate = T_FATE_REBUILD; - - /* And make sure its dependants are updated too. */ - update_dependants( r ); - } - } -} - - -/* - * make0() - bind and scan everything to make a TARGET. - * - * Recursively binds a target, searches for #included headers, calls itself on - * those headers and any dependencies. - */ - -void make0 -( - TARGET * t, - TARGET * p, /* parent */ - int depth, /* for display purposes */ - COUNTS * counts, /* for reporting */ - int anyhow -) /* forcibly touch all (real) targets */ -{ - TARGETS * c; - TARGET * ptime = t; - time_t last; - time_t leaf; - time_t hlast; - int fate; - char const * flag = ""; - SETTINGS * s; - -#ifdef OPT_GRAPH_DEBUG_EXT - int savedFate, oldTimeStamp; -#endif - - if ( DEBUG_MAKEPROG ) - printf( "make\t--\t%s%s\n", spaces( depth ), t->name ); - - /* - * Step 1: initialize - */ - - if ( DEBUG_MAKEPROG ) - printf( "make\t--\t%s%s\n", spaces( depth ), t->name ); - - t->fate = T_FATE_MAKING; - - /* - * Step 2: under the influence of "on target" variables, - * bind the target and search for headers. - */ - - /* Step 2a: set "on target" variables. */ - s = copysettings( t->settings ); - pushsettings( s ); - - /* Step 2b: find and timestamp the target file (if it is a file). */ - if ( ( t->binding == T_BIND_UNBOUND ) && !( t->flags & T_FLAG_NOTFILE ) ) - { - char * another_target; - t->boundname = search( t->name, &t->time, &another_target, - t->flags & T_FLAG_ISFILE ); - /* If it was detected that this target refers to an already existing and - * bound one, we add an include dependency, so that every target - * depending on us will depend on that other target as well. - */ - if ( another_target ) - target_include( t, bindtarget( another_target ) ); - - t->binding = t->time ? T_BIND_EXISTS : T_BIND_MISSING; - } - - /* INTERNAL, NOTFILE header nodes have the time of their parents. 
*/ - if ( p && ( t->flags & T_FLAG_INTERNAL ) ) - ptime = p; - - /* If temp file does not exist but parent does, use parent. */ - if ( p && ( t->flags & T_FLAG_TEMP ) && - ( t->binding == T_BIND_MISSING ) && - ( p->binding != T_BIND_MISSING ) ) - { - t->binding = T_BIND_PARENTS; - ptime = p; - } - -#ifdef OPT_SEMAPHORE - { - LIST * var = var_get( "JAM_SEMAPHORE" ); - if ( var ) - { - TARGET * semaphore = bindtarget( var->string ); - semaphore->progress = T_MAKE_SEMAPHORE; - t->semaphore = semaphore; - } - } -#endif - - /* Step 2c: If its a file, search for headers. */ - if ( t->binding == T_BIND_EXISTS ) - headers( t ); - - /* Step 2d: reset "on target" variables. */ - popsettings( s ); - freesettings( s ); - - /* - * Pause for a little progress reporting . - */ - - if ( DEBUG_BIND ) - { - if ( strcmp( t->name, t->boundname ) ) - printf( "bind\t--\t%s%s: %s\n", - spaces( depth ), t->name, t->boundname ); - - switch ( t->binding ) - { - case T_BIND_UNBOUND: - case T_BIND_MISSING: - case T_BIND_PARENTS: - printf( "time\t--\t%s%s: %s\n", - spaces( depth ), t->name, target_bind[ (int) t->binding ] ); - break; - - case T_BIND_EXISTS: - printf( "time\t--\t%s%s: %s", - spaces( depth ), t->name, ctime( &t->time ) ); - break; - } - } - - /* - * Step 3: recursively make0() dependencies & headers. - */ - - /* Step 3a: recursively make0() dependencies. */ - for ( c = t->depends; c; c = c->next ) - { - int internal = t->flags & T_FLAG_INTERNAL; - - /* Warn about circular deps, except for includes, which include each - * other alot. - */ - if ( c->target->fate == T_FATE_INIT ) - make0( c->target, ptime, depth + 1, counts, anyhow ); - else if ( c->target->fate == T_FATE_MAKING && !internal ) - printf( "warning: %s depends on itself\n", c->target->name ); - } - - /* Step 3b: recursively make0() internal includes node. */ - if ( t->includes ) - make0( t->includes, p, depth + 1, counts, anyhow ); - - /* Step 3c: add dependencies' includes to our direct dependencies. */ - { - TARGETS * incs = 0; - for ( c = t->depends; c; c = c->next ) - if ( c->target->includes ) - incs = targetentry( incs, c->target->includes ); - t->depends = targetchain( t->depends, incs ); - } - - /* - * Step 4: compute time & fate - */ - - /* Step 4a: pick up dependencies' time and fate */ - last = 0; - leaf = 0; - fate = T_FATE_STABLE; - for ( c = t->depends; c; c = c->next ) - { - /* If LEAVES has been applied, we only heed the timestamps of the leaf - * source nodes. - */ - leaf = max( leaf, c->target->leaf ); - - if ( t->flags & T_FLAG_LEAVES ) - { - last = leaf; - continue; - } - - last = max( last, c->target->time ); - fate = max( fate, c->target->fate ); - -#ifdef OPT_GRAPH_DEBUG_EXT - if ( DEBUG_FATE ) - if ( fate < c->target->fate ) - printf( "fate change %s from %s to %s by dependency %s\n", - t->name, target_fate[(int) fate], target_fate[(int) c->target->fate], - c->target->name ); -#endif - } - - /* Step 4b: pick up included headers time */ - - /* - * If a header is newer than a temp source that includes it, - * the temp source will need building. - */ - - hlast = t->includes ? t->includes->time : 0; - - /* Step 4c: handle NOUPDATE oddity. - * - * If a NOUPDATE file exists, mark it as having eternally old dependencies. - * Do not inherit our fate from our dependencies. Decide fate based only on - * other flags and our binding (done later). 
- */ - if ( t->flags & T_FLAG_NOUPDATE ) - { -#ifdef OPT_GRAPH_DEBUG_EXT - if ( DEBUG_FATE ) - if ( fate != T_FATE_STABLE ) - printf( "fate change %s back to stable, NOUPDATE.\n", t->name - ); -#endif - - last = 0; - t->time = 0; - - /* Do not inherit our fate from our dependencies. Decide fate based only - * upon other flags and our binding (done later). - */ - fate = T_FATE_STABLE; - } - - /* Step 4d: determine fate: rebuild target or what? */ - - /* - In English: - If can not find or make child, can not make target. - If children changed, make target. - If target missing, make it. - If children newer, make target. - If temp's children newer than parent, make temp. - If temp's headers newer than parent, make temp. - If deliberately touched, make it. - If up-to-date temp file present, use it. - If target newer than non-notfile parent, mark target newer. - Otherwise, stable! - - Note this block runs from least to most stable: - as we make it further down the list, the target's - fate is getting stabler. - */ - -#ifdef OPT_GRAPH_DEBUG_EXT - savedFate = fate; - oldTimeStamp = 0; -#endif - - if ( fate >= T_FATE_BROKEN ) - { - fate = T_FATE_CANTMAKE; - } - else if ( fate >= T_FATE_SPOIL ) - { - fate = T_FATE_UPDATE; - } - else if ( t->binding == T_BIND_MISSING ) - { - fate = T_FATE_MISSING; - } - else if ( ( t->binding == T_BIND_EXISTS ) && ( last > t->time ) ) - { -#ifdef OPT_GRAPH_DEBUG_EXT - oldTimeStamp = 1; -#endif - fate = T_FATE_OUTDATED; - } - else if ( ( t->binding == T_BIND_PARENTS ) && ( last > p->time ) ) - { -#ifdef OPT_GRAPH_DEBUG_EXT - oldTimeStamp = 1; -#endif - fate = T_FATE_NEEDTMP; - } - else if ( ( t->binding == T_BIND_PARENTS ) && ( hlast > p->time ) ) - { - fate = T_FATE_NEEDTMP; - } - else if ( t->flags & T_FLAG_TOUCHED ) - { - fate = T_FATE_TOUCHED; - } - else if ( anyhow && !( t->flags & T_FLAG_NOUPDATE ) ) - { - fate = T_FATE_TOUCHED; - } - else if ( ( t->binding == T_BIND_EXISTS ) && ( t->flags & T_FLAG_TEMP ) ) - { - fate = T_FATE_ISTMP; - } - else if ( ( t->binding == T_BIND_EXISTS ) && p && - ( p->binding != T_BIND_UNBOUND ) && ( t->time > p->time ) ) - { -#ifdef OPT_GRAPH_DEBUG_EXT - oldTimeStamp = 1; -#endif - fate = T_FATE_NEWER; - } - else - { - fate = T_FATE_STABLE; - } -#ifdef OPT_GRAPH_DEBUG_EXT - if ( DEBUG_FATE && ( fate != savedFate ) ) - { - if ( savedFate == T_FATE_STABLE ) - printf( "fate change %s set to %s%s\n", t->name, - target_fate[ fate ], oldTimeStamp ? " (by timestamp)" : "" ); - else - printf( "fate change %s from %s to %s%s\n", t->name, - target_fate[ savedFate ], target_fate[ fate ], - oldTimeStamp ? " (by timestamp)" : "" ); - } -#endif - - /* Step 4e: handle missing files */ - /* If it is missing and there are no actions to create it, boom. */ - /* If we can not make a target we do not care about it, okay. */ - /* We could insist that there are updating actions for all missing */ - /* files, but if they have dependencies we just pretend it is a NOTFILE. */ - - if ( ( fate == T_FATE_MISSING ) && !t->actions && !t->depends ) - { - if ( t->flags & T_FLAG_NOCARE ) - { -#ifdef OPT_GRAPH_DEBUG_EXT - if ( DEBUG_FATE ) - printf( "fate change %s to STABLE from %s, " - "no actions, no dependencies and do not care\n", - t->name, target_fate[ fate ] ); -#endif - fate = T_FATE_STABLE; - } - else - { - printf( "don't know how to make %s\n", t->name ); - fate = T_FATE_CANTFIND; - } - } - - /* Step 4f: propagate dependencies' time & fate. */ - /* Set leaf time to be our time only if this is a leaf. 
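
The "In English" ladder above runs its checks from least to most stable, so the first matching condition fixes the fate. A stripped-down rendering of that decision order (a paraphrase in C, not the jam code — the fate and binding constants here are simplified stand-ins for T_FATE_* and T_BIND_*):

    #include <stdio.h>
    #include <time.h>

    /* Simplified stand-ins for jam's T_FATE_* / T_BIND_* values. */
    enum fate { STABLE, NEWER, OUTDATED, MISSING, CANTMAKE };
    enum bind { B_MISSING, B_EXISTS };

    static enum fate decide(enum bind binding, time_t own, time_t newest_child,
                            enum fate worst_child)
    {
        if (worst_child == CANTMAKE) return CANTMAKE; /* can't make child: give up */
        if (binding == B_MISSING)    return MISSING;  /* target missing: make it   */
        if (newest_child > own)      return OUTDATED; /* children newer: rebuild   */
        return STABLE;                                /* otherwise, stable         */
    }

    int main(void)
    {
        /* own timestamp 100, newest child 200 -> outdated */
        printf("%d\n", decide(B_EXISTS, 100, 200, STABLE));  /* 2 == OUTDATED */
        return 0;
    }
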
*/ - - t->time = max( t->time, last ); - t->leaf = leaf ? leaf : t->time ; - /* This target's fate may have been updated by virtue of following some - * target's rebuilds list, so only allow it to be increased to the fate we - * have calculated. Otherwise, grab its new fate. - */ - if ( fate > t->fate ) - t->fate = fate; - else - fate = t->fate; - - /* Step 4g: if this target needs to be built, force rebuild everything in - * this target's rebuilds list. - */ - if ( ( fate >= T_FATE_BUILD ) && ( fate < T_FATE_BROKEN ) ) - force_rebuilds( t ); - - /* - * Step 5: sort dependencies by their update time. - */ - - if ( globs.newestfirst ) - t->depends = make0sort( t->depends ); - - /* - * Step 6: a little harmless tabulating for tracing purposes - */ - - /* Do not count or report interal includes nodes. */ - if ( t->flags & T_FLAG_INTERNAL ) - return; - - if ( counts ) - { -#ifdef OPT_IMPROVED_PATIENCE_EXT - ++counts->targets; -#else - if ( !( ++counts->targets % 1000 ) && DEBUG_MAKE ) - printf( "...patience...\n" ); -#endif - - if ( fate == T_FATE_ISTMP ) - ++counts->temp; - else if ( fate == T_FATE_CANTFIND ) - ++counts->cantfind; - else if ( ( fate == T_FATE_CANTMAKE ) && t->actions ) - ++counts->cantmake; - else if ( ( fate >= T_FATE_BUILD ) && ( fate < T_FATE_BROKEN ) && - t->actions ) - ++counts->updating; - } - - if ( !( t->flags & T_FLAG_NOTFILE ) && ( fate >= T_FATE_SPOIL ) ) - flag = "+"; - else if ( ( t->binding == T_BIND_EXISTS ) && p && ( t->time > p->time ) ) - flag = "*"; - - if ( DEBUG_MAKEPROG ) - printf( "made%s\t%s\t%s%s\n", flag, target_fate[ (int) t->fate ], - spaces( depth ), t->name ); -} - - -#ifdef OPT_GRAPH_DEBUG_EXT - -static const char * target_name( TARGET * t ) -{ - static char buf[ 1000 ]; - if ( t->flags & T_FLAG_INTERNAL ) - { - sprintf( buf, "%s (internal node)", t->name ); - return buf; - } - return t->name; -} - - -/* - * dependGraphOutput() - output the DG after make0 has run. 
- */ - -static void dependGraphOutput( TARGET * t, int depth ) -{ - TARGETS * c; - - if ( ( t->flags & T_FLAG_VISITED ) || !t->name || !t->boundname ) - return; - - t->flags |= T_FLAG_VISITED; - - switch ( t->fate ) - { - case T_FATE_TOUCHED: - case T_FATE_MISSING: - case T_FATE_OUTDATED: - case T_FATE_UPDATE: - printf( "->%s%2d Name: %s\n", spaces( depth ), depth, target_name( t ) ); - break; - default: - printf( " %s%2d Name: %s\n", spaces( depth ), depth, target_name( t ) ); - break; - } - - if ( strcmp( t->name, t->boundname ) ) - printf( " %s Loc: %s\n", spaces( depth ), t->boundname ); - - switch ( t->fate ) - { - case T_FATE_STABLE: - printf( " %s : Stable\n", spaces( depth ) ); - break; - case T_FATE_NEWER: - printf( " %s : Newer\n", spaces( depth ) ); - break; - case T_FATE_ISTMP: - printf( " %s : Up to date temp file\n", spaces( depth ) ); - break; - case T_FATE_NEEDTMP: - printf( " %s : Temporary file, to be updated\n", spaces( depth ) ); - break; - case T_FATE_TOUCHED: - printf( " %s : Been touched, updating it\n", spaces( depth ) ); - break; - case T_FATE_MISSING: - printf( " %s : Missing, creating it\n", spaces( depth ) ); - break; - case T_FATE_OUTDATED: - printf( " %s : Outdated, updating it\n", spaces( depth ) ); - break; - case T_FATE_REBUILD: - printf( " %s : Rebuild, updating it\n", spaces( depth ) ); - break; - case T_FATE_UPDATE: - printf( " %s : Updating it\n", spaces( depth ) ); - break; - case T_FATE_CANTFIND: - printf( " %s : Can not find it\n", spaces( depth ) ); - break; - case T_FATE_CANTMAKE: - printf( " %s : Can make it\n", spaces( depth ) ); - break; - } - - if ( t->flags & ~T_FLAG_VISITED ) - { - printf( " %s : ", spaces( depth ) ); - if ( t->flags & T_FLAG_TEMP ) printf( "TEMPORARY " ); - if ( t->flags & T_FLAG_NOCARE ) printf( "NOCARE " ); - if ( t->flags & T_FLAG_NOTFILE ) printf( "NOTFILE " ); - if ( t->flags & T_FLAG_TOUCHED ) printf( "TOUCHED " ); - if ( t->flags & T_FLAG_LEAVES ) printf( "LEAVES " ); - if ( t->flags & T_FLAG_NOUPDATE ) printf( "NOUPDATE " ); - printf( "\n" ); - } - - for ( c = t->depends; c; c = c->next ) - { - printf( " %s : Depends on %s (%s)", spaces( depth ), - target_name( c->target ), target_fate[ (int) c->target->fate ] ); - if ( c->target->time == t->time ) - printf( " (max time)"); - printf( "\n" ); - } - - for ( c = t->depends; c; c = c->next ) - dependGraphOutput( c->target, depth + 1 ); -} -#endif - - -/* - * make0sort() - reorder TARGETS chain by their time (newest to oldest). - * - * We walk chain, taking each item and inserting it on the sorted result, with - * newest items at the front. This involves updating each of the TARGETS' - * c->next and c->tail. Note that we make c->tail a valid prev pointer for every - * entry. Normally, it is only valid at the head, where prev == tail. Note also - * that while tail is a loop, next ends at the end of the chain. - */ - -static TARGETS * make0sort( TARGETS * chain ) -{ - PROFILE_ENTER( MAKE_MAKE0SORT ); - - TARGETS * result = 0; - - /* Walk the current target list. */ - while ( chain ) - { - TARGETS * c = chain; - TARGETS * s = result; - - chain = chain->next; - - /* Find point s in result for c. */ - while ( s && ( s->target->time > c->target->time ) ) - s = s->next; - - /* Insert c in front of s (might be 0). Do not even think of deciphering - * this. - */ - c->next = s; /* good even if s = 0 */ - if ( result == s ) result = c; /* new head of chain? */ - if ( !s ) s = result; /* wrap to ensure a next */ - if ( result != c ) s->tail->next = c; /* not head? 
be prev's next */
-        c->tail = s->tail;     /* take on next's prev */
-        s->tail = c;           /* make next's prev us */
-    }
-
-    PROFILE_EXIT( MAKE_MAKE0SORT );
-    return result;
-}
-
-
-static LIST * targets_to_update_ = 0;
-
-
-void mark_target_for_updating( char * target )
-{
-    targets_to_update_ = list_new( targets_to_update_, target );
-}
-
-
-LIST * targets_to_update()
-{
-    return targets_to_update_;
-}
-
-
-void clear_targets_to_update()
-{
-    list_free( targets_to_update_ );
-    targets_to_update_ = 0;
-}
diff --git a/jam-files/engine/make.h b/jam-files/engine/make.h
deleted file mode 100644
index b372263e..00000000
--- a/jam-files/engine/make.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 1993, 1995 Christopher Seiwald.
- *
- * This file is part of Jam - see jam.c for Copyright information.
- */
-
-/*
- * make.h - bring a target up to date, once rules are in place
- */
-
-#include "lists.h"
-
-int make( int n_targets, const char **targets, int anyhow );
-int make1( TARGET *t );
-
-typedef struct {
-    int temp;
-    int updating;
-    int cantfind;
-    int cantmake;
-    int targets;
-    int made;
-} COUNTS ;
-
-
-void make0( TARGET *t, TARGET *p, int depth,
-            COUNTS *counts, int anyhow );
-
-
-/*
- * Specifies that the target should be updated.
- */
-void mark_target_for_updating(char *target);
-/*
- * Returns the list of all the targets previously passed to 'mark_target_for_updating'.
- */
-LIST *targets_to_update();
-/*
- * Clears/unmarks all targets that are currently marked for update.
- */
-void clear_targets_to_update();
diff --git a/jam-files/engine/make1.c b/jam-files/engine/make1.c
deleted file mode 100644
index 8001f333..00000000
--- a/jam-files/engine/make1.c
+++ /dev/null
@@ -1,1145 +0,0 @@
-/*
- * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc.
- *
- * This file is part of Jam - see jam.c for Copyright information.
- */
-
-/* This file is ALSO:
- * Copyright 2001-2004 David Abrahams.
- * Distributed under the Boost Software License, Version 1.0.
- * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
- */
-
-/*
- * make1.c - execute commands to bring targets up to date
- *
- * This module contains make1(), the entry point called by make() to
- * recursively descend the dependency graph executing update actions as
- * marked by make0().
- *
- * External routines:
- *
- * make1() - execute commands to update a TARGET and all of its dependencies.
- *
- * Internal routines, the recursive/asynchronous command executors:
- *
- * make1a() - recursively traverse dependency target tree, calling make1b().
- * make1atail() - started processing all dependencies so go on to make1b().
- * make1b() - when dependencies are up to date, build target with make1c().
- * make1c() - launch target's next command, call parents' make1b() if none.
- * make1d() - handle command execution completion and call back make1c().
- *
- * Internal support routines:
- *
- * make1cmds() - turn ACTIONS into CMDs, grouping, splitting, etc.
- * make1list() - turn a list of targets into a LIST, for $(<) and $(>).
- * make1settings() - for vars that get bound values, build up replacement lists.
- * make1bind() - bind targets that weren't bound in dependency analysis.
- *
- * 04/16/94 (seiwald) - Split from make.c.
- * 04/21/94 (seiwald) - Handle empty "updated" actions.
- * 05/04/94 (seiwald) - async multiprocess (-j) support.
- * 06/01/94 (seiwald) - new 'actions existing' does existing sources.
- * 12/20/94 (seiwald) - NOTIME renamed NOTFILE.
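make0sort()'s insertion loop above is deliberately terse because it maintains the tail/prev invariant on every node. Stripped of that bookkeeping, the same newest-first insertion sort over a singly linked chain looks like this ('struct node' is a hypothetical stand-in for TARGETS):

    /* Newest-first insertion sort over a singly linked chain - the
     * ordering make0sort() produces, without the tail pointer tricks. */
    #include <stddef.h>
    #include <time.h>

    struct node { time_t time; struct node * next; };

    static struct node * sort_newest_first( struct node * chain )
    {
        struct node * result = NULL;
        while ( chain )
        {
            struct node * c = chain;       /* detach the head */
            chain = chain->next;

            /* Find the first sorted entry not newer than c. */
            struct node * * s = &result;
            while ( *s && ( (*s)->time > c->time ) )
                s = &(*s)->next;

            c->next = *s;                  /* splice c in front of it */
            *s = c;
        }
        return result;
    }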
- * 01/19/95 (seiwald) - distinguish between CANTFIND/CANTMAKE targets. - * 01/22/94 (seiwald) - pass per-target JAMSHELL down to exec_cmd(). - * 02/28/95 (seiwald) - Handle empty "existing" actions. - * 03/10/95 (seiwald) - Fancy counts. - */ - -#include "jam.h" - -#include "lists.h" -#include "parse.h" -#include "assert.h" -#include "variable.h" -#include "rules.h" -#include "headers.h" - -#include "search.h" -#include "newstr.h" -#include "make.h" -#include "command.h" -#include "execcmd.h" -#include "compile.h" -#include "output.h" - -#include <stdlib.h> - -#if ! defined(NT) || defined(__GNUC__) - #include <unistd.h> /* for unlink */ -#endif - -static CMD * make1cmds ( TARGET * ); -static LIST * make1list ( LIST *, TARGETS *, int flags ); -static SETTINGS * make1settings( LIST * vars ); -static void make1bind ( TARGET * ); - -/* Ugly static - it is too hard to carry it through the callbacks. */ - -static struct -{ - int failed; - int skipped; - int total; - int made; -} counts[ 1 ] ; - -/* Target state - remove recursive calls by just keeping track of state target - * is in. - */ -typedef struct _state -{ - struct _state * prev; /* previous state on stack */ - TARGET * t; /* current target */ - TARGET * parent; /* parent argument necessary for make1a() */ -#define T_STATE_MAKE1A 0 /* make1a() should be called */ -#define T_STATE_MAKE1ATAIL 1 /* make1atail() should be called */ -#define T_STATE_MAKE1B 2 /* make1b() should be called */ -#define T_STATE_MAKE1C 3 /* make1c() should be called */ -#define T_STATE_MAKE1D 4 /* make1d() should be called */ - int curstate; /* current state */ - int status; -} state; - -static void make1a ( state * ); -static void make1atail ( state * ); -static void make1b ( state * ); -static void make1c ( state * ); -static void make1d ( state * ); -static void make_closure( void * closure, int status, timing_info *, char *, char * ); - -typedef struct _stack -{ - state * stack; -} stack; - -static stack state_stack = { NULL }; - -static state * state_freelist = NULL; - - -static state * alloc_state() -{ - if ( state_freelist != NULL ) - { - state * pState = state_freelist; - state_freelist = pState->prev; - memset( pState, 0, sizeof( state ) ); - return pState; - } - - return (state *)BJAM_MALLOC( sizeof( state ) ); -} - - -static void free_state( state * pState ) -{ - pState->prev = state_freelist; - state_freelist = pState; -} - - -static void clear_state_freelist() -{ - while ( state_freelist != NULL ) - { - state * pState = state_freelist; - state_freelist = state_freelist->prev; - BJAM_FREE( pState ); - } -} - - -static state * current_state( stack * pStack ) -{ - return pStack->stack; -} - - -static void pop_state( stack * pStack ) -{ - if ( pStack->stack != NULL ) - { - state * pState = pStack->stack->prev; - free_state( pStack->stack ); - pStack->stack = pState; - } -} - - -static state * push_state( stack * pStack, TARGET * t, TARGET * parent, int curstate ) -{ - state * pState = alloc_state(); - - pState->t = t; - pState->parent = parent; - pState->prev = pStack->stack; - pState->curstate = curstate; - - pStack->stack = pState; - - return pStack->stack; -} - - -/* - * Pushes a stack onto another stack, effectively reversing the order. 
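alloc_state()/free_state()/clear_state_freelist() above implement a classic free-list allocator: freed records are threaded through their own 'prev' field and recycled before malloc() is consulted again. A self-contained sketch of the pattern (all names illustrative):

    /* Free-list allocator in the style of alloc_state()/free_state(). */
    #include <stdlib.h>
    #include <string.h>

    struct item { struct item * prev; int payload; };

    static struct item * freelist = NULL;

    static struct item * item_alloc( void )
    {
        if ( freelist )
        {
            struct item * p = freelist;
            freelist = p->prev;
            memset( p, 0, sizeof( *p ) );  /* hand back zeroed storage */
            return p;
        }
        return calloc( 1, sizeof( struct item ) );
    }

    static void item_free( struct item * p )
    {
        p->prev = freelist;                /* park for reuse; no free() here */
        freelist = p;
    }

    static void freelist_drain( void )     /* cf. clear_state_freelist() */
    {
        while ( freelist )
        {
            struct item * p = freelist;
            freelist = p->prev;
            free( p );
        }
    }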
- */ - -static void push_stack_on_stack( stack * pDest, stack * pSrc ) -{ - while ( pSrc->stack != NULL ) - { - state * pState = pSrc->stack; - pSrc->stack = pSrc->stack->prev; - pState->prev = pDest->stack; - pDest->stack = pState; - } -} - - -/* - * make1() - execute commands to update a TARGET and all of its dependencies. - */ - -static int intr = 0; - -int make1( TARGET * t ) -{ - state * pState; - - memset( (char *)counts, 0, sizeof( *counts ) ); - - /* Recursively make the target and its dependencies. */ - push_state( &state_stack, t, NULL, T_STATE_MAKE1A ); - - do - { - while ( ( pState = current_state( &state_stack ) ) != NULL ) - { - if ( intr ) - pop_state( &state_stack ); - - switch ( pState->curstate ) - { - case T_STATE_MAKE1A : make1a ( pState ); break; - case T_STATE_MAKE1ATAIL: make1atail( pState ); break; - case T_STATE_MAKE1B : make1b ( pState ); break; - case T_STATE_MAKE1C : make1c ( pState ); break; - case T_STATE_MAKE1D : make1d ( pState ); break; - } - } - } - /* Wait for any outstanding commands to finish running. */ - while ( exec_wait() ); - - clear_state_freelist(); - - /* Talk about it. */ - if ( counts->failed ) - printf( "...failed updating %d target%s...\n", counts->failed, - counts->failed > 1 ? "s" : "" ); - if ( DEBUG_MAKE && counts->skipped ) - printf( "...skipped %d target%s...\n", counts->skipped, - counts->skipped > 1 ? "s" : "" ); - if ( DEBUG_MAKE && counts->made ) - printf( "...updated %d target%s...\n", counts->made, - counts->made > 1 ? "s" : "" ); - - return counts->total != counts->made; -} - - -/* - * make1a() - recursively traverse target tree, calling make1b(). - * - * Called to start processing a specified target. Does nothing if the target is - * already being processed or otherwise starts processing all of its - * dependencies. Once all of its dependencies have started being processed goes - * on and calls make1b() (actually does that indirectly via a helper - * make1atail() state). - */ - -static void make1a( state * pState ) -{ - TARGET * t = pState->t; - TARGETS * c; - - /* If the parent is the first to try to build this target or this target is - * in the make1c() quagmire, arrange for the parent to be notified when this - * target is built. - */ - if ( pState->parent ) - switch ( pState->t->progress ) - { - case T_MAKE_INIT: - case T_MAKE_ACTIVE: - case T_MAKE_RUNNING: - pState->t->parents = targetentry( pState->t->parents, - pState->parent ); - ++pState->parent->asynccnt; - } - - /* If this target is already being processed then do nothing. There is no - * need to start processing the same target all over again. - */ - if ( pState->t->progress != T_MAKE_INIT ) - { - pop_state( &state_stack ); - return; - } - - /* Asynccnt counts the dependencies preventing this target from proceeding - * to make1b() for actual building. We start off with a count of 1 to - * prevent anything from happening until we can notify all dependencies that - * they are needed. This 1 is accounted for when we call make1b() ourselves, - * below. Without this if a a dependency gets built before we finish - * processing all of our other dependencies our build might be triggerred - * prematurely. - */ - pState->t->asynccnt = 1; - - /* Add header nodes created during the building process. */ - { - TARGETS * inc = 0; - for ( c = t->depends; c; c = c->next ) - if ( c->target->rescanned && c->target->includes ) - inc = targetentry( inc, c->target->includes ); - t->depends = targetchain( t->depends, inc ); - } - - /* Guard against circular dependencies. 
*/ - pState->t->progress = T_MAKE_ONSTACK; - - { - stack temp_stack = { NULL }; - for ( c = t->depends; c && !intr; c = c->next ) - push_state( &temp_stack, c->target, pState->t, T_STATE_MAKE1A ); - - /* Using stacks reverses the order of execution. Reverse it back. */ - push_stack_on_stack( &state_stack, &temp_stack ); - } - - pState->curstate = T_STATE_MAKE1ATAIL; -} - - -/* - * make1atail() - started processing all dependencies so go on to make1b(). - */ - -static void make1atail( state * pState ) -{ - pState->t->progress = T_MAKE_ACTIVE; - /* Now that all of our dependencies have bumped up our asynccnt we can - * remove our own internal bump added to prevent this target from being - * built before all of its dependencies start getting processed. - */ - pState->curstate = T_STATE_MAKE1B; -} - - -/* - * make1b() - when dependencies are up to date, build target with make1c(). - * - * Called after all dependencies have started being processed and after each of - * them finishes its processing. The target actually goes on to getting built in - * make1c() only after all of its dependencies have finished their processing. - */ - -static void make1b( state * pState ) -{ - TARGET * t = pState->t; - TARGETS * c; - TARGET * failed = 0; - char * failed_name = "dependencies"; - - /* If any dependencies are still outstanding, wait until they call make1b() - * to signal their completion. - */ - if ( --pState->t->asynccnt ) - { - pop_state( &state_stack ); - return; - } - - /* Try to aquire a semaphore. If it is locked, wait until the target that - * locked it is built and signal completition. - */ -#ifdef OPT_SEMAPHORE - if ( t->semaphore && t->semaphore->asynccnt ) - { - /* Append 't' to the list of targets waiting on semaphore. */ - t->semaphore->parents = targetentry( t->semaphore->parents, t ); - t->asynccnt++; - - if ( DEBUG_EXECCMD ) - printf( "SEM: %s is busy, delaying launch of %s\n", - t->semaphore->name, t->name ); - pop_state( &state_stack ); - return; - } -#endif - - /* Now ready to build target 't', if dependencies built OK. */ - - /* Collect status from dependencies. */ - for ( c = t->depends; c; c = c->next ) - if ( c->target->status > t->status && !( c->target->flags & T_FLAG_NOCARE ) ) - { - failed = c->target; - pState->t->status = c->target->status; - } - /* If an internal header node failed to build, we want to output the target - * that it failed on. - */ - if ( failed ) - { - failed_name = failed->flags & T_FLAG_INTERNAL - ? failed->failed - : failed->name; - } - t->failed = failed_name; - - /* If actions for building any of the dependencies have failed, bail. - * Otherwise, execute all actions to make the current target. 
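The asynccnt protocol spelled out in make1a()'s comment (seed the counter with 1, add one per dependency, drop the seed only after enumeration finishes, and fire in make1b() when the count reaches zero) is a general guard against premature triggering. A single-threaded sketch of the pattern (names hypothetical):

    /* The asynccnt guard: the count starts at 1 so the job cannot fire
     * while dependencies are still being enumerated. */
    #include <stdio.h>

    struct job { const char * name; int pending; };

    static void job_ready( struct job * j )       /* cf. make1b()'s body */
    {
        printf( "building %s\n", j->name );
    }

    static void dependency_done( struct job * j ) /* one call per reference */
    {
        if ( !--j->pending )
            job_ready( j );
    }

    static void schedule( struct job * j, int ndeps )
    {
        int i;
        j->pending = 1;               /* the seed: never starts at zero */
        for ( i = 0; i < ndeps; ++i )
        {
            ++j->pending;             /* one reference per dependency; each
                                       * dependency calls dependency_done(j)
                                       * when it completes */
        }
        dependency_done( j );         /* drop the seed; fires if ndeps == 0 */
    }

    int main( void )
    {
        struct job j = { "app", 0 };
        schedule( &j, 0 );            /* no dependencies: builds immediately */
        return 0;
    }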
- */ - if ( ( pState->t->status == EXEC_CMD_FAIL ) && pState->t->actions ) - { - ++counts->skipped; - if ( ( pState->t->flags & ( T_FLAG_RMOLD | T_FLAG_NOTFILE ) ) == T_FLAG_RMOLD ) - { - if ( !unlink( pState->t->boundname ) ) - printf( "...removing outdated %s\n", pState->t->boundname ); - } - else - printf( "...skipped %s for lack of %s...\n", pState->t->name, failed_name ); - } - - if ( pState->t->status == EXEC_CMD_OK ) - switch ( pState->t->fate ) - { - /* These are handled by the default case below now - case T_FATE_INIT: - case T_FATE_MAKING: - */ - - case T_FATE_STABLE: - case T_FATE_NEWER: - break; - - case T_FATE_CANTFIND: - case T_FATE_CANTMAKE: - pState->t->status = EXEC_CMD_FAIL; - break; - - case T_FATE_ISTMP: - if ( DEBUG_MAKE ) - printf( "...using %s...\n", pState->t->name ); - break; - - case T_FATE_TOUCHED: - case T_FATE_MISSING: - case T_FATE_NEEDTMP: - case T_FATE_OUTDATED: - case T_FATE_UPDATE: - case T_FATE_REBUILD: - /* Prepare commands for executing actions scheduled for this target - * and then schedule transfer to make1c() state to proceed with - * executing the prepared commands. Commands have their embedded - * variables automatically expanded, including making use of any "on - * target" variables. - */ - if ( pState->t->actions ) - { - ++counts->total; - if ( DEBUG_MAKE && !( counts->total % 100 ) ) - printf( "...on %dth target...\n", counts->total ); - - pState->t->cmds = (char *)make1cmds( pState->t ); - /* Set the target's "progress" so that make1c() counts it among - * its successes/failures. - */ - pState->t->progress = T_MAKE_RUNNING; - } - break; - - /* All possible fates should have been accounted for by now. */ - default: - printf( "ERROR: %s has bad fate %d", pState->t->name, - pState->t->fate ); - abort(); - } - - /* Call make1c() to begin the execution of the chain of commands needed to - * build the target. If we are not going to build the target (due of - * dependency failures or no commands needing to be run) the chain will be - * empty and make1c() will directly signal the target's completion. - */ - -#ifdef OPT_SEMAPHORE - /* If there is a semaphore, indicate that it is in use. */ - if ( pState->t->semaphore ) - { - ++pState->t->semaphore->asynccnt; - if ( DEBUG_EXECCMD ) - printf( "SEM: %s now used by %s\n", pState->t->semaphore->name, - pState->t->name ); - } -#endif - - pState->curstate = T_STATE_MAKE1C; -} - - -/* - * make1c() - launch target's next command, call parents' make1b() if none. - * - * If there are (more) commands to run to build this target (and we have not hit - * an error running earlier comands) we launch the command using exec_cmd(). If - * there are no more commands to run, we collect the status from all the actions - * and report our completion to all the parents. - */ - -static void make1c( state * pState ) -{ - CMD * cmd = (CMD *)pState->t->cmds; - - if ( cmd && ( pState->t->status == EXEC_CMD_OK ) ) - { - char * rule_name = 0; - char * target = 0; - - if ( DEBUG_MAKEQ || - ( !( cmd->rule->actions->flags & RULE_QUIETLY ) && DEBUG_MAKE ) ) - { - rule_name = cmd->rule->name; - target = lol_get( &cmd->args, 0 )->string; - if ( globs.noexec ) - out_action( rule_name, target, cmd->buf, "", "", EXIT_OK ); - } - - if ( globs.noexec ) - { - pState->curstate = T_STATE_MAKE1D; - pState->status = EXEC_CMD_OK; - } - else - { - /* Pop state first because exec_cmd() could push state. 
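The OPT_SEMAPHORE logic spread across make1b() and make1c() serializes targets sharing a semaphore: an arriving target that finds the semaphore busy queues itself, and the holder wakes exactly one waiter on release. A simplified sketch of that discipline (types and names are stand-ins, not bjam's):

    /* One holder at a time; arrivals queue, release wakes one waiter. */
    #include <stdlib.h>

    struct target;                    /* opaque for this sketch */

    struct waiter { struct target * t; struct waiter * next; };
    struct semaphore { int busy; struct waiter * waiting; };

    /* Returns 1 if the caller may proceed now, 0 if it was queued. */
    static int sem_try_acquire( struct semaphore * sem, struct target * t )
    {
        if ( sem->busy )
        {
            struct waiter * w = malloc( sizeof( *w ) );
            w->t = t;
            w->next = sem->waiting;
            sem->waiting = w;
            return 0;                 /* re-run the caller when woken */
        }
        sem->busy = 1;
        return 1;
    }

    /* Returns the woken target, if any, for the caller to reschedule. */
    static struct target * sem_release( struct semaphore * sem )
    {
        struct waiter * w = sem->waiting;
        struct target * next = NULL;
        sem->busy = 0;
        if ( w )
        {
            next = w->t;
            sem->waiting = w->next;
            free( w );
        }
        return next;
    }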
*/ - pop_state( &state_stack ); - exec_cmd( cmd->buf, make_closure, pState->t, cmd->shell, rule_name, - target ); - } - } - else - { - TARGETS * c; - ACTIONS * actions; - - /* Collect status from actions, and distribute it as well. */ - for ( actions = pState->t->actions; actions; actions = actions->next ) - if ( actions->action->status > pState->t->status ) - pState->t->status = actions->action->status; - for ( actions = pState->t->actions; actions; actions = actions->next ) - if ( pState->t->status > actions->action->status ) - actions->action->status = pState->t->status; - - /* Tally success/failure for those we tried to update. */ - if ( pState->t->progress == T_MAKE_RUNNING ) - switch ( pState->t->status ) - { - case EXEC_CMD_OK : ++counts->made ; break; - case EXEC_CMD_FAIL: ++counts->failed; break; - } - - /* Tell parents their dependency has been built. */ - { - stack temp_stack = { NULL }; - TARGET * t = pState->t; - TARGET * additional_includes = NULL; - - t->progress = T_MAKE_DONE; - - /* Target has been updated so rescan it for dependencies. */ - if ( ( t->fate >= T_FATE_MISSING ) && - ( t->status == EXEC_CMD_OK ) && - !t->rescanned ) - { - TARGET * target_to_rescan = t; - SETTINGS * s; - - target_to_rescan->rescanned = 1; - - if ( target_to_rescan->flags & T_FLAG_INTERNAL ) - target_to_rescan = t->original_target; - - /* Clean current includes. */ - target_to_rescan->includes = 0; - - s = copysettings( target_to_rescan->settings ); - pushsettings( s ); - headers( target_to_rescan ); - popsettings( s ); - freesettings( s ); - - if ( target_to_rescan->includes ) - { - target_to_rescan->includes->rescanned = 1; - /* Tricky. The parents have already been processed, but they - * have not seen the internal node, because it was just - * created. We need to make the calls to make1a() that would - * have been made by the parents here, and also make sure - * all unprocessed parents will pick up the includes. We - * must make sure processing of the additional make1a() - * invocations is done before make1b() which means this - * target is built, otherwise the parent would be considered - * built before this make1a() processing has even started. - */ - make0( target_to_rescan->includes, target_to_rescan->parents->target, 0, 0, 0 ); - for ( c = target_to_rescan->parents; c; c = c->next ) - c->target->depends = targetentry( c->target->depends, - target_to_rescan->includes ); - /* Will be processed below. */ - additional_includes = target_to_rescan->includes; - } - } - - if ( additional_includes ) - for ( c = t->parents; c; c = c->next ) - push_state( &temp_stack, additional_includes, c->target, T_STATE_MAKE1A ); - - for ( c = t->parents; c; c = c->next ) - push_state( &temp_stack, c->target, NULL, T_STATE_MAKE1B ); - -#ifdef OPT_SEMAPHORE - /* If there is a semaphore, it is now free. */ - if ( t->semaphore ) - { - assert( t->semaphore->asynccnt == 1 ); - --t->semaphore->asynccnt; - - if ( DEBUG_EXECCMD ) - printf( "SEM: %s is now free\n", t->semaphore->name ); - - /* If anything is waiting, notify the next target. There is no - * point in notifying waiting targets, since they will be - * notified again. 
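The two symmetric loops at the start of make1c()'s completion branch first raise the target's status to the worst status among its actions, then write that maximum back so actions shared with other targets all record the same outcome. Condensed into one function (the higher-is-worse EXEC_* ordering shown is illustrative):

    /* Two-pass status reconciliation, as in make1c(). */
    enum { EXEC_OK = 0, EXEC_FAIL = 1, EXEC_INTR = 2 };

    struct action { int status; struct action * next; };

    static int reconcile_status( int target_status, struct action * actions )
    {
        struct action * a;
        for ( a = actions; a; a = a->next )   /* pass 1: take the maximum */
            if ( a->status > target_status )
                target_status = a->status;
        for ( a = actions; a; a = a->next )   /* pass 2: write it back */
            if ( target_status > a->status )
                a->status = target_status;
        return target_status;
    }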
- */ - if ( t->semaphore->parents ) - { - TARGETS * first = t->semaphore->parents; - if ( first->next ) - first->next->tail = first->tail; - t->semaphore->parents = first->next; - - if ( DEBUG_EXECCMD ) - printf( "SEM: placing %s on stack\n", first->target->name ); - push_state( &temp_stack, first->target, NULL, T_STATE_MAKE1B ); - BJAM_FREE( first ); - } - } -#endif - - /* Must pop state before pushing any more. */ - pop_state( &state_stack ); - - /* Using stacks reverses the order of execution. Reverse it back. */ - push_stack_on_stack( &state_stack, &temp_stack ); - } - } -} - - -/* - * call_timing_rule() - Look up the __TIMING_RULE__ variable on the given - * target, and if non-empty, invoke the rule it names, passing the given - * timing_info. - */ - -static void call_timing_rule( TARGET * target, timing_info * time ) -{ - LIST * timing_rule; - - pushsettings( target->settings ); - timing_rule = var_get( "__TIMING_RULE__" ); - popsettings( target->settings ); - - if ( timing_rule ) - { - /* rule timing-rule ( args * : target : start end user system ) */ - - /* Prepare the argument list. */ - FRAME frame[ 1 ]; - frame_init( frame ); - - /* args * :: $(__TIMING_RULE__[2-]) */ - lol_add( frame->args, list_copy( L0, timing_rule->next ) ); - - /* target :: the name of the target */ - lol_add( frame->args, list_new( L0, target->name ) ); - - /* start end user system :: info about the action command */ - lol_add( frame->args, list_new( list_new( list_new( list_new( L0, - outf_time ( time->start ) ), - outf_time ( time->end ) ), - outf_double( time->user ) ), - outf_double( time->system ) ) ); - - /* Call the rule. */ - evaluate_rule( timing_rule->string, frame ); - - /* Clean up. */ - frame_free( frame ); - } -} - - -/* - * call_action_rule() - Look up the __ACTION_RULE__ variable on the given - * target, and if non-empty, invoke the rule it names, passing the given info, - * timing_info, executed command and command output. - */ - -static void call_action_rule -( - TARGET * target, - int status, - timing_info * time, - char * executed_command, - char * command_output -) -{ - LIST * action_rule; - - pushsettings( target->settings ); - action_rule = var_get( "__ACTION_RULE__" ); - popsettings( target->settings ); - - if ( action_rule ) - { - /* rule action-rule ( - args * : - target : - command status start end user system : - output ? ) */ - - /* Prepare the argument list. */ - FRAME frame[ 1 ]; - frame_init( frame ); - - /* args * :: $(__ACTION_RULE__[2-]) */ - lol_add( frame->args, list_copy( L0, action_rule->next ) ); - - /* target :: the name of the target */ - lol_add( frame->args, list_new( L0, target->name ) ); - - /* command status start end user system :: info about the action command */ - lol_add( frame->args, - list_new( list_new( list_new( list_new( list_new( list_new( L0, - newstr( executed_command ) ), - outf_int( status ) ), - outf_time( time->start ) ), - outf_time( time->end ) ), - outf_double( time->user ) ), - outf_double( time->system ) ) ); - - /* output ? :: the output of the action command */ - if ( command_output ) - lol_add( frame->args, list_new( L0, newstr( command_output ) ) ); - else - lol_add( frame->args, L0 ); - - /* Call the rule. */ - evaluate_rule( action_rule->string, frame ); - - /* Clean up. */ - frame_free( frame ); - } -} - - -/* - * make_closure() - internal function passed as a notification callback for when - * commands finish getting executed by the OS. 
- */ - -static void make_closure -( - void * closure, - int status, - timing_info * time, - char * executed_command, - char * command_output -) -{ - TARGET * built = (TARGET *)closure; - - call_timing_rule( built, time ); - if ( DEBUG_EXECCMD ) - printf( "%f sec system; %f sec user\n", time->system, time->user ); - - call_action_rule( built, status, time, executed_command, command_output ); - - push_state( &state_stack, built, NULL, T_STATE_MAKE1D )->status = status; -} - - -/* - * make1d() - handle command execution completion and call back make1c(). - * - * exec_cmd() has completed and now all we need to do is fiddle with the status - * and call back to make1c() so it can run the next command scheduled for - * building this target or close up the target's build process in case there are - * no more commands scheduled for it. On interrupts, we bail heavily. - */ - -static void make1d( state * pState ) -{ - TARGET * t = pState->t; - CMD * cmd = (CMD *)t->cmds; - int status = pState->status; - - if ( t->flags & T_FLAG_FAIL_EXPECTED ) - { - /* Invert execution result when FAIL_EXPECTED has been applied. */ - switch ( status ) - { - case EXEC_CMD_FAIL: status = EXEC_CMD_OK ; break; - case EXEC_CMD_OK: status = EXEC_CMD_FAIL; break; - } - } - - if ( ( status == EXEC_CMD_FAIL ) && - ( cmd->rule->actions->flags & RULE_IGNORE ) ) - status = EXEC_CMD_OK; - - /* On interrupt, set intr so _everything_ fails. */ - if ( status == EXEC_CMD_INTR ) - ++intr; - - /* Print command text on failure. */ - if ( ( status == EXEC_CMD_FAIL ) && DEBUG_MAKE ) - { - if ( !DEBUG_EXEC ) - printf( "%s\n", cmd->buf ); - - printf( "...failed %s ", cmd->rule->name ); - list_print( lol_get( &cmd->args, 0 ) ); - printf( "...\n" ); - } - - /* Treat failed commands as interrupts in case we were asked to stop the - * build in case of any errors. - */ - if ( ( status == EXEC_CMD_FAIL ) && globs.quitquick ) - ++intr; - - /* If the command was interrupted or failed and the target is not - * "precious", remove the targets. - */ - if (status != EXEC_CMD_OK) - { - LIST * targets = lol_get( &cmd->args, 0 ); - for ( ; targets; targets = list_next( targets ) ) - { - int need_unlink = 1; - TARGET* t = bindtarget ( targets->string ); - if (t->flags & T_FLAG_PRECIOUS) - { - need_unlink = 0; - } - if (need_unlink && !unlink( targets->string ) ) - printf( "...removing %s\n", targets->string ); - } - } - - /* Free this command and call make1c() to move onto the next one scheduled - * for building this same target. - */ - t->status = status; - t->cmds = (char *)cmd_next( cmd ); - cmd_free( cmd ); - pState->curstate = T_STATE_MAKE1C; -} - - -/* - * swap_settings() - replace the settings from the current module and target - * with those from the new module and target - */ - -static void swap_settings -( - module_t * * current_module, - TARGET * * current_target, - module_t * new_module, - TARGET * new_target -) -{ - if ( new_module == root_module() ) - new_module = 0; - - if ( ( new_target == *current_target ) && ( new_module == *current_module ) ) - return; - - if ( *current_target ) - popsettings( (*current_target)->settings ); - - if ( new_module != *current_module ) - { - if ( *current_module ) - exit_module( *current_module ); - - *current_module = new_module; - - if ( new_module ) - enter_module( new_module ); - } - - *current_target = new_target; - if ( new_target ) - pushsettings( new_target->settings ); -} - - -/* - * make1cmds() - turn ACTIONS into CMDs, grouping, splitting, etc. 
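make_closure() above is the usual C callback idiom: exec_cmd() is handed a function pointer plus an opaque void * that the callback casts back to the typed object. The pattern in isolation (everything below is hypothetical):

    /* Completion callback with an opaque closure pointer. */
    #include <stdio.h>

    typedef void ( * done_fn )( void * closure, int status );

    struct target { const char * name; int status; };

    /* A stand-in executor: "runs" the command, then fires the callback. */
    static void run_command( const char * cmd, done_fn done, void * closure )
    {
        printf( "running: %s\n", cmd );
        done( closure, 0 );               /* pretend it succeeded */
    }

    static void on_done( void * closure, int status )
    {
        struct target * t = closure;      /* recover the typed object */
        t->status = status;
        printf( "%s finished with status %d\n", t->name, status );
    }

    int main( void )
    {
        struct target t = { "hello.o", -1 };
        run_command( "cc -c hello.c", on_done, &t );
        return 0;
    }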
- * - * Essentially copies a chain of ACTIONs to a chain of CMDs, grouping - * RULE_TOGETHER actions, splitting RULE_PIECEMEAL actions, and handling - * RULE_NEWSRCS actions. The result is a chain of CMDs which can be expanded by - * var_string() and executed using exec_cmd(). - */ - -static CMD * make1cmds( TARGET * t ) -{ - CMD * cmds = 0; - LIST * shell = 0; - module_t * settings_module = 0; - TARGET * settings_target = 0; - ACTIONS * a0; - - /* Step through actions. Actions may be shared with other targets or grouped - * using RULE_TOGETHER, so actions already seen are skipped. - */ - for ( a0 = t->actions ; a0; a0 = a0->next ) - { - RULE * rule = a0->action->rule; - rule_actions * actions = rule->actions; - SETTINGS * boundvars; - LIST * nt; - LIST * ns; - ACTIONS * a1; - int start; - int chunk; - int length; - - /* Only do rules with commands to execute. If this action has already - * been executed, use saved status. - */ - if ( !actions || a0->action->running ) - continue; - - a0->action->running = 1; - - /* Make LISTS of targets and sources. If `execute together` has been - * specified for this rule, tack on sources from each instance of this - * rule for this target. - */ - nt = make1list( L0, a0->action->targets, 0 ); - ns = make1list( L0, a0->action->sources, actions->flags ); - if ( actions->flags & RULE_TOGETHER ) - for ( a1 = a0->next; a1; a1 = a1->next ) - if ( a1->action->rule == rule && !a1->action->running ) - { - ns = make1list( ns, a1->action->sources, actions->flags ); - a1->action->running = 1; - } - - /* If doing only updated (or existing) sources, but none have been - * updated (or exist), skip this action. - */ - if ( !ns && ( actions->flags & ( RULE_NEWSRCS | RULE_EXISTING ) ) ) - { - list_free( nt ); - continue; - } - - swap_settings( &settings_module, &settings_target, rule->module, t ); - if ( !shell ) - shell = var_get( "JAMSHELL" ); /* shell is per-target */ - - /* If we had 'actions xxx bind vars' we bind the vars now. */ - boundvars = make1settings( actions->bindlist ); - pushsettings( boundvars ); - - /* - * Build command, starting with all source args. - * - * If cmd_new returns 0, it is because the resulting command length is - * > MAXLINE. In this case, we will slowly reduce the number of source - * arguments presented until it does fit. This only applies to actions - * that allow PIECEMEAL commands. - * - * While reducing slowly takes a bit of compute time to get things just - * right, it is worth it to get as close to MAXLINE as possible, because - * launching the commands we are executing is likely to be much more - * compute intensive. - * - * Note we loop through at least once, for sourceless actions. - */ - - start = 0; - chunk = length = list_length( ns ); - - do - { - /* Build cmd: cmd_new consumes its lists. */ - CMD * cmd = cmd_new( rule, - list_copy( L0, nt ), - list_sublist( ns, start, chunk ), - list_copy( L0, shell ) ); - - if ( cmd ) - { - /* It fit: chain it up. */ - if ( !cmds ) cmds = cmd; - else cmds->tail->next = cmd; - cmds->tail = cmd; - start += chunk; - } - else if ( ( actions->flags & RULE_PIECEMEAL ) && ( chunk > 1 ) ) - { - /* Reduce chunk size slowly. */ - chunk = chunk * 9 / 10; - } - else - { - /* Too long and not splittable. */ - printf( "%s actions too long (max %d):\n", rule->name, MAXLINE - ); - - /* Tell the user what didn't fit. 
*/ - cmd = cmd_new( rule, list_copy( L0, nt ), - list_sublist( ns, start, chunk ), - list_new( L0, newstr( "%" ) ) ); - fputs( cmd->buf, stdout ); - exit( EXITBAD ); - } - } - while ( start < length ); - - /* These were always copied when used. */ - list_free( nt ); - list_free( ns ); - - /* Free the variables whose values were bound by 'actions xxx bind - * vars'. - */ - popsettings( boundvars ); - freesettings( boundvars ); - } - - swap_settings( &settings_module, &settings_target, 0, 0 ); - return cmds; -} - - -/* - * make1list() - turn a list of targets into a LIST, for $(<) and $(>). - */ - -static LIST * make1list( LIST * l, TARGETS * targets, int flags ) -{ - for ( ; targets; targets = targets->next ) - { - TARGET * t = targets->target; - - if ( t->binding == T_BIND_UNBOUND ) - make1bind( t ); - - if ( ( flags & RULE_EXISTING ) && ( flags & RULE_NEWSRCS ) ) - { - if ( ( t->binding != T_BIND_EXISTS ) && ( t->fate <= T_FATE_STABLE ) ) - continue; - } - else - { - if ( ( flags & RULE_EXISTING ) && ( t->binding != T_BIND_EXISTS ) ) - continue; - - if ( ( flags & RULE_NEWSRCS ) && ( t->fate <= T_FATE_STABLE ) ) - continue; - } - - /* Prohibit duplicates for RULE_TOGETHER. */ - if ( flags & RULE_TOGETHER ) - { - LIST * m; - for ( m = l; m; m = m->next ) - if ( !strcmp( m->string, t->boundname ) ) - break; - if ( m ) - continue; - } - - /* Build new list. */ - l = list_new( l, copystr( t->boundname ) ); - } - - return l; -} - - -/* - * make1settings() - for vars that get bound values, build up replacement lists. - */ - -static SETTINGS * make1settings( LIST * vars ) -{ - SETTINGS * settings = 0; - - for ( ; vars; vars = list_next( vars ) ) - { - LIST * l = var_get( vars->string ); - LIST * nl = 0; - - for ( ; l; l = list_next( l ) ) - { - TARGET * t = bindtarget( l->string ); - - /* Make sure the target is bound. */ - if ( t->binding == T_BIND_UNBOUND ) - make1bind( t ); - - /* Build a new list. */ - nl = list_new( nl, copystr( t->boundname ) ); - } - - /* Add to settings chain. */ - settings = addsettings( settings, VAR_SET, vars->string, nl ); - } - - return settings; -} - - -/* - * make1bind() - bind targets that were not bound during dependency analysis - * - * Spot the kludge! If a target is not in the dependency tree, it did not get - * bound by make0(), so we have to do it here. Ugly. - */ - -static void make1bind( TARGET * t ) -{ - if ( t->flags & T_FLAG_NOTFILE ) - return; - - pushsettings( t->settings ); - t->boundname = search( t->name, &t->time, 0, ( t->flags & T_FLAG_ISFILE ) ); - t->binding = t->time ? T_BIND_EXISTS : T_BIND_MISSING; - popsettings( t->settings ); -} diff --git a/jam-files/engine/md5.c b/jam-files/engine/md5.c deleted file mode 100644 index c35d96c5..00000000 --- a/jam-files/engine/md5.c +++ /dev/null @@ -1,381 +0,0 @@ -/* - Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. 
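make1cmds()'s do/while above packs as many sources as fit under the command-line limit, advancing 'start' on success and shrinking the chunk by 9/10 otherwise. The same greedy loop over an array of argument lengths (the limit and data are invented for the demo):

    /* Greedy PIECEMEAL-style packing under a length limit. */
    #include <stdio.h>

    static int fits( const int * len, int start, int chunk, int limit )
    {
        int i, total = 0;
        for ( i = start; i < start + chunk; ++i )
            total += len[ i ] + 1;        /* +1 for the separating space */
        return total <= limit;
    }

    static void emit_chunks( const int * len, int n, int limit )
    {
        int start = 0;
        int chunk = n;
        while ( start < n )
        {
            if ( chunk > n - start )
                chunk = n - start;
            if ( fits( len, start, chunk, limit ) )
            {
                printf( "command with args [%d, %d)\n", start, start + chunk );
                start += chunk;           /* it fit: move along */
            }
            else if ( chunk > 1 )
                chunk = chunk * 9 / 10;   /* reduce slowly, as make1cmds does */
            else
            {
                printf( "argument %d alone exceeds the limit\n", start );
                return;                   /* too long and not splittable */
            }
        }
    }

    int main( void )
    {
        int len[] = { 40, 40, 40, 40, 200 };
        emit_chunks( len, 5, 120 );
        return 0;
    }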
Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. Peter Deutsch - ghost@aladdin.com - - */ -/* $Id: md5.c,v 1.6 2002/04/13 19:20:28 lpd Exp $ */ -/* - Independent implementation of MD5 (RFC 1321). - - This code implements the MD5 Algorithm defined in RFC 1321, whose - text is available at - http://www.ietf.org/rfc/rfc1321.txt - The code is derived from the text of the RFC, including the test suite - (section A.5) but excluding the rest of Appendix A. It does not include - any code or documentation that is identified in the RFC as being - copyrighted. - - The original and principal author of md5.c is L. Peter Deutsch - <ghost@aladdin.com>. Other authors are noted in the change history - that follows (in reverse chronological order): - - 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order - either statically or dynamically; added missing #include <string.h> - in library. - 2002-03-11 lpd Corrected argument list for main(), and added int return - type, in test program and T value program. - 2002-02-21 lpd Added missing #include <stdio.h> in test program. - 2000-07-03 lpd Patched to eliminate warnings about "constant is - unsigned in ANSI C, signed in traditional"; made test program - self-checking. - 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. - 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5). - 1999-05-03 lpd Original version. - */ - -#include "md5.h" -#include <string.h> - -#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */ -#ifdef ARCH_IS_BIG_ENDIAN -# define BYTE_ORDER (ARCH_IS_BIG_ENDIAN ? 1 : -1) -#else -# define BYTE_ORDER 0 -#endif - -#define T_MASK ((md5_word_t)~0) -#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87) -#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9) -#define T3 0x242070db -#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111) -#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050) -#define T6 0x4787c62a -#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec) -#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe) -#define T9 0x698098d8 -#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850) -#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e) -#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841) -#define T13 0x6b901122 -#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c) -#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71) -#define T16 0x49b40821 -#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d) -#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf) -#define T19 0x265e5a51 -#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855) -#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2) -#define T22 0x02441453 -#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e) -#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437) -#define T25 0x21e1cde6 -#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829) -#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278) -#define T28 0x455a14ed -#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa) -#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07) -#define T31 0x676f02d9 -#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375) -#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd) -#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e) -#define T35 0x6d9d6122 -#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3) -#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb) -#define T38 0x4bdecfa9 -#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f) -#define T40 /* 0xbebfbc70 
*/ (T_MASK ^ 0x4140438f) -#define T41 0x289b7ec6 -#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805) -#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a) -#define T44 0x04881d05 -#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6) -#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a) -#define T47 0x1fa27cf8 -#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a) -#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb) -#define T50 0x432aff97 -#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58) -#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6) -#define T53 0x655b59c3 -#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d) -#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82) -#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e) -#define T57 0x6fa87e4f -#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f) -#define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb) -#define T60 0x4e0811a1 -#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d) -#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca) -#define T63 0x2ad7d2bb -#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e) - - -static void -md5_process(md5_state_t *pms, const md5_byte_t *data /*[64]*/) -{ - md5_word_t - a = pms->abcd[0], b = pms->abcd[1], - c = pms->abcd[2], d = pms->abcd[3]; - md5_word_t t; -#if BYTE_ORDER > 0 - /* Define storage only for big-endian CPUs. */ - md5_word_t X[16]; -#else - /* Define storage for little-endian or both types of CPUs. */ - md5_word_t xbuf[16]; - const md5_word_t *X; -#endif - - { -#if BYTE_ORDER == 0 - /* - * Determine dynamically whether this is a big-endian or - * little-endian machine, since we can use a more efficient - * algorithm on the latter. - */ - static const int w = 1; - - if (*((const md5_byte_t *)&w)) /* dynamic little-endian */ -#endif -#if BYTE_ORDER <= 0 /* little-endian */ - { - /* - * On little-endian machines, we can process properly aligned - * data without copying it. - */ - if (!((data - (const md5_byte_t *)0) & 3)) { - /* data are properly aligned */ - X = (const md5_word_t *)data; - } else { - /* not aligned */ - memcpy(xbuf, data, 64); - X = xbuf; - } - } -#endif -#if BYTE_ORDER == 0 - else /* dynamic big-endian */ -#endif -#if BYTE_ORDER >= 0 /* big-endian */ - { - /* - * On big-endian machines, we must arrange the bytes in the - * right order. - */ - const md5_byte_t *xp = data; - int i; - -# if BYTE_ORDER == 0 - X = xbuf; /* (dynamic only) */ -# else -# define xbuf X /* (static only) */ -# endif - for (i = 0; i < 16; ++i, xp += 4) - xbuf[i] = xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24); - } -#endif - } - -#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) - - /* Round 1. */ - /* Let [abcd k s i] denote the operation - a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */ -#define F(x, y, z) (((x) & (y)) | (~(x) & (z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + F(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 0, 7, T1); - SET(d, a, b, c, 1, 12, T2); - SET(c, d, a, b, 2, 17, T3); - SET(b, c, d, a, 3, 22, T4); - SET(a, b, c, d, 4, 7, T5); - SET(d, a, b, c, 5, 12, T6); - SET(c, d, a, b, 6, 17, T7); - SET(b, c, d, a, 7, 22, T8); - SET(a, b, c, d, 8, 7, T9); - SET(d, a, b, c, 9, 12, T10); - SET(c, d, a, b, 10, 17, T11); - SET(b, c, d, a, 11, 22, T12); - SET(a, b, c, d, 12, 7, T13); - SET(d, a, b, c, 13, 12, T14); - SET(c, d, a, b, 14, 17, T15); - SET(b, c, d, a, 15, 22, T16); -#undef SET - - /* Round 2. */ - /* Let [abcd k s i] denote the operation - a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). 
*/ -#define G(x, y, z) (((x) & (z)) | ((y) & ~(z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + G(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 1, 5, T17); - SET(d, a, b, c, 6, 9, T18); - SET(c, d, a, b, 11, 14, T19); - SET(b, c, d, a, 0, 20, T20); - SET(a, b, c, d, 5, 5, T21); - SET(d, a, b, c, 10, 9, T22); - SET(c, d, a, b, 15, 14, T23); - SET(b, c, d, a, 4, 20, T24); - SET(a, b, c, d, 9, 5, T25); - SET(d, a, b, c, 14, 9, T26); - SET(c, d, a, b, 3, 14, T27); - SET(b, c, d, a, 8, 20, T28); - SET(a, b, c, d, 13, 5, T29); - SET(d, a, b, c, 2, 9, T30); - SET(c, d, a, b, 7, 14, T31); - SET(b, c, d, a, 12, 20, T32); -#undef SET - - /* Round 3. */ - /* Let [abcd k s t] denote the operation - a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */ -#define H(x, y, z) ((x) ^ (y) ^ (z)) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + H(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 5, 4, T33); - SET(d, a, b, c, 8, 11, T34); - SET(c, d, a, b, 11, 16, T35); - SET(b, c, d, a, 14, 23, T36); - SET(a, b, c, d, 1, 4, T37); - SET(d, a, b, c, 4, 11, T38); - SET(c, d, a, b, 7, 16, T39); - SET(b, c, d, a, 10, 23, T40); - SET(a, b, c, d, 13, 4, T41); - SET(d, a, b, c, 0, 11, T42); - SET(c, d, a, b, 3, 16, T43); - SET(b, c, d, a, 6, 23, T44); - SET(a, b, c, d, 9, 4, T45); - SET(d, a, b, c, 12, 11, T46); - SET(c, d, a, b, 15, 16, T47); - SET(b, c, d, a, 2, 23, T48); -#undef SET - - /* Round 4. */ - /* Let [abcd k s t] denote the operation - a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */ -#define I(x, y, z) ((y) ^ ((x) | ~(z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + I(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 0, 6, T49); - SET(d, a, b, c, 7, 10, T50); - SET(c, d, a, b, 14, 15, T51); - SET(b, c, d, a, 5, 21, T52); - SET(a, b, c, d, 12, 6, T53); - SET(d, a, b, c, 3, 10, T54); - SET(c, d, a, b, 10, 15, T55); - SET(b, c, d, a, 1, 21, T56); - SET(a, b, c, d, 8, 6, T57); - SET(d, a, b, c, 15, 10, T58); - SET(c, d, a, b, 6, 15, T59); - SET(b, c, d, a, 13, 21, T60); - SET(a, b, c, d, 4, 6, T61); - SET(d, a, b, c, 11, 10, T62); - SET(c, d, a, b, 2, 15, T63); - SET(b, c, d, a, 9, 21, T64); -#undef SET - - /* Then perform the following additions. (That is increment each - of the four registers by the value it had before this block - was started.) */ - pms->abcd[0] += a; - pms->abcd[1] += b; - pms->abcd[2] += c; - pms->abcd[3] += d; -} - -void -md5_init(md5_state_t *pms) -{ - pms->count[0] = pms->count[1] = 0; - pms->abcd[0] = 0x67452301; - pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476; - pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301; - pms->abcd[3] = 0x10325476; -} - -void -md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes) -{ - const md5_byte_t *p = data; - int left = nbytes; - int offset = (pms->count[0] >> 3) & 63; - md5_word_t nbits = (md5_word_t)(nbytes << 3); - - if (nbytes <= 0) - return; - - /* Update the message length. */ - pms->count[1] += nbytes >> 29; - pms->count[0] += nbits; - if (pms->count[0] < nbits) - pms->count[1]++; - - /* Process an initial partial block. */ - if (offset) { - int copy = (offset + nbytes > 64 ? 64 - offset : nbytes); - - memcpy(pms->buf + offset, p, copy); - if (offset + copy < 64) - return; - p += copy; - left -= copy; - md5_process(pms, pms->buf); - } - - /* Process full blocks. 
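When BYTE_ORDER is unknown at compile time, md5_process() above probes endianness at run time by inspecting the first byte of an int set to 1. The probe in isolation:

    /* Run-time endianness detection, as in md5_process(). */
    #include <stdio.h>

    int main( void )
    {
        static const int w = 1;
        if ( *(const unsigned char *)&w )
            printf( "little-endian: words can be read in place\n" );
        else
            printf( "big-endian: bytes must be reassembled explicitly\n" );
        return 0;
    }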
*/ - for (; left >= 64; p += 64, left -= 64) - md5_process(pms, p); - - /* Process a final partial block. */ - if (left) - memcpy(pms->buf, p, left); -} - -void -md5_finish(md5_state_t *pms, md5_byte_t digest[16]) -{ - static const md5_byte_t pad[64] = { - 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - md5_byte_t data[8]; - int i; - - /* Save the length before padding. */ - for (i = 0; i < 8; ++i) - data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3)); - /* Pad to 56 bytes mod 64. */ - md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1); - /* Append the length. */ - md5_append(pms, data, 8); - for (i = 0; i < 16; ++i) - digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3)); -} diff --git a/jam-files/engine/md5.h b/jam-files/engine/md5.h deleted file mode 100644 index 698c995d..00000000 --- a/jam-files/engine/md5.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - Copyright (C) 1999, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. Peter Deutsch - ghost@aladdin.com - - */ -/* $Id: md5.h,v 1.4 2002/04/13 19:20:28 lpd Exp $ */ -/* - Independent implementation of MD5 (RFC 1321). - - This code implements the MD5 Algorithm defined in RFC 1321, whose - text is available at - http://www.ietf.org/rfc/rfc1321.txt - The code is derived from the text of the RFC, including the test suite - (section A.5) but excluding the rest of Appendix A. It does not include - any code or documentation that is identified in the RFC as being - copyrighted. - - The original and principal author of md5.h is L. Peter Deutsch - <ghost@aladdin.com>. Other authors are noted in the change history - that follows (in reverse chronological order): - - 2002-04-13 lpd Removed support for non-ANSI compilers; removed - references to Ghostscript; clarified derivation from RFC 1321; - now handles byte order either statically or dynamically. - 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. - 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5); - added conditionalization for C++ compilation from Martin - Purschke <purschke@bnl.gov>. - 1999-05-03 lpd Original version. - */ - -#ifndef md5_INCLUDED -# define md5_INCLUDED - -/* - * This package supports both compile-time and run-time determination of CPU - * byte order. 
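The md5_init()/md5_append()/md5_finish() interface is the classic streaming digest: feed any number of buffers, then close. Typical usage against the header, hashing a string and printing the 32-hex-digit result (a sketch, assuming md5.h is on the include path):

    /* Streaming use of the MD5 API. */
    #include <stdio.h>
    #include <string.h>
    #include "md5.h"

    int main( void )
    {
        const char * msg = "hello world";
        md5_state_t state;
        md5_byte_t digest[ 16 ];
        int i;

        md5_init( &state );
        md5_append( &state, (const md5_byte_t *)msg, (int)strlen( msg ) );
        md5_finish( &state, digest );   /* pads and appends the bit length */

        for ( i = 0; i < 16; ++i )
            printf( "%02x", digest[ i ] );
        printf( "\n" );
        return 0;
    }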
If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be - * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is - * defined as non-zero, the code will be compiled to run only on big-endian - * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to - * run on either big- or little-endian CPUs, but will run slightly less - * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined. - */ - -typedef unsigned char md5_byte_t; /* 8-bit byte */ -typedef unsigned int md5_word_t; /* 32-bit word */ - -/* Define the state of the MD5 Algorithm. */ -typedef struct md5_state_s { - md5_word_t count[2]; /* message length in bits, lsw first */ - md5_word_t abcd[4]; /* digest buffer */ - md5_byte_t buf[64]; /* accumulate block */ -} md5_state_t; - -#ifdef __cplusplus -extern "C" -{ -#endif - -/* Initialize the algorithm. */ -void md5_init(md5_state_t *pms); - -/* Append a string to the message. */ -void md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes); - -/* Finish the message and return the digest. */ -void md5_finish(md5_state_t *pms, md5_byte_t digest[16]); - -#ifdef __cplusplus -} /* end extern "C" */ -#endif - -#endif /* md5_INCLUDED */ diff --git a/jam-files/engine/mem.c b/jam-files/engine/mem.c deleted file mode 100644 index 6a11fb38..00000000 --- a/jam-files/engine/mem.c +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright Rene Rivera 2006. -Distributed under the Boost Software License, Version 1.0. -(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#include "jam.h" - -#ifdef OPT_BOEHM_GC - - /* Compile the Boehm GC as one big chunk of code. It's much easier - this way, than trying to make radical changes to the bjam build - scripts. */ - - #define ATOMIC_UNCOLLECTABLE - #define NO_EXECUTE_PERMISSION - #define ALL_INTERIOR_POINTERS - - #define LARGE_CONFIG - /* - #define NO_SIGNALS - #define SILENT - */ - #ifndef GC_DEBUG - #define NO_DEBUGGING - #endif - - #ifdef __GLIBC__ - #define __USE_GNU - #endif - - #include "boehm_gc/reclaim.c" - #include "boehm_gc/allchblk.c" - #include "boehm_gc/misc.c" - #include "boehm_gc/alloc.c" - #include "boehm_gc/mach_dep.c" - #include "boehm_gc/os_dep.c" - #include "boehm_gc/mark_rts.c" - #include "boehm_gc/headers.c" - #include "boehm_gc/mark.c" - #include "boehm_gc/obj_map.c" - #include "boehm_gc/pcr_interface.c" - #include "boehm_gc/blacklst.c" - #include "boehm_gc/new_hblk.c" - #include "boehm_gc/real_malloc.c" - #include "boehm_gc/dyn_load.c" - #include "boehm_gc/dbg_mlc.c" - #include "boehm_gc/malloc.c" - #include "boehm_gc/stubborn.c" - #include "boehm_gc/checksums.c" - #include "boehm_gc/pthread_support.c" - #include "boehm_gc/pthread_stop_world.c" - #include "boehm_gc/darwin_stop_world.c" - #include "boehm_gc/typd_mlc.c" - #include "boehm_gc/ptr_chck.c" - #include "boehm_gc/mallocx.c" - #include "boehm_gc/gcj_mlc.c" - #include "boehm_gc/specific.c" - #include "boehm_gc/gc_dlopen.c" - #include "boehm_gc/backgraph.c" - #include "boehm_gc/win32_threads.c" - - /* Needs to be last. */ - #include "boehm_gc/finalize.c" - -#elif defined(OPT_DUMA) - - #ifdef OS_NT - #define WIN32 - #endif - #include "duma/duma.c" - #include "duma/print.c" - -#endif diff --git a/jam-files/engine/mem.h b/jam-files/engine/mem.h deleted file mode 100644 index 71b2fb4b..00000000 --- a/jam-files/engine/mem.h +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright Rene Rivera 2006. -Distributed under the Boost Software License, Version 1.0. 
-(See accompanying file LICENSE_1_0.txt or copy at -http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BJAM_MEM_H -#define BJAM_MEM_H - - -#ifdef OPT_BOEHM_GC - - /* Use Boehm GC memory allocator. */ - #include <gc.h> - #define bjam_malloc_x(s) memset(GC_malloc(s),0,s) - #define bjam_malloc_atomic_x(s) memset(GC_malloc_atomic(s),0,s) - #define bjam_calloc_x(n,s) memset(GC_malloc((n)*(s)),0,(n)*(s)) - #define bjam_calloc_atomic_x(n,s) memset(GC_malloc_atomic((n)*(s)),0,(n)*(s)) - #define bjam_realloc_x(p,s) GC_realloc(p,s) - #define bjam_free_x(p) GC_free(p) - #define bjam_mem_init_x() GC_init(); GC_enable_incremental() - - #define bjam_malloc_raw_x(s) malloc(s) - #define bjam_calloc_raw_x(n,s) calloc(n,s) - #define bjam_realloc_raw_x(p,s) realloc(p,s) - #define bjam_free_raw_x(p) free(p) - - #ifndef BJAM_NEWSTR_NO_ALLOCATE - #define BJAM_NEWSTR_NO_ALLOCATE - #endif - -#elif defined(OPT_DUMA) - - /* Use Duma memory debugging library. */ - #include <stdlib.h> - #define _DUMA_CONFIG_H_ - #define DUMA_NO_GLOBAL_MALLOC_FREE - #define DUMA_EXPLICIT_INIT - #define DUMA_NO_THREAD_SAFETY - #define DUMA_NO_CPP_SUPPORT - /* #define DUMA_NO_LEAKDETECTION */ - /* #define DUMA_USE_FRAMENO */ - /* #define DUMA_PREFER_ATEXIT */ - /* #define DUMA_OLD_DEL_MACRO */ - /* #define DUMA_NO_HANG_MSG */ - #define DUMA_PAGE_SIZE 4096 - #define DUMA_MIN_ALIGNMENT 1 - /* #define DUMA_GNU_INIT_ATTR 0 */ - typedef unsigned int DUMA_ADDR; - typedef unsigned int DUMA_SIZE; - #include <duma.h> - #define bjam_malloc_x(s) malloc(s) - #define bjam_calloc_x(n,s) calloc(n,s) - #define bjam_realloc_x(p,s) realloc(p,s) - #define bjam_free_x(p) free(p) - - #ifndef BJAM_NEWSTR_NO_ALLOCATE - #define BJAM_NEWSTR_NO_ALLOCATE - #endif - -#else - - /* Standard C memory allocation. */ - #define bjam_malloc_x(s) malloc(s) - #define bjam_calloc_x(n,s) calloc(n,s) - #define bjam_realloc_x(p,s) realloc(p,s) - #define bjam_free_x(p) free(p) - -#endif - -#ifndef bjam_malloc_atomic_x - #define bjam_malloc_atomic_x(s) bjam_malloc_x(s) -#endif -#ifndef bjam_calloc_atomic_x - #define bjam_calloc_atomic_x(n,s) bjam_calloc_x(n,s) -#endif -#ifndef bjam_mem_init_x - #define bjam_mem_init_x() -#endif -#ifndef bjam_mem_close_x - #define bjam_mem_close_x() -#endif -#ifndef bjam_malloc_raw_x - #define bjam_malloc_raw_x(s) bjam_malloc_x(s) -#endif -#ifndef bjam_calloc_raw_x - #define bjam_calloc_raw_x(n,s) bjam_calloc_x(n,s) -#endif -#ifndef bjam_realloc_raw_x - #define bjam_realloc_raw_x(p,s) bjam_realloc_x(p,s) -#endif -#ifndef bjam_free_raw_x - #define bjam_free_raw_x(p) bjam_free_x(p) -#endif - -#ifdef OPT_DEBUG_PROFILE - - /* Profile tracing of memory allocations. */ - #define BJAM_MALLOC(s) (profile_memory(s), bjam_malloc_x(s)) - #define BJAM_MALLOC_ATOMIC(s) (profile_memory(s), bjam_malloc_atomic_x(s)) - #define BJAM_CALLOC(n,s) (profile_memory(n*s), bjam_calloc_x(n,s)) - #define BJAM_CALLOC_ATOMIC(n,s) (profile_memory(n*s), bjam_calloc_atomic_x(n,s)) - #define BJAM_REALLOC(p,s) (profile_memory(s), bjam_realloc_x(p,s)) - #define BJAM_FREE(p) bjam_free_x(p) - #define BJAM_MEM_INIT() bjam_mem_init_x() - #define BJAM_MEM_CLOSE() bjam_mem_close_x() - - #define BJAM_MALLOC_RAW(s) (profile_memory(s), bjam_malloc_raw_x(s)) - #define BJAM_CALLOC_RAW(n,s) (profile_memory(n*s), bjam_calloc_raw_x(n,s)) - #define BJAM_REALLOC_RAW(p,s) (profile_memory(s), bjam_realloc_raw_x(p,s)) - #define BJAM_FREE_RAW(p) bjam_free_raw_x(p) - -#else - - /* No mem tracing. 
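The OPT_DEBUG_PROFILE branch above relies on the comma operator: the macro records the allocation size first, then still yields the underlying allocator's return value. The trick in miniature (DEBUG_PROFILE and all names here are hypothetical):

    /* Comma-operator wrapper for profiled allocation. */
    #include <stdio.h>
    #include <stdlib.h>

    static size_t total_allocated;

    static void profile_memory( size_t s ) { total_allocated += s; }

    #ifdef DEBUG_PROFILE
    #  define MY_MALLOC( s ) ( profile_memory( s ), malloc( s ) )
    #else
    #  define MY_MALLOC( s ) malloc( s )
    #endif

    int main( void )
    {
        void * p = MY_MALLOC( 64 );   /* with DEBUG_PROFILE: tallies 64 first */
        free( p );
        printf( "%zu bytes recorded\n", total_allocated );
        return 0;
    }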
*/ - #define BJAM_MALLOC(s) bjam_malloc_x(s) - #define BJAM_MALLOC_ATOMIC(s) bjam_malloc_atomic_x(s) - #define BJAM_CALLOC(n,s) bjam_calloc_x(n,s) - #define BJAM_CALLOC_ATOMIC(n,s) bjam_calloc_atomic_x(n,s) - #define BJAM_REALLOC(p,s) bjam_realloc_x(p,s) - #define BJAM_FREE(p) bjam_free_x(p) - #define BJAM_MEM_INIT() bjam_mem_init_x() - #define BJAM_MEM_CLOSE() bjam_mem_close_x() - - #define BJAM_MALLOC_RAW(s) bjam_malloc_raw_x(s) - #define BJAM_CALLOC_RAW(n,s) bjam_calloc_raw_x(n,s) - #define BJAM_REALLOC_RAW(p,s) bjam_realloc_raw_x(p,s) - #define BJAM_FREE_RAW(p) bjam_free_raw_x(p) - -#endif - - -#endif diff --git a/jam-files/engine/mkjambase.c b/jam-files/engine/mkjambase.c deleted file mode 100644 index cdf59982..00000000 --- a/jam-files/engine/mkjambase.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -/* - * mkjambase.c - turn Jambase into a big C structure - * - * Usage: mkjambase jambase.c Jambase ... - * - * Results look like this: - * - * char *jambase[] = { - * "...\n", - * ... - * 0 }; - * - * Handles \'s and "'s specially; knows to delete blank and comment lines. - * - */ - -#include <stdio.h> -#include <string.h> - - -int main( int argc, char * * argv, char * * envp ) -{ - char buf[ 1024 ]; - FILE * fin; - FILE * fout; - char * p; - int doDotC = 0; - - if ( argc < 3 ) - { - fprintf( stderr, "usage: %s jambase.c Jambase ...\n", argv[ 0 ] ); - return -1; - } - - if ( !( fout = fopen( argv[1], "w" ) ) ) - { - perror( argv[ 1 ] ); - return -1; - } - - /* If the file ends in .c generate a C source file. */ - if ( ( p = strrchr( argv[1], '.' ) ) && !strcmp( p, ".c" ) ) - doDotC++; - - /* Now process the files. */ - - argc -= 2; - argv += 2; - - if ( doDotC ) - { - fprintf( fout, "/* Generated by mkjambase from Jambase */\n" ); - fprintf( fout, "char *jambase[] = {\n" ); - } - - for ( ; argc--; ++argv ) - { - if ( !( fin = fopen( *argv, "r" ) ) ) - { - perror( *argv ); - return -1; - } - - if ( doDotC ) - fprintf( fout, "/* %s */\n", *argv ); - else - fprintf( fout, "### %s ###\n", *argv ); - - while ( fgets( buf, sizeof( buf ), fin ) ) - { - if ( doDotC ) - { - char * p = buf; - - /* Strip leading whitespace. */ - while ( ( *p == ' ' ) || ( *p == '\t' ) || ( *p == '\n' ) ) - ++p; - - /* Drop comments and empty lines. */ - if ( ( *p == '#' ) || !*p ) - continue; - - /* Copy. */ - putc( '"', fout ); - for ( ; *p && ( *p != '\n' ); ++p ) - switch ( *p ) - { - case '\\': putc( '\\', fout ); putc( '\\', fout ); break; - case '"' : putc( '\\', fout ); putc( '"' , fout ); break; - case '\r': break; - default: putc( *p, fout ); break; - } - - fprintf( fout, "\\n\",\n" ); - } - else - { - fprintf( fout, "%s", buf ); - } - } - - fclose( fin ); - } - - if ( doDotC ) - fprintf( fout, "0 };\n" ); - - fclose( fout ); - - return 0; -} diff --git a/jam-files/engine/modules.c b/jam-files/engine/modules.c deleted file mode 100644 index 72952594..00000000 --- a/jam-files/engine/modules.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. 
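mkjambase.c above turns the Jambase text into a C array so the engine can carry its base rules inside the binary instead of reading a file at startup. What the generated output looks like, and how a consumer walks it (the contents are illustrative):

    /* The shape of mkjambase's output: a NULL-terminated string array. */
    #include <stdio.h>

    char * jambase[] = {
        "rule DEFAULT\n",
        "{\n",
        "    ECHO hello ;\n",
        "}\n",
        0
    };

    int main( void )
    {
        char * * line;
        for ( line = jambase; *line; ++line )  /* stop at the 0 terminator */
            fputs( *line, stdout );
        return 0;
    }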
- * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ -#include "jam.h" - -#include "modules.h" -#include "string.h" -#include "hash.h" -#include "newstr.h" -#include "lists.h" -#include "parse.h" -#include "rules.h" -#include "variable.h" -#include "strings.h" -#include <assert.h> - -static struct hash * module_hash = 0; - - -static char * new_module_str( module_t * m, char * suffix ) -{ - char * result; - string s; - string_copy( &s, m->name ); - string_append( &s, suffix ); - result = newstr( s.value ); - string_free( &s ); - return result; -} - - -module_t * bindmodule( char * name ) -{ - PROFILE_ENTER( BINDMODULE ); - - string s; - module_t m_; - module_t * m = &m_; - - if ( !module_hash ) - module_hash = hashinit( sizeof( module_t ), "modules" ); - - string_new( &s ); - if ( name ) - { - string_append( &s, name ); - string_push_back( &s, '.' ); - } - - m->name = s.value; - - if ( hashenter( module_hash, (HASHDATA * *)&m ) ) - { - m->name = newstr( m->name ); - m->variables = 0; - m->rules = 0; - m->imported_modules = 0; - m->class_module = 0; - m->native_rules = 0; - m->user_module = 0; - } - string_free( &s ); - - PROFILE_EXIT( BINDMODULE ); - - return m; -} - -/* - * demand_rules() - Get the module's "rules" hash on demand. - */ -struct hash * demand_rules( module_t * m ) -{ - if ( !m->rules ) - m->rules = hashinit( sizeof( RULE ), new_module_str( m, "rules" ) ); - return m->rules; -} - - -/* - * delete_module() - wipe out the module's rules and variables. - */ - -static void delete_rule_( void * xrule, void * data ) -{ - rule_free( (RULE *)xrule ); -} - - -void delete_module( module_t * m ) -{ - /* Clear out all the rules. */ - if ( m->rules ) - { - hashenumerate( m->rules, delete_rule_, (void *)0 ); - hashdone( m->rules ); - m->rules = 0; - } - - if ( m->variables ) - { - var_hash_swap( &m->variables ); - var_done(); - var_hash_swap( &m->variables ); - m->variables = 0; - } -} - - -module_t * root_module() -{ - static module_t * root = 0; - if ( !root ) - root = bindmodule( 0 ); - return root; -} - -void enter_module( module_t * m ) -{ - var_hash_swap( &m->variables ); -} - - -void exit_module( module_t * m ) -{ - var_hash_swap( &m->variables ); -} - - -void import_module( LIST * module_names, module_t * target_module ) -{ - PROFILE_ENTER( IMPORT_MODULE ); - - struct hash * h; - - if ( !target_module->imported_modules ) - target_module->imported_modules = hashinit( sizeof( char * ), "imported" ); - h = target_module->imported_modules; - - for ( ; module_names; module_names = module_names->next ) - { - char * s = module_names->string; - char * * ss = &s; - hashenter( h, (HASHDATA * *)&ss ); - } - - PROFILE_EXIT( IMPORT_MODULE ); -} - - -static void add_module_name( void * r_, void * result_ ) -{ - char * * r = (char * *)r_; - LIST * * result = (LIST * *)result_; - - *result = list_new( *result, copystr( *r ) ); -} - - -LIST * imported_modules( module_t * module ) -{ - LIST * result = L0; - if ( module->imported_modules ) - hashenumerate( module->imported_modules, add_module_name, &result ); - return result; -} diff --git a/jam-files/engine/modules.h b/jam-files/engine/modules.h deleted file mode 100644 index 60053a23..00000000 --- a/jam-files/engine/modules.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. 
- * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ -#ifndef MODULES_DWA10182001_H -# define MODULES_DWA10182001_H - -#include "lists.h" - -struct module_t -{ - char* name; - struct hash* rules; - struct hash* variables; - struct hash* imported_modules; - struct module_t* class_module; - struct hash* native_rules; - int user_module; -}; - -typedef struct module_t module_t ; /* MSVC debugger gets confused unless this is provided */ - -module_t* bindmodule( char* name ); -module_t* root_module(); -void enter_module( module_t* ); -void exit_module( module_t* ); -void delete_module( module_t* ); - -void import_module(LIST* module_names, module_t* target_module); -LIST* imported_modules(module_t* module); - -struct hash* demand_rules( module_t* ); - - -#endif - diff --git a/jam-files/engine/modules/order.c b/jam-files/engine/modules/order.c deleted file mode 100644 index d77943a7..00000000 --- a/jam-files/engine/modules/order.c +++ /dev/null @@ -1,144 +0,0 @@ -/* Copyright Vladimir Prus 2004. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#include "../native.h" -#include "../lists.h" -#include "../strings.h" -#include "../newstr.h" -#include "../variable.h" - - -/* Use quite klugy approach: when we add order dependency from 'a' to 'b', - just append 'b' to of value of variable 'a'. -*/ -LIST *add_pair( PARSE *parse, FRAME *frame ) -{ - LIST* arg = lol_get( frame->args, 0 ); - - var_set(arg->string, list_copy(0, arg->next), VAR_APPEND); - - return L0; -} - -/** Given a list and a value, returns position of that value in - the list, or -1 if not found. -*/ -int list_index(LIST* list, const char* value) -{ - int result = 0; - for(; list; list = list->next, ++result) { - if (strcmp(list->string, value) == 0) - return result; - } - return -1; -} - -enum colors { white, gray, black }; - -/* Main routite of topological sort. Calls itself recursively on all - adjacent vertices which were not yet visited. After that, 'current_vertex' - is added to '*result_ptr'. -*/ -void do_ts(int** graph, int current_vertex, int* colors, int** result_ptr) -{ - int i; - - colors[current_vertex] = gray; - for(i = 0; graph[current_vertex][i] != -1; ++i) { - int adjacent_vertex = graph[current_vertex][i]; - - if (colors[adjacent_vertex] == white) - do_ts(graph, adjacent_vertex, colors, result_ptr); - /* The vertex is either black, in which case we don't have to do - anything, a gray, in which case we have a loop. If we have a loop, - it's not clear what useful diagnostic we can emit, so we emit - nothing. */ - } - colors[current_vertex] = black; - **result_ptr = current_vertex; - (*result_ptr)++; -} - -void topological_sort(int** graph, int num_vertices, int* result) -{ - int i; - int* colors = (int*)BJAM_CALLOC(num_vertices, sizeof(int)); - for (i = 0; i < num_vertices; ++i) - colors[i] = white; - - for(i = 0; i < num_vertices; ++i) - if (colors[i] == white) - do_ts(graph, i, colors, &result); - - BJAM_FREE(colors); -} - -LIST *order( PARSE *parse, FRAME *frame ) -{ - LIST* arg = lol_get( frame->args, 0 ); - LIST* tmp; - LIST* result = 0; - int src; - - /* We need to create a graph of order dependencies between - the passed objects. We assume that there are no duplicates - passed to 'add_pair'. 
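do_ts() and topological_sort() above are the textbook depth-first topological sort with white/gray/black colouring: a vertex is emitted only after all of its successors, and meeting a gray vertex would mean a cycle, which order.c knowingly leaves undiagnosed. The same algorithm as a self-contained sketch (hypothetical three-vertex graph, not Jam code):

    #include <stdio.h>

    enum { WHITE, GRAY, BLACK };

    /* adj[v] is a -1 terminated list of vertices v points at,
     * matching the representation built by order.c. */
    static void dfs(int v, int *adj[], int color[], int **out)
    {
        color[v] = GRAY;
        for (int i = 0; adj[v][i] != -1; ++i)
        {
            /* A GRAY neighbour here would mean a cycle; like
             * order.c, this sketch silently skips it. */
            if (color[adj[v][i]] == WHITE)
                dfs(adj[v][i], adj, color, out);
        }
        color[v] = BLACK;
        *(*out)++ = v;           /* emit v after all its successors */
    }

    int main(void)
    {
        /* Edges: 0 -> 1, 0 -> 2, 1 -> 2. */
        int e0[] = { 1, 2, -1 }, e1[] = { 2, -1 }, e2[] = { -1 };
        int *adj[] = { e0, e1, e2 };
        int color[3] = { WHITE, WHITE, WHITE };
        int result[3], *out = result;

        for (int v = 0; v < 3; ++v)
            if (color[v] == WHITE)
                dfs(v, adj, color, &out);

        for (int i = 0; i < 3; ++i)   /* prints 2 1 0 */
            printf("%d ", result[i]);
        putchar('\n');
        return 0;
    }

order() then reads the emitted array back-to-front to build its final list.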
- */ - int length = list_length(arg); - int** graph = (int**)BJAM_CALLOC(length, sizeof(int*)); - int* order = (int*)BJAM_MALLOC((length+1)*sizeof(int)); - - for(tmp = arg, src = 0; tmp; tmp = tmp->next, ++src) { - /* For all object this one depend upon, add elements - to 'graph' */ - LIST* dependencies = var_get(tmp->string); - int index = 0; - - graph[src] = (int*)BJAM_CALLOC(list_length(dependencies)+1, sizeof(int)); - for(; dependencies; dependencies = dependencies->next) { - int dst = list_index(arg, dependencies->string); - if (dst != -1) - graph[src][index++] = dst; - } - graph[src][index] = -1; - } - - topological_sort(graph, length, order); - - { - int index = length-1; - for(; index >= 0; --index) { - int i; - tmp = arg; - for (i = 0; i < order[index]; ++i, tmp = tmp->next); - result = list_new(result, tmp->string); - } - } - - /* Clean up */ - { - int i; - for(i = 0; i < length; ++i) - BJAM_FREE(graph[i]); - BJAM_FREE(graph); - BJAM_FREE(order); - } - - return result; -} - -void init_order() -{ - { - char* args[] = { "first", "second", 0 }; - declare_native_rule("class@order", "add-pair", args, add_pair, 1); - } - - { - char* args[] = { "objects", "*", 0 }; - declare_native_rule("class@order", "order", args, order, 1); - } - - -} diff --git a/jam-files/engine/modules/path.c b/jam-files/engine/modules/path.c deleted file mode 100644 index f5d09622..00000000 --- a/jam-files/engine/modules/path.c +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright Vladimir Prus 2003. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#include "../native.h" -#include "../timestamp.h" -#include "../newstr.h" - -LIST *path_exists( PARSE *parse, FRAME *frame ) -{ - LIST* l = lol_get( frame->args, 0 ); - - time_t time; - timestamp(l->string, &time); - if (time != 0) - { - return list_new(0, newstr("true")); - } - else - { - return L0; - } -} - -void init_path() -{ - { - char* args[] = { "location", 0 }; - declare_native_rule("path", "exists", args, path_exists, 1); - } - -} diff --git a/jam-files/engine/modules/property-set.c b/jam-files/engine/modules/property-set.c deleted file mode 100644 index 2b0fb5d9..00000000 --- a/jam-files/engine/modules/property-set.c +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright Vladimir Prus 2003. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#include "../native.h" -#include "../timestamp.h" -#include "../newstr.h" -#include "../strings.h" -#include "../lists.h" -#include "../variable.h" -#include "../compile.h" - -LIST* get_grist(char* f) -{ - char* end = strchr(f, '>'); - string s[1]; - LIST* result; - - string_new(s); - - string_append_range(s, f, end+1); - result = list_new(0, newstr(s->value)); - - string_free(s); - return result; -} - -/* -rule create ( raw-properties * ) -{ - raw-properties = [ sequence.unique - [ sequence.insertion-sort $(raw-properties) ] ] ; - - local key = $(raw-properties:J=-:E=) ; - - if ! 
$(.ps.$(key)) - { - .ps.$(key) = [ new property-set $(raw-properties) ] ; - } - return $(.ps.$(key)) ; -} -*/ - -LIST *property_set_create( PARSE *parse, FRAME *frame ) -{ - LIST* properties = lol_get( frame->args, 0 ); - LIST* sorted = 0; -#if 0 - LIST* order_sensitive = 0; -#endif - LIST* unique; - LIST* tmp; - LIST* val; - string var[1]; - -#if 0 - /* Sort all properties which are not order sensitive */ - for(tmp = properties; tmp; tmp = tmp->next) { - LIST* g = get_grist(tmp->string); - LIST* att = call_rule("feature.attributes", frame, g, 0); - if (list_in(att, "order-sensitive")) { - order_sensitive = list_new( order_sensitive, tmp->string); - } else { - sorted = list_new( sorted, tmp->string); - } - list_free(att); - } - - sorted = list_sort(sorted); - sorted = list_append(sorted, order_sensitive); - unique = list_unique(sorted); -#endif - sorted = list_sort(properties); - unique = list_unique(sorted); - - string_new(var); - string_append(var, ".ps."); - - for(tmp = unique; tmp; tmp = tmp->next) { - string_append(var, tmp->string); - string_push_back(var, '-'); - } - val = var_get(var->value); - if (val == 0) - { - val = call_rule("new", frame, - list_append(list_new(0, "property-set"), unique), 0); - - var_set(newstr(var->value), list_copy(0, val), VAR_SET); - } - else - { - val = list_copy(0, val); - } - - string_free(var); - /* The 'unique' variable is freed in 'call_rule'. */ - list_free(sorted); - - return val; - -} - -void init_property_set() -{ - { - char* args[] = { "raw-properties", "*", 0 }; - declare_native_rule("property-set", "create", args, property_set_create, 1); - } -} diff --git a/jam-files/engine/modules/readme.txt b/jam-files/engine/modules/readme.txt deleted file mode 100644 index 2edf6e17..00000000 --- a/jam-files/engine/modules/readme.txt +++ /dev/null @@ -1,3 +0,0 @@ - -This directory constains sources which declare native -rules for Boost.Build modules.
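property_set_create() above canonicalizes the incoming properties (sort, then unique), joins them into a ".ps.<p1>-<p2>-..." variable name, and returns the cached property-set if that variable is already bound, so each distinct set is materialized exactly once. A sketch of the canonicalize-then-key step (hypothetical helper, plain C arrays in place of Jam lists):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int cmp(const void *a, const void *b)
    {
        return strcmp(*(char *const *)a, *(char *const *)b);
    }

    /* Build the canonical cache key for a property list: sort the
     * properties, drop duplicates, join with '-' (cf. ".ps.<key>"). */
    static void make_key(const char *props[], int n, char *key, size_t cap)
    {
        const char **v = malloc(n * sizeof *v);
        memcpy(v, props, n * sizeof *v);
        qsort(v, n, sizeof *v, cmp);

        snprintf(key, cap, ".ps.");
        for (int i = 0; i < n; ++i)
            if (i == 0 || strcmp(v[i], v[i - 1]) != 0)   /* unique */
            {
                strncat(key, v[i], cap - strlen(key) - 1);
                strncat(key, "-",  cap - strlen(key) - 1);
            }
        free(v);
    }

    int main(void)
    {
        const char *p1[] = { "link-static", "debug", "debug" };
        const char *p2[] = { "debug", "link-static" };
        char k1[128], k2[128];

        make_key(p1, 3, k1, sizeof k1);
        make_key(p2, 2, k2, sizeof k2);
        /* Both orderings collapse to one key, hence one cached object. */
        printf("%s\n%s\n%s\n", k1, k2,
               strcmp(k1, k2) == 0 ? "same property-set" : "different");
        return 0;
    }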
\ No newline at end of file diff --git a/jam-files/engine/modules/regex.c b/jam-files/engine/modules/regex.c deleted file mode 100644 index d048ba1d..00000000 --- a/jam-files/engine/modules/regex.c +++ /dev/null @@ -1,96 +0,0 @@ -/* Copyright Vladimir Prus 2003. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#include "../native.h" -#include "../timestamp.h" -#include "../newstr.h" -#include "../strings.h" -#include "../regexp.h" -#include "../compile.h" - -/* -rule transform ( list * : pattern : indices * ) -{ - indices ?= 1 ; - local result ; - for local e in $(list) - { - local m = [ MATCH $(pattern) : $(e) ] ; - if $(m) - { - result += $(m[$(indices)]) ; - } - } - return $(result) ; -} -*/ -LIST *regex_transform( PARSE *parse, FRAME *frame ) -{ - LIST* l = lol_get( frame->args, 0 ); - LIST* pattern = lol_get( frame->args, 1 ); - LIST* indices_list = lol_get(frame->args, 2); - int* indices = 0; - int size; - int* p; - LIST* result = 0; - - string buf[1]; - string_new(buf); - - if (indices_list) - { - size = list_length(indices_list); - indices = (int*)BJAM_MALLOC(size*sizeof(int)); - for(p = indices; indices_list; indices_list = indices_list->next) - { - *p++ = atoi(indices_list->string); - } - } - else - { - size = 1; - indices = (int*)BJAM_MALLOC(sizeof(int)); - *indices = 1; - } - - { - /* Result is cached and intentionally never freed */ - regexp *re = regex_compile( pattern->string ); - - for(; l; l = l->next) - { - if( regexec( re, l->string ) ) - { - int i = 0; - for(; i < size; ++i) - { - int index = indices[i]; - /* Skip empty submatches. Not sure it's right in all cases, - but surely is right for the case for which this routine - is optimized -- header scanning. - */ - if (re->startp[index] != re->endp[index]) - { - string_append_range( buf, re->startp[index], re->endp[index] ); - result = list_new( result, newstr( buf->value ) ); - string_truncate( buf, 0 ); - } - } - } - } - string_free( buf ); - } - - BJAM_FREE(indices); - - return result; -} - -void init_regex() -{ - { - char* args[] = { "list", "*", ":", "pattern", ":", "indices", "*", 0 }; - declare_native_rule("regex", "transform", args, regex_transform, 2); - } -} diff --git a/jam-files/engine/modules/sequence.c b/jam-files/engine/modules/sequence.c deleted file mode 100644 index bda80d94..00000000 --- a/jam-files/engine/modules/sequence.c +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright Vladimir Prus 2003. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#include "../native.h" - -# ifndef max -# define max( a,b ) ((a)>(b)?(a):(b)) -# endif - - -LIST *sequence_select_highest_ranked( PARSE *parse, FRAME *frame ) -{ - /* Returns all of 'elements' for which corresponding element in parallel */ - /* list 'rank' is equal to the maximum value in 'rank'. 
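regex_transform() above compiles the pattern once (the compiled regexp is cached and deliberately never freed), matches each list element, and keeps only the requested non-empty submatches: the fast path for header-dependency scanning. An equivalent sketch using POSIX <regex.h> rather than Jam's bundled Spencer engine (an analogue, not the deleted code):

    #include <stdio.h>
    #include <regex.h>

    int main(void)
    {
        /* Extract the include target, as when scanning '#include "foo.h"'. */
        regex_t re;
        regmatch_t m[2];
        const char *line = "#include \"hash.h\"";

        if (regcomp(&re, "#include[ \t]*\"([^\"]*)\"", REG_EXTENDED) != 0)
            return 1;

        /* Keep only the submatch, and only if it is non-empty,
         * mirroring the skip-empty-submatch rule in regex_transform. */
        if (regexec(&re, line, 2, m, 0) == 0 && m[1].rm_so != m[1].rm_eo)
            printf("%.*s\n", (int)(m[1].rm_eo - m[1].rm_so),
                   line + m[1].rm_so);        /* prints hash.h */

        regfree(&re);
        return 0;
    }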
*/ - - LIST* elements = lol_get( frame->args, 0 ); - LIST* rank = lol_get( frame->args, 1 ); - - LIST* result = 0; - LIST* tmp; - int highest_rank = -1; - - for (tmp = rank; tmp; tmp = tmp->next) - highest_rank = max(highest_rank, atoi(tmp->string)); - - for (; rank; rank = rank->next, elements = elements->next) - if (atoi(rank->string) == highest_rank) - result = list_new(result, elements->string); - - return result; -} - -void init_sequence() -{ - { - char* args[] = { "elements", "*", ":", "rank", "*", 0 }; - declare_native_rule("sequence", "select-highest-ranked", args, - sequence_select_highest_ranked, 1); - } - -} diff --git a/jam-files/engine/modules/set.c b/jam-files/engine/modules/set.c deleted file mode 100644 index f8219403..00000000 --- a/jam-files/engine/modules/set.c +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright Vladimir Prus 2003. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#include "../native.h" - -/* - local result = ; - local element ; - for element in $(B) - { - if ! ( $(element) in $(A) ) - { - result += $(element) ; - } - } - return $(result) ; -*/ -LIST *set_difference( PARSE *parse, FRAME *frame ) -{ - - LIST* b = lol_get( frame->args, 0 ); - LIST* a = lol_get( frame->args, 1 ); - - LIST* result = 0; - for(; b; b = b->next) - { - if (!list_in(a, b->string)) - result = list_new(result, b->string); - } - return result; -} - -void init_set() -{ - { - char* args[] = { "B", "*", ":", "A", "*", 0 }; - declare_native_rule("set", "difference", args, set_difference, 1); - } - -} diff --git a/jam-files/engine/native.c b/jam-files/engine/native.c deleted file mode 100644 index 4c289959..00000000 --- a/jam-files/engine/native.c +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright Vladimir Prus 2003. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#include "native.h" -#include "hash.h" - -# define P0 (PARSE *)0 -# define C0 (char *)0 - - -void declare_native_rule(char* module, char* rule, char** args, - LIST*(*f)(PARSE*, FRAME*), int version) -{ - module_t* m = bindmodule(module); - if (m->native_rules == 0) { - m->native_rules = hashinit( sizeof( native_rule_t ), "native rules"); - } - - { - native_rule_t n, *np = &n; - n.name = rule; - if (args) - { - n.arguments = args_new(); - lol_build( n.arguments->data, args ); - } - else - { - n.arguments = 0; - } - n.procedure = parse_make( f, P0, P0, P0, C0, C0, 0 ); - n.version = version; - hashenter(m->native_rules, (HASHDATA**)&np); - } -} diff --git a/jam-files/engine/native.h b/jam-files/engine/native.h deleted file mode 100644 index 3fc710b9..00000000 --- a/jam-files/engine/native.h +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright David Abrahams 2003. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#ifndef NATIVE_H_VP_2003_12_09 -#define NATIVE_H_VP_2003_12_09 - -#include "rules.h" - -struct native_rule_t -{ - char* name; - argument_list* arguments; - PARSE* procedure; - /* Version of the interface that the native rule provides. - It's possible that we want to change the set parameter - for existing native rule. In that case, version number - should be incremented so that Boost.Build can check for - version it relies on. - - Versions are numbered from 1. 
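set_difference() above keeps each element of B that a linear search fails to find in A, an O(|A|*|B|) approach that is fine for the short lists Jam deals in. The same logic over plain arrays (a sketch; larger inputs would want a hash):

    #include <stdio.h>
    #include <string.h>

    /* Return 1 if needle occurs in the first n strings of hay. */
    static int in(const char *needle, const char *hay[], int n)
    {
        for (int i = 0; i < n; ++i)
            if (strcmp(hay[i], needle) == 0)
                return 1;
        return 0;
    }

    int main(void)
    {
        const char *b[] = { "a.o", "b.o", "c.o" };   /* candidates  */
        const char *a[] = { "b.o" };                 /* to subtract */

        /* Keep each element of B not found in A: prints a.o c.o */
        for (int i = 0; i < 3; ++i)
            if (!in(b[i], a, 1))
                printf("%s ", b[i]);
        putchar('\n');
        return 0;
    }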
- */ - int version; -}; - -/* MSVC debugger gets confused unless this is provided */ -typedef struct native_rule_t native_rule_t ; - -void declare_native_rule(char* module, char* rule, char** args, - LIST*(*f)(PARSE*, FRAME*), int version); - - - -#endif diff --git a/jam-files/engine/newstr.c b/jam-files/engine/newstr.c deleted file mode 100644 index 6a229eb2..00000000 --- a/jam-files/engine/newstr.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -# include "jam.h" -# include "newstr.h" -# include "hash.h" -# include "compile.h" -# include <stddef.h> -# include <stdlib.h> - -/* - * newstr.c - string manipulation routines - * - * To minimize string copying, string creation, copying, and freeing - * is done through newstr. - * - * External functions: - * - * newstr() - return a dynamically allocated copy of a string - * copystr() - return a copy of a string previously returned by newstr() - * freestr() - free a string returned by newstr() or copystr() - * str_done() - free string tables - * - * Once a string is passed to newstr(), the returned string is readonly. - * - * This implementation builds a hash table of all strings, so that multiple - * calls of newstr() on the same string allocate memory for the string once. - * Strings are never actually freed. - */ - -typedef char * STRING; - -static struct hash * strhash = 0; -static int strtotal = 0; -static int strcount_in = 0; -static int strcount_out = 0; - - -/* - * Immortal string allocator implementation speeds string allocation and cuts - * down on internal fragmentation. - */ - -# define STRING_BLOCK 4096 -typedef struct strblock -{ - struct strblock * next; - char data[STRING_BLOCK]; -} strblock; - -static strblock * strblock_chain = 0; - -/* Storage remaining in the current strblock */ -static char * storage_start = 0; -static char * storage_finish = 0; - - -/* - * allocate() - Allocate n bytes of immortal string storage. - */ - -static char * allocate( size_t const n ) -{ -#ifdef BJAM_NEWSTR_NO_ALLOCATE - return (char*)BJAM_MALLOC_ATOMIC(n); -#else - /* See if we can grab storage from an existing block. */ - size_t remaining = storage_finish - storage_start; - if ( remaining >= n ) - { - char * result = storage_start; - storage_start += n; - return result; - } - else /* Must allocate a new block. */ - { - strblock * new_block; - size_t nalloc = n; - if ( nalloc < STRING_BLOCK ) - nalloc = STRING_BLOCK; - - /* Allocate a new block and link into the chain. */ - new_block = (strblock *)BJAM_MALLOC( offsetof( strblock, data[0] ) + nalloc * sizeof( new_block->data[0] ) ); - if ( new_block == 0 ) - return 0; - new_block->next = strblock_chain; - strblock_chain = new_block; - - /* Take future allocations out of the larger remaining space. */ - if ( remaining < nalloc - n ) - { - storage_start = new_block->data + n; - storage_finish = new_block->data + nalloc; - } - return new_block->data; - } -#endif -} - - -/* - * newstr() - return a dynamically allocated copy of a string. 
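allocate() above is a bump allocator: strings are carved sequentially out of 4 KB strblocks, exhausted blocks stay on a chain for str_done() to reclaim, and nothing is ever freed individually, which is what makes the strings "immortal". The core pointer arithmetic in isolation (a sketch that omits the block chain, so exhausted blocks simply leak):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    #define BLOCK 4096

    static char  *start;        /* next free byte in current block */
    static size_t remaining;    /* unused space in current block   */

    /* Bump-allocate n bytes; grab a fresh block when the current
     * one cannot satisfy the request.  No individual frees. */
    static char *bump_alloc(size_t n)
    {
        if (remaining < n)
        {
            size_t sz = n > BLOCK ? n : BLOCK;
            start = malloc(sz);   /* real code chains old blocks */
            if (!start)
                return NULL;
            remaining = sz;
        }
        char *p = start;
        start += n;
        remaining -= n;
        return p;
    }

    int main(void)
    {
        char *a = bump_alloc(16);
        char *b = bump_alloc(16);
        /* Consecutive small allocations sit adjacent in one block. */
        printf("%td\n", (ptrdiff_t)(b - a));   /* prints 16 */
        return 0;
    }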
- */ - -char * newstr( char * string ) -{ - STRING str; - STRING * s = &str; - - if ( !strhash ) - strhash = hashinit( sizeof( STRING ), "strings" ); - - *s = string; - - if ( hashenter( strhash, (HASHDATA **)&s ) ) - { - int l = strlen( string ); - char * m = (char *)allocate( l + 1 ); - - strtotal += l + 1; - memcpy( m, string, l + 1 ); - *s = m; - } - - strcount_in += 1; - return *s; -} - - -/* - * copystr() - return a copy of a string previously returned by newstr() - */ - -char * copystr( char * s ) -{ - strcount_in += 1; - return s; -} - - -/* - * freestr() - free a string returned by newstr() or copystr() - */ - -void freestr( char * s ) -{ - strcount_out += 1; -} - - -/* - * str_done() - free string tables. - */ - -void str_done() -{ - /* Reclaim string blocks. */ - while ( strblock_chain != 0 ) - { - strblock * n = strblock_chain->next; - BJAM_FREE(strblock_chain); - strblock_chain = n; - } - - hashdone( strhash ); - - if ( DEBUG_MEM ) - printf( "%dK in strings\n", strtotal / 1024 ); - - /* printf( "--- %d strings of %d dangling\n", strcount_in-strcount_out, strcount_in ); */ -} diff --git a/jam-files/engine/newstr.h b/jam-files/engine/newstr.h deleted file mode 100644 index 84a4d7b6..00000000 --- a/jam-files/engine/newstr.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * newstr.h - string manipulation routines - */ - -char * copystr ( char * ); -void freestr ( char * ); -char * newstr ( char * ); -void str_done(); diff --git a/jam-files/engine/option.c b/jam-files/engine/option.c deleted file mode 100644 index d25e5e8a..00000000 --- a/jam-files/engine/option.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -# include "jam.h" -# include "option.h" - -/* - * option.c - command line option processing - * - * {o >o - * \<>) "Process command line options as defined in <option.h>. - * Return the number of argv[] elements used up by options, - * or -1 if an invalid option flag was given or an argument - * was supplied for an option that does not require one." - */ - -int getoptions( int argc, char * * argv, char * opts, bjam_option * optv ) -{ - int i; - int optc = N_OPTS; - - memset( (char *)optv, '\0', sizeof( *optv ) * N_OPTS ); - - for ( i = 0; i < argc; ++i ) - { - char *arg; - - if ( ( argv[ i ][ 0 ] != '-' ) || - ( ( argv[ i ][ 1 ] != '-' ) && !isalpha( argv[ i ][ 1 ] ) ) ) - continue; - - if ( !optc-- ) - { - printf( "too many options (%d max)\n", N_OPTS ); - return -1; - } - - for ( arg = &argv[ i ][ 1 ]; *arg; ++arg ) - { - char * f; - - for ( f = opts; *f; ++f ) - if ( *f == *arg ) - break; - - if ( !*f ) - { - printf( "Invalid option: -%c\n", *arg ); - return -1; - } - - optv->flag = *f; - - if ( f[ 1 ] != ':' ) - { - optv++->val = "true"; - } - else if ( arg[ 1 ] ) - { - optv++->val = &arg[1]; - break; - } - else if ( ++i < argc ) - { - optv++->val = argv[ i ]; - break; - } - else - { - printf( "option: -%c needs argument\n", *f ); - return -1; - } - } - } - - return i; -} - - -/* - * Name: getoptval() - find an option given its character. 
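Together, newstr(), copystr() and freestr() above implement string interning: the hash table guarantees a single stored copy per distinct string, copystr() is only a counter bump, and freestr() frees nothing because the storage is immortal until str_done(). A minimal interning sketch (hypothetical linear table standing in for Jam's hash):

    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>

    #define MAX 128
    static char *table[MAX];
    static int   ntable;

    /* Return the canonical copy of s, storing it on first sight. */
    static char *intern(const char *s)
    {
        for (int i = 0; i < ntable; ++i)
            if (strcmp(table[i], s) == 0)
                return table[i];              /* hit: shared copy  */
        return table[ntable++] = strdup(s);   /* miss: create once */
    }

    int main(void)
    {
        char buf[] = "decoder.o";
        char *a = intern("decoder.o");
        char *b = intern(buf);                /* different source buffer */
        printf("%s\n", a == b ? "same pointer" : "different");
        return 0;
    }

Once interned, equal strings can be compared by pointer, which is where a scheme like this pays off in a tool that handles the same target names over and over.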
- */ - -char * getoptval( bjam_option * optv, char opt, int subopt ) -{ - int i; - for ( i = 0; i < N_OPTS; ++i, ++optv ) - if ( ( optv->flag == opt ) && !subopt-- ) - return optv->val; - return 0; -} diff --git a/jam-files/engine/option.h b/jam-files/engine/option.h deleted file mode 100644 index 99ef620d..00000000 --- a/jam-files/engine/option.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * option.h - command line option processing - * - * {o >o - * \ -) "Command line option." - */ - -typedef struct bjam_option -{ - char flag; /* filled in by getoption() */ - char *val; /* set to random address if true */ -} bjam_option; - -# define N_OPTS 256 - -int getoptions( int argc, char **argv, char *opts, bjam_option *optv ); -char * getoptval( bjam_option *optv, char opt, int subopt ); diff --git a/jam-files/engine/output.c b/jam-files/engine/output.c deleted file mode 100644 index 483c6ca9..00000000 --- a/jam-files/engine/output.c +++ /dev/null @@ -1,125 +0,0 @@ -/* - Copyright 2007 Rene Rivera - Distributed under the Boost Software License, Version 1.0. - (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) -*/ - -#include "jam.h" -#include "output.h" -#include "newstr.h" -#include <stdio.h> - -#define bjam_out (stdout) -#define bjam_err (stderr) - -static void out_ -( - char const * data, - FILE * io -) -{ - while ( *data ) - { - size_t len = strcspn(data,"\r"); - data += fwrite(data,1,len,io); - if ( *data == '\r' ) ++data; - } -} - - -void out_action -( - char const * action, - char const * target, - char const * command, - char const * out_data, - char const * err_data, - int exit_reason -) -{ - /* Print out the action+target line, if the action is quite the action - * should be null. - */ - if ( action ) - { - fprintf( bjam_out, "%s %s\n", action, target ); - } - - /* Print out the command executed if given -d+2. */ - if ( DEBUG_EXEC ) - { - fputs( command, bjam_out ); - fputc( '\n', bjam_out ); - } - - /* Print out the command executed to the command stream. */ - if ( globs.cmdout ) - { - fputs( command, globs.cmdout ); - } - - switch ( exit_reason ) - { - case EXIT_OK: - break; - case EXIT_FAIL: - break; - case EXIT_TIMEOUT: - { - /* Process expired, make user aware with explicit message. */ - if ( action ) - { - /* But only output for non-quietly actions. */ - fprintf( bjam_out, "%ld second time limit exceeded\n", globs.timeout ); - } - break; - } - default: - break; - } - - /* Print out the command output, if requested, or if the program failed. */ - if ( action || exit_reason != EXIT_OK) - { - /* But only output for non-quietly actions. 
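getoptions() above records every flag occurrence, in order, into a flat bjam_option array (at most N_OPTS entries), and getoptval() fetches the subopt-th value recorded for a flag, which is how repeated options such as several -s assignments all survive parsing. The retrieval half in miniature (hypothetical pre-filled array, not the Jam parser):

    #include <stdio.h>

    typedef struct { char flag; const char *val; } opt;

    /* Fetch the subopt-th value recorded for 'flag', as getoptval does. */
    static const char *optval(const opt *v, int n, char flag, int subopt)
    {
        for (int i = 0; i < n; ++i)
            if (v[i].flag == flag && subopt-- == 0)
                return v[i].val;
        return NULL;
    }

    int main(void)
    {
        /* As if parsed from: -s A=1 -s B=2 -d 2 */
        opt v[] = { { 's', "A=1" }, { 's', "B=2" }, { 'd', "2" } };

        printf("%s\n", optval(v, 3, 's', 0));   /* A=1 */
        printf("%s\n", optval(v, 3, 's', 1));   /* B=2 */
        printf("%s\n", optval(v, 3, 'd', 0));   /* 2   */
        return 0;
    }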
*/ - if ( ( 0 != out_data ) && - ( ( globs.pipe_action & 1 /* STDOUT_FILENO */ ) || - ( globs.pipe_action == 0 ) ) ) - { - out_( out_data, bjam_out ); - } - if ( ( 0 != err_data ) && - ( globs.pipe_action & 2 /* STDERR_FILENO */ ) ) - { - out_( err_data, bjam_err ); - } - } - - fflush( bjam_out ); - fflush( bjam_err ); - fflush( globs.cmdout ); -} - - -char * outf_int( int value ) -{ - char buffer[50]; - sprintf( buffer, "%i", value ); - return newstr( buffer ); -} - - -char * outf_double( double value ) -{ - char buffer[50]; - sprintf( buffer, "%f", value ); - return newstr( buffer ); -} - - -char * outf_time( time_t value ) -{ - char buffer[50]; - strftime( buffer, 49, "%Y-%m-%d %H:%M:%SZ", gmtime( &value ) ); - return newstr( buffer ); -} diff --git a/jam-files/engine/output.h b/jam-files/engine/output.h deleted file mode 100644 index 9e9876cf..00000000 --- a/jam-files/engine/output.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - Copyright 2007 Rene Rivera - Distributed under the Boost Software License, Version 1.0. - (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) -*/ - -#ifndef BJAM_OUTPUT_H -#define BJAM_OUTPUT_H - -#include <time.h> - -#define EXIT_OK 0 -#define EXIT_FAIL 1 -#define EXIT_TIMEOUT 2 - -void out_action( - const char * action, - const char * target, - const char * command, - const char * out_data, - const char * err_data, - int exit_reason - ); - -char * outf_int( int value ); -char * outf_double( double value ); -char * outf_time( time_t value ); - -#endif diff --git a/jam-files/engine/parse.c b/jam-files/engine/parse.c deleted file mode 100644 index 9114fa05..00000000 --- a/jam-files/engine/parse.c +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 1993, 2000 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -#include "jam.h" -#include "lists.h" -#include "parse.h" -#include "scan.h" -#include "newstr.h" -#include "modules.h" -#include "frames.h" - -/* - * parse.c - make and destroy parse trees as driven by the parser - * - * 09/07/00 (seiwald) - ref count on PARSE to avoid freeing when used, - * as per Matt Armstrong. - * 09/11/00 (seiwald) - structure reworked to reflect that (*func)() - * returns a LIST *. - */ - -static PARSE * yypsave; - -void parse_file( char * f, FRAME * frame ) -{ - /* Suspend scan of current file and push this new file in the stream. */ - yyfparse( f ); - - /* Now parse each block of rules and execute it. Execute it outside of the - * parser so that recursive calls to yyrun() work (no recursive yyparse's). - */ - - for ( ; ; ) - { - PARSE * p; - - /* Filled by yyparse() calling parse_save(). */ - yypsave = 0; - - /* If parse error or empty parse, outta here. */ - if ( yyparse() || !( p = yypsave ) ) - break; - - /* Run the parse tree. 
*/ - parse_evaluate( p, frame ); - parse_free( p ); - } -} - - -void parse_save( PARSE * p ) -{ - yypsave = p; -} - - -PARSE * parse_make( - LIST * (* func)( PARSE *, FRAME * ), - PARSE * left, - PARSE * right, - PARSE * third, - char * string, - char * string1, - int num ) -{ - PARSE * p = (PARSE *)BJAM_MALLOC( sizeof( PARSE ) ); - - p->func = func; - p->left = left; - p->right = right; - p->third = third; - p->string = string; - p->string1 = string1; - p->num = num; - p->refs = 1; - p->rulename = 0; - - if ( left ) - { - p->file = left->file; - p->line = left->line; - } - else - { - yyinput_stream( &p->file, &p->line ); - } - - return p; -} - - -void parse_refer( PARSE * p ) -{ - ++p->refs; -} - - -void parse_free( PARSE * p ) -{ - if ( --p->refs ) - return; - - if ( p->string ) - freestr( p->string ); - if ( p->string1 ) - freestr( p->string1 ); - if ( p->left ) - parse_free( p->left ); - if ( p->right ) - parse_free( p->right ); - if ( p->third ) - parse_free( p->third ); - if ( p->rulename ) - freestr( p->rulename ); - - BJAM_FREE( (char *)p ); -} - - -LIST * parse_evaluate( PARSE * p, FRAME * frame ) -{ - frame->procedure = p; - return (*p->func)( p, frame ); -} diff --git a/jam-files/engine/parse.h b/jam-files/engine/parse.h deleted file mode 100644 index e324972f..00000000 --- a/jam-files/engine/parse.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 1993, 2000 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -#ifndef PARSE_DWA20011020_H -#define PARSE_DWA20011020_H - -#include "frames.h" -#include "modules.h" -#include "lists.h" - -/* - * parse.h - make and destroy parse trees as driven by the parser. - */ - -/* - * Parse tree node. - */ - -struct _PARSE { - LIST * (* func)( PARSE *, FRAME * ); - PARSE * left; - PARSE * right; - PARSE * third; - char * string; - char * string1; - int num; - int refs; -/* module * module; */ - char * rulename; - char * file; - int line; -}; - -void parse_file( char *, FRAME * ); -void parse_save( PARSE * ); - -PARSE * parse_make( - LIST * (* func)( PARSE *, FRAME * ), - PARSE * left, - PARSE * right, - PARSE * third, - char * string, - char * string1, - int num ); - -void parse_refer ( PARSE * ); -void parse_free ( PARSE * ); -LIST * parse_evaluate( PARSE *, FRAME * ); - -#endif diff --git a/jam-files/engine/patchlevel.h b/jam-files/engine/patchlevel.h deleted file mode 100644 index 699efd84..00000000 --- a/jam-files/engine/patchlevel.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* Keep JAMVERSYM in sync with VERSION. */ -/* It can be accessed as $(JAMVERSION) in the Jamfile. */ - -#define VERSION_MAJOR 2011 -#define VERSION_MINOR 04 -#define VERSION_PATCH 0 -#define VERSION_MAJOR_SYM "2011" -#define VERSION_MINOR_SYM "04" -#define VERSION_PATCH_SYM "00" -#define VERSION "2011.4" -#define JAMVERSYM "JAMVERSION=2011.4" diff --git a/jam-files/engine/pathmac.c b/jam-files/engine/pathmac.c deleted file mode 100644 index e2c250e3..00000000 --- a/jam-files/engine/pathmac.c +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. 
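parse_make(), parse_refer() and parse_free() above manage parse-tree nodes by reference count: creation starts refs at one, sharing increments it, and parse_free() releases strings and children recursively only when the last reference is dropped. The pattern in isolation (a sketch with a hypothetical node type):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node {
        struct node *left, *right;
        int refs;
    } node;

    static node *node_make(node *l, node *r)
    {
        node *n = malloc(sizeof *n);
        n->left = l;
        n->right = r;
        n->refs = 1;                 /* creator holds one reference */
        return n;
    }

    static void node_refer(node *n) { ++n->refs; }

    /* Drop one reference; free the node and its children only when
     * the last reference goes away, as parse_free does. */
    static void node_free(node *n)
    {
        if (--n->refs)
            return;
        if (n->left)  node_free(n->left);
        if (n->right) node_free(n->right);
        free(n);
    }

    int main(void)
    {
        node *leaf = node_make(NULL, NULL);
        node_refer(leaf);                 /* shared by two parents   */
        node *a = node_make(leaf, NULL);
        node *b = node_make(leaf, NULL);

        node_free(a);                     /* leaf survives: refs 2->1 */
        node_free(b);                     /* leaf freed:    refs 1->0 */
        puts("all nodes released");
        return 0;
    }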
- */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" -# include "pathsys.h" - -# ifdef OS_MAC - -# define DELIM ':' - -/* - * pathunix.c - manipulate file names on UNIX, NT, OS2 - * - * External routines: - * - * path_parse() - split a file name into dir/base/suffix/member - * path_build() - build a filename given dir/base/suffix/member - * path_parent() - make a PATHNAME point to its parent dir - * - * File_parse() and path_build() just manipuate a string and a structure; - * they do not make system calls. - * - * 04/08/94 (seiwald) - Coherent/386 support added. - * 12/26/93 (seiwald) - handle dir/.suffix properly in path_build() - * 12/19/94 (mikem) - solaris string table insanity support - * 12/21/94 (wingerd) Use backslashes for pathnames - the NT way. - * 02/14/95 (seiwald) - parse and build /xxx properly - * 02/23/95 (wingerd) Compilers on NT can handle "/" in pathnames, so we - * should expect hdr searches to come up with strings - * like "thing/thing.h". So we need to test for "/" as - * well as "\" when parsing pathnames. - * 03/16/95 (seiwald) - fixed accursed typo on line 69. - * 05/03/96 (seiwald) - split from filent.c, fileunix.c - * 12/20/96 (seiwald) - when looking for the rightmost . in a file name, - * don't include the archive member name. - * 01/10/01 (seiwald) - path_parse now strips the trailing : from the - * directory name, unless the directory name is all - * :'s, so that $(d:P) works. - */ - -/* - * path_parse() - split a file name into dir/base/suffix/member - */ - -void -path_parse( - char *file, - PATHNAME *f ) -{ - char *p, *q; - char *end; - - memset( (char *)f, 0, sizeof( *f ) ); - - /* Look for <grist> */ - - if ( file[0] == '<' && ( p = strchr( file, '>' ) ) ) - { - f->f_grist.ptr = file; - f->f_grist.len = p - file; - file = p + 1; - } - - /* Look for dir: */ - - if ( p = strrchr( file, DELIM ) ) - { - f->f_dir.ptr = file; - f->f_dir.len = p - file; - file = p + 1; - - /* All :'s? Include last : as part of directory name */ - - while ( ( p > f->f_dir.ptr ) && ( *--p == DELIM ) ); - - if ( p == f->f_dir.ptr ) - ++f->f_dir.len; - } - - end = file + strlen( file ); - - /* Look for (member). */ - - if ( ( p = strchr( file, '(' ) ) && ( end[-1] == ')' ) ) - { - f->f_member.ptr = p + 1; - f->f_member.len = end - p - 2; - end = p; - } - - /* Look for .suffix */ - /* This would be memrchr() */ - - p = 0; - q = file; - - while ( q = memchr( q, '.', end - q ) ) - p = q++; - - if ( p ) - { - f->f_suffix.ptr = p; - f->f_suffix.len = end - p; - end = p; - } - - /* Leaves base */ - - f->f_base.ptr = file; - f->f_base.len = end - file; -} - -/* - * path_build() - build a filename given dir/base/suffix/member. - */ - -# define DIR_EMPTY 0 /* "" */ -# define DIR_DOT 1 /* : */ -# define DIR_DOTDOT 2 /* :: */ -# define DIR_ABS 3 /* dira:dirb: */ -# define DIR_REL 4 /* :dira:dirb: */ - -# define G_DIR 0 /* take dir */ -# define G_ROOT 1 /* take root */ -# define G_CAT 2 /* prepend root to dir */ -# define G_DTDR 3 /* :: of rel dir */ -# define G_DDDD 4 /* make it ::: (../..) 
*/ -# define G_MT 5 /* leave it empty */ - -char grid[5][5] = { -/* EMPTY DOT DOTDOT ABS REL */ -/* EMPTY */ { G_MT, G_DIR, G_DIR, G_DIR, G_DIR }, -/* DOT */ { G_ROOT, G_DIR, G_DIR, G_DIR, G_DIR }, -/* DOTDOT */ { G_ROOT, G_ROOT, G_DDDD, G_DIR, G_DTDR }, -/* ABS */ { G_ROOT, G_ROOT, G_ROOT, G_DIR, G_CAT }, -/* REL */ { G_ROOT, G_ROOT, G_ROOT, G_DIR, G_CAT } -}; - -static int file_flags( char * ptr, int len ) -{ - if ( !len ) - return DIR_EMPTY; - if ( ( len == 1 ) && ( ptr[0] == DELIM ) ) - return DIR_DOT; - if ( ( len == 2 ) && ( ptr[0] == DELIM ) && ( ptr[1] == DELIM ) ) - return DIR_DOTDOT; - if ( ptr[0] == DELIM ) - return DIR_REL; - return DIR_ABS; -} - - -void path_build( PATHNAME * f, string * file, int binding ) -{ - int dflag; - int rflag; - int act; - - file_build1( f, file ); - - /* Combine root & directory, according to the grid. */ - - dflag = file_flags( f->f_dir.ptr, f->f_dir.len ); - rflag = file_flags( f->f_root.ptr, f->f_root.len ); - - switch ( act = grid[ rflag ][ dflag ] ) - { - case G_DTDR: - { - /* :: of rel dir */ - string_push_back( file, DELIM ); - } - /* fall through */ - - case G_DIR: - /* take dir */ - string_append_range( file, f->f_dir.ptr, f->f_dir.ptr + f->f_dir.len ); - break; - - case G_ROOT: - /* take root */ - string_append_range( file, f->f_root.ptr, f->f_root.ptr + f->f_root.len ); - break; - - case G_CAT: - /* prepend root to dir */ - string_append_range( file, f->f_root.ptr, f->f_root.ptr + f->f_root.len ); - if ( file->value[ file->size - 1 ] == DELIM ) - string_pop_back( file ); - string_append_range( file, f->f_dir.ptr, f->f_dir.ptr + f->f_dir.len ); - break; - - case G_DDDD: - /* make it ::: (../..) */ - string_append( file, ":::" ); - break; - } - - /* Put : between dir and file (if none already). */ - - if ( ( act != G_MT ) && - ( file->value[ file->size - 1 ] != DELIM ) && - ( f->f_base.len || f->f_suffix.len ) ) - { - string_push_back( file, DELIM ); - } - - if ( f->f_base.len ) - string_append_range( file, f->f_base.ptr, f->f_base.ptr + f->f_base.len ); - - if ( f->f_suffix.len ) - string_append_range( file, f->f_suffix.ptr, f->f_suffix.ptr + f->f_suffix.len ); - - if ( f->f_member.len ) - { - string_push_back( file, '(' ); - string_append_range( file, f->f_member.ptr, f->f_member.ptr + f->f_member.len ); - string_push_back( file, ')' ); - } - - if ( DEBUG_SEARCH ) - printf( " -> '%s'\n", file->value ); -} - - -/* - * path_parent() - make a PATHNAME point to its parent dir - */ - -void path_parent( PATHNAME * f ) -{ - /* Just set everything else to nothing. */ - - f->f_base.ptr = - f->f_suffix.ptr = - f->f_member.ptr = ""; - - f->f_base.len = - f->f_suffix.len = - f->f_member.len = 0; -} - -# endif /* OS_MAC */ diff --git a/jam-files/engine/pathsys.h b/jam-files/engine/pathsys.h deleted file mode 100644 index 73775810..00000000 --- a/jam-files/engine/pathsys.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * pathsys.h - PATHNAME struct - */ - -/* - * PATHNAME - a name of a file, broken into <grist>dir/base/suffix(member) - * - * <grist> is salt to distinguish between targets that otherwise would - * have the same name: it never appears in the bound name of a target. - * (member) is an archive member name: the syntax is arbitrary, but must - * agree in path_parse(), path_build() and the Jambase. 
- * - * On VMS, we keep track of whether the original path was a directory - * (without a file), so that $(VAR:D) can climb to the parent. - */ - -#ifndef PATHSYS_VP_20020211_H -# define PATHSYS_VP_20020211_H - -#include "strings.h" - -typedef struct _pathname PATHNAME; -typedef struct _pathpart PATHPART; - -struct _pathpart -{ - char * ptr; - int len; -}; - -struct _pathname -{ - PATHPART part[6]; -#ifdef OS_VMS - int parent; -#endif - -#define f_grist part[0] -#define f_root part[1] -#define f_dir part[2] -#define f_base part[3] -#define f_suffix part[4] -#define f_member part[5] -}; - -void path_build( PATHNAME * f, string * file, int binding ); -void path_build1( PATHNAME * f, string * file ); - -void path_parse( char * file, PATHNAME * f ); -void path_parent( PATHNAME * f ); - -#ifdef NT - -/** Returns newstr-allocated string with long equivivalent of 'short_name'. - If none exists -- i.e. 'short_path' is already long path, it's returned - unaltered. */ -char * short_path_to_long_path( char * short_path ); - -#endif - -#ifdef USE_PATHUNIX -/** Returns a static pointer to the system dependent path to the temporary - directory. NOTE: *without* a trailing path separator. -*/ -const char * path_tmpdir( void ); - -/** Returns a new temporary name. -*/ -const char * path_tmpnam( void ); - -/** Returns a new temporary path. -*/ -const char * path_tmpfile( void ); -#endif - -/** Give the first argument to 'main', return a full path to - our executable. Returns null in the unlikely case it - cannot be determined. Caller is responsible for freeing - the string. - - Implemented in jam.c -*/ -char * executable_path (char *argv0); - -#endif diff --git a/jam-files/engine/pathunix.c b/jam-files/engine/pathunix.c deleted file mode 100644 index 2daad14b..00000000 --- a/jam-files/engine/pathunix.c +++ /dev/null @@ -1,457 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Copyright 2005 Rene Rivera. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" -# include "pathsys.h" -# include "strings.h" -# include "newstr.h" -# include "filesys.h" -# include <time.h> -# include <stdlib.h> -# ifndef OS_NT -# include <unistd.h> -# endif - -# ifdef USE_PATHUNIX - -/* - * pathunix.c - manipulate file names on UNIX, NT, OS2, AmigaOS - * - * External routines: - * - * path_parse() - split a file name into dir/base/suffix/member - * path_build() - build a filename given dir/base/suffix/member - * path_parent() - make a PATHNAME point to its parent dir - * - * File_parse() and path_build() just manipuate a string and a structure; - * they do not make system calls. - * - * 04/08/94 (seiwald) - Coherent/386 support added. - * 12/26/93 (seiwald) - handle dir/.suffix properly in path_build() - * 12/19/94 (mikem) - solaris string table insanity support - * 12/21/94 (wingerd) Use backslashes for pathnames - the NT way. - * 02/14/95 (seiwald) - parse and build /xxx properly - * 02/23/95 (wingerd) Compilers on NT can handle "/" in pathnames, so we - * should expect hdr searches to come up with strings - * like "thing/thing.h". So we need to test for "/" as - * well as "\" when parsing pathnames. - * 03/16/95 (seiwald) - fixed accursed typo on line 69. 
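The PATHNAME structure above describes a file name as six (ptr, len) slices into the original buffer, <grist>root/dir/base.suffix(member), so path_parse() copies nothing and path_build() can reassemble any subset of the parts. A sketch of the slice-based split for the dir/base/suffix portion (hypothetical, POSIX paths only, grist and member omitted):

    #include <stdio.h>
    #include <string.h>

    typedef struct { const char *ptr; int len; } part;

    /* Split "dir/base.suffix" into slices without copying, in the
     * spirit of path_parse (special cases like "/" omitted). */
    static void split(const char *file, part *dir, part *base, part *suf)
    {
        const char *slash = strrchr(file, '/');
        const char *rest  = slash ? slash + 1 : file;
        const char *dot   = strrchr(rest, '.');
        const char *end   = rest + strlen(rest);

        dir->ptr  = file;
        dir->len  = slash ? (int)(slash - file) : 0;
        base->ptr = rest;
        base->len = (int)((dot ? dot : end) - rest);
        suf->ptr  = dot ? dot : end;       /* suffix keeps its dot */
        suf->len  = (int)(end - (dot ? dot : end));
    }

    int main(void)
    {
        part d, b, s;
        split("decoder/ff_klm.cc", &d, &b, &s);
        printf("dir=%.*s base=%.*s suffix=%.*s\n",
               d.len, d.ptr, b.len, b.ptr, s.len, s.ptr);
        /* dir=decoder base=ff_klm suffix=.cc */
        return 0;
    }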
- * 05/03/96 (seiwald) - split from filent.c, fileunix.c - * 12/20/96 (seiwald) - when looking for the rightmost . in a file name, - * don't include the archive member name. - * 01/13/01 (seiwald) - turn on \ handling on UNIX, on by accident - */ - -/* - * path_parse() - split a file name into dir/base/suffix/member - */ - -void path_parse( char * file, PATHNAME * f ) -{ - char * p; - char * q; - char * end; - - memset( (char *)f, 0, sizeof( *f ) ); - - /* Look for <grist> */ - - if ( ( file[0] == '<' ) && ( p = strchr( file, '>' ) ) ) - { - f->f_grist.ptr = file; - f->f_grist.len = p - file; - file = p + 1; - } - - /* Look for dir/ */ - - p = strrchr( file, '/' ); - -# if PATH_DELIM == '\\' - /* On NT, look for dir\ as well */ - { - char *p1 = strrchr( file, '\\' ); - p = p1 > p ? p1 : p; - } -# endif - - if ( p ) - { - f->f_dir.ptr = file; - f->f_dir.len = p - file; - - /* Special case for / - dirname is /, not "" */ - - if ( !f->f_dir.len ) - f->f_dir.len = 1; - -# if PATH_DELIM == '\\' - /* Special case for D:/ - dirname is D:/, not "D:" */ - - if ( f->f_dir.len == 2 && file[1] == ':' ) - f->f_dir.len = 3; -# endif - - file = p + 1; - } - - end = file + strlen( file ); - - /* Look for (member) */ - - if ( ( p = strchr( file, '(' ) ) && ( end[ -1 ] == ')' ) ) - { - f->f_member.ptr = p + 1; - f->f_member.len = end - p - 2; - end = p; - } - - /* Look for .suffix */ - /* This would be memrchr() */ - - p = 0; - q = file; - - while ( ( q = (char *)memchr( q, '.', end - q ) ) ) - p = q++; - - if ( p ) - { - f->f_suffix.ptr = p; - f->f_suffix.len = end - p; - end = p; - } - - /* Leaves base */ - - f->f_base.ptr = file; - f->f_base.len = end - file; -} - -/* - * path_delims - the string of legal path delimiters - */ -static char path_delims[] = { - PATH_DELIM, -# if PATH_DELIM == '\\' - '/', -# endif - 0 -}; - -/* - * is_path_delim() - true iff c is a path delimiter - */ -static int is_path_delim( char c ) -{ - char* p = strchr( path_delims, c ); - return p && *p; -} - -/* - * as_path_delim() - convert c to a path delimiter if it isn't one - * already - */ -static char as_path_delim( char c ) -{ - return is_path_delim( c ) ? c : PATH_DELIM; -} - -/* - * path_build() - build a filename given dir/base/suffix/member - * - * To avoid changing slash direction on NT when reconstituting paths, - * instead of unconditionally appending PATH_DELIM we check the - * past-the-end character of the previous path element. If it is in - * path_delims, we append that, and only append PATH_DELIM as a last - * resort. This heuristic is based on the fact that PATHNAME objects - * are usually the result of calling path_parse, which leaves the - * original slashes in the past-the-end position. Correctness depends - * on the assumption that all strings are zero terminated, so a - * past-the-end character will always be available. - * - * As an attendant patch, we had to ensure that backslashes are used - * explicitly in timestamp.c - */ - -void -path_build( - PATHNAME *f, - string *file, - int binding ) -{ - file_build1( f, file ); - - /* Don't prepend root if it's . or directory is rooted */ -# if PATH_DELIM == '/' - - if ( f->f_root.len - && !( f->f_root.len == 1 && f->f_root.ptr[0] == '.' ) - && !( f->f_dir.len && f->f_dir.ptr[0] == '/' ) ) - -# else /* unix */ - - if ( f->f_root.len - && !( f->f_root.len == 1 && f->f_root.ptr[0] == '.' 
) - && !( f->f_dir.len && f->f_dir.ptr[0] == '/' ) - && !( f->f_dir.len && f->f_dir.ptr[0] == '\\' ) - && !( f->f_dir.len && f->f_dir.ptr[1] == ':' ) ) - -# endif /* unix */ - - { - string_append_range( file, f->f_root.ptr, f->f_root.ptr + f->f_root.len ); - /* If 'root' already ends with path delimeter, - don't add yet another one. */ - if ( ! is_path_delim( f->f_root.ptr[f->f_root.len-1] ) ) - string_push_back( file, as_path_delim( f->f_root.ptr[f->f_root.len] ) ); - } - - if ( f->f_dir.len ) - string_append_range( file, f->f_dir.ptr, f->f_dir.ptr + f->f_dir.len ); - - /* UNIX: Put / between dir and file */ - /* NT: Put \ between dir and file */ - - if ( f->f_dir.len && ( f->f_base.len || f->f_suffix.len ) ) - { - /* UNIX: Special case for dir \ : don't add another \ */ - /* NT: Special case for dir / : don't add another / */ - -# if PATH_DELIM == '\\' - if ( !( f->f_dir.len == 3 && f->f_dir.ptr[1] == ':' ) ) -# endif - if ( !( f->f_dir.len == 1 && is_path_delim( f->f_dir.ptr[0] ) ) ) - string_push_back( file, as_path_delim( f->f_dir.ptr[f->f_dir.len] ) ); - } - - if ( f->f_base.len ) - { - string_append_range( file, f->f_base.ptr, f->f_base.ptr + f->f_base.len ); - } - - if ( f->f_suffix.len ) - { - string_append_range( file, f->f_suffix.ptr, f->f_suffix.ptr + f->f_suffix.len ); - } - - if ( f->f_member.len ) - { - string_push_back( file, '(' ); - string_append_range( file, f->f_member.ptr, f->f_member.ptr + f->f_member.len ); - string_push_back( file, ')' ); - } -} - -/* - * path_parent() - make a PATHNAME point to its parent dir - */ - -void -path_parent( PATHNAME *f ) -{ - /* just set everything else to nothing */ - - f->f_base.ptr = - f->f_suffix.ptr = - f->f_member.ptr = ""; - - f->f_base.len = - f->f_suffix.len = - f->f_member.len = 0; -} - -#ifdef NT -#include <windows.h> -#include <tchar.h> - -/* The definition of this in winnt.h is not ANSI-C compatible. */ -#undef INVALID_FILE_ATTRIBUTES -#define INVALID_FILE_ATTRIBUTES ((DWORD)-1) - - -DWORD ShortPathToLongPath(LPCTSTR lpszShortPath,LPTSTR lpszLongPath,DWORD - cchBuffer) -{ - LONG i=0; - TCHAR path[_MAX_PATH]={0}; - TCHAR ret[_MAX_PATH]={0}; - LONG pos=0, prev_pos=0; - LONG len=_tcslen(lpszShortPath); - - /* Is the string valid? */ - if (!lpszShortPath) { - SetLastError(ERROR_INVALID_PARAMETER); - return 0; - } - - /* Is the path valid? */ - if (GetFileAttributes(lpszShortPath)==INVALID_FILE_ATTRIBUTES) - return 0; - - /* Convert "/" to "\" */ - for (i=0;i<len;++i) { - if (lpszShortPath[i]==_T('/')) - path[i]=_T('\\'); - else - path[i]=lpszShortPath[i]; - } - - /* UNC path? */ - if (path[0]==_T('\\') && path[1]==_T('\\')) { - pos=2; - for (i=0;i<2;++i) { - while (path[pos]!=_T('\\') && path[pos]!=_T('\0')) - ++pos; - ++pos; - } - _tcsncpy(ret,path,pos-1); - } /* Drive letter? */ - else if (path[1]==_T(':')) { - if (path[2]==_T('\\')) - pos=3; - if (len==3) { - if (cchBuffer>3) - _tcscpy(lpszLongPath,lpszShortPath); - return len; - } - _tcsncpy(ret,path,2); - } - - /* Expand the path for each subpath, and strip trailing backslashes */ - for (prev_pos = pos-1;pos<=len;++pos) { - if (path[pos]==_T('\\') || (path[pos]==_T('\0') && - path[pos-1]!=_T('\\'))) { - WIN32_FIND_DATA fd; - HANDLE hf=0; - TCHAR c=path[pos]; - char* new_element; - path[pos]=_T('\0'); - - /* the path[prev_pos+1]... path[pos] range is the part of - path we're handling right now. We need to find long - name for that element and add it. */ - new_element = path + prev_pos + 1; - - /* First add separator, but only if there's something in result already. 
*/ - if (ret[0] != _T('\0')) - { - _tcscat(ret,_T("\\")); - } - - /* If it's ".." element, we need to append it, not - the name in parent that FindFirstFile will return. - Same goes for "." */ - - if (new_element[0] == _T('.') && new_element[1] == _T('\0') || - new_element[0] == _T('.') && new_element[1] == _T('.') - && new_element[2] == _T('\0')) - { - _tcscat(ret, new_element); - } - else - { - hf=FindFirstFile(path, &fd); - if (hf==INVALID_HANDLE_VALUE) - return 0; - - _tcscat(ret,fd.cFileName); - FindClose(hf); - } - - path[pos]=c; - - prev_pos = pos; - } - } - - len=_tcslen(ret)+1; - if (cchBuffer>=len) - _tcscpy(lpszLongPath,ret); - - return len; -} - -char* short_path_to_long_path(char* short_path) -{ - char buffer2[_MAX_PATH]; - int ret = ShortPathToLongPath(short_path, buffer2, _MAX_PATH); - - if (ret) - return newstr(buffer2); - else - return newstr(short_path); -} - -#endif - -static string path_tmpdir_buffer[1]; -static const char * path_tmpdir_result = 0; - -const char * path_tmpdir() -{ - if (!path_tmpdir_result) - { - # ifdef OS_NT - DWORD pathLength = 0; - pathLength = GetTempPath(pathLength,NULL); - string_new(path_tmpdir_buffer); - string_reserve(path_tmpdir_buffer,pathLength); - pathLength = GetTempPathA(pathLength,path_tmpdir_buffer[0].value); - path_tmpdir_buffer[0].value[pathLength-1] = '\0'; - path_tmpdir_buffer[0].size = pathLength-1; - # else - const char * t = getenv("TMPDIR"); - if (!t) - { - t = "/tmp"; - } - string_new(path_tmpdir_buffer); - string_append(path_tmpdir_buffer,t); - # endif - path_tmpdir_result = path_tmpdir_buffer[0].value; - } - return path_tmpdir_result; -} - -const char * path_tmpnam(void) -{ - char name_buffer[64]; - # ifdef OS_NT - unsigned long c0 = GetCurrentProcessId(); - # else - unsigned long c0 = getpid(); - # endif - static unsigned long c1 = 0; - if (0 == c1) c1 = time(0)&0xffff; - c1 += 1; - sprintf(name_buffer,"jam%lx%lx.000",c0,c1); - return newstr(name_buffer); -} - -const char * path_tmpfile(void) -{ - const char * result = 0; - - string file_path; - string_copy(&file_path,path_tmpdir()); - string_push_back(&file_path,PATH_DELIM); - string_append(&file_path,path_tmpnam()); - result = newstr(file_path.value); - string_free(&file_path); - - return result; -} - - -# endif /* unix, NT, OS/2, AmigaOS */ diff --git a/jam-files/engine/pathvms.c b/jam-files/engine/pathvms.c deleted file mode 100644 index 975fe5a5..00000000 --- a/jam-files/engine/pathvms.c +++ /dev/null @@ -1,406 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" -# include "pathsys.h" - -# ifdef OS_VMS - -# define DEBUG - -/* - * pathvms.c - manipulate file names on VMS - * - * External routines: - * - * path_parse() - split a file name into dir/base/suffix/member - * path_build() - build a filename given dir/base/suffix/member - * path_parent() - make a PATHNAME point to its parent dir - * - * File_parse() and path_build() just manipuate a string and a structure; - * they do not make system calls. - * - * WARNING! This file contains voodoo logic, as black magic is - * necessary for wrangling with VMS file name. Woe be to people - * who mess with this code. 
- * - * 02/09/95 (seiwald) - bungled R=[xxx] - was using directory length! - * 05/03/96 (seiwald) - split from filevms.c - */ - -/* - * path_parse() - split a file name into dir/base/suffix/member. - */ - -void path_parse( char * file, PATHNAME * f ) -{ - char * p; - char * q; - char * end; - - memset( (char *)f, 0, sizeof( *f ) ); - - /* Look for <grist> */ - - if ( ( file[0] == '<' ) && ( p = strchr( file, '>' ) ) ) - { - f->f_grist.ptr = file; - f->f_grist.len = p - file; - file = p + 1; - } - - /* Look for dev:[dir] or dev: */ - - if ( ( p = strchr( file, ']' ) ) || ( p = strchr( file, ':' ) ) ) - { - f->f_dir.ptr = file; - f->f_dir.len = p + 1 - file; - file = p + 1; - } - - end = file + strlen( file ); - - /* Look for (member). */ - - if ( ( p = strchr( file, '(' ) ) && ( end[ -1 ] == ')' ) ) - { - f->f_member.ptr = p + 1; - f->f_member.len = end - p - 2; - end = p; - } - - /* Look for .suffix */ - /* This would be memrchr(). */ - - p = 0; - q = file; - - while ( q = (char *)memchr( q, '.', end - q ) ) - p = q++; - - if ( p ) - { - f->f_suffix.ptr = p; - f->f_suffix.len = end - p; - end = p; - } - - /* Leaves base. */ - f->f_base.ptr = file; - f->f_base.len = end - file; - - /* Is this a directory without a file spec? */ - f->parent = 0; -} - -/* - * dir mods result - * --- --- ------ - * Rerooting: - * - * (none) :R=dev: dev: - * devd: :R=dev: devd: - * devd:[dir] :R=dev: devd:[dir] - * [.dir] :R=dev: dev:[dir] questionable - * [dir] :R=dev: dev:[dir] - * - * (none) :R=[rdir] [rdir] questionable - * devd: :R=[rdir] devd: - * devd:[dir] :R=[rdir] devd:[dir] - * [.dir] :R=[rdir] [rdir.dir] questionable - * [dir] :R=[rdir] [rdir] - * - * (none) :R=dev:[root] dev:[root] - * devd: :R=dev:[root] devd: - * devd:[dir] :R=dev:[root] devd:[dir] - * [.dir] :R=dev:[root] dev:[root.dir] - * [dir] :R=dev:[root] [dir] - * - * Climbing to parent: - * - */ - -# define DIR_EMPTY 0 /* empty string */ -# define DIR_DEV 1 /* dev: */ -# define DIR_DEVDIR 2 /* dev:[dir] */ -# define DIR_DOTDIR 3 /* [.dir] */ -# define DIR_DASHDIR 4 /* [-] or [-.dir] */ -# define DIR_ABSDIR 5 /* [dir] */ -# define DIR_ROOT 6 /* [000000] or dev:[000000] */ - -# define G_DIR 0 /* take just dir */ -# define G_ROOT 1 /* take just root */ -# define G_VAD 2 /* root's dev: + [abs] */ -# define G_DRD 3 /* root's dev:[dir] + [.rel] */ -# define G_VRD 4 /* root's dev: + [.rel] made [abs] */ -# define G_DDD 5 /* root's dev:[dir] + . 
+ [dir] */ - -static int grid[7][7] = { - -/* root/dir EMPTY DEV DEVDIR DOTDIR DASH, ABSDIR ROOT */ -/* EMPTY */ G_DIR, G_DIR, G_DIR, G_DIR, G_DIR, G_DIR, G_DIR, -/* DEV */ G_ROOT, G_DIR, G_DIR, G_VRD, G_VAD, G_VAD, G_VAD, -/* DEVDIR */ G_ROOT, G_DIR, G_DIR, G_DRD, G_VAD, G_VAD, G_VAD, -/* DOTDIR */ G_ROOT, G_DIR, G_DIR, G_DRD, G_DIR, G_DIR, G_DIR, -/* DASHDIR */ G_ROOT, G_DIR, G_DIR, G_DRD, G_DDD, G_DIR, G_DIR, -/* ABSDIR */ G_ROOT, G_DIR, G_DIR, G_DRD, G_DIR, G_DIR, G_DIR, -/* ROOT */ G_ROOT, G_DIR, G_DIR, G_VRD, G_DIR, G_DIR, G_DIR, - -}; - -struct dirinf -{ - int flags; - - struct - { - char * ptr; - int len; - } dev, dir; -}; - -static char * strnchr( char * buf, int c, int len ) -{ - while ( len-- ) - if ( *buf && ( *buf++ == c ) ) - return buf - 1; - return 0; -} - - -static void dir_flags( char * buf, int len, struct dirinf * i ) -{ - char * p; - - if ( !buf || !len ) - { - i->flags = DIR_EMPTY; - i->dev.ptr = - i->dir.ptr = 0; - i->dev.len = - i->dir.len = 0; - } - else if ( p = strnchr( buf, ':', len ) ) - { - i->dev.ptr = buf; - i->dev.len = p + 1 - buf; - i->dir.ptr = buf + i->dev.len; - i->dir.len = len - i->dev.len; - i->flags = i->dir.len && *i->dir.ptr == '[' ? DIR_DEVDIR : DIR_DEV; - } - else - { - i->dev.ptr = buf; - i->dev.len = 0; - i->dir.ptr = buf; - i->dir.len = len; - - if ( ( *buf == '[' ) && ( buf[1] == ']' ) ) - i->flags = DIR_EMPTY; - else if ( ( *buf == '[' ) && ( buf[1] == '.' ) ) - i->flags = DIR_DOTDIR; - else if ( ( *buf == '[' ) && ( buf[1] == '-' ) ) - i->flags = DIR_DASHDIR; - else - i->flags = DIR_ABSDIR; - } - - /* But if its rooted in any way. */ - - if ( ( i->dir.len == 8 ) && !strncmp( i->dir.ptr, "[000000]", 8 ) ) - i->flags = DIR_ROOT; -} - - -/* - * path_build() - build a filename given dir/base/suffix/member - */ - -void path_build( PATHNAME * f, string * file, int binding ) -{ - struct dirinf root; - struct dirinf dir; - int g; - - file_build1( f, file ); - - /* Get info on root and dir for combining. */ - dir_flags( f->f_root.ptr, f->f_root.len, &root ); - dir_flags( f->f_dir.ptr, f->f_dir.len, &dir ); - - /* Combine. */ - switch ( g = grid[ root.flags ][ dir.flags ] ) - { - case G_DIR: - /* take dir */ - string_append_range( file, f->f_dir.ptr, f->f_dir.ptr + f->f_dir.len ); - break; - - case G_ROOT: - /* take root */ - string_append_range( file, f->f_root.ptr, f->f_root.ptr + f->f_root.len ); - break; - - case G_VAD: - /* root's dev + abs directory */ - string_append_range( file, root.dev.ptr, root.dev.ptr + root.dev.len ); - string_append_range( file, dir.dir.ptr, dir.dir.ptr + dir.dir.len ); - break; - - case G_DRD: - case G_DDD: - /* root's dev:[dir] + rel directory */ - string_append_range( file, f->f_root.ptr, f->f_root.ptr + f->f_root.len ); - - /* sanity checks: root ends with ] */ - - if ( file->value[file->size - 1] == ']' ) - string_pop_back( file ); - - /* Add . if separating two -'s */ - - if ( g == G_DDD ) - string_push_back( file, '.' ); - - /* skip [ of dir */ - string_append_range( file, dir.dir.ptr + 1, dir.dir.ptr + 1 + dir.dir.len - 1 ); - break; - - case G_VRD: - /* root's dev + rel directory made abs */ - string_append_range( file, root.dev.ptr, root.dev.ptr + root.dev.len ); - string_push_back( file, '[' ); - /* skip [. 
of rel dir */ - string_append_range( file, dir.dir.ptr + 2, dir.dir.ptr + 2 + dir.dir.len - 2 ); - break; - } - -# ifdef DEBUG - if ( DEBUG_SEARCH && ( root.flags || dir.flags ) ) - printf( "%d x %d = %d (%s)\n", root.flags, dir.flags, - grid[ root.flags ][ dir.flags ], file->value ); -# endif - - /* - * Now do the special :P modifier when no file was present. - * (none) (none) - * [dir1.dir2] [dir1] - * [dir] [000000] - * [.dir] (none) - * [] [] - */ - - if ( ( file->value[ file->size - 1 ] == ']' ) && f->parent ) - { - char * p = file->value + file->size; - while ( p-- > file->value ) - { - if ( *p == '.' ) - { - /* If we've truncated everything and left with '[', - return empty string. */ - if ( p == file->value + 1 ) - string_truncate( file, 0 ); - else - { - string_truncate( file, p - file->value ); - string_push_back( file, ']' ); - } - break; - } - - if ( *p == '-' ) - { - /* handle .- or - */ - if ( ( p > file->value ) && ( p[ -1 ] == '.' ) ) - --p; - - *p++ = ']'; - break; - } - - if ( *p == '[' ) - { - if ( p[ 1 ] == ']' ) - { - /* CONSIDER: I don't see any use of this code. We immediately - break, and 'p' is a local variable. */ - p += 2; - } - else - { - string_truncate( file, p - file->value ); - string_append( file, "[000000]" ); - } - break; - } - } - } - - /* Now copy the file pieces. */ - if ( f->f_base.len ) - { - string_append_range( file, f->f_base.ptr, f->f_base.ptr + f->f_base.len ); - } - - /* If there is no suffix, we append a "." onto all generated names. This - * keeps VMS from appending its own (wrong) idea of what the suffix should - * be. - */ - if ( f->f_suffix.len ) - string_append_range( file, f->f_suffix.ptr, f->f_suffix.ptr + f->f_suffix.len ); - else if ( binding && f->f_base.len ) - string_push_back( file, '.' ); - - if ( f->f_member.len ) - { - string_push_back( file, '(' ); - string_append_range( file, f->f_member.ptr, f->f_member.ptr + f->f_member.len ); - string_push_back( file, ')' ); - } - -# ifdef DEBUG - if ( DEBUG_SEARCH ) - printf( "built %.*s + %.*s / %.*s suf %.*s mem %.*s -> %s\n", - f->f_root.len, f->f_root.ptr, - f->f_dir.len, f->f_dir.ptr, - f->f_base.len, f->f_base.ptr, - f->f_suffix.len, f->f_suffix.ptr, - f->f_member.len, f->f_member.ptr, - file->value ); -# endif -} - - -/* - * path_parent() - make a PATHNAME point to its parent dir - */ - -void path_parent( PATHNAME * f ) -{ - if ( f->f_base.len ) - { - f->f_base.ptr = - f->f_suffix.ptr = - f->f_member.ptr = ""; - - f->f_base.len = - f->f_suffix.len = - f->f_member.len = 0; - } - else - { - f->parent = 1; - } -} - -# endif /* VMS */ diff --git a/jam-files/engine/pwd.c b/jam-files/engine/pwd.c deleted file mode 100644 index 90c8eb17..00000000 --- a/jam-files/engine/pwd.c +++ /dev/null @@ -1,66 +0,0 @@ -/* Copyright Vladimir Prus 2002, Rene Rivera 2005. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#include "jam.h" -#include "lists.h" -#include "newstr.h" -#include "pathsys.h" -#include "mem.h" - -#include <limits.h> -#include <errno.h> - -/* MinGW on windows declares PATH_MAX in limits.h */ -#if defined(NT) && ! defined(__GNUC__) -#include <direct.h> -#define PATH_MAX _MAX_PATH -#else -#include <unistd.h> -#if defined(__COMO__) - #include <linux/limits.h> -#endif -#endif - -#ifndef PATH_MAX - #define PATH_MAX 1024 -#endif - -/* The current directory can't change in bjam, so optimize this to cache -** the result. 
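The cache-plus-retry strategy described in the comment above boils down to the classic grow-until-it-fits getcwd() loop. A distilled, self-contained version of the idiom pwd() uses below (the function name is illustrative; jam additionally interns the result with newstr() and, on NT, canonicalizes it via short_path_to_long_path()):

    #include <errno.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Return a malloc'd current directory, doubling the buffer on ERANGE. */
    static char *getcwd_alloc(void)
    {
        size_t size = 256;
        for (;;) {
            char *buf = malloc(size);
            if (!buf)
                return NULL;
            if (getcwd(buf, size))
                return buf;          /* success: caller frees */
            free(buf);
            if (errno != ERANGE)
                return NULL;         /* real failure, not "too small" */
            size *= 2;               /* retry with a bigger buffer */
        }
    }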
-*/ -static char * pwd_result = NULL; - - -LIST* -pwd(void) -{ - if (!pwd_result) - { - int buffer_size = PATH_MAX; - char * result_buffer = 0; - do - { - char * buffer = BJAM_MALLOC_RAW(buffer_size); - result_buffer = getcwd(buffer,buffer_size); - if (result_buffer) - { - #ifdef NT - pwd_result = short_path_to_long_path(result_buffer); - #else - pwd_result = newstr(result_buffer); - #endif - } - buffer_size *= 2; - BJAM_FREE_RAW(buffer); - } - while (!pwd_result && errno == ERANGE); - - if (!pwd_result) - { - perror("can not get current directory"); - return L0; - } - } - return list_new(L0, pwd_result); -} diff --git a/jam-files/engine/pwd.h b/jam-files/engine/pwd.h deleted file mode 100644 index 37cb531e..00000000 --- a/jam-files/engine/pwd.h +++ /dev/null @@ -1,10 +0,0 @@ -/* Copyright Vladimir Prus 2002. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#ifndef PWD_H -#define PWD_H - -LIST* pwd(void); - -#endif diff --git a/jam-files/engine/regexp.c b/jam-files/engine/regexp.c deleted file mode 100644 index 30197a2f..00000000 --- a/jam-files/engine/regexp.c +++ /dev/null @@ -1,1328 +0,0 @@ -/* - * regcomp and regexec -- regsub and regerror are elsewhere - * - * Copyright (c) 1986 by University of Toronto. - * Written by Henry Spencer. Not derived from licensed software. - * - * Permission is granted to anyone to use this software for any - * purpose on any computer system, and to redistribute it freely, - * subject to the following restrictions: - * - * 1. The author is not responsible for the consequences of use of - * this software, no matter how awful, even if they arise - * from defects in it. - * - * 2. The origin of this software must not be misrepresented, either - * by explicit claim or by omission. - * - * 3. Altered versions must be plainly marked as such, and must not - * be misrepresented as being the original software. - *** THIS IS AN ALTERED VERSION. It was altered by John Gilmore, - *** hoptoad!gnu, on 27 Dec 1986, to add \n as an alternative to | - *** to assist in implementing egrep. - *** THIS IS AN ALTERED VERSION. It was altered by John Gilmore, - *** hoptoad!gnu, on 27 Dec 1986, to add \< and \> for word-matching - *** as in BSD grep and ex. - *** THIS IS AN ALTERED VERSION. It was altered by John Gilmore, - *** hoptoad!gnu, on 28 Dec 1986, to optimize characters quoted with \. - *** THIS IS AN ALTERED VERSION. It was altered by James A. Woods, - *** ames!jaw, on 19 June 1987, to quash a regcomp() redundancy. - *** THIS IS AN ALTERED VERSION. It was altered by Christopher Seiwald - *** seiwald@vix.com, on 28 August 1993, for use in jam. Regmagic.h - *** was moved into regexp.h, and the include of regexp.h now uses "'s - *** to avoid conflicting with the system regexp.h. Const, bless its - *** soul, was removed so it can compile everywhere. The declaration - *** of strchr() was in conflict on AIX, so it was removed (as it is - *** happily defined in string.h). - *** THIS IS AN ALTERED VERSION. It was altered by Christopher Seiwald - *** seiwald@perforce.com, on 20 January 2000, to use function prototypes. - * - * Beware that some of this code is subtly aware of the way operator precedence - * is structured in regular expressions. Serious changes in regular-expression - * syntax might require a total rethink. 
- */ - - -#include "jam.h" -#include "regexp.h" -#include <stdio.h> -#include <ctype.h> -#ifndef ultrix - #include <stdlib.h> -#endif -#include <string.h> - - -/* - * The "internal use only" fields in regexp.h are present to pass info from - * compile to execute that permits the execute phase to run lots faster on - * simple cases. They are: - : - * regstart char that must begin a match; '\0' if none obvious. - * reganch is the match anchored (at beginning-of-line only)? - * regmust string (pointer into program) that match must include, or NULL. - * regmlen length of regmust string. - * - * Regstart and reganch permit very fast decisions on suitable starting points - * for a match, cutting down the work a lot. Regmust permits fast rejection of - * lines that cannot possibly match. The regmust tests are costly enough that - * regcomp() supplies a regmust only if the r.e. contains something potentially - * expensive (at present, the only such thing detected is * or + at the start of - * the r.e., which can involve a lot of backup). Regmlen is supplied because the - * test in regexec() needs it and regcomp() is computing it anyway. - */ - -/* - * Structure for regexp "program". This is essentially a linear encoding of a - * nondeterministic finite-state machine (aka syntax charts or "railroad normal - * form" in parsing technology). Each node is an opcode plus a "next" pointer, - * possibly plus an operand. "Next" pointers of all nodes except BRANCH - * implement concatenation; a "next" pointer with a BRANCH on both ends of it is - * connecting two alternatives. [Here we have one of the subtle syntax - * dependencies: an individual BRANCH, as opposed to a collection of them, is - * never concatenated with anything because of operator precedence.] The operand - * of some types of node is a literal string; for others, it is a node leading - * into a sub-FSM. In particular, the operand of a BRANCH node is the first node - * of the branch. [NB this is *not* a tree structure: the tail of the branch - * connects to the thing following the set of BRANCHes.] The opcodes are: - */ - -/* definition number opnd? meaning */ -#define END 0 /* no End of program. */ -#define BOL 1 /* no Match "" at beginning of line. */ -#define EOL 2 /* no Match "" at end of line. */ -#define ANY 3 /* no Match any one character. */ -#define ANYOF 4 /* str Match any character in this string. */ -#define ANYBUT 5 /* str Match any character not in this string. */ -#define BRANCH 6 /* node Match this alternative, or the next... */ -#define BACK 7 /* no Match "", "next" ptr points backward. */ -#define EXACTLY 8 /* str Match this string. */ -#define NOTHING 9 /* no Match empty string. */ -#define STAR 10 /* node Match this (simple) thing 0 or more times. */ -#define PLUS 11 /* node Match this (simple) thing 1 or more times. */ -#define WORDA 12 /* no Match "" at wordchar, where prev is nonword */ -#define WORDZ 13 /* no Match "" at nonwordchar, where prev is word */ -#define OPEN 20 /* no Mark this point in input as start of #n. */ - /* OPEN+1 is number 1, etc. */ -#define CLOSE 30 /* no Analogous to OPEN. */ - - -/* - * Opcode notes: - * - * BRANCH The set of branches constituting a single choice are hooked - * together with their "next" pointers, since precedence prevents - * anything being concatenated to any individual branch. The - * "next" pointer of the last BRANCH in a choice points to the - * thing following the whole choice. 
This is also where the - * final "next" pointer of each individual branch points; each - * branch starts with the operand node of a BRANCH node. - * - * BACK Normal "next" pointers all implicitly point forward; BACK - * exists to make loop structures possible. - * - * STAR,PLUS '?', and complex '*' and '+', are implemented as circular - * BRANCH structures using BACK. Simple cases (one character - * per match) are implemented with STAR and PLUS for speed - * and to minimize recursive plunges. - * - * OPEN,CLOSE ...are numbered at compile time. - */ - -/* - * A node is one char of opcode followed by two chars of "next" pointer. - * "Next" pointers are stored as two 8-bit pieces, high order first. The - * value is a positive offset from the opcode of the node containing it. - * An operand, if any, simply follows the node. (Note that much of the - * code generation knows about this implicit relationship.) - * - * Using two bytes for the "next" pointer is vast overkill for most things, - * but allows patterns to get big without disasters. - */ -#define OP(p) (*(p)) -#define NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377)) -#define OPERAND(p) ((p) + 3) - -/* - * See regmagic.h for one further detail of program structure. - */ - - -/* - * Utility definitions. - */ -#ifndef CHARBITS -#define UCHARAT(p) ((int)*(unsigned char *)(p)) -#else -#define UCHARAT(p) ((int)*(p)&CHARBITS) -#endif - -#define FAIL(m) { regerror(m); return(NULL); } -#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?') - -/* - * Flags to be passed up and down. - */ -#define HASWIDTH 01 /* Known never to match null string. */ -#define SIMPLE 02 /* Simple enough to be STAR/PLUS operand. */ -#define SPSTART 04 /* Starts with * or +. */ -#define WORST 0 /* Worst case. */ - -/* - * Global work variables for regcomp(). - */ -static char *regparse; /* Input-scan pointer. */ -static int regnpar; /* () count. */ -static char regdummy; -static char *regcode; /* Code-emit pointer; ®dummy = don't. */ -static long regsize; /* Code size. */ - -/* - * Forward declarations for regcomp()'s friends. - */ -#ifndef STATIC -#define STATIC static -#endif -STATIC char *reg( int paren, int *flagp ); -STATIC char *regbranch( int *flagp ); -STATIC char *regpiece( int *flagp ); -STATIC char *regatom( int *flagp ); -STATIC char *regnode( int op ); -STATIC char *regnext( register char *p ); -STATIC void regc( int b ); -STATIC void reginsert( char op, char *opnd ); -STATIC void regtail( char *p, char *val ); -STATIC void regoptail( char *p, char *val ); -#ifdef STRCSPN -STATIC int strcspn(); -#endif - -/* - - regcomp - compile a regular expression into internal code - * - * We can't allocate space until we know how big the compiled form will be, - * but we can't compile it (and thus know how big it is) until we've got a - * place to put the code. So we cheat: we compile it twice, once with code - * generation turned off and size counting turned on, and once "for real". - * This also means that we don't allocate space until we are sure that the - * thing really will compile successfully, and we never have to move the - * code and thus invalidate pointers into it. (Note that it has to be in - * one piece because free() must be able to free it all.) - * - * Beware that the optimization-preparation code in here knows about some - * of the structure of the compiled regexp. 
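The two-pass trick described above (point regcode at the static regdummy so regc() only bumps regsize, then run the identical emit code again into real storage) is worth seeing in isolation. A minimal sketch of the pattern with hypothetical names; the original interleaves this with parsing:

    #include <stdlib.h>

    /* One emit routine serves both passes: measuring and storing. */
    struct emitter { char *out; size_t size; };

    static void emit_byte(struct emitter *e, char b)
    {
        if (e->out)
            *e->out++ = b;   /* second pass: store for real */
        else
            e->size++;       /* first pass: just count, like regc() */
    }

    static char *compile_twice(const char *src)
    {
        struct emitter e = { NULL, 0 };
        const char *p;
        char *buf;
        for (p = src; *p; ++p) emit_byte(&e, *p);  /* pass 1: size it */
        buf = malloc(e.size + 1);
        if (!buf)
            return NULL;
        e.out = buf;
        for (p = src; *p; ++p) emit_byte(&e, *p);  /* pass 2: emit */
        *e.out = '\0';
        return buf;      /* one block, so free() can release it all */
    }

The payoff is the same as regcomp()'s: no reallocation, so pointers into the emitted program never move, and allocation happens only once the input is known to compile.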
- */ -regexp * -regcomp( char *exp ) -{ - register regexp *r; - register char *scan; - register char *longest; - register unsigned len; - int flags; - - if (exp == NULL) - FAIL("NULL argument"); - - /* First pass: determine size, legality. */ -#ifdef notdef - if (exp[0] == '.' && exp[1] == '*') exp += 2; /* aid grep */ -#endif - regparse = (char *)exp; - regnpar = 1; - regsize = 0L; - regcode = ®dummy; - regc(MAGIC); - if (reg(0, &flags) == NULL) - return(NULL); - - /* Small enough for pointer-storage convention? */ - if (regsize >= 32767L) /* Probably could be 65535L. */ - FAIL("regexp too big"); - - /* Allocate space. */ - r = (regexp *)BJAM_MALLOC(sizeof(regexp) + (unsigned)regsize); - if (r == NULL) - FAIL("out of space"); - - /* Second pass: emit code. */ - regparse = (char *)exp; - regnpar = 1; - regcode = r->program; - regc(MAGIC); - if (reg(0, &flags) == NULL) - return(NULL); - - /* Dig out information for optimizations. */ - r->regstart = '\0'; /* Worst-case defaults. */ - r->reganch = 0; - r->regmust = NULL; - r->regmlen = 0; - scan = r->program+1; /* First BRANCH. */ - if (OP(regnext(scan)) == END) { /* Only one top-level choice. */ - scan = OPERAND(scan); - - /* Starting-point info. */ - if (OP(scan) == EXACTLY) - r->regstart = *OPERAND(scan); - else if (OP(scan) == BOL) - r->reganch++; - - /* - * If there's something expensive in the r.e., find the - * longest literal string that must appear and make it the - * regmust. Resolve ties in favor of later strings, since - * the regstart check works with the beginning of the r.e. - * and avoiding duplication strengthens checking. Not a - * strong reason, but sufficient in the absence of others. - */ - if (flags&SPSTART) { - longest = NULL; - len = 0; - for (; scan != NULL; scan = regnext(scan)) - if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) { - longest = OPERAND(scan); - len = strlen(OPERAND(scan)); - } - r->regmust = longest; - r->regmlen = len; - } - } - - return(r); -} - -/* - - reg - regular expression, i.e. main body or parenthesized thing - * - * Caller must absorb opening parenthesis. - * - * Combining parenthesis handling with the base level of regular expression - * is a trifle forced, but the need to tie the tails of the branches to what - * follows makes it hard to avoid. - */ -static char * -reg( - int paren, /* Parenthesized? */ - int *flagp ) -{ - register char *ret; - register char *br; - register char *ender; - register int parno = 0; - int flags; - - *flagp = HASWIDTH; /* Tentatively. */ - - /* Make an OPEN node, if parenthesized. */ - if (paren) { - if (regnpar >= NSUBEXP) - FAIL("too many ()"); - parno = regnpar; - regnpar++; - ret = regnode(OPEN+parno); - } else - ret = NULL; - - /* Pick up the branches, linking them together. */ - br = regbranch(&flags); - if (br == NULL) - return(NULL); - if (ret != NULL) - regtail(ret, br); /* OPEN -> first. */ - else - ret = br; - if (!(flags&HASWIDTH)) - *flagp &= ~HASWIDTH; - *flagp |= flags&SPSTART; - while (*regparse == '|' || *regparse == '\n') { - regparse++; - br = regbranch(&flags); - if (br == NULL) - return(NULL); - regtail(ret, br); /* BRANCH -> BRANCH. */ - if (!(flags&HASWIDTH)) - *flagp &= ~HASWIDTH; - *flagp |= flags&SPSTART; - } - - /* Make a closing node, and hook it on the end. */ - ender = regnode((paren) ? CLOSE+parno : END); - regtail(ret, ender); - - /* Hook the tails of the branches to the closing node. */ - for (br = ret; br != NULL; br = regnext(br)) - regoptail(br, ender); - - /* Check for proper termination. 
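The OP()/NEXT()/OPERAND() macros defined earlier make every node a one-byte opcode followed by a two-byte big-endian offset. A tiny runnable mirror of that encoding (function names are illustrative; the masking matches regtail()):

    #include <stdio.h>

    static int next_offset(const unsigned char *node)
    {
        return ((node[1] & 0377) << 8) | (node[2] & 0377);  /* NEXT(p) */
    }

    static void set_next(unsigned char *node, int offset)
    {
        node[1] = (offset >> 8) & 0377;   /* as regtail() stores it */
        node[2] = offset & 0377;
    }

    int main(void)
    {
        unsigned char node[3] = { 6 /* BRANCH */, 0, 0 };
        set_next(node, 300);
        printf("%d\n", next_offset(node)); /* prints 300 */
        return 0;
    }

Offsets are stored positive; for BACK nodes regnext() subtracts instead of adds, which is how loops are expressed without a signed field.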
*/ - if (paren && *regparse++ != ')') { - FAIL("unmatched ()"); - } else if (!paren && *regparse != '\0') { - if (*regparse == ')') { - FAIL("unmatched ()"); - } else - FAIL("junk on end"); /* "Can't happen". */ - /* NOTREACHED */ - } - - return(ret); -} - -/* - - regbranch - one alternative of an | operator - * - * Implements the concatenation operator. - */ -static char * -regbranch( int *flagp ) -{ - register char *ret; - register char *chain; - register char *latest; - int flags; - - *flagp = WORST; /* Tentatively. */ - - ret = regnode(BRANCH); - chain = NULL; - while (*regparse != '\0' && *regparse != ')' && - *regparse != '\n' && *regparse != '|') { - latest = regpiece(&flags); - if (latest == NULL) - return(NULL); - *flagp |= flags&HASWIDTH; - if (chain == NULL) /* First piece. */ - *flagp |= flags&SPSTART; - else - regtail(chain, latest); - chain = latest; - } - if (chain == NULL) /* Loop ran zero times. */ - (void) regnode(NOTHING); - - return(ret); -} - -/* - - regpiece - something followed by possible [*+?] - * - * Note that the branching code sequences used for ? and the general cases - * of * and + are somewhat optimized: they use the same NOTHING node as - * both the endmarker for their branch list and the body of the last branch. - * It might seem that this node could be dispensed with entirely, but the - * endmarker role is not redundant. - */ -static char * -regpiece( int *flagp ) -{ - register char *ret; - register char op; - register char *next; - int flags; - - ret = regatom(&flags); - if (ret == NULL) - return(NULL); - - op = *regparse; - if (!ISMULT(op)) { - *flagp = flags; - return(ret); - } - - if (!(flags&HASWIDTH) && op != '?') - FAIL("*+ operand could be empty"); - *flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH); - - if (op == '*' && (flags&SIMPLE)) - reginsert(STAR, ret); - else if (op == '*') { - /* Emit x* as (x&|), where & means "self". */ - reginsert(BRANCH, ret); /* Either x */ - regoptail(ret, regnode(BACK)); /* and loop */ - regoptail(ret, ret); /* back */ - regtail(ret, regnode(BRANCH)); /* or */ - regtail(ret, regnode(NOTHING)); /* null. */ - } else if (op == '+' && (flags&SIMPLE)) - reginsert(PLUS, ret); - else if (op == '+') { - /* Emit x+ as x(&|), where & means "self". */ - next = regnode(BRANCH); /* Either */ - regtail(ret, next); - regtail(regnode(BACK), ret); /* loop back */ - regtail(next, regnode(BRANCH)); /* or */ - regtail(ret, regnode(NOTHING)); /* null. */ - } else if (op == '?') { - /* Emit x? as (x|) */ - reginsert(BRANCH, ret); /* Either x */ - regtail(ret, regnode(BRANCH)); /* or */ - next = regnode(NOTHING); /* null. */ - regtail(ret, next); - regoptail(ret, next); - } - regparse++; - if (ISMULT(*regparse)) - FAIL("nested *?+"); - - return(ret); -} - -/* - - regatom - the lowest level - * - * Optimization: gobbles an entire sequence of ordinary characters so that - * it can turn them into a single node, which is smaller to store and - * faster to run. Backslashed characters are exceptions, each becoming a - * separate node; the code is simpler that way and it's not worth fixing. - */ -static char * -regatom( int *flagp ) -{ - register char *ret; - int flags; - - *flagp = WORST; /* Tentatively. */ - - switch (*regparse++) { - /* FIXME: these chars only have meaning at beg/end of pat? 
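To make the "(x&|)" wiring above concrete: for a non-SIMPLE operand such as (ab)*, regpiece() emits roughly this chain (an illustrative layout, offsets omitted):

    BRANCH --> (ab) --> BACK         BACK's "next" jumps back to BRANCH
       |
       +-alt-> BRANCH --> NOTHING --> rest of the program

A one-character operand like plain a* is SIMPLE, so it takes the compact path instead: reginsert() relocates the operand three bytes later and plants a single STAR node in front of it.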
*/ - case '^': - ret = regnode(BOL); - break; - case '$': - ret = regnode(EOL); - break; - case '.': - ret = regnode(ANY); - *flagp |= HASWIDTH|SIMPLE; - break; - case '[': { - register int classr; - register int classend; - - if (*regparse == '^') { /* Complement of range. */ - ret = regnode(ANYBUT); - regparse++; - } else - ret = regnode(ANYOF); - if (*regparse == ']' || *regparse == '-') - regc(*regparse++); - while (*regparse != '\0' && *regparse != ']') { - if (*regparse == '-') { - regparse++; - if (*regparse == ']' || *regparse == '\0') - regc('-'); - else { - classr = UCHARAT(regparse-2)+1; - classend = UCHARAT(regparse); - if (classr > classend+1) - FAIL("invalid [] range"); - for (; classr <= classend; classr++) - regc(classr); - regparse++; - } - } else - regc(*regparse++); - } - regc('\0'); - if (*regparse != ']') - FAIL("unmatched []"); - regparse++; - *flagp |= HASWIDTH|SIMPLE; - } - break; - case '(': - ret = reg(1, &flags); - if (ret == NULL) - return(NULL); - *flagp |= flags&(HASWIDTH|SPSTART); - break; - case '\0': - case '|': - case '\n': - case ')': - FAIL("internal urp"); /* Supposed to be caught earlier. */ - break; - case '?': - case '+': - case '*': - FAIL("?+* follows nothing"); - break; - case '\\': - switch (*regparse++) { - case '\0': - FAIL("trailing \\"); - break; - case '<': - ret = regnode(WORDA); - break; - case '>': - ret = regnode(WORDZ); - break; - /* FIXME: Someday handle \1, \2, ... */ - default: - /* Handle general quoted chars in exact-match routine */ - goto de_fault; - } - break; - de_fault: - default: - /* - * Encode a string of characters to be matched exactly. - * - * This is a bit tricky due to quoted chars and due to - * '*', '+', and '?' taking the SINGLE char previous - * as their operand. - * - * On entry, the char at regparse[-1] is going to go - * into the string, no matter what it is. (It could be - * following a \ if we are entered from the '\' case.) - * - * Basic idea is to pick up a good char in ch and - * examine the next char. If it's *+? then we twiddle. - * If it's \ then we frozzle. If it's other magic char - * we push ch and terminate the string. If none of the - * above, we push ch on the string and go around again. - * - * regprev is used to remember where "the current char" - * starts in the string, if due to a *+? we need to back - * up and put the current char in a separate, 1-char, string. - * When regprev is NULL, ch is the only char in the - * string; this is used in *+? handling, and in setting - * flags |= SIMPLE at the end. - */ - { - char *regprev; - register char ch; - - regparse--; /* Look at cur char */ - ret = regnode(EXACTLY); - for ( regprev = 0 ; ; ) { - ch = *regparse++; /* Get current char */ - switch (*regparse) { /* look at next one */ - - default: - regc(ch); /* Add cur to string */ - break; - - case '.': case '[': case '(': - case ')': case '|': case '\n': - case '$': case '^': - case '\0': - /* FIXME, $ and ^ should not always be magic */ - magic: - regc(ch); /* dump cur char */ - goto done; /* and we are done */ - - case '?': case '+': case '*': - if (!regprev) /* If just ch in str, */ - goto magic; /* use it */ - /* End mult-char string one early */ - regparse = regprev; /* Back up parse */ - goto done; - - case '\\': - regc(ch); /* Cur char OK */ - switch (regparse[1]){ /* Look after \ */ - case '\0': - case '<': - case '>': - /* FIXME: Someday handle \1, \2, ... */ - goto done; /* Not quoted */ - default: - /* Backup point is \, scan * point is after it. 
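A worked example of the regprev backup described in the comment above: compiling abc*, the string gobbler collects 'a' and 'b', but on seeing '*' after 'c' it backs regparse up so the closure gets a single-character operand:

    abc*   -->   EXACTLY "ab"        (multi-char string ended one early)
                 STAR
                   EXACTLY "c"       (one char, hence SIMPLE)

Without the backup, '*' would wrongly apply to the whole EXACTLY "abc" node rather than to the single preceding character.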
*/ - regprev = regparse; - regparse++; - continue; /* NOT break; */ - } - } - regprev = regparse; /* Set backup point */ - } - done: - regc('\0'); - *flagp |= HASWIDTH; - if (!regprev) /* One char? */ - *flagp |= SIMPLE; - } - break; - } - - return(ret); -} - -/* - - regnode - emit a node - */ -static char * /* Location. */ -regnode( int op ) -{ - register char *ret; - register char *ptr; - - ret = regcode; - if (ret == ®dummy) { - regsize += 3; - return(ret); - } - - ptr = ret; - *ptr++ = op; - *ptr++ = '\0'; /* Null "next" pointer. */ - *ptr++ = '\0'; - regcode = ptr; - - return(ret); -} - -/* - - regc - emit (if appropriate) a byte of code - */ -static void -regc( int b ) -{ - if (regcode != ®dummy) - *regcode++ = b; - else - regsize++; -} - -/* - - reginsert - insert an operator in front of already-emitted operand - * - * Means relocating the operand. - */ -static void -reginsert( - char op, - char *opnd ) -{ - register char *src; - register char *dst; - register char *place; - - if (regcode == ®dummy) { - regsize += 3; - return; - } - - src = regcode; - regcode += 3; - dst = regcode; - while (src > opnd) - *--dst = *--src; - - place = opnd; /* Op node, where operand used to be. */ - *place++ = op; - *place++ = '\0'; - *place++ = '\0'; -} - -/* - - regtail - set the next-pointer at the end of a node chain - */ -static void -regtail( - char *p, - char *val ) -{ - register char *scan; - register char *temp; - register int offset; - - if (p == ®dummy) - return; - - /* Find last node. */ - scan = p; - for (;;) { - temp = regnext(scan); - if (temp == NULL) - break; - scan = temp; - } - - if (OP(scan) == BACK) - offset = scan - val; - else - offset = val - scan; - *(scan+1) = (offset>>8)&0377; - *(scan+2) = offset&0377; -} - -/* - - regoptail - regtail on operand of first argument; nop if operandless - */ - -static void -regoptail( - char *p, - char *val ) -{ - /* "Operandless" and "op != BRANCH" are synonymous in practice. */ - if (p == NULL || p == ®dummy || OP(p) != BRANCH) - return; - regtail(OPERAND(p), val); -} - -/* - * regexec and friends - */ - -/* - * Global work variables for regexec(). - */ -static char *reginput; /* String-input pointer. */ -static char *regbol; /* Beginning of input, for ^ check. */ -static char **regstartp; /* Pointer to startp array. */ -static char **regendp; /* Ditto for endp. */ - -/* - * Forwards. - */ -STATIC int regtry( regexp *prog, char *string ); -STATIC int regmatch( char *prog ); -STATIC int regrepeat( char *p ); - -#ifdef DEBUG -int regnarrate = 0; -void regdump(); -STATIC char *regprop(); -#endif - -/* - - regexec - match a regexp against a string - */ -int -regexec( - register regexp *prog, - register char *string ) -{ - register char *s; - - /* Be paranoid... */ - if (prog == NULL || string == NULL) { - regerror("NULL parameter"); - return(0); - } - - /* Check validity of program. */ - if (UCHARAT(prog->program) != MAGIC) { - regerror("corrupted program"); - return(0); - } - - /* If there is a "must appear" string, look for it. */ - if ( prog->regmust != NULL ) - { - s = (char *)string; - while ( ( s = strchr( s, prog->regmust[ 0 ] ) ) != NULL ) - { - if ( !strncmp( s, prog->regmust, prog->regmlen ) ) - break; /* Found it. */ - ++s; - } - if ( s == NULL ) /* Not present. */ - return 0; - } - - /* Mark beginning of line for ^ . */ - regbol = (char *)string; - - /* Simplest case: anchored match need be tried only once. */ - if ( prog->reganch ) - return regtry( prog, string ); - - /* Messy cases: unanchored match. 
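The regmust fast-rejection scan that regexec() performs above distills to a strchr()/strncmp() filter. A self-contained version (the function name is hypothetical):

    #include <string.h>

    /* Fast rejection, as in regexec(): if the r.e. has a literal that
       must appear (regmust/regmlen), find it before trying any match. */
    static int must_appear(const char *s, const char *must, size_t mlen)
    {
        while ((s = strchr(s, must[0])) != NULL) {
            if (strncmp(s, must, mlen) == 0)
                return 1;   /* present: worth running the real matcher */
            ++s;
        }
        return 0;           /* absent: every match attempt would fail */
    }

Since this check is linear while repeated match attempts are potentially quadratic, it pays off exactly when the pattern contains something expensive, which is the only case regcomp() computes a regmust for.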
*/ - s = (char *)string; - if (prog->regstart != '\0') - /* We know what char it must start with. */ - while ((s = strchr(s, prog->regstart)) != NULL) { - if (regtry(prog, s)) - return(1); - s++; - } - else - /* We do not -- general case. */ - do { - if ( regtry( prog, s ) ) - return( 1 ); - } while ( *s++ != '\0' ); - - /* Failure. */ - return 0; -} - - -/* - * regtry() - try match at specific point. - */ - -static int /* 0 failure, 1 success */ -regtry( - regexp *prog, - char *string ) -{ - register int i; - register char * * sp; - register char * * ep; - - reginput = string; - regstartp = prog->startp; - regendp = prog->endp; - - sp = prog->startp; - ep = prog->endp; - for ( i = NSUBEXP; i > 0; --i ) - { - *sp++ = NULL; - *ep++ = NULL; - } - if ( regmatch( prog->program + 1 ) ) - { - prog->startp[ 0 ] = string; - prog->endp[ 0 ] = reginput; - return 1; - } - else - return 0; -} - - -/* - * regmatch() - main matching routine. - * - * Conceptually the strategy is simple: check to see whether the current node - * matches, call self recursively to see whether the rest matches, and then act - * accordingly. In practice we make some effort to avoid recursion, in - * particular by going through "ordinary" nodes (that do not need to know - * whether the rest of the match failed) by a loop instead of by recursion. - */ - -static int /* 0 failure, 1 success */ -regmatch( char * prog ) -{ - char * scan; /* Current node. */ - char * next; /* Next node. */ - - scan = prog; -#ifdef DEBUG - if (scan != NULL && regnarrate) - fprintf(stderr, "%s(\n", regprop(scan)); -#endif - while (scan != NULL) { -#ifdef DEBUG - if (regnarrate) - fprintf(stderr, "%s...\n", regprop(scan)); -#endif - next = regnext(scan); - - switch (OP(scan)) { - case BOL: - if (reginput != regbol) - return(0); - break; - case EOL: - if (*reginput != '\0') - return(0); - break; - case WORDA: - /* Must be looking at a letter, digit, or _ */ - if ((!isalnum(*reginput)) && *reginput != '_') - return(0); - /* Prev must be BOL or nonword */ - if (reginput > regbol && - (isalnum(reginput[-1]) || reginput[-1] == '_')) - return(0); - break; - case WORDZ: - /* Must be looking at non letter, digit, or _ */ - if (isalnum(*reginput) || *reginput == '_') - return(0); - /* We don't care what the previous char was */ - break; - case ANY: - if (*reginput == '\0') - return(0); - reginput++; - break; - case EXACTLY: { - register int len; - register char *opnd; - - opnd = OPERAND(scan); - /* Inline the first character, for speed. */ - if (*opnd != *reginput) - return(0); - len = strlen(opnd); - if (len > 1 && strncmp(opnd, reginput, len) != 0) - return(0); - reginput += len; - } - break; - case ANYOF: - if (*reginput == '\0' || strchr(OPERAND(scan), *reginput) == NULL) - return(0); - reginput++; - break; - case ANYBUT: - if (*reginput == '\0' || strchr(OPERAND(scan), *reginput) != NULL) - return(0); - reginput++; - break; - case NOTHING: - break; - case BACK: - break; - case OPEN+1: - case OPEN+2: - case OPEN+3: - case OPEN+4: - case OPEN+5: - case OPEN+6: - case OPEN+7: - case OPEN+8: - case OPEN+9: { - register int no; - register char *save; - - no = OP(scan) - OPEN; - save = reginput; - - if (regmatch(next)) { - /* - * Don't set startp if some later - * invocation of the same parentheses - * already has. 
- */ - if (regstartp[no] == NULL) - regstartp[no] = save; - return(1); - } else - return(0); - } - break; - case CLOSE+1: - case CLOSE+2: - case CLOSE+3: - case CLOSE+4: - case CLOSE+5: - case CLOSE+6: - case CLOSE+7: - case CLOSE+8: - case CLOSE+9: { - register int no; - register char *save; - - no = OP(scan) - CLOSE; - save = reginput; - - if (regmatch(next)) { - /* - * Don't set endp if some later - * invocation of the same parentheses - * already has. - */ - if (regendp[no] == NULL) - regendp[no] = save; - return(1); - } else - return(0); - } - break; - case BRANCH: { - register char *save; - - if (OP(next) != BRANCH) /* No choice. */ - next = OPERAND(scan); /* Avoid recursion. */ - else { - do { - save = reginput; - if (regmatch(OPERAND(scan))) - return(1); - reginput = save; - scan = regnext(scan); - } while (scan != NULL && OP(scan) == BRANCH); - return(0); - /* NOTREACHED */ - } - } - break; - case STAR: - case PLUS: { - register char nextch; - register int no; - register char *save; - register int min; - - /* - * Lookahead to avoid useless match attempts - * when we know what character comes next. - */ - nextch = '\0'; - if (OP(next) == EXACTLY) - nextch = *OPERAND(next); - min = (OP(scan) == STAR) ? 0 : 1; - save = reginput; - no = regrepeat(OPERAND(scan)); - while (no >= min) { - /* If it could work, try it. */ - if (nextch == '\0' || *reginput == nextch) - if (regmatch(next)) - return(1); - /* Couldn't or didn't -- back up. */ - no--; - reginput = save + no; - } - return(0); - } - break; - case END: - return(1); /* Success! */ - break; - default: - regerror("memory corruption"); - return(0); - break; - } - - scan = next; - } - - /* - * We get here only if there's trouble -- normally "case END" is - * the terminating point. - */ - regerror("corrupted pointers"); - return(0); -} - -/* - - regrepeat - repeatedly match something simple, report how many - */ -static int -regrepeat( char *p ) -{ - register int count = 0; - register char *scan; - register char *opnd; - - scan = reginput; - opnd = OPERAND(p); - switch (OP(p)) { - case ANY: - count = strlen(scan); - scan += count; - break; - case EXACTLY: - while (*opnd == *scan) { - count++; - scan++; - } - break; - case ANYOF: - while (*scan != '\0' && strchr(opnd, *scan) != NULL) { - count++; - scan++; - } - break; - case ANYBUT: - while (*scan != '\0' && strchr(opnd, *scan) == NULL) { - count++; - scan++; - } - break; - default: /* Oh dear. Called inappropriately. */ - regerror("internal foulup"); - count = 0; /* Best compromise. */ - break; - } - reginput = scan; - - return(count); -} - -/* - - regnext - dig the "next" pointer out of a node - */ -static char * -regnext( register char *p ) -{ - register int offset; - - if (p == ®dummy) - return(NULL); - - offset = NEXT(p); - if (offset == 0) - return(NULL); - - if (OP(p) == BACK) - return(p-offset); - else - return(p+offset); -} - -#ifdef DEBUG - -STATIC char *regprop(); - -/* - - regdump - dump a regexp onto stdout in vaguely comprehensible form - */ -void -regdump( regexp *r ) -{ - register char *s; - register char op = EXACTLY; /* Arbitrary non-END op. */ - register char *next; - - - s = r->program + 1; - while (op != END) { /* While that wasn't END last time... */ - op = OP(s); - printf("%2d%s", s-r->program, regprop(s)); /* Where, what. */ - next = regnext(s); - if (next == NULL) /* Next ptr. */ - printf("(0)"); - else - printf("(%d)", (s-r->program)+(next-s)); - s += 3; - if (op == ANYOF || op == ANYBUT || op == EXACTLY) { - /* Literal string, where present. 
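The STAR/PLUS case above is greedy matching with one-step backoff: regrepeat() consumes the maximum, then the loop gives back one repeat at a time, using nextch as a cheap lookahead filter. A toy, self-contained rendition for the pattern shape a*b (names are illustrative; the real code generalizes over node types):

    #include <stdio.h>

    /* Match rep* followed by the single char 'then' at the start of s. */
    static int star_then(const char *s, char rep, char then)
    {
        size_t no = 0;
        while (s[no] == rep)
            no++;                    /* like regrepeat(): max count */
        for (;;) {                   /* back off one repeat at a time */
            if (s[no] == then)
                return 1;            /* the rest matches here */
            if (no == 0)
                return 0;            /* gave everything back: fail */
            --no;
        }
    }

    int main(void)
    {
        printf("%d\n", star_then("aaab", 'a', 'b'));  /* 1 */
        printf("%d\n", star_then("aaac", 'a', 'b'));  /* 0 */
        return 0;
    }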
*/ - while (*s != '\0') { - putchar(*s); - s++; - } - s++; - } - putchar('\n'); - } - - /* Header fields of interest. */ - if (r->regstart != '\0') - printf("start `%c' ", r->regstart); - if (r->reganch) - printf("anchored "); - if (r->regmust != NULL) - printf("must have \"%s\"", r->regmust); - printf("\n"); -} - -/* - - regprop - printable representation of opcode - */ -static char * -regprop( char *op ) -{ - register char *p; - static char buf[50]; - - (void) strcpy(buf, ":"); - - switch (OP(op)) { - case BOL: - p = "BOL"; - break; - case EOL: - p = "EOL"; - break; - case ANY: - p = "ANY"; - break; - case ANYOF: - p = "ANYOF"; - break; - case ANYBUT: - p = "ANYBUT"; - break; - case BRANCH: - p = "BRANCH"; - break; - case EXACTLY: - p = "EXACTLY"; - break; - case NOTHING: - p = "NOTHING"; - break; - case BACK: - p = "BACK"; - break; - case END: - p = "END"; - break; - case OPEN+1: - case OPEN+2: - case OPEN+3: - case OPEN+4: - case OPEN+5: - case OPEN+6: - case OPEN+7: - case OPEN+8: - case OPEN+9: - sprintf(buf+strlen(buf), "OPEN%d", OP(op)-OPEN); - p = NULL; - break; - case CLOSE+1: - case CLOSE+2: - case CLOSE+3: - case CLOSE+4: - case CLOSE+5: - case CLOSE+6: - case CLOSE+7: - case CLOSE+8: - case CLOSE+9: - sprintf(buf+strlen(buf), "CLOSE%d", OP(op)-CLOSE); - p = NULL; - break; - case STAR: - p = "STAR"; - break; - case PLUS: - p = "PLUS"; - break; - case WORDA: - p = "WORDA"; - break; - case WORDZ: - p = "WORDZ"; - break; - default: - regerror("corrupted opcode"); - break; - } - if (p != NULL) - (void) strcat(buf, p); - return(buf); -} -#endif - -/* - * The following is provided for those people who do not have strcspn() in - * their C libraries. They should get off their butts and do something - * about it; at least one public-domain implementation of those (highly - * useful) string routines has been published on Usenet. - */ -#ifdef STRCSPN -/* - * strcspn - find length of initial segment of s1 consisting entirely - * of characters not from s2 - */ - -static int -strcspn( - char *s1, - char *s2 ) -{ - register char *scan1; - register char *scan2; - register int count; - - count = 0; - for (scan1 = s1; *scan1 != '\0'; scan1++) { - for (scan2 = s2; *scan2 != '\0';) /* ++ moved down. */ - if (*scan1 == *scan2++) - return(count); - count++; - } - return(count); -} -#endif diff --git a/jam-files/engine/regexp.h b/jam-files/engine/regexp.h deleted file mode 100644 index 9d4604f6..00000000 --- a/jam-files/engine/regexp.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Definitions etc. for regexp(3) routines. - * - * Caveat: this is V8 regexp(3) [actually, a reimplementation thereof], - * not the System V one. - */ -#ifndef REGEXP_DWA20011023_H -# define REGEXP_DWA20011023_H - -#define NSUBEXP 10 -typedef struct regexp { - char *startp[NSUBEXP]; - char *endp[NSUBEXP]; - char regstart; /* Internal use only. */ - char reganch; /* Internal use only. */ - char *regmust; /* Internal use only. */ - int regmlen; /* Internal use only. */ - char program[1]; /* Unwarranted chumminess with compiler. */ -} regexp; - -regexp *regcomp( char *exp ); -int regexec( regexp *prog, char *string ); -void regerror( char *s ); - -/* - * The first byte of the regexp internal "program" is actually this magic - * number; the start node begins in the second byte. 
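Taken together, the deleted regexp.c/regexp.h pair expose a small V8-style API. A minimal usage sketch; regerror() is declared by the header but left to the host program, so the stub below merely stands in for jam's own:

    #include <stdio.h>
    #include "regexp.h"

    void regerror( char * s ) { fprintf( stderr, "re error: %s\n", s ); }

    int main(void)
    {
        regexp *r = regcomp("(ab*)c");
        if (r && regexec(r, "xxabbbcyy")) {
            /* startp[0]/endp[0] bound the whole match; slot 1 is the
               first parenthesized group, up to NSUBEXP-1 groups total. */
            printf("%.*s\n",
                   (int)(r->endp[1] - r->startp[1]), r->startp[1]);
        }
        return 0;
    }

This would print abbb. The compiled program lives in a single allocated block, which is exactly why regcomp() insists on knowing its size up front.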
- */ -#define MAGIC 0234 - -#endif - diff --git a/jam-files/engine/rules.c b/jam-files/engine/rules.c deleted file mode 100644 index a0be1d34..00000000 --- a/jam-files/engine/rules.c +++ /dev/null @@ -1,810 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -# include "jam.h" -# include "lists.h" -# include "parse.h" -# include "variable.h" -# include "rules.h" -# include "newstr.h" -# include "hash.h" -# include "modules.h" -# include "search.h" -# include "lists.h" -# include "pathsys.h" -# include "timestamp.h" - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -/* - * rules.c - access to RULEs, TARGETs, and ACTIONs - * - * External routines: - * - * bindrule() - return pointer to RULE, creating it if necessary. - * bindtarget() - return pointer to TARGET, creating it if necessary. - * touch_target() - mark a target to simulate being new. - * targetlist() - turn list of target names into a TARGET chain. - * targetentry() - add a TARGET to a chain of TARGETS. - * actionlist() - append to an ACTION chain. - * addsettings() - add a deferred "set" command to a target. - * pushsettings() - set all target specific variables. - * popsettings() - reset target specific variables to their pre-push values. - * freesettings() - delete a settings list. - * rules_done() - free RULE and TARGET tables. - * - * 04/12/94 (seiwald) - actionlist() now just appends a single action. - * 08/23/94 (seiwald) - Support for '+=' (append to variable) - */ - -static void set_rule_actions( RULE *, rule_actions * ); -static void set_rule_body ( RULE *, argument_list *, PARSE * procedure ); - -static struct hash * targethash = 0; - -struct _located_target -{ - char * file_name; - TARGET * target; -}; -typedef struct _located_target LOCATED_TARGET ; - -static struct hash * located_targets = 0; - - -/* - * target_include() - adds the 'included' TARGET to the list of targets included - * by the 'including' TARGET. Such targets are modeled as dependencies of the - * internal include node belonging to the 'including' TARGET. - */ - -void target_include( TARGET * including, TARGET * included ) -{ - TARGET * internal; - if ( !including->includes ) - { - including->includes = copytarget( including ); - including->includes->original_target = including; - } - internal = including->includes; - internal->depends = targetentry( internal->depends, included ); -} - - -/* - * enter_rule() - return pointer to RULE, creating it if necessary in - * target_module. - */ - -static RULE * enter_rule( char * rulename, module_t * target_module ) -{ - RULE rule; - RULE * r = &rule; - - r->name = rulename; - - if ( hashenter( demand_rules( target_module ), (HASHDATA * *)&r ) ) - { - r->name = newstr( rulename ); /* never freed */ - r->procedure = (PARSE *)0; - r->module = 0; - r->actions = 0; - r->arguments = 0; - r->exported = 0; - r->module = target_module; -#ifdef HAVE_PYTHON - r->python_function = 0; -#endif - } - return r; -} - - -/* - * define_rule() - return pointer to RULE, creating it if necessary in - * target_module. Prepare it to accept a body or action originating in - * src_module. 
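enter_rule() above, like bindtarget() further down, relies on a hashenter() convention that is easy to misread: the caller passes a pointer to a stack-allocated probe, and the table either redirects it to the existing record or copies the probe in and points it at the permanent slot. A distilled stand-in using a flat array in place of jam's hash (all names hypothetical; no overflow handling in this sketch):

    #include <stdio.h>
    #include <string.h>

    struct rec { const char *name; int initialized; };

    static struct rec table[16];
    static int table_n = 0;

    /* Returns 1 if *probe was newly entered (caller should initialize). */
    static int enter(struct rec **probe)
    {
        int i;
        for (i = 0; i < table_n; ++i)
            if (strcmp(table[i].name, (*probe)->name) == 0) {
                *probe = &table[i];       /* existing record */
                return 0;
            }
        table[table_n] = **probe;         /* copy probe into the table */
        *probe = &table[table_n++];       /* caller gets permanent slot */
        return 1;
    }

    int main(void)
    {
        struct rec probe = { "all", 0 };
        struct rec *r = &probe;
        if (enter(&r))
            r->initialized = 1;           /* first sight: fill defaults */
        printf("%s %d\n", r->name, r->initialized);
        return 0;
    }

The probe's name field must outlive the table entry, which is why enter_rule() replaces it with newstr(rulename) immediately after a successful insert.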
- */ - -static RULE * define_rule -( - module_t * src_module, - char * rulename, - module_t * target_module -) -{ - RULE * r = enter_rule( rulename, target_module ); - if ( r->module != src_module ) /* if the rule was imported from elsewhere, clear it now */ - { - set_rule_body( r, 0, 0 ); - set_rule_actions( r, 0 ); - r->module = src_module; /* r will be executed in the source module */ - } - return r; -} - - -void rule_free( RULE * r ) -{ - freestr( r->name ); - r->name = ""; - parse_free( r->procedure ); - r->procedure = 0; - if ( r->arguments ) - args_free( r->arguments ); - r->arguments = 0; - if ( r->actions ) - actions_free( r->actions ); - r->actions = 0; -} - - -/* - * bindtarget() - return pointer to TARGET, creating it if necessary. - */ - -TARGET * bindtarget( char const * target_name ) -{ - TARGET target; - TARGET * t = ⌖ - - if ( !targethash ) - targethash = hashinit( sizeof( TARGET ), "targets" ); - - /* Perforce added const everywhere. No time to merge that change. */ -#ifdef NT - target_name = short_path_to_long_path( (char *)target_name ); -#endif - t->name = (char *)target_name; - - if ( hashenter( targethash, (HASHDATA * *)&t ) ) - { - memset( (char *)t, '\0', sizeof( *t ) ); - t->name = newstr( (char *)target_name ); /* never freed */ - t->boundname = t->name; /* default for T_FLAG_NOTFILE */ - } - - return t; -} - - -static void bind_explicitly_located_target( void * xtarget, void * data ) -{ - TARGET * t = (TARGET *)xtarget; - if ( !( t->flags & T_FLAG_NOTFILE ) ) - { - /* Check if there's a setting for LOCATE */ - SETTINGS * s = t->settings; - for ( ; s ; s = s->next ) - { - if ( strcmp( s->symbol, "LOCATE" ) == 0 ) - { - pushsettings( t->settings ); - /* We are binding a target with explicit LOCATE. So third - * argument is of no use: nothing will be returned through it. - */ - t->boundname = search( t->name, &t->time, 0, 0 ); - popsettings( t->settings ); - break; - } - } - } -} - - -void bind_explicitly_located_targets() -{ - if ( targethash ) - hashenumerate( targethash, bind_explicitly_located_target, (void *)0 ); -} - - -/* TODO: It is probably not a good idea to use functions in other modules like - this. */ -void call_bind_rule( char * target, char * boundname ); - - -TARGET * search_for_target ( char * name, LIST * search_path ) -{ - PATHNAME f[1]; - string buf[1]; - LOCATED_TARGET lt; - LOCATED_TARGET * lta = < - time_t time; - int found = 0; - TARGET * result; - - string_new( buf ); - - path_parse( name, f ); - - f->f_grist.ptr = 0; - f->f_grist.len = 0; - - while ( search_path ) - { - f->f_root.ptr = search_path->string; - f->f_root.len = strlen( search_path->string ); - - string_truncate( buf, 0 ); - path_build( f, buf, 1 ); - - lt.file_name = buf->value ; - - if ( !located_targets ) - located_targets = hashinit( sizeof(LOCATED_TARGET), - "located targets" ); - - if ( hashcheck( located_targets, (HASHDATA * *)<a ) ) - { - return lta->target; - } - - timestamp( buf->value, &time ); - if ( time ) - { - found = 1; - break; - } - - search_path = list_next( search_path ); - } - - if ( !found ) - { - f->f_root.ptr = 0; - f->f_root.len = 0; - - string_truncate( buf, 0 ); - path_build( f, buf, 1 ); - - timestamp( buf->value, &time ); - } - - result = bindtarget( name ); - result->boundname = newstr( buf->value ); - result->time = time; - result->binding = time ? 
T_BIND_EXISTS : T_BIND_MISSING; - - call_bind_rule( result->name, result->boundname ); - - string_free( buf ); - - return result; -} - - -/* - * copytarget() - make a new target with the old target's name. - * - * Not entered into hash table -- for internal nodes. - */ - -TARGET * copytarget( const TARGET * ot ) -{ - TARGET * t = (TARGET *)BJAM_MALLOC( sizeof( *t ) ); - memset( (char *)t, '\0', sizeof( *t ) ); - t->name = copystr( ot->name ); - t->boundname = t->name; - - t->flags |= T_FLAG_NOTFILE | T_FLAG_INTERNAL; - - return t; -} - - -/* - * touch_target() - mark a target to simulate being new. - */ - -void touch_target( char * t ) -{ - bindtarget( t )->flags |= T_FLAG_TOUCHED; -} - - -/* - * targetlist() - turn list of target names into a TARGET chain. - * - * Inputs: - * chain existing TARGETS to append to - * targets list of target names - */ - -TARGETS * targetlist( TARGETS * chain, LIST * target_names ) -{ - for ( ; target_names; target_names = list_next( target_names ) ) - chain = targetentry( chain, bindtarget( target_names->string ) ); - return chain; -} - - -/* - * targetentry() - add a TARGET to a chain of TARGETS. - * - * Inputs: - * chain existing TARGETS to append to - * target new target to append - */ - -TARGETS * targetentry( TARGETS * chain, TARGET * target ) -{ - TARGETS * c = (TARGETS *)BJAM_MALLOC( sizeof( TARGETS ) ); - c->target = target; - - if ( !chain ) chain = c; - else chain->tail->next = c; - chain->tail = c; - c->next = 0; - - return chain; -} - - -/* - * targetchain() - append two TARGET chains. - * - * Inputs: - * chain exisitng TARGETS to append to - * target new target to append - */ - -TARGETS * targetchain( TARGETS * chain, TARGETS * targets ) -{ - if ( !targets ) return chain; - if ( !chain ) return targets; - - chain->tail->next = targets; - chain->tail = targets->tail; - - return chain; -} - -/* - * actionlist() - append to an ACTION chain. - */ - -ACTIONS * actionlist( ACTIONS * chain, ACTION * action ) -{ - ACTIONS * actions = (ACTIONS *)BJAM_MALLOC( sizeof( ACTIONS ) ); - - actions->action = action; - - if ( !chain ) chain = actions; - else chain->tail->next = actions; - chain->tail = actions; - actions->next = 0; - - return chain; -} - -static SETTINGS * settings_freelist; - - -/* - * addsettings() - add a deferred "set" command to a target. - * - * Adds a variable setting (varname=list) onto a chain of settings for a - * particular target. 'flag' controls the relationship between new and old - * values in the same way as in var_set() function (see variable.c). Returns - * the head of the settings chain. - */ - -SETTINGS * addsettings( SETTINGS * head, int flag, char * symbol, LIST * value ) -{ - SETTINGS * v; - - /* Look for previous settings. */ - for ( v = head; v; v = v->next ) - if ( !strcmp( v->symbol, symbol ) ) - break; - - /* If not previously set, alloc a new. */ - /* If appending, do so. */ - /* Else free old and set new. */ - if ( !v ) - { - v = settings_freelist; - - if ( v ) - settings_freelist = v->next; - else - v = (SETTINGS *)BJAM_MALLOC( sizeof( *v ) ); - - v->symbol = newstr( symbol ); - v->value = value; - v->next = head; - v->multiple = 0; - head = v; - } - else if ( flag == VAR_APPEND ) - { - v->value = list_append( v->value, value ); - } - else if ( flag != VAR_DEFAULT ) - { - list_free( v->value ); - v->value = value; - } - else - list_free( value ); - - /* Return (new) head of list. */ - return head; -} - - -/* - * pushsettings() - set all target specific variables. 
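pushsettings() below works by exchanging each setting's value with the corresponding global variable via var_swap(), which is why popsettings() can be implemented as a second call to pushsettings(): swapping the same pair twice restores the original state. The principle in miniature (illustrative, not jam's actual variable machinery):

    #include <stdio.h>

    static void swap(const char **a, const char **b)
    {
        const char *t = *a; *a = *b; *b = t;
    }

    int main(void)
    {
        const char *global  = "default";           /* the variable */
        const char *setting = "target-specific";   /* the SETTINGS node */
        swap(&global, &setting);   /* push: variable takes the setting */
        printf("%s\n", global);    /* target-specific */
        swap(&global, &setting);   /* pop: the same swap restores it */
        printf("%s\n", global);    /* default */
        return 0;
    }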
- */ - -void pushsettings( SETTINGS * v ) -{ - for ( ; v; v = v->next ) - v->value = var_swap( v->symbol, v->value ); -} - - -/* - * popsettings() - reset target specific variables to their pre-push values. - */ - -void popsettings( SETTINGS * v ) -{ - pushsettings( v ); /* just swap again */ -} - - -/* - * copysettings() - duplicate a settings list, returning the new copy. - */ - -SETTINGS * copysettings( SETTINGS * head ) -{ - SETTINGS * copy = 0; - SETTINGS * v; - for ( v = head; v; v = v->next ) - copy = addsettings( copy, VAR_SET, v->symbol, list_copy( 0, v->value ) ); - return copy; -} - - -/* - * freetargets() - delete a targets list. - */ - -void freetargets( TARGETS * chain ) -{ - while ( chain ) - { - TARGETS * n = chain->next; - BJAM_FREE( chain ); - chain = n; - } -} - - -/* - * freeactions() - delete an action list. - */ - -void freeactions( ACTIONS * chain ) -{ - while ( chain ) - { - ACTIONS * n = chain->next; - BJAM_FREE( chain ); - chain = n; - } -} - - -/* - * freesettings() - delete a settings list. - */ - -void freesettings( SETTINGS * v ) -{ - while ( v ) - { - SETTINGS * n = v->next; - freestr( v->symbol ); - list_free( v->value ); - v->next = settings_freelist; - settings_freelist = v; - v = n; - } -} - - -static void freetarget( void * xt, void * data ) -{ - TARGET * t = (TARGET *)xt; - if ( t->settings ) freesettings( t->settings ); - if ( t->depends ) freetargets ( t->depends ); - if ( t->includes ) freetarget ( t->includes, (void *)0 ); - if ( t->actions ) freeactions ( t->actions ); -} - - -/* - * rules_done() - free RULE and TARGET tables. - */ - -void rules_done() -{ - hashenumerate( targethash, freetarget, 0 ); - hashdone( targethash ); - while ( settings_freelist ) - { - SETTINGS * n = settings_freelist->next; - BJAM_FREE( settings_freelist ); - settings_freelist = n; - } -} - - -/* - * args_new() - make a new reference-counted argument list. - */ - -argument_list * args_new() -{ - argument_list * r = (argument_list *)BJAM_MALLOC( sizeof(argument_list) ); - r->reference_count = 0; - lol_init( r->data ); - return r; -} - - -/* - * args_refer() - add a new reference to the given argument list. - */ - -void args_refer( argument_list * a ) -{ - ++a->reference_count; -} - - -/* - * args_free() - release a reference to the given argument list. - */ - -void args_free( argument_list * a ) -{ - if ( --a->reference_count <= 0 ) - { - lol_free( a->data ); - BJAM_FREE( a ); - } -} - - -/* - * actions_refer() - add a new reference to the given actions. - */ - -void actions_refer( rule_actions * a ) -{ - ++a->reference_count; -} - - -/* - * actions_free() - release a reference to the given actions. - */ - -void actions_free( rule_actions * a ) -{ - if ( --a->reference_count <= 0 ) - { - freestr( a->command ); - list_free( a->bindlist ); - BJAM_FREE( a ); - } -} - - -/* - * set_rule_body() - set the argument list and procedure of the given rule. - */ - -static void set_rule_body( RULE * rule, argument_list * args, PARSE * procedure ) -{ - if ( args ) - args_refer( args ); - if ( rule->arguments ) - args_free( rule->arguments ); - rule->arguments = args; - - if ( procedure ) - parse_refer( procedure ); - if ( rule->procedure ) - parse_free( rule->procedure ); - rule->procedure = procedure; -} - - -/* - * global_name() - given a rule, return the name for a corresponding rule in the - * global module. 
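The args_*()/actions_*() functions above follow a bare-bones reference-counting protocol: counts start at zero, set_rule_body()/set_rule_actions() take and drop references, and the object frees itself when the count falls to zero or below. The shape of it, runnable in isolation (generic names, not jam's):

    #include <stdio.h>
    #include <stdlib.h>

    struct counted { int reference_count; };

    static struct counted *obj_new(void)
    {
        struct counted *o = malloc(sizeof *o);
        o->reference_count = 0;            /* as in args_new() */
        return o;
    }

    static void obj_refer(struct counted *o) { ++o->reference_count; }

    static void obj_free(struct counted *o)
    {
        if (--o->reference_count <= 0) {   /* last owner releases it */
            free(o);
            puts("released");
        }
    }

    int main(void)
    {
        struct counted *o = obj_new();
        obj_refer(o);                      /* rule #1 shares it: 1 */
        obj_refer(o);                      /* rule #2 shares it: 2 */
        obj_free(o);                       /* back to 1, still alive */
        obj_free(o);                       /* 0: "released" */
        return 0;
    }

The <= 0 test (rather than == 0) means a plain new-then-free pair releases the object even though the count was never raised.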
- */ - -static char * global_rule_name( RULE * r ) -{ - if ( r->module == root_module() ) - return r->name; - - { - char name[4096] = ""; - strncat( name, r->module->name, sizeof( name ) - 1 ); - strncat( name, r->name, sizeof( name ) - 1 ); - return newstr( name); - } -} - - -/* - * global_rule() - given a rule, produce the corresponding entry in the global - * module. - */ - -static RULE * global_rule( RULE * r ) -{ - if ( r->module == root_module() ) - return r; - - { - char * name = global_rule_name( r ); - RULE * result = define_rule( r->module, name, root_module() ); - freestr( name ); - return result; - } -} - - -/* - * new_rule_body() - make a new rule named rulename in the given module, with - * the given argument list and procedure. If exported is true, the rule is - * exported to the global module as modulename.rulename. - */ - -RULE * new_rule_body( module_t * m, char * rulename, argument_list * args, PARSE * procedure, int exported ) -{ - RULE * local = define_rule( m, rulename, m ); - local->exported = exported; - set_rule_body( local, args, procedure ); - - /* Mark the procedure with the global rule name, regardless of whether the - * rule is exported. That gives us something reasonably identifiable that we - * can use, e.g. in profiling output. Only do this once, since this could be - * called multiple times with the same procedure. - */ - if ( procedure->rulename == 0 ) - procedure->rulename = global_rule_name( local ); - - return local; -} - - -static void set_rule_actions( RULE * rule, rule_actions * actions ) -{ - if ( actions ) - actions_refer( actions ); - if ( rule->actions ) - actions_free( rule->actions ); - rule->actions = actions; -} - - -static rule_actions * actions_new( char * command, LIST * bindlist, int flags ) -{ - rule_actions * result = (rule_actions *)BJAM_MALLOC( sizeof( rule_actions ) ); - result->command = copystr( command ); - result->bindlist = bindlist; - result->flags = flags; - result->reference_count = 0; - return result; -} - - -RULE * new_rule_actions( module_t * m, char * rulename, char * command, LIST * bindlist, int flags ) -{ - RULE * local = define_rule( m, rulename, m ); - RULE * global = global_rule( local ); - set_rule_actions( local, actions_new( command, bindlist, flags ) ); - set_rule_actions( global, local->actions ); - return local; -} - - -/* - * Looks for a rule in the specified module, and returns it, if found. First - * checks if the rule is present in the module's rule table. Second, if name of - * the rule is in the form name1.name2 and name1 is in the list of imported - * modules, look in module 'name1' for rule 'name2'. - */ - -RULE * lookup_rule( char * rulename, module_t * m, int local_only ) -{ - RULE rule; - RULE * r = &rule; - RULE * result = 0; - module_t * original_module = m; - - r->name = rulename; - - if ( m->class_module ) - m = m->class_module; - - if ( m->rules && hashcheck( m->rules, (HASHDATA * *)&r ) ) - result = r; - else if ( !local_only && m->imported_modules ) - { - /* Try splitting the name into module and rule. */ - char *p = strchr( r->name, '.' ) ; - if ( p ) - { - *p = '\0'; - /* Now, r->name keeps the module name, and p+1 keeps the rule name. - */ - if ( hashcheck( m->imported_modules, (HASHDATA * *)&r ) ) - result = lookup_rule( p + 1, bindmodule( rulename ), 1 ); - *p = '.'; - } - } - - if ( result ) - { - if ( local_only && !result->exported ) - result = 0; - else - { - /* Lookup started in class module. 
We have found a rule in class - * module, which is marked for execution in that module, or in some - * instances. Mark it for execution in the instance where we started - * the lookup. - */ - int execute_in_class = ( result->module == m ); - int execute_in_some_instance = ( result->module->class_module && - ( result->module->class_module == m ) ); - if ( ( original_module != m ) && - ( execute_in_class || execute_in_some_instance ) ) - result->module = original_module; - } - } - - return result; -} - - -RULE * bindrule( char * rulename, module_t * m ) -{ - RULE * result = lookup_rule( rulename, m, 0 ); - if ( !result ) - result = lookup_rule( rulename, root_module(), 0 ); - /* We have only one caller, 'evaluate_rule', which will complain about - * calling an undefined rule. We could issue the error here, but we do not - * have the necessary information, such as frame. - */ - if ( !result ) - result = enter_rule( rulename, m ); - return result; -} - - -RULE * import_rule( RULE * source, module_t * m, char * name ) -{ - RULE * dest = define_rule( source->module, name, m ); - set_rule_body( dest, source->arguments, source->procedure ); - set_rule_actions( dest, source->actions ); - return dest; -} diff --git a/jam-files/engine/rules.h b/jam-files/engine/rules.h deleted file mode 100644 index 806a1469..00000000 --- a/jam-files/engine/rules.h +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -#ifndef RULES_DWA_20011020_H -#define RULES_DWA_20011020_H - -#include "modules.h" -#include "jam.h" -#include "parse.h" - - -/* - * rules.h - targets, rules, and related information - * - * This file describes the structures holding the targets, rules, and - * related information accumulated by interpreting the statements - * of the jam files. - * - * The following are defined: - * - * RULE - a generic jam rule, the product of RULE and ACTIONS. - * ACTIONS - a chain of ACTIONs. - * ACTION - a RULE instance with targets and sources. - * SETTINGS - variables to set when executing a TARGET's ACTIONS. - * TARGETS - a chain of TARGETs. - * TARGET - an entity (e.g. a file) that can be built. - * - * 04/11/94 (seiwald) - Combined deps & headers into deps[2] in TARGET. - * 04/12/94 (seiwald) - actionlist() now just appends a single action. - * 06/01/94 (seiwald) - new 'actions existing' does existing sources - * 12/20/94 (seiwald) - NOTIME renamed NOTFILE. - * 01/19/95 (seiwald) - split DONTKNOW into CANTFIND/CANTMAKE. - * 02/02/95 (seiwald) - new LEAVES modifier on targets. - * 02/14/95 (seiwald) - new NOUPDATE modifier on targets. - */ - -typedef struct _rule RULE; -typedef struct _target TARGET; -typedef struct _targets TARGETS; -typedef struct _action ACTION; -typedef struct _actions ACTIONS; -typedef struct _settings SETTINGS ; - -/* RULE - a generic jam rule, the product of RULE and ACTIONS. */ - -/* A rule's argument list. */ -struct argument_list -{ - int reference_count; - LOL data[1]; -}; - -/* Build actions corresponding to a rule. 
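(Editing note: the RULE_* flags below appear to mirror the modifiers of the Jam "actions" statement, so e.g. "actions quietly piecemeal ..." would set RULE_QUIETLY | RULE_PIECEMEAL on this structure.)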
*/ -struct rule_actions -{ - int reference_count; - char * command; /* command string from ACTIONS */ - LIST * bindlist; - int flags; /* modifiers on ACTIONS */ - -#define RULE_NEWSRCS 0x01 /* $(>) is updated sources only */ -#define RULE_TOGETHER 0x02 /* combine actions on single target */ -#define RULE_IGNORE 0x04 /* ignore return status of executes */ -#define RULE_QUIETLY 0x08 /* do not mention it unless verbose */ -#define RULE_PIECEMEAL 0x10 /* split exec so each $(>) is small */ -#define RULE_EXISTING 0x20 /* $(>) is pre-exisitng sources only */ -}; - -typedef struct rule_actions rule_actions; -typedef struct argument_list argument_list; - -struct _rule -{ - char * name; - PARSE * procedure; /* parse tree from RULE */ - argument_list * arguments; /* argument checking info, or NULL for unchecked - */ - rule_actions * actions; /* build actions, or NULL for no actions */ - module_t * module; /* module in which this rule is executed */ - int exported; /* nonzero if this rule is supposed to appear in - * the global module and be automatically - * imported into other modules - */ -#ifdef HAVE_PYTHON - PyObject * python_function; -#endif -}; - -/* ACTIONS - a chain of ACTIONs. */ -struct _actions -{ - ACTIONS * next; - ACTIONS * tail; /* valid only for head */ - ACTION * action; -}; - -/* ACTION - a RULE instance with targets and sources. */ -struct _action -{ - RULE * rule; - TARGETS * targets; - TARGETS * sources; /* aka $(>) */ - char running; /* has been started */ - char status; /* see TARGET status */ -}; - -/* SETTINGS - variables to set when executing a TARGET's ACTIONS. */ -struct _settings -{ - SETTINGS * next; - char * symbol; /* symbol name for var_set() */ - LIST * value; /* symbol value for var_set() */ - int multiple; -}; - -/* TARGETS - a chain of TARGETs. */ -struct _targets -{ - TARGETS * next; - TARGETS * tail; /* valid only for head */ - TARGET * target; -}; - -/* TARGET - an entity (e.g. a file) that can be built. */ -struct _target -{ - char * name; - char * boundname; /* if search() relocates target */ - ACTIONS * actions; /* rules to execute, if any */ - SETTINGS * settings; /* variables to define */ - - short flags; /* status info */ - -#define T_FLAG_TEMP 0x0001 /* TEMPORARY applied */ -#define T_FLAG_NOCARE 0x0002 /* NOCARE applied */ -#define T_FLAG_NOTFILE 0x0004 /* NOTFILE applied */ -#define T_FLAG_TOUCHED 0x0008 /* ALWAYS applied or -t target */ -#define T_FLAG_LEAVES 0x0010 /* LEAVES applied */ -#define T_FLAG_NOUPDATE 0x0020 /* NOUPDATE applied */ -#define T_FLAG_VISITED 0x0040 /* CWM: Used in debugging */ - -/* This flag has been added to support a new built-in rule named "RMBAD". It is - * used to force removal of outdated targets whose dependencies fail to build. - */ -#define T_FLAG_RMOLD 0x0080 /* RMBAD applied */ - -/* This flag was added to support a new built-in rule named "FAIL_EXPECTED" used - * to indicate that the result of running a given action should be inverted, - * i.e. ok <=> fail. This is useful for launching certain test runs from a - * Jamfile. - */ -#define T_FLAG_FAIL_EXPECTED 0x0100 /* FAIL_EXPECTED applied */ - -#define T_FLAG_INTERNAL 0x0200 /* internal INCLUDES node */ - -/* Indicates that the target must be a file. This prevents matching non-files, - * like directories, when a target is searched. 
- */ -#define T_FLAG_ISFILE 0x0400 - -#define T_FLAG_PRECIOUS 0x0800 - - char binding; /* how target relates to a real file or - * folder - */ - -#define T_BIND_UNBOUND 0 /* a disembodied name */ -#define T_BIND_MISSING 1 /* could not find real file */ -#define T_BIND_PARENTS 2 /* using parent's timestamp */ -#define T_BIND_EXISTS 3 /* real file, timestamp valid */ - - TARGETS * depends; /* dependencies */ - TARGETS * dependants; /* the inverse of dependencies */ - TARGETS * rebuilds; /* targets that should be force-rebuilt - * whenever this one is - */ - TARGET * includes; /* internal includes node */ - TARGET * original_target; /* original_target->includes = this */ - char rescanned; - - time_t time; /* update time */ - time_t leaf; /* update time of leaf sources */ - - char fate; /* make0()'s diagnosis */ - -#define T_FATE_INIT 0 /* nothing done to target */ -#define T_FATE_MAKING 1 /* make0(target) on stack */ - -#define T_FATE_STABLE 2 /* target did not need updating */ -#define T_FATE_NEWER 3 /* target newer than parent */ - -#define T_FATE_SPOIL 4 /* >= SPOIL rebuilds parents */ -#define T_FATE_ISTMP 4 /* unneeded temp target oddly present */ - -#define T_FATE_BUILD 5 /* >= BUILD rebuilds target */ -#define T_FATE_TOUCHED 5 /* manually touched with -t */ -#define T_FATE_REBUILD 6 -#define T_FATE_MISSING 7 /* is missing, needs updating */ -#define T_FATE_NEEDTMP 8 /* missing temp that must be rebuild */ -#define T_FATE_OUTDATED 9 /* is out of date, needs updating */ -#define T_FATE_UPDATE 10 /* deps updated, needs updating */ - -#define T_FATE_BROKEN 11 /* >= BROKEN ruins parents */ -#define T_FATE_CANTFIND 11 /* no rules to make missing target */ -#define T_FATE_CANTMAKE 12 /* can not find dependencies */ - - char progress; /* tracks make1() progress */ - -#define T_MAKE_INIT 0 /* make1(target) not yet called */ -#define T_MAKE_ONSTACK 1 /* make1(target) on stack */ -#define T_MAKE_ACTIVE 2 /* make1(target) in make1b() */ -#define T_MAKE_RUNNING 3 /* make1(target) running commands */ -#define T_MAKE_DONE 4 /* make1(target) done */ - -#ifdef OPT_SEMAPHORE - #define T_MAKE_SEMAPHORE 5 /* Special target type for semaphores */ -#endif - -#ifdef OPT_SEMAPHORE - TARGET * semaphore; /* used in serialization */ -#endif - - char status; /* exec_cmd() result */ - - int asynccnt; /* child deps outstanding */ - TARGETS * parents; /* used by make1() for completion */ - char * cmds; /* type-punned command list */ - - char * failed; -}; - - -/* Action related functions. */ -ACTIONS * actionlist ( ACTIONS *, ACTION * ); -void freeactions ( ACTIONS * ); -SETTINGS * addsettings ( SETTINGS *, int flag, char * symbol, LIST * value ); -void pushsettings ( SETTINGS * ); -void popsettings ( SETTINGS * ); -SETTINGS * copysettings ( SETTINGS * ); -void freesettings ( SETTINGS * ); -void actions_refer( rule_actions * ); -void actions_free ( rule_actions * ); - -/* Argument list related functions. */ -void args_free ( argument_list * ); -argument_list * args_new (); -void args_refer( argument_list * ); - -/* Rule related functions. */ -RULE * bindrule ( char * rulename, module_t * ); -RULE * import_rule ( RULE * source, module_t *, char * name ); -RULE * new_rule_body ( module_t *, char * rulename, argument_list *, PARSE * procedure, int exprt ); -RULE * new_rule_actions( module_t *, char * rulename, char * command, LIST * bindlist, int flags ); -void rule_free ( RULE * ); - -/* Target related functions. 
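(Editing note: a typical flow through these functions is bindtarget() to intern a name, targetlist()/targetentry() to build dependency chains, and search_for_target() -- backed by search.c -- to bind the target to an actual path.)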
*/ -void bind_explicitly_located_targets(); -TARGET * bindtarget ( char const * target_name ); -TARGET * copytarget ( TARGET const * t ); -void freetargets ( TARGETS * ); -TARGET * search_for_target ( char * name, LIST * search_path ); -TARGETS * targetchain ( TARGETS * chain, TARGETS * ); -TARGETS * targetentry ( TARGETS * chain, TARGET * ); -void target_include ( TARGET * including, TARGET * included ); -TARGETS * targetlist ( TARGETS * chain, LIST * target_names ); -void touch_target ( char * t ); - -/* Final module cleanup. */ -void rules_done(); - -#endif diff --git a/jam-files/engine/scan.c b/jam-files/engine/scan.c deleted file mode 100644 index 11c44c0e..00000000 --- a/jam-files/engine/scan.c +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -#include "jam.h" -#include "lists.h" -#include "parse.h" -#include "scan.h" -#include "jamgram.h" -#include "jambase.h" -#include "newstr.h" - -/* - * scan.c - the jam yacc scanner - * - * 12/26/93 (seiwald) - bump buf in yylex to 10240 - yuk. - * 09/16/94 (seiwald) - check for overflows, unmatched {}'s, etc. - * Also handle tokens abutting EOF by remembering - * to return EOF no matter how many times yylex() - * reinvokes yyline(). - * 02/11/95 (seiwald) - honor only punctuation keywords if SCAN_PUNCT. - * 07/27/95 (seiwald) - Include jamgram.h after scan.h, so that YYSTYPE is - * defined before Linux's yacc tries to redefine it. - */ - -struct keyword -{ - char * word; - int type; -} keywords[] = -{ -#include "jamgramtab.h" - { 0, 0 } -}; - -struct include -{ - struct include * next; /* next serial include file */ - char * string; /* pointer into current line */ - char * * strings; /* for yyfparse() -- text to parse */ - FILE * file; /* for yyfparse() -- file being read */ - char * fname; /* for yyfparse() -- file name */ - int line; /* line counter for error messages */ - char buf[ 512 ]; /* for yyfparse() -- line buffer */ -}; - -static struct include * incp = 0; /* current file; head of chain */ - -static int scanmode = SCAN_NORMAL; -static int anyerrors = 0; - - -static char * symdump( YYSTYPE * ); - -#define BIGGEST_TOKEN 10240 /* no single token can be larger */ - - -/* - * Set parser mode: normal, string, or keyword. - */ - -void yymode( int n ) -{ - scanmode = n; } - - -void yyerror( char * s ) -{ - /* We use yylval instead of incp to access the error location information as - * the incp pointer will already be reset to 0 in case the error occurred at - * EOF. - * - * The two may differ only if we get an error while reading a lexical token - * spanning multiple lines, e.g. a multi-line string literal or action body, - * in which case yylval location information will hold the information about - * where this token started while incp will hold the information about where - * reading it broke. - * - * TODO: Test the theory about when yylval and incp location information are - * the same and when they differ. - */ - printf( "%s:%d: %s at %s\n", yylval.file, yylval.line, s, symdump( &yylval ) ); - ++anyerrors; -} - - -int yyanyerrors() -{ - return anyerrors != 0; -} - - -void yyfparse( char * s ) -{ - struct include * i = (struct include *)BJAM_MALLOC( sizeof( *i ) ); - - /* Push this onto the incp chain. */ - i->string = ""; - i->strings = 0; - i->file = 0; - i->fname = copystr( s ); - i->line = 0; - i->next = incp; - incp = i; - - /* If the filename is "+", it means use the internal jambase.
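(Editing note: this is presumably how jam bootstraps itself -- calling yyfparse( "+" ) makes the scanner read the compiled-in jambase[] string array, handled by the strcmp just below, before any on-disk Jamfile is opened.)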
*/ - if ( !strcmp( s, "+" ) ) - i->strings = jambase; -} - - -/* - * yyline() - read new line and return first character. - * - * Fabricates a continuous stream of characters across include files, returning - * EOF at the bitter end. - */ - -int yyline() -{ - struct include * i = incp; - - if ( !incp ) - return EOF; - - /* Once we start reading from the input stream, we reset the include - * insertion point so that the next include file becomes the head of the - * list. - */ - - /* If there is more data in this line, return it. */ - if ( *i->string ) - return *i->string++; - - /* If we are reading from an internal string list, go to the next string. */ - if ( i->strings ) - { - if ( *i->strings ) - { - ++i->line; - i->string = *(i->strings++); - return *i->string++; - } - } - else - { - /* If necessary, open the file. */ - if ( !i->file ) - { - FILE * f = stdin; - if ( strcmp( i->fname, "-" ) && !( f = fopen( i->fname, "r" ) ) ) - perror( i->fname ); - i->file = f; - } - - /* If there is another line in this file, start it. */ - if ( i->file && fgets( i->buf, sizeof( i->buf ), i->file ) ) - { - ++i->line; - i->string = i->buf; - return *i->string++; - } - } - - /* This include is done. Free it up and return EOF so yyparse() returns to - * parse_file(). - */ - - incp = i->next; - - /* Close file, free name. */ - if ( i->file && ( i->file != stdin ) ) - fclose( i->file ); - freestr( i->fname ); - BJAM_FREE( (char *)i ); - - return EOF; -} - - -/* - * yylex() - set yylval to current token; return its type. - * - * Macros to move things along: - * - * yychar() - return and advance character; invalid after EOF. - * yyprev() - back up one character; invalid before yychar(). - * - * yychar() returns a continuous stream of characters, until it hits the EOF of - * the current include file. - */ - -#define yychar() ( *incp->string ? *incp->string++ : yyline() ) -#define yyprev() ( incp->string-- ) - -int yylex() -{ - int c; - char buf[ BIGGEST_TOKEN ]; - char * b = buf; - - if ( !incp ) - goto eof; - - /* Get first character (whitespace or of token). */ - c = yychar(); - - if ( scanmode == SCAN_STRING ) - { - /* If scanning for a string (action's {}'s), look for the closing brace. - * We handle matching braces, if they match. - */ - - int nest = 1; - - while ( ( c != EOF ) && ( b < buf + sizeof( buf ) ) ) - { - if ( c == '{' ) - ++nest; - - if ( ( c == '}' ) && !--nest ) - break; - - *b++ = c; - - c = yychar(); - - /* Turn trailing "\r\n" sequences into plain "\n" for Cygwin. */ - if ( ( c == '\n' ) && ( b[ -1 ] == '\r' ) ) - --b; - } - - /* We ate the ending brace -- regurgitate it. */ - if ( c != EOF ) - yyprev(); - - /* Check for obvious errors. */ - if ( b == buf + sizeof( buf ) ) - { - yyerror( "action block too big" ); - goto eof; - } - - if ( nest ) - { - yyerror( "unmatched {} in action block" ); - goto eof; - } - - *b = 0; - yylval.type = STRING; - yylval.string = newstr( buf ); - yylval.file = incp->fname; - yylval.line = incp->line; - } - else - { - char * b = buf; - struct keyword * k; - int inquote = 0; - int notkeyword; - - /* Eat white space. */ - for ( ;; ) - { - /* Skip past white space. */ - while ( ( c != EOF ) && isspace( c ) ) - c = yychar(); - - /* Not a comment? */ - if ( c != '#' ) - break; - - /* Swallow up comment line. */ - while ( ( ( c = yychar() ) != EOF ) && ( c != '\n' ) ) ; - } - - /* c now points to the first character of a token. 
*/ - if ( c == EOF ) - goto eof; - - yylval.file = incp->fname; - yylval.line = incp->line; - - /* While scanning the word, disqualify it for (expensive) keyword lookup - * when we can: $anything, "anything", \anything - */ - notkeyword = c == '$'; - - /* Look for white space to delimit word. "'s get stripped but preserve - * white space. \ protects next character. - */ - while - ( - ( c != EOF ) && - ( b < buf + sizeof( buf ) ) && - ( inquote || !isspace( c ) ) - ) - { - if ( c == '"' ) - { - /* begin or end " */ - inquote = !inquote; - notkeyword = 1; - } - else if ( c != '\\' ) - { - /* normal char */ - *b++ = c; - } - else if ( ( c = yychar() ) != EOF ) - { - /* \c */ - if (c == 'n') - c = '\n'; - else if (c == 'r') - c = '\r'; - else if (c == 't') - c = '\t'; - *b++ = c; - notkeyword = 1; - } - else - { - /* \EOF */ - break; - } - - c = yychar(); - } - - /* Check obvious errors. */ - if ( b == buf + sizeof( buf ) ) - { - yyerror( "string too big" ); - goto eof; - } - - if ( inquote ) - { - yyerror( "unmatched \" in string" ); - goto eof; - } - - /* We looked ahead a character - back up. */ - if ( c != EOF ) - yyprev(); - - /* Scan token table. Do not scan if it is obviously not a keyword or if - * it is an alphabetic when were looking for punctuation. - */ - - *b = 0; - yylval.type = ARG; - - if ( !notkeyword && !( isalpha( *buf ) && ( scanmode == SCAN_PUNCT ) ) ) - for ( k = keywords; k->word; ++k ) - if ( ( *buf == *k->word ) && !strcmp( k->word, buf ) ) - { - yylval.type = k->type; - yylval.string = k->word; /* used by symdump */ - break; - } - - if ( yylval.type == ARG ) - yylval.string = newstr( buf ); - } - - if ( DEBUG_SCAN ) - printf( "scan %s\n", symdump( &yylval ) ); - - return yylval.type; - -eof: - /* We do not reset yylval.file & yylval.line here so unexpected EOF error - * messages would include correct error location information. - */ - yylval.type = EOF; - return yylval.type; -} - - -static char * symdump( YYSTYPE * s ) -{ - static char buf[ BIGGEST_TOKEN + 20 ]; - switch ( s->type ) - { - case EOF : sprintf( buf, "EOF" ); break; - case 0 : sprintf( buf, "unknown symbol %s", s->string ); break; - case ARG : sprintf( buf, "argument %s" , s->string ); break; - case STRING: sprintf( buf, "string \"%s\"" , s->string ); break; - default : sprintf( buf, "keyword %s" , s->string ); break; - } - return buf; -} - - -/* - * Get information about the current file and line, for those epsilon - * transitions that produce a parse. - */ - -void yyinput_stream( char * * name, int * line ) -{ - if ( incp ) - { - *name = incp->fname; - *line = incp->line; - } - else - { - *name = "(builtin)"; - *line = -1; - } -} diff --git a/jam-files/engine/scan.h b/jam-files/engine/scan.h deleted file mode 100644 index 3fad1c24..00000000 --- a/jam-files/engine/scan.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * scan.h - the jam yacc scanner - * - * External functions: - * - * yyerror( char *s ) - print a parsing error message. - * yyfparse( char *s ) - scan include file s. - * yylex() - parse the next token, returning its type. - * yymode() - adjust lexicon of scanner. - * yyparse() - declaration for yacc parser. - * yyanyerrors() - indicate if any parsing errors occured. - * - * The yymode() function is for the parser to adjust the lexicon of the scanner. 
- * Aside from normal keyword scanning, there is a mode to handle action strings - * (look only for the closing }) and a mode to ignore most keywords when looking - * for a punctuation keyword. This allows non-punctuation keywords to be used in - * lists without quoting. - */ - -/* - * YYSTYPE - value of a lexical token - */ - -#define YYSTYPE YYSYMBOL - -typedef struct _YYSTYPE -{ - int type; - char * string; - PARSE * parse; - LIST * list; - int number; - char * file; - int line; -} YYSTYPE; - -extern YYSTYPE yylval; - -void yymode( int n ); -void yyerror( char * s ); -int yyanyerrors(); -void yyfparse( char * s ); -int yyline(); -int yylex(); -int yyparse(); -void yyinput_stream( char * * name, int * line ); - -# define SCAN_NORMAL 0 /* normal parsing */ -# define SCAN_STRING 1 /* look only for matching } */ -# define SCAN_PUNCT 2 /* only punctuation keywords */ diff --git a/jam-files/engine/search.c b/jam-files/engine/search.c deleted file mode 100644 index 6c23d97a..00000000 --- a/jam-files/engine/search.c +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -#include "jam.h" -#include "lists.h" -#include "search.h" -#include "timestamp.h" -#include "pathsys.h" -#include "variable.h" -#include "newstr.h" -#include "compile.h" -#include "strings.h" -#include "hash.h" -#include "filesys.h" -#include <string.h> - - -typedef struct _binding -{ - char * binding; - char * target; -} BINDING; - -static struct hash *explicit_bindings = 0; - - -void call_bind_rule -( - char * target_, - char * boundname_ -) -{ - LIST * bind_rule = var_get( "BINDRULE" ); - if ( bind_rule ) - { - /* No guarantee that the target is an allocated string, so be on the - * safe side. - */ - char * target = copystr( target_ ); - - /* Likewise, do not rely on implementation details of newstr.c: allocate - * a copy of boundname. - */ - char * boundname = copystr( boundname_ ); - if ( boundname && target ) - { - /* Prepare the argument list. */ - FRAME frame[1]; - frame_init( frame ); - - /* First argument is the target name. */ - lol_add( frame->args, list_new( L0, target ) ); - - lol_add( frame->args, list_new( L0, boundname ) ); - if ( lol_get( frame->args, 1 ) ) - evaluate_rule( bind_rule->string, frame ); - - /* Clean up */ - frame_free( frame ); - } - else - { - if ( boundname ) - freestr( boundname ); - if ( target ) - freestr( target ); - } - } -} - -/* - * search.c - find a target along $(SEARCH) or $(LOCATE) - * First, check if LOCATE is set. If so, use it to determine the location of - * the target and return it, regardless of whether anything exists at that - * location. - * - * Second, examine all directories in SEARCH. If there is already a file there, - * or another target with the same name was placed in that location via a - * LOCATE setting, stop and return the location. - * In the case of a previous target, return its name via the third argument. - * - * This behaviour allows handling dependencies on generated files. If the - * caller does not expect the target to be generated, 0 can be passed as - * the third argument.
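- * - * [Illustration added in editing; not from the original file. In a Jamfile, "LOCATE on foo.o = bin ;" binds foo.o to bin/foo.o unconditionally, while "SEARCH on foo.h = inc1 inc2 ;" binds foo.h to the first of inc1/foo.h and inc2/foo.h that exists or that another target was explicitly LOCATEd to, falling back to plain foo.h when nothing matches.]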
- */ - -char * -search( - char *target, - time_t *time, - char **another_target, - int file -) -{ - PATHNAME f[1]; - LIST *varlist; - string buf[1]; - int found = 0; - /* Will be set to 1 if target location is specified via LOCATE. */ - int explicitly_located = 0; - char *boundname = 0; - - if ( another_target ) - *another_target = 0; - - if (! explicit_bindings ) - explicit_bindings = hashinit( sizeof(BINDING), - "explicitly specified locations"); - - string_new( buf ); - /* Parse the filename */ - - path_parse( target, f ); - - f->f_grist.ptr = 0; - f->f_grist.len = 0; - - if ( ( varlist = var_get( "LOCATE" ) ) ) - { - f->f_root.ptr = varlist->string; - f->f_root.len = strlen( varlist->string ); - - path_build( f, buf, 1 ); - - if ( DEBUG_SEARCH ) - printf( "locate %s: %s\n", target, buf->value ); - - explicitly_located = 1; - - timestamp( buf->value, time ); - found = 1; - } - else if ( ( varlist = var_get( "SEARCH" ) ) ) - { - while ( varlist ) - { - BINDING b, *ba = &b; - file_info_t *ff; - - f->f_root.ptr = varlist->string; - f->f_root.len = strlen( varlist->string ); - - string_truncate( buf, 0 ); - path_build( f, buf, 1 ); - - if ( DEBUG_SEARCH ) - printf( "search %s: %s\n", target, buf->value ); - - ff = file_query(buf->value); - timestamp( buf->value, time ); - - b.binding = buf->value; - - if ( hashcheck( explicit_bindings, (HASHDATA**)&ba ) ) - { - if ( DEBUG_SEARCH ) - printf(" search %s: found explicitly located target %s\n", - target, ba->target); - if ( another_target ) - *another_target = ba->target; - found = 1; - break; - } - else if ( ff && ff->time ) - { - if ( !file || ff->is_file ) - { - found = 1; - break; - } - } - - varlist = list_next( varlist ); - } - } - - if ( !found ) - { - /* Look for the obvious */ - /* This is a questionable move. Should we look in the */ - /* obvious place if SEARCH is set? */ - - f->f_root.ptr = 0; - f->f_root.len = 0; - - string_truncate( buf, 0 ); - path_build( f, buf, 1 ); - - if ( DEBUG_SEARCH ) - printf( "search %s: %s\n", target, buf->value ); - - timestamp( buf->value, time ); - } - - boundname = newstr( buf->value ); - string_free( buf ); - - if ( explicitly_located ) - { - BINDING b; - BINDING * ba = &b; - b.binding = boundname; - b.target = target; - /* CONSIDER: we probably should issue a warning is another file - is explicitly bound to the same location. This might break - compatibility, though. */ - hashenter( explicit_bindings, (HASHDATA * *)&ba ); - } - - /* prepare a call to BINDRULE if the variable is set */ - call_bind_rule( target, boundname ); - - return boundname; -} diff --git a/jam-files/engine/search.h b/jam-files/engine/search.h deleted file mode 100644 index c364cac0..00000000 --- a/jam-files/engine/search.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * search.h - find a target along $(SEARCH) or $(LOCATE) - */ - -char *search( char *target, time_t *time, char **another_target, int file ); diff --git a/jam-files/engine/strings.c b/jam-files/engine/strings.c deleted file mode 100644 index 89561237..00000000 --- a/jam-files/engine/strings.c +++ /dev/null @@ -1,201 +0,0 @@ -/* Copyright David Abrahams 2004. Distributed under the Boost */ -/* Software License, Version 1.0. 
(See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -#include "jam.h" -#include "strings.h" -#include <stdlib.h> -#include <string.h> -#include <assert.h> -#include <stdio.h> - - -#ifndef NDEBUG -# define JAM_STRING_MAGIC ((char)0xcf) -# define JAM_STRING_MAGIC_SIZE 4 -static void assert_invariants( string* self ) -{ - int i; - - if ( self->value == 0 ) - { - assert( self->size == 0 ); - assert( self->capacity == 0 ); - assert( self->opt[0] == 0 ); - return; - } - - assert( self->size < self->capacity ); - assert( ( self->capacity <= sizeof(self->opt) ) == ( self->value == self->opt ) ); - assert( strlen( self->value ) == self->size ); - - for (i = 0; i < 4; ++i) - { - assert( self->magic[i] == JAM_STRING_MAGIC ); - assert( self->value[self->capacity + i] == JAM_STRING_MAGIC ); - } -} -#else -# define JAM_STRING_MAGIC_SIZE 0 -# define assert_invariants(x) do {} while (0) -#endif - -void string_new( string* s ) -{ - s->value = s->opt; - s->size = 0; - s->capacity = sizeof(s->opt); - s->opt[0] = 0; -#ifndef NDEBUG - memset(s->magic, JAM_STRING_MAGIC, sizeof(s->magic)); -#endif - assert_invariants( s ); -} - -void string_free( string* s ) -{ - assert_invariants( s ); - if ( s->value != s->opt ) - BJAM_FREE( s->value ); - string_new( s ); -} - -static void string_reserve_internal( string* self, size_t capacity ) -{ - if ( self->value == self->opt ) - { - self->value = (char*)BJAM_MALLOC_ATOMIC( capacity + JAM_STRING_MAGIC_SIZE ); - self->value[0] = 0; - strncat( self->value, self->opt, sizeof(self->opt) ); - assert( strlen( self->value ) <= self->capacity ); /* This is a regression test */ - } - else - { - self->value = (char*)BJAM_REALLOC( self->value, capacity + JAM_STRING_MAGIC_SIZE ); - } -#ifndef NDEBUG - memcpy( self->value + capacity, self->magic, JAM_STRING_MAGIC_SIZE ); -#endif - self->capacity = capacity; -} - -void string_reserve( string* self, size_t capacity ) -{ - assert_invariants( self ); - if ( capacity <= self->capacity ) - return; - string_reserve_internal( self, capacity ); - assert_invariants( self ); -} - -static void extend_full( string* self, char const* start, char const* finish ) -{ - size_t new_size = self->capacity + ( finish - start ); - size_t new_capacity = self->capacity; - size_t old_size = self->capacity; - while ( new_capacity < new_size + 1) - new_capacity <<= 1; - string_reserve_internal( self, new_capacity ); - memcpy( self->value + old_size, start, new_size - old_size ); - self->value[new_size] = 0; - self->size = new_size; -} - -void string_append( string* self, char const* rhs ) -{ - char* p = self->value + self->size; - char* end = self->value + self->capacity; - assert_invariants( self ); - - while ( *rhs && p != end) - *p++ = *rhs++; - - if ( p != end ) - { - *p = 0; - self->size = p - self->value; - } - else - { - extend_full( self, rhs, rhs + strlen(rhs) ); - } - assert_invariants( self ); -} - -void string_append_range( string* self, char const* start, char const* finish ) -{ - char* p = self->value + self->size; - char* end = self->value + self->capacity; - assert_invariants( self ); - - while ( p != end && start != finish ) - *p++ = *start++; - - if ( p != end ) - { - *p = 0; - self->size = p - self->value; - } - else - { - extend_full( self, start, finish ); - } - assert_invariants( self ); -} - -void string_copy( string* s, char const* rhs ) -{ - string_new( s ); - string_append( s, rhs ); -} - -void string_truncate( string* self, size_t n ) -{ - assert_invariants( self ); - assert( n <= 
self->capacity ); - self->value[self->size = n] = 0; - assert_invariants( self ); -} - -void string_pop_back( string* self ) -{ - string_truncate( self, self->size - 1 ); -} - -void string_push_back( string* self, char x ) -{ - string_append_range( self, &x, &x + 1 ); -} - -char string_back( string* self ) -{ - assert_invariants( self ); - return self->value[self->size - 1]; -} - -#ifndef NDEBUG -void string_unit_test() -{ - string s[1]; - int i; - char buffer[sizeof(s->opt) * 2 + 2]; - int limit = sizeof(buffer) > 254 ? 254 : sizeof(buffer); - - string_new(s); - - for (i = 0; i < limit; ++i) - { - string_push_back( s, (char)(i + 1) ); - }; - - for (i = 0; i < limit; ++i) - { - assert( i < s->size ); - assert( s->value[i] == (char)(i + 1)); - } - - string_free(s); - -} -#endif - diff --git a/jam-files/engine/strings.h b/jam-files/engine/strings.h deleted file mode 100644 index 33c77bd7..00000000 --- a/jam-files/engine/strings.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef STRINGS_DWA20011024_H -# define STRINGS_DWA20011024_H - -/* Copyright David Abrahams 2004. Distributed under the Boost */ -/* Software License, Version 1.0. (See accompanying */ -/* file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ - -# include <stddef.h> - -typedef struct string -{ - char* value; - unsigned long size; - unsigned long capacity; - char opt[32]; -#ifndef NDEBUG - char magic[4]; -#endif -} string; - -void string_new( string* ); -void string_copy( string*, char const* ); -void string_free( string* ); -void string_append( string*, char const* ); -void string_append_range( string*, char const*, char const* ); -void string_push_back( string* s, char x ); -void string_reserve( string*, size_t ); -void string_truncate( string*, size_t ); -void string_pop_back( string* ); -char string_back( string* ); -void string_unit_test(); - -#endif - diff --git a/jam-files/engine/subst.c b/jam-files/engine/subst.c deleted file mode 100644 index 75524ecc..00000000 --- a/jam-files/engine/subst.c +++ /dev/null @@ -1,94 +0,0 @@ -#include <stddef.h> -#include "jam.h" -#include "regexp.h" -#include "hash.h" - -#include "newstr.h" -#include "lists.h" -#include "parse.h" -#include "compile.h" -#include "frames.h" - -struct regex_entry -{ - const char* pattern; - regexp* regex; -}; -typedef struct regex_entry regex_entry; - -static struct hash* regex_hash; - -regexp* regex_compile( const char* pattern ) -{ - regex_entry entry, *e = &entry; - entry.pattern = pattern; - - if ( !regex_hash ) - regex_hash = hashinit(sizeof(regex_entry), "regex"); - - if ( hashenter( regex_hash, (HASHDATA **)&e ) ) - e->regex = regcomp( (char*)pattern ); - - return e->regex; -} - -LIST* -builtin_subst( - PARSE *parse, - FRAME *frame ) -{ - LIST* result = L0; - LIST* arg1 = lol_get( frame->args, 0 ); - - if ( arg1 && list_next(arg1) && list_next(list_next(arg1)) ) - { - - const char* source = arg1->string; - const char* pattern = list_next(arg1)->string; - regexp* repat = regex_compile( pattern ); - - if ( regexec( repat, (char*)source) ) - { - LIST* subst = list_next(arg1); - - while ((subst = list_next(subst)) != L0) - { -# define BUFLEN 4096 - char buf[BUFLEN + 1]; - const char* in = subst->string; - char* out = buf; - - for ( in = subst->string; *in && out < buf + BUFLEN; ++in ) - { - if ( *in == '\\' || *in == '$' ) - { - ++in; - if ( *in == 0 ) - { - break; - } - else if ( *in >= '0' && *in <= '9' ) - { - unsigned n = *in - '0'; - const size_t srclen = repat->endp[n] - repat->startp[n]; - const size_t remaining = buf + BUFLEN - out; - 
const size_t len = srclen < remaining ? srclen : remaining; - memcpy( out, repat->startp[n], len ); - out += len; - continue; - } - /* fall through and copy the next character */ - } - *out++ = *in; - } - *out = 0; - - result = list_new( result, newstr( buf ) ); -#undef BUFLEN - } - } - } - - return result; -} - diff --git a/jam-files/engine/timestamp.c b/jam-files/engine/timestamp.c deleted file mode 100644 index 8a59c8c0..00000000 --- a/jam-files/engine/timestamp.c +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright 1993-2002 Christopher Seiwald and Perforce Software, Inc. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -# include "jam.h" - -# include "hash.h" -# include "filesys.h" -# include "pathsys.h" -# include "timestamp.h" -# include "newstr.h" -# include "strings.h" - -/* - * timestamp.c - get the timestamp of a file or archive member - * - * 09/22/00 (seiwald) - downshift names on OS2, too - */ - -/* - * BINDING - all known files - */ - -typedef struct _binding BINDING; - -struct _binding { - char *name; - short flags; - -# define BIND_SCANNED 0x01 /* if directory or arch, has been scanned */ - - short progress; - -# define BIND_INIT 0 /* never seen */ -# define BIND_NOENTRY 1 /* timestamp requested but file never found */ -# define BIND_SPOTTED 2 /* file found but not timed yet */ -# define BIND_MISSING 3 /* file found but can't get timestamp */ -# define BIND_FOUND 4 /* file found and time stamped */ - - time_t time; /* update time - 0 if not exist */ -}; - -static struct hash * bindhash = 0; -static void time_enter( void *, char *, int, time_t ); - -static char * time_progress[] = -{ - "INIT", - "NOENTRY", - "SPOTTED", - "MISSING", - "FOUND" -}; - - -/* - * timestamp() - return timestamp on a file, if present. - */ - -void timestamp( char * target, time_t * time ) -{ - PROFILE_ENTER( timestamp ); - - PATHNAME f1; - PATHNAME f2; - BINDING binding; - BINDING * b = &binding; - string buf[ 1 ]; -#ifdef DOWNSHIFT_PATHS - string path; - char * p; -#endif - -#ifdef DOWNSHIFT_PATHS - string_copy( &path, target ); - p = path.value; - - do - { - *p = tolower( *p ); -#ifdef NT - /* On NT, we must use backslashes or the file will not be found. */ - if ( *p == '/' ) - *p = PATH_DELIM; -#endif - } - while ( *p++ ); - - target = path.value; -#endif /* #ifdef DOWNSHIFT_PATHS */ - string_new( buf ); - - if ( !bindhash ) - bindhash = hashinit( sizeof( BINDING ), "bindings" ); - - /* Quick path - is it there? */ - b->name = target; - b->time = b->flags = 0; - b->progress = BIND_INIT; - - if ( hashenter( bindhash, (HASHDATA * *)&b ) ) - b->name = newstr( target ); /* never freed */ - - if ( b->progress != BIND_INIT ) - goto afterscanning; - - b->progress = BIND_NOENTRY; - - /* Not found - have to scan for it. */ - path_parse( target, &f1 ); - - /* Scan directory if not already done so. 
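(Editing note: the parent directory is scanned once via file_dirscan(), which enters a timestamp for every directory entry into bindhash, so later timestamp queries for sibling files are answered straight from the hash.)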
*/ - { - BINDING binding; - BINDING * b = &binding; - - f2 = f1; - f2.f_grist.len = 0; - path_parent( &f2 ); - path_build( &f2, buf, 0 ); - - b->name = buf->value; - b->time = b->flags = 0; - b->progress = BIND_INIT; - - if ( hashenter( bindhash, (HASHDATA * *)&b ) ) - b->name = newstr( buf->value ); /* never freed */ - - if ( !( b->flags & BIND_SCANNED ) ) - { - file_dirscan( buf->value, time_enter, bindhash ); - b->flags |= BIND_SCANNED; - } - } - - /* Scan archive if not already done so. */ - if ( f1.f_member.len ) - { - BINDING binding; - BINDING * b = &binding; - - f2 = f1; - f2.f_grist.len = 0; - f2.f_member.len = 0; - string_truncate( buf, 0 ); - path_build( &f2, buf, 0 ); - - b->name = buf->value; - b->time = b->flags = 0; - b->progress = BIND_INIT; - - if ( hashenter( bindhash, (HASHDATA * *)&b ) ) - b->name = newstr( buf->value ); /* never freed */ - - if ( !( b->flags & BIND_SCANNED ) ) - { - file_archscan( buf->value, time_enter, bindhash ); - b->flags |= BIND_SCANNED; - } - } - - afterscanning: - - if ( b->progress == BIND_SPOTTED ) - { - b->progress = file_time( b->name, &b->time ) < 0 - ? BIND_MISSING - : BIND_FOUND; - } - - *time = b->progress == BIND_FOUND ? b->time : 0; - string_free( buf ); -#ifdef DOWNSHIFT_PATHS - string_free( &path ); -#endif - - PROFILE_EXIT( timestamp ); -} - - -static void time_enter( void * closure, char * target, int found, time_t time ) -{ - BINDING binding; - BINDING * b = &binding; - struct hash * bindhash = (struct hash *)closure; - -#ifdef DOWNSHIFT_PATHS - char path[ MAXJPATH ]; - char * p = path; - - do *p++ = tolower( *target ); - while ( *target++ ); - - target = path; -#endif - - b->name = target; - b->flags = 0; - - if ( hashenter( bindhash, (HASHDATA * *)&b ) ) - b->name = newstr( target ); /* never freed */ - - b->time = time; - b->progress = found ? BIND_FOUND : BIND_SPOTTED; - - if ( DEBUG_BINDSCAN ) - printf( "time ( %s ) : %s\n", target, time_progress[ b->progress ] ); -} - - -/* - * stamps_done() - free timestamp tables. - */ - -void stamps_done() -{ - hashdone( bindhash ); -} diff --git a/jam-files/engine/timestamp.h b/jam-files/engine/timestamp.h deleted file mode 100644 index f5752763..00000000 --- a/jam-files/engine/timestamp.h +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright 1993, 1995 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* - * timestamp.h - get the timestamp of a file or archive member - */ - -void timestamp( char * target, time_t * time ); -void stamps_done(); diff --git a/jam-files/engine/variable.c b/jam-files/engine/variable.c deleted file mode 100644 index 795f3458..00000000 --- a/jam-files/engine/variable.c +++ /dev/null @@ -1,631 +0,0 @@ -/* - * Copyright 1993, 2000 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. - */ - -/* This file is ALSO: - * Copyright 2001-2004 David Abrahams. - * Copyright 2005 Reece H. Dunn. - * Copyright 2005 Rene Rivera. - * Distributed under the Boost Software License, Version 1.0. - * (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) - */ - -#include "jam.h" -#include "lists.h" -#include "parse.h" -#include "variable.h" -#include "expand.h" -#include "hash.h" -#include "filesys.h" -#include "newstr.h" -#include "strings.h" -#include "pathsys.h" -#include <stdlib.h> -#include <stdio.h> - -/* - * variable.c - handle Jam multi-element variables. - * - * External routines: - * - * var_defines() - load a bunch of variable=value settings. 
- * var_string() - expand a string with variables in it. - * var_get() - get value of a user defined symbol. - * var_set() - set a variable in jam's user defined symbol table. - * var_swap() - swap a variable's value with the given one. - * var_done() - free variable tables. - * - * Internal routines: - * - * var_enter() - make new var symbol table entry, returning var ptr. - * var_dump() - dump a variable to stdout. - * - * 04/13/94 (seiwald) - added shorthand L0 for null list pointer - * 08/23/94 (seiwald) - Support for '+=' (append to variable) - * 01/22/95 (seiwald) - split environment variables at blanks or :'s - * 05/10/95 (seiwald) - split path variables at SPLITPATH (not :) - * 09/11/00 (seiwald) - defunct var_list() removed - */ - -static struct hash *varhash = 0; - -/* - * VARIABLE - a user defined multi-value variable - */ - -typedef struct _variable VARIABLE ; - -struct _variable -{ - char * symbol; - LIST * value; -}; - -static VARIABLE * var_enter( char * symbol ); -static void var_dump( char * symbol, LIST * value, char * what ); - - -/* - * var_hash_swap() - swap all variable settings with those passed - * - * Used to implement separate settings spaces for modules - */ - -void var_hash_swap( struct hash * * new_vars ) -{ - struct hash * old = varhash; - varhash = *new_vars; - *new_vars = old; -} - - -/* - * var_defines() - load a bunch of variable=value settings - * - * If preprocess is false, take the value verbatim. - * - * Otherwise, if the variable value is enclosed in quotes, strip the - * quotes. - * - * Otherwise, if variable name ends in PATH, split value at :'s. - * - * Otherwise, split the value at blanks. - */ - -void var_defines( char * const * e, int preprocess ) -{ - string buf[1]; - - string_new( buf ); - - for ( ; *e; ++e ) - { - char * val; - -# ifdef OS_MAC - /* On the mac (MPW), the var=val is actually var\0val */ - /* Think different. */ - - if ( ( val = strchr( *e, '=' ) ) || ( val = *e + strlen( *e ) ) ) -# else - if ( ( val = strchr( *e, '=' ) ) ) -# endif - { - LIST * l = L0; - char * pp; - char * p; -# ifdef OPT_NO_EXTERNAL_VARIABLE_SPLIT - char split = '\0'; -# else - # ifdef OS_MAC - char split = ','; - # else - char split = ' '; - # endif -# endif - size_t len = strlen( val + 1 ); - - int quoted = ( val[1] == '"' ) && ( val[len] == '"' ) && - ( len > 1 ); - - if ( quoted && preprocess ) - { - string_append_range( buf, val + 2, val + len ); - l = list_new( l, newstr( buf->value ) ); - string_truncate( buf, 0 ); - } - else - { - /* Split *PATH at :'s, not spaces. */ - if ( val - 4 >= *e ) - { - if ( !strncmp( val - 4, "PATH", 4 ) || - !strncmp( val - 4, "Path", 4 ) || - !strncmp( val - 4, "path", 4 ) ) - split = SPLITPATH; - } - - /* Do the split. */ - for - ( - pp = val + 1; - preprocess && ( ( p = strchr( pp, split ) ) != 0 ); - pp = p + 1 - ) - { - string_append_range( buf, pp, p ); - l = list_new( l, newstr( buf->value ) ); - string_truncate( buf, 0 ); - } - - l = list_new( l, newstr( pp ) ); - } - - /* Get name. */ - string_append_range( buf, *e, val ); - var_set( buf->value, l, VAR_SET ); - string_truncate( buf, 0 ); - } - } - string_free( buf ); -} - - -/* - * var_string() - expand a string with variables in it - * - * Copies in to out; doesn't modify targets & sources. - */ - -int var_string( char * in, char * out, int outsize, LOL * lol ) -{ - char * out0 = out; - char * oute = out + outsize - 1; - - while ( *in ) - { - char * lastword; - int dollar = 0; - - /* Copy white space. 
*/ - while ( isspace( *in ) ) - { - if ( out >= oute ) - return -1; - *out++ = *in++; - } - - lastword = out; - - /* Copy non-white space, watching for variables. */ - while ( *in && !isspace( *in ) ) - { - if ( out >= oute ) - return -1; - - if ( ( in[ 0 ] == '$' ) && ( in[ 1 ] == '(' ) ) - { - ++dollar; - *out++ = *in++; - } - #ifdef OPT_AT_FILES - else if ( ( in[ 0 ] == '@' ) && ( in[ 1 ] == '(' ) ) - { - int depth = 1; - char * ine = in + 2; - char * split = 0; - - /* Scan the content of the response file @() section. */ - while ( *ine && ( depth > 0 ) ) - { - switch ( *ine ) - { - case '(': ++depth; break; - case ')': --depth; break; - case ':': - if ( ( depth == 1 ) && ( ine[ 1 ] == 'E' ) && ( ine[ 2 ] == '=' ) ) - split = ine; - break; - } - ++ine; - } - - if ( !split ) - { - /* the @() reference doesn't match the @(foo:E=bar) format. - hence we leave it alone by copying directly to output. */ - int l = 0; - if ( out + 2 >= oute ) return -1; - *( out++ ) = '@'; - *( out++ ) = '('; - l = var_string( in + 2, out, oute - out, lol ); - if ( l < 0 ) return -1; - out += l; - if ( out + 1 >= oute ) return -1; - *( out++ ) = ')'; - } - else if ( depth == 0 ) - { - string file_name_v; - int file_name_l = 0; - const char * file_name_s = 0; - - /* Expand the temporary file name var inline. */ - #if 0 - string_copy( &file_name_v, "$(" ); - string_append_range( &file_name_v, in + 2, split ); - string_push_back( &file_name_v, ')' ); - #else - string_new( &file_name_v ); - string_append_range( &file_name_v, in + 2, split ); - #endif - file_name_l = var_string( file_name_v.value, out, oute - out + 1, lol ); - string_free( &file_name_v ); - if ( file_name_l < 0 ) return -1; - file_name_s = out; - - /* For stdout/stderr we will create a temp file and generate - * a command that outputs the content as needed. - */ - if ( ( strcmp( "STDOUT", out ) == 0 ) || - ( strcmp( "STDERR", out ) == 0 ) ) - { - int err_redir = strcmp( "STDERR", out ) == 0; - out[ 0 ] = '\0'; - file_name_s = path_tmpfile(); - file_name_l = strlen(file_name_s); - #ifdef OS_NT - if ( ( out + 7 + file_name_l + ( err_redir ? 5 : 0 ) ) >= oute ) - return -1; - sprintf( out,"type \"%s\"%s", file_name_s, - err_redir ? " 1>&2" : "" ); - #else - if ( ( out + 6 + file_name_l + ( err_redir ? 5 : 0 ) ) >= oute ) - return -1; - sprintf( out,"cat \"%s\"%s", file_name_s, - err_redir ? " 1>&2" : "" ); - #endif - /* We also make sure that the temp files created by this - * get nuked eventually. - */ - file_remove_atexit( file_name_s ); - } - - /* Expand the file value into the file reference. */ - var_string_to_file( split + 3, ine - split - 4, file_name_s, - lol ); - - /* Continue on with the expansion. */ - out += strlen( out ); - } - - /* And continue with the parsing just past the @() reference. */ - in = ine; - } - #endif - else - { - *out++ = *in++; - } - } - - /* Add zero to 'out' so that 'lastword' is correctly zero-terminated. */ - if ( out >= oute ) - return -1; - /* Do not increment, intentionally. */ - *out = '\0'; - - /* If a variable encountered, expand it and and embed the - * space-separated members of the list in the output. 
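- * - * [Illustration added in editing: with HDRS set to "/usr/include /opt/local/include", the word "-I$(HDRS)" in the input expands here to "-I/usr/include -I/opt/local/include" -- one copy of the surrounding word per list element.]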
- */ - if ( dollar ) - { - LIST * l = var_expand( L0, lastword, out, lol, 0 ); - - out = lastword; - - while ( l ) - { - int so = strlen( l->string ); - - if ( out + so >= oute ) - return -1; - - strcpy( out, l->string ); - out += so; - l = list_next( l ); - if ( l ) *out++ = ' '; - } - - list_free( l ); - } - } - - if ( out >= oute ) - return -1; - - *out++ = '\0'; - - return out - out0; -} - - -void var_string_to_file( const char * in, int insize, const char * out, LOL * lol ) -{ - char const * ine = in + insize; - FILE * out_file = 0; - int out_debug = DEBUG_EXEC ? 1 : 0; - if ( globs.noexec ) - { - /* out_debug = 1; */ - } - else if ( strcmp( out, "STDOUT" ) == 0 ) - { - out_file = stdout; - } - else if ( strcmp( out, "STDERR" ) == 0 ) - { - out_file = stderr; - } - else - { - /* Handle "path to file" filenames. */ - string out_name; - if ( ( out[ 0 ] == '"' ) && ( out[ strlen( out ) - 1 ] == '"' ) ) - { - string_copy( &out_name, out + 1 ); - string_truncate( &out_name, out_name.size - 1 ); - } - else - { - string_copy( &out_name,out ); - } - out_file = fopen( out_name.value, "w" ); - if ( !out_file ) - { - printf( "failed to write output file '%s'!\n", out_name.value ); - exit( EXITBAD ); - } - string_free( &out_name ); - } - - if ( out_debug ) printf( "\nfile %s\n", out ); - - while ( *in && ( in < ine ) ) - { - int dollar = 0; - const char * output_0 = in; - const char * output_1 = in; - - /* Copy white space. */ - while ( ( output_1 < ine ) && isspace( *output_1 ) ) - ++output_1; - - if ( output_0 < output_1 ) - { - if ( out_file ) fwrite( output_0, output_1 - output_0, 1, out_file ); - if ( out_debug ) fwrite( output_0, output_1 - output_0, 1, stdout ); - } - output_0 = output_1; - - /* Copy non-white space, watching for variables. */ - while ( ( output_1 < ine ) && *output_1 && !isspace( *output_1 ) ) - { - if ( ( output_1[ 0 ] == '$' ) && ( output_1[ 1 ] == '(' ) ) - ++dollar; - ++output_1; - } - - /* If a variable encountered, expand it and embed the space-separated - * members of the list in the output. - */ - if ( dollar ) - { - LIST * l = var_expand( L0, (char *)output_0, (char *)output_1, lol, 0 ); - - while ( l ) - { - if ( out_file ) fputs( l->string, out_file ); - if ( out_debug ) puts( l->string ); - l = list_next( l ); - if ( l ) - { - if ( out_file ) fputc( ' ', out_file ); - if ( out_debug ) fputc( ' ', stdout ); - } - } - - list_free( l ); - } - else if ( output_0 < output_1 ) - { - if ( out_file ) - { - const char * output_n = output_0; - while ( output_n < output_1 ) - { - output_n += fwrite( output_n, 1, output_1-output_n, out_file ); - } - } - if ( out_debug ) - { - const char * output_n = output_0; - while ( output_n < output_1 ) - { - output_n += fwrite( output_n, 1, output_1-output_n, stdout ); - } - } - } - - in = output_1; - } - - if ( out_file && ( out_file != stdout ) && ( out_file != stderr ) ) - { - fflush( out_file ); - fclose( out_file ); - } - - if ( out_debug ) fputc( '\n', stdout ); -} - - -/* - * var_get() - get value of a user defined symbol. - * - * Returns NULL if symbol unset. - */ - -LIST * var_get( char * symbol ) -{ - LIST * result = 0; -#ifdef OPT_AT_FILES - /* Some "fixed" variables... 
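(Editing note: TMPDIR, TMPNAME, TMPFILE, STDOUT and STDERR below are synthesized on every call before varhash is consulted, so values stored via var_set() cannot override them.)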
*/ - if ( strcmp( "TMPDIR", symbol ) == 0 ) - { - result = list_new( L0, newstr( (char *)path_tmpdir() ) ); - } - else if ( strcmp( "TMPNAME", symbol ) == 0 ) - { - result = list_new( L0, newstr( (char *)path_tmpnam() ) ); - } - else if ( strcmp( "TMPFILE", symbol ) == 0 ) - { - result = list_new( L0, newstr( (char *)path_tmpfile() ) ); - } - else if ( strcmp( "STDOUT", symbol ) == 0 ) - { - result = list_new( L0, newstr( "STDOUT" ) ); - } - else if ( strcmp( "STDERR", symbol ) == 0 ) - { - result = list_new( L0, newstr( "STDERR" ) ); - } - else -#endif - { - VARIABLE var; - VARIABLE * v = &var; - - v->symbol = symbol; - - if ( varhash && hashcheck( varhash, (HASHDATA * *)&v ) ) - { - if ( DEBUG_VARGET ) - var_dump( v->symbol, v->value, "get" ); - result = v->value; - } - } - return result; -} - - -/* - * var_set() - set a variable in Jam's user defined symbol table. - * - * 'flag' controls the relationship between new and old values of the variable: - * SET replaces the old with the new; APPEND appends the new to the old; DEFAULT - * only uses the new if the variable was previously unset. - * - * Copies symbol. Takes ownership of value. - */ - -void var_set( char * symbol, LIST * value, int flag ) -{ - VARIABLE * v = var_enter( symbol ); - - if ( DEBUG_VARSET ) - var_dump( symbol, value, "set" ); - - switch ( flag ) - { - case VAR_SET: - /* Replace value */ - list_free( v->value ); - v->value = value; - break; - - case VAR_APPEND: - /* Append value */ - v->value = list_append( v->value, value ); - break; - - case VAR_DEFAULT: - /* Set only if unset */ - if ( !v->value ) - v->value = value; - else - list_free( value ); - break; - } -} - - -/* - * var_swap() - swap a variable's value with the given one. - */ - -LIST * var_swap( char * symbol, LIST * value ) -{ - VARIABLE * v = var_enter( symbol ); - LIST * oldvalue = v->value; - if ( DEBUG_VARSET ) - var_dump( symbol, value, "set" ); - v->value = value; - return oldvalue; -} - - -/* - * var_enter() - make new var symbol table entry, returning var ptr. - */ - -static VARIABLE * var_enter( char * symbol ) -{ - VARIABLE var; - VARIABLE * v = &var; - - if ( !varhash ) - varhash = hashinit( sizeof( VARIABLE ), "variables" ); - - v->symbol = symbol; - v->value = 0; - - if ( hashenter( varhash, (HASHDATA * *)&v ) ) - v->symbol = newstr( symbol ); /* never freed */ - - return v; -} - - -/* - * var_dump() - dump a variable to stdout. - */ - -static void var_dump( char * symbol, LIST * value, char * what ) -{ - printf( "%s %s = ", what, symbol ); - list_print( value ); - printf( "\n" ); -} - - -/* - * var_done() - free variable tables. - */ - -static void delete_var_( void * xvar, void * data ) -{ - VARIABLE * v = (VARIABLE *)xvar; - freestr( v->symbol ); - list_free( v-> value ); -} - - -void var_done() -{ - hashenumerate( varhash, delete_var_, (void *)0 ); - hashdone( varhash ); -} diff --git a/jam-files/engine/variable.h b/jam-files/engine/variable.h deleted file mode 100644 index 5c49e3ca..00000000 --- a/jam-files/engine/variable.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 1993, 2000 Christopher Seiwald. - * - * This file is part of Jam - see jam.c for Copyright information. 
- */ - -/* - * variable.h - handle jam multi-element variables - */ - -struct hash; - -void var_defines( char* const *e, int preprocess ); -int var_string( char *in, char *out, int outsize, LOL *lol ); -LIST * var_get( char *symbol ); -void var_set( char *symbol, LIST *value, int flag ); -LIST * var_swap( char *symbol, LIST *value ); -void var_done(); -void var_hash_swap( struct hash** ); - -/** Expands the "in" expression directly into the "out" file. - The file can be one of: a path, STDOUT, or STDERR to send - the output to a file overwriting previous content, to - the console, or to the error output respectively. -*/ -void var_string_to_file( const char * in, int insize, const char * out, LOL * lol ); - -/* - * Defines for var_set(). - */ - -# define VAR_SET 0 /* override previous value */ -# define VAR_APPEND 1 /* append to previous value */ -# define VAR_DEFAULT 2 /* set only if no previous value */ - diff --git a/jam-files/engine/w32_getreg.c b/jam-files/engine/w32_getreg.c deleted file mode 100644 index 5a06f43e..00000000 --- a/jam-files/engine/w32_getreg.c +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright Paul Lin 2003. Copyright 2006 Bojan Resnik. -Distributed under the Boost Software License, Version 1.0. (See accompanying -file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -*/ - -# include "jam.h" - -# if defined( OS_NT ) || defined( OS_CYGWIN ) - -# include "lists.h" -# include "newstr.h" -# include "parse.h" -# include "frames.h" -# include "strings.h" - -# define WIN32_LEAN_AND_MEAN -# include <windows.h> - -# define MAX_REGISTRY_DATA_LENGTH 4096 -# define MAX_REGISTRY_KEYNAME_LENGTH 256 -# define MAX_REGISTRY_VALUENAME_LENGTH 16384 - -typedef struct -{ - LPCSTR name; - HKEY value; -} KeyMap; - -static const KeyMap dlRootKeys[] = { - { "HKLM", HKEY_LOCAL_MACHINE }, - { "HKCU", HKEY_CURRENT_USER }, - { "HKCR", HKEY_CLASSES_ROOT }, - { "HKEY_LOCAL_MACHINE", HKEY_LOCAL_MACHINE }, - { "HKEY_CURRENT_USER", HKEY_CURRENT_USER }, - { "HKEY_CLASSES_ROOT", HKEY_CLASSES_ROOT }, - { 0, 0 } -}; - -static HKEY get_key(char const** path) -{ - const KeyMap *p; - - for (p = dlRootKeys; p->name; ++p) - { - int n = strlen(p->name); - if (!strncmp(*path,p->name,n)) - { - if ((*path)[n] == '\\' || (*path)[n] == 0) - { - *path += n + 1; - break; - } - } - } - - return p->value; -} - -LIST* -builtin_system_registry( - PARSE *parse, - FRAME *frame ) -{ - char const* path = lol_get(frame->args, 0)->string; - LIST* result = L0; - HKEY key = get_key(&path); - - if ( - key != 0 - && ERROR_SUCCESS == RegOpenKeyEx(key, path, 0, KEY_QUERY_VALUE, &key) - ) - { - DWORD type; - BYTE data[MAX_REGISTRY_DATA_LENGTH]; - DWORD len = sizeof(data); - LIST const* const field = lol_get(frame->args, 1); - - if ( ERROR_SUCCESS == - RegQueryValueEx(key, field ? 
field->string : 0, 0, &type, data, &len) ) - { - switch (type) - { - - case REG_EXPAND_SZ: - { - long len; - string expanded[1]; - string_new(expanded); - - while ( - (len = ExpandEnvironmentStrings( - (LPCSTR)data, expanded->value, expanded->capacity)) - > expanded->capacity - ) - string_reserve(expanded, len); - - expanded->size = len - 1; - - result = list_new( result, newstr(expanded->value) ); - string_free( expanded ); - } - break; - - case REG_MULTI_SZ: - { - char* s; - - for (s = (char*)data; *s; s += strlen(s) + 1) - result = list_new( result, newstr(s) ); - - } - break; - - case REG_DWORD: - { - char buf[100]; - sprintf( buf, "%u", *(PDWORD)data ); - result = list_new( result, newstr(buf) ); - } - break; - - case REG_SZ: - result = list_new( result, newstr((char*)data) ); - break; - } - } - RegCloseKey(key); - } - return result; -} - -static LIST* get_subkey_names(HKEY key, char const* path) -{ - LIST* result = 0; - - if ( ERROR_SUCCESS == - RegOpenKeyEx(key, path, 0, KEY_ENUMERATE_SUB_KEYS, &key) - ) - { - char name[MAX_REGISTRY_KEYNAME_LENGTH]; - DWORD name_size = sizeof(name); - DWORD index; - FILETIME last_write_time; - - for ( index = 0; - ERROR_SUCCESS == RegEnumKeyEx( - key, index, name, &name_size, 0, 0, 0, &last_write_time); - ++index, - name_size = sizeof(name) - ) - { - name[name_size] = 0; - result = list_append(result, list_new(0, newstr(name))); - } - - RegCloseKey(key); - } - - return result; -} - -static LIST* get_value_names(HKEY key, char const* path) -{ - LIST* result = 0; - - if ( ERROR_SUCCESS == RegOpenKeyEx(key, path, 0, KEY_QUERY_VALUE, &key) ) - { - char name[MAX_REGISTRY_VALUENAME_LENGTH]; - DWORD name_size = sizeof(name); - DWORD index; - - for ( index = 0; - ERROR_SUCCESS == RegEnumValue( - key, index, name, &name_size, 0, 0, 0, 0); - ++index, - name_size = sizeof(name) - ) - { - name[name_size] = 0; - result = list_append(result, list_new(0, newstr(name))); - } - - RegCloseKey(key); - } - - return result; -} - -LIST* -builtin_system_registry_names( - PARSE *parse, - FRAME *frame ) -{ - char const* path = lol_get(frame->args, 0)->string; - char const* result_type = lol_get(frame->args, 1)->string; - - HKEY key = get_key(&path); - - if ( !strcmp(result_type, "subkeys") ) - return get_subkey_names(key, path); - if ( !strcmp(result_type, "values") ) - return get_value_names(key, path); - return 0; -} - -# endif diff --git a/jam-files/engine/yyacc.c b/jam-files/engine/yyacc.c deleted file mode 100644 index b5efc96b..00000000 --- a/jam-files/engine/yyacc.c +++ /dev/null @@ -1,268 +0,0 @@ -/* Copyright 2002 Rene Rivera. -** Distributed under the Boost Software License, Version 1.0. -** (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) -*/ - -#include <stdio.h> -#include <string.h> -#include <ctype.h> -#include <stdlib.h> - -/* -# yyacc - yacc wrapper -# -# Allows tokens to be written as `literal` and then automatically -# substituted with #defined tokens. -# -# Usage: -# yyacc file.y filetab.h file.yy -# -# inputs: -# file.yy yacc grammar with ` literals -# -# outputs: -# file.y yacc grammar -# filetab.h array of string <-> token mappings -# -# 3-13-93 -# Documented and p moved in sed command (for some reason, -# s/x/y/p doesn't work). -# 10-12-93 -# Take basename as second argument. -# 12-31-96 -# reversed order of args to be compatible with GenFile rule -# 11-20-2002 -# Reimplemented as a C program for portability. 
(Rene Rivera) -*/ - -void print_usage(); -char * copy_string(char * s, int l); -char * tokenize_string(char * s); -int cmp_literal(const void * a, const void * b); - -typedef struct -{ - char * string; - char * token; -} literal; - -int main(int argc, char ** argv) -{ - int result = 0; - if (argc != 4) - { - print_usage(); - result = 1; - } - else - { - FILE * token_output_f = 0; - FILE * grammar_output_f = 0; - FILE * grammar_source_f = 0; - - grammar_source_f = fopen(argv[3],"r"); - if (grammar_source_f == 0) { result = 1; } - if (result == 0) - { - literal literals[1024]; - int t = 0; - char l[2048]; - while (1) - { - if (fgets(l,2048,grammar_source_f) != 0) - { - char * c = l; - while (1) - { - char * c1 = strchr(c,'`'); - if (c1 != 0) - { - char * c2 = strchr(c1+1,'`'); - if (c2 != 0) - { - literals[t].string = copy_string(c1+1,c2-c1-1); - literals[t].token = tokenize_string(literals[t].string); - t += 1; - c = c2+1; - } - else - break; - } - else - break; - } - } - else - { - break; - } - } - literals[t].string = 0; - literals[t].token = 0; - qsort(literals,t,sizeof(literal),cmp_literal); - { - int p = 1; - int i = 1; - while (literals[i].string != 0) - { - if (strcmp(literals[p-1].string,literals[i].string) != 0) - { - literals[p] = literals[i]; - p += 1; - } - i += 1; - } - literals[p].string = 0; - literals[p].token = 0; - t = p; - } - token_output_f = fopen(argv[2],"w"); - if (token_output_f != 0) - { - int i = 0; - while (literals[i].string != 0) - { - fprintf(token_output_f," { \"%s\", %s },\n",literals[i].string,literals[i].token); - i += 1; - } - fclose(token_output_f); - } - else - result = 1; - if (result == 0) - { - grammar_output_f = fopen(argv[1],"w"); - if (grammar_output_f != 0) - { - int i = 0; - while (literals[i].string != 0) - { - fprintf(grammar_output_f,"%%token %s\n",literals[i].token); - i += 1; - } - rewind(grammar_source_f); - while (1) - { - if (fgets(l,2048,grammar_source_f) != 0) - { - char * c = l; - while (1) - { - char * c1 = strchr(c,'`'); - if (c1 != 0) - { - char * c2 = strchr(c1+1,'`'); - if (c2 != 0) - { - literal key; - literal * replacement = 0; - key.string = copy_string(c1+1,c2-c1-1); - key.token = 0; - replacement = (literal*)bsearch( - &key,literals,t,sizeof(literal),cmp_literal); - *c1 = 0; - fprintf(grammar_output_f,"%s%s",c,replacement->token); - c = c2+1; - } - else - { - fprintf(grammar_output_f,"%s",c); - break; - } - } - else - { - fprintf(grammar_output_f,"%s",c); - break; - } - } - } - else - { - break; - } - } - fclose(grammar_output_f); - } - else - result = 1; - } - } - if (result != 0) - { - perror("yyacc"); - } - } - return result; -} - -static char * usage[] = { - "yyacc <grammar output.y> <token table output.h> <grammar source.yy>", - 0 }; - -void print_usage() -{ - char ** u; - for (u = usage; *u != 0; ++u) - { - fputs(*u,stderr); putc('\n',stderr); - } -} - -char * copy_string(char * s, int l) -{ - char * result = (char*)malloc(l+1); - strncpy(result,s,l); - result[l] = 0; - return result; -} - -char * tokenize_string(char * s) -{ - char * result; - char * literal = s; - int l; - int c; - - if (strcmp(s,":") == 0) literal = "_colon"; - else if (strcmp(s,"!") == 0) literal = "_bang"; - else if (strcmp(s,"!=") == 0) literal = "_bang_equals"; - else if (strcmp(s,"&&") == 0) literal = "_amperamper"; - else if (strcmp(s,"&") == 0) literal = "_amper"; - else if (strcmp(s,"+") == 0) literal = "_plus"; - else if (strcmp(s,"+=") == 0) literal = "_plus_equals"; - else if (strcmp(s,"||") == 0) literal = "_barbar"; - else if 
(strcmp(s,"|") == 0) literal = "_bar"; - else if (strcmp(s,";") == 0) literal = "_semic"; - else if (strcmp(s,"-") == 0) literal = "_minus"; - else if (strcmp(s,"<") == 0) literal = "_langle"; - else if (strcmp(s,"<=") == 0) literal = "_langle_equals"; - else if (strcmp(s,">") == 0) literal = "_rangle"; - else if (strcmp(s,">=") == 0) literal = "_rangle_equals"; - else if (strcmp(s,".") == 0) literal = "_period"; - else if (strcmp(s,"?") == 0) literal = "_question"; - else if (strcmp(s,"?=") == 0) literal = "_question_equals"; - else if (strcmp(s,"=") == 0) literal = "_equals"; - else if (strcmp(s,",") == 0) literal = "_comma"; - else if (strcmp(s,"[") == 0) literal = "_lbracket"; - else if (strcmp(s,"]") == 0) literal = "_rbracket"; - else if (strcmp(s,"{") == 0) literal = "_lbrace"; - else if (strcmp(s,"}") == 0) literal = "_rbrace"; - else if (strcmp(s,"(") == 0) literal = "_lparen"; - else if (strcmp(s,")") == 0) literal = "_rparen"; - l = strlen(literal)+2; - result = (char*)malloc(l+1); - for (c = 0; literal[c] != 0; ++c) - { - result[c] = toupper(literal[c]); - } - result[l-2] = '_'; - result[l-1] = 't'; - result[l] = 0; - return result; -} - -int cmp_literal(const void * a, const void * b) -{ - return strcmp(((const literal *)a)->string,((const literal *)b)->string); -} diff --git a/jam-files/sanity.jam b/jam-files/sanity.jam deleted file mode 100644 index 738316f8..00000000 --- a/jam-files/sanity.jam +++ /dev/null @@ -1,286 +0,0 @@ -import modules ; -import option ; -import os ; -import path ; -import project ; -import build-system ; -import version ; - -#Shell with trailing line removed http://lists.boost.org/boost-build/2007/08/17051.php -rule trim-nl ( str extras * ) { -return [ MATCH "([^ -]*)" : $(str) ] $(extras) ; -} -rule _shell ( cmd : extras * ) { - return [ trim-nl [ SHELL $(cmd) : $(extras) ] ] ; -} - -rule shell_or_fail ( cmd ) { - local ret = [ SHELL $(cmd) : exit-status ] ; - if $(ret[2]) != 0 { - exit $(cmd) failed : 1 ; - } -} - -cxxflags = [ os.environ "CXXFLAGS" ] ; -cflags = [ os.environ "CFLAGS" ] ; -ldflags = [ os.environ "LDFLAGS" ] ; - -#Run g++ with empty main and these arguments to see if it passes. -rule test_flags ( flags * ) { - flags = $(cxxflags) $(ldflags) $(flags) ; - local cmd = "bash -c \"g++ "$(flags:J=" ")" -x c++ - <<<'int main() {}' -o /dev/null >/dev/null 2>/dev/null\"" ; - local ret = [ SHELL $(cmd) : exit-status ] ; - if --debug-configuration in [ modules.peek : ARGV ] { - echo $(cmd) ; - echo $(ret) ; - } - if $(ret[2]) = 0 { - return true ; - } else { - return ; - } -} - -rule test_header ( name ) { - return [ test_flags "-include $(name)" ] ; -} - -rule test_library ( name ) { - return [ test_flags "-l$(name)" ] ; -} - -{ - local cleaning = [ option.get "clean" : : yes ] ; - cleaning ?= [ option.get "clean-all" : no : yes ] ; - if "clean" in [ modules.peek : ARGV ] { - cleaning = yes ; - } - constant CLEANING : $(cleaning) ; -} - -requirements = ; - -FORCE-STATIC = [ option.get "static" : : "yes" ] ; -if $(FORCE-STATIC) { - requirements += <link>static <runtime-link>static ; -} - -#Determine if a library can be compiled statically. -rule auto-shared ( name : additional * ) { - additional ?= "" ; - if [ test_flags $(additional)" -static -l"$(name) ] { - return ; - } else { - if $(FORCE-STATIC) { - echo "Could not statically link against lib $(name). Your build will probably fail." ; - return ; - } else { - return "<link>shared" ; - } - } -} - -# MacPorts' default location is /opt/local -- use this if no path is given. 
-with-macports = [ option.get "with-macports" : : "/opt/local" ] ; -if $(with-macports) { - using darwin ; - ECHO "Using --with-macports=$(with-macports), implying use of darwin GCC" ; - - L-boost-search = -L$(with-macports)/lib ; - boost-search = <search>$(with-macports)/lib ; - I-boost-include = -I$(with-macports)/include ; - boost-include = <include>$(with-macports)/include ; -} -else { - with-boost = [ option.get "with-boost" ] ; - with-boost ?= [ os.environ "BOOST_ROOT" ] ; - if $(with-boost) { - L-boost-search = -L$(with-boost)/lib" "-L$(with-boost)/lib64 ; - boost-search = <search>$(with-boost)/lib <search>$(with-boost)/lib64 ; - I-boost-include = -I$(with-boost)/include ; - boost-include = <include>$(with-boost)/include ; - } else { - L-boost-search = "" ; - boost-search = ; - I-boost-include = "" ; - boost-include = ; - } -} - -#Convenience rule for boost libraries. Defines library boost_$(name). -rule boost-lib ( name macro : deps * ) { - #Link multi-threaded programs against the -mt version if available. Old - #versions of boost do not have -mt tagged versions of all libraries. Sadly, - #boost.jam does not handle this correctly. - if [ test_flags $(L-boost-search)" -lboost_"$(name)"-mt$(boost-lib-version)" ] { - lib inner_boost_$(name) : : <threading>single $(boost-search) <name>boost_$(name)$(boost-lib-version) : : <library>$(deps) ; - lib inner_boost_$(name) : : <threading>multi $(boost-search) <name>boost_$(name)-mt$(boost-lib-version) : : <library>$(deps) ; - } else { - lib inner_boost_$(name) : : $(boost-search) <name>boost_$(name)$(boost-lib-version) : : <library>$(deps) ; - } - - alias boost_$(name) : inner_boost_$(name) : $(boost-auto-shared) : : <link>shared:<define>BOOST_$(macro) $(boost-include) ; -} - -#Argument is e.g. 103600 -rule boost ( min-version ) { - local cmd = "bash -c \"g++ "$(I-boost-include)" -dM -x c++ -E /dev/null -include boost/version.hpp 2>/dev/null |grep '#define BOOST_'\"" ; - local boost-shell = [ SHELL "$(cmd)" : exit-status ] ; - if $(boost-shell[2]) != 0 && $(CLEANING) = no { - echo Failed to run "$(cmd)" ; - exit Boost does not seem to be installed or g++ is confused. : 1 ; - } - boost-version = [ MATCH "#define BOOST_VERSION ([0-9]*)" : $(boost-shell[1]) ] ; - if $(boost-version) < $(min-version) && $(CLEANING) = no { - exit You have Boost $(boost-version). This package requires Boost at least $(min-version) (and preferably newer). : 1 ; - } - # If matching version tags exist, use them. - boost-lib-version = [ MATCH "#define BOOST_LIB_VERSION \"([^\"]*)\"" : $(boost-shell[1]) ] ; - if [ test_flags $(L-boost-search)" -lboost_program_options-"$(boost-lib-version) ] { - boost-lib-version = "-"$(boost-lib-version) ; - } else { - boost-lib-version = "" ; - } - - #Are we linking static binaries against shared boost? - boost-auto-shared = [ auto-shared "boost_program_options"$(boost-lib-version) : $(L-boost-search) ] ; - - #See tools/build/v2/contrib/boost.jam in a boost distribution for a table of macros to define. - boost-lib system SYSTEM_DYN_LINK ; - boost-lib thread THREAD_DYN_DLL : boost_system ; - boost-lib program_options PROGRAM_OPTIONS_DYN_LINK ; - boost-lib unit_test_framework TEST_DYN_LINK ; - boost-lib iostreams IOSTREAMS_DYN_LINK ; - boost-lib filesystem FILE_SYSTEM_DYN_LINK ; -} - -#Link normally to a library, but sometimes static isn't installed so fall back to dynamic. 
-rule external-lib ( name : search-path * ) { - lib $(name) : : [ auto-shared $(name) : "-L"$(search-path) ] <search>$(search-path) ; -} - -#Write the current command line to previous.sh. This does not do shell escaping. -{ - local build-log = $(TOP)/previous.sh ; - if ! [ path.exists $(build-log) ] { - SHELL "touch $(build-log) && chmod +x $(build-log)" ; - } - local script = [ modules.peek : ARGV ] ; - if $(script[1]) = "./jam-files/bjam" { - #The ./bjam shell script calls ./jam-files/bjam so that appears in argv but - #we want ./bjam to appear so the environment variables are set correctly. - script = "./bjam "$(script[2-]:J=" ") ; - } else { - script = $(script:J=" ") ; - } - script = "#!/bin/sh\n$(script)\n" ; - local ignored = @($(build-log):E=$(script)) ; -} - -#Boost jam's static clang for Linux is buggy. -requirements += <cxxflags>$(cxxflags) <cflags>$(cflags) <linkflags>$(ldflags) <os>LINUX,<toolset>clang:<link>shared ; - -if ! [ option.get "without-libsegfault" : : "yes" ] && ! $(FORCE-STATIC) { - #libSegFault prints a stack trace on segfault. Link against it if available. - if [ test_flags "-lSegFault" ] { - external-lib SegFault ; - requirements += <library>SegFault ; - } -} - -if [ option.get "git" : : "yes" ] { - local revision = [ _shell "git rev-parse --verify HEAD |head -c 7" ] ; - constant GITTAG : "/"$(revision) ; -} else { - constant GITTAG : "" ; -} - -prefix = [ option.get "prefix" ] ; -if $(prefix) { - prefix = [ path.root $(prefix) [ path.pwd ] ] ; - prefix = $(prefix)$(GITTAG) ; -} else { - prefix = $(TOP)$(GITTAG) ; -} - -bindir = [ option.get "bindir" : $(prefix)/bin ] ; -libdir = [ option.get "libdir" : $(prefix)/lib ] ; -rule install-bin-libs ( deps * ) { - install prefix-bin : $(deps) : <location>$(bindir) <install-dependencies>on <install-type>EXE <link>shared:<dll-path>$(libdir) ; - install prefix-lib : $(deps) : <location>$(libdir) <install-dependencies>on <install-type>LIB <link>shared:<dll-path>$(libdir) ; -} -rule install-headers ( name : list * : source-root ? ) { - local includedir = [ option.get "includedir" : $(prefix)/include ] ; - source-root ?= "." ; - install $(name) : $(list) : <location>$(includedir) <install-source-root>$(source-root) ; -} - -rule build-projects ( projects * ) { - for local p in $(projects) { - build-project $(p) ; - } -} - -#Only one post build hook is allowed. Allow multiple. -post-hooks = ; -rule post-build ( ok ? ) { - for local r in $(post-hooks) { - $(r) $(ok) ; - } -} -IMPORT $(__name__) : post-build : : $(__name__).post-build ; -build-system.set-post-build-hook $(__name__).post-build ; -rule add-post-hook ( names * ) { - post-hooks += $(names) ; -} - - -#Backend for writing content to files after build completes. -post-files = ; -post-contents = ; -rule save-post-build ( ok ? ) { - if $(ok) { - while $(post-files) { - local ignored = @($(post-files[1]):E=$(post-contents[1])) ; - post-files = $(post-files[2-]) ; - post-contents = $(post-contents[2-]) ; - } - } -} -add-post-hook save-post-build ; - -#Queue content to be written to file when build completes successfully. -rule add-post-write ( name content ) { - post-files += $(name) ; - post-contents += $(content) ; -} - -#Compare contents of file with current. If they're different, force the targets to rebuild then overwrite the file. 
-rule always-if-changed ( file current : targets * ) { - local previous = inconsistent ; - if [ path.exists $(file) ] { - previous = [ _shell "cat $(file)" ] ; - } - if $(current) != $(previous) { - #Write inconsistent while the build is running - if [ path.exists $(file) ] { - local ignored = @($(file):E=inconsistent) ; - } - add-post-write $(file) $(current) ; - for local i in $(targets) { - always $(i) ; - } - } -} - -if [ option.get "sanity-test" : : "yes" ] { - local current_version = [ modules.peek : JAM_VERSION ] ; - if ( $(current_version[0]) < 2000 && [ version.check-jam-version 3 1 16 ] ) || [ version.check-jam-version 2011 0 0 ] { - EXIT "Sane" : 0 ; - } else { - EXIT "Bad" : 1 ; - } -} - -use-project /top : . ; diff --git a/klm/lm/Jamfile b/klm/lm/Jamfile deleted file mode 100644 index dd620068..00000000 --- a/klm/lm/Jamfile +++ /dev/null @@ -1,15 +0,0 @@ -lib kenlm : bhiksha.cc binary_format.cc config.cc lm_exception.cc model.cc quantize.cc read_arpa.cc search_hashed.cc search_trie.cc trie.cc trie_sort.cc value_build.cc virtual_interface.cc vocab.cc ../util//kenutil : <include>.. : : <include>.. <library>../util//kenutil ; - -import testing ; - -run left_test.cc ../util//kenutil kenlm /top//boost_unit_test_framework : : test.arpa ; -run model_test.cc ../util//kenutil kenlm /top//boost_unit_test_framework : : test.arpa test_nounk.arpa ; - -exe query : ngram_query.cc kenlm ../util//kenutil ; -exe build_binary : build_binary.cc kenlm ../util//kenutil ; -exe kenlm_max_order : max_order.cc : <include>.. ; - -alias programs : query build_binary kenlm_max_order ; - -install legacy : build_binary query kenlm_max_order - : <location>$(TOP)/lm <install-type>EXE <install-dependencies>on <link>shared:<dll-path>$(TOP)/lm <link>shared:<install-type>LIB ; diff --git a/klm/util/Jamfile b/klm/util/Jamfile deleted file mode 100644 index a939265f..00000000 --- a/klm/util/Jamfile +++ /dev/null @@ -1,10 +0,0 @@ -lib kenutil : bit_packing.cc ersatz_progress.cc exception.cc file.cc file_piece.cc mmap.cc murmur_hash.cc usage.cc /top//z : <include>.. : : <include>.. ; - -import testing ; - -unit-test bit_packing_test : bit_packing_test.cc kenutil /top//boost_unit_test_framework ; -run file_piece_test.cc kenutil /top//boost_unit_test_framework : : file_piece.cc ; -unit-test joint_sort_test : joint_sort_test.cc kenutil /top//boost_unit_test_framework ; -unit-test probing_hash_table_test : probing_hash_table_test.cc kenutil /top//boost_unit_test_framework ; -unit-test sorted_uniform_test : sorted_uniform_test.cc kenutil /top//boost_unit_test_framework ; -unit-test tokenize_piece_test : tokenize_piece_test.cc kenutil /top//boost_unit_test_framework ; diff --git a/mira/Jamfile b/mira/Jamfile deleted file mode 100644 index a483e57d..00000000 --- a/mira/Jamfile +++ /dev/null @@ -1 +0,0 @@ -exe kbest_mira : kbest_mira.cc ../decoder//decoder ; diff --git a/mteval/Jamfile b/mteval/Jamfile deleted file mode 100644 index 3ed2c2cc..00000000 --- a/mteval/Jamfile +++ /dev/null @@ -1,8 +0,0 @@ -import testing ; - -lib mteval : ter.cc comb_scorer.cc aer_scorer.cc scorer.cc external_scorer.cc ns.cc ns_ter.cc ns_ext.cc ns_comb.cc ns_docscorer.cc ns_cer.cc ..//utils : <include>. : : <include>. 
<library>..//z ; -exe fast_score : fast_score.cc mteval ..//utils ..//boost_program_options ; -exe mbr_kbest : mbr_kbest.cc mteval ..//utils ..//boost_program_options ; -alias programs : fast_score mbr_kbest ; - -unit-test scorer_test : scorer_test.cc mteval ..//utils ..//z ..//boost_unit_test_framework : <testing.arg>$(TOP)/mteval/test_data ; diff --git a/mteval/mbr_kbest.cc b/mteval/mbr_kbest.cc index 2bd31566..2519bc01 100644 --- a/mteval/mbr_kbest.cc +++ b/mteval/mbr_kbest.cc @@ -1,7 +1,9 @@ #include <iostream> #include <vector> +#include <tr1/unordered_map> #include <boost/program_options.hpp> +#include <boost/functional/hash.hpp> #include "prob.h" #include "tdict.h" @@ -10,6 +12,7 @@ #include "stringlib.h" using namespace std; +using namespace std::tr1; namespace po = boost::program_options; @@ -31,27 +34,33 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) { } } +struct ScoreComparer { + bool operator()(const pair<vector<WordID>, prob_t>& a, const pair<vector<WordID>, prob_t>& b) const { + return a.second > b.second; + } +}; + struct LossComparer { bool operator()(const pair<vector<WordID>, prob_t>& a, const pair<vector<WordID>, prob_t>& b) const { return a.second < b.second; } }; -bool ReadKBestList(istream* in, string* sent_id, vector<pair<vector<WordID>, prob_t> >* list) { +bool ReadKBestList(const double mbr_scale, istream* in, string* sent_id, vector<pair<vector<WordID>, prob_t> >* list) { static string cache_id; static pair<vector<WordID>, prob_t> cache_pair; list->clear(); string cur_id; + unordered_map<vector<WordID>, unsigned, boost::hash<vector<WordID> > > sent2id; if (cache_pair.first.size() > 0) { list->push_back(cache_pair); + sent2id[cache_pair.first] = 0; cur_id = cache_id; cache_pair.first.clear(); } string line; string tstr; - while(*in) { - getline(*in, line); - if (line.empty()) continue; + while(getline(*in, line)) { size_t p1 = line.find(" ||| "); if (p1 == string::npos) { cerr << "Bad format: " << line << endl; abort(); } size_t p2 = line.find(" ||| ", p1 + 4); @@ -59,16 +68,25 @@ bool ReadKBestList(istream* in, string* sent_id, vector<pair<vector<WordID>, pro size_t p3 = line.rfind(" ||| "); cache_id = line.substr(0, p1); tstr = line.substr(p1 + 5, p2 - p1 - 5); - double val = strtod(line.substr(p3 + 5).c_str(), NULL); + double val = strtod(line.substr(p3 + 5).c_str(), NULL) * mbr_scale; TD::ConvertSentence(tstr, &cache_pair.first); cache_pair.second.logeq(val); if (cur_id.empty()) cur_id = cache_id; if (cur_id == cache_id) { - list->push_back(cache_pair); + unordered_map<vector<WordID>, unsigned, boost::hash<vector<WordID> > >::iterator it = + sent2id.find(cache_pair.first); + if (it == sent2id.end()) { + sent2id.insert(make_pair(cache_pair.first, unsigned(list->size()))); + list->push_back(cache_pair); + } else { + (*list)[it->second].second += cache_pair.second; + // cerr << "Cruch: " << line << "\n newp=" << (*list)[it->second].second << endl; + } *sent_id = cur_id; cache_pair.first.clear(); } else { break; } } + sort(list->begin(), list->end(), ScoreComparer()); return !list->empty(); } @@ -87,14 +105,14 @@ int main(int argc, char** argv) { vector<pair<vector<WordID>, prob_t> > list; ReadFile rf(file); string sent_id; - while(ReadKBestList(rf.stream(), &sent_id, &list)) { + while(ReadKBestList(mbr_scale, rf.stream(), &sent_id, &list)) { vector<prob_t> joints(list.size()); - const prob_t max_score = pow(list.front().second, mbr_scale); + const prob_t max_score = list.front().second; prob_t marginal = prob_t::Zero(); for (int i = 0 ; i 
< list.size(); ++i) { - const prob_t joint = pow(list[i].second, mbr_scale) / max_score; + const prob_t joint = list[i].second / max_score; joints[i] = joint; - // cerr << "list[" << i << "] joint=" << log(joint) << endl; + //cerr << "list[" << i << "] joint=" << log(joint) << endl; marginal += joint; } int mbr_idx = -1; diff --git a/mteval/ns_docscorer.cc b/mteval/ns_docscorer.cc index 28a2fd09..f72ad115 100644 --- a/mteval/ns_docscorer.cc +++ b/mteval/ns_docscorer.cc @@ -16,7 +16,7 @@ void DocumentScorer::Init(const EvaluationMetric* metric, const string& src_file, bool verbose) { scorers_.clear(); - cerr << "Loading references (" << ref_files.size() << " files)\n"; + if (verbose) cerr << "Loading references (" << ref_files.size() << " files)\n"; assert(src_file.empty()); std::vector<ReadFile> ifs(ref_files.begin(),ref_files.end()); for (int i=0; i < ref_files.size(); ++i) ifs[i].Init(ref_files[i]); @@ -55,6 +55,6 @@ void DocumentScorer::Init(const EvaluationMetric* metric, ++line; } } - cerr << "Loaded reference translations for " << scorers_.size() << " sentences.\n"; + if (verbose) cerr << "Loaded reference translations for " << scorers_.size() << " sentences.\n"; } diff --git a/mteval/ns_docscorer.h b/mteval/ns_docscorer.h index 170ac627..a5757258 100644 --- a/mteval/ns_docscorer.h +++ b/mteval/ns_docscorer.h @@ -5,7 +5,7 @@ #include <string> #include <boost/shared_ptr.hpp> -struct EvaluationMetric; +class EvaluationMetric; struct SegmentEvaluator; class DocumentScorer { public: diff --git a/phrasinator/Jamfile b/phrasinator/Jamfile deleted file mode 100644 index 1fc34f79..00000000 --- a/phrasinator/Jamfile +++ /dev/null @@ -1,4 +0,0 @@ -exe gibbs_train_plm : gibbs_train_plm.cc ..//utils ..//z ..//boost_program_options ; -exe gibbs_train_plm_notables : gibbs_train_plm.notables.cc ..//utils ..//z ..//boost_program_options ; - -alias programs : gibbs_train_plm gibbs_train_plm_notables ; diff --git a/phrasinator/Makefile.am b/phrasinator/Makefile.am deleted file mode 100644 index 3ddd1934..00000000 --- a/phrasinator/Makefile.am +++ /dev/null @@ -1,14 +0,0 @@ -bin_PROGRAMS = gibbs_train_plm gibbs_train_plm_notables - -#head_bigram_model - -gibbs_train_plm_notables_SOURCES = gibbs_train_plm.notables.cc -gibbs_train_plm_notables_LDADD = $(top_srcdir)/utils/libutils.a -lz - -gibbs_train_plm_SOURCES = gibbs_train_plm.cc -gibbs_train_plm_LDADD = $(top_srcdir)/utils/libutils.a -lz - -#head_bigram_model_SOURCES = head_bigram_model.cc -#head_bigram_model_LDADD = $(top_srcdir)/utils/libutils.a -lz - -AM_CPPFLAGS = -funroll-loops -ffast-math -W -Wall -I$(top_srcdir)/utils diff --git a/phrasinator/README b/phrasinator/README deleted file mode 100644 index fb5b93ef..00000000 --- a/phrasinator/README +++ /dev/null @@ -1,16 +0,0 @@ -The "phrasinator" uses a simple Bayesian nonparametric model to segment -text into chunks. The inferred model is then saved so that it can rapidly -predict segments in new (but related) texts. - - Input will be a corpus of sentences, e.g.: - - economists have argued that real interest rates have fallen . - - The output will be a model that, when run with cdec, will produce - a segmentation into phrasal units, e.g.: - - economists have argued that real_interest_rates have fallen . - - -To train a model, run ./train-phrasinator.pl and follow instructions. 
- diff --git a/phrasinator/gibbs_train_plm.cc b/phrasinator/gibbs_train_plm.cc deleted file mode 100644 index 7847a460..00000000 --- a/phrasinator/gibbs_train_plm.cc +++ /dev/null @@ -1,309 +0,0 @@ -#include <iostream> -#include <tr1/memory> - -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp.h" -#include "m.h" - -using namespace std; -using namespace std::tr1; -namespace po = boost::program_options; - -Dict d; // global dictionary - -string Join(char joiner, const vector<int>& phrase) { - ostringstream os; - for (unsigned i = 0; i < phrase.size(); ++i) { - if (i > 0) os << joiner; - os << d.Convert(phrase[i]); - } - return os.str(); -} - -ostream& operator<<(ostream& os, const vector<int>& phrase) { - for (unsigned i = 0; i < phrase.size(); ++i) - os << (i == 0 ? "" : " ") << d.Convert(phrase[i]); - return os; -} - -struct UnigramLM { - explicit UnigramLM(const string& fname) { - ifstream in(fname.c_str()); - assert(in); - } - - double logprob(unsigned word) const { - assert(word < freqs_.size()); - return freqs_[word]; - } - - vector<double> freqs_; -}; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("input,i",po::value<string>(),"Read file from") - ("random_seed,S",po::value<uint32_t>(), "Random seed") - ("write_cdec_grammar,g", po::value<string>(), "Write cdec grammar to this file") - ("write_cdec_weights,w", po::value<string>(), "Write cdec weights to this file") - ("poisson_length,p", "Use a Poisson distribution as the length of a phrase in the base distribuion") - ("no_hyperparameter_inference,N", "Disable hyperparameter inference"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -void ReadCorpus(const string& filename, vector<vector<int> >* c, set<int>* vocab) { - c->clear(); - istream* in; - if (filename == "-") - in = &cin; - else - in = new ifstream(filename.c_str()); - assert(*in); - string line; - while(*in) { - getline(*in, line); - if (line.empty() && !*in) break; - c->push_back(vector<int>()); - vector<int>& v = c->back(); - d.ConvertWhitespaceDelimitedLine(line, &v); - for (unsigned i = 0; i < v.size(); ++i) vocab->insert(v[i]); - } - if (in != &cin) delete in; -} - -struct UniphraseLM { - UniphraseLM(const vector<vector<int> >& corpus, - const set<int>& vocab, - const po::variables_map& conf) : - phrases_(1,1,1,1), - gen_(1,1,1,1), - corpus_(corpus), - uniform_word_(1.0 / vocab.size()), - gen_p0_(0.5), - p_end_(0.5), - use_poisson_(conf.count("poisson_length") > 0) {} - - double p0(const vector<int>& phrase) const { - static vector<double> p0s(10000, 0.0); - assert(phrase.size() < 10000); - double& p = p0s[phrase.size()]; - if (p) return p; - p = 
exp(log_p0(phrase)); - if (!p) { - cerr << "0 prob phrase: " << phrase << "\nAssigning std::numeric_limits<double>::min()\n"; - p = std::numeric_limits<double>::min(); - } - return p; - } - - double log_p0(const vector<int>& phrase) const { - double len_logprob; - if (use_poisson_) - len_logprob = Md::log_poisson(phrase.size(), 1.0); - else - len_logprob = log(1 - p_end_) * (phrase.size() -1) + log(p_end_); - return log(uniform_word_) * phrase.size() + len_logprob; - } - - double llh() const { - double llh = gen_.log_crp_prob(); - llh += gen_.num_tables(false) * log(gen_p0_) + - gen_.num_tables(true) * log(1 - gen_p0_); - double llhr = phrases_.log_crp_prob(); - for (CCRP<vector<int> >::const_iterator it = phrases_.begin(); it != phrases_.end(); ++it) { - llhr += phrases_.num_tables(it->first) * log_p0(it->first); - //llhr += log_p0(it->first); - if (!isfinite(llh)) { - cerr << it->first << endl; - cerr << log_p0(it->first) << endl; - abort(); - } - } - return llh + llhr; - } - - void Sample(unsigned int samples, bool hyp_inf, MT19937* rng) { - cerr << "Initializing...\n"; - z_.resize(corpus_.size()); - int tc = 0; - for (unsigned i = 0; i < corpus_.size(); ++i) { - const vector<int>& line = corpus_[i]; - const int ls = line.size(); - const int last_pos = ls - 1; - vector<bool>& z = z_[i]; - z.resize(ls); - int prev = 0; - for (int j = 0; j < ls; ++j) { - z[j] = rng->next() < 0.5; - if (j == last_pos) z[j] = true; // break phrase at the end of the sentence - if (z[j]) { - const vector<int> p(line.begin() + prev, line.begin() + j + 1); - phrases_.increment(p, p0(p), rng); - //cerr << p << ": " << p0(p) << endl; - prev = j + 1; - gen_.increment(false, gen_p0_, rng); - ++tc; // remove - } - } - ++tc; - gen_.increment(true, 1.0 - gen_p0_, rng); // end of utterance - } - cerr << "TC: " << tc << endl; - cerr << "Initial LLH: " << llh() << endl; - cerr << "Sampling...\n"; - cerr << gen_ << endl; - for (unsigned s = 1; s < samples; ++s) { - cerr << '.'; - if (s % 10 == 0) { - cerr << " [" << s; - if (hyp_inf) ResampleHyperparameters(rng); - cerr << " LLH=" << llh() << "]\n"; - vector<int> z(z_[0].size(), 0); - //for (int j = 0; j < z.size(); ++j) z[j] = z_[0][j]; - //SegCorpus::Write(corpus_[0], z, d); - } - for (unsigned i = 0; i < corpus_.size(); ++i) { - const vector<int>& line = corpus_[i]; - const int ls = line.size(); - const int last_pos = ls - 1; - vector<bool>& z = z_[i]; - int prev = 0; - for (int j = 0; j < last_pos; ++j) { // don't resample last position - int next = j+1; while(!z[next]) { ++next; } - const vector<int> p1p2(line.begin() + prev, line.begin() + next + 1); - const vector<int> p1(line.begin() + prev, line.begin() + j + 1); - const vector<int> p2(line.begin() + j + 1, line.begin() + next + 1); - - if (z[j]) { - phrases_.decrement(p1, rng); - phrases_.decrement(p2, rng); - gen_.decrement(false, rng); - gen_.decrement(false, rng); - } else { - phrases_.decrement(p1p2, rng); - gen_.decrement(false, rng); - } - - const double d1 = phrases_.prob(p1p2, p0(p1p2)) * gen_.prob(false, gen_p0_); - double d2 = phrases_.prob(p1, p0(p1)) * gen_.prob(false, gen_p0_); - phrases_.increment(p1, p0(p1), rng); - gen_.increment(false, gen_p0_, rng); - d2 *= phrases_.prob(p2, p0(p2)) * gen_.prob(false, gen_p0_); - phrases_.decrement(p1, rng); - gen_.decrement(false, rng); - z[j] = rng->SelectSample(d1, d2); - - if (z[j]) { - phrases_.increment(p1, p0(p1), rng); - phrases_.increment(p2, p0(p2), rng); - gen_.increment(false, gen_p0_, rng); - gen_.increment(false, gen_p0_, rng); - prev = j + 1; - } 
else { - phrases_.increment(p1p2, p0(p1p2), rng); - gen_.increment(false, gen_p0_, rng); - } - } - } - } -// cerr << endl << endl << gen_ << endl << phrases_ << endl; - cerr << gen_.prob(false, gen_p0_) << " " << gen_.prob(true, 1 - gen_p0_) << endl; - } - - void WriteCdecGrammarForCurrentSample(ostream* os) const { - CCRP<vector<int> >::const_iterator it = phrases_.begin(); - for (; it != phrases_.end(); ++it) { - (*os) << "[X] ||| " << Join(' ', it->first) << " ||| " - << Join('_', it->first) << " ||| C=1 P=" - << log(phrases_.prob(it->first, p0(it->first))) << endl; - } - } - - double OOVUnigramLogProb() const { - vector<int> x(1,99999999); - return log(phrases_.prob(x, p0(x))); - } - - void ResampleHyperparameters(MT19937* rng) { - phrases_.resample_hyperparameters(rng); - gen_.resample_hyperparameters(rng); - cerr << " d=" << phrases_.discount() << ",s=" << phrases_.strength(); - } - - CCRP<vector<int> > phrases_; - CCRP<bool> gen_; - vector<vector<bool> > z_; // z_[i] is there a phrase boundary after the ith word - const vector<vector<int> >& corpus_; - const double uniform_word_; - const double gen_p0_; - const double p_end_; // in base length distribution, p of the end of a phrase - const bool use_poisson_; -}; - - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - boost::shared_ptr<MT19937> prng; - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - - vector<vector<int> > corpus; - set<int> vocab; - ReadCorpus(conf["input"].as<string>(), &corpus, &vocab); - cerr << "Corpus size: " << corpus.size() << " sentences\n"; - cerr << "Vocabulary size: " << vocab.size() << " types\n"; - - UniphraseLM ulm(corpus, vocab, conf); - ulm.Sample(conf["samples"].as<unsigned>(), conf.count("no_hyperparameter_inference") == 0, &rng); - cerr << "OOV unigram prob: " << ulm.OOVUnigramLogProb() << endl; - - for (unsigned i = 0; i < corpus.size(); ++i) -// SegCorpus::Write(corpus[i], shmmlm.z_[i], d); - ; - if (conf.count("write_cdec_grammar")) { - string fname = conf["write_cdec_grammar"].as<string>(); - cerr << "Writing model to " << fname << " ...\n"; - WriteFile wf(fname); - ulm.WriteCdecGrammarForCurrentSample(wf.stream()); - } - - if (conf.count("write_cdec_weights")) { - string fname = conf["write_cdec_weights"].as<string>(); - cerr << "Writing weights to " << fname << " .\n"; - WriteFile wf(fname); - ostream& os = *wf.stream(); - os << "# make C smaller to use more phrases\nP 1\nPassThrough " << ulm.OOVUnigramLogProb() << "\nC -3\n"; - } - - return 0; -} - diff --git a/phrasinator/gibbs_train_plm.notables.cc b/phrasinator/gibbs_train_plm.notables.cc deleted file mode 100644 index 4526eaa6..00000000 --- a/phrasinator/gibbs_train_plm.notables.cc +++ /dev/null @@ -1,335 +0,0 @@ -#include <iostream> -#include <tr1/memory> - -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "filelib.h" -#include "dict.h" -#include "sampler.h" -#include "ccrp.h" -#include "ccrp_nt.h" - -using namespace std; -using namespace std::tr1; -namespace po = boost::program_options; - -Dict d; // global dictionary - -string Join(char joiner, const vector<int>& phrase) { - ostringstream os; - for (unsigned i = 0; i < phrase.size(); ++i) { - if (i > 0) os << joiner; - os << d.Convert(phrase[i]); - } - return os.str(); -} - -template <typename BType> -void WriteSeg(const vector<int>& line, const vector<BType>& label, const Dict& d) 
{ - assert(line.size() == label.size()); - assert(label.back()); - unsigned prev = 0; - unsigned cur = 0; - while (cur < line.size()) { - if (label[cur]) { - if (prev) cout << ' '; - cout << "{{"; - for (unsigned i = prev; i <= cur; ++i) - cout << (i == prev ? "" : " ") << d.Convert(line[i]); - cout << "}}:" << label[cur]; - prev = cur + 1; - } - ++cur; - } - cout << endl; -} - -ostream& operator<<(ostream& os, const vector<int>& phrase) { - for (unsigned i = 0; i < phrase.size(); ++i) - os << (i == 0 ? "" : " ") << d.Convert(phrase[i]); - return os; -} - -struct UnigramLM { - explicit UnigramLM(const string& fname) { - ifstream in(fname.c_str()); - assert(in); - } - - double logprob(unsigned word) const { - assert(word < freqs_.size()); - return freqs_[word]; - } - - vector<double> freqs_; -}; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") - ("input,i",po::value<string>(),"Read file from") - ("random_seed,S",po::value<uint32_t>(), "Random seed") - ("write_cdec_grammar,g", po::value<string>(), "Write cdec grammar to this file") - ("write_cdec_weights,w", po::value<string>(), "Write cdec weights to this file") - ("poisson_length,p", "Use a Poisson distribution as the length of a phrase in the base distribuion") - ("no_hyperparameter_inference,N", "Disable hyperparameter inference"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("input") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -void ReadCorpus(const string& filename, vector<vector<int> >* c, set<int>* vocab) { - c->clear(); - istream* in; - if (filename == "-") - in = &cin; - else - in = new ifstream(filename.c_str()); - assert(*in); - string line; - while(*in) { - getline(*in, line); - if (line.empty() && !*in) break; - c->push_back(vector<int>()); - vector<int>& v = c->back(); - d.ConvertWhitespaceDelimitedLine(line, &v); - for (unsigned i = 0; i < v.size(); ++i) vocab->insert(v[i]); - } - if (in != &cin) delete in; -} - -double log_poisson(unsigned x, const double& lambda) { - assert(lambda > 0.0); - return log(lambda) * x - lgamma(x + 1) - lambda; -} - -struct UniphraseLM { - UniphraseLM(const vector<vector<int> >& corpus, - const set<int>& vocab, - const po::variables_map& conf) : - phrases_(1,1), - gen_(1,1), - corpus_(corpus), - uniform_word_(1.0 / vocab.size()), - gen_p0_(0.5), - p_end_(0.5), - use_poisson_(conf.count("poisson_length") > 0) {} - - double p0(const vector<int>& phrase) const { - static vector<double> p0s(10000, 0.0); - assert(phrase.size() < 10000); - double& p = p0s[phrase.size()]; - if (p) return p; - p = exp(log_p0(phrase)); - if (!p) { - cerr << "0 prob phrase: " << phrase << "\nAssigning std::numeric_limits<double>::min()\n"; - p = std::numeric_limits<double>::min(); - } - return p; - } - - double log_p0(const vector<int>& phrase) const { - double 
len_logprob; - if (use_poisson_) - len_logprob = log_poisson(phrase.size(), 1.0); - else - len_logprob = log(1 - p_end_) * (phrase.size() -1) + log(p_end_); - return log(uniform_word_) * phrase.size() + len_logprob; - } - - double llh() const { - double llh = gen_.log_crp_prob(); - llh += log(gen_p0_) + log(1 - gen_p0_); - double llhr = phrases_.log_crp_prob(); - for (CCRP_NoTable<vector<int> >::const_iterator it = phrases_.begin(); it != phrases_.end(); ++it) { - llhr += log_p0(it->first); - //llhr += log_p0(it->first); - if (!isfinite(llh)) { - cerr << it->first << endl; - cerr << log_p0(it->first) << endl; - abort(); - } - } - return llh + llhr; - } - - void Sample(unsigned int samples, bool hyp_inf, MT19937* rng) { - cerr << "Initializing...\n"; - z_.resize(corpus_.size()); - int tc = 0; - for (unsigned i = 0; i < corpus_.size(); ++i) { - const vector<int>& line = corpus_[i]; - const int ls = line.size(); - const int last_pos = ls - 1; - vector<bool>& z = z_[i]; - z.resize(ls); - int prev = 0; - for (int j = 0; j < ls; ++j) { - z[j] = rng->next() < 0.5; - if (j == last_pos) z[j] = true; // break phrase at the end of the sentence - if (z[j]) { - const vector<int> p(line.begin() + prev, line.begin() + j + 1); - phrases_.increment(p); - //cerr << p << ": " << p0(p) << endl; - prev = j + 1; - gen_.increment(false); - ++tc; // remove - } - } - ++tc; - gen_.increment(true); // end of utterance - } - cerr << "TC: " << tc << endl; - cerr << "Initial LLH: " << llh() << endl; - cerr << "Sampling...\n"; - cerr << gen_ << endl; - for (unsigned s = 1; s < samples; ++s) { - cerr << '.'; - if (s % 10 == 0) { - cerr << " [" << s; - if (hyp_inf) ResampleHyperparameters(rng); - cerr << " LLH=" << llh() << "]\n"; - vector<int> z(z_[0].size(), 0); - //for (int j = 0; j < z.size(); ++j) z[j] = z_[0][j]; - //SegCorpus::Write(corpus_[0], z, d); - } - for (unsigned i = 0; i < corpus_.size(); ++i) { - const vector<int>& line = corpus_[i]; - const int ls = line.size(); - const int last_pos = ls - 1; - vector<bool>& z = z_[i]; - int prev = 0; - for (int j = 0; j < last_pos; ++j) { // don't resample last position - int next = j+1; while(!z[next]) { ++next; } - const vector<int> p1p2(line.begin() + prev, line.begin() + next + 1); - const vector<int> p1(line.begin() + prev, line.begin() + j + 1); - const vector<int> p2(line.begin() + j + 1, line.begin() + next + 1); - - if (z[j]) { - phrases_.decrement(p1); - phrases_.decrement(p2); - gen_.decrement(false); - gen_.decrement(false); - } else { - phrases_.decrement(p1p2); - gen_.decrement(false); - } - - const double d1 = phrases_.prob(p1p2, p0(p1p2)) * gen_.prob(false, gen_p0_); - double d2 = phrases_.prob(p1, p0(p1)) * gen_.prob(false, gen_p0_); - phrases_.increment(p1); - gen_.increment(false); - d2 *= phrases_.prob(p2, p0(p2)) * gen_.prob(false, gen_p0_); - phrases_.decrement(p1); - gen_.decrement(false); - z[j] = rng->SelectSample(d1, d2); - - if (z[j]) { - phrases_.increment(p1); - phrases_.increment(p2); - gen_.increment(false); - gen_.increment(false); - prev = j + 1; - } else { - phrases_.increment(p1p2); - gen_.increment(false); - } - } - } - } -// cerr << endl << endl << gen_ << endl << phrases_ << endl; - cerr << gen_.prob(false, gen_p0_) << " " << gen_.prob(true, 1 - gen_p0_) << endl; - } - - void WriteCdecGrammarForCurrentSample(ostream* os) const { - CCRP_NoTable<vector<int> >::const_iterator it = phrases_.begin(); - for (; it != phrases_.end(); ++it) { - (*os) << "[X] ||| " << Join(' ', it->first) << " ||| " - << Join('_', it->first) << " ||| C=1 P=" 
- << log(phrases_.prob(it->first, p0(it->first))) << endl; - } - } - - double OOVUnigramLogProb() const { - vector<int> x(1,99999999); - return log(phrases_.prob(x, p0(x))); - } - - void ResampleHyperparameters(MT19937* rng) { - phrases_.resample_hyperparameters(rng); - gen_.resample_hyperparameters(rng); - cerr << " " << phrases_.alpha(); - } - - CCRP_NoTable<vector<int> > phrases_; - CCRP_NoTable<bool> gen_; - vector<vector<bool> > z_; // z_[i] is there a phrase boundary after the ith word - const vector<vector<int> >& corpus_; - const double uniform_word_; - const double gen_p0_; - const double p_end_; // in base length distribution, p of the end of a phrase - const bool use_poisson_; -}; - - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - boost::shared_ptr<MT19937> prng; - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - - vector<vector<int> > corpus; - set<int> vocab; - ReadCorpus(conf["input"].as<string>(), &corpus, &vocab); - cerr << "Corpus size: " << corpus.size() << " sentences\n"; - cerr << "Vocabulary size: " << vocab.size() << " types\n"; - - UniphraseLM ulm(corpus, vocab, conf); - ulm.Sample(conf["samples"].as<unsigned>(), conf.count("no_hyperparameter_inference") == 0, &rng); - cerr << "OOV unigram prob: " << ulm.OOVUnigramLogProb() << endl; - - for (unsigned i = 0; i < corpus.size(); ++i) - WriteSeg(corpus[i], ulm.z_[i], d); - - if (conf.count("write_cdec_grammar")) { - string fname = conf["write_cdec_grammar"].as<string>(); - cerr << "Writing model to " << fname << " ...\n"; - WriteFile wf(fname); - ulm.WriteCdecGrammarForCurrentSample(wf.stream()); - } - - if (conf.count("write_cdec_weights")) { - string fname = conf["write_cdec_weights"].as<string>(); - cerr << "Writing weights to " << fname << " .\n"; - WriteFile wf(fname); - ostream& os = *wf.stream(); - os << "# make C smaller to use more phrases\nP 1\nPassThrough " << ulm.OOVUnigramLogProb() << "\nC -3\n"; - } - - - - return 0; -} - diff --git a/phrasinator/train-phrasinator.pl b/phrasinator/train-phrasinator.pl deleted file mode 100755 index c50b8e68..00000000 --- a/phrasinator/train-phrasinator.pl +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/perl -w -use strict; -my $script_dir; BEGIN { use Cwd qw/ abs_path cwd /; use File::Basename; $script_dir = dirname(abs_path($0)); push @INC, $script_dir; } -use Getopt::Long; -use File::Spec qw (rel2abs); - -my $DECODER = "$script_dir/../decoder/cdec"; -my $TRAINER = "$script_dir/gibbs_train_plm_notables"; - -die "Can't find $TRAINER" unless -f $TRAINER; -die "Can't execute $TRAINER" unless -x $TRAINER; - -if (!GetOptions( - "decoder=s" => \$DECODER, -)) { usage(); } - -die "Can't find $DECODER" unless -f $DECODER; -die "Can't execute $DECODER" unless -x $DECODER; -if (scalar @ARGV != 2) { usage(); } -my $INFILE = shift @ARGV; -my $OUTDIR = shift @ARGV; -$OUTDIR = File::Spec->rel2abs($OUTDIR); -print STDERR " Input file: $INFILE\n"; -print STDERR "Output directory: $OUTDIR\n"; -open F, "<$INFILE" or die "Failed to open $INFILE for reading: $!"; -close F; -die "Please remove existing directory $OUTDIR\n" if (-f $OUTDIR || -d $OUTDIR); - -my $CMD = "mkdir $OUTDIR"; -safesystem($CMD) or die "Failed to create directory $OUTDIR\n$!"; - -my $grammar="$OUTDIR/grammar.gz"; -my $weights="$OUTDIR/weights"; -$CMD = "$TRAINER -w $weights -g $grammar -i $INFILE"; -safesystem($CMD) or die "Failed to train model!\n"; -my $cdecini = 
"$OUTDIR/cdec.ini"; -open C, ">$cdecini" or die "Failed to open $cdecini for writing: $!"; - -print C <<EOINI; -quiet=true -formalism=scfg -grammar=$grammar -add_pass_through_rules=true -weights=$OUTDIR/weights -EOINI - -close C; - -print <<EOT; - -Model trained successfully. Text can be decoded into phrasal units with -the following command: - - $DECODER -c $OUTDIR/cdec.ini < FILE.TXT - -EOT -exit(0); - -sub usage { - print <<EOT; -Usage: $0 [options] INPUT.TXT OUTPUT-DIRECTORY - - Infers a phrasal segmentation model from the tokenized text in INPUT.TXT - and writes it to OUTPUT-DIRECTORY/ so that it can be applied to other - text or have its granularity altered. - -EOT - exit(1); -} - -sub safesystem { - print STDERR "Executing: @_\n"; - system(@_); - if ($? == -1) { - print STDERR "ERROR: Failed to execute: @_\n $!\n"; - exit(1); - } - elsif ($? & 127) { - printf STDERR "ERROR: Execution of: @_\n died with signal %d, %s coredump\n", - ($? & 127), ($? & 128) ? 'with' : 'without'; - exit(1); - } - else { - my $exitcode = $? >> 8; - print STDERR "Exit code: $exitcode\n" if $exitcode; - return ! $exitcode; - } -} - diff --git a/python/README.md b/python/README.md index da9f9387..bea6190a 100644 --- a/python/README.md +++ b/python/README.md @@ -12,6 +12,10 @@ Compile a parallel corpus and a word alignment into a suffix array representatio python -m cdec.sa.compile -f f.txt -e e.txt -a a.txt -o output/ -c extract.ini +Or, if your parallel corpus is in a single-file format (with source and target sentences on a single line, separated by a triple pipe `|||`), use: + + python -m cdec.sa.compile -b f-e.txt -a a.txt -o output/ -c extract.ini + Extract grammar rules from the compiled corpus: cat input.txt | python -m cdec.sa.extract -c extract.ini -g grammars/ diff --git a/python/src/_cdec.cpp b/python/src/_cdec.cpp index 1bd600f0..c8d50a4f 100644 --- a/python/src/_cdec.cpp +++ b/python/src/_cdec.cpp @@ -1,4 +1,4 @@ -/* Generated by Cython 0.17.beta1 on Tue Aug 14 22:47:23 2012 */ +/* Generated by Cython 0.17.1 on Tue Oct 16 01:04:11 2012 */ #define PY_SSIZE_T_CLEAN #include "Python.h" @@ -11,7 +11,6 @@ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif - #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall @@ -23,22 +22,18 @@ #define __fastcall #endif #endif - #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif - #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif - #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif - #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 @@ -46,12 +41,12 @@ #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif - #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" + #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? 
PyNumber_Int(o) : \ @@ -63,8 +58,8 @@ #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" + #define CYTHON_FORMAT_SSIZE_T "z" #endif - #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) @@ -72,7 +67,6 @@ #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) - typedef struct { void *buf; PyObject *obj; @@ -86,7 +80,6 @@ Py_ssize_t *suboffsets; void *internal; } Py_buffer; - #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 @@ -98,11 +91,9 @@ #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) - typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif - #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ @@ -112,21 +103,16 @@ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif - #if PY_MAJOR_VERSION < 3 && PY_MINOR_VERSION < 6 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif - #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif - #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif - - #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? 
\ @@ -139,10 +125,8 @@ #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_READ(k, d, i) ((k=k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif - #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject @@ -150,7 +134,6 @@ #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif - #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type @@ -169,7 +152,6 @@ #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif - #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) @@ -177,9 +159,7 @@ #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif - #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) - #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type @@ -196,11 +176,9 @@ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif - #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif - #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong @@ -209,7 +187,6 @@ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif - #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) @@ -228,11 +205,9 @@ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif - #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif - #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) @@ -242,7 +217,6 @@ #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif - #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) @@ -251,6 +225,7 @@ #define __Pyx_DOCSTR(n) (n) #endif + #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) @@ -275,6 +250,10 @@ #define __PYX_HAVE_API___cdec #include "string.h" #include <string> +#include "ios" +#include "new" +#include "stdexcept" +#include "typeinfo" #include <vector> #include <utility> #include <iostream> @@ -400,11 +379,13 @@ static const char *__pyx_f[] = { "hypergraph.pxi", "lattice.pxi", "mteval.pxi", + "stringsource", "cdec.sa._sa.pxd", }; /*--- Type declarations ---*/ struct __pyx_obj_4cdec_2sa_3_sa_FeatureVector; +struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__; struct __pyx_obj_5_cdec_NTRef; struct __pyx_obj_5_cdec_Scorer; struct __pyx_obj_4cdec_2sa_3_sa_IntList; @@ -427,7 +408,6 @@ struct __pyx_obj_5_cdec_Candidate; struct __pyx_obj_5_cdec___pyx_scope_struct_6_genexpr; struct __pyx_obj_5_cdec_NT; struct __pyx_obj_4cdec_2sa_3_sa_FloatList; -struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__; struct __pyx_obj_5_cdec_HypergraphEdge; struct __pyx_obj_5_cdec___pyx_scope_struct_1___iter__; struct __pyx_obj_5_cdec___pyx_scope_struct_16___get__; @@ -459,7 +439,7 @@ struct __pyx_opt_args_5_cdec_as_str; /* "_cdec.pyx":6 * cimport decoder * - * cdef char* as_str(data, char* error_msg='Cannot convert type %s to str'): # <<<<<<<<<<<<<< + * cdef bytes as_str(data, char* error_msg='Cannot convert type %s to str'): # <<<<<<<<<<<<<< * cdef bytes ret * if isinstance(data, unicode): */ @@ -468,7 +448,7 @@ struct __pyx_opt_args_5_cdec_as_str { char *error_msg; }; -/* "/Users/vchahun/Sandbox/cdec/python/src/cdec.sa._sa.pxd":25 +/* "/home/vchahune/tools/cdec/python/src/cdec.sa._sa.pxd":25 * cdef void read_handle(self, FILE* f) * * cdef class FeatureVector: # <<<<<<<<<<<<<< @@ -482,7 +462,20 @@ struct __pyx_obj_4cdec_2sa_3_sa_FeatureVector { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":20 +/* "_cdec.pyx":47 + * cdef DenseVector weights + * + * def __init__(self, config_str=None, **config): # <<<<<<<<<<<<<< + * """Decoder('formalism = scfg') -> initialize from configuration string + * Decoder(formalism='scfg') -> initialize from named parameters + */ +struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ { + PyObject_HEAD + PyObject *__pyx_v_config; +}; + + +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":21 * return '[%s]' % self.cat * * cdef class NTRef: # <<<<<<<<<<<<<< @@ -495,7 +488,7 @@ struct __pyx_obj_5_cdec_NTRef { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":117 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":121 * return CandidateSet(self) * * cdef class Scorer: # <<<<<<<<<<<<<< @@ -509,7 +502,7 @@ struct __pyx_obj_5_cdec_Scorer { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/cdec.sa._sa.pxd":12 +/* "/home/vchahune/tools/cdec/python/src/cdec.sa._sa.pxd":12 * cdef void read_handle(self, FILE* f) * * cdef class IntList: # <<<<<<<<<<<<<< @@ -526,7 +519,7 @@ struct __pyx_obj_4cdec_2sa_3_sa_IntList { 
}; -/* "/Users/vchahun/Sandbox/cdec/python/src/cdec.sa._sa.pxd":29 +/* "/home/vchahune/tools/cdec/python/src/cdec.sa._sa.pxd":29 * cdef FloatList values * * cdef class Phrase: # <<<<<<<<<<<<<< @@ -543,7 +536,7 @@ struct __pyx_obj_4cdec_2sa_3_sa_Phrase { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":90 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":90 * return candidate * * def __iter__(self): # <<<<<<<<<<<<<< @@ -559,8 +552,8 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_22___iter__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":178 - * super(MRule, self).__init__(lhs, rhs, e, scores, a) +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":193 + * super(MRule, self).__init__(lhs, rhs, e, scores, None) * * cdef class Grammar: # <<<<<<<<<<<<<< * cdef shared_ptr[grammar.Grammar]* grammar @@ -572,9 +565,9 @@ struct __pyx_obj_5_cdec_Grammar { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":63 - * +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":63 * def todot(self): + * """lattice.todot() -> Representation of the lattice in GraphViz dot format.""" * def lines(): # <<<<<<<<<<<<<< * yield 'digraph lattice {' * yield 'rankdir = LR;' @@ -595,12 +588,12 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_20_lines { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":91 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":103 * del hypos * * def sample_trees(self, unsigned n): # <<<<<<<<<<<<<< + * """hg.sample_trees(n) -> Sample of n trees from the hypergraph.""" * cdef vector[string]* trees = new vector[string]() - * hypergraph.sample_trees(self.hg[0], n, self._rng(), trees) */ struct __pyx_obj_5_cdec___pyx_scope_struct_12_sample_trees { PyObject_HEAD @@ -613,7 +606,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_12_sample_trees { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":136 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":156 * * property edges: * def __get__(self): # <<<<<<<<<<<<<< @@ -629,7 +622,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_13___get__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":5 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":5 * import cdec.sa._sa as _sa * * def _phrase(phrase): # <<<<<<<<<<<<<< @@ -642,7 +635,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_2__phrase { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":65 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":65 * return result * * cdef class CandidateSet: # <<<<<<<<<<<<<< @@ -657,7 +650,7 @@ struct __pyx_obj_5_cdec_CandidateSet { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":142 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":162 * * property nodes: * def __get__(self): # <<<<<<<<<<<<<< @@ -673,7 +666,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_14___get__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":47 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":49 * return TRule(lhs, f, e, scores, a) * * cdef class TRule: # <<<<<<<<<<<<<< @@ -686,7 +679,7 @@ struct __pyx_obj_5_cdec_TRule { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/cdec.sa._sa.pxd":35 +/* "/home/vchahune/tools/cdec/python/src/cdec.sa._sa.pxd":35 * cdef public int chunklen(self, int k) * * cdef class Rule: # <<<<<<<<<<<<<< @@ -704,19 +697,19 @@ struct __pyx_obj_4cdec_2sa_3_sa_Rule { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":166 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":177 * _phrase(self.f), _phrase(self.e), scores) * * cdef class 
MRule(TRule): # <<<<<<<<<<<<<< - * def __init__(self, lhs, rhs, scores, a=None): - * cdef unsigned i = 1 + * def __init__(self, lhs, rhs, scores): + * """MRule(lhs, rhs, scores, a=None) -> Monolingual rule. */ struct __pyx_obj_5_cdec_MRule { struct __pyx_obj_5_cdec_TRule __pyx_base; }; -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":98 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":100 * self.cs.AddKBestCandidates(hypergraph.hg[0], k, self.scorer.get()) * * cdef class SegmentEvaluator: # <<<<<<<<<<<<<< @@ -730,7 +723,7 @@ struct __pyx_obj_5_cdec_SegmentEvaluator { }; -/* "_cdec.pyx":57 +/* "_cdec.pyx":56 * 'csplit', 'tagger', 'lexalign'): * raise InvalidConfig('formalism "%s" unknown' % formalism) * config_str = '\n'.join('%s = %s' % kv for kv in _make_config(config)) # <<<<<<<<<<<<<< @@ -739,7 +732,7 @@ struct __pyx_obj_5_cdec_SegmentEvaluator { */ struct __pyx_obj_5_cdec___pyx_scope_struct_25_genexpr { PyObject_HEAD - struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *__pyx_outer_scope; + struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *__pyx_outer_scope; PyObject *__pyx_v_kv; PyObject *__pyx_t_0; Py_ssize_t __pyx_t_1; @@ -747,12 +740,12 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_25_genexpr { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":62 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":61 * yield self[i] * * def todot(self): # <<<<<<<<<<<<<< + * """lattice.todot() -> Representation of the lattice in GraphViz dot format.""" * def lines(): - * yield 'digraph lattice {' */ struct __pyx_obj_5_cdec___pyx_scope_struct_19_todot { PyObject_HEAD @@ -760,7 +753,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_19_todot { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":12 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":12 * return stats * * cdef class Candidate: # <<<<<<<<<<<<<< @@ -774,7 +767,7 @@ struct __pyx_obj_5_cdec_Candidate { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":162 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":173 * * def __str__(self): * scores = ' '.join('%s=%s' % feat for feat in self.scores) # <<<<<<<<<<<<<< @@ -791,7 +784,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_6_genexpr { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":8 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":8 * return ' '.join(w.encode('utf8') if isinstance(w, unicode) else str(w) for w in phrase) * * cdef class NT: # <<<<<<<<<<<<<< @@ -805,7 +798,7 @@ struct __pyx_obj_5_cdec_NT { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/cdec.sa._sa.pxd":3 +/* "/home/vchahune/tools/cdec/python/src/cdec.sa._sa.pxd":3 * from libc.stdio cimport FILE * * cdef class FloatList: # <<<<<<<<<<<<<< @@ -822,20 +815,7 @@ struct __pyx_obj_4cdec_2sa_3_sa_FloatList { }; -/* "_cdec.pyx":46 - * cdef DenseVector weights - * - * def __cinit__(self, config_str=None, **config): # <<<<<<<<<<<<<< - * """ Configuration can be given as a string: - * Decoder('formalism = scfg') - */ -struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ { - PyObject_HEAD - PyObject *__pyx_v_config; -}; - - -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":170 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":191 * return vector * * cdef class HypergraphEdge: # <<<<<<<<<<<<<< @@ -851,7 +831,7 @@ struct __pyx_obj_5_cdec_HypergraphEdge { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":67 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":72 * self.vector.set_value(fid, value) * * def __iter__(self): # <<<<<<<<<<<<<< 
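The scope-struct rename in the declarations above (`__pyx_scope_struct_24___cinit__` to `__pyx_scope_struct_24___init__`) tracks `Decoder` construction in `_cdec.pyx` moving from `__cinit__` to a plain `__init__`, with the config genexpr closure renamed to match. Based on the docstring quoted in the `_cdec.pyx:47` context above, the resulting API can be used as follows (a usage sketch in plain Python; the `cdec` package name wrapping the `_cdec` module is assumed):

    import cdec
    # Both forms are equivalent, per the Decoder docstring in this diff:
    d1 = cdec.Decoder('formalism = scfg')   # initialize from configuration string
    d2 = cdec.Decoder(formalism='scfg')     # initialize from named parameters

An unsupported formalism raises `InvalidConfig('formalism "%s" unknown')`, per the `_cdec.pyx:56` context shown earlier in this section.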
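Relatedly, `as_str` changes from `cdef char* as_str(...)` to `cdef bytes as_str(...)`. A `char*` pointing into a locally created `bytes` object dangles once that temporary is released, and a `char*`-returning cdef function has no error return, so failures previously went through `__Pyx_WriteUnraisable`; returning the `bytes` object keeps the buffer owned by a live Python object and lets exceptions propagate normally (hence `__Pyx_AddTraceback` in the regenerated body below). A sketch of the fixed pattern, reconstructed from the source lines quoted in these hunks; the non-unicode branches are assumptions, not shown in the diff:

    cdef bytes as_str(data, char* error_msg='Cannot convert type %s to str'):
        cdef bytes ret
        if isinstance(data, unicode):
            ret = data.encode('utf8')
        elif isinstance(data, str):       # assumed: byte strings pass through unchanged
            ret = data
        else:                             # assumed: anything else is a conversion error
            raise TypeError(error_msg % type(data))
        return ret  # the caller holds a reference, so the underlying buffer stays valid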
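The `vectors.pxi` hunks further down add docstrings to the vector API (`DenseVector() -> Dense weight/feature vector.`, `vector.dot(SparseVector other) -> Dot product of the two vectors.`, `vector.tosparse() -> Equivalent SparseVector.`) without changing behavior. Pulling those docstrings together, a small usage sketch, assuming the public `cdec` package re-exports these types:

    import cdec
    w = cdec.DenseVector()       # DenseVector() -> Dense weight/feature vector.
    w['LanguageModel'] = 1.0     # feature names are interned via FDConvert; the vector grows as needed
    s = w.tosparse()             # vector.tosparse() -> Equivalent SparseVector.
    print(w.dot(s))              # vector.dot(...) -> Dot product; delegates to s.dot(w)
    # w['NoSuchFeature']         # would raise KeyError: ids outside the vector's range are rejected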
@@ -868,7 +848,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_1___iter__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":226 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":247 * * property in_edges: * def __get__(self): # <<<<<<<<<<<<<< @@ -884,7 +864,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_16___get__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":121 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":131 * * property a: * def __get__(self): # <<<<<<<<<<<<<< @@ -901,7 +881,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_4___get__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":57 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":56 * return unicode(str(self), 'utf8') * * def __iter__(self): # <<<<<<<<<<<<<< @@ -917,7 +897,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_18___iter__ { }; -/* "_cdec.pyx":42 +/* "_cdec.pyx":43 * yield key, str(value) * * cdef class Decoder: # <<<<<<<<<<<<<< @@ -931,7 +911,7 @@ struct __pyx_obj_5_cdec_Decoder { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":216 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":237 * raise NotImplemented('comparison not implemented for HypergraphEdge') * * cdef class HypergraphNode: # <<<<<<<<<<<<<< @@ -946,7 +926,7 @@ struct __pyx_obj_5_cdec_HypergraphNode { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":45 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":48 * return sparse * * cdef class SparseVector: # <<<<<<<<<<<<<< @@ -959,7 +939,7 @@ struct __pyx_obj_5_cdec_SparseVector { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":232 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":253 * * property out_edges: * def __get__(self): # <<<<<<<<<<<<<< @@ -975,7 +955,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_17___get__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":31 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":32 * self.vector[0][fid] = value * * def __iter__(self): # <<<<<<<<<<<<<< @@ -991,7 +971,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct____iter__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":44 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":44 * return self.stats.size() * * def __iter__(self): # <<<<<<<<<<<<<< @@ -1008,7 +988,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_21___iter__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":3 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":3 * from cython.operator cimport preincrement as pinc * * cdef class DenseVector: # <<<<<<<<<<<<<< @@ -1022,7 +1002,7 @@ struct __pyx_obj_5_cdec_DenseVector { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":184 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":199 * del self.grammar * * def __iter__(self): # <<<<<<<<<<<<<< @@ -1041,7 +1021,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_7___iter__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":172 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":176 * out.fields[i] = ss[i] * * cdef class Metric: # <<<<<<<<<<<<<< @@ -1054,7 +1034,7 @@ struct __pyx_obj_5_cdec_Metric { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":26 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":26 * return fmap * * cdef class SufficientStats: # <<<<<<<<<<<<<< @@ -1068,12 +1048,12 @@ struct __pyx_obj_5_cdec_SufficientStats { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":36 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":44 * return 
unicode(hypergraph.JoshuaVisualizationString(self.hg[0]).c_str(), 'utf8') * * def kbest(self, size): # <<<<<<<<<<<<<< + * """hg.kbest(size) -> List of k-best hypotheses in the hypergraph.""" * cdef kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal]* derivations = new kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal](self.hg[0], size) - * cdef kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal].Derivation* derivation */ struct __pyx_obj_5_cdec___pyx_scope_struct_8_kbest { PyObject_HEAD @@ -1087,12 +1067,12 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_8_kbest { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":66 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":76 * del e_derivations * * def kbest_features(self, size): # <<<<<<<<<<<<<< + * """hg.kbest_trees(size) -> List of k-best feature vectors in the hypergraph.""" * cdef kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal]* derivations = new kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal](self.hg[0], size) - * cdef kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal].Derivation* derivation */ struct __pyx_obj_5_cdec___pyx_scope_struct_10_kbest_features { PyObject_HEAD @@ -1107,7 +1087,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_10_kbest_features { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":190 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":211 * * property tail_nodes: * def __get__(self): # <<<<<<<<<<<<<< @@ -1123,8 +1103,8 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_15___get__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":161 - * self.rule.get().lhs_ = -TDConvert(<char *>lhs.cat) +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":172 + * self.rule.get().lhs_ = -TDConvert((<NT> lhs).cat) * * def __str__(self): # <<<<<<<<<<<<<< * scores = ' '.join('%s=%s' % feat for feat in self.scores) @@ -1136,7 +1116,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_5___str__ { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":6 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":6 * * def _phrase(phrase): * return ' '.join(w.encode('utf8') if isinstance(w, unicode) else str(w) for w in phrase) # <<<<<<<<<<<<<< @@ -1153,12 +1133,12 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_3_genexpr { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":48 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":57 * del derivations * * def kbest_trees(self, size): # <<<<<<<<<<<<<< + * """hg.kbest_trees(size) -> List of k-best trees in the hypergraph.""" * cdef kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal]* f_derivations = new kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal](self.hg[0], size) - * cdef kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal].Derivation* f_derivation */ struct __pyx_obj_5_cdec___pyx_scope_struct_9_kbest_trees { PyObject_HEAD @@ -1176,7 +1156,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_9_kbest_trees { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":4 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":4 * cimport kbest * * cdef class Hypergraph: # <<<<<<<<<<<<<< @@ -1191,7 +1171,7 @@ struct __pyx_obj_5_cdec_Hypergraph { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":3 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":3 * cimport lattice * * cdef class Lattice: # <<<<<<<<<<<<<< @@ -1204,12 +1184,12 @@ struct __pyx_obj_5_cdec_Lattice { 
}; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":81 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":92 * del derivations * * def sample(self, unsigned n): # <<<<<<<<<<<<<< + * """hg.sample(n) -> Sample of n hypotheses from the hypergraph.""" * cdef vector[hypergraph.Hypothesis]* hypos = new vector[hypergraph.Hypothesis]() - * hypergraph.sample_hypotheses(self.hg[0], n, self._rng(), hypos) */ struct __pyx_obj_5_cdec___pyx_scope_struct_11_sample { PyObject_HEAD @@ -1222,7 +1202,7 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_11_sample { }; -/* "_cdec.pyx":31 +/* "_cdec.pyx":32 * SetSilent(yn) * * def _make_config(config): # <<<<<<<<<<<<<< @@ -1245,12 +1225,12 @@ struct __pyx_obj_5_cdec___pyx_scope_struct_23__make_config { }; -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":201 - * self.grammar.get().SetGrammarName(string(<char *>name)) +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":217 + * self.grammar.get().SetGrammarName(name) * * cdef class TextGrammar(Grammar): # <<<<<<<<<<<<<< - * def __cinit__(self, rules): - * self.grammar = new shared_ptr[grammar.Grammar](new grammar.TextGrammar()) + * def __init__(self, rules): + * """TextGrammar(rules) -> SCFG Grammar containing the rules.""" */ struct __pyx_obj_5_cdec_TextGrammar { struct __pyx_obj_5_cdec_Grammar __pyx_base; @@ -1258,7 +1238,7 @@ struct __pyx_obj_5_cdec_TextGrammar { -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":4 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":4 * cimport kbest * * cdef class Hypergraph: # <<<<<<<<<<<<<< @@ -1272,7 +1252,7 @@ struct __pyx_vtabstruct_5_cdec_Hypergraph { static struct __pyx_vtabstruct_5_cdec_Hypergraph *__pyx_vtabptr_5_cdec_Hypergraph; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":170 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":191 * return vector * * cdef class HypergraphEdge: # <<<<<<<<<<<<<< @@ -1286,7 +1266,7 @@ struct __pyx_vtabstruct_5_cdec_HypergraphEdge { static struct __pyx_vtabstruct_5_cdec_HypergraphEdge *__pyx_vtabptr_5_cdec_HypergraphEdge; -/* "/Users/vchahun/Sandbox/cdec/python/src/cdec.sa._sa.pxd":12 +/* "/home/vchahune/tools/cdec/python/src/cdec.sa._sa.pxd":12 * cdef void read_handle(self, FILE* f) * * cdef class IntList: # <<<<<<<<<<<<<< @@ -1306,7 +1286,7 @@ struct __pyx_vtabstruct_4cdec_2sa_3_sa_IntList { static struct __pyx_vtabstruct_4cdec_2sa_3_sa_IntList *__pyx_vtabptr_4cdec_2sa_3_sa_IntList; -/* "/Users/vchahun/Sandbox/cdec/python/src/cdec.sa._sa.pxd":3 +/* "/home/vchahune/tools/cdec/python/src/cdec.sa._sa.pxd":3 * from libc.stdio cimport FILE * * cdef class FloatList: # <<<<<<<<<<<<<< @@ -1322,7 +1302,7 @@ struct __pyx_vtabstruct_4cdec_2sa_3_sa_FloatList { static struct __pyx_vtabstruct_4cdec_2sa_3_sa_FloatList *__pyx_vtabptr_4cdec_2sa_3_sa_FloatList; -/* "/Users/vchahun/Sandbox/cdec/python/src/cdec.sa._sa.pxd":29 +/* "/home/vchahune/tools/cdec/python/src/cdec.sa._sa.pxd":29 * cdef FloatList values * * cdef class Phrase: # <<<<<<<<<<<<<< @@ -1337,7 +1317,7 @@ struct __pyx_vtabstruct_4cdec_2sa_3_sa_Phrase { static struct __pyx_vtabstruct_4cdec_2sa_3_sa_Phrase *__pyx_vtabptr_4cdec_2sa_3_sa_Phrase; -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":216 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":237 * raise NotImplemented('comparison not implemented for HypergraphEdge') * * cdef class HypergraphNode: # <<<<<<<<<<<<<< @@ -1542,11 +1522,9 @@ static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, 
Py_ssize_t expected); /*proto*/ -static CYTHON_INLINE int __Pyx_NegateNonNeg(int b) { - return unlikely(b < 0) ? b : !b; -} -static CYTHON_INLINE PyObject* __Pyx_PyBoolOrNull_FromLong(long b) { - return unlikely(b < 0) ? NULL : __Pyx_PyBool_FromLong(b); +static CYTHON_INLINE int __Pyx_PyDict_Contains(PyObject* item, PyObject* dict, int eq) { + int result = PyDict_Contains(dict, item); + return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); } #define __Pyx_PyIter_Next(obj) __Pyx_PyIter_Next2(obj, NULL) @@ -1661,18 +1639,6 @@ static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, PyObject *tuple); static int __Pyx_CyFunction_init(void); -#include <string.h> - -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/ - -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/ - -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals -#else -#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals -#endif - static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); @@ -1705,9 +1671,6 @@ static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); -static void __Pyx_WriteUnraisable(const char *name, int clineno, - int lineno, const char *filename); /*proto*/ - static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ #define __Pyx_Generator_USED @@ -1718,14 +1681,14 @@ typedef struct { PyObject_HEAD __pyx_generator_body_t body; PyObject *closure; - int is_running; - int resume_label; PyObject *exc_type; PyObject *exc_value; PyObject *exc_traceback; PyObject *gi_weakreflist; PyObject *classobj; PyObject *yieldfrom; + int resume_label; + char is_running; // using T_BOOL for property below requires char value } __pyx_GeneratorObject; static __pyx_GeneratorObject *__Pyx_Generator_New(__pyx_generator_body_t body, PyObject *closure); @@ -1737,6 +1700,9 @@ static int __Pyx_PyGen_FetchStopIterationValue(PyObject **pvalue); #define __Pyx_PyGen_FetchStopIterationValue(pvalue) PyGen_FetchStopIterationValue(pvalue) #endif +static void __Pyx_WriteUnraisable(const char *name, int clineno, + int lineno, const char *filename); /*proto*/ + static int __Pyx_check_binary_version(void); static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/ @@ -1749,10 +1715,10 @@ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/ #endif #endif -static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ - static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ + static void* __Pyx_GetVtable(PyObject *dict); /*proto*/ static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /*proto*/ @@ -1858,13 +1824,14 @@ static PyTypeObject *__pyx_ptype_5_cdec___pyx_scope_struct_20_lines = 0; static PyTypeObject *__pyx_ptype_5_cdec___pyx_scope_struct_21___iter__ = 0; static PyTypeObject *__pyx_ptype_5_cdec___pyx_scope_struct_22___iter__ = 0; static PyTypeObject *__pyx_ptype_5_cdec___pyx_scope_struct_23__make_config = 0; -static PyTypeObject *__pyx_ptype_5_cdec___pyx_scope_struct_24___cinit__ = 0; +static 
PyTypeObject *__pyx_ptype_5_cdec___pyx_scope_struct_24___init__ = 0; static PyTypeObject *__pyx_ptype_5_cdec___pyx_scope_struct_25_genexpr = 0; -static char *__pyx_f_5_cdec_as_str(PyObject *, struct __pyx_opt_args_5_cdec_as_str *__pyx_optional_args); /*proto*/ +static PyObject *__pyx_f_5_cdec_as_str(PyObject *, struct __pyx_opt_args_5_cdec_as_str *__pyx_optional_args); /*proto*/ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_obj_4cdec_2sa_3_sa_Rule *); /*proto*/ static struct __pyx_obj_5_cdec_SufficientStats *__pyx_f_5_cdec_as_stats(PyObject *, PyObject *); /*proto*/ static float __pyx_f_5_cdec__compute_score(void *, SufficientStats *); /*proto*/ static void __pyx_f_5_cdec__compute_sufficient_stats(void *, std::string *, std::vector<std::string> *, SufficientStats *); /*proto*/ +static std::string __pyx_convert_string_from_py_(PyObject *); /*proto*/ #define __Pyx_MODULE_NAME "_cdec" int __pyx_module_is_main__cdec = 0; @@ -1940,12 +1907,12 @@ static PyObject *__pyx_pf_5_cdec_5TRule_3lhs___get__(struct __pyx_obj_5_cdec_TRu static int __pyx_pf_5_cdec_5TRule_3lhs_2__set__(struct __pyx_obj_5_cdec_TRule *__pyx_v_self, PyObject *__pyx_v_lhs); /* proto */ static PyObject *__pyx_pf_5_cdec_5TRule_7__str___genexpr(PyObject *__pyx_self); /* proto */ static PyObject *__pyx_pf_5_cdec_5TRule_4__str__(struct __pyx_obj_5_cdec_TRule *__pyx_v_self); /* proto */ -static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_v_self, PyObject *__pyx_v_lhs, PyObject *__pyx_v_rhs, PyObject *__pyx_v_scores, PyObject *__pyx_v_a); /* proto */ +static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_v_self, PyObject *__pyx_v_lhs, PyObject *__pyx_v_rhs, PyObject *__pyx_v_scores); /* proto */ static void __pyx_pf_5_cdec_7Grammar___dealloc__(CYTHON_UNUSED struct __pyx_obj_5_cdec_Grammar *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_5_cdec_7Grammar_2__iter__(struct __pyx_obj_5_cdec_Grammar *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_5_cdec_7Grammar_4name___get__(struct __pyx_obj_5_cdec_Grammar *__pyx_v_self); /* proto */ static int __pyx_pf_5_cdec_7Grammar_4name_2__set__(struct __pyx_obj_5_cdec_Grammar *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ -static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextGrammar *__pyx_v_self, PyObject *__pyx_v_rules); /* proto */ +static int __pyx_pf_5_cdec_11TextGrammar___init__(struct __pyx_obj_5_cdec_TextGrammar *__pyx_v_self, PyObject *__pyx_v_rules); /* proto */ static void __pyx_pf_5_cdec_10Hypergraph___dealloc__(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_2viterbi(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_4viterbi_trees(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self); /* proto */ @@ -2024,8 +1991,8 @@ static PyObject *__pyx_pf_5_cdec_6Metric_4score(CYTHON_UNUSED struct __pyx_obj_5 static PyObject *__pyx_pf_5_cdec_6Metric_6evaluate(CYTHON_UNUSED struct __pyx_obj_5_cdec_Metric *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_hyp, CYTHON_UNUSED PyObject *__pyx_v_refs); /* proto */ static PyObject *__pyx_pf_5_cdec_2set_silent(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_yn); /* proto */ static PyObject *__pyx_pf_5_cdec_4_make_config(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_config); /* proto */ -static PyObject *__pyx_pf_5_cdec_7Decoder_9__cinit___genexpr(PyObject *__pyx_self); /* proto */ -static int 
__pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *__pyx_v_self, PyObject *__pyx_v_config_str, PyObject *__pyx_v_config); /* proto */ +static PyObject *__pyx_pf_5_cdec_7Decoder_8__init___genexpr(PyObject *__pyx_self); /* proto */ +static int __pyx_pf_5_cdec_7Decoder___init__(struct __pyx_obj_5_cdec_Decoder *__pyx_v_self, PyObject *__pyx_v_config_str, PyObject *__pyx_v_config); /* proto */ static void __pyx_pf_5_cdec_7Decoder_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_5_cdec_Decoder *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_5_cdec_7Decoder_7weights___get__(struct __pyx_obj_5_cdec_Decoder *__pyx_v_self); /* proto */ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_Decoder *__pyx_v_self, PyObject *__pyx_v_weights); /* proto */ @@ -2047,30 +2014,30 @@ static char __pyx_k_16[] = "csplit_preserve_full_word"; static char __pyx_k_17[] = "cannot reweight hypergraph with %s"; static char __pyx_k_18[] = "comparison not implemented for HypergraphEdge"; static char __pyx_k_20[] = "comparison not implemented for HypergraphNode"; -static char __pyx_k_23[] = "cannot create lattice from %s"; -static char __pyx_k_24[] = "lattice index out of range"; -static char __pyx_k_28[] = "digraph lattice {"; - static char __pyx_k_29[] = "rankdir = LR;"; - static char __pyx_k_30[] = "node [shape=circle];"; - static char __pyx_k_31[] = "%d -> %d [label=\"%s\"];"; - static char __pyx_k_32[] = "\""; - static char __pyx_k_33[] = "\\\""; - static char __pyx_k_35[] = "%d [shape=doublecircle]"; -static char __pyx_k_36[] = "}"; -static char __pyx_k_39[] = "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi"; -static char __pyx_k_40[] = "\n"; -static char __pyx_k_42[] = "sufficient stats vector index out of range"; -static char __pyx_k_44[] = "candidate set index out of range"; -static char __pyx_k_46[] = "%s %s"; -static char __pyx_k_47[] = "%s = %s"; -static char __pyx_k_49[] = "formalism \"%s\" unknown"; -static char __pyx_k_50[] = "cannot initialize weights with %s"; -static char __pyx_k_51[] = "#"; -static char __pyx_k_54[] = "Cannot translate input type %s"; -static char __pyx_k_55[] = "cdec.sa._sa"; -static char __pyx_k_56[] = "*"; -static char __pyx_k_59[] = "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi"; -static char __pyx_k_65[] = "/Users/vchahun/Sandbox/cdec/python/src/_cdec.pyx"; +static char __pyx_k_22[] = "cannot create lattice from %s"; +static char __pyx_k_23[] = "lattice index out of range"; +static char __pyx_k_26[] = "digraph lattice {"; + static char __pyx_k_27[] = "rankdir = LR;"; + static char __pyx_k_28[] = "node [shape=circle];"; + static char __pyx_k_29[] = "%d -> %d [label=\"%s\"];"; + static char __pyx_k_30[] = "\""; + static char __pyx_k_31[] = "\\\""; + static char __pyx_k_33[] = "%d [shape=doublecircle]"; +static char __pyx_k_34[] = "}"; +static char __pyx_k_37[] = "/home/vchahune/tools/cdec/python/src/lattice.pxi"; +static char __pyx_k_38[] = "\n"; +static char __pyx_k_40[] = "sufficient stats vector index out of range"; +static char __pyx_k_42[] = "candidate set index out of range"; +static char __pyx_k_44[] = "%s %s"; +static char __pyx_k_45[] = "%s = %s"; +static char __pyx_k_47[] = "formalism \"%s\" unknown"; +static char __pyx_k_48[] = "cannot initialize weights with %s"; +static char __pyx_k_49[] = "#"; +static char __pyx_k_52[] = "Cannot translate input type %s"; +static char __pyx_k_53[] = "cdec.sa._sa"; +static char __pyx_k_54[] = "*"; +static char __pyx_k_57[] = "/home/vchahune/tools/cdec/python/src/grammar.pxi"; +static 
char __pyx_k_63[] = "/home/vchahune/tools/cdec/python/src/_cdec.pyx"; static char __pyx_k__a[] = "a"; static char __pyx_k__e[] = "e"; static char __pyx_k__f[] = "f"; @@ -2170,32 +2137,32 @@ static PyObject *__pyx_n_s_16; static PyObject *__pyx_kp_s_17; static PyObject *__pyx_kp_s_18; static PyObject *__pyx_kp_s_20; +static PyObject *__pyx_kp_s_22; static PyObject *__pyx_kp_s_23; -static PyObject *__pyx_kp_s_24; +static PyObject *__pyx_kp_s_26; +static PyObject *__pyx_kp_s_27; static PyObject *__pyx_kp_s_28; static PyObject *__pyx_kp_s_29; static PyObject *__pyx_kp_s_3; static PyObject *__pyx_kp_s_30; static PyObject *__pyx_kp_s_31; -static PyObject *__pyx_kp_s_32; static PyObject *__pyx_kp_s_33; -static PyObject *__pyx_kp_s_35; -static PyObject *__pyx_kp_s_36; -static PyObject *__pyx_kp_s_39; +static PyObject *__pyx_kp_s_34; +static PyObject *__pyx_kp_s_37; +static PyObject *__pyx_kp_s_38; static PyObject *__pyx_kp_s_4; static PyObject *__pyx_kp_s_40; static PyObject *__pyx_kp_s_42; static PyObject *__pyx_kp_s_44; -static PyObject *__pyx_kp_s_46; +static PyObject *__pyx_kp_s_45; static PyObject *__pyx_kp_s_47; +static PyObject *__pyx_kp_s_48; static PyObject *__pyx_kp_s_49; -static PyObject *__pyx_kp_s_50; -static PyObject *__pyx_kp_s_51; -static PyObject *__pyx_kp_s_54; -static PyObject *__pyx_n_s_55; -static PyObject *__pyx_n_s_56; -static PyObject *__pyx_kp_s_59; -static PyObject *__pyx_kp_s_65; +static PyObject *__pyx_kp_s_52; +static PyObject *__pyx_n_s_53; +static PyObject *__pyx_n_s_54; +static PyObject *__pyx_kp_s_57; +static PyObject *__pyx_kp_s_63; static PyObject *__pyx_kp_s_7; static PyObject *__pyx_kp_s_8; static PyObject *__pyx_kp_s_9; @@ -2297,47 +2264,44 @@ static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_14; static PyObject *__pyx_k_tuple_19; static PyObject *__pyx_k_tuple_21; -static PyObject *__pyx_k_tuple_22; +static PyObject *__pyx_k_tuple_24; static PyObject *__pyx_k_tuple_25; -static PyObject *__pyx_k_tuple_26; -static PyObject *__pyx_k_tuple_27; -static PyObject *__pyx_k_tuple_34; -static PyObject *__pyx_k_tuple_37; +static PyObject *__pyx_k_tuple_32; +static PyObject *__pyx_k_tuple_35; +static PyObject *__pyx_k_tuple_39; static PyObject *__pyx_k_tuple_41; static PyObject *__pyx_k_tuple_43; -static PyObject *__pyx_k_tuple_45; -static PyObject *__pyx_k_tuple_48; -static PyObject *__pyx_k_tuple_52; -static PyObject *__pyx_k_tuple_53; -static PyObject *__pyx_k_tuple_57; +static PyObject *__pyx_k_tuple_46; +static PyObject *__pyx_k_tuple_50; +static PyObject *__pyx_k_tuple_51; +static PyObject *__pyx_k_tuple_55; +static PyObject *__pyx_k_tuple_58; +static PyObject *__pyx_k_tuple_59; static PyObject *__pyx_k_tuple_60; static PyObject *__pyx_k_tuple_61; -static PyObject *__pyx_k_tuple_62; -static PyObject *__pyx_k_tuple_63; -static PyObject *__pyx_k_tuple_66; -static PyObject *__pyx_k_codeobj_38; -static PyObject *__pyx_k_codeobj_58; -static PyObject *__pyx_k_codeobj_64; -static PyObject *__pyx_k_codeobj_67; +static PyObject *__pyx_k_tuple_64; +static PyObject *__pyx_k_codeobj_36; +static PyObject *__pyx_k_codeobj_56; +static PyObject *__pyx_k_codeobj_62; +static PyObject *__pyx_k_codeobj_65; /* "_cdec.pyx":6 * cimport decoder * - * cdef char* as_str(data, char* error_msg='Cannot convert type %s to str'): # <<<<<<<<<<<<<< + * cdef bytes as_str(data, char* error_msg='Cannot convert type %s to str'): # <<<<<<<<<<<<<< * cdef bytes ret * if isinstance(data, unicode): */ -static char *__pyx_f_5_cdec_as_str(PyObject *__pyx_v_data, struct 
__pyx_opt_args_5_cdec_as_str *__pyx_optional_args) { +static PyObject *__pyx_f_5_cdec_as_str(PyObject *__pyx_v_data, struct __pyx_opt_args_5_cdec_as_str *__pyx_optional_args) { char *__pyx_v_error_msg = ((char *)__pyx_k_1); PyObject *__pyx_v_ret = 0; - char *__pyx_r; + PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; - char *__pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; @@ -2349,7 +2313,7 @@ static char *__pyx_f_5_cdec_as_str(PyObject *__pyx_v_data, struct __pyx_opt_args } /* "_cdec.pyx":8 - * cdef char* as_str(data, char* error_msg='Cannot convert type %s to str'): + * cdef bytes as_str(data, char* error_msg='Cannot convert type %s to str'): * cdef bytes ret * if isinstance(data, unicode): # <<<<<<<<<<<<<< * ret = data.encode('utf8') @@ -2448,26 +2412,32 @@ static char *__pyx_f_5_cdec_as_str(PyObject *__pyx_v_data, struct __pyx_opt_args * * include "vectors.pxi" */ - __pyx_t_5 = PyBytes_AsString(((PyObject *)__pyx_v_ret)); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_r = __pyx_t_5; + __Pyx_XDECREF(((PyObject *)__pyx_r)); + __Pyx_INCREF(((PyObject *)__pyx_v_ret)); + __pyx_r = __pyx_v_ret; goto __pyx_L0; - __pyx_r = 0; + __pyx_r = ((PyObject*)Py_None); __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); - __Pyx_WriteUnraisable("_cdec.as_str", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_AddTraceback("_cdec.as_str", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_ret); + __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ static int __pyx_pw_5_cdec_11DenseVector_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_11DenseVector___init__[] = "DenseVector() -> Dense weight/feature vector."; +#if CYTHON_COMPILING_IN_CPYTHON +struct wrapperbase __pyx_wrapperbase_5_cdec_11DenseVector___init__; +#endif static int __pyx_pw_5_cdec_11DenseVector_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { int __pyx_r; __Pyx_RefNannyDeclarations @@ -2480,30 +2450,35 @@ static int __pyx_pw_5_cdec_11DenseVector_1__init__(PyObject *__pyx_v_self, PyObj return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":7 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":7 * cdef bint owned # if True, do not manage memory * * def __init__(self): # <<<<<<<<<<<<<< + * """DenseVector() -> Dense weight/feature vector.""" * self.vector = new vector[weight_t]() - * self.owned = False */ static int __pyx_pf_5_cdec_11DenseVector___init__(struct __pyx_obj_5_cdec_DenseVector *__pyx_v_self) { int __pyx_r; __Pyx_RefNannyDeclarations + std::vector<weight_t> *__pyx_t_1; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":8 - * + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":9 * def __init__(self): + * """DenseVector() -> Dense weight/feature vector.""" * self.vector = new vector[weight_t]() # <<<<<<<<<<<<<< * self.owned = False * */ - __pyx_v_self->vector = new std::vector<weight_t>(); + try {__pyx_t_1 = new std::vector<weight_t>();} catch(...) 
{__Pyx_CppExn2PyErr(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}} + __pyx_v_self->vector = __pyx_t_1; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":9 - * def __init__(self): + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":10 + * """DenseVector() -> Dense weight/feature vector.""" * self.vector = new vector[weight_t]() * self.owned = False # <<<<<<<<<<<<<< * @@ -2512,6 +2487,11 @@ static int __pyx_pf_5_cdec_11DenseVector___init__(struct __pyx_obj_5_cdec_DenseV __pyx_v_self->owned = 0; __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("_cdec.DenseVector.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } @@ -2525,7 +2505,7 @@ static void __pyx_pw_5_cdec_11DenseVector_3__dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":11 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":12 * self.owned = False * * def __dealloc__(self): # <<<<<<<<<<<<<< @@ -2538,7 +2518,7 @@ static void __pyx_pf_5_cdec_11DenseVector_2__dealloc__(struct __pyx_obj_5_cdec_D int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":12 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":13 * * def __dealloc__(self): * if not self.owned: # <<<<<<<<<<<<<< @@ -2548,7 +2528,7 @@ static void __pyx_pf_5_cdec_11DenseVector_2__dealloc__(struct __pyx_obj_5_cdec_D __pyx_t_1 = (!__pyx_v_self->owned); if (__pyx_t_1) { - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":13 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":14 * def __dealloc__(self): * if not self.owned: * del self.vector # <<<<<<<<<<<<<< @@ -2574,7 +2554,7 @@ static Py_ssize_t __pyx_pw_5_cdec_11DenseVector_5__len__(PyObject *__pyx_v_self) return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":15 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":16 * del self.vector * * def __len__(self): # <<<<<<<<<<<<<< @@ -2587,7 +2567,7 @@ static Py_ssize_t __pyx_pf_5_cdec_11DenseVector_4__len__(struct __pyx_obj_5_cdec __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":16 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":17 * * def __len__(self): * return self.vector.size() # <<<<<<<<<<<<<< @@ -2611,7 +2591,7 @@ static PyObject *__pyx_pw_5_cdec_11DenseVector_7__getitem__(PyObject *__pyx_v_se __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); assert(__pyx_arg_fname); { - __pyx_v_fname = PyBytes_AsString(__pyx_arg_fname); if (unlikely((!__pyx_v_fname) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_fname = PyBytes_AsString(__pyx_arg_fname); if (unlikely((!__pyx_v_fname) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; @@ -2624,7 +2604,7 @@ static PyObject *__pyx_pw_5_cdec_11DenseVector_7__getitem__(PyObject *__pyx_v_se return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":18 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":19 * return self.vector.size() * * def __getitem__(self, char* fname): # <<<<<<<<<<<<<< @@ -2644,7 +2624,7 @@ static PyObject 
*__pyx_pf_5_cdec_11DenseVector_6__getitem__(struct __pyx_obj_5_c int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":19 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":20 * * def __getitem__(self, char* fname): * cdef int fid = FDConvert(fname) # <<<<<<<<<<<<<< @@ -2653,7 +2633,7 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_6__getitem__(struct __pyx_obj_5_c */ __pyx_v_fid = FD::Convert(__pyx_v_fname); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":20 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":21 * def __getitem__(self, char* fname): * cdef int fid = FDConvert(fname) * if 0 <= fid < self.vector.size(): # <<<<<<<<<<<<<< @@ -2666,7 +2646,7 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_6__getitem__(struct __pyx_obj_5_c } if (__pyx_t_1) { - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":21 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":22 * cdef int fid = FDConvert(fname) * if 0 <= fid < self.vector.size(): * return self.vector[0][fid] # <<<<<<<<<<<<<< @@ -2674,7 +2654,7 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_6__getitem__(struct __pyx_obj_5_c * */ __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyFloat_FromDouble(((__pyx_v_self->vector[0])[__pyx_v_fid])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyFloat_FromDouble(((__pyx_v_self->vector[0])[__pyx_v_fid])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; @@ -2683,26 +2663,26 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_6__getitem__(struct __pyx_obj_5_c } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":22 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":23 * if 0 <= fid < self.vector.size(): * return self.vector[0][fid] * raise KeyError(fname) # <<<<<<<<<<<<<< * * def __setitem__(self, char* fname, float value): */ - __pyx_t_2 = PyBytes_FromString(__pyx_v_fname); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyBytes_FromString(__pyx_v_fname); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2)); __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_builtin_KeyError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_builtin_KeyError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 22; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[1]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; @@ -2726,10 +2706,10 @@ static int __pyx_pw_5_cdec_11DenseVector_9__setitem__(PyObject *__pyx_v_self, Py __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); assert(__pyx_arg_fname); { - __pyx_v_fname = PyBytes_AsString(__pyx_arg_fname); if (unlikely((!__pyx_v_fname) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_fname = PyBytes_AsString(__pyx_arg_fname); if (unlikely((!__pyx_v_fname) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } assert(__pyx_arg_value); { - __pyx_v_value = __pyx_PyFloat_AsFloat(__pyx_arg_value); if (unlikely((__pyx_v_value == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_value = __pyx_PyFloat_AsFloat(__pyx_arg_value); if (unlikely((__pyx_v_value == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; @@ -2742,11 +2722,11 @@ static int __pyx_pw_5_cdec_11DenseVector_9__setitem__(PyObject *__pyx_v_self, Py return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":24 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":25 * raise KeyError(fname) * * def __setitem__(self, char* fname, float value): # <<<<<<<<<<<<<< - * cdef int fid = FDConvert(<char *>fname) + * cdef int fid = FDConvert(fname) * if fid < 0: raise KeyError(fname) */ @@ -2762,43 +2742,43 @@ static int __pyx_pf_5_cdec_11DenseVector_8__setitem__(struct __pyx_obj_5_cdec_De int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":25 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":26 * * def __setitem__(self, char* fname, float value): - * cdef int fid = FDConvert(<char *>fname) # <<<<<<<<<<<<<< + * cdef int fid = FDConvert(fname) # <<<<<<<<<<<<<< * if fid < 0: raise KeyError(fname) * if self.vector.size() <= fid: */ - __pyx_v_fid = FD::Convert(((char *)__pyx_v_fname)); + __pyx_v_fid = FD::Convert(__pyx_v_fname); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":26 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":27 * def __setitem__(self, char* fname, float value): - * cdef int fid = FDConvert(<char *>fname) + * cdef int fid = FDConvert(fname) * if fid < 0: raise KeyError(fname) # <<<<<<<<<<<<<< * if self.vector.size() <= fid: * self.vector.resize(fid + 1) */ __pyx_t_1 = (__pyx_v_fid < 0); if (__pyx_t_1) { - __pyx_t_2 = PyBytes_FromString(__pyx_v_fname); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyBytes_FromString(__pyx_v_fname); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); 
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2)); __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_builtin_KeyError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_builtin_KeyError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[1]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":27 - * cdef int fid = FDConvert(<char *>fname) + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":28 + * cdef int fid = FDConvert(fname) * if fid < 0: raise KeyError(fname) * if self.vector.size() <= fid: # <<<<<<<<<<<<<< * self.vector.resize(fid + 1) @@ -2807,7 +2787,7 @@ static int __pyx_pf_5_cdec_11DenseVector_8__setitem__(struct __pyx_obj_5_cdec_De __pyx_t_1 = (__pyx_v_self->vector->size() <= __pyx_v_fid); if (__pyx_t_1) { - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":28 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":29 * if fid < 0: raise KeyError(fname) * if self.vector.size() <= fid: * self.vector.resize(fid + 1) # <<<<<<<<<<<<<< @@ -2819,7 +2799,7 @@ static int __pyx_pf_5_cdec_11DenseVector_8__setitem__(struct __pyx_obj_5_cdec_De } __pyx_L4:; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":29 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":30 * if self.vector.size() <= fid: * self.vector.resize(fid + 1) * self.vector[0][fid] = value # <<<<<<<<<<<<<< @@ -2852,7 +2832,7 @@ static PyObject *__pyx_pw_5_cdec_11DenseVector_11__iter__(PyObject *__pyx_v_self return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":31 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":32 * self.vector[0][fid] = value * * def __iter__(self): # <<<<<<<<<<<<<< @@ -2878,7 +2858,7 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_10__iter__(struct __pyx_obj_5_cde __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_11DenseVector_12generator, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_11DenseVector_12generator, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -2915,9 +2895,9 @@ static PyObject *__pyx_gb_5_cdec_11DenseVector_12generator(__pyx_GeneratorObject return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* 
"/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":33 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":34 * def __iter__(self): * cdef unsigned fid * for fid in range(1, self.vector.size()): # <<<<<<<<<<<<<< @@ -2928,26 +2908,26 @@ static PyObject *__pyx_gb_5_cdec_11DenseVector_12generator(__pyx_GeneratorObject for (__pyx_t_2 = 1; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_fid = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":34 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":35 * cdef unsigned fid * for fid in range(1, self.vector.size()): * yield str(FDConvert(fid).c_str()), self.vector[0][fid] # <<<<<<<<<<<<<< * * def dot(self, SparseVector other): */ - __pyx_t_3 = PyBytes_FromString(FD::Convert(__pyx_cur_scope->__pyx_v_fid).c_str()); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyBytes_FromString(FD::Convert(__pyx_cur_scope->__pyx_v_fid).c_str()); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_3)); __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = PyFloat_FromDouble(((__pyx_cur_scope->__pyx_v_self->vector[0])[__pyx_cur_scope->__pyx_v_fid])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyFloat_FromDouble(((__pyx_cur_scope->__pyx_v_self->vector[0])[__pyx_cur_scope->__pyx_v_fid])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); @@ -2967,7 +2947,7 @@ static PyObject *__pyx_gb_5_cdec_11DenseVector_12generator(__pyx_GeneratorObject __pyx_L6_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } 
PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; @@ -2986,11 +2966,12 @@ static PyObject *__pyx_gb_5_cdec_11DenseVector_12generator(__pyx_GeneratorObject /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_11DenseVector_14dot(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/ +static char __pyx_doc_5_cdec_11DenseVector_13dot[] = "vector.dot(SparseVector other) -> Dot product of the two vectors."; static PyObject *__pyx_pw_5_cdec_11DenseVector_14dot(PyObject *__pyx_v_self, PyObject *__pyx_v_other) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("dot (wrapper)", 0); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_other), __pyx_ptype_5_cdec_SparseVector, 1, "other", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_other), __pyx_ptype_5_cdec_SparseVector, 1, "other", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_5_cdec_11DenseVector_13dot(((struct __pyx_obj_5_cdec_DenseVector *)__pyx_v_self), ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_other)); goto __pyx_L0; __pyx_L1_error:; @@ -3000,12 +2981,12 @@ static PyObject *__pyx_pw_5_cdec_11DenseVector_14dot(PyObject *__pyx_v_self, PyO return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":36 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":37 * yield str(FDConvert(fid).c_str()), self.vector[0][fid] * * def dot(self, SparseVector other): # <<<<<<<<<<<<<< + * """vector.dot(SparseVector other) -> Dot product of the two vectors.""" * return other.dot(self) - * */ static PyObject *__pyx_pf_5_cdec_11DenseVector_13dot(struct __pyx_obj_5_cdec_DenseVector *__pyx_v_self, struct __pyx_obj_5_cdec_SparseVector *__pyx_v_other) { @@ -3019,22 +3000,22 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_13dot(struct __pyx_obj_5_cdec_Den int __pyx_clineno = 0; __Pyx_RefNannySetupContext("dot", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":37 - * + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":39 * def dot(self, SparseVector other): + * """vector.dot(SparseVector other) -> Dot product of the two vectors.""" * return other.dot(self) # <<<<<<<<<<<<<< * * def tosparse(self): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_other), __pyx_n_s__dot); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_other), __pyx_n_s__dot); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; @@ -3058,6 +3039,7 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_13dot(struct __pyx_obj_5_cdec_Den /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_11DenseVector_16tosparse(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_11DenseVector_15tosparse[] = "vector.tosparse() -> Equivalent SparseVector."; static PyObject *__pyx_pw_5_cdec_11DenseVector_16tosparse(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -3067,12 +3049,12 @@ static PyObject *__pyx_pw_5_cdec_11DenseVector_16tosparse(PyObject *__pyx_v_self return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":39 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":41 * return other.dot(self) * * def tosparse(self): # <<<<<<<<<<<<<< + * """vector.tosparse() -> Equivalent SparseVector.""" * cdef SparseVector sparse = SparseVector.__new__(SparseVector) - * sparse.vector = new FastSparseVector[weight_t]() */ static PyObject *__pyx_pf_5_cdec_11DenseVector_15tosparse(struct __pyx_obj_5_cdec_DenseVector *__pyx_v_self) { @@ -3085,21 +3067,21 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_15tosparse(struct __pyx_obj_5_cde int __pyx_clineno = 0; __Pyx_RefNannySetupContext("tosparse", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":40 - * + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":43 * def tosparse(self): + * """vector.tosparse() -> Equivalent SparseVector.""" * cdef SparseVector sparse = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * sparse.vector = new FastSparseVector[weight_t]() * InitSparseVector(self.vector[0], sparse.vector) */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_sparse = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":41 - * def tosparse(self): + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":44 + * """vector.tosparse() -> Equivalent SparseVector.""" * cdef SparseVector sparse = SparseVector.__new__(SparseVector) * sparse.vector = new FastSparseVector[weight_t]() # <<<<<<<<<<<<<< * InitSparseVector(self.vector[0], sparse.vector) @@ -3107,7 +3089,7 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_15tosparse(struct __pyx_obj_5_cde */ __pyx_v_sparse->vector = new FastSparseVector<weight_t>(); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":42 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":45 * cdef SparseVector sparse = SparseVector.__new__(SparseVector) * sparse.vector = new FastSparseVector[weight_t]() * InitSparseVector(self.vector[0], 
sparse.vector) # <<<<<<<<<<<<<< @@ -3116,7 +3098,7 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_15tosparse(struct __pyx_obj_5_cde */ Weights::InitSparseVector((__pyx_v_self->vector[0]), __pyx_v_sparse->vector); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":43 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":46 * sparse.vector = new FastSparseVector[weight_t]() * InitSparseVector(self.vector[0], sparse.vector) * return sparse # <<<<<<<<<<<<<< @@ -3143,6 +3125,10 @@ static PyObject *__pyx_pf_5_cdec_11DenseVector_15tosparse(struct __pyx_obj_5_cde /* Python wrapper */ static int __pyx_pw_5_cdec_12SparseVector_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_12SparseVector___init__[] = "SparseVector() -> Sparse feature/weight vector."; +#if CYTHON_COMPILING_IN_CPYTHON +struct wrapperbase __pyx_wrapperbase_5_cdec_12SparseVector___init__; +#endif static int __pyx_pw_5_cdec_12SparseVector_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { int __pyx_r; __Pyx_RefNannyDeclarations @@ -3155,12 +3141,12 @@ static int __pyx_pw_5_cdec_12SparseVector_1__init__(PyObject *__pyx_v_self, PyOb return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":48 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":51 * cdef FastSparseVector[weight_t]* vector * * def __init__(self): # <<<<<<<<<<<<<< + * """SparseVector() -> Sparse feature/weight vector.""" * self.vector = new FastSparseVector[weight_t]() - * */ static int __pyx_pf_5_cdec_12SparseVector___init__(struct __pyx_obj_5_cdec_SparseVector *__pyx_v_self) { @@ -3168,9 +3154,9 @@ static int __pyx_pf_5_cdec_12SparseVector___init__(struct __pyx_obj_5_cdec_Spars __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":49 - * + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":53 * def __init__(self): + * """SparseVector() -> Sparse feature/weight vector.""" * self.vector = new FastSparseVector[weight_t]() # <<<<<<<<<<<<<< * * def __dealloc__(self): @@ -3191,7 +3177,7 @@ static void __pyx_pw_5_cdec_12SparseVector_3__dealloc__(PyObject *__pyx_v_self) __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":51 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":55 * self.vector = new FastSparseVector[weight_t]() * * def __dealloc__(self): # <<<<<<<<<<<<<< @@ -3203,7 +3189,7 @@ static void __pyx_pf_5_cdec_12SparseVector_2__dealloc__(CYTHON_UNUSED struct __p __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":52 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":56 * * def __dealloc__(self): * del self.vector # <<<<<<<<<<<<<< @@ -3217,6 +3203,7 @@ static void __pyx_pf_5_cdec_12SparseVector_2__dealloc__(CYTHON_UNUSED struct __p /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_12SparseVector_5copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_12SparseVector_4copy[] = "vector.copy() -> SparseVector copy."; static PyObject *__pyx_pw_5_cdec_12SparseVector_5copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -3226,12 +3213,12 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_5copy(PyObject *__pyx_v_self, CY return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":54 +/* 
"/home/vchahune/tools/cdec/python/src/vectors.pxi":58 * del self.vector * * def copy(self): # <<<<<<<<<<<<<< + * """vector.copy() -> SparseVector copy.""" * return self * 1 - * */ static PyObject *__pyx_pf_5_cdec_12SparseVector_4copy(struct __pyx_obj_5_cdec_SparseVector *__pyx_v_self) { @@ -3243,15 +3230,15 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_4copy(struct __pyx_obj_5_cdec_Sp int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":55 - * + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":60 * def copy(self): + * """vector.copy() -> SparseVector copy.""" * return self * 1 # <<<<<<<<<<<<<< * * def __getitem__(self, char* fname): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyNumber_Multiply(((PyObject *)__pyx_v_self), __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyNumber_Multiply(((PyObject *)__pyx_v_self), __pyx_int_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; @@ -3277,7 +3264,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_7__getitem__(PyObject *__pyx_v_s __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); assert(__pyx_arg_fname); { - __pyx_v_fname = PyBytes_AsString(__pyx_arg_fname); if (unlikely((!__pyx_v_fname) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_fname = PyBytes_AsString(__pyx_arg_fname); if (unlikely((!__pyx_v_fname) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; @@ -3290,7 +3277,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_7__getitem__(PyObject *__pyx_v_s return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":57 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":62 * return self * 1 * * def __getitem__(self, char* fname): # <<<<<<<<<<<<<< @@ -3310,7 +3297,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_6__getitem__(struct __pyx_obj_5_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":58 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":63 * * def __getitem__(self, char* fname): * cdef int fid = FDConvert(fname) # <<<<<<<<<<<<<< @@ -3319,7 +3306,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_6__getitem__(struct __pyx_obj_5_ */ __pyx_v_fid = FD::Convert(__pyx_v_fname); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":59 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":64 * def __getitem__(self, char* fname): * cdef int fid = FDConvert(fname) * if fid < 0: raise KeyError(fname) # <<<<<<<<<<<<<< @@ -3328,24 +3315,24 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_6__getitem__(struct __pyx_obj_5_ */ __pyx_t_1 = (__pyx_v_fid < 0); if (__pyx_t_1) { - __pyx_t_2 = PyBytes_FromString(__pyx_v_fname); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyBytes_FromString(__pyx_v_fname); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_3 = PyTuple_New(1); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2)); __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_builtin_KeyError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_builtin_KeyError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[1]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":60 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":65 * cdef int fid = FDConvert(fname) * if fid < 0: raise KeyError(fname) * return self.vector.value(fid) # <<<<<<<<<<<<<< @@ -3353,7 +3340,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_6__getitem__(struct __pyx_obj_5_ * def __setitem__(self, char* fname, float value): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_self->vector->value(__pyx_v_fid)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_self->vector->value(__pyx_v_fid)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; @@ -3381,10 +3368,10 @@ static int __pyx_pw_5_cdec_12SparseVector_9__setitem__(PyObject *__pyx_v_self, P __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); assert(__pyx_arg_fname); { - __pyx_v_fname = PyBytes_AsString(__pyx_arg_fname); if (unlikely((!__pyx_v_fname) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_fname = PyBytes_AsString(__pyx_arg_fname); if (unlikely((!__pyx_v_fname) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } assert(__pyx_arg_value); { - __pyx_v_value = __pyx_PyFloat_AsFloat(__pyx_arg_value); if (unlikely((__pyx_v_value == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_value = __pyx_PyFloat_AsFloat(__pyx_arg_value); if (unlikely((__pyx_v_value == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; @@ -3397,11 +3384,11 @@ static int __pyx_pw_5_cdec_12SparseVector_9__setitem__(PyObject *__pyx_v_self, P return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":62 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":67 * return self.vector.value(fid) * * def __setitem__(self, char* fname, float value): # 
<<<<<<<<<<<<<< - * cdef int fid = FDConvert(<char *>fname) + * cdef int fid = FDConvert(fname) * if fid < 0: raise KeyError(fname) */ @@ -3417,43 +3404,43 @@ static int __pyx_pf_5_cdec_12SparseVector_8__setitem__(struct __pyx_obj_5_cdec_S int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":63 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":68 * * def __setitem__(self, char* fname, float value): - * cdef int fid = FDConvert(<char *>fname) # <<<<<<<<<<<<<< + * cdef int fid = FDConvert(fname) # <<<<<<<<<<<<<< * if fid < 0: raise KeyError(fname) * self.vector.set_value(fid, value) */ - __pyx_v_fid = FD::Convert(((char *)__pyx_v_fname)); + __pyx_v_fid = FD::Convert(__pyx_v_fname); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":64 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":69 * def __setitem__(self, char* fname, float value): - * cdef int fid = FDConvert(<char *>fname) + * cdef int fid = FDConvert(fname) * if fid < 0: raise KeyError(fname) # <<<<<<<<<<<<<< * self.vector.set_value(fid, value) * */ __pyx_t_1 = (__pyx_v_fid < 0); if (__pyx_t_1) { - __pyx_t_2 = PyBytes_FromString(__pyx_v_fname); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyBytes_FromString(__pyx_v_fname); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_2)); __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_builtin_KeyError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_builtin_KeyError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[1]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":65 - * cdef int fid = FDConvert(<char *>fname) + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":70 + * cdef int fid = FDConvert(fname) * if fid < 0: raise KeyError(fname) * self.vector.set_value(fid, value) # <<<<<<<<<<<<<< * @@ -3485,7 +3472,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_11__iter__(PyObject *__pyx_v_sel return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":67 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":72 * self.vector.set_value(fid, value) * * def __iter__(self): # <<<<<<<<<<<<<< @@ -3511,7 +3498,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_10__iter__(struct __pyx_obj_5_cd __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); 
__Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_12SparseVector_12generator1, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_12SparseVector_12generator1, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -3548,9 +3535,9 @@ static PyObject *__pyx_gb_5_cdec_12SparseVector_12generator1(__pyx_GeneratorObje return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":68 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":73 * * def __iter__(self): * cdef FastSparseVector[weight_t].const_iterator* it = new FastSparseVector[weight_t].const_iterator(self.vector[0], False) # <<<<<<<<<<<<<< @@ -3559,7 +3546,7 @@ static PyObject *__pyx_gb_5_cdec_12SparseVector_12generator1(__pyx_GeneratorObje */ __pyx_cur_scope->__pyx_v_it = new FastSparseVector<weight_t>::const_iterator((__pyx_cur_scope->__pyx_v_self->vector[0]), 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":70 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":75 * cdef FastSparseVector[weight_t].const_iterator* it = new FastSparseVector[weight_t].const_iterator(self.vector[0], False) * cdef unsigned i * try: # <<<<<<<<<<<<<< @@ -3568,7 +3555,7 @@ static PyObject *__pyx_gb_5_cdec_12SparseVector_12generator1(__pyx_GeneratorObje */ /*try:*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":71 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":76 * cdef unsigned i * try: * for i in range(self.vector.size()): # <<<<<<<<<<<<<< @@ -3579,26 +3566,26 @@ static PyObject *__pyx_gb_5_cdec_12SparseVector_12generator1(__pyx_GeneratorObje for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":72 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":77 * try: * for i in range(self.vector.size()): * yield (str(FDConvert(it[0].ptr().first).c_str()), it[0].ptr().second) # <<<<<<<<<<<<<< * pinc(it[0]) # ++it * finally: */ - __pyx_t_3 = PyBytes_FromString(FD::Convert((__pyx_cur_scope->__pyx_v_it[0]).operator->()->first).c_str()); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_3 = PyBytes_FromString(FD::Convert((__pyx_cur_scope->__pyx_v_it[0]).operator->()->first).c_str()); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject 
*)__pyx_t_3)); __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = PyFloat_FromDouble((__pyx_cur_scope->__pyx_v_it[0]).operator->()->second); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_4 = PyFloat_FromDouble((__pyx_cur_scope->__pyx_v_it[0]).operator->()->second); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); @@ -3618,9 +3605,9 @@ static PyObject *__pyx_gb_5_cdec_12SparseVector_12generator1(__pyx_GeneratorObje __pyx_L9_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L5;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L5;} - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":73 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":78 * for i in range(self.vector.size()): * yield (str(FDConvert(it[0].ptr().first).c_str()), it[0].ptr().second) * pinc(it[0]) # ++it # <<<<<<<<<<<<<< @@ -3631,7 +3618,7 @@ static PyObject *__pyx_gb_5_cdec_12SparseVector_12generator1(__pyx_GeneratorObje } } - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":75 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":80 * pinc(it[0]) # ++it * finally: * del it # <<<<<<<<<<<<<< @@ -3683,6 +3670,7 @@ static PyObject *__pyx_gb_5_cdec_12SparseVector_12generator1(__pyx_GeneratorObje /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_12SparseVector_14dot(PyObject *__pyx_v_self, PyObject *__pyx_v_other); /*proto*/ +static char __pyx_doc_5_cdec_12SparseVector_13dot[] = "vector.dot(SparseVector/DenseVector other) -> Dot product of the two vectors."; static PyObject *__pyx_pw_5_cdec_12SparseVector_14dot(PyObject *__pyx_v_self, PyObject *__pyx_v_other) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -3692,12 +3680,12 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_14dot(PyObject *__pyx_v_self, Py return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":77 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":82 * del it * * def dot(self, other): # <<<<<<<<<<<<<< + * """vector.dot(SparseVector/DenseVector other) -> Dot product of the two vectors.""" * if isinstance(other, DenseVector): - * return self.vector.dot((<DenseVector> other).vector[0]) */ static PyObject *__pyx_pf_5_cdec_12SparseVector_13dot(struct __pyx_obj_5_cdec_SparseVector *__pyx_v_self, 
PyObject *__pyx_v_other) { @@ -3711,9 +3699,9 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_13dot(struct __pyx_obj_5_cdec_Sp int __pyx_clineno = 0; __Pyx_RefNannySetupContext("dot", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":78 - * + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":84 * def dot(self, other): + * """vector.dot(SparseVector/DenseVector other) -> Dot product of the two vectors.""" * if isinstance(other, DenseVector): # <<<<<<<<<<<<<< * return self.vector.dot((<DenseVector> other).vector[0]) * elif isinstance(other, SparseVector): @@ -3724,15 +3712,15 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_13dot(struct __pyx_obj_5_cdec_Sp __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":79 - * def dot(self, other): + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":85 + * """vector.dot(SparseVector/DenseVector other) -> Dot product of the two vectors.""" * if isinstance(other, DenseVector): * return self.vector.dot((<DenseVector> other).vector[0]) # <<<<<<<<<<<<<< * elif isinstance(other, SparseVector): * return self.vector.dot((<SparseVector> other).vector[0]) */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->vector->dot((((struct __pyx_obj_5_cdec_DenseVector *)__pyx_v_other)->vector[0]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->vector->dot((((struct __pyx_obj_5_cdec_DenseVector *)__pyx_v_other)->vector[0]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; @@ -3740,7 +3728,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_13dot(struct __pyx_obj_5_cdec_Sp goto __pyx_L3; } - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":80 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":86 * if isinstance(other, DenseVector): * return self.vector.dot((<DenseVector> other).vector[0]) * elif isinstance(other, SparseVector): # <<<<<<<<<<<<<< @@ -3753,7 +3741,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_13dot(struct __pyx_obj_5_cdec_Sp __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":81 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":87 * return self.vector.dot((<DenseVector> other).vector[0]) * elif isinstance(other, SparseVector): * return self.vector.dot((<SparseVector> other).vector[0]) # <<<<<<<<<<<<<< @@ -3761,7 +3749,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_13dot(struct __pyx_obj_5_cdec_Sp * */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->vector->dot((((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_other)->vector[0]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->vector->dot((((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_other)->vector[0]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; @@ -3770,26 +3758,26 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_13dot(struct __pyx_obj_5_cdec_Sp } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":82 + /* 
"/home/vchahune/tools/cdec/python/src/vectors.pxi":88 * elif isinstance(other, SparseVector): * return self.vector.dot((<SparseVector> other).vector[0]) * raise TypeError('cannot take the dot product of %s and SparseVector' % type(other)) # <<<<<<<<<<<<<< * * def __richcmp__(SparseVector x, SparseVector y, int op): */ - __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_3), ((PyObject *)Py_TYPE(__pyx_v_other))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_3), ((PyObject *)Py_TYPE(__pyx_v_other))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[1]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; @@ -3810,8 +3798,8 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_16__richcmp__(PyObject *__pyx_v_ PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__richcmp__ (wrapper)", 0); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5_cdec_SparseVector, 1, "x", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5_cdec_SparseVector, 1, "y", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5_cdec_SparseVector, 1, "x", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5_cdec_SparseVector, 1, "y", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_5_cdec_12SparseVector_15__richcmp__(((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_x), ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_y), ((int)__pyx_v_op)); goto __pyx_L0; __pyx_L1_error:; @@ -3821,7 +3809,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_16__richcmp__(PyObject *__pyx_v_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":84 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":90 * raise 
TypeError('cannot take the dot product of %s and SparseVector' % type(other)) * * def __richcmp__(SparseVector x, SparseVector y, int op): # <<<<<<<<<<<<<< @@ -3839,7 +3827,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_15__richcmp__(struct __pyx_obj_5 int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__richcmp__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":87 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":93 * if op == 2: # == * return x.vector[0] == y.vector[0] * elif op == 3: # != # <<<<<<<<<<<<<< @@ -3848,7 +3836,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_15__richcmp__(struct __pyx_obj_5 */ switch (__pyx_v_op) { - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":85 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":91 * * def __richcmp__(SparseVector x, SparseVector y, int op): * if op == 2: # == # <<<<<<<<<<<<<< @@ -3857,7 +3845,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_15__richcmp__(struct __pyx_obj_5 */ case 2: - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":86 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":92 * def __richcmp__(SparseVector x, SparseVector y, int op): * if op == 2: # == * return x.vector[0] == y.vector[0] # <<<<<<<<<<<<<< @@ -3865,14 +3853,14 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_15__richcmp__(struct __pyx_obj_5 * return not (x == y) */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyBool_FromLong(((__pyx_v_x->vector[0]) == (__pyx_v_y->vector[0]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyBool_FromLong(((__pyx_v_x->vector[0]) == (__pyx_v_y->vector[0]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; break; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":87 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":93 * if op == 2: # == * return x.vector[0] == y.vector[0] * elif op == 3: # != # <<<<<<<<<<<<<< @@ -3881,7 +3869,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_15__richcmp__(struct __pyx_obj_5 */ case 3: - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":88 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":94 * return x.vector[0] == y.vector[0] * elif op == 3: # != * return not (x == y) # <<<<<<<<<<<<<< @@ -3889,11 +3877,10 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_15__richcmp__(struct __pyx_obj_5 * */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_x), ((PyObject *)__pyx_v_y), Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_x), ((PyObject *)__pyx_v_y), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyBool_FromLong((!__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename 
= __pyx_f[1]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyBool_FromLong((!__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; @@ -3901,18 +3888,18 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_15__richcmp__(struct __pyx_obj_5 break; } - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":89 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":95 * elif op == 3: # != * return not (x == y) * raise NotImplemented('comparison not implemented for SparseVector') # <<<<<<<<<<<<<< * * def __len__(self): */ - __pyx_t_1 = PyObject_Call(__pyx_builtin_NotImplemented, ((PyObject *)__pyx_k_tuple_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(__pyx_builtin_NotImplemented, ((PyObject *)__pyx_k_tuple_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[1]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[1]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; @@ -3937,7 +3924,7 @@ static Py_ssize_t __pyx_pw_5_cdec_12SparseVector_18__len__(PyObject *__pyx_v_sel return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":91 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":97 * raise NotImplemented('comparison not implemented for SparseVector') * * def __len__(self): # <<<<<<<<<<<<<< @@ -3950,7 +3937,7 @@ static Py_ssize_t __pyx_pf_5_cdec_12SparseVector_17__len__(struct __pyx_obj_5_cd __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":92 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":98 * * def __len__(self): * return self.vector.size() # <<<<<<<<<<<<<< @@ -3974,7 +3961,7 @@ static int __pyx_pw_5_cdec_12SparseVector_20__contains__(PyObject *__pyx_v_self, __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__contains__ (wrapper)", 0); assert(__pyx_arg_fname); { - __pyx_v_fname = PyBytes_AsString(__pyx_arg_fname); if (unlikely((!__pyx_v_fname) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_fname = PyBytes_AsString(__pyx_arg_fname); if (unlikely((!__pyx_v_fname) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; @@ -3987,7 +3974,7 @@ static int __pyx_pw_5_cdec_12SparseVector_20__contains__(PyObject *__pyx_v_self, return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":94 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":100 * return self.vector.size() * * def __contains__(self, char* fname): # <<<<<<<<<<<<<< @@ -4000,7 +3987,7 @@ static int __pyx_pf_5_cdec_12SparseVector_19__contains__(struct __pyx_obj_5_cdec __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__contains__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":95 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":101 * * def __contains__(self, char* fname): * 
return self.vector.nonzero(FDConvert(fname)) # <<<<<<<<<<<<<< @@ -4027,7 +4014,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_22__neg__(PyObject *__pyx_v_self return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":97 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":103 * return self.vector.nonzero(FDConvert(fname)) * * def __neg__(self): # <<<<<<<<<<<<<< @@ -4045,20 +4032,20 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_21__neg__(struct __pyx_obj_5_cde int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__neg__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":98 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":104 * * def __neg__(self): * cdef SparseVector result = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * result.vector = new FastSparseVector[weight_t](self.vector[0]) * result.vector[0] *= -1.0 */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_result = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":99 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":105 * def __neg__(self): * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](self.vector[0]) # <<<<<<<<<<<<<< @@ -4067,7 +4054,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_21__neg__(struct __pyx_obj_5_cde */ __pyx_v_result->vector = new FastSparseVector<weight_t>((__pyx_v_self->vector[0])); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":100 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":106 * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](self.vector[0]) * result.vector[0] *= -1.0 # <<<<<<<<<<<<<< @@ -4076,7 +4063,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_21__neg__(struct __pyx_obj_5_cde */ (__pyx_v_result->vector[0]) *= -1.0; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":101 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":107 * result.vector = new FastSparseVector[weight_t](self.vector[0]) * result.vector[0] *= -1.0 * return result # <<<<<<<<<<<<<< @@ -4107,7 +4094,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_24__iadd__(PyObject *__pyx_v_sel PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__iadd__ (wrapper)", 0); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_other), __pyx_ptype_5_cdec_SparseVector, 1, "other", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_other), __pyx_ptype_5_cdec_SparseVector, 1, "other", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = 
__pyx_pf_5_cdec_12SparseVector_23__iadd__(((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_self), ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_other)); goto __pyx_L0; __pyx_L1_error:; @@ -4117,7 +4104,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_24__iadd__(PyObject *__pyx_v_sel return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":103 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":109 * return result * * def __iadd__(SparseVector self, SparseVector other): # <<<<<<<<<<<<<< @@ -4130,7 +4117,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_23__iadd__(struct __pyx_obj_5_cd __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__iadd__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":104 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":110 * * def __iadd__(SparseVector self, SparseVector other): * self.vector[0] += other.vector[0] # <<<<<<<<<<<<<< @@ -4139,7 +4126,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_23__iadd__(struct __pyx_obj_5_cd */ (__pyx_v_self->vector[0]) += (__pyx_v_other->vector[0]); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":105 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":111 * def __iadd__(SparseVector self, SparseVector other): * self.vector[0] += other.vector[0] * return self # <<<<<<<<<<<<<< @@ -4164,7 +4151,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_26__isub__(PyObject *__pyx_v_sel PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__isub__ (wrapper)", 0); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_other), __pyx_ptype_5_cdec_SparseVector, 1, "other", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_other), __pyx_ptype_5_cdec_SparseVector, 1, "other", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_5_cdec_12SparseVector_25__isub__(((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_self), ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_other)); goto __pyx_L0; __pyx_L1_error:; @@ -4174,7 +4161,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_26__isub__(PyObject *__pyx_v_sel return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":107 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":113 * return self * * def __isub__(SparseVector self, SparseVector other): # <<<<<<<<<<<<<< @@ -4187,7 +4174,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_25__isub__(struct __pyx_obj_5_cd __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__isub__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":108 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":114 * * def __isub__(SparseVector self, SparseVector other): * self.vector[0] -= other.vector[0] # <<<<<<<<<<<<<< @@ -4196,7 +4183,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_25__isub__(struct __pyx_obj_5_cd */ (__pyx_v_self->vector[0]) -= (__pyx_v_other->vector[0]); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":109 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":115 * def __isub__(SparseVector self, SparseVector other): * self.vector[0] -= other.vector[0] * return self # <<<<<<<<<<<<<< @@ -4223,7 +4210,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_28__imul__(PyObject *__pyx_v_sel __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__imul__ (wrapper)", 0); assert(__pyx_arg_scalar); { - __pyx_v_scalar = 
__pyx_PyFloat_AsFloat(__pyx_arg_scalar); if (unlikely((__pyx_v_scalar == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_scalar = __pyx_PyFloat_AsFloat(__pyx_arg_scalar); if (unlikely((__pyx_v_scalar == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; @@ -4236,7 +4223,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_28__imul__(PyObject *__pyx_v_sel return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":111 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":117 * return self * * def __imul__(SparseVector self, float scalar): # <<<<<<<<<<<<<< @@ -4249,7 +4236,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_27__imul__(struct __pyx_obj_5_cd __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__imul__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":112 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":118 * * def __imul__(SparseVector self, float scalar): * self.vector[0] *= scalar # <<<<<<<<<<<<<< @@ -4258,7 +4245,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_27__imul__(struct __pyx_obj_5_cd */ (__pyx_v_self->vector[0]) *= __pyx_v_scalar; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":113 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":119 * def __imul__(SparseVector self, float scalar): * self.vector[0] *= scalar * return self # <<<<<<<<<<<<<< @@ -4286,7 +4273,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_30__idiv__(PyObject *__pyx_v_sel __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__idiv__ (wrapper)", 0); assert(__pyx_arg_scalar); { - __pyx_v_scalar = __pyx_PyFloat_AsFloat(__pyx_arg_scalar); if (unlikely((__pyx_v_scalar == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_scalar = __pyx_PyFloat_AsFloat(__pyx_arg_scalar); if (unlikely((__pyx_v_scalar == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; @@ -4300,7 +4287,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_30__idiv__(PyObject *__pyx_v_sel } #endif /*!(#if PY_MAJOR_VERSION < 3)*/ -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":115 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":121 * return self * * def __idiv__(SparseVector self, float scalar): # <<<<<<<<<<<<<< @@ -4314,7 +4301,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_29__idiv__(struct __pyx_obj_5_cd __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__idiv__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":116 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":122 * * def __idiv__(SparseVector self, float scalar): * self.vector[0] /= scalar # <<<<<<<<<<<<<< @@ -4323,7 +4310,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_29__idiv__(struct __pyx_obj_5_cd */ (__pyx_v_self->vector[0]) /= __pyx_v_scalar; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":117 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":123 * def __idiv__(SparseVector self, float scalar): * self.vector[0] /= scalar * return self # <<<<<<<<<<<<<< @@ -4349,8 +4336,8 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_32__add__(PyObject *__pyx_v_x, P PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__add__ (wrapper)", 0); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5_cdec_SparseVector, 1, "x", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5_cdec_SparseVector, 1, "y", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5_cdec_SparseVector, 1, "x", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5_cdec_SparseVector, 1, "y", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_5_cdec_12SparseVector_31__add__(((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_x), ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_y)); goto __pyx_L0; __pyx_L1_error:; @@ -4360,7 +4347,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_32__add__(PyObject *__pyx_v_x, P return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":119 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":125 * return self * * def __add__(SparseVector x, SparseVector y): # <<<<<<<<<<<<<< @@ -4378,20 +4365,20 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_31__add__(struct __pyx_obj_5_cde int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__add__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":120 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":126 * * def __add__(SparseVector x, SparseVector y): * cdef SparseVector result = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * result.vector = new FastSparseVector[weight_t](x.vector[0] + y.vector[0]) * return result */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 126; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_result = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":121 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":127 * def __add__(SparseVector x, SparseVector y): * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](x.vector[0] + y.vector[0]) # <<<<<<<<<<<<<< @@ -4400,7 +4387,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_31__add__(struct __pyx_obj_5_cde */ __pyx_v_result->vector = new FastSparseVector<weight_t>(((__pyx_v_x->vector[0]) + (__pyx_v_y->vector[0]))); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":122 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":128 * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new 
FastSparseVector[weight_t](x.vector[0] + y.vector[0]) * return result # <<<<<<<<<<<<<< @@ -4431,8 +4418,8 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_34__sub__(PyObject *__pyx_v_x, P PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__sub__ (wrapper)", 0); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5_cdec_SparseVector, 1, "x", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5_cdec_SparseVector, 1, "y", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5_cdec_SparseVector, 1, "x", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5_cdec_SparseVector, 1, "y", 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_5_cdec_12SparseVector_33__sub__(((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_x), ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_v_y)); goto __pyx_L0; __pyx_L1_error:; @@ -4442,7 +4429,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_34__sub__(PyObject *__pyx_v_x, P return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":124 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":130 * return result * * def __sub__(SparseVector x, SparseVector y): # <<<<<<<<<<<<<< @@ -4460,20 +4447,20 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_33__sub__(struct __pyx_obj_5_cde int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__sub__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":125 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":131 * * def __sub__(SparseVector x, SparseVector y): * cdef SparseVector result = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * result.vector = new FastSparseVector[weight_t](x.vector[0] - y.vector[0]) * return result */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_result = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":126 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":132 * def __sub__(SparseVector x, SparseVector y): * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](x.vector[0] - y.vector[0]) # <<<<<<<<<<<<<< @@ -4482,7 +4469,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_33__sub__(struct __pyx_obj_5_cde */ __pyx_v_result->vector = new FastSparseVector<weight_t>(((__pyx_v_x->vector[0]) - (__pyx_v_y->vector[0]))); - /* 
"/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":127 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":133 * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](x.vector[0] - y.vector[0]) * return result # <<<<<<<<<<<<<< @@ -4518,7 +4505,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_36__mul__(PyObject *__pyx_v_x, P return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":129 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":135 * return result * * def __mul__(x, y): # <<<<<<<<<<<<<< @@ -4540,7 +4527,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_35__mul__(PyObject *__pyx_v_x, P int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__mul__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":132 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":138 * cdef SparseVector vector * cdef float scalar * if isinstance(x, SparseVector): vector, scalar = x, y # <<<<<<<<<<<<<< @@ -4552,10 +4539,10 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_35__mul__(PyObject *__pyx_v_x, P __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_x, __pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - if (!(likely(((__pyx_v_x) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_x, __pyx_ptype_5_cdec_SparseVector))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(((__pyx_v_x) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_x, __pyx_ptype_5_cdec_SparseVector))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_x; __Pyx_INCREF(__pyx_t_1); - __pyx_t_3 = __pyx_PyFloat_AsFloat(__pyx_v_y); if (unlikely((__pyx_t_3 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __pyx_PyFloat_AsFloat(__pyx_v_y); if (unlikely((__pyx_t_3 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_vector = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; __pyx_v_scalar = __pyx_t_3; @@ -4563,37 +4550,37 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_35__mul__(PyObject *__pyx_v_x, P } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":133 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":139 * cdef float scalar * if isinstance(x, SparseVector): vector, scalar = x, y * else: vector, scalar = y, x # <<<<<<<<<<<<<< * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](vector.vector[0] * scalar) */ - if (!(likely(((__pyx_v_y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_y, __pyx_ptype_5_cdec_SparseVector))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(((__pyx_v_y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_y, __pyx_ptype_5_cdec_SparseVector))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_y; __Pyx_INCREF(__pyx_t_1); - __pyx_t_3 = __pyx_PyFloat_AsFloat(__pyx_v_x); if (unlikely((__pyx_t_3 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __pyx_PyFloat_AsFloat(__pyx_v_x); if (unlikely((__pyx_t_3 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 139; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_vector = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; __pyx_v_scalar = __pyx_t_3; } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":134 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":140 * if isinstance(x, SparseVector): vector, scalar = x, y * else: vector, scalar = y, x * cdef SparseVector result = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * result.vector = new FastSparseVector[weight_t](vector.vector[0] * scalar) * return result */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_result = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":135 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":141 * else: vector, scalar = y, x * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](vector.vector[0] * scalar) # <<<<<<<<<<<<<< @@ -4602,7 +4589,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_35__mul__(PyObject *__pyx_v_x, P */ __pyx_v_result->vector = new FastSparseVector<weight_t>(((__pyx_v_vector->vector[0]) * __pyx_v_scalar)); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":136 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":142 * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](vector.vector[0] * scalar) * return result # <<<<<<<<<<<<<< @@ -4641,7 +4628,7 @@ static PyObject *__pyx_pw_5_cdec_12SparseVector_38__div__(PyObject *__pyx_v_x, P } #endif /*!(#if PY_MAJOR_VERSION < 3)*/ -/* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":138 +/* "/home/vchahune/tools/cdec/python/src/vectors.pxi":144 * return result * * def __div__(x, y): # <<<<<<<<<<<<<< @@ -4664,7 +4651,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_37__div__(PyObject *__pyx_v_x, P int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__div__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":141 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":147 * cdef SparseVector vector * cdef float scalar * if isinstance(x, SparseVector): vector, scalar = x, y # <<<<<<<<<<<<<< @@ -4676,10 +4663,10 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_37__div__(PyObject *__pyx_v_x, P __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_x, __pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - if (!(likely(((__pyx_v_x) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_x, __pyx_ptype_5_cdec_SparseVector))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(((__pyx_v_x) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_x, __pyx_ptype_5_cdec_SparseVector))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_x; __Pyx_INCREF(__pyx_t_1); - __pyx_t_3 = __pyx_PyFloat_AsFloat(__pyx_v_y); if (unlikely((__pyx_t_3 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __pyx_PyFloat_AsFloat(__pyx_v_y); if (unlikely((__pyx_t_3 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_vector = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; __pyx_v_scalar = __pyx_t_3; @@ -4687,37 +4674,37 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_37__div__(PyObject *__pyx_v_x, P } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":142 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":148 * cdef float scalar * if isinstance(x, SparseVector): vector, scalar = x, y * else: vector, scalar = y, x # <<<<<<<<<<<<<< * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](vector.vector[0] / scalar) */ - if (!(likely(((__pyx_v_y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_y, __pyx_ptype_5_cdec_SparseVector))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(((__pyx_v_y) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_y, __pyx_ptype_5_cdec_SparseVector))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = __pyx_v_y; __Pyx_INCREF(__pyx_t_1); - __pyx_t_3 = __pyx_PyFloat_AsFloat(__pyx_v_x); if (unlikely((__pyx_t_3 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __pyx_PyFloat_AsFloat(__pyx_v_x); if (unlikely((__pyx_t_3 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_vector = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; __pyx_v_scalar = __pyx_t_3; } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":143 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":149 * if isinstance(x, SparseVector): vector, scalar = x, y * else: vector, scalar = y, x * cdef SparseVector result = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * result.vector = new FastSparseVector[weight_t](vector.vector[0] / scalar) * return result */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_result = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":144 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":150 * else: vector, scalar = y, x * cdef SparseVector result 
= SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](vector.vector[0] / scalar) # <<<<<<<<<<<<<< @@ -4725,7 +4712,7 @@ static PyObject *__pyx_pf_5_cdec_12SparseVector_37__div__(PyObject *__pyx_v_x, P */ __pyx_v_result->vector = new FastSparseVector<weight_t>(((__pyx_v_vector->vector[0]) / __pyx_v_scalar)); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":145 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":151 * cdef SparseVector result = SparseVector.__new__(SparseVector) * result.vector = new FastSparseVector[weight_t](vector.vector[0] / scalar) * return result # <<<<<<<<<<<<<< @@ -4763,7 +4750,7 @@ static PyObject *__pyx_pw_5_cdec_1_phrase(PyObject *__pyx_self, PyObject *__pyx_ } static PyObject *__pyx_gb_5_cdec_7_phrase_2generator18(__pyx_GeneratorObject *__pyx_generator, PyObject *__pyx_sent_value); /* proto */ -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":6 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":6 * * def _phrase(phrase): * return ' '.join(w.encode('utf8') if isinstance(w, unicode) else str(w) for w in phrase) # <<<<<<<<<<<<<< @@ -4842,16 +4829,16 @@ static PyObject *__pyx_gb_5_cdec_7_phrase_2generator18(__pyx_GeneratorObject *__ if (!__pyx_t_3 && PyList_CheckExact(__pyx_t_1)) { if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; + __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_3 && PyTuple_CheckExact(__pyx_t_1)) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; + __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); @@ -4929,7 +4916,7 @@ static PyObject *__pyx_gb_5_cdec_7_phrase_2generator18(__pyx_GeneratorObject *__ return NULL; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":5 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":5 * import cdec.sa._sa as _sa * * def _phrase(phrase): # <<<<<<<<<<<<<< @@ -4958,7 +4945,7 @@ static PyObject *__pyx_pf_5_cdec__phrase(CYTHON_UNUSED PyObject *__pyx_self, PyO __Pyx_INCREF(__pyx_cur_scope->__pyx_v_phrase); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_phrase); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":6 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":6 * * 
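/* The vectors.pxi hunks above (147-151 in the new numbering) only update the
   embedded source path and line numbers; the __div__ logic is unchanged.
   Whichever operand is the SparseVector is divided by the other operand,
   coerced to a float. A minimal usage sketch, assuming `weights` is any
   _cdec.SparseVector instance (for example a decoder's weight vector):

       half = weights / 2.0   # new SparseVector with every value divided by 2.0

   Because the non-vector operand is always treated as the divisor, the code
   shown here would compute the same result for `2.0 / weights`. */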
def _phrase(phrase): * return ' '.join(w.encode('utf8') if isinstance(w, unicode) else str(w) for w in phrase) # <<<<<<<<<<<<<< @@ -5000,6 +4987,10 @@ static PyObject *__pyx_pf_5_cdec__phrase(CYTHON_UNUSED PyObject *__pyx_self, PyO /* Python wrapper */ static int __pyx_pw_5_cdec_2NT_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_2NT___init__[] = "NT(bytes cat, int ref=0) -> Non-terminal from category `cat`."; +#if CYTHON_COMPILING_IN_CPYTHON +struct wrapperbase __pyx_wrapperbase_5_cdec_2NT___init__; +#endif static int __pyx_pw_5_cdec_2NT_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_cat = 0; unsigned int __pyx_v_ref; @@ -5065,12 +5056,12 @@ static int __pyx_pw_5_cdec_2NT_1__init__(PyObject *__pyx_v_self, PyObject *__pyx return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":11 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":11 * cdef public bytes cat * cdef public unsigned ref * def __init__(self, bytes cat, unsigned ref=0): # <<<<<<<<<<<<<< + * """NT(bytes cat, int ref=0) -> Non-terminal from category `cat`.""" * self.cat = cat - * self.ref = ref */ static int __pyx_pf_5_cdec_2NT___init__(struct __pyx_obj_5_cdec_NT *__pyx_v_self, PyObject *__pyx_v_cat, unsigned int __pyx_v_ref) { @@ -5078,9 +5069,9 @@ static int __pyx_pf_5_cdec_2NT___init__(struct __pyx_obj_5_cdec_NT *__pyx_v_self __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":12 - * cdef public unsigned ref + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":13 * def __init__(self, bytes cat, unsigned ref=0): + * """NT(bytes cat, int ref=0) -> Non-terminal from category `cat`.""" * self.cat = cat # <<<<<<<<<<<<<< * self.ref = ref * @@ -5091,8 +5082,8 @@ static int __pyx_pf_5_cdec_2NT___init__(struct __pyx_obj_5_cdec_NT *__pyx_v_self __Pyx_DECREF(((PyObject *)__pyx_v_self->cat)); __pyx_v_self->cat = __pyx_v_cat; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":13 - * def __init__(self, bytes cat, unsigned ref=0): + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":14 + * """NT(bytes cat, int ref=0) -> Non-terminal from category `cat`.""" * self.cat = cat * self.ref = ref # <<<<<<<<<<<<<< * @@ -5116,7 +5107,7 @@ static PyObject *__pyx_pw_5_cdec_2NT_3__str__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":15 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":16 * self.ref = ref * * def __str__(self): # <<<<<<<<<<<<<< @@ -5135,7 +5126,7 @@ static PyObject *__pyx_pf_5_cdec_2NT_2__str__(struct __pyx_obj_5_cdec_NT *__pyx_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":16 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":17 * * def __str__(self): * if self.ref > 0: # <<<<<<<<<<<<<< @@ -5145,7 +5136,7 @@ static PyObject *__pyx_pf_5_cdec_2NT_2__str__(struct __pyx_obj_5_cdec_NT *__pyx_ __pyx_t_1 = (__pyx_v_self->ref > 0); if (__pyx_t_1) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":17 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":18 * def __str__(self): * if self.ref > 0: * return '[%s,%d]' % (self.cat, self.ref) # <<<<<<<<<<<<<< @@ -5153,9 +5144,9 @@ static PyObject *__pyx_pf_5_cdec_2NT_2__str__(struct __pyx_obj_5_cdec_NT *__pyx_ * */ __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyLong_FromUnsignedLong(__pyx_v_self->ref); if (unlikely(!__pyx_t_2)) 
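/* The _phrase hunks above likewise change only the embedded path; for
   reference, the generator being compiled is the one-liner echoed in the
   source comments:

       def _phrase(phrase):
           # utf8-encode unicode tokens; everything else (plain str, NT,
           # NTRef) is rendered with str(), i.e. via the __str__ forms below
           return ' '.join(w.encode('utf8') if isinstance(w, unicode) else str(w)
                           for w in phrase)

   The added `if (unlikely(0 < 0))` guards in the list/tuple fast paths are
   constant-false checks, presumably an artifact of the Cython version used
   to regenerate the file; they are dead code and harmless. */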
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyLong_FromUnsignedLong(__pyx_v_self->ref); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self->cat)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self->cat)); @@ -5163,7 +5154,7 @@ static PyObject *__pyx_pf_5_cdec_2NT_2__str__(struct __pyx_obj_5_cdec_NT *__pyx_ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_8), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_8), ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_r = ((PyObject *)__pyx_t_2); @@ -5173,7 +5164,7 @@ static PyObject *__pyx_pf_5_cdec_2NT_2__str__(struct __pyx_obj_5_cdec_NT *__pyx_ } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":18 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":19 * if self.ref > 0: * return '[%s,%d]' % (self.cat, self.ref) * return '[%s]' % self.cat # <<<<<<<<<<<<<< @@ -5181,7 +5172,7 @@ static PyObject *__pyx_pf_5_cdec_2NT_2__str__(struct __pyx_obj_5_cdec_NT *__pyx_ * cdef class NTRef: */ __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_9), ((PyObject *)__pyx_v_self->cat)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_9), ((PyObject *)__pyx_v_self->cat)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); __pyx_r = ((PyObject *)__pyx_t_2); __pyx_t_2 = 0; @@ -5211,7 +5202,7 @@ static PyObject *__pyx_pw_5_cdec_2NT_3cat_1__get__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":9 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":9 * * cdef class NT: * cdef public bytes cat # <<<<<<<<<<<<<< @@ -5307,12 +5298,12 @@ static PyObject *__pyx_pw_5_cdec_2NT_3ref_1__get__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":10 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":10 * cdef class NT: * cdef public bytes cat * cdef public unsigned ref # <<<<<<<<<<<<<< * def __init__(self, bytes cat, unsigned ref=0): - * self.cat = cat + * """NT(bytes cat, int ref=0) -> Non-terminal from category `cat`.""" */ static PyObject *__pyx_pf_5_cdec_2NT_3ref___get__(struct __pyx_obj_5_cdec_NT *__pyx_v_self) { @@ -5376,6 +5367,10 @@ static int __pyx_pf_5_cdec_2NT_3ref_2__set__(struct __pyx_obj_5_cdec_NT *__pyx_v /* Python wrapper */ static int __pyx_pw_5_cdec_5NTRef_1__init__(PyObject *__pyx_v_self, PyObject 
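/* The NT hunks above add a docstring to __init__ ("NT(bytes cat, int ref=0)
   -> Non-terminal from category `cat`.") and shift the embedded grammar.pxi
   line numbers by one; the compiled behavior is identical. A usage sketch
   matching the two __str__ branches, assuming the cdec package re-exports
   the _cdec extension types:

       import cdec
       x = cdec.NT('X')              # NT(bytes cat, int ref=0)
       print(str(x))                 # '[X]'    (ref == 0 branch)
       print(str(cdec.NT('NP', 2)))  # '[NP,2]' (ref > 0 branch)
*/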
*__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_5NTRef___init__[] = "NTRef(int ref) -> Non-terminal reference."; +#if CYTHON_COMPILING_IN_CPYTHON +struct wrapperbase __pyx_wrapperbase_5_cdec_5NTRef___init__; +#endif static int __pyx_pw_5_cdec_5NTRef_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned int __pyx_v_ref; int __pyx_r; @@ -5399,18 +5394,18 @@ static int __pyx_pw_5_cdec_5NTRef_1__init__(PyObject *__pyx_v_self, PyObject *__ else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } - __pyx_v_ref = __Pyx_PyInt_AsUnsignedInt(values[0]); if (unlikely((__pyx_v_ref == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_ref = __Pyx_PyInt_AsUnsignedInt(values[0]); if (unlikely((__pyx_v_ref == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("_cdec.NTRef.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -5421,12 +5416,12 @@ static int __pyx_pw_5_cdec_5NTRef_1__init__(PyObject *__pyx_v_self, PyObject *__ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":22 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":23 * cdef class NTRef: * cdef public unsigned ref * def __init__(self, unsigned ref): # <<<<<<<<<<<<<< + * """NTRef(int ref) -> Non-terminal reference.""" * self.ref = ref - * */ static int __pyx_pf_5_cdec_5NTRef___init__(struct __pyx_obj_5_cdec_NTRef *__pyx_v_self, unsigned int __pyx_v_ref) { @@ -5434,9 +5429,9 @@ static int __pyx_pf_5_cdec_5NTRef___init__(struct __pyx_obj_5_cdec_NTRef *__pyx_ __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":23 - * cdef public unsigned ref + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":25 * def __init__(self, unsigned ref): + * """NTRef(int ref) -> Non-terminal reference.""" * self.ref = ref # <<<<<<<<<<<<<< * * def __str__(self): @@ -5459,7 +5454,7 @@ static PyObject *__pyx_pw_5_cdec_5NTRef_3__str__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":25 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":27 * self.ref = ref * * def __str__(self): # <<<<<<<<<<<<<< @@ -5477,7 +5472,7 @@ static PyObject *__pyx_pf_5_cdec_5NTRef_2__str__(struct __pyx_obj_5_cdec_NTRef * int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("__str__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":26 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":28 * * def __str__(self): * return '[%d]' % self.ref # <<<<<<<<<<<<<< @@ -5485,9 +5480,9 @@ static PyObject *__pyx_pf_5_cdec_5NTRef_2__str__(struct __pyx_obj_5_cdec_NTRef * * cdef TRule convert_rule(_sa.Rule rule): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyLong_FromUnsignedLong(__pyx_v_self->ref); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyLong_FromUnsignedLong(__pyx_v_self->ref); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_10), __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_10), __pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = ((PyObject *)__pyx_t_2); @@ -5518,12 +5513,12 @@ static PyObject *__pyx_pw_5_cdec_5NTRef_3ref_1__get__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":21 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":22 * * cdef class NTRef: * cdef public unsigned ref # <<<<<<<<<<<<<< * def __init__(self, unsigned ref): - * self.ref = ref + * """NTRef(int ref) -> Non-terminal reference.""" */ static PyObject *__pyx_pf_5_cdec_5NTRef_3ref___get__(struct __pyx_obj_5_cdec_NTRef *__pyx_v_self) { @@ -5535,7 +5530,7 @@ static PyObject *__pyx_pf_5_cdec_5NTRef_3ref___get__(struct __pyx_obj_5_cdec_NTR int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyLong_FromUnsignedLong(__pyx_v_self->ref); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyLong_FromUnsignedLong(__pyx_v_self->ref); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; @@ -5572,7 +5567,7 @@ static int __pyx_pf_5_cdec_5NTRef_3ref_2__set__(struct __pyx_obj_5_cdec_NTRef *_ const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); - __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_v_value); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_v_value); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->ref = __pyx_t_1; __pyx_r = 0; @@ -5585,7 +5580,7 @@ static int __pyx_pf_5_cdec_5NTRef_3ref_2__set__(struct __pyx_obj_5_cdec_NTRef *_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":28 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":30 * return '[%d]' % self.ref * * cdef TRule convert_rule(_sa.Rule rule): # <<<<<<<<<<<<<< @@ -5615,7 +5610,7 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o int 
__pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_rule", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":29 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":31 * * cdef TRule convert_rule(_sa.Rule rule): * lhs = _sa.sym_tocat(rule.lhs) # <<<<<<<<<<<<<< @@ -5624,41 +5619,41 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o */ __pyx_v_lhs = __pyx_f_4cdec_2sa_3_sa_sym_tocat(__pyx_v_rule->lhs); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":30 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":32 * cdef TRule convert_rule(_sa.Rule rule): * lhs = _sa.sym_tocat(rule.lhs) * scores = dict(rule.scores) # <<<<<<<<<<<<<< * f, e = [], [] * cdef int* fsyms = rule.f.syms */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_rule->scores)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_rule->scores)); __Pyx_GIVEREF(((PyObject *)__pyx_v_rule->scores)); - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyDict_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyDict_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_scores = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":31 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":33 * lhs = _sa.sym_tocat(rule.lhs) * scores = dict(rule.scores) * f, e = [], [] # <<<<<<<<<<<<<< * cdef int* fsyms = rule.f.syms * for i in range(rule.f.n): */ - __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_f = __pyx_t_2; __pyx_t_2 = 0; __pyx_v_e = __pyx_t_1; __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":32 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":34 * scores = dict(rule.scores) * f, e = [], [] * cdef int* fsyms = rule.f.syms # <<<<<<<<<<<<<< @@ -5667,7 +5662,7 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o */ __pyx_v_fsyms = __pyx_v_rule->f->syms; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":33 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":35 * f, e = [], [] * cdef int* fsyms = rule.f.syms * for i in range(rule.f.n): # <<<<<<<<<<<<<< @@ -5678,7 +5673,7 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o for (__pyx_t_4 = 0; __pyx_t_4 < 
__pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":34 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":36 * cdef int* fsyms = rule.f.syms * for i in range(rule.f.n): * if _sa.sym_isvar(fsyms[i]): # <<<<<<<<<<<<<< @@ -5688,45 +5683,45 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o __pyx_t_5 = __pyx_f_4cdec_2sa_3_sa_sym_isvar((__pyx_v_fsyms[__pyx_v_i])); if (__pyx_t_5) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":35 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":37 * for i in range(rule.f.n): * if _sa.sym_isvar(fsyms[i]): * f.append(NT(_sa.sym_tocat(fsyms[i]))) # <<<<<<<<<<<<<< * else: * f.append(_sa.sym_tostring(fsyms[i])) */ - __pyx_t_1 = PyBytes_FromString(__pyx_f_4cdec_2sa_3_sa_sym_tocat((__pyx_v_fsyms[__pyx_v_i]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(__pyx_f_4cdec_2sa_3_sa_sym_tocat((__pyx_v_fsyms[__pyx_v_i]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NT)), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NT)), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_6 = PyList_Append(__pyx_v_f, __pyx_t_1); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyList_Append(__pyx_v_f, __pyx_t_1); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L5; } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":37 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":39 * f.append(NT(_sa.sym_tocat(fsyms[i]))) * else: * f.append(_sa.sym_tostring(fsyms[i])) # <<<<<<<<<<<<<< * cdef int* esyms = rule.e.syms * for i in range(rule.e.n): */ - __pyx_t_1 = PyBytes_FromString(__pyx_f_4cdec_2sa_3_sa_sym_tostring((__pyx_v_fsyms[__pyx_v_i]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(__pyx_f_4cdec_2sa_3_sa_sym_tostring((__pyx_v_fsyms[__pyx_v_i]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_6 = PyList_Append(__pyx_v_f, ((PyObject *)__pyx_t_1)); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[2]; 
__pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyList_Append(__pyx_v_f, ((PyObject *)__pyx_t_1)); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; } __pyx_L5:; } - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":38 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":40 * else: * f.append(_sa.sym_tostring(fsyms[i])) * cdef int* esyms = rule.e.syms # <<<<<<<<<<<<<< @@ -5735,7 +5730,7 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o */ __pyx_v_esyms = __pyx_v_rule->e->syms; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":39 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":41 * f.append(_sa.sym_tostring(fsyms[i])) * cdef int* esyms = rule.e.syms * for i in range(rule.e.n): # <<<<<<<<<<<<<< @@ -5746,7 +5741,7 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":40 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":42 * cdef int* esyms = rule.e.syms * for i in range(rule.e.n): * if _sa.sym_isvar(esyms[i]): # <<<<<<<<<<<<<< @@ -5756,68 +5751,68 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o __pyx_t_5 = __pyx_f_4cdec_2sa_3_sa_sym_isvar((__pyx_v_esyms[__pyx_v_i])); if (__pyx_t_5) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":41 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":43 * for i in range(rule.e.n): * if _sa.sym_isvar(esyms[i]): * e.append(NTRef(_sa.sym_getindex(esyms[i]))) # <<<<<<<<<<<<<< * else: * e.append(_sa.sym_tostring(esyms[i])) */ - __pyx_t_1 = PyInt_FromLong(__pyx_f_4cdec_2sa_3_sa_sym_getindex((__pyx_v_esyms[__pyx_v_i]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyInt_FromLong(__pyx_f_4cdec_2sa_3_sa_sym_getindex((__pyx_v_esyms[__pyx_v_i]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NTRef)), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NTRef)), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_6 = PyList_Append(__pyx_v_e, __pyx_t_1); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyList_Append(__pyx_v_e, __pyx_t_1); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[2]; 
__pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L8; } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":43 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":45 * e.append(NTRef(_sa.sym_getindex(esyms[i]))) * else: * e.append(_sa.sym_tostring(esyms[i])) # <<<<<<<<<<<<<< * a = list(rule.alignments()) * return TRule(lhs, f, e, scores, a) */ - __pyx_t_1 = PyBytes_FromString(__pyx_f_4cdec_2sa_3_sa_sym_tostring((__pyx_v_esyms[__pyx_v_i]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(__pyx_f_4cdec_2sa_3_sa_sym_tostring((__pyx_v_esyms[__pyx_v_i]))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_6 = PyList_Append(__pyx_v_e, ((PyObject *)__pyx_t_1)); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyList_Append(__pyx_v_e, ((PyObject *)__pyx_t_1)); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; } __pyx_L8:; } - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":44 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":46 * else: * e.append(_sa.sym_tostring(esyms[i])) * a = list(rule.alignments()) # <<<<<<<<<<<<<< * return TRule(lhs, f, e, scores, a) * */ - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_rule), __pyx_n_s__alignments); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_rule), __pyx_n_s__alignments); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyList_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_a = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; - /* 
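/* The convert_rule hunks above are pure renumbering (grammar.pxi 28-46
   becomes 30-47). The cdef helper maps a suffix-array _sa.Rule onto the
   TRule constructed just below: source-side variables become NT objects,
   target-side variables become NTRef objects, terminals become byte
   strings. A rough Python paraphrase, where sym_isvar/sym_tocat/
   sym_getindex/sym_tostring stand for the _sa symbol helpers and
   f_syms/e_syms are hypothetical (the real code walks the rule's C symbol
   arrays); convert_rule itself is not callable from Python:

       def convert(rule):
           f = [NT(sym_tocat(s)) if sym_isvar(s) else sym_tostring(s)
                for s in f_syms(rule)]   # real code: rule.f.syms, rule.f.n
           e = [NTRef(sym_getindex(s)) if sym_isvar(s) else sym_tostring(s)
                for s in e_syms(rule)]   # real code: rule.e.syms, rule.e.n
           return TRule(sym_tocat(rule.lhs), f, e, dict(rule.scores),
                        list(rule.alignments()))
*/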
"/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":45 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":47 * e.append(_sa.sym_tostring(esyms[i])) * a = list(rule.alignments()) * return TRule(lhs, f, e, scores, a) # <<<<<<<<<<<<<< @@ -5825,9 +5820,9 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o * cdef class TRule: */ __Pyx_XDECREF(((PyObject *)__pyx_r)); - __pyx_t_2 = PyBytes_FromString(__pyx_v_lhs); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyBytes_FromString(__pyx_v_lhs); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_1 = PyTuple_New(5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_2)); __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); @@ -5844,7 +5839,7 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o PyTuple_SET_ITEM(__pyx_t_1, 4, ((PyObject *)__pyx_v_a)); __Pyx_GIVEREF(((PyObject *)__pyx_v_a)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_TRule)), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_TRule)), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_r = ((struct __pyx_obj_5_cdec_TRule *)__pyx_t_2); @@ -5870,6 +5865,10 @@ static struct __pyx_obj_5_cdec_TRule *__pyx_f_5_cdec_convert_rule(struct __pyx_o /* Python wrapper */ static int __pyx_pw_5_cdec_5TRule_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_5TRule___init__[] = "TRule(lhs, f, e, scores, a=None) -> Translation rule.\n lhs: left hand side non-terminal\n f: source phrase (list of words/NT)\n e: target phrase (list of words/NTRef)\n scores: dictionary of feature scores\n a: optional list of alignment points"; +#if CYTHON_COMPILING_IN_CPYTHON +struct wrapperbase __pyx_wrapperbase_5_cdec_5TRule___init__; +#endif static int __pyx_pw_5_cdec_5TRule_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_lhs = 0; PyObject *__pyx_v_f = 0; @@ -5883,12 +5882,12 @@ static int __pyx_pw_5_cdec_5TRule_1__init__(PyObject *__pyx_v_self, PyObject *__ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__lhs,&__pyx_n_s__f,&__pyx_n_s__e,&__pyx_n_s__scores,&__pyx_n_s__a,0}; PyObject* values[5] = {0,0,0,0,0}; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":50 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":52 * cdef shared_ptr[grammar.TRule]* rule * * def __init__(self, lhs, f, e, scores, a=None): # <<<<<<<<<<<<<< - * self.rule = new shared_ptr[grammar.TRule](new grammar.TRule()) - * self.lhs = lhs + * """TRule(lhs, f, e, scores, a=None) -> Translation rule. 
+ * lhs: left hand side non-terminal */ values[4] = ((PyObject *)Py_None); if (unlikely(__pyx_kwds)) { @@ -5911,17 +5910,17 @@ static int __pyx_pw_5_cdec_5TRule_1__init__(PyObject *__pyx_v_self, PyObject *__ case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__f)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__e)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, 2); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, 2); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__scores)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, 3); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, 3); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (kw_args > 0) { @@ -5930,7 +5929,7 @@ static int __pyx_pw_5_cdec_5TRule_1__init__(PyObject *__pyx_v_self, PyObject *__ } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -5951,7 +5950,7 @@ static int __pyx_pw_5_cdec_5TRule_1__init__(PyObject *__pyx_v_self, PyObject *__ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__init__", 0, 4, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("_cdec.TRule.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -5972,75 +5971,75 @@ static int __pyx_pf_5_cdec_5TRule___init__(struct __pyx_obj_5_cdec_TRule *__pyx_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":51 - * - * def __init__(self, lhs, f, e, scores, a=None): + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":59 + * scores: dictionary of feature scores + * a: optional list of alignment points""" * self.rule = new shared_ptr[grammar.TRule](new grammar.TRule()) # <<<<<<<<<<<<<< * self.lhs = lhs * self.e = e */ - try {__pyx_t_1 = new TRule();} catch(...) {__Pyx_CppExn2PyErr(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}} + try {__pyx_t_1 = new TRule();} catch(...) 
{__Pyx_CppExn2PyErr(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;}} __pyx_v_self->rule = new boost::shared_ptr<TRule>(__pyx_t_1); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":52 - * def __init__(self, lhs, f, e, scores, a=None): + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":60 + * a: optional list of alignment points""" * self.rule = new shared_ptr[grammar.TRule](new grammar.TRule()) * self.lhs = lhs # <<<<<<<<<<<<<< * self.e = e * self.f = f */ - if (PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__lhs, __pyx_v_lhs) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__lhs, __pyx_v_lhs) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":53 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":61 * self.rule = new shared_ptr[grammar.TRule](new grammar.TRule()) * self.lhs = lhs * self.e = e # <<<<<<<<<<<<<< * self.f = f * self.scores = scores */ - if (PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__e, __pyx_v_e) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__e, __pyx_v_e) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":54 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":62 * self.lhs = lhs * self.e = e * self.f = f # <<<<<<<<<<<<<< * self.scores = scores * if a: */ - if (PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__f, __pyx_v_f) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__f, __pyx_v_f) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":55 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":63 * self.e = e * self.f = f * self.scores = scores # <<<<<<<<<<<<<< * if a: * self.a = a */ - if (PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__scores, __pyx_v_scores) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__scores, __pyx_v_scores) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":56 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":64 * self.f = f * self.scores = scores * if a: # <<<<<<<<<<<<<< * self.a = a * self.rule.get().ComputeArity() */ - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_a); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_a); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":57 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":65 * self.scores = scores * if a: * self.a = a # <<<<<<<<<<<<<< * self.rule.get().ComputeArity() * */ - if (PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__a, 
__pyx_v_a) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__a, __pyx_v_a) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":58 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":66 * if a: * self.a = a * self.rule.get().ComputeArity() # <<<<<<<<<<<<<< @@ -6068,7 +6067,7 @@ static void __pyx_pw_5_cdec_5TRule_3__dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":60 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":68 * self.rule.get().ComputeArity() * * def __dealloc__(self): # <<<<<<<<<<<<<< @@ -6080,7 +6079,7 @@ static void __pyx_pf_5_cdec_5TRule_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_5 __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":61 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":69 * * def __dealloc__(self): * del self.rule # <<<<<<<<<<<<<< @@ -6103,7 +6102,7 @@ static PyObject *__pyx_pw_5_cdec_5TRule_5arity_1__get__(PyObject *__pyx_v_self) return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":64 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":72 * * property arity: * def __get__(self): # <<<<<<<<<<<<<< @@ -6120,7 +6119,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_5arity___get__(struct __pyx_obj_5_cdec_T int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":65 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":73 * property arity: * def __get__(self): * return self.rule.get().arity_ # <<<<<<<<<<<<<< @@ -6128,7 +6127,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_5arity___get__(struct __pyx_obj_5_cdec_T * property f: */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromLong(__pyx_v_self->rule->get()->arity_); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyInt_FromLong(__pyx_v_self->rule->get()->arity_); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; @@ -6157,7 +6156,7 @@ static PyObject *__pyx_pw_5_cdec_5TRule_1f_1__get__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":68 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":76 * * property f: * def __get__(self): # <<<<<<<<<<<<<< @@ -6184,7 +6183,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1f___get__(struct __pyx_obj_5_cdec_TRule int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":69 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":77 * property f: * def __get__(self): * cdef vector[WordID]* f_ = &self.rule.get().f_ # <<<<<<<<<<<<<< @@ -6193,19 +6192,19 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1f___get__(struct __pyx_obj_5_cdec_TRule */ __pyx_v_f_ = (&__pyx_v_self->rule->get()->f_); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":71 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":79 * cdef vector[WordID]* f_ = &self.rule.get().f_ * cdef WordID w * cdef f = [] # <<<<<<<<<<<<<< * cdef unsigned i * cdef int idx = 0 */ - __pyx_t_1 = 
PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_f = ((PyObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":73 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":81 * cdef f = [] * cdef unsigned i * cdef int idx = 0 # <<<<<<<<<<<<<< @@ -6214,7 +6213,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1f___get__(struct __pyx_obj_5_cdec_TRule */ __pyx_v_idx = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":74 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":82 * cdef unsigned i * cdef int idx = 0 * for i in range(f_.size()): # <<<<<<<<<<<<<< @@ -6225,7 +6224,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1f___get__(struct __pyx_obj_5_cdec_TRule for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":75 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":83 * cdef int idx = 0 * for i in range(f_.size()): * w = f_[0][i] # <<<<<<<<<<<<<< @@ -6234,7 +6233,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1f___get__(struct __pyx_obj_5_cdec_TRule */ __pyx_v_w = ((__pyx_v_f_[0])[__pyx_v_i]); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":76 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":84 * for i in range(f_.size()): * w = f_[0][i] * if w < 0: # <<<<<<<<<<<<<< @@ -6244,7 +6243,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1f___get__(struct __pyx_obj_5_cdec_TRule __pyx_t_4 = (__pyx_v_w < 0); if (__pyx_t_4) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":77 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":85 * w = f_[0][i] * if w < 0: * idx += 1 # <<<<<<<<<<<<<< @@ -6253,18 +6252,18 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1f___get__(struct __pyx_obj_5_cdec_TRule */ __pyx_v_idx = (__pyx_v_idx + 1); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":78 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":86 * if w < 0: * idx += 1 * f.append(NT(TDConvert(-w).c_str(), idx)) # <<<<<<<<<<<<<< * else: * f.append(unicode(TDConvert(w).c_str(), encoding='utf8')) */ - __pyx_t_1 = PyBytes_FromString(TD::Convert((-__pyx_v_w)).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(TD::Convert((-__pyx_v_w)).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_5 = PyInt_FromLong(__pyx_v_idx); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyInt_FromLong(__pyx_v_idx); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); 
@@ -6272,10 +6271,10 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1f___get__(struct __pyx_obj_5_cdec_TRule __Pyx_GIVEREF(__pyx_t_5); __pyx_t_1 = 0; __pyx_t_5 = 0; - __pyx_t_5 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NT)), ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NT)), ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyObject_Append(__pyx_v_f, __pyx_t_5); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = __Pyx_PyObject_Append(__pyx_v_f, __pyx_t_5); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; @@ -6283,28 +6282,28 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1f___get__(struct __pyx_obj_5_cdec_TRule } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":80 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":88 * f.append(NT(TDConvert(-w).c_str(), idx)) * else: * f.append(unicode(TDConvert(w).c_str(), encoding='utf8')) # <<<<<<<<<<<<<< * return f * */ - __pyx_t_6 = PyBytes_FromString(TD::Convert(__pyx_v_w).c_str()); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyBytes_FromString(TD::Convert(__pyx_v_w).c_str()); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_6)); __Pyx_GIVEREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__encoding), ((PyObject *)__pyx_n_s__utf8)) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_5), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyDict_SetItem(__pyx_t_6, ((PyObject *)__pyx_n_s__encoding), ((PyObject *)__pyx_n_s__utf8)) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_5), ((PyObject *)__pyx_t_6)); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyObject_Append(__pyx_v_f, __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = __Pyx_PyObject_Append(__pyx_v_f, __pyx_t_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; @@ -6312,7 +6311,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1f___get__(struct __pyx_obj_5_cdec_TRule __pyx_L5:; } - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":81 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":89 * else: * f.append(unicode(TDConvert(w).c_str(), encoding='utf8')) * return f # <<<<<<<<<<<<<< @@ -6350,7 +6349,7 @@ static int __pyx_pw_5_cdec_5TRule_1f_3__set__(PyObject *__pyx_v_self, PyObject * return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":83 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":91 * return f * * def __set__(self, f): # <<<<<<<<<<<<<< @@ -6362,6 +6361,7 @@ static int __pyx_pf_5_cdec_5TRule_1f_2__set__(struct __pyx_obj_5_cdec_TRule *__p std::vector<WordID> *__pyx_v_f_; unsigned int __pyx_v_i; CYTHON_UNUSED int __pyx_v_idx; + PyObject *__pyx_v_fi = NULL; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; @@ -6375,7 +6375,7 @@ static int __pyx_pf_5_cdec_5TRule_1f_2__set__(struct __pyx_obj_5_cdec_TRule *__p int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":84 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":92 * * def __set__(self, f): * cdef vector[WordID]* f_ = &self.rule.get().f_ # <<<<<<<<<<<<<< @@ -6384,17 +6384,17 @@ static int __pyx_pf_5_cdec_5TRule_1f_2__set__(struct __pyx_obj_5_cdec_TRule *__p */ __pyx_v_f_ = (&__pyx_v_self->rule->get()->f_); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":85 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":93 * def __set__(self, f): * cdef vector[WordID]* f_ = &self.rule.get().f_ * f_.resize(len(f)) # <<<<<<<<<<<<<< * cdef unsigned i * cdef int idx = 0 */ - __pyx_t_1 = PyObject_Length(__pyx_v_f); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 85; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Length(__pyx_v_f); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f_->resize(__pyx_t_1); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":87 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":95 * f_.resize(len(f)) * cdef unsigned i * cdef int idx = 0 # <<<<<<<<<<<<<< @@ -6403,25 +6403,25 @@ static int __pyx_pf_5_cdec_5TRule_1f_2__set__(struct __pyx_obj_5_cdec_TRule *__p */ __pyx_v_idx = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":88 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":96 * cdef unsigned i * cdef int idx = 0 * for i in range(len(f)): # <<<<<<<<<<<<<< * if isinstance(f[i], NT): - * f_[0][i] = -TDConvert(<char *>f[i].cat) + * f_[0][i] = -TDConvert((<NT> f[i]).cat) */ - __pyx_t_1 = PyObject_Length(__pyx_v_f); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; 
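/* The f getter hunks above are renumbering only. Paraphrased: negative
   WordIDs on the source side mark non-terminals and are numbered left to
   right; positive IDs decode to utf8 terminals. Here td_convert stands in
   for the C-level TD::Convert and source_word_ids is a hypothetical name
   for the f_ vector:

       f, idx = [], 0
       for w in source_word_ids:
           if w < 0:
               idx += 1
               f.append(cdec.NT(td_convert(-w), idx))
           else:
               f.append(td_convert(w).decode('utf8'))
*/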
__pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Length(__pyx_v_f); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":89 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":97 * cdef int idx = 0 * for i in range(len(f)): * if isinstance(f[i], NT): # <<<<<<<<<<<<<< - * f_[0][i] = -TDConvert(<char *>f[i].cat) + * f_[0][i] = -TDConvert((<NT> f[i]).cat) * else: */ - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_f, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_f, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = ((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NT)); __Pyx_INCREF(__pyx_t_4); @@ -6430,36 +6430,47 @@ static int __pyx_pf_5_cdec_5TRule_1f_2__set__(struct __pyx_obj_5_cdec_TRule *__p __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_5) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":90 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":98 * for i in range(len(f)): * if isinstance(f[i], NT): - * f_[0][i] = -TDConvert(<char *>f[i].cat) # <<<<<<<<<<<<<< + * f_[0][i] = -TDConvert((<NT> f[i]).cat) # <<<<<<<<<<<<<< * else: - * f_[0][i] = TDConvert(as_str(f[i])) + * fi = as_str(f[i]) */ - __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_f, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_f, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__cat); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = PyBytes_AsString(((PyObject *)((struct __pyx_obj_5_cdec_NT *)__pyx_t_4)->cat)); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_6 = PyBytes_AsString(__pyx_t_3); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - ((__pyx_v_f_[0])[__pyx_v_i]) = (-TD::Convert(((char *)__pyx_t_6))); + ((__pyx_v_f_[0])[__pyx_v_i]) = (-TD::Convert(__pyx_t_6)); goto __pyx_L5; } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":92 - * f_[0][i] = -TDConvert(<char *>f[i].cat) + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":100 + * f_[0][i] = -TDConvert((<NT> f[i]).cat) * else: - * f_[0][i] = TDConvert(as_str(f[i])) # <<<<<<<<<<<<<< + * fi = as_str(f[i]) # <<<<<<<<<<<<<< + * f_[0][i] = TDConvert(fi) * - * property e: */ - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_f, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[2]; __pyx_lineno 
= 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_f, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = ((PyObject *)__pyx_f_5_cdec_as_str(__pyx_t_4, NULL)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - ((__pyx_v_f_[0])[__pyx_v_i]) = TD::Convert(__pyx_f_5_cdec_as_str(__pyx_t_3, NULL)); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(((PyObject *)__pyx_v_fi)); + __pyx_v_fi = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":101 + * else: + * fi = as_str(f[i]) + * f_[0][i] = TDConvert(fi) # <<<<<<<<<<<<<< + * + * property e: + */ + __pyx_t_6 = PyBytes_AsString(((PyObject *)__pyx_v_fi)); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + ((__pyx_v_f_[0])[__pyx_v_i]) = TD::Convert(__pyx_t_6); } __pyx_L5:; } @@ -6472,6 +6483,7 @@ static int __pyx_pf_5_cdec_5TRule_1f_2__set__(struct __pyx_obj_5_cdec_TRule *__p __Pyx_AddTraceback("_cdec.TRule.f.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; + __Pyx_XDECREF(__pyx_v_fi); __Pyx_RefNannyFinishContext(); return __pyx_r; } @@ -6487,7 +6499,7 @@ static PyObject *__pyx_pw_5_cdec_5TRule_1e_1__get__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":95 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":104 * * property e: * def __get__(self): # <<<<<<<<<<<<<< @@ -6514,7 +6526,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1e___get__(struct __pyx_obj_5_cdec_TRule int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":96 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":105 * property e: * def __get__(self): * cdef vector[WordID]* e_ = &self.rule.get().e_ # <<<<<<<<<<<<<< @@ -6523,19 +6535,19 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1e___get__(struct __pyx_obj_5_cdec_TRule */ __pyx_v_e_ = (&__pyx_v_self->rule->get()->e_); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":98 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":107 * cdef vector[WordID]* e_ = &self.rule.get().e_ * cdef WordID w * cdef e = [] # <<<<<<<<<<<<<< * cdef unsigned i * cdef int idx = 0 */ - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_e = ((PyObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":100 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":109 * cdef e = [] * cdef unsigned i * cdef int idx = 0 # <<<<<<<<<<<<<< @@ -6544,7 +6556,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1e___get__(struct __pyx_obj_5_cdec_TRule */ __pyx_v_idx = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":101 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":110 * cdef unsigned i * cdef int idx = 0 * for i in range(e_.size()): # <<<<<<<<<<<<<< @@ -6555,7 +6567,7 @@ static PyObject 
*__pyx_pf_5_cdec_5TRule_1e___get__(struct __pyx_obj_5_cdec_TRule for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":102 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":111 * cdef int idx = 0 * for i in range(e_.size()): * w = e_[0][i] # <<<<<<<<<<<<<< @@ -6564,7 +6576,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1e___get__(struct __pyx_obj_5_cdec_TRule */ __pyx_v_w = ((__pyx_v_e_[0])[__pyx_v_i]); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":103 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":112 * for i in range(e_.size()): * w = e_[0][i] * if w < 1: # <<<<<<<<<<<<<< @@ -6574,7 +6586,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1e___get__(struct __pyx_obj_5_cdec_TRule __pyx_t_4 = (__pyx_v_w < 1); if (__pyx_t_4) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":104 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":113 * w = e_[0][i] * if w < 1: * idx += 1 # <<<<<<<<<<<<<< @@ -6583,24 +6595,24 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1e___get__(struct __pyx_obj_5_cdec_TRule */ __pyx_v_idx = (__pyx_v_idx + 1); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":105 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":114 * if w < 1: * idx += 1 * e.append(NTRef(1-w)) # <<<<<<<<<<<<<< * else: * e.append(unicode(TDConvert(w).c_str(), encoding='utf8')) */ - __pyx_t_1 = PyInt_FromLong((1 - __pyx_v_w)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyInt_FromLong((1 - __pyx_v_w)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NTRef)), ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NTRef)), ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_Append(__pyx_v_e, __pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = __Pyx_PyObject_Append(__pyx_v_e, __pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; @@ -6608,28 +6620,28 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1e___get__(struct __pyx_obj_5_cdec_TRule } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":107 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":116 * e.append(NTRef(1-w)) * else: * e.append(unicode(TDConvert(w).c_str(), encoding='utf8')) # 
<<<<<<<<<<<<<< * return e * */ - __pyx_t_5 = PyBytes_FromString(TD::Convert(__pyx_v_w).c_str()); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyBytes_FromString(TD::Convert(__pyx_v_w).c_str()); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); - if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__encoding), ((PyObject *)__pyx_n_s__utf8)) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyDict_SetItem(__pyx_t_5, ((PyObject *)__pyx_n_s__encoding), ((PyObject *)__pyx_n_s__utf8)) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_1), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_Append(__pyx_v_e, __pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = __Pyx_PyObject_Append(__pyx_v_e, __pyx_t_6); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; @@ -6637,7 +6649,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1e___get__(struct __pyx_obj_5_cdec_TRule __pyx_L5:; } - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":108 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":117 * else: * e.append(unicode(TDConvert(w).c_str(), encoding='utf8')) * return e # <<<<<<<<<<<<<< @@ -6675,7 +6687,7 @@ static int __pyx_pw_5_cdec_5TRule_1e_3__set__(PyObject *__pyx_v_self, PyObject * return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":110 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":119 * return e * * def __set__(self, e): # <<<<<<<<<<<<<< @@ -6686,6 +6698,7 @@ static int __pyx_pw_5_cdec_5TRule_1e_3__set__(PyObject *__pyx_v_self, PyObject * static int __pyx_pf_5_cdec_5TRule_1e_2__set__(struct __pyx_obj_5_cdec_TRule 
*__pyx_v_self, PyObject *__pyx_v_e) { std::vector<WordID> *__pyx_v_e_; unsigned int __pyx_v_i; + PyObject *__pyx_v_ei = NULL; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; @@ -6694,12 +6707,13 @@ static int __pyx_pf_5_cdec_5TRule_1e_2__set__(struct __pyx_obj_5_cdec_TRule *__p PyObject *__pyx_t_4 = NULL; int __pyx_t_5; WordID __pyx_t_6; + char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":111 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":120 * * def __set__(self, e): * cdef vector[WordID]* e_ = &self.rule.get().e_ # <<<<<<<<<<<<<< @@ -6708,35 +6722,35 @@ static int __pyx_pf_5_cdec_5TRule_1e_2__set__(struct __pyx_obj_5_cdec_TRule *__p */ __pyx_v_e_ = (&__pyx_v_self->rule->get()->e_); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":112 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":121 * def __set__(self, e): * cdef vector[WordID]* e_ = &self.rule.get().e_ * e_.resize(len(e)) # <<<<<<<<<<<<<< * cdef unsigned i * for i in range(len(e)): */ - __pyx_t_1 = PyObject_Length(__pyx_v_e); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Length(__pyx_v_e); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_e_->resize(__pyx_t_1); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":114 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":123 * e_.resize(len(e)) * cdef unsigned i * for i in range(len(e)): # <<<<<<<<<<<<<< * if isinstance(e[i], NTRef): * e_[0][i] = 1-e[i].ref */ - __pyx_t_1 = PyObject_Length(__pyx_v_e); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Length(__pyx_v_e); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":115 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":124 * cdef unsigned i * for i in range(len(e)): * if isinstance(e[i], NTRef): # <<<<<<<<<<<<<< * e_[0][i] = 1-e[i].ref * else: */ - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_e, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_e, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = ((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NTRef)); __Pyx_INCREF(__pyx_t_4); @@ -6745,39 +6759,53 @@ static int __pyx_pf_5_cdec_5TRule_1e_2__set__(struct __pyx_obj_5_cdec_TRule *__p __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_5) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":116 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":125 * for i in range(len(e)): * if isinstance(e[i], NTRef): * e_[0][i] = 1-e[i].ref # <<<<<<<<<<<<<< * else: - * e_[0][i] = TDConvert(as_str(e[i])) + * ei = as_str(e[i]) */ - __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_e, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if 
(!__pyx_t_4) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_e, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__ref); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s__ref); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyNumber_Subtract(__pyx_int_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyNumber_Subtract(__pyx_int_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_PyInt_from_py_WordID(__pyx_t_4); if (unlikely((__pyx_t_6 == (WordID)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = __Pyx_PyInt_from_py_WordID(__pyx_t_4); if (unlikely((__pyx_t_6 == (WordID)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; ((__pyx_v_e_[0])[__pyx_v_i]) = __pyx_t_6; goto __pyx_L5; } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":118 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":127 * e_[0][i] = 1-e[i].ref * else: - * e_[0][i] = TDConvert(as_str(e[i])) # <<<<<<<<<<<<<< + * ei = as_str(e[i]) # <<<<<<<<<<<<<< + * e_[0][i] = TDConvert(ei) * - * property a: */ - __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_e, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_e, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); - ((__pyx_v_e_[0])[__pyx_v_i]) = TD::Convert(__pyx_f_5_cdec_as_str(__pyx_t_4, NULL)); + __pyx_t_3 = ((PyObject *)__pyx_f_5_cdec_as_str(__pyx_t_4, NULL)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(((PyObject *)__pyx_v_ei)); + __pyx_v_ei = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":128 + * else: + * ei = as_str(e[i]) + * e_[0][i] = TDConvert(ei) # <<<<<<<<<<<<<< + * + * property a: + */ + __pyx_t_7 = PyBytes_AsString(((PyObject *)__pyx_v_ei)); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + ((__pyx_v_e_[0])[__pyx_v_i]) = TD::Convert(__pyx_t_7); } __pyx_L5:; } @@ -6790,6 +6818,7 @@ static int __pyx_pf_5_cdec_5TRule_1e_2__set__(struct __pyx_obj_5_cdec_TRule *__p __Pyx_AddTraceback("_cdec.TRule.e.__set__", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; + __Pyx_XDECREF(__pyx_v_ei); __Pyx_RefNannyFinishContext(); return __pyx_r; } @@ -6806,7 +6835,7 @@ static PyObject *__pyx_pw_5_cdec_5TRule_1a_1__get__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":121 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":131 * * property a: * def __get__(self): # <<<<<<<<<<<<<< @@ -6832,7 +6861,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_1a___get__(struct __pyx_obj_5_cdec_TRule __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_5TRule_1a_2generator2, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_5TRule_1a_2generator2, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -6869,9 +6898,9 @@ static PyObject *__pyx_gb_5_cdec_5TRule_1a_2generator2(__pyx_GeneratorObject *__ return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":123 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":133 * def __get__(self): * cdef unsigned i * cdef vector[grammar.AlignmentPoint]* a = &self.rule.get().a_ # <<<<<<<<<<<<<< @@ -6880,7 +6909,7 @@ static PyObject *__pyx_gb_5_cdec_5TRule_1a_2generator2(__pyx_GeneratorObject *__ */ __pyx_cur_scope->__pyx_v_a = (&__pyx_cur_scope->__pyx_v_self->rule->get()->a_); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":124 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":134 * cdef unsigned i * cdef vector[grammar.AlignmentPoint]* a = &self.rule.get().a_ * for i in range(a.size()): # <<<<<<<<<<<<<< @@ -6891,18 +6920,18 @@ static PyObject *__pyx_gb_5_cdec_5TRule_1a_2generator2(__pyx_GeneratorObject *__ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":125 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":135 * cdef vector[grammar.AlignmentPoint]* a = &self.rule.get().a_ * for i in range(a.size()): * yield (a[0][i].s_, a[0][i].t_) # <<<<<<<<<<<<<< * * def __set__(self, a): */ - __pyx_t_3 = PyInt_FromLong(((__pyx_cur_scope->__pyx_v_a[0])[__pyx_cur_scope->__pyx_v_i]).s_); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyInt_FromLong(((__pyx_cur_scope->__pyx_v_a[0])[__pyx_cur_scope->__pyx_v_i]).s_); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyInt_FromLong(((__pyx_cur_scope->__pyx_v_a[0])[__pyx_cur_scope->__pyx_v_i]).t_); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = 
PyInt_FromLong(((__pyx_cur_scope->__pyx_v_a[0])[__pyx_cur_scope->__pyx_v_i]).t_); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); @@ -6922,7 +6951,7 @@ static PyObject *__pyx_gb_5_cdec_5TRule_1a_2generator2(__pyx_GeneratorObject *__ __pyx_L6_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; @@ -6950,7 +6979,7 @@ static int __pyx_pw_5_cdec_5TRule_1a_4__set__(PyObject *__pyx_v_self, PyObject * return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":127 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":137 * yield (a[0][i].s_, a[0][i].t_) * * def __set__(self, a): # <<<<<<<<<<<<<< @@ -6979,7 +7008,7 @@ static int __pyx_pf_5_cdec_5TRule_1a_3__set__(struct __pyx_obj_5_cdec_TRule *__p int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":128 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":138 * * def __set__(self, a): * cdef vector[grammar.AlignmentPoint]* a_ = &self.rule.get().a_ # <<<<<<<<<<<<<< @@ -6988,35 +7017,35 @@ static int __pyx_pf_5_cdec_5TRule_1a_3__set__(struct __pyx_obj_5_cdec_TRule *__p */ __pyx_v_a_ = (&__pyx_v_self->rule->get()->a_); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":129 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":139 * def __set__(self, a): * cdef vector[grammar.AlignmentPoint]* a_ = &self.rule.get().a_ * a_.resize(len(a)) # <<<<<<<<<<<<<< * cdef unsigned i * cdef int s, t */ - __pyx_t_1 = PyObject_Length(__pyx_v_a); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Length(__pyx_v_a); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_a_->resize(__pyx_t_1); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":132 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":142 * cdef unsigned i * cdef int s, t * for i in range(len(a)): # <<<<<<<<<<<<<< * s, t = a[i] * a_[0][i] = grammar.AlignmentPoint(s, t) */ - __pyx_t_1 = PyObject_Length(__pyx_v_a); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Length(__pyx_v_a); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":133 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":143 * cdef int s, t * for i in range(len(a)): * s, t = a[i] # 
<<<<<<<<<<<<<< * a_[0][i] = grammar.AlignmentPoint(s, t) * */ - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_a, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_a, __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) { PyObject* sequence = __pyx_t_3; @@ -7028,7 +7057,7 @@ static int __pyx_pf_5_cdec_5TRule_1a_3__set__(struct __pyx_obj_5_cdec_TRule *__p if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[2]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { @@ -7041,14 +7070,14 @@ static int __pyx_pf_5_cdec_5TRule_1a_3__set__(struct __pyx_obj_5_cdec_TRule *__p __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else - __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { Py_ssize_t index = -1; - __pyx_t_6 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_7 = Py_TYPE(__pyx_t_6)->tp_iternext; @@ -7056,7 +7085,7 @@ static int __pyx_pf_5_cdec_5TRule_1a_3__set__(struct __pyx_obj_5_cdec_TRule *__p __Pyx_GOTREF(__pyx_t_4); index = 1; __pyx_t_5 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_5)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_5); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_7(__pyx_t_6), 2) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_IternextUnpackEndCheck(__pyx_t_7(__pyx_t_6), 2) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = NULL; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L6_unpacking_done; @@ -7064,17 +7093,17 @@ static int __pyx_pf_5_cdec_5TRule_1a_3__set__(struct __pyx_obj_5_cdec_TRule *__p __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_7 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[2]; __pyx_lineno = 
143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } - __pyx_t_8 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_8 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_9 = __Pyx_PyInt_AsInt(__pyx_t_5); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_9 = __Pyx_PyInt_AsInt(__pyx_t_5); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_s = __pyx_t_8; __pyx_v_t = __pyx_t_9; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":134 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":144 * for i in range(len(a)): * s, t = a[i] * a_[0][i] = grammar.AlignmentPoint(s, t) # <<<<<<<<<<<<<< @@ -7109,7 +7138,7 @@ static PyObject *__pyx_pw_5_cdec_5TRule_6scores_1__get__(PyObject *__pyx_v_self) return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":137 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":147 * * property scores: * def __get__(self): # <<<<<<<<<<<<<< @@ -7127,20 +7156,20 @@ static PyObject *__pyx_pf_5_cdec_5TRule_6scores___get__(struct __pyx_obj_5_cdec_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":138 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":148 * property scores: * def __get__(self): * cdef SparseVector scores = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * scores.vector = new FastSparseVector[double](self.rule.get().scores_) * return scores */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_scores = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":139 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":149 * def __get__(self): * cdef SparseVector scores = SparseVector.__new__(SparseVector) * scores.vector = new FastSparseVector[double](self.rule.get().scores_) # <<<<<<<<<<<<<< @@ -7149,7 +7178,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_6scores___get__(struct __pyx_obj_5_cdec_ */ __pyx_v_scores->vector = new FastSparseVector<double>(__pyx_v_self->rule->get()->scores_); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":140 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":150 * cdef SparseVector scores = 
SparseVector.__new__(SparseVector) * scores.vector = new FastSparseVector[double](self.rule.get().scores_) * return scores # <<<<<<<<<<<<<< @@ -7185,7 +7214,7 @@ static int __pyx_pw_5_cdec_5TRule_6scores_3__set__(PyObject *__pyx_v_self, PyObj return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":142 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":152 * return scores * * def __set__(self, scores): # <<<<<<<<<<<<<< @@ -7198,6 +7227,7 @@ static int __pyx_pf_5_cdec_5TRule_6scores_2__set__(struct __pyx_obj_5_cdec_TRule int __pyx_v_fid; float __pyx_v_fval; PyObject *__pyx_v_fname = NULL; + PyObject *__pyx_v_fn = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; @@ -7209,13 +7239,14 @@ static int __pyx_pf_5_cdec_5TRule_6scores_2__set__(struct __pyx_obj_5_cdec_TRule PyObject *__pyx_t_7 = NULL; PyObject *(*__pyx_t_8)(PyObject *); float __pyx_t_9; - int __pyx_t_10; + char *__pyx_t_10; + int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":143 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":153 * * def __set__(self, scores): * cdef FastSparseVector[double]* scores_ = &self.rule.get().scores_ # <<<<<<<<<<<<<< @@ -7224,7 +7255,7 @@ static int __pyx_pf_5_cdec_5TRule_6scores_2__set__(struct __pyx_obj_5_cdec_TRule */ __pyx_v_scores_ = (&__pyx_v_self->rule->get()->scores_); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":144 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":154 * def __set__(self, scores): * cdef FastSparseVector[double]* scores_ = &self.rule.get().scores_ * scores_.clear() # <<<<<<<<<<<<<< @@ -7233,23 +7264,23 @@ static int __pyx_pf_5_cdec_5TRule_6scores_2__set__(struct __pyx_obj_5_cdec_TRule */ __pyx_v_scores_->clear(); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":147 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":157 * cdef int fid * cdef float fval * for fname, fval in scores.items(): # <<<<<<<<<<<<<< - * fid = FDConvert(as_str(fname)) - * if fid < 0: raise KeyError(fname) + * fn = as_str(fname) + * fid = FDConvert(fn) */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_scores, __pyx_n_s__items); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(__pyx_v_scores, __pyx_n_s__items); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyList_CheckExact(__pyx_t_2) || PyTuple_CheckExact(__pyx_t_2)) { __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { - __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = Py_TYPE(__pyx_t_1)->tp_iternext; } @@ -7258,23 +7289,23 @@ static int __pyx_pf_5_cdec_5TRule_6scores_2__set__(struct __pyx_obj_5_cdec_TRule if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_1)) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; + __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_1)) { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; + __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_2 = __pyx_t_4(__pyx_t_1); if (unlikely(!__pyx_t_2)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } @@ -7290,7 +7321,7 @@ static int __pyx_pf_5_cdec_5TRule_6scores_2__set__(struct __pyx_obj_5_cdec_TRule if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { @@ -7303,14 +7334,14 @@ static int __pyx_pf_5_cdec_5TRule_6scores_2__set__(struct __pyx_obj_5_cdec_TRule __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); #else - __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = 
__pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { Py_ssize_t index = -1; - __pyx_t_7 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_7 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_8 = Py_TYPE(__pyx_t_7)->tp_iternext; @@ -7318,7 +7349,7 @@ static int __pyx_pf_5_cdec_5TRule_6scores_2__set__(struct __pyx_obj_5_cdec_TRule __Pyx_GOTREF(__pyx_t_5); index = 1; __pyx_t_6 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_6)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_6); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_8 = NULL; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L6_unpacking_done; @@ -7326,51 +7357,65 @@ static int __pyx_pf_5_cdec_5TRule_6scores_2__set__(struct __pyx_obj_5_cdec_TRule __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_8 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L6_unpacking_done:; } - __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_6); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_6); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_v_fname); __pyx_v_fname = __pyx_t_5; __pyx_t_5 = 0; __pyx_v_fval = __pyx_t_9; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":148 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":158 * cdef float fval * for fname, fval in scores.items(): - * fid = FDConvert(as_str(fname)) # <<<<<<<<<<<<<< + * fn = as_str(fname) # <<<<<<<<<<<<<< + * fid = FDConvert(fn) * if fid < 0: raise KeyError(fname) - * scores_.set_value(fid, fval) */ - __pyx_v_fid = FD::Convert(__pyx_f_5_cdec_as_str(__pyx_v_fname, NULL)); + __pyx_t_2 = ((PyObject *)__pyx_f_5_cdec_as_str(__pyx_v_fname, NULL)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_2); + __Pyx_XDECREF(((PyObject *)__pyx_v_fn)); + __pyx_v_fn = ((PyObject*)__pyx_t_2); + __pyx_t_2 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":149 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":159 * for fname, fval in scores.items(): - * fid = FDConvert(as_str(fname)) + * fn = as_str(fname) + * fid = FDConvert(fn) # <<<<<<<<<<<<<< + * if fid < 0: raise KeyError(fname) + * scores_.set_value(fid, fval) + */ + __pyx_t_10 = PyBytes_AsString(((PyObject *)__pyx_v_fn)); if (unlikely((!__pyx_t_10) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 159; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} + __pyx_v_fid = FD::Convert(__pyx_t_10); + + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":160 + * fn = as_str(fname) + * fid = FDConvert(fn) * if fid < 0: raise KeyError(fname) # <<<<<<<<<<<<<< * scores_.set_value(fid, fval) * */ - __pyx_t_10 = (__pyx_v_fid < 0); - if (__pyx_t_10) { - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_11 = (__pyx_v_fid < 0); + if (__pyx_t_11) { + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_fname); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_fname); __Pyx_GIVEREF(__pyx_v_fname); - __pyx_t_6 = PyObject_Call(__pyx_builtin_KeyError, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyObject_Call(__pyx_builtin_KeyError, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - {__pyx_filename = __pyx_f[2]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[2]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":150 - * fid = FDConvert(as_str(fname)) + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":161 + * fid = FDConvert(fn) * if fid < 0: raise KeyError(fname) * scores_.set_value(fid, fval) # <<<<<<<<<<<<<< * @@ -7392,6 +7437,7 @@ static int __pyx_pf_5_cdec_5TRule_6scores_2__set__(struct __pyx_obj_5_cdec_TRule __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_fname); + __Pyx_XDECREF(__pyx_v_fn); __Pyx_RefNannyFinishContext(); return __pyx_r; } @@ -7407,7 +7453,7 @@ static PyObject *__pyx_pw_5_cdec_5TRule_3lhs_1__get__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":153 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":164 * * property lhs: * def __get__(self): # <<<<<<<<<<<<<< @@ -7425,7 +7471,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_3lhs___get__(struct __pyx_obj_5_cdec_TRu int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":154 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":165 * property lhs: * def __get__(self): * return NT(TDConvert(-self.rule.get().lhs_).c_str()) # <<<<<<<<<<<<<< @@ -7433,14 +7479,14 @@ static PyObject *__pyx_pf_5_cdec_5TRule_3lhs___get__(struct __pyx_obj_5_cdec_TRu * def __set__(self, lhs): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyBytes_FromString(TD::Convert((-__pyx_v_self->rule->get()->lhs_)).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(TD::Convert((-__pyx_v_self->rule->get()->lhs_)).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 154; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NT)), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NT)), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; @@ -7471,7 +7517,7 @@ static int __pyx_pw_5_cdec_5TRule_3lhs_3__set__(PyObject *__pyx_v_self, PyObject return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":156 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":167 * return NT(TDConvert(-self.rule.get().lhs_).c_str()) * * def __set__(self, lhs): # <<<<<<<<<<<<<< @@ -7493,12 +7539,12 @@ static int __pyx_pf_5_cdec_5TRule_3lhs_2__set__(struct __pyx_obj_5_cdec_TRule *_ __Pyx_RefNannySetupContext("__set__", 0); __Pyx_INCREF(__pyx_v_lhs); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":157 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":168 * * def __set__(self, lhs): * if not isinstance(lhs, NT): # <<<<<<<<<<<<<< * lhs = NT(lhs) - * self.rule.get().lhs_ = -TDConvert(<char *>lhs.cat) + * self.rule.get().lhs_ = -TDConvert((<NT> lhs).cat) */ __pyx_t_1 = ((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NT)); __Pyx_INCREF(__pyx_t_1); @@ -7507,19 +7553,19 @@ static int __pyx_pf_5_cdec_5TRule_3lhs_2__set__(struct __pyx_obj_5_cdec_TRule *_ __pyx_t_3 = (!__pyx_t_2); if (__pyx_t_3) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":158 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":169 * def __set__(self, lhs): * if not isinstance(lhs, NT): * lhs = NT(lhs) # <<<<<<<<<<<<<< - * self.rule.get().lhs_ = -TDConvert(<char *>lhs.cat) + * self.rule.get().lhs_ = -TDConvert((<NT> lhs).cat) * */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_lhs); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_lhs); __Pyx_GIVEREF(__pyx_v_lhs); - __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NT)), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NT)), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_v_lhs); @@ -7529,18 +7575,15 @@ static int __pyx_pf_5_cdec_5TRule_3lhs_2__set__(struct __pyx_obj_5_cdec_TRule *_ } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":159 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":170 * 
if not isinstance(lhs, NT): * lhs = NT(lhs) - * self.rule.get().lhs_ = -TDConvert(<char *>lhs.cat) # <<<<<<<<<<<<<< + * self.rule.get().lhs_ = -TDConvert((<NT> lhs).cat) # <<<<<<<<<<<<<< * * def __str__(self): */ - __pyx_t_4 = PyObject_GetAttr(__pyx_v_lhs, __pyx_n_s__cat); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyBytes_AsString(__pyx_t_4); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_self->rule->get()->lhs_ = (-TD::Convert(((char *)__pyx_t_5))); + __pyx_t_5 = PyBytes_AsString(((PyObject *)((struct __pyx_obj_5_cdec_NT *)__pyx_v_lhs)->cat)); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_v_self->rule->get()->lhs_ = (-TD::Convert(__pyx_t_5)); __pyx_r = 0; goto __pyx_L0; @@ -7567,7 +7610,7 @@ static PyObject *__pyx_pw_5_cdec_5TRule_5__str__(PyObject *__pyx_v_self) { } static PyObject *__pyx_gb_5_cdec_5TRule_7__str___2generator19(__pyx_GeneratorObject *__pyx_generator, PyObject *__pyx_sent_value); /* proto */ -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":162 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":173 * * def __str__(self): * scores = ' '.join('%s=%s' % feat for feat in self.scores) # <<<<<<<<<<<<<< @@ -7593,7 +7636,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_7__str___genexpr(PyObject *__pyx_self) { __Pyx_INCREF(((PyObject *)__pyx_cur_scope->__pyx_outer_scope)); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_outer_scope); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_5TRule_7__str___2generator19, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_5TRule_7__str___2generator19, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -7629,15 +7672,15 @@ static PyObject *__pyx_gb_5_cdec_5TRule_7__str___2generator19(__pyx_GeneratorObj return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_self)) { __Pyx_RaiseClosureNameError("self"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_cur_scope->__pyx_outer_scope->__pyx_v_self), __pyx_n_s__scores); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_self)) { __Pyx_RaiseClosureNameError("self"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } + __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_cur_scope->__pyx_outer_scope->__pyx_v_self), __pyx_n_s__scores); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyList_CheckExact(__pyx_t_1) || PyTuple_CheckExact(__pyx_t_1)) { __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { - __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; } @@ -7646,23 +7689,23 @@ static PyObject *__pyx_gb_5_cdec_5TRule_7__str___2generator19(__pyx_GeneratorObj if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_2)) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; + __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_2)) { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; + __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_1); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_1 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_1)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } @@ -7673,7 +7716,7 @@ static PyObject *__pyx_gb_5_cdec_5TRule_7__str___2generator19(__pyx_GeneratorObj __Pyx_GIVEREF(__pyx_t_1); __pyx_cur_scope->__pyx_v_feat = __pyx_t_1; __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_11), __pyx_cur_scope->__pyx_v_feat); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_11), __pyx_cur_scope->__pyx_v_feat); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject 
*)__pyx_t_1)); __pyx_r = ((PyObject *)__pyx_t_1); __pyx_t_1 = 0; @@ -7692,7 +7735,7 @@ static PyObject *__pyx_gb_5_cdec_5TRule_7__str___2generator19(__pyx_GeneratorObj __Pyx_XGOTREF(__pyx_t_2); __pyx_t_3 = __pyx_cur_scope->__pyx_t_1; __pyx_t_4 = __pyx_cur_scope->__pyx_t_2; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyErr_SetNone(PyExc_StopIteration); @@ -7709,8 +7752,8 @@ static PyObject *__pyx_gb_5_cdec_5TRule_7__str___2generator19(__pyx_GeneratorObj return NULL; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":161 - * self.rule.get().lhs_ = -TDConvert(<char *>lhs.cat) +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":172 + * self.rule.get().lhs_ = -TDConvert((<NT> lhs).cat) * * def __str__(self): # <<<<<<<<<<<<<< * scores = ' '.join('%s=%s' % feat for feat in self.scores) @@ -7741,30 +7784,30 @@ static PyObject *__pyx_pf_5_cdec_5TRule_4__str__(struct __pyx_obj_5_cdec_TRule * __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":162 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":173 * * def __str__(self): * scores = ' '.join('%s=%s' % feat for feat in self.scores) # <<<<<<<<<<<<<< * return '%s ||| %s ||| %s ||| %s' % (self.lhs, * _phrase(self.f), _phrase(self.e), scores) */ - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_7), __pyx_n_s__join); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_7), __pyx_n_s__join); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __pyx_pf_5_cdec_5TRule_7__str___genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = __pyx_pf_5_cdec_5TRule_7__str___genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_v_scores = __pyx_t_2; __pyx_t_2 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":163 + /* 
"/home/vchahune/tools/cdec/python/src/grammar.pxi":174 * def __str__(self): * scores = ' '.join('%s=%s' % feat for feat in self.scores) * return '%s ||| %s ||| %s ||| %s' % (self.lhs, # <<<<<<<<<<<<<< @@ -7772,43 +7815,43 @@ static PyObject *__pyx_pf_5_cdec_5TRule_4__str__(struct __pyx_obj_5_cdec_TRule * * */ __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_cur_scope->__pyx_v_self), __pyx_n_s__lhs); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_cur_scope->__pyx_v_self), __pyx_n_s__lhs); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":164 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":175 * scores = ' '.join('%s=%s' % feat for feat in self.scores) * return '%s ||| %s ||| %s ||| %s' % (self.lhs, * _phrase(self.f), _phrase(self.e), scores) # <<<<<<<<<<<<<< * * cdef class MRule(TRule): */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s___phrase); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s___phrase); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_cur_scope->__pyx_v_self), __pyx_n_s__f); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_cur_scope->__pyx_v_self), __pyx_n_s__f); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___phrase); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s___phrase); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_GetAttr(((PyObject *)__pyx_cur_scope->__pyx_v_self), __pyx_n_s__e); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_GetAttr(((PyObject 
*)__pyx_cur_scope->__pyx_v_self), __pyx_n_s__e); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(__pyx_t_4, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); @@ -7822,7 +7865,7 @@ static PyObject *__pyx_pf_5_cdec_5TRule_4__str__(struct __pyx_obj_5_cdec_TRule * __pyx_t_2 = 0; __pyx_t_1 = 0; __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_12), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_12), ((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_r = ((PyObject *)__pyx_t_3); @@ -7849,31 +7892,24 @@ static PyObject *__pyx_pf_5_cdec_5TRule_4__str__(struct __pyx_obj_5_cdec_TRule * /* Python wrapper */ static int __pyx_pw_5_cdec_5MRule_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_5MRule___init__[] = "MRule(lhs, rhs, scores, a=None) -> Monolingual rule.\n lhs: left hand side non-terminal\n rhs: right hand side phrase (list of words/NT)\n scores: dictionary of feature scores"; +#if CYTHON_COMPILING_IN_CPYTHON +struct wrapperbase __pyx_wrapperbase_5_cdec_5MRule___init__; +#endif static int __pyx_pw_5_cdec_5MRule_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_lhs = 0; PyObject *__pyx_v_rhs = 0; PyObject *__pyx_v_scores = 0; - PyObject *__pyx_v_a = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__lhs,&__pyx_n_s__rhs,&__pyx_n_s__scores,&__pyx_n_s__a,0}; - PyObject* values[4] = {0,0,0,0}; - - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":167 - * - * cdef class MRule(TRule): - * def __init__(self, lhs, rhs, scores, a=None): # <<<<<<<<<<<<<< - * cdef unsigned i = 1 - * e = [] - */ - values[3] = ((PyObject *)Py_None); + static PyObject **__pyx_pyargnames[] = 
{&__pyx_n_s__lhs,&__pyx_n_s__rhs,&__pyx_n_s__scores,0}; + PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); @@ -7888,51 +7924,50 @@ static int __pyx_pw_5_cdec_5MRule_1__init__(PyObject *__pyx_v_self, PyObject *__ case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__rhs)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 4, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__scores)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 4, 2); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;} - } - case 3: - if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__a); - if (value) { values[3] = value; kw_args--; } + __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, 2); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v_lhs = values[0]; __pyx_v_rhs = values[1]; __pyx_v_scores = values[2]; - __pyx_v_a = values[3]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("_cdec.MRule.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_5_cdec_5MRule___init__(((struct __pyx_obj_5_cdec_MRule *)__pyx_v_self), __pyx_v_lhs, __pyx_v_rhs, __pyx_v_scores, __pyx_v_a); + __pyx_r = __pyx_pf_5_cdec_5MRule___init__(((struct __pyx_obj_5_cdec_MRule *)__pyx_v_self), __pyx_v_lhs, __pyx_v_rhs, __pyx_v_scores); __Pyx_RefNannyFinishContext(); return __pyx_r; } -static int 
__pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_v_self, PyObject *__pyx_v_lhs, PyObject *__pyx_v_rhs, PyObject *__pyx_v_scores, PyObject *__pyx_v_a) { +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":178 + * + * cdef class MRule(TRule): + * def __init__(self, lhs, rhs, scores): # <<<<<<<<<<<<<< + * """MRule(lhs, rhs, scores, a=None) -> Monolingual rule. + * lhs: left hand side non-terminal + */ + +static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_v_self, PyObject *__pyx_v_lhs, PyObject *__pyx_v_rhs, PyObject *__pyx_v_scores) { unsigned int __pyx_v_i; PyObject *__pyx_v_e = NULL; PyObject *__pyx_v_s = NULL; @@ -7950,28 +7985,28 @@ static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":168 - * cdef class MRule(TRule): - * def __init__(self, lhs, rhs, scores, a=None): + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":183 + * rhs: right hand side phrase (list of words/NT) + * scores: dictionary of feature scores""" * cdef unsigned i = 1 # <<<<<<<<<<<<<< * e = [] * for s in rhs: */ __pyx_v_i = 1; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":169 - * def __init__(self, lhs, rhs, scores, a=None): + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":184 + * scores: dictionary of feature scores""" * cdef unsigned i = 1 * e = [] # <<<<<<<<<<<<<< * for s in rhs: * if isinstance(s, NT): */ - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_e = __pyx_t_1; __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":170 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":185 * cdef unsigned i = 1 * e = [] * for s in rhs: # <<<<<<<<<<<<<< @@ -7982,7 +8017,7 @@ static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_ __pyx_t_1 = __pyx_v_rhs; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { - __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_rhs); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_rhs); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext; } @@ -7990,23 +8025,23 @@ static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_ if (!__pyx_t_3 && PyList_CheckExact(__pyx_t_1)) { if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; + __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) 
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_3 && PyTuple_CheckExact(__pyx_t_1)) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; + __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } @@ -8016,7 +8051,7 @@ static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_ __pyx_v_s = __pyx_t_4; __pyx_t_4 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":171 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":186 * e = [] * for s in rhs: * if isinstance(s, NT): # <<<<<<<<<<<<<< @@ -8029,27 +8064,27 @@ static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_5) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":172 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":187 * for s in rhs: * if isinstance(s, NT): * e.append(NTRef(i)) # <<<<<<<<<<<<<< * i += 1 * else: */ - __pyx_t_4 = PyLong_FromUnsignedLong(__pyx_v_i); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyLong_FromUnsignedLong(__pyx_v_i); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NTRef)), ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_NTRef)), ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __pyx_t_7 = PyList_Append(__pyx_v_e, __pyx_t_4); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} + __pyx_t_7 = PyList_Append(__pyx_v_e, __pyx_t_4); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":173 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":188 * if isinstance(s, NT): * e.append(NTRef(i)) * i += 1 # <<<<<<<<<<<<<< @@ -8061,27 +8096,27 @@ static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_ } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":175 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":190 * i += 1 * else: * e.append(s) # <<<<<<<<<<<<<< - * super(MRule, self).__init__(lhs, rhs, e, scores, a) + * super(MRule, self).__init__(lhs, rhs, e, scores, None) * */ - __pyx_t_7 = PyList_Append(__pyx_v_e, __pyx_v_s); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_7 = PyList_Append(__pyx_v_e, __pyx_v_s); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L5:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":176 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":191 * else: * e.append(s) - * super(MRule, self).__init__(lhs, rhs, e, scores, a) # <<<<<<<<<<<<<< + * super(MRule, self).__init__(lhs, rhs, e, scores, None) # <<<<<<<<<<<<<< * * cdef class Grammar: */ - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_MRule))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)((PyObject*)__pyx_ptype_5_cdec_MRule))); @@ -8089,13 +8124,13 @@ static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_ __Pyx_INCREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __pyx_t_4 = PyObject_Call(__pyx_builtin_super, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyObject_Call(__pyx_builtin_super, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s____init__); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(__pyx_t_4, __pyx_n_s____init__); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
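/* Note on the hunk above: this regeneration reflects a source-level change in
 * python/src/grammar.pxi where MRule.__init__ drops its alignment parameter
 * `a` and now always forwards None to TRule.__init__, i.e. the Cython source
 * went from `def __init__(self, lhs, rhs, scores, a=None)` to
 * `def __init__(self, lhs, rhs, scores)`, gaining a docstring in the process.
 * A minimal Python-level sketch of the new three-argument signature, assuming
 * the compiled _cdec extension is importable as `cdec` and using hypothetical
 * rule values (the NT/MRule names come from the generated code above):
 *
 *   import cdec
 *   # Per the generated loop above, nonterminals in rhs are replaced by
 *   # NTRef(1), NTRef(2), ... on the target side, so a monolingual rule
 *   # only needs a left-hand side, a right-hand side, and feature scores.
 *   rule = cdec.MRule('X', ['a', cdec.NT('X'), 'b'], {'EgivenF': 0.5})
 *   print(str(rule))  # rendered as 'lhs ||| f ||| e ||| scores' by TRule.__str__
 */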
__Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_lhs); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_lhs); @@ -8109,10 +8144,10 @@ static int __pyx_pf_5_cdec_5MRule___init__(struct __pyx_obj_5_cdec_MRule *__pyx_ __Pyx_INCREF(__pyx_v_scores); PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_scores); __Pyx_GIVEREF(__pyx_v_scores); - __Pyx_INCREF(__pyx_v_a); - PyTuple_SET_ITEM(__pyx_t_4, 4, __pyx_v_a); - __Pyx_GIVEREF(__pyx_v_a); - __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_INCREF(Py_None); + PyTuple_SET_ITEM(__pyx_t_4, 4, Py_None); + __Pyx_GIVEREF(Py_None); + __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; @@ -8142,7 +8177,7 @@ static void __pyx_pw_5_cdec_7Grammar_1__dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":181 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":196 * cdef shared_ptr[grammar.Grammar]* grammar * * def __dealloc__(self): # <<<<<<<<<<<<<< @@ -8154,7 +8189,7 @@ static void __pyx_pf_5_cdec_7Grammar___dealloc__(CYTHON_UNUSED struct __pyx_obj_ __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":182 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":197 * * def __dealloc__(self): * del self.grammar # <<<<<<<<<<<<<< @@ -8178,7 +8213,7 @@ static PyObject *__pyx_pw_5_cdec_7Grammar_3__iter__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":184 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":199 * del self.grammar * * def __iter__(self): # <<<<<<<<<<<<<< @@ -8204,7 +8239,7 @@ static PyObject *__pyx_pf_5_cdec_7Grammar_2__iter__(struct __pyx_obj_5_cdec_Gram __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_7Grammar_4generator3, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_7Grammar_4generator3, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -8239,9 +8274,9 @@ static PyObject *__pyx_gb_5_cdec_7Grammar_4generator3(__pyx_GeneratorObject *__p return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":185 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":200 * * def __iter__(self): * cdef grammar.const_GrammarIter* root = self.grammar.get().GetRoot() # <<<<<<<<<<<<<< @@ -8250,7 +8285,7 @@ static PyObject 
*__pyx_gb_5_cdec_7Grammar_4generator3(__pyx_GeneratorObject *__p */ __pyx_cur_scope->__pyx_v_root = __pyx_cur_scope->__pyx_v_self->grammar->get()->GetRoot(); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":186 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":201 * def __iter__(self): * cdef grammar.const_GrammarIter* root = self.grammar.get().GetRoot() * cdef grammar.const_RuleBin* rbin = root.GetRules() # <<<<<<<<<<<<<< @@ -8259,7 +8294,7 @@ static PyObject *__pyx_gb_5_cdec_7Grammar_4generator3(__pyx_GeneratorObject *__p */ __pyx_cur_scope->__pyx_v_rbin = __pyx_cur_scope->__pyx_v_root->GetRules(); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":189 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":204 * cdef TRule trule * cdef unsigned i * for i in range(rbin.GetNumRules()): # <<<<<<<<<<<<<< @@ -8270,23 +8305,23 @@ static PyObject *__pyx_gb_5_cdec_7Grammar_4generator3(__pyx_GeneratorObject *__p for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":190 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":205 * cdef unsigned i * for i in range(rbin.GetNumRules()): * trule = TRule.__new__(TRule) # <<<<<<<<<<<<<< * trule.rule = new shared_ptr[grammar.TRule](rbin.GetIthRule(i)) * yield trule */ - __pyx_t_3 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_TRule)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_TRule)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - if (!(likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5_cdec_TRule)))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5_cdec_TRule)))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XGOTREF(((PyObject *)__pyx_cur_scope->__pyx_v_trule)); __Pyx_XDECREF(((PyObject *)__pyx_cur_scope->__pyx_v_trule)); __Pyx_GIVEREF(__pyx_t_3); __pyx_cur_scope->__pyx_v_trule = ((struct __pyx_obj_5_cdec_TRule *)__pyx_t_3); __pyx_t_3 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":191 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":206 * for i in range(rbin.GetNumRules()): * trule = TRule.__new__(TRule) * trule.rule = new shared_ptr[grammar.TRule](rbin.GetIthRule(i)) # <<<<<<<<<<<<<< @@ -8295,7 +8330,7 @@ static PyObject *__pyx_gb_5_cdec_7Grammar_4generator3(__pyx_GeneratorObject *__p */ __pyx_cur_scope->__pyx_v_trule->rule = new boost::shared_ptr<TRule>(__pyx_cur_scope->__pyx_v_rbin->GetIthRule(__pyx_cur_scope->__pyx_v_i)); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":192 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":207 * trule = TRule.__new__(TRule) * trule.rule = new shared_ptr[grammar.TRule](rbin.GetIthRule(i)) * yield trule # <<<<<<<<<<<<<< @@ -8314,7 +8349,7 @@ static PyObject *__pyx_gb_5_cdec_7Grammar_4generator3(__pyx_GeneratorObject *__p __pyx_L6_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 207; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} } PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; @@ -8340,7 +8375,7 @@ static PyObject *__pyx_pw_5_cdec_7Grammar_4name_1__get__(PyObject *__pyx_v_self) return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":195 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":210 * * property name: * def __get__(self): # <<<<<<<<<<<<<< @@ -8358,21 +8393,21 @@ static PyObject *__pyx_pf_5_cdec_7Grammar_4name___get__(struct __pyx_obj_5_cdec_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":196 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":211 * property name: * def __get__(self): * str(self.grammar.get().GetGrammarName().c_str()) # <<<<<<<<<<<<<< * * def __set__(self, name): */ - __pyx_t_1 = PyBytes_FromString(__pyx_v_self->grammar->get()->GetGrammarName().c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(__pyx_v_self->grammar->get()->GetGrammarName().c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -8401,50 +8436,71 @@ static int __pyx_pw_5_cdec_7Grammar_4name_3__set__(PyObject *__pyx_v_self, PyObj return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":198 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":213 * str(self.grammar.get().GetGrammarName().c_str()) * * def __set__(self, name): # <<<<<<<<<<<<<< - * self.grammar.get().SetGrammarName(string(<char *>name)) - * + * name = as_str(name) + * self.grammar.get().SetGrammarName(name) */ static int __pyx_pf_5_cdec_7Grammar_4name_2__set__(struct __pyx_obj_5_cdec_Grammar *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations - char *__pyx_t_1; + PyObject *__pyx_t_1 = NULL; + std::string __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); + __Pyx_INCREF(__pyx_v_name); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":199 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":214 * * def __set__(self, name): - * self.grammar.get().SetGrammarName(string(<char *>name)) # <<<<<<<<<<<<<< + * name = as_str(name) # <<<<<<<<<<<<<< + * self.grammar.get().SetGrammarName(name) + * + */ + __pyx_t_1 = ((PyObject *)__pyx_f_5_cdec_as_str(__pyx_v_name, 
NULL)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_v_name); + __pyx_v_name = __pyx_t_1; + __pyx_t_1 = 0; + + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":215 + * def __set__(self, name): + * name = as_str(name) + * self.grammar.get().SetGrammarName(name) # <<<<<<<<<<<<<< * * cdef class TextGrammar(Grammar): */ - __pyx_t_1 = PyBytes_AsString(__pyx_v_name); if (unlikely((!__pyx_t_1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_self->grammar->get()->SetGrammarName(std::string(((char *)__pyx_t_1))); + __pyx_t_2 = __pyx_convert_string_from_py_(__pyx_v_name); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_v_self->grammar->get()->SetGrammarName(__pyx_t_2); __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("_cdec.Grammar.name.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; + __Pyx_XDECREF(__pyx_v_name); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Python wrapper */ -static int __pyx_pw_5_cdec_11TextGrammar_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_pw_5_cdec_11TextGrammar_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { +static int __pyx_pw_5_cdec_11TextGrammar_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_11TextGrammar___init__[] = "TextGrammar(rules) -> SCFG Grammar containing the rules."; +#if CYTHON_COMPILING_IN_CPYTHON +struct wrapperbase __pyx_wrapperbase_5_cdec_11TextGrammar___init__; +#endif +static int __pyx_pw_5_cdec_11TextGrammar_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_rules = 0; int __pyx_r; __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__rules,0}; PyObject* values[1] = {0}; @@ -8463,7 +8519,7 @@ static int __pyx_pw_5_cdec_11TextGrammar_1__cinit__(PyObject *__pyx_v_self, PyOb else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; @@ -8474,26 +8530,26 @@ static int __pyx_pw_5_cdec_11TextGrammar_1__cinit__(PyObject *__pyx_v_self, PyOb } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 202; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; - __Pyx_AddTraceback("_cdec.TextGrammar.__cinit__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_AddTraceback("_cdec.TextGrammar.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_5_cdec_11TextGrammar___cinit__(((struct __pyx_obj_5_cdec_TextGrammar *)__pyx_v_self), __pyx_v_rules); + __pyx_r = __pyx_pf_5_cdec_11TextGrammar___init__(((struct __pyx_obj_5_cdec_TextGrammar *)__pyx_v_self), __pyx_v_rules); __Pyx_RefNannyFinishContext(); return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":202 +/* "/home/vchahune/tools/cdec/python/src/grammar.pxi":218 * * cdef class TextGrammar(Grammar): - * def __cinit__(self, rules): # <<<<<<<<<<<<<< + * def __init__(self, rules): # <<<<<<<<<<<<<< + * """TextGrammar(rules) -> SCFG Grammar containing the rules.""" * self.grammar = new shared_ptr[grammar.Grammar](new grammar.TextGrammar()) - * cdef grammar.TextGrammar* _g = <grammar.TextGrammar*> self.grammar.get() */ -static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextGrammar *__pyx_v_self, PyObject *__pyx_v_rules) { +static int __pyx_pf_5_cdec_11TextGrammar___init__(struct __pyx_obj_5_cdec_TextGrammar *__pyx_v_self, PyObject *__pyx_v_rules) { TextGrammar *__pyx_v__g; PyObject *__pyx_v_trule = NULL; int __pyx_r; @@ -8508,19 +8564,19 @@ static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextG int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); + __Pyx_RefNannySetupContext("__init__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":203 - * cdef class TextGrammar(Grammar): - * def __cinit__(self, rules): + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":220 + * def __init__(self, rules): + * """TextGrammar(rules) -> SCFG Grammar containing the rules.""" * self.grammar = new shared_ptr[grammar.Grammar](new grammar.TextGrammar()) # <<<<<<<<<<<<<< * cdef grammar.TextGrammar* _g = <grammar.TextGrammar*> self.grammar.get() * for trule in rules: */ __pyx_v_self->__pyx_base.grammar = new boost::shared_ptr<Grammar>(new TextGrammar()); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":204 - * def __cinit__(self, rules): + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":221 + * """TextGrammar(rules) -> SCFG Grammar containing the rules.""" * self.grammar = new shared_ptr[grammar.Grammar](new grammar.TextGrammar()) * cdef grammar.TextGrammar* _g = <grammar.TextGrammar*> self.grammar.get() # <<<<<<<<<<<<<< * for trule in rules: @@ -8528,7 +8584,7 @@ static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextG */ __pyx_v__g = ((TextGrammar *)__pyx_v_self->__pyx_base.grammar->get()); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":205 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":222 * self.grammar = new shared_ptr[grammar.Grammar](new grammar.TextGrammar()) * cdef grammar.TextGrammar* _g = <grammar.TextGrammar*> self.grammar.get() * for trule in rules: # <<<<<<<<<<<<<< @@ -8539,7 +8595,7 @@ static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextG __pyx_t_1 = __pyx_v_rules; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { - __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_rules); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_rules); if (unlikely(!__pyx_t_1)) {__pyx_filename = 
__pyx_f[2]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext; } @@ -8547,23 +8603,23 @@ static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextG if (!__pyx_t_3 && PyList_CheckExact(__pyx_t_1)) { if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; + __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_3 && PyTuple_CheckExact(__pyx_t_1)) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; + __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 205; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } @@ -8573,7 +8629,7 @@ static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextG __pyx_v_trule = __pyx_t_4; __pyx_t_4 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":206 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":223 * cdef grammar.TextGrammar* _g = <grammar.TextGrammar*> self.grammar.get() * for trule in rules: * if isinstance(trule, _sa.Rule): # <<<<<<<<<<<<<< @@ -8586,17 +8642,17 @@ static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextG __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_5) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":207 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":224 * for trule in rules: * if isinstance(trule, _sa.Rule): * trule = convert_rule(trule) # <<<<<<<<<<<<<< * elif not isinstance(trule, TRule): * raise ValueError('the grammar should contain TRule objects') */ - if (!(likely(((__pyx_v_trule) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_trule, __pyx_ptype_4cdec_2sa_3_sa_Rule))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(((__pyx_v_trule) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_trule, 
__pyx_ptype_4cdec_2sa_3_sa_Rule))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __pyx_v_trule; __Pyx_INCREF(__pyx_t_4); - __pyx_t_6 = ((PyObject *)__pyx_f_5_cdec_convert_rule(((struct __pyx_obj_4cdec_2sa_3_sa_Rule *)__pyx_t_4))); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = ((PyObject *)__pyx_f_5_cdec_convert_rule(((struct __pyx_obj_4cdec_2sa_3_sa_Rule *)__pyx_t_4))); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_v_trule); @@ -8605,7 +8661,7 @@ static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextG goto __pyx_L5; } - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":208 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":225 * if isinstance(trule, _sa.Rule): * trule = convert_rule(trule) * elif not isinstance(trule, TRule): # <<<<<<<<<<<<<< @@ -8619,22 +8675,22 @@ static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextG __pyx_t_7 = (!__pyx_t_5); if (__pyx_t_7) { - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":209 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":226 * trule = convert_rule(trule) * elif not isinstance(trule, TRule): * raise ValueError('the grammar should contain TRule objects') # <<<<<<<<<<<<<< * _g.AddRule((<TRule> trule).rule[0]) */ - __pyx_t_6 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - {__pyx_filename = __pyx_f[2]; __pyx_lineno = 209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":210 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":227 * elif not isinstance(trule, TRule): * raise ValueError('the grammar should contain TRule objects') * _g.AddRule((<TRule> trule).rule[0]) # <<<<<<<<<<<<<< @@ -8649,7 +8705,7 @@ static int __pyx_pf_5_cdec_11TextGrammar___cinit__(struct __pyx_obj_5_cdec_TextG __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("_cdec.TextGrammar.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_AddTraceback("_cdec.TextGrammar.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_trule); @@ -8666,7 +8722,7 @@ static void __pyx_pw_5_cdec_10Hypergraph_1__dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":8 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":8 * cdef MT19937* rng * * def __dealloc__(self): # <<<<<<<<<<<<<< @@ -8679,7 +8735,7 @@ static void __pyx_pf_5_cdec_10Hypergraph___dealloc__(struct __pyx_obj_5_cdec_Hyp int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); - /* 
"/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":9 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":9 * * def __dealloc__(self): * del self.hg # <<<<<<<<<<<<<< @@ -8688,7 +8744,7 @@ static void __pyx_pf_5_cdec_10Hypergraph___dealloc__(struct __pyx_obj_5_cdec_Hyp */ delete __pyx_v_self->hg; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":10 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":10 * def __dealloc__(self): * del self.hg * if self.rng != NULL: # <<<<<<<<<<<<<< @@ -8698,7 +8754,7 @@ static void __pyx_pf_5_cdec_10Hypergraph___dealloc__(struct __pyx_obj_5_cdec_Hyp __pyx_t_1 = (__pyx_v_self->rng != NULL); if (__pyx_t_1) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":11 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":11 * del self.hg * if self.rng != NULL: * del self.rng # <<<<<<<<<<<<<< @@ -8713,7 +8769,7 @@ static void __pyx_pf_5_cdec_10Hypergraph___dealloc__(struct __pyx_obj_5_cdec_Hyp __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":13 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":13 * del self.rng * * cdef MT19937* _rng(self): # <<<<<<<<<<<<<< @@ -8731,7 +8787,7 @@ static MT19937 *__pyx_f_5_cdec_10Hypergraph__rng(struct __pyx_obj_5_cdec_Hypergr int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_rng", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":14 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":14 * * cdef MT19937* _rng(self): * if self.rng == NULL: # <<<<<<<<<<<<<< @@ -8741,7 +8797,7 @@ static MT19937 *__pyx_f_5_cdec_10Hypergraph__rng(struct __pyx_obj_5_cdec_Hypergr __pyx_t_1 = (__pyx_v_self->rng == NULL); if (__pyx_t_1) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":15 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":15 * cdef MT19937* _rng(self): * if self.rng == NULL: * self.rng = new MT19937() # <<<<<<<<<<<<<< @@ -8754,7 +8810,7 @@ static MT19937 *__pyx_f_5_cdec_10Hypergraph__rng(struct __pyx_obj_5_cdec_Hypergr } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":16 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":16 * if self.rng == NULL: * self.rng = new MT19937() * return self.rng # <<<<<<<<<<<<<< @@ -8776,6 +8832,7 @@ static MT19937 *__pyx_f_5_cdec_10Hypergraph__rng(struct __pyx_obj_5_cdec_Hypergr /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_3viterbi(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_2viterbi[] = "hg.viterbi() -> String for the best hypothesis in the hypergraph."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_3viterbi(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -8785,12 +8842,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_3viterbi(PyObject *__pyx_v_self, C return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":18 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":18 * return self.rng * * def viterbi(self): # <<<<<<<<<<<<<< + * """hg.viterbi() -> String for the best hypothesis in the hypergraph.""" * cdef vector[WordID] trans - * hypergraph.ViterbiESentence(self.hg[0], &trans) */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_2viterbi(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self) { @@ -8804,8 +8861,8 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_2viterbi(struct __pyx_obj_5_cdec_H int __pyx_clineno = 0; __Pyx_RefNannySetupContext("viterbi", 0); - /* 
"/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":20 - * def viterbi(self): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":21 + * """hg.viterbi() -> String for the best hypothesis in the hypergraph.""" * cdef vector[WordID] trans * hypergraph.ViterbiESentence(self.hg[0], &trans) # <<<<<<<<<<<<<< * return unicode(GetString(trans).c_str(), 'utf8') @@ -8813,7 +8870,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_2viterbi(struct __pyx_obj_5_cdec_H */ ViterbiESentence((__pyx_v_self->hg[0]), (&__pyx_v_trans)); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":21 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":22 * cdef vector[WordID] trans * hypergraph.ViterbiESentence(self.hg[0], &trans) * return unicode(GetString(trans).c_str(), 'utf8') # <<<<<<<<<<<<<< @@ -8821,9 +8878,9 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_2viterbi(struct __pyx_obj_5_cdec_H * def viterbi_trees(self): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyBytes_FromString(TD::GetString(__pyx_v_trans).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(TD::GetString(__pyx_v_trans).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); @@ -8831,7 +8888,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_2viterbi(struct __pyx_obj_5_cdec_H PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; @@ -8853,6 +8910,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_2viterbi(struct __pyx_obj_5_cdec_H /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_5viterbi_trees(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_4viterbi_trees[] = "hg.viterbi_trees() -> (f_tree, e_tree)\n f_tree: Source tree for the best hypothesis in the hypergraph.\n e_tree: Target tree for the best hypothesis in the hypergraph.\n "; static PyObject *__pyx_pw_5_cdec_10Hypergraph_5viterbi_trees(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -8862,12 +8920,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_5viterbi_trees(PyObject *__pyx_v_s return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":23 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":24 * return unicode(GetString(trans).c_str(), 
'utf8') * * def viterbi_trees(self): # <<<<<<<<<<<<<< - * f_tree = unicode(hypergraph.ViterbiFTree(self.hg[0]).c_str(), 'utf8') - * e_tree = unicode(hypergraph.ViterbiETree(self.hg[0]).c_str(), 'utf8') + * """hg.viterbi_trees() -> (f_tree, e_tree) + * f_tree: Source tree for the best hypothesis in the hypergraph. */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_4viterbi_trees(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self) { @@ -8882,16 +8940,16 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_4viterbi_trees(struct __pyx_obj_5_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("viterbi_trees", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":24 - * - * def viterbi_trees(self): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":29 + * e_tree: Target tree for the best hypothesis in the hypergraph. + * """ * f_tree = unicode(hypergraph.ViterbiFTree(self.hg[0]).c_str(), 'utf8') # <<<<<<<<<<<<<< * e_tree = unicode(hypergraph.ViterbiETree(self.hg[0]).c_str(), 'utf8') * return (f_tree, e_tree) */ - __pyx_t_1 = PyBytes_FromString(ViterbiFTree((__pyx_v_self->hg[0])).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(ViterbiFTree((__pyx_v_self->hg[0])).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); @@ -8899,22 +8957,22 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_4viterbi_trees(struct __pyx_obj_5_ PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_f_tree = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":25 - * def viterbi_trees(self): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":30 + * """ * f_tree = unicode(hypergraph.ViterbiFTree(self.hg[0]).c_str(), 'utf8') * e_tree = unicode(hypergraph.ViterbiETree(self.hg[0]).c_str(), 'utf8') # <<<<<<<<<<<<<< * return (f_tree, e_tree) * */ - __pyx_t_1 = PyBytes_FromString(ViterbiETree((__pyx_v_self->hg[0])).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(ViterbiETree((__pyx_v_self->hg[0])).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(2); if 
(unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); @@ -8922,13 +8980,13 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_4viterbi_trees(struct __pyx_obj_5_ PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_v_e_tree = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":26 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":31 * f_tree = unicode(hypergraph.ViterbiFTree(self.hg[0]).c_str(), 'utf8') * e_tree = unicode(hypergraph.ViterbiETree(self.hg[0]).c_str(), 'utf8') * return (f_tree, e_tree) # <<<<<<<<<<<<<< @@ -8936,7 +8994,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_4viterbi_trees(struct __pyx_obj_5_ * def viterbi_features(self): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_f_tree)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_f_tree)); @@ -8965,6 +9023,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_4viterbi_trees(struct __pyx_obj_5_ /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_7viterbi_features(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_6viterbi_features[] = "hg.viterbi_features() -> SparseVector with the features corresponding\n to the best derivation in the hypergraph."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_7viterbi_features(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -8974,12 +9033,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_7viterbi_features(PyObject *__pyx_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":28 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":33 * return (f_tree, e_tree) * * def viterbi_features(self): # <<<<<<<<<<<<<< - * cdef SparseVector fmap = SparseVector.__new__(SparseVector) - * fmap.vector = new FastSparseVector[weight_t](hypergraph.ViterbiFeatures(self.hg[0])) + * """hg.viterbi_features() -> SparseVector with the features corresponding + * to the best derivation in the hypergraph.""" */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_6viterbi_features(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self) { @@ -8992,21 +9051,21 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_6viterbi_features(struct 
__pyx_obj int __pyx_clineno = 0; __Pyx_RefNannySetupContext("viterbi_features", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":29 - * - * def viterbi_features(self): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":36 + * """hg.viterbi_features() -> SparseVector with the features corresponding + * to the best derivation in the hypergraph.""" * cdef SparseVector fmap = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * fmap.vector = new FastSparseVector[weight_t](hypergraph.ViterbiFeatures(self.hg[0])) * return fmap */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_fmap = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":30 - * def viterbi_features(self): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":37 + * to the best derivation in the hypergraph.""" * cdef SparseVector fmap = SparseVector.__new__(SparseVector) * fmap.vector = new FastSparseVector[weight_t](hypergraph.ViterbiFeatures(self.hg[0])) # <<<<<<<<<<<<<< * return fmap @@ -9014,7 +9073,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_6viterbi_features(struct __pyx_obj */ __pyx_v_fmap->vector = new FastSparseVector<weight_t>(ViterbiFeatures((__pyx_v_self->hg[0]))); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":31 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":38 * cdef SparseVector fmap = SparseVector.__new__(SparseVector) * fmap.vector = new FastSparseVector[weight_t](hypergraph.ViterbiFeatures(self.hg[0])) * return fmap # <<<<<<<<<<<<<< @@ -9041,6 +9100,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_6viterbi_features(struct __pyx_obj /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_9viterbi_joshua(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_8viterbi_joshua[] = "hg.viterbi_joshua() -> Joshua representation of the best derivation."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_9viterbi_joshua(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -9050,12 +9110,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_9viterbi_joshua(PyObject *__pyx_v_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":33 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":40 * return fmap * * def viterbi_joshua(self): # <<<<<<<<<<<<<< + * """hg.viterbi_joshua() -> Joshua representation of the best derivation.""" * return unicode(hypergraph.JoshuaVisualizationString(self.hg[0]).c_str(), 'utf8') - * */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_8viterbi_joshua(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self) { @@ -9068,17 +9128,17 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_8viterbi_joshua(struct __pyx_obj_5 
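
`viterbi_features()` allocates a fresh `SparseVector` and copies the feature vector of the best derivation (`ViterbiFeatures`) into it. A sketch with the same assumed `hg`; the `dot` call against a weight vector is an assumption, not shown in this hunk:

    fmap = hg.viterbi_features()  # SparseVector for the 1-best derivation
    # model_score = fmap.dot(weights)  # assumed SparseVector API; weights defined elsewhere
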
int __pyx_clineno = 0; __Pyx_RefNannySetupContext("viterbi_joshua", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":34 - * + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":42 * def viterbi_joshua(self): + * """hg.viterbi_joshua() -> Joshua representation of the best derivation.""" * return unicode(hypergraph.JoshuaVisualizationString(self.hg[0]).c_str(), 'utf8') # <<<<<<<<<<<<<< * * def kbest(self, size): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyBytes_FromString(JoshuaVisualizationString((__pyx_v_self->hg[0])).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(JoshuaVisualizationString((__pyx_v_self->hg[0])).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); @@ -9086,7 +9146,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_8viterbi_joshua(struct __pyx_obj_5 PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; @@ -9109,6 +9169,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_12generator4(__pyx_GeneratorObject /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_11kbest(PyObject *__pyx_v_self, PyObject *__pyx_v_size); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_10kbest[] = "hg.kbest(size) -> List of k-best hypotheses in the hypergraph."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_11kbest(PyObject *__pyx_v_self, PyObject *__pyx_v_size) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -9118,12 +9179,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_11kbest(PyObject *__pyx_v_self, Py return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":36 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":44 * return unicode(hypergraph.JoshuaVisualizationString(self.hg[0]).c_str(), 'utf8') * * def kbest(self, size): # <<<<<<<<<<<<<< + * """hg.kbest(size) -> List of k-best hypotheses in the hypergraph.""" * cdef kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal]* derivations = new kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal](self.hg[0], size) - * cdef kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal].Derivation* derivation */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_10kbest(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self, PyObject *__pyx_v_size) { @@ -9147,7 +9208,7 @@ static PyObject 
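
`viterbi_joshua()` wraps `JoshuaVisualizationString`, so the best derivation can be inspected in the same rendering the Joshua decoder uses:

    print(hg.viterbi_joshua())  # best derivation, Joshua-style rendering
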
*__pyx_pf_5_cdec_10Hypergraph_10kbest(struct __pyx_obj_5_cdec_Hy __Pyx_INCREF(__pyx_cur_scope->__pyx_v_size); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_size); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_12generator4, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_12generator4, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -9184,19 +9245,19 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_12generator4(__pyx_GeneratorObject return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":37 - * + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":46 * def kbest(self, size): + * """hg.kbest(size) -> List of k-best hypotheses in the hypergraph.""" * cdef kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal]* derivations = new kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal](self.hg[0], size) # <<<<<<<<<<<<<< * cdef kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal].Derivation* derivation * cdef unsigned k */ - __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_cur_scope->__pyx_v_derivations = new KBest::KBestDerivations<std::vector<WordID>,ESentenceTraversal>((__pyx_cur_scope->__pyx_v_self->hg[0]), __pyx_t_1); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":40 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":49 * cdef kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal].Derivation* derivation * cdef unsigned k * try: # <<<<<<<<<<<<<< @@ -9205,18 +9266,18 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_12generator4(__pyx_GeneratorObject */ /*try:*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":41 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":50 * cdef unsigned k * try: * for k in range(size): # <<<<<<<<<<<<<< * derivation = derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * if not derivation: break */ - __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L5;} for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_2; __pyx_t_1+=1) { __pyx_cur_scope->__pyx_v_k = __pyx_t_1; - 
/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":42 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":51 * try: * for k in range(size): * derivation = derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) # <<<<<<<<<<<<<< @@ -9225,7 +9286,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_12generator4(__pyx_GeneratorObject */ __pyx_cur_scope->__pyx_v_derivation = __pyx_cur_scope->__pyx_v_derivations->LazyKthBest((__pyx_cur_scope->__pyx_v_self->hg->nodes_.size() - 1), __pyx_cur_scope->__pyx_v_k); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":43 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":52 * for k in range(size): * derivation = derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * if not derivation: break # <<<<<<<<<<<<<< @@ -9239,16 +9300,16 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_12generator4(__pyx_GeneratorObject } __pyx_L9:; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":44 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":53 * derivation = derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * if not derivation: break * yield unicode(GetString(derivation._yield).c_str(), 'utf8') # <<<<<<<<<<<<<< * finally: * del derivations */ - __pyx_t_4 = PyBytes_FromString(TD::GetString(__pyx_cur_scope->__pyx_v_derivation->yield).c_str()); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_4 = PyBytes_FromString(TD::GetString(__pyx_cur_scope->__pyx_v_derivation->yield).c_str()); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_4)); __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); @@ -9256,7 +9317,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_12generator4(__pyx_GeneratorObject PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; @@ -9271,12 +9332,12 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_12generator4(__pyx_GeneratorObject __pyx_L10_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L5;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L5;} } __pyx_L8_break:; } - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":46 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":55 * yield 
unicode(GetString(derivation._yield).c_str(), 'utf8') * finally: * del derivations # <<<<<<<<<<<<<< @@ -9327,6 +9388,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_14kbest_trees(PyObject *__pyx_v_self, PyObject *__pyx_v_size); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_13kbest_trees[] = "hg.kbest_trees(size) -> List of k-best trees in the hypergraph."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_14kbest_trees(PyObject *__pyx_v_self, PyObject *__pyx_v_size) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -9336,12 +9398,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_14kbest_trees(PyObject *__pyx_v_se return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":48 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":57 * del derivations * * def kbest_trees(self, size): # <<<<<<<<<<<<<< + * """hg.kbest_trees(size) -> List of k-best trees in the hypergraph.""" * cdef kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal]* f_derivations = new kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal](self.hg[0], size) - * cdef kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal].Derivation* f_derivation */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_13kbest_trees(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self, PyObject *__pyx_v_size) { @@ -9365,7 +9427,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_13kbest_trees(struct __pyx_obj_5_c __Pyx_INCREF(__pyx_cur_scope->__pyx_v_size); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_size); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_15generator5, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_15generator5, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -9404,29 +9466,29 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":49 - * + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":59 * def kbest_trees(self, size): + * """hg.kbest_trees(size) -> List of k-best trees in the hypergraph.""" * cdef kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal]* f_derivations = new kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal](self.hg[0], size) # <<<<<<<<<<<<<< * cdef kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal].Derivation* f_derivation * cdef kbest.KBestDerivations[vector[WordID], kbest.ETreeTraversal]* e_derivations = new kbest.KBestDerivations[vector[WordID], kbest.ETreeTraversal](self.hg[0], size) */ - __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} + __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_cur_scope->__pyx_v_f_derivations = new KBest::KBestDerivations<std::vector<WordID>,FTreeTraversal>((__pyx_cur_scope->__pyx_v_self->hg[0]), __pyx_t_1); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":51 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":61 * cdef kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal]* f_derivations = new kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal](self.hg[0], size) * cdef kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal].Derivation* f_derivation * cdef kbest.KBestDerivations[vector[WordID], kbest.ETreeTraversal]* e_derivations = new kbest.KBestDerivations[vector[WordID], kbest.ETreeTraversal](self.hg[0], size) # <<<<<<<<<<<<<< * cdef kbest.KBestDerivations[vector[WordID], kbest.ETreeTraversal].Derivation* e_derivation * cdef unsigned k */ - __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_cur_scope->__pyx_v_e_derivations = new KBest::KBestDerivations<std::vector<WordID>,ETreeTraversal>((__pyx_cur_scope->__pyx_v_self->hg[0]), __pyx_t_1); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":54 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":64 * cdef kbest.KBestDerivations[vector[WordID], kbest.ETreeTraversal].Derivation* e_derivation * cdef unsigned k * try: # <<<<<<<<<<<<<< @@ -9435,18 +9497,18 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject */ /*try:*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":55 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":65 * cdef unsigned k * try: * for k in range(size): # <<<<<<<<<<<<<< * f_derivation = f_derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * e_derivation = e_derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) */ - __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L5;} for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_2; __pyx_t_1+=1) { __pyx_cur_scope->__pyx_v_k = __pyx_t_1; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":56 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":66 * try: * for k in range(size): * f_derivation = f_derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) # <<<<<<<<<<<<<< @@ -9455,7 +9517,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject */ __pyx_cur_scope->__pyx_v_f_derivation = __pyx_cur_scope->__pyx_v_f_derivations->LazyKthBest((__pyx_cur_scope->__pyx_v_self->hg->nodes_.size() - 1), __pyx_cur_scope->__pyx_v_k); - /* 
"/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":57 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":67 * for k in range(size): * f_derivation = f_derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * e_derivation = e_derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) # <<<<<<<<<<<<<< @@ -9464,7 +9526,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject */ __pyx_cur_scope->__pyx_v_e_derivation = __pyx_cur_scope->__pyx_v_e_derivations->LazyKthBest((__pyx_cur_scope->__pyx_v_self->hg->nodes_.size() - 1), __pyx_cur_scope->__pyx_v_k); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":58 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":68 * f_derivation = f_derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * e_derivation = e_derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * if not f_derivation or not e_derivation: break # <<<<<<<<<<<<<< @@ -9484,16 +9546,16 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject } __pyx_L9:; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":59 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":69 * e_derivation = e_derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * if not f_derivation or not e_derivation: break * f_tree = unicode(GetString(f_derivation._yield).c_str(), 'utf8') # <<<<<<<<<<<<<< * e_tree = unicode(GetString(e_derivation._yield).c_str(), 'utf8') * yield (f_tree, e_tree) */ - __pyx_t_6 = PyBytes_FromString(TD::GetString(__pyx_cur_scope->__pyx_v_f_derivation->yield).c_str()); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_6 = PyBytes_FromString(TD::GetString(__pyx_cur_scope->__pyx_v_f_derivation->yield).c_str()); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_t_6)); __Pyx_GIVEREF(((PyObject *)__pyx_t_6)); @@ -9501,7 +9563,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject PyTuple_SET_ITEM(__pyx_t_7, 1, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); __pyx_t_6 = 0; - __pyx_t_6 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_7), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_6 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_7), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(((PyObject *)__pyx_t_7)); __pyx_t_7 = 0; __Pyx_XGOTREF(((PyObject *)__pyx_cur_scope->__pyx_v_f_tree)); @@ -9510,16 +9572,16 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject __pyx_cur_scope->__pyx_v_f_tree = ((PyObject*)__pyx_t_6); __pyx_t_6 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":60 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":70 * if not f_derivation or not e_derivation: break * f_tree = 
unicode(GetString(f_derivation._yield).c_str(), 'utf8') * e_tree = unicode(GetString(e_derivation._yield).c_str(), 'utf8') # <<<<<<<<<<<<<< * yield (f_tree, e_tree) * finally: */ - __pyx_t_6 = PyBytes_FromString(TD::GetString(__pyx_cur_scope->__pyx_v_e_derivation->yield).c_str()); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_6 = PyBytes_FromString(TD::GetString(__pyx_cur_scope->__pyx_v_e_derivation->yield).c_str()); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(((PyObject *)__pyx_t_6)); - __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_7, 0, ((PyObject *)__pyx_t_6)); __Pyx_GIVEREF(((PyObject *)__pyx_t_6)); @@ -9527,7 +9589,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject PyTuple_SET_ITEM(__pyx_t_7, 1, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); __pyx_t_6 = 0; - __pyx_t_6 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_7), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_6 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_7), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(((PyObject *)__pyx_t_7)); __pyx_t_7 = 0; __Pyx_XGOTREF(((PyObject *)__pyx_cur_scope->__pyx_v_e_tree)); @@ -9536,14 +9598,14 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject __pyx_cur_scope->__pyx_v_e_tree = ((PyObject*)__pyx_t_6); __pyx_t_6 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":61 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":71 * f_tree = unicode(GetString(f_derivation._yield).c_str(), 'utf8') * e_tree = unicode(GetString(e_derivation._yield).c_str(), 'utf8') * yield (f_tree, e_tree) # <<<<<<<<<<<<<< * finally: * del f_derivations */ - __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(((PyObject *)__pyx_cur_scope->__pyx_v_f_tree)); PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_cur_scope->__pyx_v_f_tree)); @@ -9563,12 +9625,12 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject __pyx_L10_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L5;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 71; __pyx_clineno = __LINE__; goto __pyx_L5;} } __pyx_L8_break:; } - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":63 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":73 * yield (f_tree, e_tree) * finally: * del f_derivations # <<<<<<<<<<<<<< @@ 
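
`kbest_trees(size)` runs two k-best enumerations in lock-step, one under `FTreeTraversal` and one under `ETreeTraversal`, and yields `(f_tree, e_tree)` pairs until either enumeration is exhausted:

    for f_tree, e_tree in hg.kbest_trees(3):
        print(f_tree)
        print(e_tree)
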
-9592,7 +9654,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_15generator5(__pyx_GeneratorObject __pyx_L6:; delete __pyx_cur_scope->__pyx_v_f_derivations; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":64 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":74 * finally: * del f_derivations * del e_derivations # <<<<<<<<<<<<<< @@ -9628,6 +9690,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_18generator6(__pyx_GeneratorObject /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_17kbest_features(PyObject *__pyx_v_self, PyObject *__pyx_v_size); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_16kbest_features[] = "hg.kbest_features(size) -> List of k-best feature vectors in the hypergraph."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_17kbest_features(PyObject *__pyx_v_self, PyObject *__pyx_v_size) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -9637,12 +9700,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_17kbest_features(PyObject *__pyx_v return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":66 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":76 * del e_derivations * * def kbest_features(self, size): # <<<<<<<<<<<<<< + * """hg.kbest_features(size) -> List of k-best feature vectors in the hypergraph.""" * cdef kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal]* derivations = new kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal](self.hg[0], size) - * cdef kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal].Derivation* derivation */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_16kbest_features(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self, PyObject *__pyx_v_size) { @@ -9666,7 +9729,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_16kbest_features(struct __pyx_obj_ __Pyx_INCREF(__pyx_cur_scope->__pyx_v_size); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_size); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_18generator6, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_18generator6, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -9702,19 +9765,19 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_18generator6(__pyx_GeneratorObject return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":67 - * + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":78 * def kbest_features(self, size): + * """hg.kbest_features(size) -> List of k-best feature vectors in the hypergraph.""" * cdef kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal]* derivations = new kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal](self.hg[0], size) # <<<<<<<<<<<<<< * cdef kbest.KBestDerivations[FastSparseVector[weight_t],
kbest.FeatureVectorTraversal].Derivation* derivation * cdef SparseVector fmap */ - __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyInt_AsUnsignedInt(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_1 == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_cur_scope->__pyx_v_derivations = new KBest::KBestDerivations<FastSparseVector<weight_t>,FeatureVectorTraversal>((__pyx_cur_scope->__pyx_v_self->hg[0]), __pyx_t_1); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":71 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":82 * cdef SparseVector fmap * cdef unsigned k * try: # <<<<<<<<<<<<<< @@ -9723,18 +9786,18 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_18generator6(__pyx_GeneratorObject */ /*try:*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":72 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":83 * cdef unsigned k * try: * for k in range(size): # <<<<<<<<<<<<<< * derivation = derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * if not derivation: break */ - __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_cur_scope->__pyx_v_size); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L5;} for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_2; __pyx_t_1+=1) { __pyx_cur_scope->__pyx_v_k = __pyx_t_1; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":73 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":84 * try: * for k in range(size): * derivation = derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) # <<<<<<<<<<<<<< @@ -9743,7 +9806,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_18generator6(__pyx_GeneratorObject */ __pyx_cur_scope->__pyx_v_derivation = __pyx_cur_scope->__pyx_v_derivations->LazyKthBest((__pyx_cur_scope->__pyx_v_self->hg->nodes_.size() - 1), __pyx_cur_scope->__pyx_v_k); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":74 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":85 * for k in range(size): * derivation = derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * if not derivation: break # <<<<<<<<<<<<<< @@ -9757,23 +9820,23 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_18generator6(__pyx_GeneratorObject } __pyx_L9:; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":75 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":86 * derivation = derivations.LazyKthBest(self.hg.nodes_.size() - 1, k) * if not derivation: break * fmap = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * fmap.vector = new FastSparseVector[weight_t](derivation._yield) * yield fmap */ - __pyx_t_4 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_4 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_GOTREF(__pyx_t_4); - if 
(!(likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L5;} + if (!(likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L5;} __Pyx_XGOTREF(((PyObject *)__pyx_cur_scope->__pyx_v_fmap)); __Pyx_XDECREF(((PyObject *)__pyx_cur_scope->__pyx_v_fmap)); __Pyx_GIVEREF(__pyx_t_4); __pyx_cur_scope->__pyx_v_fmap = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_4); __pyx_t_4 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":76 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":87 * if not derivation: break * fmap = SparseVector.__new__(SparseVector) * fmap.vector = new FastSparseVector[weight_t](derivation._yield) # <<<<<<<<<<<<<< @@ -9782,7 +9845,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_18generator6(__pyx_GeneratorObject */ __pyx_cur_scope->__pyx_v_fmap->vector = new FastSparseVector<weight_t>(__pyx_cur_scope->__pyx_v_derivation->yield); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":77 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":88 * fmap = SparseVector.__new__(SparseVector) * fmap.vector = new FastSparseVector[weight_t](derivation._yield) * yield fmap # <<<<<<<<<<<<<< @@ -9801,12 +9864,12 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_18generator6(__pyx_GeneratorObject __pyx_L10_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L5;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L5;} } __pyx_L8_break:; } - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":79 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":90 * yield fmap * finally: * del derivations # <<<<<<<<<<<<<< @@ -9855,13 +9918,14 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_21generator7(__pyx_GeneratorObject /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_20sample(PyObject *__pyx_v_self, PyObject *__pyx_arg_n); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_19sample[] = "hg.sample(n) -> Sample of n hypotheses from the hypergraph."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_20sample(PyObject *__pyx_v_self, PyObject *__pyx_arg_n) { unsigned int __pyx_v_n; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("sample (wrapper)", 0); assert(__pyx_arg_n); { - __pyx_v_n = __Pyx_PyInt_AsUnsignedInt(__pyx_arg_n); if (unlikely((__pyx_v_n == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_n = __Pyx_PyInt_AsUnsignedInt(__pyx_arg_n); if (unlikely((__pyx_v_n == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; @@ -9874,12 +9938,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_20sample(PyObject *__pyx_v_self, P return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":81 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":92 * del derivations * * def sample(self, unsigned n): # <<<<<<<<<<<<<< + * """hg.sample(n) -> Sample of n hypotheses from the hypergraph.""" * cdef vector[hypergraph.Hypothesis]* hypos = 
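
`kbest_features(size)` yields one freshly wrapped `SparseVector` per derivation under the `FeatureVectorTraversal`. Pairing it with `kbest` gives each hypothesis alongside its feature vector, e.g. for n-best reranking; a sketch, assuming both enumerations come back in the same model-score order:

    for hyp, fmap in zip(hg.kbest(10), hg.kbest_features(10)):
        pass  # e.g. rescore hyp using fmap and an alternative weight vector
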
new vector[hypergraph.Hypothesis]() - * hypergraph.sample_hypotheses(self.hg[0], n, self._rng(), hypos) */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_19sample(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self, unsigned int __pyx_v_n) { @@ -9901,7 +9965,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_19sample(struct __pyx_obj_5_cdec_H __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __pyx_cur_scope->__pyx_v_n = __pyx_v_n; { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_21generator7, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_21generator7, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -9923,10 +9987,11 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_21generator7(__pyx_GeneratorObject { struct __pyx_obj_5_cdec___pyx_scope_struct_11_sample *__pyx_cur_scope = ((struct __pyx_obj_5_cdec___pyx_scope_struct_11_sample *)__pyx_generator->closure); PyObject *__pyx_r = NULL; - size_t __pyx_t_1; - unsigned int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; + std::vector<HypergraphSampler::Hypothesis> *__pyx_t_1; + size_t __pyx_t_2; + unsigned int __pyx_t_3; PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("None", 0); switch (__pyx_generator->resume_label) { @@ -9937,19 +10002,20 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_21generator7(__pyx_GeneratorObject return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":82 - * + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":94 * def sample(self, unsigned n): + * """hg.sample(n) -> Sample of n hypotheses from the hypergraph.""" * cdef vector[hypergraph.Hypothesis]* hypos = new vector[hypergraph.Hypothesis]() # <<<<<<<<<<<<<< * hypergraph.sample_hypotheses(self.hg[0], n, self._rng(), hypos) * cdef unsigned k */ - __pyx_cur_scope->__pyx_v_hypos = new std::vector<HypergraphSampler::Hypothesis>(); + try {__pyx_t_1 = new std::vector<HypergraphSampler::Hypothesis>();} catch(...) 
{__Pyx_CppExn2PyErr(); {__pyx_filename = __pyx_f[3]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;}} + __pyx_cur_scope->__pyx_v_hypos = __pyx_t_1; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":83 - * def sample(self, unsigned n): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":95 + * """hg.sample(n) -> Sample of n hypotheses from the hypergraph.""" * cdef vector[hypergraph.Hypothesis]* hypos = new vector[hypergraph.Hypothesis]() * hypergraph.sample_hypotheses(self.hg[0], n, self._rng(), hypos) # <<<<<<<<<<<<<< * cdef unsigned k @@ -9957,7 +10023,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_21generator7(__pyx_GeneratorObject */ HypergraphSampler::sample_hypotheses((__pyx_cur_scope->__pyx_v_self->hg[0]), __pyx_cur_scope->__pyx_v_n, ((struct __pyx_vtabstruct_5_cdec_Hypergraph *)__pyx_cur_scope->__pyx_v_self->__pyx_vtab)->_rng(__pyx_cur_scope->__pyx_v_self), __pyx_cur_scope->__pyx_v_hypos); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":85 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":97 * hypergraph.sample_hypotheses(self.hg[0], n, self._rng(), hypos) * cdef unsigned k * try: # <<<<<<<<<<<<<< @@ -9966,54 +10032,54 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_21generator7(__pyx_GeneratorObject */ /*try:*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":86 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":98 * cdef unsigned k * try: * for k in range(hypos.size()): # <<<<<<<<<<<<<< * yield unicode(GetString(hypos[0][k].words).c_str(), 'utf8') * finally: */ - __pyx_t_1 = __pyx_cur_scope->__pyx_v_hypos->size(); - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_cur_scope->__pyx_v_k = __pyx_t_2; + __pyx_t_2 = __pyx_cur_scope->__pyx_v_hypos->size(); + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_cur_scope->__pyx_v_k = __pyx_t_3; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":87 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":99 * try: * for k in range(hypos.size()): * yield unicode(GetString(hypos[0][k].words).c_str(), 'utf8') # <<<<<<<<<<<<<< * finally: * del hypos */ - __pyx_t_3 = PyBytes_FromString(TD::GetString(((__pyx_cur_scope->__pyx_v_hypos[0])[__pyx_cur_scope->__pyx_v_k]).words).c_str()); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L5;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L5;} - __Pyx_GOTREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); + __pyx_t_4 = PyBytes_FromString(TD::GetString(((__pyx_cur_scope->__pyx_v_hypos[0])[__pyx_cur_scope->__pyx_v_k]).words).c_str()); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L5;} + __Pyx_GOTREF(((PyObject *)__pyx_t_4)); + __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L5;} + __Pyx_GOTREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_4)); + __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __Pyx_INCREF(((PyObject *)__pyx_n_s__utf8)); - PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_n_s__utf8)); + PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); - __pyx_t_3 = 0; - __pyx_t_3 = 
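
`sample(n)` fills a C++ vector via `HypergraphSampler::sample_hypotheses`, driven by the wrapper's RNG (`self._rng()`), and yields each sampled hypothesis as a unicode string; note the `new std::vector` allocation is now wrapped in `try/catch` so a C++ allocation failure surfaces as a Python exception instead of terminating the process. Sketch:

    samples = list(hg.sample(100))
    print('%d distinct strings in %d samples' % (len(set(samples)), len(samples)))
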
PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L5;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - __pyx_cur_scope->__pyx_t_0 = __pyx_t_1; - __pyx_cur_scope->__pyx_t_1 = __pyx_t_2; + __pyx_t_4 = 0; + __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L5;} + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + __pyx_cur_scope->__pyx_t_0 = __pyx_t_2; + __pyx_cur_scope->__pyx_t_1 = __pyx_t_3; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); /* return from generator, yielding value */ __pyx_generator->resume_label = 1; return __pyx_r; __pyx_L9_resume_from_yield:; - __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; - __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_2 = __pyx_cur_scope->__pyx_t_0; + __pyx_t_3 = __pyx_cur_scope->__pyx_t_1; + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L5;} } } - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":89 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":101 * yield unicode(GetString(hypos[0][k].words).c_str(), 'utf8') * finally: * del hypos # <<<<<<<<<<<<<< @@ -10028,8 +10094,8 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_21generator7(__pyx_GeneratorObject __pyx_why = 0; goto __pyx_L6; __pyx_L5: { __pyx_why = 4; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_ErrFetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb); __pyx_exc_lineno = __pyx_lineno; goto __pyx_L6; @@ -10050,8 +10116,8 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_21generator7(__pyx_GeneratorObject PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("sample", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_L0:; __Pyx_XDECREF(__pyx_r); @@ -10064,13 +10130,14 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_24generator8(__pyx_GeneratorObject /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_23sample_trees(PyObject *__pyx_v_self, PyObject *__pyx_arg_n); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_22sample_trees[] = "hg.sample_trees(n) -> Sample of n trees from the hypergraph."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_23sample_trees(PyObject *__pyx_v_self, PyObject *__pyx_arg_n) { unsigned int __pyx_v_n; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("sample_trees (wrapper)", 0); assert(__pyx_arg_n); { - __pyx_v_n = __Pyx_PyInt_AsUnsignedInt(__pyx_arg_n); if (unlikely((__pyx_v_n == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __pyx_v_n = __Pyx_PyInt_AsUnsignedInt(__pyx_arg_n); if (unlikely((__pyx_v_n == (unsigned int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto 
__pyx_L4_argument_unpacking_done; __pyx_L3_error:; @@ -10083,12 +10150,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_23sample_trees(PyObject *__pyx_v_s return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":91 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":103 * del hypos * * def sample_trees(self, unsigned n): # <<<<<<<<<<<<<< + * """hg.sample_trees(n) -> Sample of n trees from the hypergraph.""" * cdef vector[string]* trees = new vector[string]() - * hypergraph.sample_trees(self.hg[0], n, self._rng(), trees) */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_22sample_trees(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self, unsigned int __pyx_v_n) { @@ -10110,7 +10177,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_22sample_trees(struct __pyx_obj_5_ __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __pyx_cur_scope->__pyx_v_n = __pyx_v_n; { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_24generator8, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_24generator8, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -10132,10 +10199,11 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_24generator8(__pyx_GeneratorObject { struct __pyx_obj_5_cdec___pyx_scope_struct_12_sample_trees *__pyx_cur_scope = ((struct __pyx_obj_5_cdec___pyx_scope_struct_12_sample_trees *)__pyx_generator->closure); PyObject *__pyx_r = NULL; - size_t __pyx_t_1; - unsigned int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; + std::vector<std::string> *__pyx_t_1; + size_t __pyx_t_2; + unsigned int __pyx_t_3; PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("None", 0); switch (__pyx_generator->resume_label) { @@ -10146,19 +10214,20 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_24generator8(__pyx_GeneratorObject return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":92 - * + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":105 * def sample_trees(self, unsigned n): + * """hg.sample_trees(n) -> Sample of n trees from the hypergraph.""" * cdef vector[string]* trees = new vector[string]() # <<<<<<<<<<<<<< * hypergraph.sample_trees(self.hg[0], n, self._rng(), trees) * cdef unsigned k */ - __pyx_cur_scope->__pyx_v_trees = new std::vector<std::string>(); + try {__pyx_t_1 = new std::vector<std::string>();} catch(...) 
{__Pyx_CppExn2PyErr(); {__pyx_filename = __pyx_f[3]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;}} + __pyx_cur_scope->__pyx_v_trees = __pyx_t_1; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":93 - * def sample_trees(self, unsigned n): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":106 + * """hg.sample_trees(n) -> Sample of n trees from the hypergraph.""" * cdef vector[string]* trees = new vector[string]() * hypergraph.sample_trees(self.hg[0], n, self._rng(), trees) # <<<<<<<<<<<<<< * cdef unsigned k @@ -10166,7 +10235,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_24generator8(__pyx_GeneratorObject */ HypergraphSampler::sample_trees((__pyx_cur_scope->__pyx_v_self->hg[0]), __pyx_cur_scope->__pyx_v_n, ((struct __pyx_vtabstruct_5_cdec_Hypergraph *)__pyx_cur_scope->__pyx_v_self->__pyx_vtab)->_rng(__pyx_cur_scope->__pyx_v_self), __pyx_cur_scope->__pyx_v_trees); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":95 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":108 * hypergraph.sample_trees(self.hg[0], n, self._rng(), trees) * cdef unsigned k * try: # <<<<<<<<<<<<<< @@ -10175,54 +10244,54 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_24generator8(__pyx_GeneratorObject */ /*try:*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":96 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":109 * cdef unsigned k * try: * for k in range(trees.size()): # <<<<<<<<<<<<<< * yield unicode(trees[0][k].c_str(), 'utf8') * finally: */ - __pyx_t_1 = __pyx_cur_scope->__pyx_v_trees->size(); - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { - __pyx_cur_scope->__pyx_v_k = __pyx_t_2; + __pyx_t_2 = __pyx_cur_scope->__pyx_v_trees->size(); + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_cur_scope->__pyx_v_k = __pyx_t_3; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":97 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":110 * try: * for k in range(trees.size()): * yield unicode(trees[0][k].c_str(), 'utf8') # <<<<<<<<<<<<<< * finally: * del trees */ - __pyx_t_3 = PyBytes_FromString(((__pyx_cur_scope->__pyx_v_trees[0])[__pyx_cur_scope->__pyx_v_k]).c_str()); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L5;} - __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L5;} - __Pyx_GOTREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_3)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); + __pyx_t_4 = PyBytes_FromString(((__pyx_cur_scope->__pyx_v_trees[0])[__pyx_cur_scope->__pyx_v_k]).c_str()); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L5;} + __Pyx_GOTREF(((PyObject *)__pyx_t_4)); + __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L5;} + __Pyx_GOTREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_t_4)); + __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __Pyx_INCREF(((PyObject *)__pyx_n_s__utf8)); - PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_n_s__utf8)); + PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); - __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_4), NULL); if 
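
`sample_trees(n)` is the tree-valued analogue, built on `HypergraphSampler::sample_trees` with the same guarded allocation:

    for tree in hg.sample_trees(10):
        print(tree)
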
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L5;} - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - __pyx_cur_scope->__pyx_t_0 = __pyx_t_1; - __pyx_cur_scope->__pyx_t_1 = __pyx_t_2; + __pyx_t_4 = 0; + __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L5;} + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + __pyx_cur_scope->__pyx_t_0 = __pyx_t_2; + __pyx_cur_scope->__pyx_t_1 = __pyx_t_3; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); /* return from generator, yielding value */ __pyx_generator->resume_label = 1; return __pyx_r; __pyx_L9_resume_from_yield:; - __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; - __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L5;} + __pyx_t_2 = __pyx_cur_scope->__pyx_t_0; + __pyx_t_3 = __pyx_cur_scope->__pyx_t_1; + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L5;} } } - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":99 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":112 * yield unicode(trees[0][k].c_str(), 'utf8') * finally: * del trees # <<<<<<<<<<<<<< @@ -10237,8 +10306,8 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_24generator8(__pyx_GeneratorObject __pyx_why = 0; goto __pyx_L6; __pyx_L5: { __pyx_why = 4; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_ErrFetch(&__pyx_exc_type, &__pyx_exc_value, &__pyx_exc_tb); __pyx_exc_lineno = __pyx_lineno; goto __pyx_L6; @@ -10259,8 +10328,8 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_24generator8(__pyx_GeneratorObject PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("sample_trees", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_L0:; __Pyx_XDECREF(__pyx_r); @@ -10272,6 +10341,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_24generator8(__pyx_GeneratorObject /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_26intersect(PyObject *__pyx_v_self, PyObject *__pyx_v_inp); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_25intersect[] = "hg.intersect(Lattice/string): Intersect the hypergraph with the provided reference."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_26intersect(PyObject *__pyx_v_self, PyObject *__pyx_v_inp) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -10281,12 +10351,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_26intersect(PyObject *__pyx_v_self return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":101 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":114 * del trees * * def intersect(self, inp): # <<<<<<<<<<<<<< + * """hg.intersect(Lattice/string): Intersect the hypergraph with the provided reference.""" * cdef Lattice lat - * if isinstance(inp, Lattice): */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_25intersect(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self, PyObject *__pyx_v_inp) { @@ -10301,8 +10371,8 @@ static PyObject 
*__pyx_pf_5_cdec_10Hypergraph_25intersect(struct __pyx_obj_5_cde int __pyx_clineno = 0; __Pyx_RefNannySetupContext("intersect", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":103 - * def intersect(self, inp): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":117 + * """hg.intersect(Lattice/string): Intersect the hypergraph with the provided reference.""" * cdef Lattice lat * if isinstance(inp, Lattice): # <<<<<<<<<<<<<< * lat = <Lattice> inp @@ -10314,7 +10384,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_25intersect(struct __pyx_obj_5_cde __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":104 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":118 * cdef Lattice lat * if isinstance(inp, Lattice): * lat = <Lattice> inp # <<<<<<<<<<<<<< @@ -10326,7 +10396,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_25intersect(struct __pyx_obj_5_cde goto __pyx_L3; } - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":105 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":119 * if isinstance(inp, Lattice): * lat = <Lattice> inp * elif isinstance(inp, basestring): # <<<<<<<<<<<<<< @@ -10335,23 +10405,23 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_25intersect(struct __pyx_obj_5_cde */ __pyx_t_1 = __pyx_builtin_basestring; __Pyx_INCREF(__pyx_t_1); - __pyx_t_2 = PyObject_IsInstance(__pyx_v_inp, __pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_IsInstance(__pyx_v_inp, __pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":106 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":120 * lat = <Lattice> inp * elif isinstance(inp, basestring): * lat = Lattice(inp) # <<<<<<<<<<<<<< * else: * raise TypeError('cannot intersect hypergraph with %s' % type(inp)) */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_inp); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_inp); __Pyx_GIVEREF(__pyx_v_inp); - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Lattice)), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Lattice)), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_v_lat = ((struct __pyx_obj_5_cdec_Lattice *)__pyx_t_3); @@ -10360,30 +10430,30 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_25intersect(struct __pyx_obj_5_cde } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":108 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":122 * lat = Lattice(inp) * else: * raise TypeError('cannot intersect hypergraph with %s' % type(inp)) # <<<<<<<<<<<<<< * return 
hypergraph.Intersect(lat.lattice[0], self.hg) * */ - __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_15), ((PyObject *)Py_TYPE(__pyx_v_inp))); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_15), ((PyObject *)Py_TYPE(__pyx_v_inp))); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_3)); __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[3]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":109 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":123 * else: * raise TypeError('cannot intersect hypergraph with %s' % type(inp)) * return hypergraph.Intersect(lat.lattice[0], self.hg) # <<<<<<<<<<<<<< @@ -10391,7 +10461,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_25intersect(struct __pyx_obj_5_cde * def prune(self, beam_alpha=0, density=0, **kwargs): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_PyBool_FromLong(HG::Intersect((__pyx_v_lat->lattice[0]), __pyx_v_self->hg)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __Pyx_PyBool_FromLong(HG::Intersect((__pyx_v_lat->lattice[0]), __pyx_v_self->hg)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; @@ -10413,6 +10483,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_25intersect(struct __pyx_obj_5_cde /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_28prune(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_27prune[] = "hg.prune(beam_alpha=0, density=0): Prune the hypergraph.\n beam_alpha: use beam pruning\n density: use density pruning"; static PyObject *__pyx_pw_5_cdec_10Hypergraph_28prune(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_beam_alpha = 0; PyObject *__pyx_v_density = 0; @@ -10450,7 +10521,7 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_28prune(PyObject *__pyx_v_self, Py } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, 
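/* Usage sketch for hg.intersect as documented above: it accepts a Lattice
 * or a string (anything else raises TypeError) and returns the bool result
 * of HG::Intersect. `hg` is assumed from the sample_trees sketch; `ref` is
 * a hypothetical reference sentence.
 *
 *     ref = u'the reference translation'  # hypothetical
 *     if hg.intersect(ref):               # a string is wrapped in a Lattice
 *         pass  # hg now contains only derivations compatible with ref
 */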
__pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "prune") < 0)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "prune") < 0)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -10465,7 +10536,7 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_28prune(PyObject *__pyx_v_self, Py } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("prune", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[3]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("prune", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[3]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; __Pyx_AddTraceback("_cdec.Hypergraph.prune", __pyx_clineno, __pyx_lineno, __pyx_filename); @@ -10478,12 +10549,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_28prune(PyObject *__pyx_v_self, Py return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":111 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":125 * return hypergraph.Intersect(lat.lattice[0], self.hg) * * def prune(self, beam_alpha=0, density=0, **kwargs): # <<<<<<<<<<<<<< - * cdef hypergraph.EdgeMask* preserve_mask = NULL - * if 'csplit_preserve_full_word' in kwargs: + * """hg.prune(beam_alpha=0, density=0): Prune the hypergraph. + * beam_alpha: use beam pruning */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_27prune(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self, PyObject *__pyx_v_beam_alpha, PyObject *__pyx_v_density, PyObject *__pyx_v_kwargs) { @@ -10498,26 +10569,26 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_27prune(struct __pyx_obj_5_cdec_Hy int __pyx_clineno = 0; __Pyx_RefNannySetupContext("prune", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":112 - * - * def prune(self, beam_alpha=0, density=0, **kwargs): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":129 + * beam_alpha: use beam pruning + * density: use density pruning""" * cdef hypergraph.EdgeMask* preserve_mask = NULL # <<<<<<<<<<<<<< * if 'csplit_preserve_full_word' in kwargs: * preserve_mask = new hypergraph.EdgeMask(self.hg.edges_.size()) */ __pyx_v_preserve_mask = NULL; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":113 - * def prune(self, beam_alpha=0, density=0, **kwargs): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":130 + * density: use density pruning""" * cdef hypergraph.EdgeMask* preserve_mask = NULL * if 'csplit_preserve_full_word' in kwargs: # <<<<<<<<<<<<<< * preserve_mask = new hypergraph.EdgeMask(self.hg.edges_.size()) * preserve_mask[0][hypergraph.GetFullWordEdgeIndex(self.hg[0])] = True */ - __pyx_t_1 = ((PyDict_Contains(((PyObject *)__pyx_v_kwargs), ((PyObject *)__pyx_n_s_16)))); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = (__Pyx_PyDict_Contains(((PyObject *)__pyx_n_s_16), ((PyObject *)__pyx_v_kwargs), Py_EQ)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_1) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":114 + /* 
"/home/vchahune/tools/cdec/python/src/hypergraph.pxi":131 * cdef hypergraph.EdgeMask* preserve_mask = NULL * if 'csplit_preserve_full_word' in kwargs: * preserve_mask = new hypergraph.EdgeMask(self.hg.edges_.size()) # <<<<<<<<<<<<<< @@ -10526,7 +10597,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_27prune(struct __pyx_obj_5_cdec_Hy */ __pyx_v_preserve_mask = new std::vector<bool>(__pyx_v_self->hg->edges_.size()); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":115 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":132 * if 'csplit_preserve_full_word' in kwargs: * preserve_mask = new hypergraph.EdgeMask(self.hg.edges_.size()) * preserve_mask[0][hypergraph.GetFullWordEdgeIndex(self.hg[0])] = True # <<<<<<<<<<<<<< @@ -10538,18 +10609,18 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_27prune(struct __pyx_obj_5_cdec_Hy } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":116 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":133 * preserve_mask = new hypergraph.EdgeMask(self.hg.edges_.size()) * preserve_mask[0][hypergraph.GetFullWordEdgeIndex(self.hg[0])] = True * self.hg.PruneInsideOutside(beam_alpha, density, preserve_mask, False, 1, False) # <<<<<<<<<<<<<< * if preserve_mask: * del preserve_mask */ - __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_v_beam_alpha); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_v_density); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_v_beam_alpha); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __pyx_PyFloat_AsDouble(__pyx_v_density); if (unlikely((__pyx_t_3 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->hg->PruneInsideOutside(__pyx_t_2, __pyx_t_3, __pyx_v_preserve_mask, 0, 1.0, 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":117 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":134 * preserve_mask[0][hypergraph.GetFullWordEdgeIndex(self.hg[0])] = True * self.hg.PruneInsideOutside(beam_alpha, density, preserve_mask, False, 1, False) * if preserve_mask: # <<<<<<<<<<<<<< @@ -10559,7 +10630,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_27prune(struct __pyx_obj_5_cdec_Hy __pyx_t_1 = (__pyx_v_preserve_mask != 0); if (__pyx_t_1) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":118 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":135 * self.hg.PruneInsideOutside(beam_alpha, density, preserve_mask, False, 1, False) * if preserve_mask: * del preserve_mask # <<<<<<<<<<<<<< @@ -10584,6 +10655,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_27prune(struct __pyx_obj_5_cdec_Hy /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_30lattice(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_29lattice[] = "hg.lattice() -> Lattice corresponding to the hypergraph."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_30lattice(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -10593,12 +10665,12 @@ static PyObject 
*__pyx_pw_5_cdec_10Hypergraph_30lattice(PyObject *__pyx_v_self, return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":120 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":137 * del preserve_mask * * def lattice(self): # TODO direct hg -> lattice conversion in cdec # <<<<<<<<<<<<<< + * """hg.lattice() -> Lattice corresponding to the hypergraph.""" * cdef bytes plf = hypergraph.AsPLF(self.hg[0], True).c_str() - * return Lattice(eval(plf)) */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_29lattice(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self) { @@ -10613,37 +10685,37 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_29lattice(struct __pyx_obj_5_cdec_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("lattice", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":121 - * + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":139 * def lattice(self): # TODO direct hg -> lattice conversion in cdec + * """hg.lattice() -> Lattice corresponding to the hypergraph.""" * cdef bytes plf = hypergraph.AsPLF(self.hg[0], True).c_str() # <<<<<<<<<<<<<< * return Lattice(eval(plf)) * */ - __pyx_t_1 = PyBytes_FromString(HypergraphIO::AsPLF((__pyx_v_self->hg[0]), 1).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(HypergraphIO::AsPLF((__pyx_v_self->hg[0]), 1).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); __pyx_v_plf = __pyx_t_1; __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":122 - * def lattice(self): # TODO direct hg -> lattice conversion in cdec + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":140 + * """hg.lattice() -> Lattice corresponding to the hypergraph.""" * cdef bytes plf = hypergraph.AsPLF(self.hg[0], True).c_str() * return Lattice(eval(plf)) # <<<<<<<<<<<<<< * * def plf(self): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_Globals(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_Globals(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); if (((PyObject *)__pyx_v_plf)) { - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__plf), ((PyObject *)__pyx_v_plf)) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__plf), ((PyObject *)__pyx_v_plf)) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (((PyObject *)__pyx_v_self)) { - if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__self), ((PyObject *)__pyx_v_self)) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__self), ((PyObject *)__pyx_v_self)) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 140; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} } - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_plf)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_plf)); @@ -10654,15 +10726,15 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_29lattice(struct __pyx_obj_5_cdec_ __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); __pyx_t_1 = 0; __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_builtin_eval, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_builtin_eval, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Lattice)), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Lattice)), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; @@ -10686,6 +10758,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_29lattice(struct __pyx_obj_5_cdec_ /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_32plf(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_31plf[] = "hg.plf() -> Lattice PLF representation corresponding to the hypergraph."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_32plf(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -10695,12 +10768,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_32plf(PyObject *__pyx_v_self, CYTH return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":124 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":142 * return Lattice(eval(plf)) * * def plf(self): # <<<<<<<<<<<<<< + * """hg.plf() -> Lattice PLF representation corresponding to the hypergraph.""" * return bytes(hypergraph.AsPLF(self.hg[0], True).c_str()) - * */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_31plf(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self) { @@ -10713,22 +10786,22 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_31plf(struct __pyx_obj_5_cdec_Hype int __pyx_clineno = 0; __Pyx_RefNannySetupContext("plf", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":125 - * + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":144 * def plf(self): + * 
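/* Usage sketch for hg.lattice(). Per the TODO noted in the source line, the
 * conversion currently round-trips through a PLF string and Python eval()
 * rather than a direct C++ hypergraph-to-lattice path.
 *
 *     lat = hg.lattice()  # cdec.Lattice equivalent to the hypergraph
 */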
"""hg.plf() -> Lattice PLF representation corresponding to the hypergraph.""" * return bytes(hypergraph.AsPLF(self.hg[0], True).c_str()) # <<<<<<<<<<<<<< * * def reweight(self, weights): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyBytes_FromString(HypergraphIO::AsPLF((__pyx_v_self->hg[0]), 1).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(HypergraphIO::AsPLF((__pyx_v_self->hg[0]), 1).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyBytes_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyBytes_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; @@ -10750,6 +10823,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_31plf(struct __pyx_obj_5_cdec_Hype /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_34reweight(PyObject *__pyx_v_self, PyObject *__pyx_v_weights); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_33reweight[] = "hg.reweight(SparseVector/DenseVector): Reweight the hypergraph with a new vector."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_34reweight(PyObject *__pyx_v_self, PyObject *__pyx_v_weights) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -10759,12 +10833,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_34reweight(PyObject *__pyx_v_self, return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":127 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":146 * return bytes(hypergraph.AsPLF(self.hg[0], True).c_str()) * * def reweight(self, weights): # <<<<<<<<<<<<<< + * """hg.reweight(SparseVector/DenseVector): Reweight the hypergraph with a new vector.""" * if isinstance(weights, SparseVector): - * self.hg.Reweight((<SparseVector> weights).vector[0]) */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_33reweight(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self, PyObject *__pyx_v_weights) { @@ -10778,9 +10852,9 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_33reweight(struct __pyx_obj_5_cdec int __pyx_clineno = 0; __Pyx_RefNannySetupContext("reweight", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":128 - * + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":148 * def reweight(self, weights): + * """hg.reweight(SparseVector/DenseVector): Reweight the hypergraph with a new vector.""" * if isinstance(weights, SparseVector): # <<<<<<<<<<<<<< * self.hg.Reweight((<SparseVector> weights).vector[0]) * elif isinstance(weights, DenseVector): @@ -10791,8 +10865,8 @@ static PyObject 
*__pyx_pf_5_cdec_10Hypergraph_33reweight(struct __pyx_obj_5_cdec __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":129 - * def reweight(self, weights): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":149 + * """hg.reweight(SparseVector/DenseVector): Reweight the hypergraph with a new vector.""" * if isinstance(weights, SparseVector): * self.hg.Reweight((<SparseVector> weights).vector[0]) # <<<<<<<<<<<<<< * elif isinstance(weights, DenseVector): @@ -10802,7 +10876,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_33reweight(struct __pyx_obj_5_cdec goto __pyx_L3; } - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":130 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":150 * if isinstance(weights, SparseVector): * self.hg.Reweight((<SparseVector> weights).vector[0]) * elif isinstance(weights, DenseVector): # <<<<<<<<<<<<<< @@ -10815,7 +10889,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_33reweight(struct __pyx_obj_5_cdec __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":131 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":151 * self.hg.Reweight((<SparseVector> weights).vector[0]) * elif isinstance(weights, DenseVector): * self.hg.Reweight((<DenseVector> weights).vector[0]) # <<<<<<<<<<<<<< @@ -10827,26 +10901,26 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_33reweight(struct __pyx_obj_5_cdec } /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":133 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":153 * self.hg.Reweight((<DenseVector> weights).vector[0]) * else: * raise TypeError('cannot reweight hypergraph with %s' % type(weights)) # <<<<<<<<<<<<<< * * property edges: */ - __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_17), ((PyObject *)Py_TYPE(__pyx_v_weights))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_17), ((PyObject *)Py_TYPE(__pyx_v_weights))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[3]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[3]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L3:; @@ -10875,7 +10949,7 @@ 
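/* Usage sketch for hg.reweight: a SparseVector or DenseVector is accepted,
 * and any other type raises TypeError. decoder.weights (a DenseVector in
 * the cdec Python API) and indexing it by feature name are assumptions
 * carried over from the cdec examples, not part of this commit.
 *
 *     decoder.weights['LanguageModel'] = 0.5  # assumed name-based indexing
 *     hg.reweight(decoder.weights)  # edge scores follow the new weights
 */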
static PyObject *__pyx_pw_5_cdec_10Hypergraph_5edges_1__get__(PyObject *__pyx_v_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":136 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":156 * * property edges: * def __get__(self): # <<<<<<<<<<<<<< @@ -10901,7 +10975,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_5edges___get__(struct __pyx_obj_5_ __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_5edges_2generator9, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_5edges_2generator9, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -10937,9 +11011,9 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_5edges_2generator9(__pyx_Generator return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":138 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":158 * def __get__(self): * cdef unsigned i * for i in range(self.hg.edges_.size()): # <<<<<<<<<<<<<< @@ -10950,16 +11024,16 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_5edges_2generator9(__pyx_Generator for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":139 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":159 * cdef unsigned i * for i in range(self.hg.edges_.size()): * yield HypergraphEdge().init(self.hg, i) # <<<<<<<<<<<<<< * * property nodes: */ - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphEdge)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphEdge)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = ((struct __pyx_vtabstruct_5_cdec_HypergraphEdge *)((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3), __pyx_cur_scope->__pyx_v_self->hg, __pyx_cur_scope->__pyx_v_i); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = ((struct __pyx_vtabstruct_5_cdec_HypergraphEdge *)((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3), __pyx_cur_scope->__pyx_v_self->hg, __pyx_cur_scope->__pyx_v_i); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; @@ -10974,7 +11048,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_5edges_2generator9(__pyx_Generator __pyx_L6_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; @@ -11002,7 +11076,7 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_5nodes_1__get__(PyObject *__pyx_v_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":142 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":162 * * property nodes: * def __get__(self): # <<<<<<<<<<<<<< @@ -11028,7 +11102,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_5nodes___get__(struct __pyx_obj_5_ __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_5nodes_2generator10, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_10Hypergraph_5nodes_2generator10, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -11064,9 +11138,9 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_5nodes_2generator10(__pyx_Generato return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":144 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":164 * def __get__(self): * cdef unsigned i * for i in range(self.hg.nodes_.size()): # <<<<<<<<<<<<<< @@ -11077,16 +11151,16 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_5nodes_2generator10(__pyx_Generato for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":145 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":165 * cdef unsigned i * for i in range(self.hg.nodes_.size()): * yield HypergraphNode().init(self.hg, i) # <<<<<<<<<<<<<< * * property goal: */ - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphNode)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphNode)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = ((struct __pyx_vtabstruct_5_cdec_HypergraphNode *)((struct __pyx_obj_5_cdec_HypergraphNode 
*)__pyx_t_3)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_3), __pyx_cur_scope->__pyx_v_self->hg, __pyx_cur_scope->__pyx_v_i); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = ((struct __pyx_vtabstruct_5_cdec_HypergraphNode *)((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_3)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_3), __pyx_cur_scope->__pyx_v_self->hg, __pyx_cur_scope->__pyx_v_i); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; @@ -11101,7 +11175,7 @@ static PyObject *__pyx_gb_5_cdec_10Hypergraph_5nodes_2generator10(__pyx_Generato __pyx_L6_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; @@ -11128,7 +11202,7 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_4goal_1__get__(PyObject *__pyx_v_s return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":148 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":168 * * property goal: * def __get__(self): # <<<<<<<<<<<<<< @@ -11146,7 +11220,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_4goal___get__(struct __pyx_obj_5_c int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":149 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":169 * property goal: * def __get__(self): * return HypergraphNode().init(self.hg, self.hg.GoalNode()) # <<<<<<<<<<<<<< @@ -11154,9 +11228,9 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_4goal___get__(struct __pyx_obj_5_c * property npaths: */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphNode)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphNode)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = ((struct __pyx_vtabstruct_5_cdec_HypergraphNode *)((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_1)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_1), __pyx_v_self->hg, __pyx_v_self->hg->GoalNode()); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = ((struct __pyx_vtabstruct_5_cdec_HypergraphNode *)((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_1)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_1), __pyx_v_self->hg, __pyx_v_self->hg->GoalNode()); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; @@ -11187,7 +11261,7 @@ static PyObject 
*__pyx_pw_5_cdec_10Hypergraph_6npaths_1__get__(PyObject *__pyx_v return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":152 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":172 * * property npaths: * def __get__(self): # <<<<<<<<<<<<<< @@ -11204,7 +11278,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_6npaths___get__(struct __pyx_obj_5 int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":153 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":173 * property npaths: * def __get__(self): * return self.hg.NumberOfPaths() # <<<<<<<<<<<<<< @@ -11212,7 +11286,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_6npaths___get__(struct __pyx_obj_5 * def inside_outside(self): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->hg->NumberOfPaths()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->hg->NumberOfPaths()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; @@ -11232,6 +11306,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_6npaths___get__(struct __pyx_obj_5 /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_10Hypergraph_36inside_outside(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_10Hypergraph_35inside_outside[] = "hg.inside_outside() -> SparseVector with inside-outside scores for each feature."; static PyObject *__pyx_pw_5_cdec_10Hypergraph_36inside_outside(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -11241,12 +11316,12 @@ static PyObject *__pyx_pw_5_cdec_10Hypergraph_36inside_outside(PyObject *__pyx_v return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":155 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":175 * return self.hg.NumberOfPaths() * * def inside_outside(self): # <<<<<<<<<<<<<< + * """hg.inside_outside() -> SparseVector with inside-outside scores for each feature.""" * cdef FastSparseVector[prob_t]* result = new FastSparseVector[prob_t]() - * cdef prob_t z = hypergraph.InsideOutside(self.hg[0], result) */ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_self) { @@ -11265,17 +11340,17 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("inside_outside", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":156 - * + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":177 * def inside_outside(self): + * """hg.inside_outside() -> SparseVector with inside-outside scores for each feature.""" * cdef FastSparseVector[prob_t]* result = new FastSparseVector[prob_t]() # <<<<<<<<<<<<<< * cdef prob_t z = hypergraph.InsideOutside(self.hg[0], result) * result[0] /= z */ __pyx_v_result = new FastSparseVector<prob_t>(); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":157 - * def inside_outside(self): + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":178 + * """hg.inside_outside() -> SparseVector with inside-outside scores for each feature.""" * cdef FastSparseVector[prob_t]* result = new FastSparseVector[prob_t]() * cdef prob_t z = hypergraph.InsideOutside(self.hg[0], result) # 
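/* Usage sketch for the traversal API in these hunks: edges and nodes are
 * generators over lightweight wrappers around the C++ objects, goal is the
 * HypergraphNode returned by GoalNode(), and npaths is the derivation count
 * (a float, from Hypergraph::NumberOfPaths). `hg` as in the earlier sketch.
 *
 *     n_edges = sum(1 for _ in hg.edges)
 *     n_nodes = sum(1 for _ in hg.nodes)
 *     root = hg.goal
 *     print(hg.npaths)
 */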
<<<<<<<<<<<<<< * result[0] /= z @@ -11283,7 +11358,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ */ __pyx_v_z = InsideOutside<prob_t, EdgeProb, SparseVector<prob_t>, EdgeFeaturesAndProbWeightFunction>((__pyx_v_self->hg[0]), __pyx_v_result); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":158 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":179 * cdef FastSparseVector[prob_t]* result = new FastSparseVector[prob_t]() * cdef prob_t z = hypergraph.InsideOutside(self.hg[0], result) * result[0] /= z # <<<<<<<<<<<<<< @@ -11292,20 +11367,20 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ */ (__pyx_v_result[0]) /= __pyx_v_z; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":159 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":180 * cdef prob_t z = hypergraph.InsideOutside(self.hg[0], result) * result[0] /= z * cdef SparseVector vector = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * vector.vector = new FastSparseVector[double]() * cdef FastSparseVector[prob_t].const_iterator* it = new FastSparseVector[prob_t].const_iterator(result[0], False) */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_vector = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":160 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":181 * result[0] /= z * cdef SparseVector vector = SparseVector.__new__(SparseVector) * vector.vector = new FastSparseVector[double]() # <<<<<<<<<<<<<< @@ -11314,7 +11389,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ */ __pyx_v_vector->vector = new FastSparseVector<double>(); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":161 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":182 * cdef SparseVector vector = SparseVector.__new__(SparseVector) * vector.vector = new FastSparseVector[double]() * cdef FastSparseVector[prob_t].const_iterator* it = new FastSparseVector[prob_t].const_iterator(result[0], False) # <<<<<<<<<<<<<< @@ -11323,7 +11398,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ */ __pyx_v_it = new FastSparseVector<prob_t>::const_iterator((__pyx_v_result[0]), 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":163 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":184 * cdef FastSparseVector[prob_t].const_iterator* it = new FastSparseVector[prob_t].const_iterator(result[0], False) * cdef unsigned i * for i in range(result.size()): # <<<<<<<<<<<<<< @@ -11334,7 +11409,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; - /* 
"/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":164 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":185 * cdef unsigned i * for i in range(result.size()): * vector.vector.set_value(it[0].ptr().first, log(it[0].ptr().second)) # <<<<<<<<<<<<<< @@ -11343,7 +11418,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ */ __pyx_v_vector->vector->set_value((__pyx_v_it[0]).operator->()->first, log((__pyx_v_it[0]).operator->()->second)); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":165 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":186 * for i in range(result.size()): * vector.vector.set_value(it[0].ptr().first, log(it[0].ptr().second)) * pinc(it[0]) # ++it # <<<<<<<<<<<<<< @@ -11353,7 +11428,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ (++(__pyx_v_it[0])); } - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":166 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":187 * vector.vector.set_value(it[0].ptr().first, log(it[0].ptr().second)) * pinc(it[0]) # ++it * del it # <<<<<<<<<<<<<< @@ -11362,7 +11437,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ */ delete __pyx_v_it; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":167 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":188 * pinc(it[0]) # ++it * del it * del result # <<<<<<<<<<<<<< @@ -11371,7 +11446,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ */ delete __pyx_v_result; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":168 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":189 * del it * del result * return vector # <<<<<<<<<<<<<< @@ -11396,7 +11471,7 @@ static PyObject *__pyx_pf_5_cdec_10Hypergraph_35inside_outside(struct __pyx_obj_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":175 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":196 * cdef public TRule trule * * cdef init(self, hypergraph.Hypergraph* hg, unsigned i): # <<<<<<<<<<<<<< @@ -11413,7 +11488,7 @@ static PyObject *__pyx_f_5_cdec_14HypergraphEdge_init(struct __pyx_obj_5_cdec_Hy int __pyx_clineno = 0; __Pyx_RefNannySetupContext("init", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":176 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":197 * * cdef init(self, hypergraph.Hypergraph* hg, unsigned i): * self.hg = hg # <<<<<<<<<<<<<< @@ -11422,7 +11497,7 @@ static PyObject *__pyx_f_5_cdec_14HypergraphEdge_init(struct __pyx_obj_5_cdec_Hy */ __pyx_v_self->hg = __pyx_v_hg; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":177 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":198 * cdef init(self, hypergraph.Hypergraph* hg, unsigned i): * self.hg = hg * self.edge = &hg.edges_[i] # <<<<<<<<<<<<<< @@ -11431,23 +11506,23 @@ static PyObject *__pyx_f_5_cdec_14HypergraphEdge_init(struct __pyx_obj_5_cdec_Hy */ __pyx_v_self->edge = (&(__pyx_v_hg->edges_[__pyx_v_i])); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":178 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":199 * self.hg = hg * self.edge = &hg.edges_[i] * self.trule = TRule.__new__(TRule) # <<<<<<<<<<<<<< * self.trule.rule = new shared_ptr[grammar.TRule](self.edge.rule_) * return self */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_TRule)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + 
__pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_TRule)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_TRule)))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_TRule)))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->trule); __Pyx_DECREF(((PyObject *)__pyx_v_self->trule)); __pyx_v_self->trule = ((struct __pyx_obj_5_cdec_TRule *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":179 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":200 * self.edge = &hg.edges_[i] * self.trule = TRule.__new__(TRule) * self.trule.rule = new shared_ptr[grammar.TRule](self.edge.rule_) # <<<<<<<<<<<<<< @@ -11456,7 +11531,7 @@ static PyObject *__pyx_f_5_cdec_14HypergraphEdge_init(struct __pyx_obj_5_cdec_Hy */ __pyx_v_self->trule->rule = new boost::shared_ptr<TRule>(__pyx_v_self->edge->rule_); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":180 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":201 * self.trule = TRule.__new__(TRule) * self.trule.rule = new shared_ptr[grammar.TRule](self.edge.rule_) * return self # <<<<<<<<<<<<<< @@ -11491,7 +11566,7 @@ static Py_ssize_t __pyx_pw_5_cdec_14HypergraphEdge_1__len__(PyObject *__pyx_v_se return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":182 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":203 * return self * * def __len__(self): # <<<<<<<<<<<<<< @@ -11504,7 +11579,7 @@ static Py_ssize_t __pyx_pf_5_cdec_14HypergraphEdge___len__(struct __pyx_obj_5_cd __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":183 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":204 * * def __len__(self): * return self.edge.tail_nodes_.size() # <<<<<<<<<<<<<< @@ -11531,7 +11606,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphEdge_9head_node_1__get__(PyObject * return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":186 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":207 * * property head_node: * def __get__(self): # <<<<<<<<<<<<<< @@ -11549,7 +11624,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_9head_node___get__(struct __py int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":187 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":208 * property head_node: * def __get__(self): * return HypergraphNode().init(self.hg, self.edge.head_node_) # <<<<<<<<<<<<<< @@ -11557,9 +11632,9 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_9head_node___get__(struct __py * property tail_nodes: */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphNode)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphNode)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = ((struct __pyx_vtabstruct_5_cdec_HypergraphNode *)((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_1)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_1), __pyx_v_self->hg, __pyx_v_self->edge->head_node_); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = ((struct __pyx_vtabstruct_5_cdec_HypergraphNode *)((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_1)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_1), __pyx_v_self->hg, __pyx_v_self->edge->head_node_); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; @@ -11591,7 +11666,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphEdge_10tail_nodes_1__get__(PyObject return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":190 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":211 * * property tail_nodes: * def __get__(self): # <<<<<<<<<<<<<< @@ -11617,7 +11692,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_10tail_nodes___get__(struct __ __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_14HypergraphEdge_10tail_nodes_2generator11, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_14HypergraphEdge_10tail_nodes_2generator11, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -11653,9 +11728,9 @@ static PyObject *__pyx_gb_5_cdec_14HypergraphEdge_10tail_nodes_2generator11(__py return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":192 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":213 * def __get__(self): * cdef unsigned i * for i in range(self.edge.tail_nodes_.size()): # <<<<<<<<<<<<<< @@ -11666,16 +11741,16 @@ static PyObject *__pyx_gb_5_cdec_14HypergraphEdge_10tail_nodes_2generator11(__py for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":193 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":214 * cdef unsigned i * for i in range(self.edge.tail_nodes_.size()): * yield HypergraphNode().init(self.hg, self.edge.tail_nodes_[i]) # <<<<<<<<<<<<<< * * property span: */ - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphNode)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphNode)), ((PyObject 
*)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = ((struct __pyx_vtabstruct_5_cdec_HypergraphNode *)((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_3)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_3), __pyx_cur_scope->__pyx_v_self->hg, (__pyx_cur_scope->__pyx_v_self->edge->tail_nodes_[__pyx_cur_scope->__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = ((struct __pyx_vtabstruct_5_cdec_HypergraphNode *)((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_3)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_t_3), __pyx_cur_scope->__pyx_v_self->hg, (__pyx_cur_scope->__pyx_v_self->edge->tail_nodes_[__pyx_cur_scope->__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; @@ -11690,7 +11765,7 @@ static PyObject *__pyx_gb_5_cdec_14HypergraphEdge_10tail_nodes_2generator11(__py __pyx_L6_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; @@ -11717,7 +11792,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphEdge_4span_1__get__(PyObject *__pyx return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":196 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":217 * * property span: * def __get__(self): # <<<<<<<<<<<<<< @@ -11736,7 +11811,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_4span___get__(struct __pyx_obj int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":197 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":218 * property span: * def __get__(self): * return (self.edge.i_, self.edge.j_) # <<<<<<<<<<<<<< @@ -11744,11 +11819,11 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_4span___get__(struct __pyx_obj * property feature_values: */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromLong(__pyx_v_self->edge->i_); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyInt_FromLong(__pyx_v_self->edge->i_); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyInt_FromLong(__pyx_v_self->edge->j_); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyInt_FromLong(__pyx_v_self->edge->j_); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 197; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 218; __pyx_clineno 
= __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); @@ -11785,7 +11860,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphEdge_14feature_values_1__get__(PyOb return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":200 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":221 * * property feature_values: * def __get__(self): # <<<<<<<<<<<<<< @@ -11803,20 +11878,20 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_14feature_values___get__(struc int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":201 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":222 * property feature_values: * def __get__(self): * cdef SparseVector vector = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< * vector.vector = new FastSparseVector[double](self.edge.feature_values_) * return vector */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_SparseVector)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_SparseVector)))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_vector = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":202 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":223 * def __get__(self): * cdef SparseVector vector = SparseVector.__new__(SparseVector) * vector.vector = new FastSparseVector[double](self.edge.feature_values_) # <<<<<<<<<<<<<< @@ -11825,7 +11900,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_14feature_values___get__(struc */ __pyx_v_vector->vector = new FastSparseVector<double>(__pyx_v_self->edge->feature_values_); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":203 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":224 * cdef SparseVector vector = SparseVector.__new__(SparseVector) * vector.vector = new FastSparseVector[double](self.edge.feature_values_) * return vector # <<<<<<<<<<<<<< @@ -11861,7 +11936,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphEdge_4prob_1__get__(PyObject *__pyx return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":206 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":227 * * property prob: * def __get__(self): # <<<<<<<<<<<<<< @@ -11878,7 +11953,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_4prob___get__(struct __pyx_obj int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":207 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":228 * property prob: * def __get__(self): * return self.edge.edge_prob_.as_float() # <<<<<<<<<<<<<< @@ -11886,7 +11961,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_4prob___get__(struct __pyx_obj * def __richcmp__(HypergraphEdge x, HypergraphEdge y, int op): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = 
PyFloat_FromDouble(__pyx_v_self->edge->edge_prob_.as_float()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->edge->edge_prob_.as_float()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 228; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; @@ -11910,8 +11985,8 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphEdge_3__richcmp__(PyObject *__pyx_v PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__richcmp__ (wrapper)", 0); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5_cdec_HypergraphEdge, 1, "x", 0))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5_cdec_HypergraphEdge, 1, "y", 0))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5_cdec_HypergraphEdge, 1, "x", 0))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5_cdec_HypergraphEdge, 1, "y", 0))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_5_cdec_14HypergraphEdge_2__richcmp__(((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_v_x), ((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_v_y), ((int)__pyx_v_op)); goto __pyx_L0; __pyx_L1_error:; @@ -11921,7 +11996,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphEdge_3__richcmp__(PyObject *__pyx_v return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":209 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":230 * return self.edge.edge_prob_.as_float() * * def __richcmp__(HypergraphEdge x, HypergraphEdge y, int op): # <<<<<<<<<<<<<< @@ -11939,7 +12014,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_2__richcmp__(struct __pyx_obj_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__richcmp__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":212 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":233 * if op == 2: # == * return x.edge == y.edge * elif op == 3: # != # <<<<<<<<<<<<<< @@ -11948,7 +12023,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_2__richcmp__(struct __pyx_obj_ */ switch (__pyx_v_op) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":210 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":231 * * def __richcmp__(HypergraphEdge x, HypergraphEdge y, int op): * if op == 2: # == # <<<<<<<<<<<<<< @@ -11957,7 +12032,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_2__richcmp__(struct __pyx_obj_ */ case 2: - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":211 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":232 * def __richcmp__(HypergraphEdge x, HypergraphEdge y, int op): * if op == 2: # == * return x.edge == y.edge # <<<<<<<<<<<<<< @@ -11965,14 +12040,14 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_2__richcmp__(struct __pyx_obj_ * return not (x == y) */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyBool_FromLong((__pyx_v_x->edge == __pyx_v_y->edge)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = 
__Pyx_PyBool_FromLong((__pyx_v_x->edge == __pyx_v_y->edge)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; break; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":212 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":233 * if op == 2: # == * return x.edge == y.edge * elif op == 3: # != # <<<<<<<<<<<<<< @@ -11981,7 +12056,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_2__richcmp__(struct __pyx_obj_ */ case 3: - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":213 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":234 * return x.edge == y.edge * elif op == 3: # != * return not (x == y) # <<<<<<<<<<<<<< @@ -11989,11 +12064,10 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_2__richcmp__(struct __pyx_obj_ * */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_x), ((PyObject *)__pyx_v_y), Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_x), ((PyObject *)__pyx_v_y), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyBool_FromLong((!__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 213; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyBool_FromLong((!__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; @@ -12001,18 +12075,18 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphEdge_2__richcmp__(struct __pyx_obj_ break; } - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":214 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":235 * elif op == 3: # != * return not (x == y) * raise NotImplemented('comparison not implemented for HypergraphEdge') # <<<<<<<<<<<<<< * * cdef class HypergraphNode: */ - __pyx_t_1 = PyObject_Call(__pyx_builtin_NotImplemented, ((PyObject *)__pyx_k_tuple_19), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(__pyx_builtin_NotImplemented, ((PyObject *)__pyx_k_tuple_19), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[3]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[3]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; @@ -12037,7 +12111,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphEdge_5trule_1__get__(PyObject *__py return 
__pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":173 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":194 * cdef hypergraph.Hypergraph* hg * cdef hypergraph.HypergraphEdge* edge * cdef public TRule trule # <<<<<<<<<<<<<< @@ -12079,7 +12153,7 @@ static int __pyx_pf_5_cdec_14HypergraphEdge_5trule_2__set__(struct __pyx_obj_5_c const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); - if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5_cdec_TRule))))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(((__pyx_v_value) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_value, __pyx_ptype_5_cdec_TRule))))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 194; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); __Pyx_GOTREF(__pyx_v_self->trule); @@ -12122,7 +12196,7 @@ static int __pyx_pf_5_cdec_14HypergraphEdge_5trule_4__del__(struct __pyx_obj_5_c return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":220 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":241 * cdef hypergraph.HypergraphNode* node * * cdef init(self, hypergraph.Hypergraph* hg, unsigned i): # <<<<<<<<<<<<<< @@ -12135,7 +12209,7 @@ static PyObject *__pyx_f_5_cdec_14HypergraphNode_init(struct __pyx_obj_5_cdec_Hy __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("init", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":221 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":242 * * cdef init(self, hypergraph.Hypergraph* hg, unsigned i): * self.hg = hg # <<<<<<<<<<<<<< @@ -12144,7 +12218,7 @@ static PyObject *__pyx_f_5_cdec_14HypergraphNode_init(struct __pyx_obj_5_cdec_Hy */ __pyx_v_self->hg = __pyx_v_hg; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":222 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":243 * cdef init(self, hypergraph.Hypergraph* hg, unsigned i): * self.hg = hg * self.node = &hg.nodes_[i] # <<<<<<<<<<<<<< @@ -12153,7 +12227,7 @@ static PyObject *__pyx_f_5_cdec_14HypergraphNode_init(struct __pyx_obj_5_cdec_Hy */ __pyx_v_self->node = (&(__pyx_v_hg->nodes_[__pyx_v_i])); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":223 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":244 * self.hg = hg * self.node = &hg.nodes_[i] * return self # <<<<<<<<<<<<<< @@ -12184,7 +12258,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphNode_8in_edges_1__get__(PyObject *_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":226 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":247 * * property in_edges: * def __get__(self): # <<<<<<<<<<<<<< @@ -12210,7 +12284,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode_8in_edges___get__(struct __pyx __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_14HypergraphNode_8in_edges_2generator12, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_14HypergraphNode_8in_edges_2generator12, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 247; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -12246,9 +12320,9 @@ static PyObject *__pyx_gb_5_cdec_14HypergraphNode_8in_edges_2generator12(__pyx_G return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":228 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":249 * def __get__(self): * cdef unsigned i * for i in range(self.node.in_edges_.size()): # <<<<<<<<<<<<<< @@ -12259,16 +12333,16 @@ static PyObject *__pyx_gb_5_cdec_14HypergraphNode_8in_edges_2generator12(__pyx_G for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":229 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":250 * cdef unsigned i * for i in range(self.node.in_edges_.size()): * yield HypergraphEdge().init(self.hg, self.node.in_edges_[i]) # <<<<<<<<<<<<<< * * property out_edges: */ - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphEdge)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphEdge)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = ((struct __pyx_vtabstruct_5_cdec_HypergraphEdge *)((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3), __pyx_cur_scope->__pyx_v_self->hg, (__pyx_cur_scope->__pyx_v_self->node->in_edges_[__pyx_cur_scope->__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = ((struct __pyx_vtabstruct_5_cdec_HypergraphEdge *)((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3), __pyx_cur_scope->__pyx_v_self->hg, (__pyx_cur_scope->__pyx_v_self->node->in_edges_[__pyx_cur_scope->__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; @@ -12283,7 +12357,7 @@ static PyObject *__pyx_gb_5_cdec_14HypergraphNode_8in_edges_2generator12(__pyx_G __pyx_L6_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 229; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; @@ -12311,7 +12385,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphNode_9out_edges_1__get__(PyObject * return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":232 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":253 * * property out_edges: * def 
__get__(self): # <<<<<<<<<<<<<< @@ -12337,7 +12411,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode_9out_edges___get__(struct __py __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_14HypergraphNode_9out_edges_2generator13, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_14HypergraphNode_9out_edges_2generator13, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -12373,9 +12447,9 @@ static PyObject *__pyx_gb_5_cdec_14HypergraphNode_9out_edges_2generator13(__pyx_ return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":234 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":255 * def __get__(self): * cdef unsigned i * for i in range(self.node.out_edges_.size()): # <<<<<<<<<<<<<< @@ -12386,16 +12460,16 @@ static PyObject *__pyx_gb_5_cdec_14HypergraphNode_9out_edges_2generator13(__pyx_ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":235 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":256 * cdef unsigned i * for i in range(self.node.out_edges_.size()): * yield HypergraphEdge().init(self.hg, self.node.out_edges_[i]) # <<<<<<<<<<<<<< * * property span: */ - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphEdge)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_HypergraphEdge)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = ((struct __pyx_vtabstruct_5_cdec_HypergraphEdge *)((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3), __pyx_cur_scope->__pyx_v_self->hg, (__pyx_cur_scope->__pyx_v_self->node->out_edges_[__pyx_cur_scope->__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = ((struct __pyx_vtabstruct_5_cdec_HypergraphEdge *)((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3)->__pyx_vtab)->init(((struct __pyx_obj_5_cdec_HypergraphEdge *)__pyx_t_3), __pyx_cur_scope->__pyx_v_self->hg, (__pyx_cur_scope->__pyx_v_self->node->out_edges_[__pyx_cur_scope->__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; @@ -12410,7 +12484,7 @@ 
static PyObject *__pyx_gb_5_cdec_14HypergraphNode_9out_edges_2generator13(__pyx_ __pyx_L6_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 256; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; @@ -12437,7 +12511,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphNode_4span_1__get__(PyObject *__pyx return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":238 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":259 * * property span: * def __get__(self): # <<<<<<<<<<<<<< @@ -12455,7 +12529,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode_4span___get__(struct __pyx_obj int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":239 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":260 * property span: * def __get__(self): * return next(self.in_edges).span # <<<<<<<<<<<<<< @@ -12463,12 +12537,12 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode_4span___get__(struct __pyx_obj * property cat: */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__in_edges); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s__in_edges); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyIter_Next(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = __Pyx_PyIter_Next(__pyx_t_1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__span); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__span); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; @@ -12499,7 +12573,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphNode_3cat_1__get__(PyObject *__pyx_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":242 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":263 * * property cat: * def __get__(self): # <<<<<<<<<<<<<< @@ -12517,7 +12591,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode_3cat___get__(struct __pyx_obj_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":243 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":264 * property cat: * def __get__(self): * if self.node.cat_: # <<<<<<<<<<<<<< @@ -12526,7 +12600,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode_3cat___get__(struct __pyx_obj_ */ if (__pyx_v_self->node->cat_) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":244 + /* 
"/home/vchahune/tools/cdec/python/src/hypergraph.pxi":265 * def __get__(self): * if self.node.cat_: * return str(TDConvert(-self.node.cat_).c_str()) # <<<<<<<<<<<<<< @@ -12534,14 +12608,14 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode_3cat___get__(struct __pyx_obj_ * def __richcmp__(HypergraphNode x, HypergraphNode y, int op): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyBytes_FromString(TD::Convert((-__pyx_v_self->node->cat_)).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(TD::Convert((-__pyx_v_self->node->cat_)).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 244; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 265; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; @@ -12570,8 +12644,8 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphNode_1__richcmp__(PyObject *__pyx_v PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__richcmp__ (wrapper)", 0); - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5_cdec_HypergraphNode, 1, "x", 0))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5_cdec_HypergraphNode, 1, "y", 0))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 246; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5_cdec_HypergraphNode, 1, "x", 0))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5_cdec_HypergraphNode, 1, "y", 0))) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 267; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_5_cdec_14HypergraphNode___richcmp__(((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_v_x), ((struct __pyx_obj_5_cdec_HypergraphNode *)__pyx_v_y), ((int)__pyx_v_op)); goto __pyx_L0; __pyx_L1_error:; @@ -12581,7 +12655,7 @@ static PyObject *__pyx_pw_5_cdec_14HypergraphNode_1__richcmp__(PyObject *__pyx_v return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":246 +/* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":267 * return str(TDConvert(-self.node.cat_).c_str()) * * def __richcmp__(HypergraphNode x, HypergraphNode y, int op): # <<<<<<<<<<<<<< @@ -12599,7 +12673,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode___richcmp__(struct __pyx_obj_5 
int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__richcmp__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":249 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":270 * if op == 2: # == * return x.node == y.node * elif op == 3: # != # <<<<<<<<<<<<<< @@ -12608,7 +12682,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode___richcmp__(struct __pyx_obj_5 */ switch (__pyx_v_op) { - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":247 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":268 * * def __richcmp__(HypergraphNode x, HypergraphNode y, int op): * if op == 2: # == # <<<<<<<<<<<<<< @@ -12617,7 +12691,7 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode___richcmp__(struct __pyx_obj_5 */ case 2: - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":248 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":269 * def __richcmp__(HypergraphNode x, HypergraphNode y, int op): * if op == 2: # == * return x.node == y.node # <<<<<<<<<<<<<< @@ -12625,14 +12699,14 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode___richcmp__(struct __pyx_obj_5 * return not (x == y) */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyBool_FromLong((__pyx_v_x->node == __pyx_v_y->node)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 248; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyBool_FromLong((__pyx_v_x->node == __pyx_v_y->node)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 269; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; break; - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":249 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":270 * if op == 2: # == * return x.node == y.node * elif op == 3: # != # <<<<<<<<<<<<<< @@ -12641,18 +12715,17 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode___richcmp__(struct __pyx_obj_5 */ case 3: - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":250 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":271 * return x.node == y.node * elif op == 3: # != * return not (x == y) # <<<<<<<<<<<<<< * raise NotImplemented('comparison not implemented for HypergraphNode') */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_x), ((PyObject *)__pyx_v_y), Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_x), ((PyObject *)__pyx_v_y), Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyBool_FromLong((!__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyBool_FromLong((!__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = 
__pyx_t_1; __pyx_t_1 = 0; @@ -12660,16 +12733,16 @@ static PyObject *__pyx_pf_5_cdec_14HypergraphNode___richcmp__(struct __pyx_obj_5 break; } - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":251 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":272 * elif op == 3: # != * return not (x == y) * raise NotImplemented('comparison not implemented for HypergraphNode') # <<<<<<<<<<<<<< */ - __pyx_t_1 = PyObject_Call(__pyx_builtin_NotImplemented, ((PyObject *)__pyx_k_tuple_21), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(__pyx_builtin_NotImplemented, ((PyObject *)__pyx_k_tuple_21), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[3]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[3]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; @@ -12697,7 +12770,7 @@ static int __pyx_pw_5_cdec_7Lattice_1__cinit__(PyObject *__pyx_v_self, PyObject return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":6 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":6 * cdef lattice.Lattice* lattice * * def __cinit__(self): # <<<<<<<<<<<<<< @@ -12710,7 +12783,7 @@ static int __pyx_pf_5_cdec_7Lattice___cinit__(struct __pyx_obj_5_cdec_Lattice *_ __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":7 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":7 * * def __cinit__(self): * self.lattice = new lattice.Lattice() # <<<<<<<<<<<<<< @@ -12726,6 +12799,10 @@ static int __pyx_pf_5_cdec_7Lattice___cinit__(struct __pyx_obj_5_cdec_Lattice *_ /* Python wrapper */ static int __pyx_pw_5_cdec_7Lattice_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_7Lattice_2__init__[] = "Lattice(tuple) -> Lattice from node list.\n Lattice(string) -> Lattice from PLF representation."; +#if CYTHON_COMPILING_IN_CPYTHON +struct wrapperbase __pyx_wrapperbase_5_cdec_7Lattice_2__init__; +#endif static int __pyx_pw_5_cdec_7Lattice_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_inp = 0; int __pyx_r; @@ -12771,12 +12848,12 @@ static int __pyx_pw_5_cdec_7Lattice_3__init__(PyObject *__pyx_v_self, PyObject * return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":9 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":9 * self.lattice = new lattice.Lattice() * * def __init__(self, inp): # <<<<<<<<<<<<<< - * if isinstance(inp, tuple): - * self.lattice.resize(len(inp)) + * """Lattice(tuple) -> Lattice from node list. 
+ * Lattice(string) -> Lattice from PLF representation.""" */ static int __pyx_pf_5_cdec_7Lattice_2__init__(struct __pyx_obj_5_cdec_Lattice *__pyx_v_self, PyObject *__pyx_v_inp) { @@ -12790,17 +12867,15 @@ static int __pyx_pf_5_cdec_7Lattice_2__init__(struct __pyx_obj_5_cdec_Lattice *_ PyObject *__pyx_t_4 = NULL; PyObject *(*__pyx_t_5)(PyObject *); PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - char *__pyx_t_8; + std::string __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); - __Pyx_INCREF(__pyx_v_inp); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":10 - * - * def __init__(self, inp): + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":12 + * """Lattice(tuple) -> Lattice from node list. + * Lattice(string) -> Lattice from PLF representation.""" * if isinstance(inp, tuple): # <<<<<<<<<<<<<< * self.lattice.resize(len(inp)) * for i, arcs in enumerate(inp): @@ -12811,22 +12886,22 @@ static int __pyx_pf_5_cdec_7Lattice_2__init__(struct __pyx_obj_5_cdec_Lattice *_ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":11 - * def __init__(self, inp): + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":13 + * Lattice(string) -> Lattice from PLF representation.""" * if isinstance(inp, tuple): * self.lattice.resize(len(inp)) # <<<<<<<<<<<<<< * for i, arcs in enumerate(inp): * self[i] = arcs */ - __pyx_t_3 = PyObject_Length(__pyx_v_inp); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 11; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Length(__pyx_v_inp); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->lattice->resize(__pyx_t_3); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":12 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":14 * if isinstance(inp, tuple): * self.lattice.resize(len(inp)) * for i, arcs in enumerate(inp): # <<<<<<<<<<<<<< * self[i] = arcs - * else: + * elif isinstance(inp, basestring): */ __Pyx_INCREF(__pyx_int_0); __pyx_t_1 = __pyx_int_0; @@ -12834,7 +12909,7 @@ static int __pyx_pf_5_cdec_7Lattice_2__init__(struct __pyx_obj_5_cdec_Lattice *_ __pyx_t_4 = __pyx_v_inp; __Pyx_INCREF(__pyx_t_4); __pyx_t_3 = 0; __pyx_t_5 = NULL; } else { - __pyx_t_3 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_inp); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_inp); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = Py_TYPE(__pyx_t_4)->tp_iternext; } @@ -12842,23 +12917,23 @@ static int __pyx_pf_5_cdec_7Lattice_2__init__(struct __pyx_obj_5_cdec_Lattice *_ if (!__pyx_t_5 && PyList_CheckExact(__pyx_t_4)) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_6 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_3); __Pyx_INCREF(__pyx_t_6); __pyx_t_3++; + __pyx_t_6 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_3); __Pyx_INCREF(__pyx_t_6); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_6 = PySequence_ITEM(__pyx_t_4, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 12; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;}; + __pyx_t_6 = PySequence_ITEM(__pyx_t_4, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_5 && PyTuple_CheckExact(__pyx_t_4)) { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_3); __Pyx_INCREF(__pyx_t_6); __pyx_t_3++; + __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_3); __Pyx_INCREF(__pyx_t_6); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_6 = PySequence_ITEM(__pyx_t_4, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_6 = PySequence_ITEM(__pyx_t_4, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_6 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_6)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[4]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[4]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } @@ -12870,106 +12945,75 @@ static int __pyx_pf_5_cdec_7Lattice_2__init__(struct __pyx_obj_5_cdec_Lattice *_ __Pyx_INCREF(__pyx_t_1); __Pyx_XDECREF(__pyx_v_i); __pyx_v_i = __pyx_t_1; - __pyx_t_6 = PyNumber_Add(__pyx_t_1, __pyx_int_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyNumber_Add(__pyx_t_1, __pyx_int_1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = __pyx_t_6; __pyx_t_6 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":13 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":15 * self.lattice.resize(len(inp)) * for i, arcs in enumerate(inp): * self[i] = arcs # <<<<<<<<<<<<<< - * else: - * if isinstance(inp, unicode): + * elif isinstance(inp, basestring): + * lattice.ConvertTextOrPLF(as_str(inp), self.lattice) */ - if (PyObject_SetItem(((PyObject *)__pyx_v_self), __pyx_v_i, __pyx_v_arcs) < 0) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetItem(((PyObject *)__pyx_v_self), __pyx_v_i, __pyx_v_arcs) < 0) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L3; } - /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":15 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":16 + * for i, arcs in enumerate(inp): * self[i] = arcs + * elif isinstance(inp, basestring): # <<<<<<<<<<<<<< + * lattice.ConvertTextOrPLF(as_str(inp), self.lattice) * else: - * if isinstance(inp, unicode): # <<<<<<<<<<<<<< - * inp = inp.encode('utf8') - * if not isinstance(inp, str): */ - __pyx_t_1 = ((PyObject *)((PyObject*)(&PyUnicode_Type))); - __Pyx_INCREF(__pyx_t_1); - __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_inp, __pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_2) { + __pyx_t_1 = __pyx_builtin_basestring; + 
__Pyx_INCREF(__pyx_t_1); + __pyx_t_2 = PyObject_IsInstance(__pyx_v_inp, __pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":16 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":17 + * self[i] = arcs + * elif isinstance(inp, basestring): + * lattice.ConvertTextOrPLF(as_str(inp), self.lattice) # <<<<<<<<<<<<<< * else: - * if isinstance(inp, unicode): - * inp = inp.encode('utf8') # <<<<<<<<<<<<<< - * if not isinstance(inp, str): - * raise TypeError('cannot create lattice from %s' % type(inp)) - */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_inp, __pyx_n_s__encode); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_k_tuple_22), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_v_inp); - __pyx_v_inp = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L6; - } - __pyx_L6:; - - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":17 - * if isinstance(inp, unicode): - * inp = inp.encode('utf8') - * if not isinstance(inp, str): # <<<<<<<<<<<<<< - * raise TypeError('cannot create lattice from %s' % type(inp)) - * lattice.ConvertTextOrPLF(string(<char *>inp), self.lattice) - */ - __pyx_t_4 = ((PyObject *)((PyObject*)(&PyString_Type))); - __Pyx_INCREF(__pyx_t_4); - __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_inp, __pyx_t_4); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_7 = (!__pyx_t_2); - if (__pyx_t_7) { - - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":18 - * inp = inp.encode('utf8') - * if not isinstance(inp, str): - * raise TypeError('cannot create lattice from %s' % type(inp)) # <<<<<<<<<<<<<< - * lattice.ConvertTextOrPLF(string(<char *>inp), self.lattice) - * + * raise TypeError('cannot create lattice from %s' % type(inp)) */ - __pyx_t_4 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_23), ((PyObject *)Py_TYPE(__pyx_v_inp))); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_4)); - __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); - __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - {__pyx_filename = __pyx_f[4]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - goto __pyx_L7; - } - __pyx_L7:; + __pyx_t_1 = ((PyObject *)__pyx_f_5_cdec_as_str(__pyx_v_inp, NULL)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_7 = __pyx_convert_string_from_py_(__pyx_t_1); if 
(unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + LatticeTools::ConvertTextOrPLF(__pyx_t_7, __pyx_v_self->lattice); + goto __pyx_L3; + } + /*else*/ { - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":19 - * if not isinstance(inp, str): - * raise TypeError('cannot create lattice from %s' % type(inp)) - * lattice.ConvertTextOrPLF(string(<char *>inp), self.lattice) # <<<<<<<<<<<<<< + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":19 + * lattice.ConvertTextOrPLF(as_str(inp), self.lattice) + * else: + * raise TypeError('cannot create lattice from %s' % type(inp)) # <<<<<<<<<<<<<< * * def __dealloc__(self): */ - __pyx_t_8 = PyBytes_AsString(__pyx_v_inp); if (unlikely((!__pyx_t_8) && PyErr_Occurred())) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - LatticeTools::ConvertTextOrPLF(std::string(((char *)__pyx_t_8)), __pyx_v_self->lattice); + __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_22), ((PyObject *)Py_TYPE(__pyx_v_inp))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(((PyObject *)__pyx_t_1)); + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_1)); + __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); + __pyx_t_1 = 0; + __pyx_t_1 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + {__pyx_filename = __pyx_f[4]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L3:; @@ -12984,7 +13028,6 @@ static int __pyx_pf_5_cdec_7Lattice_2__init__(struct __pyx_obj_5_cdec_Lattice *_ __pyx_L0:; __Pyx_XDECREF(__pyx_v_i); __Pyx_XDECREF(__pyx_v_arcs); - __Pyx_XDECREF(__pyx_v_inp); __Pyx_RefNannyFinishContext(); return __pyx_r; } @@ -12998,8 +13041,8 @@ static void __pyx_pw_5_cdec_7Lattice_5__dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":21 - * lattice.ConvertTextOrPLF(string(<char *>inp), self.lattice) +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":21 + * raise TypeError('cannot create lattice from %s' % type(inp)) * * def __dealloc__(self): # <<<<<<<<<<<<<< * del self.lattice @@ -13010,7 +13053,7 @@ static void __pyx_pf_5_cdec_7Lattice_4__dealloc__(CYTHON_UNUSED struct __pyx_obj __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":22 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":22 * * def __dealloc__(self): * del self.lattice # <<<<<<<<<<<<<< @@ -13043,7 +13086,7 @@ static PyObject *__pyx_pw_5_cdec_7Lattice_7__getitem__(PyObject *__pyx_v_self, P return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":24 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":24 * del self.lattice * * def __getitem__(self, int index): # <<<<<<<<<<<<<< @@ -13073,7 +13116,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_6__getitem__(struct __pyx_obj_5_cdec_L int 
__pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":25 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":25 * * def __getitem__(self, int index): * if not 0 <= index < len(self): # <<<<<<<<<<<<<< @@ -13088,14 +13131,14 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_6__getitem__(struct __pyx_obj_5_cdec_L __pyx_t_3 = (!__pyx_t_1); if (__pyx_t_3) { - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":26 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":26 * def __getitem__(self, int index): * if not 0 <= index < len(self): * raise IndexError('lattice index out of range') # <<<<<<<<<<<<<< * arcs = [] * cdef vector[lattice.LatticeArc] arc_vector = self.lattice[0][index] */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_IndexError, ((PyObject *)__pyx_k_tuple_25), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyObject_Call(__pyx_builtin_IndexError, ((PyObject *)__pyx_k_tuple_24), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -13104,7 +13147,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_6__getitem__(struct __pyx_obj_5_cdec_L } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":27 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":27 * if not 0 <= index < len(self): * raise IndexError('lattice index out of range') * arcs = [] # <<<<<<<<<<<<<< @@ -13116,7 +13159,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_6__getitem__(struct __pyx_obj_5_cdec_L __pyx_v_arcs = __pyx_t_4; __pyx_t_4 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":28 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":28 * raise IndexError('lattice index out of range') * arcs = [] * cdef vector[lattice.LatticeArc] arc_vector = self.lattice[0][index] # <<<<<<<<<<<<<< @@ -13125,7 +13168,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_6__getitem__(struct __pyx_obj_5_cdec_L */ __pyx_v_arc_vector = ((__pyx_v_self->lattice[0])[__pyx_v_index]); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":31 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":31 * cdef lattice.LatticeArc* arc * cdef unsigned i * for i in range(arc_vector.size()): # <<<<<<<<<<<<<< @@ -13136,7 +13179,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_6__getitem__(struct __pyx_obj_5_cdec_L for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":32 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":32 * cdef unsigned i * for i in range(arc_vector.size()): * arc = &arc_vector[i] # <<<<<<<<<<<<<< @@ -13145,7 +13188,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_6__getitem__(struct __pyx_obj_5_cdec_L */ __pyx_v_arc = (&(__pyx_v_arc_vector[__pyx_v_i])); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":33 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":33 * for i in range(arc_vector.size()): * arc = &arc_vector[i] * label = unicode(TDConvert(arc.label).c_str(), 'utf8') # <<<<<<<<<<<<<< @@ -13169,7 +13212,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_6__getitem__(struct __pyx_obj_5_cdec_L __pyx_v_label = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":34 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":34 * arc = 
&arc_vector[i] * label = unicode(TDConvert(arc.label).c_str(), 'utf8') * arcs.append((label, arc.cost, arc.dist2next)) # <<<<<<<<<<<<<< @@ -13195,7 +13238,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_6__getitem__(struct __pyx_obj_5_cdec_L __Pyx_DECREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; } - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":35 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":35 * label = unicode(TDConvert(arc.label).c_str(), 'utf8') * arcs.append((label, arc.cost, arc.dist2next)) * return tuple(arcs) # <<<<<<<<<<<<<< @@ -13251,7 +13294,7 @@ static int __pyx_pw_5_cdec_7Lattice_9__setitem__(PyObject *__pyx_v_self, PyObjec return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":37 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":37 * return tuple(arcs) * * def __setitem__(self, int index, tuple arcs): # <<<<<<<<<<<<<< @@ -13264,6 +13307,7 @@ static int __pyx_pf_5_cdec_7Lattice_8__setitem__(struct __pyx_obj_5_cdec_Lattice PyObject *__pyx_v_label = NULL; PyObject *__pyx_v_cost = NULL; PyObject *__pyx_v_dist2next = NULL; + PyObject *__pyx_v_label_str = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; @@ -13284,7 +13328,7 @@ static int __pyx_pf_5_cdec_7Lattice_8__setitem__(struct __pyx_obj_5_cdec_Lattice int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":38 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":38 * * def __setitem__(self, int index, tuple arcs): * if not 0 <= index < len(self): # <<<<<<<<<<<<<< @@ -13299,14 +13343,14 @@ static int __pyx_pf_5_cdec_7Lattice_8__setitem__(struct __pyx_obj_5_cdec_Lattice __pyx_t_3 = (!__pyx_t_1); if (__pyx_t_3) { - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":39 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":39 * def __setitem__(self, int index, tuple arcs): * if not 0 <= index < len(self): * raise IndexError('lattice index out of range') # <<<<<<<<<<<<<< * cdef lattice.LatticeArc* arc * for (label, cost, dist2next) in arcs: */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_IndexError, ((PyObject *)__pyx_k_tuple_26), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyObject_Call(__pyx_builtin_IndexError, ((PyObject *)__pyx_k_tuple_25), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -13315,12 +13359,12 @@ static int __pyx_pf_5_cdec_7Lattice_8__setitem__(struct __pyx_obj_5_cdec_Lattice } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":41 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":41 * raise IndexError('lattice index out of range') * cdef lattice.LatticeArc* arc * for (label, cost, dist2next) in arcs: # <<<<<<<<<<<<<< - * if isinstance(label, unicode): - * label = label.encode('utf8') + * label_str = as_str(label) + * arc = new lattice.LatticeArc(TDConvert(label_str), cost, dist2next) */ if (unlikely(((PyObject *)__pyx_v_arcs) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); @@ -13330,9 +13374,9 @@ static int __pyx_pf_5_cdec_7Lattice_8__setitem__(struct __pyx_obj_5_cdec_Lattice for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_2); 
__Pyx_INCREF(__pyx_t_5); __pyx_t_2++; + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_2); __Pyx_INCREF(__pyx_t_5); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_4, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_5 = PySequence_ITEM(__pyx_t_4, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) { PyObject* sequence = __pyx_t_5; @@ -13399,61 +13443,42 @@ static int __pyx_pf_5_cdec_7Lattice_8__setitem__(struct __pyx_obj_5_cdec_Lattice __pyx_v_dist2next = __pyx_t_8; __pyx_t_8 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":42 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":42 * cdef lattice.LatticeArc* arc * for (label, cost, dist2next) in arcs: - * if isinstance(label, unicode): # <<<<<<<<<<<<<< - * label = label.encode('utf8') - * arc = new lattice.LatticeArc(TDConvert(<char *>label), cost, dist2next) - */ - __pyx_t_5 = ((PyObject *)((PyObject*)(&PyUnicode_Type))); - __Pyx_INCREF(__pyx_t_5); - __pyx_t_3 = __Pyx_TypeCheck(__pyx_v_label, __pyx_t_5); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_3) { - - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":43 - * for (label, cost, dist2next) in arcs: - * if isinstance(label, unicode): - * label = label.encode('utf8') # <<<<<<<<<<<<<< - * arc = new lattice.LatticeArc(TDConvert(<char *>label), cost, dist2next) + * label_str = as_str(label) # <<<<<<<<<<<<<< + * arc = new lattice.LatticeArc(TDConvert(label_str), cost, dist2next) * self.lattice[0][index].push_back(arc[0]) */ - __pyx_t_5 = PyObject_GetAttr(__pyx_v_label, __pyx_n_s__encode); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_8 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_k_tuple_27), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_v_label); - __pyx_v_label = __pyx_t_8; - __pyx_t_8 = 0; - goto __pyx_L8; - } - __pyx_L8:; + __pyx_t_5 = ((PyObject *)__pyx_f_5_cdec_as_str(__pyx_v_label, NULL)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_5); + __Pyx_XDECREF(((PyObject *)__pyx_v_label_str)); + __pyx_v_label_str = ((PyObject*)__pyx_t_5); + __pyx_t_5 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":44 - * if isinstance(label, unicode): - * label = label.encode('utf8') - * arc = new lattice.LatticeArc(TDConvert(<char *>label), cost, dist2next) # <<<<<<<<<<<<<< + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":43 + * for (label, cost, dist2next) in arcs: + * label_str = as_str(label) + * arc = new lattice.LatticeArc(TDConvert(label_str), cost, dist2next) # <<<<<<<<<<<<<< * self.lattice[0][index].push_back(arc[0]) * del arc */ - __pyx_t_11 = PyBytes_AsString(__pyx_v_label); if (unlikely((!__pyx_t_11) && PyErr_Occurred())) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_12 = 
__pyx_PyFloat_AsDouble(__pyx_v_cost); if (unlikely((__pyx_t_12 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_13 = __Pyx_PyInt_AsInt(__pyx_v_dist2next); if (unlikely((__pyx_t_13 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_arc = new LatticeArc(TD::Convert(((char *)__pyx_t_11)), __pyx_t_12, __pyx_t_13); + __pyx_t_11 = PyBytes_AsString(((PyObject *)__pyx_v_label_str)); if (unlikely((!__pyx_t_11) && PyErr_Occurred())) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_12 = __pyx_PyFloat_AsDouble(__pyx_v_cost); if (unlikely((__pyx_t_12 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_13 = __Pyx_PyInt_AsInt(__pyx_v_dist2next); if (unlikely((__pyx_t_13 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_v_arc = new LatticeArc(TD::Convert(__pyx_t_11), __pyx_t_12, __pyx_t_13); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":45 - * label = label.encode('utf8') - * arc = new lattice.LatticeArc(TDConvert(<char *>label), cost, dist2next) + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":44 + * label_str = as_str(label) + * arc = new lattice.LatticeArc(TDConvert(label_str), cost, dist2next) * self.lattice[0][index].push_back(arc[0]) # <<<<<<<<<<<<<< * del arc * */ ((__pyx_v_self->lattice[0])[__pyx_v_index]).push_back((__pyx_v_arc[0])); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":46 - * arc = new lattice.LatticeArc(TDConvert(<char *>label), cost, dist2next) + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":45 + * arc = new lattice.LatticeArc(TDConvert(label_str), cost, dist2next) * self.lattice[0][index].push_back(arc[0]) * del arc # <<<<<<<<<<<<<< * @@ -13478,6 +13503,7 @@ static int __pyx_pf_5_cdec_7Lattice_8__setitem__(struct __pyx_obj_5_cdec_Lattice __Pyx_XDECREF(__pyx_v_label); __Pyx_XDECREF(__pyx_v_cost); __Pyx_XDECREF(__pyx_v_dist2next); + __Pyx_XDECREF(__pyx_v_label_str); __Pyx_RefNannyFinishContext(); return __pyx_r; } @@ -13493,7 +13519,7 @@ static Py_ssize_t __pyx_pw_5_cdec_7Lattice_11__len__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":48 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":47 * del arc * * def __len__(self): # <<<<<<<<<<<<<< @@ -13506,7 +13532,7 @@ static Py_ssize_t __pyx_pf_5_cdec_7Lattice_10__len__(struct __pyx_obj_5_cdec_Lat __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":49 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":48 * * def __len__(self): * return self.lattice.size() # <<<<<<<<<<<<<< @@ -13533,7 +13559,7 @@ static PyObject *__pyx_pw_5_cdec_7Lattice_13__str__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":51 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":50 * return self.lattice.size() * * def __str__(self): # <<<<<<<<<<<<<< @@ -13551,7 +13577,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_12__str__(struct __pyx_obj_5_cdec_Latt int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":52 + /* 
"/home/vchahune/tools/cdec/python/src/lattice.pxi":51 * * def __str__(self): * return str(hypergraph.AsPLF(self.lattice[0], True).c_str()) # <<<<<<<<<<<<<< @@ -13559,14 +13585,14 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_12__str__(struct __pyx_obj_5_cdec_Latt * def __unicode__(self): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyBytes_FromString(HypergraphIO::AsPLF((__pyx_v_self->lattice[0]), 1).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(HypergraphIO::AsPLF((__pyx_v_self->lattice[0]), 1).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; @@ -13597,7 +13623,7 @@ static PyObject *__pyx_pw_5_cdec_7Lattice_15__unicode__(PyObject *__pyx_v_self, return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":54 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":53 * return str(hypergraph.AsPLF(self.lattice[0], True).c_str()) * * def __unicode__(self): # <<<<<<<<<<<<<< @@ -13615,7 +13641,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_14__unicode__(struct __pyx_obj_5_cdec_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__unicode__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":55 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":54 * * def __unicode__(self): * return unicode(str(self), 'utf8') # <<<<<<<<<<<<<< @@ -13623,15 +13649,15 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_14__unicode__(struct __pyx_obj_5_cdec_ * def __iter__(self): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 54; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); @@ -13639,7 +13665,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_14__unicode__(struct __pyx_obj_5_cdec_ PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyUnicode_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; @@ -13671,7 +13697,7 @@ static PyObject *__pyx_pw_5_cdec_7Lattice_17__iter__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":57 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":56 * return unicode(str(self), 'utf8') * * def __iter__(self): # <<<<<<<<<<<<<< @@ -13697,7 +13723,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_16__iter__(struct __pyx_obj_5_cdec_Lat __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_7Lattice_18generator14, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_7Lattice_18generator14, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -13732,27 +13758,27 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_18generator14(__pyx_GeneratorObject *_ return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":59 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":58 * def __iter__(self): * cdef unsigned i * for i in range(len(self)): # <<<<<<<<<<<<<< * yield self[i] * */ - __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_cur_scope->__pyx_v_self)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_cur_scope->__pyx_v_self)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;} for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; 
__pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":60 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":59 * cdef unsigned i * for i in range(len(self)): * yield self[i] # <<<<<<<<<<<<<< * * def todot(self): */ - __pyx_t_3 = __Pyx_GetItemInt(((PyObject *)__pyx_cur_scope->__pyx_v_self), __pyx_cur_scope->__pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __Pyx_GetItemInt(((PyObject *)__pyx_cur_scope->__pyx_v_self), __pyx_cur_scope->__pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; @@ -13766,7 +13792,7 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_18generator14(__pyx_GeneratorObject *_ __pyx_L6_resume_from_yield:; __pyx_t_1 = __pyx_cur_scope->__pyx_t_0; __pyx_t_2 = __pyx_cur_scope->__pyx_t_1; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 59; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } PyErr_SetNone(PyExc_StopIteration); goto __pyx_L0; @@ -13783,6 +13809,7 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_18generator14(__pyx_GeneratorObject *_ /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_7Lattice_20todot(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_7Lattice_19todot[] = "lattice.todot() -> Representation of the lattice in GraphViz dot format."; static PyObject *__pyx_pw_5_cdec_7Lattice_20todot(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -13805,9 +13832,9 @@ static PyObject *__pyx_pw_5_cdec_7Lattice_5todot_1lines(PyObject *__pyx_self, CY return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":63 - * +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":63 * def todot(self): + * """lattice.todot() -> Representation of the lattice in GraphViz dot format.""" * def lines(): # <<<<<<<<<<<<<< * yield 'digraph lattice {' * yield 'rankdir = LR;' @@ -13882,15 +13909,15 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":64 - * def todot(self): + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":64 + * """lattice.todot() -> Representation of the lattice in GraphViz dot format.""" * def lines(): * yield 'digraph lattice {' # <<<<<<<<<<<<<< * yield 'rankdir = LR;' * yield 'node [shape=circle];' */ - __Pyx_INCREF(((PyObject *)__pyx_kp_s_28)); - __pyx_r = ((PyObject *)__pyx_kp_s_28); + __Pyx_INCREF(((PyObject *)__pyx_kp_s_26)); + __pyx_r = ((PyObject *)__pyx_kp_s_26); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); /* return from generator, yielding value */ @@ -13899,15 +13926,15 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj __pyx_L4_resume_from_yield:; if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* 
"/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":65 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":65 * def lines(): * yield 'digraph lattice {' * yield 'rankdir = LR;' # <<<<<<<<<<<<<< * yield 'node [shape=circle];' * for i in range(len(self)): */ - __Pyx_INCREF(((PyObject *)__pyx_kp_s_29)); - __pyx_r = ((PyObject *)__pyx_kp_s_29); + __Pyx_INCREF(((PyObject *)__pyx_kp_s_27)); + __pyx_r = ((PyObject *)__pyx_kp_s_27); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); /* return from generator, yielding value */ @@ -13916,15 +13943,15 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj __pyx_L5_resume_from_yield:; if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":66 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":66 * yield 'digraph lattice {' * yield 'rankdir = LR;' * yield 'node [shape=circle];' # <<<<<<<<<<<<<< * for i in range(len(self)): * for label, weight, delta in self[i]: */ - __Pyx_INCREF(((PyObject *)__pyx_kp_s_30)); - __pyx_r = ((PyObject *)__pyx_kp_s_30); + __Pyx_INCREF(((PyObject *)__pyx_kp_s_28)); + __pyx_r = ((PyObject *)__pyx_kp_s_28); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); /* return from generator, yielding value */ @@ -13933,7 +13960,7 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj __pyx_L6_resume_from_yield:; if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":67 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":67 * yield 'rankdir = LR;' * yield 'node [shape=circle];' * for i in range(len(self)): # <<<<<<<<<<<<<< @@ -13968,16 +13995,16 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_3)) { if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_1 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_2); __Pyx_INCREF(__pyx_t_1); __pyx_t_2++; + __pyx_t_1 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_2); __Pyx_INCREF(__pyx_t_1); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_1 = PySequence_ITEM(__pyx_t_3, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_1 = PySequence_ITEM(__pyx_t_3, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_3)) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_2); __Pyx_INCREF(__pyx_t_1); __pyx_t_2++; + __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_2); __Pyx_INCREF(__pyx_t_1); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_1 = PySequence_ITEM(__pyx_t_3, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_1 = PySequence_ITEM(__pyx_t_3, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; 
__pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_1 = __pyx_t_4(__pyx_t_3); @@ -13996,7 +14023,7 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj __pyx_cur_scope->__pyx_v_i = __pyx_t_1; __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":68 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":68 * yield 'node [shape=circle];' * for i in range(len(self)): * for label, weight, delta in self[i]: # <<<<<<<<<<<<<< @@ -14018,16 +14045,16 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj if (!__pyx_t_7 && PyList_CheckExact(__pyx_t_5)) { if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_5)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_1 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_6); __Pyx_INCREF(__pyx_t_1); __pyx_t_6++; + __pyx_t_1 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_6); __Pyx_INCREF(__pyx_t_1); __pyx_t_6++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_1 = PySequence_ITEM(__pyx_t_5, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_1 = PySequence_ITEM(__pyx_t_5, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_7 && PyTuple_CheckExact(__pyx_t_5)) { if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_6); __Pyx_INCREF(__pyx_t_1); __pyx_t_6++; + __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_6); __Pyx_INCREF(__pyx_t_1); __pyx_t_6++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_1 = PySequence_ITEM(__pyx_t_5, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_1 = PySequence_ITEM(__pyx_t_5, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 68; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_1 = __pyx_t_7(__pyx_t_5); @@ -14111,7 +14138,7 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj __pyx_cur_scope->__pyx_v_delta = __pyx_t_10; __pyx_t_10 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":69 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":69 * for i in range(len(self)): * for label, weight, delta in self[i]: * yield '%d -> %d [label="%s"];' % (i, i+delta, label.replace('"', '\\"')) # <<<<<<<<<<<<<< @@ -14122,7 +14149,7 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj __Pyx_GOTREF(__pyx_t_1); __pyx_t_10 = PyObject_GetAttr(__pyx_cur_scope->__pyx_v_label, __pyx_n_s__replace); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); - __pyx_t_9 = PyObject_Call(__pyx_t_10, ((PyObject *)__pyx_k_tuple_34), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_9 = PyObject_Call(__pyx_t_10, ((PyObject *)__pyx_k_tuple_32), NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
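/* A Python-level reading of this yield, reconstructed from the quoted
 * lattice.pxi:69 source line above (a sketch, not part of the patch):
 *
 *   yield '%d -> %d [label="%s"];' % (i, i + delta, label.replace('"', '\\"'))
 *
 * i.e. todot() emits one GraphViz edge per lattice arc, escaping any double
 * quotes embedded in the arc label before interpolating it. */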
__Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(3); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -14136,7 +14163,7 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj __Pyx_GIVEREF(__pyx_t_9); __pyx_t_1 = 0; __pyx_t_9 = 0; - __pyx_t_9 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_31), ((PyObject *)__pyx_t_10)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_9 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_29), ((PyObject *)__pyx_t_10)); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_9)); __Pyx_DECREF(((PyObject *)__pyx_t_10)); __pyx_t_10 = 0; __pyx_r = ((PyObject *)__pyx_t_9); @@ -14171,7 +14198,7 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":70 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":70 * for label, weight, delta in self[i]: * yield '%d -> %d [label="%s"];' % (i, i+delta, label.replace('"', '\\"')) * yield '%d [shape=doublecircle]' % len(self) # <<<<<<<<<<<<<< @@ -14184,7 +14211,7 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyInt_FromSsize_t(__pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_35), __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_33), __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = ((PyObject *)__pyx_t_5); @@ -14197,15 +14224,15 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj __pyx_L14_resume_from_yield:; if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":71 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":71 * yield '%d -> %d [label="%s"];' % (i, i+delta, label.replace('"', '\\"')) * yield '%d [shape=doublecircle]' % len(self) * yield '}' # <<<<<<<<<<<<<< * return '\n'.join(lines()).encode('utf8') * */ - __Pyx_INCREF(((PyObject *)__pyx_kp_s_36)); - __pyx_r = ((PyObject *)__pyx_kp_s_36); + __Pyx_INCREF(((PyObject *)__pyx_kp_s_34)); + __pyx_r = ((PyObject *)__pyx_kp_s_34); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); /* return from generator, yielding value */ @@ -14232,12 +14259,12 @@ static PyObject *__pyx_gb_5_cdec_7Lattice_5todot_2generator20(__pyx_GeneratorObj return NULL; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":62 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":61 * yield self[i] * * def todot(self): # <<<<<<<<<<<<<< + * """lattice.todot() -> Representation of the lattice in GraphViz dot format.""" * def lines(): - * yield 'digraph lattice {' */ static PyObject 
*__pyx_pf_5_cdec_7Lattice_19todot(struct __pyx_obj_5_cdec_Lattice *__pyx_v_self) { @@ -14262,19 +14289,19 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_19todot(struct __pyx_obj_5_cdec_Lattic __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_v_self); __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_v_self); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":63 - * + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":63 * def todot(self): + * """lattice.todot() -> Representation of the lattice in GraphViz dot format.""" * def lines(): # <<<<<<<<<<<<<< * yield 'digraph lattice {' * yield 'rankdir = LR;' */ - __pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5_cdec_7Lattice_5todot_1lines, 0, ((PyObject*)__pyx_cur_scope), __pyx_n_s___cdec, ((PyObject *)__pyx_k_codeobj_38)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_CyFunction_NewEx(&__pyx_mdef_5_cdec_7Lattice_5todot_1lines, 0, ((PyObject*)__pyx_cur_scope), __pyx_n_s___cdec, ((PyObject *)__pyx_k_codeobj_36)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_lines = __pyx_t_1; __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":72 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":72 * yield '%d [shape=doublecircle]' % len(self) * yield '}' * return '\n'.join(lines()).encode('utf8') # <<<<<<<<<<<<<< @@ -14282,7 +14309,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_19todot(struct __pyx_obj_5_cdec_Lattic * def as_hypergraph(self): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_40), __pyx_n_s__join); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_38), __pyx_n_s__join); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_Call(__pyx_v_lines, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -14298,7 +14325,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_19todot(struct __pyx_obj_5_cdec_Lattic __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__encode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_k_tuple_41), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_k_tuple_39), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; @@ -14323,6 +14350,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_19todot(struct __pyx_obj_5_cdec_Lattic /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_7Lattice_22as_hypergraph(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_7Lattice_21as_hypergraph[] = "lattice.as_hypergraph() -> Hypergraph representation of the lattice."; static PyObject 
*__pyx_pw_5_cdec_7Lattice_22as_hypergraph(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -14332,12 +14360,12 @@ static PyObject *__pyx_pw_5_cdec_7Lattice_22as_hypergraph(PyObject *__pyx_v_self return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":74 +/* "/home/vchahune/tools/cdec/python/src/lattice.pxi":74 * return '\n'.join(lines()).encode('utf8') * * def as_hypergraph(self): # <<<<<<<<<<<<<< + * """lattice.as_hypergraph() -> Hypergraph representation of the lattice.""" * cdef Hypergraph result = Hypergraph.__new__(Hypergraph) - * result.hg = new hypergraph.Hypergraph() */ static PyObject *__pyx_pf_5_cdec_7Lattice_21as_hypergraph(struct __pyx_obj_5_cdec_Lattice *__pyx_v_self) { @@ -14347,65 +14375,65 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_21as_hypergraph(struct __pyx_obj_5_cde __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; - char *__pyx_t_3; + std::string __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("as_hypergraph", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":75 - * + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":76 * def as_hypergraph(self): + * """lattice.as_hypergraph() -> Hypergraph representation of the lattice.""" * cdef Hypergraph result = Hypergraph.__new__(Hypergraph) # <<<<<<<<<<<<<< * result.hg = new hypergraph.Hypergraph() * cdef bytes plf = str(self) */ - __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_Hypergraph)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_Hypergraph)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_Hypergraph)))) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5_cdec_Hypergraph)))) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_result = ((struct __pyx_obj_5_cdec_Hypergraph *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":76 - * def as_hypergraph(self): + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":77 + * """lattice.as_hypergraph() -> Hypergraph representation of the lattice.""" * cdef Hypergraph result = Hypergraph.__new__(Hypergraph) * result.hg = new hypergraph.Hypergraph() # <<<<<<<<<<<<<< * cdef bytes plf = str(self) - * hypergraph.ReadFromPLF(string(plf), result.hg) + * hypergraph.ReadFromPLF(plf, result.hg) */ __pyx_v_result->hg = new Hypergraph(); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":77 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":78 * cdef Hypergraph result = Hypergraph.__new__(Hypergraph) * result.hg = new hypergraph.Hypergraph() * cdef bytes plf = str(self) # <<<<<<<<<<<<<< - * hypergraph.ReadFromPLF(string(plf), result.hg) + * hypergraph.ReadFromPLF(plf, result.hg) * return result */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
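/* At the .pxi level (per the quoted lattice.pxi:76-80 source comments), the
 * generated code here corresponds roughly to this sketch, reconstructed from
 * the quoted source and not itself part of the patch:
 *
 *   result = Hypergraph.__new__(Hypergraph)
 *   result.hg = new hypergraph.Hypergraph()
 *   plf = str(self)                        # PLF text via Lattice.__str__
 *   hypergraph.ReadFromPLF(plf, result.hg)
 *   return result
 *
 * The bytes object plf is converted to a std::string with
 * __pyx_convert_string_from_py_ before the ReadFromPLF call below. */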
__Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (!(likely(PyBytes_CheckExact(__pyx_t_2))||((__pyx_t_2) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected bytes, got %.200s", Py_TYPE(__pyx_t_2)->tp_name), 0))) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(PyBytes_CheckExact(__pyx_t_2))||((__pyx_t_2) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected bytes, got %.200s", Py_TYPE(__pyx_t_2)->tp_name), 0))) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_plf = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":78 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":79 * result.hg = new hypergraph.Hypergraph() * cdef bytes plf = str(self) - * hypergraph.ReadFromPLF(string(plf), result.hg) # <<<<<<<<<<<<<< + * hypergraph.ReadFromPLF(plf, result.hg) # <<<<<<<<<<<<<< * return result */ - __pyx_t_3 = PyBytes_AsString(((PyObject *)__pyx_v_plf)); if (unlikely((!__pyx_t_3) && PyErr_Occurred())) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - HypergraphIO::ReadFromPLF(std::string(__pyx_t_3), __pyx_v_result->hg); + __pyx_t_3 = __pyx_convert_string_from_py_(((PyObject *)__pyx_v_plf)); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + HypergraphIO::ReadFromPLF(__pyx_t_3, __pyx_v_result->hg); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":79 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":80 * cdef bytes plf = str(self) - * hypergraph.ReadFromPLF(string(plf), result.hg) + * hypergraph.ReadFromPLF(plf, result.hg) * return result # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); @@ -14428,7 +14456,7 @@ static PyObject *__pyx_pf_5_cdec_7Lattice_21as_hypergraph(struct __pyx_obj_5_cde return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":3 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":3 * cimport mteval * * cdef SufficientStats as_stats(x, y): # <<<<<<<<<<<<<< @@ -14449,7 +14477,7 @@ static struct __pyx_obj_5_cdec_SufficientStats *__pyx_f_5_cdec_as_stats(PyObject int __pyx_clineno = 0; __Pyx_RefNannySetupContext("as_stats", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":4 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":4 * * cdef SufficientStats as_stats(x, y): * if isinstance(x, SufficientStats): # <<<<<<<<<<<<<< @@ -14462,7 +14490,7 @@ static struct __pyx_obj_5_cdec_SufficientStats *__pyx_f_5_cdec_as_stats(PyObject __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":5 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":5 * cdef SufficientStats as_stats(x, y): * if isinstance(x, SufficientStats): * return x # <<<<<<<<<<<<<< @@ 
-14477,15 +14505,14 @@ static struct __pyx_obj_5_cdec_SufficientStats *__pyx_f_5_cdec_as_stats(PyObject goto __pyx_L3; } - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":6 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":6 * if isinstance(x, SufficientStats): * return x * elif x == 0 and isinstance(y, SufficientStats): # <<<<<<<<<<<<<< * stats = SufficientStats() * stats.stats = new mteval.SufficientStats() */ - __pyx_t_1 = PyObject_RichCompare(__pyx_v_x, __pyx_int_0, Py_EQ); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_1); + __pyx_t_1 = PyObject_RichCompare(__pyx_v_x, __pyx_int_0, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { @@ -14499,7 +14526,7 @@ static struct __pyx_obj_5_cdec_SufficientStats *__pyx_f_5_cdec_as_stats(PyObject } if (__pyx_t_4) { - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":7 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":7 * return x * elif x == 0 and isinstance(y, SufficientStats): * stats = SufficientStats() # <<<<<<<<<<<<<< @@ -14511,7 +14538,7 @@ static struct __pyx_obj_5_cdec_SufficientStats *__pyx_f_5_cdec_as_stats(PyObject __pyx_v_stats = ((struct __pyx_obj_5_cdec_SufficientStats *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":8 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":8 * elif x == 0 and isinstance(y, SufficientStats): * stats = SufficientStats() * stats.stats = new mteval.SufficientStats() # <<<<<<<<<<<<<< @@ -14520,7 +14547,7 @@ static struct __pyx_obj_5_cdec_SufficientStats *__pyx_f_5_cdec_as_stats(PyObject */ __pyx_v_stats->stats = new SufficientStats(); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":9 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":9 * stats = SufficientStats() * stats.stats = new mteval.SufficientStats() * stats.metric = (<SufficientStats> y).metric # <<<<<<<<<<<<<< @@ -14529,7 +14556,7 @@ static struct __pyx_obj_5_cdec_SufficientStats *__pyx_f_5_cdec_as_stats(PyObject */ __pyx_v_stats->metric = ((struct __pyx_obj_5_cdec_SufficientStats *)__pyx_v_y)->metric; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":10 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":10 * stats.stats = new mteval.SufficientStats() * stats.metric = (<SufficientStats> y).metric * return stats # <<<<<<<<<<<<<< @@ -14568,7 +14595,7 @@ static PyObject *__pyx_pw_5_cdec_9Candidate_5words_1__get__(PyObject *__pyx_v_se return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":17 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":17 * * property words: * def __get__(self): # <<<<<<<<<<<<<< @@ -14587,7 +14614,7 @@ static PyObject *__pyx_pf_5_cdec_9Candidate_5words___get__(struct __pyx_obj_5_cd int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":18 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":18 * property words: * def __get__(self): * return unicode(GetString(self.candidate.ewords).c_str(), encoding='utf8') # <<<<<<<<<<<<<< @@ -14638,7 +14665,7 @@ static PyObject *__pyx_pw_5_cdec_9Candidate_4fmap_1__get__(PyObject *__pyx_v_sel return 
__pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":21 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":21 * * property fmap: * def __get__(self): # <<<<<<<<<<<<<< @@ -14656,7 +14683,7 @@ static PyObject *__pyx_pf_5_cdec_9Candidate_4fmap___get__(struct __pyx_obj_5_cde int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":22 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":22 * property fmap: * def __get__(self): * cdef SparseVector fmap = SparseVector.__new__(SparseVector) # <<<<<<<<<<<<<< @@ -14669,7 +14696,7 @@ static PyObject *__pyx_pf_5_cdec_9Candidate_4fmap___get__(struct __pyx_obj_5_cde __pyx_v_fmap = ((struct __pyx_obj_5_cdec_SparseVector *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":23 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":23 * def __get__(self): * cdef SparseVector fmap = SparseVector.__new__(SparseVector) * fmap.vector = new FastSparseVector[weight_t](self.candidate.fmap) # <<<<<<<<<<<<<< @@ -14678,7 +14705,7 @@ static PyObject *__pyx_pf_5_cdec_9Candidate_4fmap___get__(struct __pyx_obj_5_cde */ __pyx_v_fmap->vector = new FastSparseVector<weight_t>(__pyx_v_self->candidate->fmap); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":24 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":24 * cdef SparseVector fmap = SparseVector.__new__(SparseVector) * fmap.vector = new FastSparseVector[weight_t](self.candidate.fmap) * return fmap # <<<<<<<<<<<<<< @@ -14714,7 +14741,7 @@ static PyObject *__pyx_pw_5_cdec_9Candidate_5score_1__get__(PyObject *__pyx_v_se return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":14 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":14 * cdef class Candidate: * cdef mteval.const_Candidate* candidate * cdef public float score # <<<<<<<<<<<<<< @@ -14790,7 +14817,7 @@ static void __pyx_pw_5_cdec_15SufficientStats_1__dealloc__(PyObject *__pyx_v_sel __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":30 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":30 * cdef mteval.EvaluationMetric* metric * * def __dealloc__(self): # <<<<<<<<<<<<<< @@ -14802,7 +14829,7 @@ static void __pyx_pf_5_cdec_15SufficientStats___dealloc__(CYTHON_UNUSED struct _ __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":31 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":31 * * def __dealloc__(self): * del self.stats # <<<<<<<<<<<<<< @@ -14825,7 +14852,7 @@ static PyObject *__pyx_pw_5_cdec_15SufficientStats_5score_1__get__(PyObject *__p return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":34 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":34 * * property score: * def __get__(self): # <<<<<<<<<<<<<< @@ -14842,7 +14869,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_5score___get__(struct __pyx_o int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":35 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":35 * property score: * def __get__(self): * return self.metric.ComputeScore(self.stats[0]) # <<<<<<<<<<<<<< @@ -14879,7 +14906,7 @@ static PyObject *__pyx_pw_5_cdec_15SufficientStats_6detail_1__get__(PyObject *__ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":38 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":38 * * property detail: * def __get__(self): # 
<<<<<<<<<<<<<< @@ -14897,7 +14924,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_6detail___get__(struct __pyx_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":39 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":39 * property detail: * def __get__(self): * return str(self.metric.DetailedScore(self.stats[0]).c_str()) # <<<<<<<<<<<<<< @@ -14943,7 +14970,7 @@ static Py_ssize_t __pyx_pw_5_cdec_15SufficientStats_3__len__(PyObject *__pyx_v_s return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":41 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":41 * return str(self.metric.DetailedScore(self.stats[0]).c_str()) * * def __len__(self): # <<<<<<<<<<<<<< @@ -14956,7 +14983,7 @@ static Py_ssize_t __pyx_pf_5_cdec_15SufficientStats_2__len__(struct __pyx_obj_5_ __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":42 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":42 * * def __len__(self): * return self.stats.size() # <<<<<<<<<<<<<< @@ -14984,7 +15011,7 @@ static PyObject *__pyx_pw_5_cdec_15SufficientStats_5__iter__(PyObject *__pyx_v_s return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":44 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":44 * return self.stats.size() * * def __iter__(self): # <<<<<<<<<<<<<< @@ -15048,7 +15075,7 @@ static PyObject *__pyx_gb_5_cdec_15SufficientStats_6generator15(__pyx_GeneratorO __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":45 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":45 * * def __iter__(self): * for i in range(len(self)): # <<<<<<<<<<<<<< @@ -15079,16 +15106,16 @@ static PyObject *__pyx_gb_5_cdec_15SufficientStats_6generator15(__pyx_GeneratorO if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_3)) { if (__pyx_t_1 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_2); __pyx_t_1++; + __pyx_t_2 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_2); __pyx_t_1++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_3)) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_2); __pyx_t_1++; + __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_2); __pyx_t_1++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_2)) 
{__pyx_filename = __pyx_f[5]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_2 = __pyx_t_4(__pyx_t_3); @@ -15107,7 +15134,7 @@ static PyObject *__pyx_gb_5_cdec_15SufficientStats_6generator15(__pyx_GeneratorO __pyx_cur_scope->__pyx_v_i = __pyx_t_2; __pyx_t_2 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":46 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":46 * def __iter__(self): * for i in range(len(self)): * yield self[i] # <<<<<<<<<<<<<< @@ -15171,7 +15198,7 @@ static PyObject *__pyx_pw_5_cdec_15SufficientStats_8__getitem__(PyObject *__pyx_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":48 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":48 * yield self[i] * * def __getitem__(self, int index): # <<<<<<<<<<<<<< @@ -15191,7 +15218,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_7__getitem__(struct __pyx_obj int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":49 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":49 * * def __getitem__(self, int index): * if not 0 <= index < len(self): # <<<<<<<<<<<<<< @@ -15206,14 +15233,14 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_7__getitem__(struct __pyx_obj __pyx_t_3 = (!__pyx_t_1); if (__pyx_t_3) { - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":50 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":50 * def __getitem__(self, int index): * if not 0 <= index < len(self): * raise IndexError('sufficient stats vector index out of range') # <<<<<<<<<<<<<< * return self.stats[0][index] * */ - __pyx_t_4 = PyObject_Call(__pyx_builtin_IndexError, ((PyObject *)__pyx_k_tuple_43), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyObject_Call(__pyx_builtin_IndexError, ((PyObject *)__pyx_k_tuple_41), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -15222,7 +15249,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_7__getitem__(struct __pyx_obj } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":51 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":51 * if not 0 <= index < len(self): * raise IndexError('sufficient stats vector index out of range') * return self.stats[0][index] # <<<<<<<<<<<<<< @@ -15264,7 +15291,7 @@ static PyObject *__pyx_pw_5_cdec_15SufficientStats_10__iadd__(PyObject *__pyx_v_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":53 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":53 * return self.stats[0][index] * * def __iadd__(SufficientStats self, SufficientStats other): # <<<<<<<<<<<<<< @@ -15277,7 +15304,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_9__iadd__(struct __pyx_obj_5_ __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__iadd__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":54 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":54 * * def __iadd__(SufficientStats self, SufficientStats other): * self.stats[0] += other.stats[0] # <<<<<<<<<<<<<< @@ -15286,7 +15313,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_9__iadd__(struct __pyx_obj_5_ */ (__pyx_v_self->stats[0]) += (__pyx_v_other->stats[0]); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":55 + /* 
"/home/vchahune/tools/cdec/python/src/mteval.pxi":55 * def __iadd__(SufficientStats self, SufficientStats other): * self.stats[0] += other.stats[0] * return self # <<<<<<<<<<<<<< @@ -15316,7 +15343,7 @@ static PyObject *__pyx_pw_5_cdec_15SufficientStats_12__add__(PyObject *__pyx_v_x return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":57 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":57 * return self * * def __add__(x, y): # <<<<<<<<<<<<<< @@ -15336,7 +15363,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_11__add__(PyObject *__pyx_v_x int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__add__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":58 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":58 * * def __add__(x, y): * cdef SufficientStats sx = as_stats(x, y) # <<<<<<<<<<<<<< @@ -15348,7 +15375,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_11__add__(PyObject *__pyx_v_x __pyx_v_sx = ((struct __pyx_obj_5_cdec_SufficientStats *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":59 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":59 * def __add__(x, y): * cdef SufficientStats sx = as_stats(x, y) * cdef SufficientStats sy = as_stats(y, x) # <<<<<<<<<<<<<< @@ -15360,7 +15387,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_11__add__(PyObject *__pyx_v_x __pyx_v_sy = ((struct __pyx_obj_5_cdec_SufficientStats *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":60 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":60 * cdef SufficientStats sx = as_stats(x, y) * cdef SufficientStats sy = as_stats(y, x) * cdef SufficientStats result = SufficientStats() # <<<<<<<<<<<<<< @@ -15372,7 +15399,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_11__add__(PyObject *__pyx_v_x __pyx_v_result = ((struct __pyx_obj_5_cdec_SufficientStats *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":61 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":61 * cdef SufficientStats sy = as_stats(y, x) * cdef SufficientStats result = SufficientStats() * result.stats = new mteval.SufficientStats(mteval.add(sx.stats[0], sy.stats[0])) # <<<<<<<<<<<<<< @@ -15381,7 +15408,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_11__add__(PyObject *__pyx_v_x */ __pyx_v_result->stats = new SufficientStats(operator+((__pyx_v_sx->stats[0]), (__pyx_v_sy->stats[0]))); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":62 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":62 * cdef SufficientStats result = SufficientStats() * result.stats = new mteval.SufficientStats(mteval.add(sx.stats[0], sy.stats[0])) * result.metric = sx.metric # <<<<<<<<<<<<<< @@ -15390,7 +15417,7 @@ static PyObject *__pyx_pf_5_cdec_15SufficientStats_11__add__(PyObject *__pyx_v_x */ __pyx_v_result->metric = __pyx_v_sx->metric; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":63 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":63 * result.stats = new mteval.SufficientStats(mteval.add(sx.stats[0], sy.stats[0])) * result.metric = sx.metric * return result # <<<<<<<<<<<<<< @@ -15469,7 +15496,7 @@ static int __pyx_pw_5_cdec_12CandidateSet_1__cinit__(PyObject *__pyx_v_self, PyO return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":70 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":70 * cdef mteval.CandidateSet* cs * * def __cinit__(self, SegmentEvaluator evaluator): # <<<<<<<<<<<<<< @@ -15482,7 +15509,7 @@ static int 
__pyx_pf_5_cdec_12CandidateSet___cinit__(struct __pyx_obj_5_cdec_Cand __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":71 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":71 * * def __cinit__(self, SegmentEvaluator evaluator): * self.scorer = new shared_ptr[mteval.SegmentEvaluator](evaluator.scorer[0]) # <<<<<<<<<<<<<< @@ -15491,7 +15518,7 @@ static int __pyx_pf_5_cdec_12CandidateSet___cinit__(struct __pyx_obj_5_cdec_Cand */ __pyx_v_self->scorer = new boost::shared_ptr<SegmentEvaluator>((__pyx_v_evaluator->scorer[0])); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":72 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":72 * def __cinit__(self, SegmentEvaluator evaluator): * self.scorer = new shared_ptr[mteval.SegmentEvaluator](evaluator.scorer[0]) * self.metric = evaluator.metric # <<<<<<<<<<<<<< @@ -15500,7 +15527,7 @@ static int __pyx_pf_5_cdec_12CandidateSet___cinit__(struct __pyx_obj_5_cdec_Cand */ __pyx_v_self->metric = __pyx_v_evaluator->metric; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":73 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":73 * self.scorer = new shared_ptr[mteval.SegmentEvaluator](evaluator.scorer[0]) * self.metric = evaluator.metric * self.cs = new mteval.CandidateSet() # <<<<<<<<<<<<<< @@ -15523,7 +15550,7 @@ static void __pyx_pw_5_cdec_12CandidateSet_3__dealloc__(PyObject *__pyx_v_self) __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":75 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":75 * self.cs = new mteval.CandidateSet() * * def __dealloc__(self): # <<<<<<<<<<<<<< @@ -15535,7 +15562,7 @@ static void __pyx_pf_5_cdec_12CandidateSet_2__dealloc__(CYTHON_UNUSED struct __p __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":76 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":76 * * def __dealloc__(self): * del self.scorer # <<<<<<<<<<<<<< @@ -15544,7 +15571,7 @@ static void __pyx_pf_5_cdec_12CandidateSet_2__dealloc__(CYTHON_UNUSED struct __p */ delete __pyx_v_self->scorer; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":77 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":77 * def __dealloc__(self): * del self.scorer * del self.cs # <<<<<<<<<<<<<< @@ -15567,7 +15594,7 @@ static Py_ssize_t __pyx_pw_5_cdec_12CandidateSet_5__len__(PyObject *__pyx_v_self return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":79 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":79 * del self.cs * * def __len__(self): # <<<<<<<<<<<<<< @@ -15580,7 +15607,7 @@ static Py_ssize_t __pyx_pf_5_cdec_12CandidateSet_4__len__(struct __pyx_obj_5_cde __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":80 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":80 * * def __len__(self): * return self.cs.size() # <<<<<<<<<<<<<< @@ -15617,7 +15644,7 @@ static PyObject *__pyx_pw_5_cdec_12CandidateSet_7__getitem__(PyObject *__pyx_v_s return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":82 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":82 * return self.cs.size() * * def __getitem__(self,int k): # <<<<<<<<<<<<<< @@ -15637,7 +15664,7 @@ static PyObject *__pyx_pf_5_cdec_12CandidateSet_6__getitem__(struct __pyx_obj_5_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); - /* 
"/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":83 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":83 * * def __getitem__(self,int k): * if not 0 <= k < self.cs.size(): # <<<<<<<<<<<<<< @@ -15651,14 +15678,14 @@ static PyObject *__pyx_pf_5_cdec_12CandidateSet_6__getitem__(struct __pyx_obj_5_ __pyx_t_2 = (!__pyx_t_1); if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":84 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":84 * def __getitem__(self,int k): * if not 0 <= k < self.cs.size(): * raise IndexError('candidate set index out of range') # <<<<<<<<<<<<<< * cdef Candidate candidate = Candidate() * candidate.candidate = &self.cs[0][k] */ - __pyx_t_3 = PyObject_Call(__pyx_builtin_IndexError, ((PyObject *)__pyx_k_tuple_45), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(__pyx_builtin_IndexError, ((PyObject *)__pyx_k_tuple_43), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -15667,7 +15694,7 @@ static PyObject *__pyx_pf_5_cdec_12CandidateSet_6__getitem__(struct __pyx_obj_5_ } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":85 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":85 * if not 0 <= k < self.cs.size(): * raise IndexError('candidate set index out of range') * cdef Candidate candidate = Candidate() # <<<<<<<<<<<<<< @@ -15679,7 +15706,7 @@ static PyObject *__pyx_pf_5_cdec_12CandidateSet_6__getitem__(struct __pyx_obj_5_ __pyx_v_candidate = ((struct __pyx_obj_5_cdec_Candidate *)__pyx_t_3); __pyx_t_3 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":86 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":86 * raise IndexError('candidate set index out of range') * cdef Candidate candidate = Candidate() * candidate.candidate = &self.cs[0][k] # <<<<<<<<<<<<<< @@ -15688,7 +15715,7 @@ static PyObject *__pyx_pf_5_cdec_12CandidateSet_6__getitem__(struct __pyx_obj_5_ */ __pyx_v_candidate->candidate = (&((__pyx_v_self->cs[0])[__pyx_v_k])); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":87 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":87 * cdef Candidate candidate = Candidate() * candidate.candidate = &self.cs[0][k] * candidate.score = self.metric.ComputeScore(self.cs[0][k].eval_feats) # <<<<<<<<<<<<<< @@ -15697,7 +15724,7 @@ static PyObject *__pyx_pf_5_cdec_12CandidateSet_6__getitem__(struct __pyx_obj_5_ */ __pyx_v_candidate->score = __pyx_v_self->metric->ComputeScore(((__pyx_v_self->cs[0])[__pyx_v_k]).eval_feats); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":88 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":88 * candidate.candidate = &self.cs[0][k] * candidate.score = self.metric.ComputeScore(self.cs[0][k].eval_feats) * return candidate # <<<<<<<<<<<<<< @@ -15734,7 +15761,7 @@ static PyObject *__pyx_pw_5_cdec_12CandidateSet_9__iter__(PyObject *__pyx_v_self return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":90 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":90 * return candidate * * def __iter__(self): # <<<<<<<<<<<<<< @@ -15797,7 +15824,7 @@ static PyObject *__pyx_gb_5_cdec_12CandidateSet_10generator16(__pyx_GeneratorObj __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":92 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":92 * def __iter__(self): * cdef unsigned i * for i in range(len(self)): # <<<<<<<<<<<<<< @@ -15808,7 +15835,7 @@ static PyObject *__pyx_gb_5_cdec_12CandidateSet_10generator16(__pyx_GeneratorObj for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_cur_scope->__pyx_v_i = __pyx_t_2; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":93 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":93 * cdef unsigned i * for i in range(len(self)): * yield self[i] # <<<<<<<<<<<<<< @@ -15846,6 +15873,7 @@ static PyObject *__pyx_gb_5_cdec_12CandidateSet_10generator16(__pyx_GeneratorObj /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_12CandidateSet_12add_kbest(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_12CandidateSet_11add_kbest[] = "cs.add_kbest(Hypergraph hypergraph, int k) -> Extract K-best hypotheses \n from the hypergraph and add them to the candidate set."; static PyObject *__pyx_pw_5_cdec_12CandidateSet_12add_kbest(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_hypergraph = 0; unsigned int __pyx_v_k; @@ -15905,12 +15933,12 @@ static PyObject *__pyx_pw_5_cdec_12CandidateSet_12add_kbest(PyObject *__pyx_v_se return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":95 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":95 * yield self[i] * * def add_kbest(self, Hypergraph hypergraph, unsigned k): # <<<<<<<<<<<<<< - * self.cs.AddKBestCandidates(hypergraph.hg[0], k, self.scorer.get()) - * + * """cs.add_kbest(Hypergraph hypergraph, int k) -> Extract K-best hypotheses + * from the hypergraph and add them to the candidate set.""" */ static PyObject *__pyx_pf_5_cdec_12CandidateSet_11add_kbest(struct __pyx_obj_5_cdec_CandidateSet *__pyx_v_self, struct __pyx_obj_5_cdec_Hypergraph *__pyx_v_hypergraph, unsigned int __pyx_v_k) { @@ -15918,9 +15946,9 @@ static PyObject *__pyx_pf_5_cdec_12CandidateSet_11add_kbest(struct __pyx_obj_5_c __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("add_kbest", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":96 - * - * def add_kbest(self, Hypergraph hypergraph, unsigned k): + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":98 + * """cs.add_kbest(Hypergraph hypergraph, int k) -> Extract K-best hypotheses + * from the hypergraph and add them to the candidate set.""" * self.cs.AddKBestCandidates(hypergraph.hg[0], k, self.scorer.get()) # <<<<<<<<<<<<<< * * cdef class SegmentEvaluator: @@ -15942,7 +15970,7 @@ static void __pyx_pw_5_cdec_16SegmentEvaluator_1__dealloc__(PyObject *__pyx_v_se __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":102 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":104 * cdef mteval.EvaluationMetric* metric * * def __dealloc__(self): # <<<<<<<<<<<<<< @@ -15954,7 +15982,7 @@ static void __pyx_pf_5_cdec_16SegmentEvaluator___dealloc__(CYTHON_UNUSED struct __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":103 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":105 * * def __dealloc__(self): * del self.scorer # <<<<<<<<<<<<<< @@ -15968,6 +15996,7 @@ static void __pyx_pf_5_cdec_16SegmentEvaluator___dealloc__(CYTHON_UNUSED struct /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_16SegmentEvaluator_3evaluate(PyObject 
*__pyx_v_self, PyObject *__pyx_v_sentence); /*proto*/ +static char __pyx_doc_5_cdec_16SegmentEvaluator_2evaluate[] = "se.evaluate(sentence) -> SufficientStats for the given hypothesis."; static PyObject *__pyx_pw_5_cdec_16SegmentEvaluator_3evaluate(PyObject *__pyx_v_self, PyObject *__pyx_v_sentence) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -15977,12 +16006,12 @@ static PyObject *__pyx_pw_5_cdec_16SegmentEvaluator_3evaluate(PyObject *__pyx_v_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":105 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":107 * del self.scorer * * def evaluate(self, sentence): # <<<<<<<<<<<<<< + * """se.evaluate(sentence) -> SufficientStats for the given hypothesis.""" * cdef vector[WordID] hyp - * cdef SufficientStats sf = SufficientStats() */ static PyObject *__pyx_pf_5_cdec_16SegmentEvaluator_2evaluate(struct __pyx_obj_5_cdec_SegmentEvaluator *__pyx_v_self, PyObject *__pyx_v_sentence) { @@ -15992,67 +16021,72 @@ static PyObject *__pyx_pf_5_cdec_16SegmentEvaluator_2evaluate(struct __pyx_obj_5 __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; + std::string __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("evaluate", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":107 - * def evaluate(self, sentence): + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":110 + * """se.evaluate(sentence) -> SufficientStats for the given hypothesis.""" * cdef vector[WordID] hyp * cdef SufficientStats sf = SufficientStats() # <<<<<<<<<<<<<< * sf.metric = self.metric * sf.stats = new mteval.SufficientStats() */ - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_SufficientStats)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_SufficientStats)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_sf = ((struct __pyx_obj_5_cdec_SufficientStats *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":108 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":111 * cdef vector[WordID] hyp * cdef SufficientStats sf = SufficientStats() * sf.metric = self.metric # <<<<<<<<<<<<<< * sf.stats = new mteval.SufficientStats() - * ConvertSentence(string(as_str(sentence.strip())), &hyp) + * ConvertSentence(as_str(sentence.strip()), &hyp) */ __pyx_v_sf->metric = __pyx_v_self->metric; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":109 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":112 * cdef SufficientStats sf = SufficientStats() * sf.metric = self.metric * sf.stats = new mteval.SufficientStats() # <<<<<<<<<<<<<< - * ConvertSentence(string(as_str(sentence.strip())), &hyp) + * ConvertSentence(as_str(sentence.strip()), &hyp) * self.scorer.get().Evaluate(hyp, sf.stats) */ __pyx_v_sf->stats = new SufficientStats(); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":110 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":113 * sf.metric = self.metric * sf.stats = new mteval.SufficientStats() - * ConvertSentence(string(as_str(sentence.strip())), &hyp) # <<<<<<<<<<<<<< + * ConvertSentence(as_str(sentence.strip()), &hyp) # <<<<<<<<<<<<<< * 
self.scorer.get().Evaluate(hyp, sf.stats) * return sf */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_sentence, __pyx_n_s__strip); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(__pyx_v_sentence, __pyx_n_s__strip); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - TD::ConvertSentence(std::string(__pyx_f_5_cdec_as_str(__pyx_t_2, NULL)), (&__pyx_v_hyp)); + __pyx_t_1 = ((PyObject *)__pyx_f_5_cdec_as_str(__pyx_t_2, NULL)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_3 = __pyx_convert_string_from_py_(__pyx_t_1); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + TD::ConvertSentence(__pyx_t_3, (&__pyx_v_hyp)); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":111 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":114 * sf.stats = new mteval.SufficientStats() - * ConvertSentence(string(as_str(sentence.strip())), &hyp) + * ConvertSentence(as_str(sentence.strip()), &hyp) * self.scorer.get().Evaluate(hyp, sf.stats) # <<<<<<<<<<<<<< * return sf * */ __pyx_v_self->scorer->get()->Evaluate(__pyx_v_hyp, __pyx_v_sf->stats); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":112 - * ConvertSentence(string(as_str(sentence.strip())), &hyp) + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":115 + * ConvertSentence(as_str(sentence.strip()), &hyp) * self.scorer.get().Evaluate(hyp, sf.stats) * return sf # <<<<<<<<<<<<<< * @@ -16079,6 +16113,7 @@ static PyObject *__pyx_pf_5_cdec_16SegmentEvaluator_2evaluate(struct __pyx_obj_5 /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_16SegmentEvaluator_5candidate_set(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_5_cdec_16SegmentEvaluator_4candidate_set[] = "se.candidate_set() -> Candidate set using this segment evaluator for scoring."; static PyObject *__pyx_pw_5_cdec_16SegmentEvaluator_5candidate_set(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -16088,12 +16123,12 @@ static PyObject *__pyx_pw_5_cdec_16SegmentEvaluator_5candidate_set(PyObject *__p return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":114 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":117 * return sf * * def candidate_set(self): # <<<<<<<<<<<<<< + * """se.candidate_set() -> Candidate set using this segment evaluator for scoring.""" * return CandidateSet(self) - * */ static PyObject *__pyx_pf_5_cdec_16SegmentEvaluator_4candidate_set(struct __pyx_obj_5_cdec_SegmentEvaluator *__pyx_v_self) { @@ -16106,20 +16141,20 @@ static PyObject *__pyx_pf_5_cdec_16SegmentEvaluator_4candidate_set(struct __pyx_ int __pyx_clineno 
= 0; __Pyx_RefNannySetupContext("candidate_set", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":115 - * + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":119 * def candidate_set(self): + * """se.candidate_set() -> Candidate set using this segment evaluator for scoring.""" * return CandidateSet(self) # <<<<<<<<<<<<<< * * cdef class Scorer: */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_CandidateSet)), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_CandidateSet)), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; @@ -16150,7 +16185,7 @@ static int __pyx_pw_5_cdec_6Scorer_1__cinit__(PyObject *__pyx_v_self, PyObject * static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__name,0}; PyObject* values[1] = {0}; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":121 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":125 * cdef mteval.EvaluationMetric* metric * * def __cinit__(self, bytes name=None): # <<<<<<<<<<<<<< @@ -16175,7 +16210,7 @@ static int __pyx_pw_5_cdec_6Scorer_1__cinit__(PyObject *__pyx_v_self, PyObject * } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -16188,13 +16223,13 @@ static int __pyx_pw_5_cdec_6Scorer_1__cinit__(PyObject *__pyx_v_self, PyObject * } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("_cdec.Scorer.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_name), (&PyBytes_Type), 1, "name", 1))) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_name), (&PyBytes_Type), 1, "name", 1))) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 125; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_5_cdec_6Scorer___cinit__(((struct __pyx_obj_5_cdec_Scorer *)__pyx_v_self), __pyx_v_name); goto __pyx_L0; __pyx_L1_error:; @@ -16209,12 +16244,13 @@ static int __pyx_pf_5_cdec_6Scorer___cinit__(struct __pyx_obj_5_cdec_Scorer *__p __Pyx_RefNannyDeclarations int __pyx_t_1; char *__pyx_t_2; + std::string *__pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":122 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":126 * * def __cinit__(self, bytes name=None): * if name: # <<<<<<<<<<<<<< @@ -16224,17 +16260,18 @@ static int __pyx_pf_5_cdec_6Scorer___cinit__(struct __pyx_obj_5_cdec_Scorer *__p __pyx_t_1 = (((PyObject *)__pyx_v_name) != Py_None) && (PyBytes_GET_SIZE(((PyObject *)__pyx_v_name)) != 0); if (__pyx_t_1) { - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":123 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":127 * def __cinit__(self, bytes name=None): * if name: * self.name = new string(name) # <<<<<<<<<<<<<< * self.metric = mteval.MetricInstance(self.name[0]) * */ - __pyx_t_2 = PyBytes_AsString(((PyObject *)__pyx_v_name)); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_self->name = new std::string(__pyx_t_2); + __pyx_t_2 = PyBytes_AsString(((PyObject *)__pyx_v_name)); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + try {__pyx_t_3 = new std::string(__pyx_t_2);} catch(...) {__Pyx_CppExn2PyErr(); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}} + __pyx_v_self->name = __pyx_t_3; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":124 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":128 * if name: * self.name = new string(name) * self.metric = mteval.MetricInstance(self.name[0]) # <<<<<<<<<<<<<< @@ -16265,7 +16302,7 @@ static void __pyx_pw_5_cdec_6Scorer_3__dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyFinishContext(); } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":126 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":130 * self.metric = mteval.MetricInstance(self.name[0]) * * def __dealloc__(self): # <<<<<<<<<<<<<< @@ -16277,7 +16314,7 @@ static void __pyx_pf_5_cdec_6Scorer_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_ __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":127 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":131 * * def __dealloc__(self): * del self.name # <<<<<<<<<<<<<< @@ -16314,7 +16351,7 @@ static PyObject *__pyx_pw_5_cdec_6Scorer_5__call__(PyObject *__pyx_v_self, PyObj else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; @@ -16325,7 +16362,7 @@ static PyObject *__pyx_pw_5_cdec_6Scorer_5__call__(PyObject 
*__pyx_v_self, PyObj } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("_cdec.Scorer.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -16336,7 +16373,7 @@ static PyObject *__pyx_pw_5_cdec_6Scorer_5__call__(PyObject *__pyx_v_self, PyObj return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":129 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":133 * del self.name * * def __call__(self, refs): # <<<<<<<<<<<<<< @@ -16353,17 +16390,20 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_4__call__(struct __pyx_obj_5_cdec_Score __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; - Py_ssize_t __pyx_t_3; - PyObject *(*__pyx_t_4)(PyObject *); - PyObject *__pyx_t_5 = NULL; + std::vector<std::vector<WordID> > *__pyx_t_3; + Py_ssize_t __pyx_t_4; + PyObject *(*__pyx_t_5)(PyObject *); PyObject *__pyx_t_6 = NULL; + std::vector<WordID> *__pyx_t_7; + PyObject *__pyx_t_8 = NULL; + std::string __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__call__", 0); __Pyx_INCREF(__pyx_v_refs); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":130 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":134 * * def __call__(self, refs): * if isinstance(refs, basestring): # <<<<<<<<<<<<<< @@ -16372,18 +16412,18 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_4__call__(struct __pyx_obj_5_cdec_Score */ __pyx_t_1 = __pyx_builtin_basestring; __Pyx_INCREF(__pyx_t_1); - __pyx_t_2 = PyObject_IsInstance(__pyx_v_refs, __pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_IsInstance(__pyx_v_refs, __pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":131 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":135 * def __call__(self, refs): * if isinstance(refs, basestring): * refs = [refs] # <<<<<<<<<<<<<< * cdef vector[vector[WordID]]* refsv = new vector[vector[WordID]]() * cdef vector[WordID]* refv */ - __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_refs); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_v_refs); @@ -16395,95 +16435,101 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_4__call__(struct __pyx_obj_5_cdec_Score } __pyx_L3:; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":132 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":136 * if isinstance(refs, basestring): * refs = [refs] * cdef vector[vector[WordID]]* refsv = new vector[vector[WordID]]() # <<<<<<<<<<<<<< * cdef vector[WordID]* refv * for ref in refs: */ - __pyx_v_refsv = new std::vector<std::vector<WordID> >(); + 
try {__pyx_t_3 = new std::vector<std::vector<WordID> >();} catch(...) {__Pyx_CppExn2PyErr(); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;}} + __pyx_v_refsv = __pyx_t_3; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":134 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":138 * cdef vector[vector[WordID]]* refsv = new vector[vector[WordID]]() * cdef vector[WordID]* refv * for ref in refs: # <<<<<<<<<<<<<< * refv = new vector[WordID]() - * ConvertSentence(string(as_str(ref.strip())), refv) + * ConvertSentence(as_str(ref.strip()), refv) */ if (PyList_CheckExact(__pyx_v_refs) || PyTuple_CheckExact(__pyx_v_refs)) { - __pyx_t_1 = __pyx_v_refs; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0; - __pyx_t_4 = NULL; + __pyx_t_1 = __pyx_v_refs; __Pyx_INCREF(__pyx_t_1); __pyx_t_4 = 0; + __pyx_t_5 = NULL; } else { - __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_refs); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_refs); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = Py_TYPE(__pyx_t_1)->tp_iternext; + __pyx_t_5 = Py_TYPE(__pyx_t_1)->tp_iternext; } for (;;) { - if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_1)) { - if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break; + if (!__pyx_t_5 && PyList_CheckExact(__pyx_t_1)) { + if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_5 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; + __pyx_t_6 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_6); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_6 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif - } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_1)) { - if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + } else if (!__pyx_t_5 && PyTuple_CheckExact(__pyx_t_1)) { + if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; + __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_6); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_6 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { - __pyx_t_5 = __pyx_t_4(__pyx_t_1); - if (unlikely(!__pyx_t_5)) { + __pyx_t_6 = __pyx_t_5(__pyx_t_1); + if (unlikely(!__pyx_t_6)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[5]; 
__pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[5]; __pyx_lineno = 138; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } - __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); } __Pyx_XDECREF(__pyx_v_ref); - __pyx_v_ref = __pyx_t_5; - __pyx_t_5 = 0; + __pyx_v_ref = __pyx_t_6; + __pyx_t_6 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":135 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":139 * cdef vector[WordID]* refv * for ref in refs: * refv = new vector[WordID]() # <<<<<<<<<<<<<< - * ConvertSentence(string(as_str(ref.strip())), refv) + * ConvertSentence(as_str(ref.strip()), refv) * refsv.push_back(refv[0]) */ - __pyx_v_refv = new std::vector<WordID>(); + try {__pyx_t_7 = new std::vector<WordID>();} catch(...) {__Pyx_CppExn2PyErr(); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}} + __pyx_v_refv = __pyx_t_7; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":136 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":140 * for ref in refs: * refv = new vector[WordID]() - * ConvertSentence(string(as_str(ref.strip())), refv) # <<<<<<<<<<<<<< + * ConvertSentence(as_str(ref.strip()), refv) # <<<<<<<<<<<<<< * refsv.push_back(refv[0]) * del refv */ - __pyx_t_5 = PyObject_GetAttr(__pyx_v_ref, __pyx_n_s__strip); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyObject_Call(__pyx_t_5, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyObject_GetAttr(__pyx_v_ref, __pyx_n_s__strip); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - TD::ConvertSentence(std::string(__pyx_f_5_cdec_as_str(__pyx_t_6, NULL)), __pyx_v_refv); + __pyx_t_8 = PyObject_Call(__pyx_t_6, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = ((PyObject *)__pyx_f_5_cdec_as_str(__pyx_t_8, NULL)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_9 = __pyx_convert_string_from_py_(__pyx_t_6); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + TD::ConvertSentence(__pyx_t_9, __pyx_v_refv); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":137 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":141 * refv = new vector[WordID]() - * ConvertSentence(string(as_str(ref.strip())), refv) + * ConvertSentence(as_str(ref.strip()), refv) * refsv.push_back(refv[0]) # <<<<<<<<<<<<<< * del refv * cdef unsigned i */ __pyx_v_refsv->push_back((__pyx_v_refv[0])); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":138 - * ConvertSentence(string(as_str(ref.strip())), refv) + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":142 + * ConvertSentence(as_str(ref.strip()), refv) * refsv.push_back(refv[0]) * del refv # <<<<<<<<<<<<<< * cdef unsigned i @@ -16493,19 
+16539,19 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_4__call__(struct __pyx_obj_5_cdec_Score } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":140 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":144 * del refv * cdef unsigned i * cdef SegmentEvaluator evaluator = SegmentEvaluator() # <<<<<<<<<<<<<< * evaluator.metric = self.metric * evaluator.scorer = new shared_ptr[mteval.SegmentEvaluator]( */ - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_SegmentEvaluator)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_SegmentEvaluator)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_evaluator = ((struct __pyx_obj_5_cdec_SegmentEvaluator *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":141 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":145 * cdef unsigned i * cdef SegmentEvaluator evaluator = SegmentEvaluator() * evaluator.metric = self.metric # <<<<<<<<<<<<<< @@ -16514,7 +16560,7 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_4__call__(struct __pyx_obj_5_cdec_Score */ __pyx_v_evaluator->metric = __pyx_v_self->metric; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":142 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":146 * cdef SegmentEvaluator evaluator = SegmentEvaluator() * evaluator.metric = self.metric * evaluator.scorer = new shared_ptr[mteval.SegmentEvaluator]( # <<<<<<<<<<<<<< @@ -16523,7 +16569,7 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_4__call__(struct __pyx_obj_5_cdec_Score */ __pyx_v_evaluator->scorer = new boost::shared_ptr<SegmentEvaluator>(__pyx_v_self->metric->CreateSegmentEvaluator((__pyx_v_refsv[0]))); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":144 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":148 * evaluator.scorer = new shared_ptr[mteval.SegmentEvaluator]( * self.metric.CreateSegmentEvaluator(refsv[0])) * del refsv # in theory should not delete but store in SegmentEvaluator # <<<<<<<<<<<<<< @@ -16532,7 +16578,7 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_4__call__(struct __pyx_obj_5_cdec_Score */ delete __pyx_v_refsv; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":145 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":149 * self.metric.CreateSegmentEvaluator(refsv[0])) * del refsv # in theory should not delete but store in SegmentEvaluator * return evaluator # <<<<<<<<<<<<<< @@ -16548,8 +16594,8 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_4__call__(struct __pyx_obj_5_cdec_Score goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("_cdec.Scorer.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; @@ -16572,7 +16618,7 @@ static PyObject *__pyx_pw_5_cdec_6Scorer_7__str__(PyObject *__pyx_v_self) { return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":147 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":151 * return evaluator * * def __str__(self): # <<<<<<<<<<<<<< @@ -16590,7 +16636,7 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_6__str__(struct __pyx_obj_5_cdec_Scorer int __pyx_clineno = 0; 
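/* Context, not part of this diff: the hunks above update the generated code for
 * Scorer.__call__, which wraps one reference string (or a list of them) into a
 * SegmentEvaluator. A hedged usage sketch of that API, based on the docstrings this
 * commit adds; `cdec` is assumed to be the built pycdec module and `hg` a previously
 * decoded Hypergraph (assumptions, not shown in the diff):
 *
 *     import cdec
 *     bleu = cdec.Scorer('IBM_BLEU')              # named metric, cf. BLEU = Scorer('IBM_BLEU')
 *     evaluator = bleu('the reference sentence')  # Scorer.__call__ -> SegmentEvaluator
 *     stats = evaluator.evaluate('a hypothesis')  # -> SufficientStats for the hypothesis
 *     cs = evaluator.candidate_set()              # candidate set scored by this evaluator
 *     cs.add_kbest(hg, 100)                       # add 100-best hypotheses from the hypergraph
 */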
__Pyx_RefNannySetupContext("__str__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":148 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":152 * * def __str__(self): * return str(self.name.c_str()) # <<<<<<<<<<<<<< @@ -16598,14 +16644,14 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_6__str__(struct __pyx_obj_5_cdec_Scorer * cdef float _compute_score(void* metric_, mteval.SufficientStats* stats): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyBytes_FromString(__pyx_v_self->name->c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(__pyx_v_self->name->c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; @@ -16625,7 +16671,7 @@ static PyObject *__pyx_pf_5_cdec_6Scorer_6__str__(struct __pyx_obj_5_cdec_Scorer return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":150 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":154 * return str(self.name.c_str()) * * cdef float _compute_score(void* metric_, mteval.SufficientStats* stats): # <<<<<<<<<<<<<< @@ -16651,7 +16697,7 @@ static float __pyx_f_5_cdec__compute_score(void *__pyx_v_metric_, SufficientStat int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_compute_score", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":151 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":155 * * cdef float _compute_score(void* metric_, mteval.SufficientStats* stats): * cdef Metric metric = <Metric> metric_ # <<<<<<<<<<<<<< @@ -16661,19 +16707,19 @@ static float __pyx_f_5_cdec__compute_score(void *__pyx_v_metric_, SufficientStat __Pyx_INCREF(((PyObject *)((struct __pyx_obj_5_cdec_Metric *)__pyx_v_metric_))); __pyx_v_metric = ((struct __pyx_obj_5_cdec_Metric *)__pyx_v_metric_); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":152 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":156 * cdef float _compute_score(void* metric_, mteval.SufficientStats* stats): * cdef Metric metric = <Metric> metric_ * cdef list ss = [] # <<<<<<<<<<<<<< * cdef unsigned i * for i in range(stats.size()): */ - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
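/* Context, not part of this diff: _compute_score and _compute_sufficient_stats are the C
 * trampolines behind the Python-extensible Metric class registered via
 * mteval.PyMetricInstance below: evaluate(hyp, refs) must return a list of per-sentence
 * sufficient statistics, and score(stats) folds such statistics into a float. A minimal
 * sketch of a custom metric under those assumptions; the exact-match logic is illustrative
 * only and not taken from this commit:
 *
 *     import cdec
 *
 *     class ExactMatch(cdec.Metric):
 *         def evaluate(self, hyp, refs):
 *             # one sufficient statistic: 1.0 iff the hypothesis equals some reference
 *             return [1.0 if any(hyp == ref for ref in refs) else 0.0]
 *
 *         def score(self, stats):
 *             return stats[0]
 */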
__Pyx_GOTREF(__pyx_t_1); __pyx_v_ss = __pyx_t_1; __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":154 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":158 * cdef list ss = [] * cdef unsigned i * for i in range(stats.size()): # <<<<<<<<<<<<<< @@ -16684,38 +16730,38 @@ static float __pyx_f_5_cdec__compute_score(void *__pyx_v_metric_, SufficientStat for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":155 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":159 * cdef unsigned i * for i in range(stats.size()): * ss.append(stats[0][i]) # <<<<<<<<<<<<<< * return metric.score(ss) * */ - __pyx_t_1 = PyFloat_FromDouble(((__pyx_v_stats[0])[__pyx_v_i])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyFloat_FromDouble(((__pyx_v_stats[0])[__pyx_v_i])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyList_Append(__pyx_v_ss, __pyx_t_1); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyList_Append(__pyx_v_ss, __pyx_t_1); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":156 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":160 * for i in range(stats.size()): * ss.append(stats[0][i]) * return metric.score(ss) # <<<<<<<<<<<<<< * * cdef void _compute_sufficient_stats(void* metric_, */ - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_metric), __pyx_n_s__score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_metric), __pyx_n_s__score); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(((PyObject *)__pyx_v_ss)); PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_ss)); __Pyx_GIVEREF(((PyObject *)__pyx_v_ss)); - __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_5), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; - __pyx_t_7 = __pyx_PyFloat_AsFloat(__pyx_t_6); if (unlikely((__pyx_t_7 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_7 = __pyx_PyFloat_AsFloat(__pyx_t_6); if (unlikely((__pyx_t_7 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[5]; 
__pyx_lineno = 160; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_r = __pyx_t_7; goto __pyx_L0; @@ -16735,7 +16781,7 @@ static float __pyx_f_5_cdec__compute_score(void *__pyx_v_metric_, SufficientStat return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":158 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":162 * return metric.score(ss) * * cdef void _compute_sufficient_stats(void* metric_, # <<<<<<<<<<<<<< @@ -16762,7 +16808,7 @@ static void __pyx_f_5_cdec__compute_sufficient_stats(void *__pyx_v_metric_, std: int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_compute_sufficient_stats", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":162 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":166 * vector[string]* refs, * mteval.SufficientStats* out): * cdef Metric metric = <Metric> metric_ # <<<<<<<<<<<<<< @@ -16772,19 +16818,19 @@ static void __pyx_f_5_cdec__compute_sufficient_stats(void *__pyx_v_metric_, std: __Pyx_INCREF(((PyObject *)((struct __pyx_obj_5_cdec_Metric *)__pyx_v_metric_))); __pyx_v_metric = ((struct __pyx_obj_5_cdec_Metric *)__pyx_v_metric_); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":163 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":167 * mteval.SufficientStats* out): * cdef Metric metric = <Metric> metric_ * cdef list refs_ = [] # <<<<<<<<<<<<<< * cdef unsigned i * for i in range(refs.size()): */ - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_refs_ = __pyx_t_1; __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":165 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":169 * cdef list refs_ = [] * cdef unsigned i * for i in range(refs.size()): # <<<<<<<<<<<<<< @@ -16795,47 +16841,47 @@ static void __pyx_f_5_cdec__compute_sufficient_stats(void *__pyx_v_metric_, std: for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":166 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":170 * cdef unsigned i * for i in range(refs.size()): * refs_.append(str(refs[0][i].c_str())) # <<<<<<<<<<<<<< * cdef list ss = metric.evaluate(str(hyp.c_str()), refs_) * out.fields.resize(len(ss)) */ - __pyx_t_1 = PyBytes_FromString(((__pyx_v_refs[0])[__pyx_v_i]).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(((__pyx_v_refs[0])[__pyx_v_i]).c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) 
{__pyx_filename = __pyx_f[5]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_5 = PyList_Append(__pyx_v_refs_, __pyx_t_1); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PyList_Append(__pyx_v_refs_, __pyx_t_1); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":167 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":171 * for i in range(refs.size()): * refs_.append(str(refs[0][i].c_str())) * cdef list ss = metric.evaluate(str(hyp.c_str()), refs_) # <<<<<<<<<<<<<< * out.fields.resize(len(ss)) * for i in range(len(ss)): */ - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_metric), __pyx_n_s__evaluate); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_metric), __pyx_n_s__evaluate); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyBytes_FromString(__pyx_v_hyp->c_str()); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyBytes_FromString(__pyx_v_hyp->c_str()); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); - __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_t_4)); __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); @@ -16843,15 +16889,15 @@ static void __pyx_f_5_cdec__compute_sufficient_stats(void *__pyx_v_metric_, std: PyTuple_SET_ITEM(__pyx_t_6, 1, ((PyObject *)__pyx_v_refs_)); 
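/* Annotation, not part of this diff: at this point _compute_sufficient_stats has converted
 * the C++ hypothesis and each reference into Python strings and packed them into the
 * (hyp, refs_) argument tuple; the PyObject_Call below dispatches to the Python-level
 * Metric.evaluate, and the returned list is copied element-wise into out->fields after the
 * resize. */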
__Pyx_GIVEREF(((PyObject *)__pyx_v_refs_)); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - if (!(likely(PyList_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected list, got %.200s", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(PyList_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected list, got %.200s", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 171; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_ss = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":168 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":172 * refs_.append(str(refs[0][i].c_str())) * cdef list ss = metric.evaluate(str(hyp.c_str()), refs_) * out.fields.resize(len(ss)) # <<<<<<<<<<<<<< @@ -16860,12 +16906,12 @@ static void __pyx_f_5_cdec__compute_sufficient_stats(void *__pyx_v_metric_, std: */ if (unlikely(((PyObject *)__pyx_v_ss) == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - {__pyx_filename = __pyx_f[5]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[5]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } - __pyx_t_7 = PyList_GET_SIZE(((PyObject *)__pyx_v_ss)); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_7 = PyList_GET_SIZE(((PyObject *)__pyx_v_ss)); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_out->fields.resize(__pyx_t_7); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":169 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":173 * cdef list ss = metric.evaluate(str(hyp.c_str()), refs_) * out.fields.resize(len(ss)) * for i in range(len(ss)): # <<<<<<<<<<<<<< @@ -16874,13 +16920,13 @@ static void __pyx_f_5_cdec__compute_sufficient_stats(void *__pyx_v_metric_, std: */ if (unlikely(((PyObject *)__pyx_v_ss) == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - {__pyx_filename = __pyx_f[5]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[5]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } - __pyx_t_7 = PyList_GET_SIZE(((PyObject *)__pyx_v_ss)); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_7 = PyList_GET_SIZE(((PyObject *)__pyx_v_ss)); if (unlikely(__pyx_t_7 == -1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_7; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":170 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":174 * 
out.fields.resize(len(ss)) * for i in range(len(ss)): * out.fields[i] = ss[i] # <<<<<<<<<<<<<< @@ -16889,11 +16935,11 @@ static void __pyx_f_5_cdec__compute_sufficient_stats(void *__pyx_v_metric_, std: */ if (unlikely(((PyObject *)__pyx_v_ss) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - {__pyx_filename = __pyx_f[5]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[5]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } - __pyx_t_4 = __Pyx_GetItemInt_List(((PyObject *)__pyx_v_ss), __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = __Pyx_GetItemInt_List(((PyObject *)__pyx_v_ss), __pyx_v_i, sizeof(unsigned int)+1, PyLong_FromUnsignedLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); - __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_4); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_4); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; (__pyx_v_out->fields[__pyx_v_i]) = __pyx_t_8; } @@ -16925,32 +16971,35 @@ static int __pyx_pw_5_cdec_6Metric_1__cinit__(PyObject *__pyx_v_self, PyObject * return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":174 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":178 * cdef class Metric: * cdef Scorer scorer * def __cinit__(self): # <<<<<<<<<<<<<< * self.scorer = Scorer() - * self.scorer.name = new string(as_str(self.__class__.__name__)) + * cdef bytes class_name = self.__class__.__name__ */ static int __pyx_pf_5_cdec_6Metric___cinit__(struct __pyx_obj_5_cdec_Metric *__pyx_v_self) { + PyObject *__pyx_v_class_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; + char *__pyx_t_3; + std::string *__pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":175 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":179 * cdef Scorer scorer * def __cinit__(self): * self.scorer = Scorer() # <<<<<<<<<<<<<< - * self.scorer.name = new string(as_str(self.__class__.__name__)) - * self.scorer.metric = mteval.PyMetricInstance(self.scorer.name[0], + * cdef bytes class_name = self.__class__.__name__ + * self.scorer.name = new string(class_name) */ - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Scorer)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 175; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Scorer)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v_self->scorer); @@ -16958,24 +17007,36 @@ static int __pyx_pf_5_cdec_6Metric___cinit__(struct __pyx_obj_5_cdec_Metric *__p __pyx_v_self->scorer = 
((struct __pyx_obj_5_cdec_Scorer *)__pyx_t_1); __pyx_t_1 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":176 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":180 * def __cinit__(self): * self.scorer = Scorer() - * self.scorer.name = new string(as_str(self.__class__.__name__)) # <<<<<<<<<<<<<< + * cdef bytes class_name = self.__class__.__name__ # <<<<<<<<<<<<<< + * self.scorer.name = new string(class_name) * self.scorer.metric = mteval.PyMetricInstance(self.scorer.name[0], - * <void*> self, _compute_sufficient_stats, _compute_score) */ - __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s____class__); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self), __pyx_n_s____class__); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s____name__); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s____name__); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_self->scorer->name = new std::string(__pyx_f_5_cdec_as_str(__pyx_t_2, NULL)); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (!(likely(PyBytes_CheckExact(__pyx_t_2))||((__pyx_t_2) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected bytes, got %.200s", Py_TYPE(__pyx_t_2)->tp_name), 0))) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_v_class_name = ((PyObject*)__pyx_t_2); + __pyx_t_2 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":177 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":181 * self.scorer = Scorer() - * self.scorer.name = new string(as_str(self.__class__.__name__)) + * cdef bytes class_name = self.__class__.__name__ + * self.scorer.name = new string(class_name) # <<<<<<<<<<<<<< + * self.scorer.metric = mteval.PyMetricInstance(self.scorer.name[0], + * <void*> self, _compute_sufficient_stats, _compute_score) + */ + __pyx_t_3 = PyBytes_AsString(((PyObject *)__pyx_v_class_name)); if (unlikely((!__pyx_t_3) && PyErr_Occurred())) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + try {__pyx_t_4 = new std::string(__pyx_t_3);} catch(...) 
{__Pyx_CppExn2PyErr(); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}} + __pyx_v_self->scorer->name = __pyx_t_4; + + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":182 + * cdef bytes class_name = self.__class__.__name__ + * self.scorer.name = new string(class_name) * self.scorer.metric = mteval.PyMetricInstance(self.scorer.name[0], # <<<<<<<<<<<<<< * <void*> self, _compute_sufficient_stats, _compute_score) * @@ -16990,6 +17051,7 @@ static int __pyx_pf_5_cdec_6Metric___cinit__(struct __pyx_obj_5_cdec_Metric *__p __Pyx_AddTraceback("_cdec.Metric.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; + __Pyx_XDECREF(__pyx_v_class_name); __Pyx_RefNannyFinishContext(); return __pyx_r; } @@ -17019,7 +17081,7 @@ static PyObject *__pyx_pw_5_cdec_6Metric_3__call__(PyObject *__pyx_v_self, PyObj else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; @@ -17030,7 +17092,7 @@ static PyObject *__pyx_pw_5_cdec_6Metric_3__call__(PyObject *__pyx_v_self, PyObj } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 180; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("_cdec.Metric.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -17041,7 +17103,7 @@ static PyObject *__pyx_pw_5_cdec_6Metric_3__call__(PyObject *__pyx_v_self, PyObj return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":180 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":185 * <void*> self, _compute_sufficient_stats, _compute_score) * * def __call__(self, refs): # <<<<<<<<<<<<<< @@ -17059,7 +17121,7 @@ static PyObject *__pyx_pf_5_cdec_6Metric_2__call__(struct __pyx_obj_5_cdec_Metri int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__call__", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":181 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":186 * * def __call__(self, refs): * return self.scorer(refs) # <<<<<<<<<<<<<< @@ -17067,12 +17129,12 @@ static PyObject *__pyx_pf_5_cdec_6Metric_2__call__(struct __pyx_obj_5_cdec_Metri * def score(SufficientStats stats): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_refs); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_refs); __Pyx_GIVEREF(__pyx_v_refs); - __pyx_t_2 = PyObject_Call(((PyObject *)__pyx_v_self->scorer), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) 
{__pyx_filename = __pyx_f[5]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(((PyObject *)__pyx_v_self->scorer), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; @@ -17103,7 +17165,7 @@ static PyObject *__pyx_pw_5_cdec_6Metric_5score(PyObject *__pyx_v_stats, CYTHON_ return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":183 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":188 * return self.scorer(refs) * * def score(SufficientStats stats): # <<<<<<<<<<<<<< @@ -17116,7 +17178,7 @@ static PyObject *__pyx_pf_5_cdec_6Metric_4score(CYTHON_UNUSED struct __pyx_obj_5 __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("score", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":184 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":189 * * def score(SufficientStats stats): * return 0 # <<<<<<<<<<<<<< @@ -17163,11 +17225,11 @@ static PyObject *__pyx_pw_5_cdec_6Metric_7evaluate(PyObject *__pyx_v_self, PyObj case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__refs)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("evaluate", 1, 2, 2, 1); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("evaluate", 1, 2, 2, 1); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "evaluate") < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "evaluate") < 0)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; @@ -17180,7 +17242,7 @@ static PyObject *__pyx_pw_5_cdec_6Metric_7evaluate(PyObject *__pyx_v_self, PyObj } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("evaluate", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("evaluate", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[5]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("_cdec.Metric.evaluate", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -17191,7 +17253,7 @@ static PyObject *__pyx_pw_5_cdec_6Metric_7evaluate(PyObject *__pyx_v_self, PyObj return __pyx_r; } -/* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":186 +/* "/home/vchahune/tools/cdec/python/src/mteval.pxi":191 * return 0 * * def evaluate(self, hyp, refs): # <<<<<<<<<<<<<< @@ -17208,7 +17270,7 @@ static PyObject *__pyx_pf_5_cdec_6Metric_6evaluate(CYTHON_UNUSED struct __pyx_ob int __pyx_clineno = 0; __Pyx_RefNannySetupContext("evaluate", 0); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":187 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":192 * * def evaluate(self, hyp, refs): * return [] # <<<<<<<<<<<<<< @@ -17216,7 +17278,7 @@ static PyObject 
*__pyx_pf_5_cdec_6Metric_6evaluate(CYTHON_UNUSED struct __pyx_ob * BLEU = Scorer('IBM_BLEU') */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = ((PyObject *)__pyx_t_1); __pyx_t_1 = 0; @@ -17236,7 +17298,8 @@ static PyObject *__pyx_pf_5_cdec_6Metric_6evaluate(CYTHON_UNUSED struct __pyx_ob /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_3set_silent(PyObject *__pyx_self, PyObject *__pyx_v_yn); /*proto*/ -static PyMethodDef __pyx_mdef_5_cdec_3set_silent = {__Pyx_NAMESTR("set_silent"), (PyCFunction)__pyx_pw_5_cdec_3set_silent, METH_O, __Pyx_DOCSTR(0)}; +static char __pyx_doc_5_cdec_2set_silent[] = "set_silent(bool): Configure the verbosity of cdec."; +static PyMethodDef __pyx_mdef_5_cdec_3set_silent = {__Pyx_NAMESTR("set_silent"), (PyCFunction)__pyx_pw_5_cdec_3set_silent, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_2set_silent)}; static PyObject *__pyx_pw_5_cdec_3set_silent(PyObject *__pyx_self, PyObject *__pyx_v_yn) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -17250,8 +17313,8 @@ static PyObject *__pyx_pw_5_cdec_3set_silent(PyObject *__pyx_self, PyObject *__p * class ParseFailed(Exception): pass * * def set_silent(yn): # <<<<<<<<<<<<<< + * """set_silent(bool): Configure the verbosity of cdec.""" * SetSilent(yn) - * */ static PyObject *__pyx_pf_5_cdec_2set_silent(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_yn) { @@ -17263,14 +17326,14 @@ static PyObject *__pyx_pf_5_cdec_2set_silent(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_clineno = 0; __Pyx_RefNannySetupContext("set_silent", 0); - /* "_cdec.pyx":29 - * + /* "_cdec.pyx":30 * def set_silent(yn): + * """set_silent(bool): Configure the verbosity of cdec.""" * SetSilent(yn) # <<<<<<<<<<<<<< * * def _make_config(config): */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_yn); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_yn); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;} SetSilent(__pyx_t_1); __pyx_r = Py_None; __Pyx_INCREF(Py_None); @@ -17297,7 +17360,7 @@ static PyObject *__pyx_pw_5_cdec_5_make_config(PyObject *__pyx_self, PyObject *_ return __pyx_r; } -/* "_cdec.pyx":31 +/* "_cdec.pyx":32 * SetSilent(yn) * * def _make_config(config): # <<<<<<<<<<<<<< @@ -17323,7 +17386,7 @@ static PyObject *__pyx_pf_5_cdec_4_make_config(CYTHON_UNUSED PyObject *__pyx_sel __Pyx_INCREF(__pyx_cur_scope->__pyx_v_config); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_config); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_6generator17, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_6generator17, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -17369,25 +17432,25 @@ 
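The hunks above regenerate the `Metric` base class from `mteval.pxi`: the scorer's `name` is now initialized from the Python subclass's class name (with a matching `__Pyx_XDECREF(__pyx_v_class_name)` added on the cleanup path), and `set_silent` gains the docstring shown. Per the interleaved source comments, the base class ships no-op defaults, `evaluate(hyp, refs)` returns `[]` and `score(stats)` returns `0`, with the C++ side calling back into Python through `_compute_sufficient_stats` and `_compute_score`. A minimal sketch of a custom metric under that reading (the class name, statistics layout, and override style are illustrative assumptions, not taken from the diff):

    import cdec

    class ExactMatch(cdec.Metric):
        def evaluate(self, hyp, refs):
            # per-segment sufficient statistics; the layout is metric-defined
            return [1.0 if hyp in refs else 0.0, 1.0]

        def score(self, stats):
            # corpus-level score computed from the aggregated statistics
            return stats[0] / stats[1] if stats[1] else 0.0

    cdec.set_silent(True)  # per the new docstring: configure cdec's verbosity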
static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "_cdec.pyx":32 + /* "_cdec.pyx":33 * * def _make_config(config): * for key, value in config.items(): # <<<<<<<<<<<<<< * if isinstance(value, dict): * for name, info in value.items(): */ - __pyx_t_1 = PyObject_GetAttr(__pyx_cur_scope->__pyx_v_config, __pyx_n_s__items); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(__pyx_cur_scope->__pyx_v_config, __pyx_n_s__items); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyList_CheckExact(__pyx_t_2) || PyTuple_CheckExact(__pyx_t_2)) { __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { - __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = Py_TYPE(__pyx_t_1)->tp_iternext; } @@ -17396,23 +17459,23 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_1)) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; + __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_1)) { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; + __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_2 = __pyx_t_4(__pyx_t_1); if (unlikely(!__pyx_t_2)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } @@ -17428,7 +17491,7 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { @@ -17441,14 +17504,14 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); #else - __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { Py_ssize_t index = -1; - __pyx_t_7 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_7 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_8 = Py_TYPE(__pyx_t_7)->tp_iternext; @@ -17456,7 +17519,7 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __Pyx_GOTREF(__pyx_t_5); index = 1; __pyx_t_6 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_6)) goto __pyx_L6_unpacking_failed; __Pyx_GOTREF(__pyx_t_6); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_8 = NULL; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L7_unpacking_done; @@ -17464,7 +17527,7 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_8 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L7_unpacking_done:; } __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_key); @@ -17478,7 +17541,7 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __pyx_cur_scope->__pyx_v_value = __pyx_t_6; __pyx_t_6 = 0; - /* "_cdec.pyx":33 + /* "_cdec.pyx":34 * def _make_config(config): * for key, value in config.items(): * if isinstance(value, dict): # <<<<<<<<<<<<<< @@ -17491,23 +17554,23 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_9) { - /* "_cdec.pyx":34 + /* "_cdec.pyx":35 * for key, value in config.items(): * if isinstance(value, dict): * for name, info in value.items(): # <<<<<<<<<<<<<< * yield key, '%s %s' % (name, info) * elif isinstance(value, list): */ - __pyx_t_2 = PyObject_GetAttr(__pyx_cur_scope->__pyx_v_value, __pyx_n_s__items); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_GetAttr(__pyx_cur_scope->__pyx_v_value, __pyx_n_s__items); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (PyList_CheckExact(__pyx_t_6) || PyTuple_CheckExact(__pyx_t_6)) { __pyx_t_2 = __pyx_t_6; __Pyx_INCREF(__pyx_t_2); __pyx_t_10 = 0; __pyx_t_11 = NULL; } else { - __pyx_t_10 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_10 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_11 = Py_TYPE(__pyx_t_2)->tp_iternext; } @@ -17516,23 +17579,23 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener if (!__pyx_t_11 && PyList_CheckExact(__pyx_t_2)) { if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_6 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; + __pyx_t_6 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_11 && PyTuple_CheckExact(__pyx_t_2)) { if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); 
__pyx_t_10++; + __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_6 = __pyx_t_11(__pyx_t_2); if (unlikely(!__pyx_t_6)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } @@ -17548,7 +17611,7 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { @@ -17561,14 +17624,14 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); #else - __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_7 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_7 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else { Py_ssize_t index = -1; - __pyx_t_12 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_12 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_12); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_8 = Py_TYPE(__pyx_t_12)->tp_iternext; @@ -17576,7 +17639,7 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __Pyx_GOTREF(__pyx_t_5); index = 1; __pyx_t_7 = __pyx_t_8(__pyx_t_12); if (unlikely(!__pyx_t_7)) goto __pyx_L11_unpacking_failed; __Pyx_GOTREF(__pyx_t_7); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_12), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_12), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_8 = NULL; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; goto 
__pyx_L12_unpacking_done; @@ -17584,7 +17647,7 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __pyx_t_8 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L12_unpacking_done:; } __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_name); @@ -17598,14 +17661,14 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __pyx_cur_scope->__pyx_v_info = __pyx_t_7; __pyx_t_7 = 0; - /* "_cdec.pyx":35 + /* "_cdec.pyx":36 * if isinstance(value, dict): * for name, info in value.items(): * yield key, '%s %s' % (name, info) # <<<<<<<<<<<<<< * elif isinstance(value, list): * for name in value: */ - __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_cur_scope->__pyx_v_name); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_cur_scope->__pyx_v_name); @@ -17613,10 +17676,10 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __Pyx_INCREF(__pyx_cur_scope->__pyx_v_info); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_cur_scope->__pyx_v_info); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_info); - __pyx_t_7 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_46), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_7 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_44), ((PyObject *)__pyx_t_6)); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_7)); __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; - __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_cur_scope->__pyx_v_key); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_cur_scope->__pyx_v_key); @@ -17650,13 +17713,13 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __pyx_t_4 = __pyx_cur_scope->__pyx_t_3; __pyx_t_10 = __pyx_cur_scope->__pyx_t_4; __pyx_t_11 = __pyx_cur_scope->__pyx_t_5; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L8; } - /* "_cdec.pyx":36 + /* "_cdec.pyx":37 * for name, info in value.items(): * yield key, '%s %s' % (name, info) * elif isinstance(value, list): # <<<<<<<<<<<<<< @@ -17669,7 +17732,7 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_9) { - /* "_cdec.pyx":37 + /* "_cdec.pyx":38 * yield key, '%s %s' % (name, info) * elif isinstance(value, list): * for 
name in value: # <<<<<<<<<<<<<< @@ -17680,7 +17743,7 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __pyx_t_2 = __pyx_cur_scope->__pyx_v_value; __Pyx_INCREF(__pyx_t_2); __pyx_t_10 = 0; __pyx_t_11 = NULL; } else { - __pyx_t_10 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_cur_scope->__pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_10 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_cur_scope->__pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_11 = Py_TYPE(__pyx_t_2)->tp_iternext; } @@ -17688,23 +17751,23 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener if (!__pyx_t_11 && PyList_CheckExact(__pyx_t_2)) { if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_6 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; + __pyx_t_6 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_11 && PyTuple_CheckExact(__pyx_t_2)) { if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; + __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_6 = __pyx_t_11(__pyx_t_2); if (unlikely(!__pyx_t_6)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 37; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } @@ -17716,14 +17779,14 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __pyx_cur_scope->__pyx_v_name = __pyx_t_6; __pyx_t_6 = 0; - /* "_cdec.pyx":38 + /* "_cdec.pyx":39 * elif isinstance(value, list): * for name in value: * yield key, name # <<<<<<<<<<<<<< * else: * yield key, str(value) */ - __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_cur_scope->__pyx_v_key); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_cur_scope->__pyx_v_key); @@ -17757,29 +17820,29 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __pyx_t_4 = __pyx_cur_scope->__pyx_t_3; __pyx_t_10 = __pyx_cur_scope->__pyx_t_4; __pyx_t_11 = __pyx_cur_scope->__pyx_t_5; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L8; } /*else*/ { - /* "_cdec.pyx":40 + /* "_cdec.pyx":41 * yield key, name * else: * yield key, str(value) # <<<<<<<<<<<<<< * * cdef class Decoder: */ - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_cur_scope->__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_cur_scope->__pyx_v_value); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_value); - __pyx_t_6 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_cur_scope->__pyx_v_key); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_cur_scope->__pyx_v_key); @@ -17804,7 +17867,7 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener __Pyx_XGOTREF(__pyx_t_1); __pyx_t_3 = __pyx_cur_scope->__pyx_t_2; __pyx_t_4 = __pyx_cur_scope->__pyx_t_3; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L8:; } @@ -17828,25 +17891,29 @@ static PyObject *__pyx_gb_5_cdec_6generator17(__pyx_GeneratorObject *__pyx_gener } /* Python wrapper */ -static int __pyx_pw_5_cdec_7Decoder_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_pw_5_cdec_7Decoder_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { +static int __pyx_pw_5_cdec_7Decoder_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_7Decoder___init__[] = "Decoder('formalism = scfg') -> initialize from configuration string\n Decoder(formalism='scfg') -> initialize from named parameters\n Create a decoder using a given configuration. 
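The generator body compiled above is the `_make_config` helper from `_cdec.pyx` (lines 32-41 in the new numbering). Reassembled from the interleaved source comments, the original source reads:

    def _make_config(config):
        for key, value in config.items():
            if isinstance(value, dict):
                for name, info in value.items():
                    yield key, '%s %s' % (name, info)
            elif isinstance(value, list):
                for name in value:
                    yield key, name
            else:
                yield key, str(value)

Each keyword argument to `Decoder` therefore becomes one or more config lines: a dict value expands to one `key = name info` line per item, a list repeats the key once per element, and any other value is stringified.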
Formalism is required."; +#if CYTHON_COMPILING_IN_CPYTHON +struct wrapperbase __pyx_wrapperbase_5_cdec_7Decoder___init__; +#endif +static int __pyx_pw_5_cdec_7Decoder_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_config_str = 0; PyObject *__pyx_v_config = 0; int __pyx_r; __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); __pyx_v_config = PyDict_New(); if (unlikely(!__pyx_v_config)) return -1; __Pyx_GOTREF(__pyx_v_config); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__config_str,0}; PyObject* values[1] = {0}; - /* "_cdec.pyx":46 + /* "_cdec.pyx":47 * cdef DenseVector weights * - * def __cinit__(self, config_str=None, **config): # <<<<<<<<<<<<<< - * """ Configuration can be given as a string: - * Decoder('formalism = scfg') + * def __init__(self, config_str=None, **config): # <<<<<<<<<<<<<< + * """Decoder('formalism = scfg') -> initialize from configuration string + * Decoder(formalism='scfg') -> initialize from named parameters */ values[0] = ((PyObject *)Py_None); if (unlikely(__pyx_kwds)) { @@ -17866,7 +17933,7 @@ static int __pyx_pw_5_cdec_7Decoder_1__cinit__(PyObject *__pyx_v_self, PyObject } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_config, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_config, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -17879,21 +17946,21 @@ static int __pyx_pw_5_cdec_7Decoder_1__cinit__(PyObject *__pyx_v_self, PyObject } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_CLEAR(__pyx_v_config); - __Pyx_AddTraceback("_cdec.Decoder.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_AddTraceback("_cdec.Decoder.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_5_cdec_7Decoder___cinit__(((struct __pyx_obj_5_cdec_Decoder *)__pyx_v_self), __pyx_v_config_str, __pyx_v_config); + __pyx_r = __pyx_pf_5_cdec_7Decoder___init__(((struct __pyx_obj_5_cdec_Decoder *)__pyx_v_self), __pyx_v_config_str, __pyx_v_config); __Pyx_XDECREF(__pyx_v_config); __Pyx_RefNannyFinishContext(); return __pyx_r; } -static PyObject *__pyx_gb_5_cdec_7Decoder_9__cinit___2generator21(__pyx_GeneratorObject *__pyx_generator, PyObject *__pyx_sent_value); /* proto */ +static PyObject *__pyx_gb_5_cdec_7Decoder_8__init___2generator21(__pyx_GeneratorObject *__pyx_generator, PyObject *__pyx_sent_value); /* proto */ -/* "_cdec.pyx":57 +/* "_cdec.pyx":56 * 'csplit', 'tagger', 'lexalign'): * raise InvalidConfig('formalism "%s" unknown' % formalism) * config_str = '\n'.join('%s = %s' % kv for kv in _make_config(config)) # <<<<<<<<<<<<<< @@ -17901,7 +17968,7 @@ static PyObject 
*__pyx_gb_5_cdec_7Decoder_9__cinit___2generator21(__pyx_Generato * self.dec = new decoder.Decoder(config_stream) */ -static PyObject *__pyx_pf_5_cdec_7Decoder_9__cinit___genexpr(PyObject *__pyx_self) { +static PyObject *__pyx_pf_5_cdec_7Decoder_8__init___genexpr(PyObject *__pyx_self) { struct __pyx_obj_5_cdec___pyx_scope_struct_25_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations @@ -17915,11 +17982,11 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_9__cinit___genexpr(PyObject *__pyx_sel return NULL; } __Pyx_GOTREF(__pyx_cur_scope); - __pyx_cur_scope->__pyx_outer_scope = (struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *) __pyx_self; + __pyx_cur_scope->__pyx_outer_scope = (struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *) __pyx_self; __Pyx_INCREF(((PyObject *)__pyx_cur_scope->__pyx_outer_scope)); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_outer_scope); { - __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_7Decoder_9__cinit___2generator21, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_GeneratorObject *gen = __Pyx_Generator_New((__pyx_generator_body_t) __pyx_gb_5_cdec_7Decoder_8__init___2generator21, (PyObject *) __pyx_cur_scope); if (unlikely(!gen)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; @@ -17928,7 +17995,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_9__cinit___genexpr(PyObject *__pyx_sel __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; - __Pyx_AddTraceback("_cdec.Decoder.__cinit__.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_AddTraceback("_cdec.Decoder.__init__.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_DECREF(((PyObject *)__pyx_cur_scope)); @@ -17937,7 +18004,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_9__cinit___genexpr(PyObject *__pyx_sel return __pyx_r; } -static PyObject *__pyx_gb_5_cdec_7Decoder_9__cinit___2generator21(__pyx_GeneratorObject *__pyx_generator, PyObject *__pyx_sent_value) /* generator body */ +static PyObject *__pyx_gb_5_cdec_7Decoder_8__init___2generator21(__pyx_GeneratorObject *__pyx_generator, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_5_cdec___pyx_scope_struct_25_genexpr *__pyx_cur_scope = ((struct __pyx_obj_5_cdec___pyx_scope_struct_25_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; @@ -17956,16 +18023,16 @@ static PyObject *__pyx_gb_5_cdec_7Decoder_9__cinit___2generator21(__pyx_Generato return NULL; } __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___make_config); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s___make_config); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_config)) { __Pyx_RaiseClosureNameError("config"); {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_config)) { __Pyx_RaiseClosureNameError("config"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_config); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_cur_scope->__pyx_outer_scope->__pyx_v_config); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_config); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; @@ -17973,7 +18040,7 @@ static PyObject *__pyx_gb_5_cdec_7Decoder_9__cinit___2generator21(__pyx_Generato __pyx_t_2 = __pyx_t_3; __Pyx_INCREF(__pyx_t_2); __pyx_t_4 = 0; __pyx_t_5 = NULL; } else { - __pyx_t_4 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_5 = Py_TYPE(__pyx_t_2)->tp_iternext; } @@ -17982,23 +18049,23 @@ static PyObject *__pyx_gb_5_cdec_7Decoder_9__cinit___2generator21(__pyx_Generato if (!__pyx_t_5 && PyList_CheckExact(__pyx_t_2)) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_3 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; + __pyx_t_3 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_5 && PyTuple_CheckExact(__pyx_t_2)) { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_3 = 
PySequence_ITEM(__pyx_t_2, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_3 = __pyx_t_5(__pyx_t_2); if (unlikely(!__pyx_t_3)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } @@ -18009,7 +18076,7 @@ static PyObject *__pyx_gb_5_cdec_7Decoder_9__cinit___2generator21(__pyx_Generato __Pyx_GIVEREF(__pyx_t_3); __pyx_cur_scope->__pyx_v_kv = __pyx_t_3; __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_47), __pyx_cur_scope->__pyx_v_kv); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_45), __pyx_cur_scope->__pyx_v_kv); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); __pyx_r = ((PyObject *)__pyx_t_3); __pyx_t_3 = 0; @@ -18028,7 +18095,7 @@ static PyObject *__pyx_gb_5_cdec_7Decoder_9__cinit___2generator21(__pyx_Generato __Pyx_XGOTREF(__pyx_t_2); __pyx_t_4 = __pyx_cur_scope->__pyx_t_1; __pyx_t_5 = __pyx_cur_scope->__pyx_t_2; - if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (unlikely(!__pyx_sent_value)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyErr_SetNone(PyExc_StopIteration); @@ -18046,16 +18113,16 @@ static PyObject *__pyx_gb_5_cdec_7Decoder_9__cinit___2generator21(__pyx_Generato return NULL; } -/* "_cdec.pyx":46 +/* "_cdec.pyx":47 * cdef DenseVector weights * - * def __cinit__(self, config_str=None, **config): # <<<<<<<<<<<<<< - * """ Configuration can be given as a string: - * Decoder('formalism = scfg') + * def __init__(self, config_str=None, **config): # <<<<<<<<<<<<<< + * """Decoder('formalism = scfg') -> initialize from configuration string + * Decoder(formalism='scfg') -> initialize from named parameters */ -static int __pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *__pyx_v_self, PyObject *__pyx_v_config_str, PyObject *__pyx_v_config) { - struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *__pyx_cur_scope; +static int __pyx_pf_5_cdec_7Decoder___init__(struct __pyx_obj_5_cdec_Decoder *__pyx_v_self, PyObject *__pyx_v_config_str, PyObject *__pyx_v_config) { + struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *__pyx_cur_scope; PyObject *__pyx_v_formalism = NULL; std::istringstream *__pyx_v_config_stream; int __pyx_r; @@ -18070,8 +18137,8 @@ static int __pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *_ int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - __pyx_cur_scope = (struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *)__pyx_ptype_5_cdec___pyx_scope_struct_24___cinit__->tp_new(__pyx_ptype_5_cdec___pyx_scope_struct_24___cinit__, __pyx_empty_tuple, NULL); + __Pyx_RefNannySetupContext("__init__", 0); + __pyx_cur_scope = (struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ 
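The rename from `Decoder.__cinit__` to `Decoder.__init__` visible here (together with the new `wrapperbase` slot that preserves the docstring under CPython) moves construction from Cython's allocation-time hook to an ordinary Python-level initializer, so the signature and docstring become visible to `help()` and overridable by subclasses. Read off the source comments, the new header is:

    def __init__(self, config_str=None, **config):
        """Decoder('formalism = scfg') -> initialize from configuration string
        Decoder(formalism='scfg') -> initialize from named parameters
        Create a decoder using a given configuration. Formalism is required."""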
*)__pyx_ptype_5_cdec___pyx_scope_struct_24___init__->tp_new(__pyx_ptype_5_cdec___pyx_scope_struct_24___init__, __pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __Pyx_RefNannyFinishContext(); return -1; @@ -18082,9 +18149,9 @@ static int __pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *_ __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_config); __Pyx_INCREF(__pyx_v_config_str); - /* "_cdec.pyx":52 - * Decoder(formalism='scfg') - * """ + /* "_cdec.pyx":51 + * Decoder(formalism='scfg') -> initialize from named parameters + * Create a decoder using a given configuration. Formalism is required.""" * if config_str is None: # <<<<<<<<<<<<<< * formalism = config.get('formalism', None) * if formalism not in ('scfg', 'fst', 'lextrans', 'pb', @@ -18092,22 +18159,22 @@ static int __pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *_ __pyx_t_1 = (__pyx_v_config_str == Py_None); if (__pyx_t_1) { - /* "_cdec.pyx":53 - * """ + /* "_cdec.pyx":52 + * Create a decoder using a given configuration. Formalism is required.""" * if config_str is None: * formalism = config.get('formalism', None) # <<<<<<<<<<<<<< * if formalism not in ('scfg', 'fst', 'lextrans', 'pb', * 'csplit', 'tagger', 'lexalign'): */ - __pyx_t_2 = PyObject_GetAttr(__pyx_cur_scope->__pyx_v_config, __pyx_n_s__get); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_GetAttr(__pyx_cur_scope->__pyx_v_config, __pyx_n_s__get); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_48), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_46), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_formalism = __pyx_t_3; __pyx_t_3 = 0; - /* "_cdec.pyx":54 + /* "_cdec.pyx":53 * if config_str is None: * formalism = config.get('formalism', None) * if formalism not in ('scfg', 'fst', 'lextrans', 'pb', # <<<<<<<<<<<<<< @@ -18116,39 +18183,53 @@ static int __pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *_ */ __Pyx_INCREF(__pyx_v_formalism); __pyx_t_3 = __pyx_v_formalism; - __pyx_t_1 = __Pyx_PyString_Equals(__pyx_t_3, ((PyObject *)__pyx_n_s__scfg), Py_NE); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, ((PyObject *)__pyx_n_s__scfg), Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (((int)__pyx_t_1)) { - __pyx_t_4 = __Pyx_PyString_Equals(__pyx_t_3, ((PyObject *)__pyx_n_s__fst), Py_NE); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, ((PyObject *)__pyx_n_s__fst), Py_NE); 
__Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = ((int)__pyx_t_4); } else { __pyx_t_5 = ((int)__pyx_t_1); } if (__pyx_t_5) { - __pyx_t_1 = __Pyx_PyString_Equals(__pyx_t_3, ((PyObject *)__pyx_n_s__lextrans), Py_NE); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, ((PyObject *)__pyx_n_s__lextrans), Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = ((int)__pyx_t_1); } else { __pyx_t_4 = __pyx_t_5; } if (__pyx_t_4) { - __pyx_t_5 = __Pyx_PyString_Equals(__pyx_t_3, ((PyObject *)__pyx_n_s__pb), Py_NE); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, ((PyObject *)__pyx_n_s__pb), Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = ((int)__pyx_t_5); } else { __pyx_t_1 = __pyx_t_4; } if (__pyx_t_1) { - __pyx_t_4 = __Pyx_PyString_Equals(__pyx_t_3, ((PyObject *)__pyx_n_s__csplit), Py_NE); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, ((PyObject *)__pyx_n_s__csplit), Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = ((int)__pyx_t_4); } else { __pyx_t_5 = __pyx_t_1; } if (__pyx_t_5) { - __pyx_t_1 = __Pyx_PyString_Equals(__pyx_t_3, ((PyObject *)__pyx_n_s__tagger), Py_NE); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, ((PyObject *)__pyx_n_s__tagger), Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_4 = ((int)__pyx_t_1); } else { __pyx_t_4 = 
__pyx_t_5; } if (__pyx_t_4) { - __pyx_t_5 = __Pyx_PyString_Equals(__pyx_t_3, ((PyObject *)__pyx_n_s__lexalign), Py_NE); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, ((PyObject *)__pyx_n_s__lexalign), Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = ((int)__pyx_t_5); } else { __pyx_t_1 = __pyx_t_4; @@ -18157,50 +18238,50 @@ static int __pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *_ __pyx_t_4 = __pyx_t_1; if (__pyx_t_4) { - /* "_cdec.pyx":56 + /* "_cdec.pyx":55 * if formalism not in ('scfg', 'fst', 'lextrans', 'pb', * 'csplit', 'tagger', 'lexalign'): * raise InvalidConfig('formalism "%s" unknown' % formalism) # <<<<<<<<<<<<<< * config_str = '\n'.join('%s = %s' % kv for kv in _make_config(config)) * cdef istringstream* config_stream = new istringstream(config_str) */ - __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__InvalidConfig); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__InvalidConfig); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_49), __pyx_v_formalism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_47), __pyx_v_formalism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); - __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, ((PyObject *)__pyx_t_2)); __Pyx_GIVEREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L4; } __pyx_L4:; - /* "_cdec.pyx":57 + /* "_cdec.pyx":56 * 'csplit', 'tagger', 'lexalign'): * raise InvalidConfig('formalism "%s" unknown' % formalism) * config_str = '\n'.join('%s = 
%s' % kv for kv in _make_config(config)) # <<<<<<<<<<<<<< * cdef istringstream* config_stream = new istringstream(config_str) * self.dec = new decoder.Decoder(config_stream) */ - __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_40), __pyx_n_s__join); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_GetAttr(((PyObject *)__pyx_kp_s_38), __pyx_n_s__join); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __pyx_pf_5_cdec_7Decoder_9__cinit___genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = __pyx_pf_5_cdec_7Decoder_8__init___genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; @@ -18211,17 +18292,17 @@ static int __pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *_ } __pyx_L3:; - /* "_cdec.pyx":58 + /* "_cdec.pyx":57 * raise InvalidConfig('formalism "%s" unknown' % formalism) * config_str = '\n'.join('%s = %s' % kv for kv in _make_config(config)) * cdef istringstream* config_stream = new istringstream(config_str) # <<<<<<<<<<<<<< * self.dec = new decoder.Decoder(config_stream) * del config_stream */ - __pyx_t_7 = PyBytes_AsString(__pyx_v_config_str); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_7 = PyBytes_AsString(__pyx_v_config_str); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_config_stream = new std::istringstream(__pyx_t_7); - /* "_cdec.pyx":59 + /* "_cdec.pyx":58 * config_str = '\n'.join('%s = %s' % kv for kv in _make_config(config)) * cdef istringstream* config_stream = new istringstream(config_str) * self.dec = new decoder.Decoder(config_stream) # <<<<<<<<<<<<<< @@ -18230,7 +18311,7 @@ static int __pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *_ */ __pyx_v_self->dec = new Decoder(__pyx_v_config_stream); - /* "_cdec.pyx":60 + /* "_cdec.pyx":59 * cdef istringstream* config_stream = new istringstream(config_str) * self.dec = new decoder.Decoder(config_stream) * del config_stream # <<<<<<<<<<<<<< @@ -18239,23 +18320,23 @@ static int 
__pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *_ */ delete __pyx_v_config_stream; - /* "_cdec.pyx":61 + /* "_cdec.pyx":60 * self.dec = new decoder.Decoder(config_stream) * del config_stream * self.weights = DenseVector.__new__(DenseVector) # <<<<<<<<<<<<<< * self.weights.vector = &self.dec.CurrentWeightVector() * self.weights.owned = True */ - __pyx_t_6 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_DenseVector)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = __Pyx_tp_new(((PyObject*)__pyx_ptype_5_cdec_DenseVector)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); - if (!(likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5_cdec_DenseVector)))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5_cdec_DenseVector)))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GIVEREF(__pyx_t_6); __Pyx_GOTREF(__pyx_v_self->weights); __Pyx_DECREF(((PyObject *)__pyx_v_self->weights)); __pyx_v_self->weights = ((struct __pyx_obj_5_cdec_DenseVector *)__pyx_t_6); __pyx_t_6 = 0; - /* "_cdec.pyx":62 + /* "_cdec.pyx":61 * del config_stream * self.weights = DenseVector.__new__(DenseVector) * self.weights.vector = &self.dec.CurrentWeightVector() # <<<<<<<<<<<<<< @@ -18264,7 +18345,7 @@ static int __pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *_ */ __pyx_v_self->weights->vector = (&__pyx_v_self->dec->CurrentWeightVector()); - /* "_cdec.pyx":63 + /* "_cdec.pyx":62 * self.weights = DenseVector.__new__(DenseVector) * self.weights.vector = &self.dec.CurrentWeightVector() * self.weights.owned = True # <<<<<<<<<<<<<< @@ -18279,7 +18360,7 @@ static int __pyx_pf_5_cdec_7Decoder___cinit__(struct __pyx_obj_5_cdec_Decoder *_ __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("_cdec.Decoder.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_AddTraceback("_cdec.Decoder.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_formalism); @@ -18298,7 +18379,7 @@ static void __pyx_pw_5_cdec_7Decoder_3__dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyFinishContext(); } -/* "_cdec.pyx":65 +/* "_cdec.pyx":64 * self.weights.owned = True * * def __dealloc__(self): # <<<<<<<<<<<<<< @@ -18310,7 +18391,7 @@ static void __pyx_pf_5_cdec_7Decoder_2__dealloc__(CYTHON_UNUSED struct __pyx_obj __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); - /* "_cdec.pyx":66 + /* "_cdec.pyx":65 * * def __dealloc__(self): * del self.dec # <<<<<<<<<<<<<< @@ -18333,7 +18414,7 @@ static PyObject *__pyx_pw_5_cdec_7Decoder_7weights_1__get__(PyObject *__pyx_v_se return __pyx_r; } -/* "_cdec.pyx":69 +/* "_cdec.pyx":68 * * property weights: * def __get__(self): # <<<<<<<<<<<<<< @@ -18346,7 +18427,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_7weights___get__(struct __pyx_obj_5_cd __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); - /* "_cdec.pyx":70 + /* "_cdec.pyx":69 * property weights: * def __get__(self): * return self.weights # <<<<<<<<<<<<<< @@ -18376,7 +18457,7 @@ static int __pyx_pw_5_cdec_7Decoder_7weights_3__set__(PyObject *__pyx_v_self, Py return __pyx_r; } -/* "_cdec.pyx":72 +/* "_cdec.pyx":71 * return self.weights * * def 
__set__(self, weights): # <<<<<<<<<<<<<< @@ -18403,7 +18484,7 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); - /* "_cdec.pyx":73 + /* "_cdec.pyx":72 * * def __set__(self, weights): * if isinstance(weights, DenseVector): # <<<<<<<<<<<<<< @@ -18416,7 +18497,7 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "_cdec.pyx":74 + /* "_cdec.pyx":73 * def __set__(self, weights): * if isinstance(weights, DenseVector): * self.weights.vector[0] = (<DenseVector> weights).vector[0] # <<<<<<<<<<<<<< @@ -18427,7 +18508,7 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De goto __pyx_L3; } - /* "_cdec.pyx":75 + /* "_cdec.pyx":74 * if isinstance(weights, DenseVector): * self.weights.vector[0] = (<DenseVector> weights).vector[0] * elif isinstance(weights, SparseVector): # <<<<<<<<<<<<<< @@ -18440,7 +18521,7 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "_cdec.pyx":76 + /* "_cdec.pyx":75 * self.weights.vector[0] = (<DenseVector> weights).vector[0] * elif isinstance(weights, SparseVector): * self.weights.vector.clear() # <<<<<<<<<<<<<< @@ -18449,7 +18530,7 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De */ __pyx_v_self->weights->vector->clear(); - /* "_cdec.pyx":77 + /* "_cdec.pyx":76 * elif isinstance(weights, SparseVector): * self.weights.vector.clear() * ((<SparseVector> weights).vector[0]).init_vector(self.weights.vector) # <<<<<<<<<<<<<< @@ -18460,7 +18541,7 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De goto __pyx_L3; } - /* "_cdec.pyx":78 + /* "_cdec.pyx":77 * self.weights.vector.clear() * ((<SparseVector> weights).vector[0]).init_vector(self.weights.vector) * elif isinstance(weights, dict): # <<<<<<<<<<<<<< @@ -18473,7 +18554,7 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "_cdec.pyx":79 + /* "_cdec.pyx":78 * ((<SparseVector> weights).vector[0]).init_vector(self.weights.vector) * elif isinstance(weights, dict): * self.weights.vector.clear() # <<<<<<<<<<<<<< @@ -18482,23 +18563,23 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De */ __pyx_v_self->weights->vector->clear(); - /* "_cdec.pyx":80 + /* "_cdec.pyx":79 * elif isinstance(weights, dict): * self.weights.vector.clear() * for fname, fval in weights.items(): # <<<<<<<<<<<<<< * self.weights[fname] = fval * else: */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_weights, __pyx_n_s__items); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(__pyx_v_weights, __pyx_n_s__items); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
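The weights-setter hunks around this point only shift the cited .pyx line numbers by one; the behavior is unchanged. Per the quoted source, a DenseVector argument is copied wholesale (vector[0] = vector[0]), a SparseVector is projected into the dense vector via init_vector() after a clear(), and a dict is applied key by key. A minimal Python sketch of that dispatch; DenseVector and SparseVector stand for the _cdec extension types, and copy_from() is an illustrative stand-in for the pointer-level copy, not a real method:

    def set_weights(decoder, weights):
        # Mirrors the dispatch at _cdec.pyx:72-82 (post-patch numbering).
        if isinstance(weights, DenseVector):
            decoder.weights.copy_from(weights)       # vector[0] = vector[0]
        elif isinstance(weights, SparseVector):
            decoder.weights.clear()                  # then project sparse -> dense
            weights.init_vector(decoder.weights)
        elif isinstance(weights, dict):
            decoder.weights.clear()
            for fname, fval in weights.items():
                decoder.weights[fname] = fval
        else:
            raise TypeError('cannot initialize weights with %s' % type(weights))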
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyList_CheckExact(__pyx_t_3) || PyTuple_CheckExact(__pyx_t_3)) { __pyx_t_1 = __pyx_t_3; __Pyx_INCREF(__pyx_t_1); __pyx_t_4 = 0; __pyx_t_5 = NULL; } else { - __pyx_t_4 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_4 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = Py_TYPE(__pyx_t_1)->tp_iternext; } @@ -18507,23 +18588,23 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De if (!__pyx_t_5 && PyList_CheckExact(__pyx_t_1)) { if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_3 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; + __pyx_t_3 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_5 && PyTuple_CheckExact(__pyx_t_1)) { if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else - __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_3 = __pyx_t_5(__pyx_t_1); if (unlikely(!__pyx_t_3)) { if (PyErr_Occurred()) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear(); - else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + else {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } @@ -18539,7 +18620,7 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { @@ -18552,14 +18633,14 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_7); #else - __pyx_t_6 = PySequence_ITEM(sequence, 0); if 
(unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_t_7 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_6 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_7 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { Py_ssize_t index = -1; - __pyx_t_8 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_8 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_9 = Py_TYPE(__pyx_t_8)->tp_iternext; @@ -18567,7 +18648,7 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De __Pyx_GOTREF(__pyx_t_6); index = 1; __pyx_t_7 = __pyx_t_9(__pyx_t_8); if (unlikely(!__pyx_t_7)) goto __pyx_L6_unpacking_failed; __Pyx_GOTREF(__pyx_t_7); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_9(__pyx_t_8), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_IternextUnpackEndCheck(__pyx_t_9(__pyx_t_8), 2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_9 = NULL; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_unpacking_done; @@ -18575,7 +18656,7 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_9 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 79; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_L7_unpacking_done:; } __Pyx_XDECREF(__pyx_v_fname); @@ -18585,40 +18666,40 @@ static int __pyx_pf_5_cdec_7Decoder_7weights_2__set__(struct __pyx_obj_5_cdec_De __pyx_v_fval = __pyx_t_7; __pyx_t_7 = 0; - /* "_cdec.pyx":81 + /* "_cdec.pyx":80 * self.weights.vector.clear() * for fname, fval in weights.items(): * self.weights[fname] = fval # <<<<<<<<<<<<<< * else: * raise TypeError('cannot initialize weights with %s' % type(weights)) */ - if (PyObject_SetItem(((PyObject *)__pyx_v_self->weights), __pyx_v_fname, __pyx_v_fval) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetItem(((PyObject *)__pyx_v_self->weights), __pyx_v_fname, __pyx_v_fval) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L3; } /*else*/ { - /* "_cdec.pyx":83 + /* "_cdec.pyx":82 * self.weights[fname] = fval * else: * raise TypeError('cannot initialize weights with %s' % type(weights)) # <<<<<<<<<<<<<< * * property formalism: */ - __pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_50), ((PyObject *)Py_TYPE(__pyx_v_weights))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + 
__pyx_t_1 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_48), ((PyObject *)Py_TYPE(__pyx_v_weights))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L3:; @@ -18650,7 +18731,7 @@ static PyObject *__pyx_pw_5_cdec_7Decoder_9formalism_1__get__(PyObject *__pyx_v_ return __pyx_r; } -/* "_cdec.pyx":86 +/* "_cdec.pyx":85 * * property formalism: * def __get__(self): # <<<<<<<<<<<<<< @@ -18669,7 +18750,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_9formalism___get__(struct __pyx_obj_5_ int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); - /* "_cdec.pyx":87 + /* "_cdec.pyx":86 * property formalism: * def __get__(self): * cdef variables_map* conf = &self.dec.GetConf() # <<<<<<<<<<<<<< @@ -18678,7 +18759,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_9formalism___get__(struct __pyx_obj_5_ */ __pyx_v_conf = (&__pyx_v_self->dec->GetConf()); - /* "_cdec.pyx":88 + /* "_cdec.pyx":87 * def __get__(self): * cdef variables_map* conf = &self.dec.GetConf() * return str(conf[0]['formalism'].as_str().c_str()) # <<<<<<<<<<<<<< @@ -18686,14 +18767,14 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_9formalism___get__(struct __pyx_obj_5_ * def read_weights(self, weights): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyBytes_FromString(((__pyx_v_conf[0])[__pyx_k__formalism]).as<std::string>().c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyBytes_FromString(((__pyx_v_conf[0])[__pyx_k__formalism]).as<std::string>().c_str()); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_1)); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_1)); __Pyx_GIVEREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject 
*)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 87; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; @@ -18715,6 +18796,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_9formalism___get__(struct __pyx_obj_5_ /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_7Decoder_5read_weights(PyObject *__pyx_v_self, PyObject *__pyx_v_weights); /*proto*/ +static char __pyx_doc_5_cdec_7Decoder_4read_weights[] = "decoder.read_weights(filename): Read decoder weights from a file."; static PyObject *__pyx_pw_5_cdec_7Decoder_5read_weights(PyObject *__pyx_v_self, PyObject *__pyx_v_weights) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations @@ -18724,12 +18806,12 @@ static PyObject *__pyx_pw_5_cdec_7Decoder_5read_weights(PyObject *__pyx_v_self, return __pyx_r; } -/* "_cdec.pyx":90 +/* "_cdec.pyx":89 * return str(conf[0]['formalism'].as_str().c_str()) * * def read_weights(self, weights): # <<<<<<<<<<<<<< + * """decoder.read_weights(filename): Read decoder weights from a file.""" * with open(weights) as fp: - * for line in fp: */ static PyObject *__pyx_pf_5_cdec_7Decoder_4read_weights(struct __pyx_obj_5_cdec_Decoder *__pyx_v_self, PyObject *__pyx_v_weights) { @@ -18761,8 +18843,8 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_4read_weights(struct __pyx_obj_5_cdec_ __Pyx_RefNannySetupContext("read_weights", 0); /* "_cdec.pyx":91 - * * def read_weights(self, weights): + * """decoder.read_weights(filename): Read decoder weights from a file.""" * with open(weights) as fp: # <<<<<<<<<<<<<< * for line in fp: * if line.strip().startswith('#'): continue @@ -18796,7 +18878,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_4read_weights(struct __pyx_obj_5_cdec_ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "_cdec.pyx":92 - * def read_weights(self, weights): + * """decoder.read_weights(filename): Read decoder weights from a file.""" * with open(weights) as fp: * for line in fp: # <<<<<<<<<<<<<< * if line.strip().startswith('#'): continue @@ -18814,16 +18896,16 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_4read_weights(struct __pyx_obj_5_cdec_ if (!__pyx_t_9 && PyList_CheckExact(__pyx_t_4)) { if (__pyx_t_8 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); __pyx_t_8++; + __pyx_t_2 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); __pyx_t_8++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L7_error;} #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_4, __pyx_t_8); __pyx_t_8++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L7_error;}; + __pyx_t_2 = PySequence_ITEM(__pyx_t_4, __pyx_t_8); __pyx_t_8++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L7_error;} #endif } else if (!__pyx_t_9 && PyTuple_CheckExact(__pyx_t_4)) { if (__pyx_t_8 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); __pyx_t_8++; + __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); 
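The docstring added here documents the format read_weights() has always parsed: one 'FeatureName value' pair per line, with '#' lines skipped as comments (see the quoted .pyx lines nearby, including self.weights[fname.strip()] = float(value)). A standalone sketch of the same loop, assuming well-formed whitespace-separated pairs:

    def read_weights(path):
        # Lines starting with '#' are comments; everything else is
        # 'FeatureName value', parsed into a feature -> float mapping.
        weights = {}
        with open(path) as fp:
            for line in fp:
                if line.strip().startswith('#'):
                    continue
                fname, value = line.split()
                weights[fname.strip()] = float(value)
        return weights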
__pyx_t_8++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L7_error;} #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_4, __pyx_t_8); __pyx_t_8++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L7_error;}; + __pyx_t_2 = PySequence_ITEM(__pyx_t_4, __pyx_t_8); __pyx_t_8++; if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L7_error;} #endif } else { __pyx_t_2 = __pyx_t_9(__pyx_t_4); @@ -18855,7 +18937,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_4read_weights(struct __pyx_obj_5_cdec_ __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__startswith); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L7_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_52), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L7_error;} + __pyx_t_1 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_k_tuple_50), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L7_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L7_error;} @@ -18968,8 +19050,8 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_4read_weights(struct __pyx_obj_5_cdec_ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; /* "_cdec.pyx":91 - * * def read_weights(self, weights): + * """decoder.read_weights(filename): Read decoder weights from a file.""" * with open(weights) as fp: # <<<<<<<<<<<<<< * for line in fp: * if line.strip().startswith('#'): continue @@ -19031,7 +19113,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_4read_weights(struct __pyx_obj_5_cdec_ } /*finally:*/ { if (__pyx_t_3) { - __pyx_t_7 = PyObject_Call(__pyx_t_3, __pyx_k_tuple_53, NULL); + __pyx_t_7 = PyObject_Call(__pyx_t_3, __pyx_k_tuple_51, NULL); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); @@ -19069,6 +19151,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_4read_weights(struct __pyx_obj_5_cdec_ /* Python wrapper */ static PyObject *__pyx_pw_5_cdec_7Decoder_7translate(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5_cdec_7Decoder_6translate[] = "decoder.translate(sentence, grammar=None) -> Hypergraph\n Translate a sentence (string/Lattice) with a grammar (string/list of rules)."; static PyObject *__pyx_pw_5_cdec_7Decoder_7translate(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_sentence = 0; PyObject *__pyx_v_grammar = 0; @@ -19083,8 +19166,8 @@ static PyObject *__pyx_pw_5_cdec_7Decoder_7translate(PyObject *__pyx_v_self, PyO * self.weights[fname.strip()] = float(value) * * def translate(self, sentence, grammar=None): # <<<<<<<<<<<<<< - * cdef bytes input_str - * if isinstance(sentence, basestring): + * """decoder.translate(sentence, grammar=None) -> Hypergraph + * Translate a sentence (string/Lattice) with a grammar (string/list of rules).""" */ values[1] = ((PyObject *)Py_None); if (unlikely(__pyx_kwds)) { @@ -19143,14 
+19226,14 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_6translate(struct __pyx_obj_5_cdec_Dec PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; - char *__pyx_t_4; + std::string __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("translate", 0); - /* "_cdec.pyx":99 - * def translate(self, sentence, grammar=None): + /* "_cdec.pyx":101 + * Translate a sentence (string/Lattice) with a grammar (string/list of rules).""" * cdef bytes input_str * if isinstance(sentence, basestring): # <<<<<<<<<<<<<< * input_str = as_str(sentence.strip()) @@ -19158,31 +19241,31 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_6translate(struct __pyx_obj_5_cdec_Dec */ __pyx_t_1 = __pyx_builtin_basestring; __Pyx_INCREF(__pyx_t_1); - __pyx_t_2 = PyObject_IsInstance(__pyx_v_sentence, __pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_IsInstance(__pyx_v_sentence, __pyx_t_1); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "_cdec.pyx":100 + /* "_cdec.pyx":102 * cdef bytes input_str * if isinstance(sentence, basestring): * input_str = as_str(sentence.strip()) # <<<<<<<<<<<<<< * elif isinstance(sentence, Lattice): * input_str = str(sentence) # PLF format */ - __pyx_t_1 = PyObject_GetAttr(__pyx_v_sentence, __pyx_n_s__strip); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_GetAttr(__pyx_v_sentence, __pyx_n_s__strip); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyBytes_FromString(__pyx_f_5_cdec_as_str(__pyx_t_3, NULL)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(((PyObject *)__pyx_t_1)); + __pyx_t_1 = ((PyObject *)__pyx_f_5_cdec_as_str(__pyx_t_3, NULL)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_input_str = __pyx_t_1; + __pyx_v_input_str = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L3; } - /* "_cdec.pyx":101 + /* "_cdec.pyx":103 * if isinstance(sentence, basestring): * input_str = as_str(sentence.strip()) * elif isinstance(sentence, Lattice): # <<<<<<<<<<<<<< @@ -19195,99 +19278,103 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_6translate(struct __pyx_obj_5_cdec_Dec __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { - /* "_cdec.pyx":102 + /* "_cdec.pyx":104 * input_str = as_str(sentence.strip()) * elif isinstance(sentence, Lattice): * input_str = str(sentence) # PLF format # <<<<<<<<<<<<<< * else: * raise TypeError('Cannot translate input type %s' % 
type(sentence)) */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_sentence); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_sentence); __Pyx_GIVEREF(__pyx_v_sentence); - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)(&PyString_Type))), ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; - if (!(likely(PyBytes_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected bytes, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!(likely(PyBytes_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected bytes, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_input_str = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L3; } /*else*/ { - /* "_cdec.pyx":104 + /* "_cdec.pyx":106 * input_str = str(sentence) # PLF format * else: * raise TypeError('Cannot translate input type %s' % type(sentence)) # <<<<<<<<<<<<<< * if grammar: * if isinstance(grammar, basestring): */ - __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_54), ((PyObject *)Py_TYPE(__pyx_v_sentence))); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_52), ((PyObject *)Py_TYPE(__pyx_v_sentence))); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_t_3)); __Pyx_GIVEREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(__pyx_builtin_TypeError, ((PyObject *)__pyx_t_1), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} } __pyx_L3:; - /* "_cdec.pyx":105 + /* "_cdec.pyx":107 * else: * raise TypeError('Cannot translate input type %s' % type(sentence)) * if grammar: # <<<<<<<<<<<<<< * if isinstance(grammar, basestring): - * self.dec.AddSupplementalGrammarFromString(string(as_str(grammar))) + * self.dec.AddSupplementalGrammarFromString(as_str(grammar)) */ - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_grammar); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_grammar); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_2) { - /* "_cdec.pyx":106 + /* "_cdec.pyx":108 * raise TypeError('Cannot translate input type %s' % type(sentence)) * if grammar: * if isinstance(grammar, basestring): # <<<<<<<<<<<<<< - * self.dec.AddSupplementalGrammarFromString(string(as_str(grammar))) + * self.dec.AddSupplementalGrammarFromString(as_str(grammar)) * else: */ __pyx_t_3 = __pyx_builtin_basestring; __Pyx_INCREF(__pyx_t_3); - __pyx_t_2 = PyObject_IsInstance(__pyx_v_grammar, __pyx_t_3); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 106; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_IsInstance(__pyx_v_grammar, __pyx_t_3); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_2) { - /* "_cdec.pyx":107 + /* "_cdec.pyx":109 * if grammar: * if isinstance(grammar, basestring): - * self.dec.AddSupplementalGrammarFromString(string(as_str(grammar))) # <<<<<<<<<<<<<< + * self.dec.AddSupplementalGrammarFromString(as_str(grammar)) # <<<<<<<<<<<<<< * else: * self.dec.AddSupplementalGrammar(TextGrammar(grammar).grammar[0]) */ - __pyx_v_self->dec->AddSupplementalGrammarFromString(std::string(__pyx_f_5_cdec_as_str(__pyx_v_grammar, NULL))); + __pyx_t_3 = ((PyObject *)__pyx_f_5_cdec_as_str(__pyx_v_grammar, NULL)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __pyx_convert_string_from_py_(__pyx_t_3); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_self->dec->AddSupplementalGrammarFromString(__pyx_t_4); goto __pyx_L5; } /*else*/ { - /* "_cdec.pyx":109 - * self.dec.AddSupplementalGrammarFromString(string(as_str(grammar))) + /* "_cdec.pyx":111 + * self.dec.AddSupplementalGrammarFromString(as_str(grammar)) * else: * self.dec.AddSupplementalGrammar(TextGrammar(grammar).grammar[0]) # <<<<<<<<<<<<<< * cdef decoder.BasicObserver observer = decoder.BasicObserver() - * self.dec.Decode(string(input_str), &observer) + * self.dec.Decode(input_str, &observer) */ - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_grammar); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_grammar); __Pyx_GIVEREF(__pyx_v_grammar); - __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_TextGrammar)), ((PyObject *)__pyx_t_3), NULL); 
if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_TextGrammar)), ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __pyx_v_self->dec->AddSupplementalGrammar((((struct __pyx_obj_5_cdec_TextGrammar *)__pyx_t_1)->__pyx_base.grammar[0])); @@ -19298,28 +19385,28 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_6translate(struct __pyx_obj_5_cdec_Dec } __pyx_L4:; - /* "_cdec.pyx":110 + /* "_cdec.pyx":112 * else: * self.dec.AddSupplementalGrammar(TextGrammar(grammar).grammar[0]) * cdef decoder.BasicObserver observer = decoder.BasicObserver() # <<<<<<<<<<<<<< - * self.dec.Decode(string(input_str), &observer) + * self.dec.Decode(input_str, &observer) * if observer.hypergraph == NULL: */ __pyx_v_observer = BasicObserver(); - /* "_cdec.pyx":111 + /* "_cdec.pyx":113 * self.dec.AddSupplementalGrammar(TextGrammar(grammar).grammar[0]) * cdef decoder.BasicObserver observer = decoder.BasicObserver() - * self.dec.Decode(string(input_str), &observer) # <<<<<<<<<<<<<< + * self.dec.Decode(input_str, &observer) # <<<<<<<<<<<<<< * if observer.hypergraph == NULL: * raise ParseFailed() */ - __pyx_t_4 = PyBytes_AsString(((PyObject *)__pyx_v_input_str)); if (unlikely((!__pyx_t_4) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_v_self->dec->Decode(std::string(__pyx_t_4), (&__pyx_v_observer)); + __pyx_t_4 = __pyx_convert_string_from_py_(((PyObject *)__pyx_v_input_str)); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_v_self->dec->Decode(__pyx_t_4, (&__pyx_v_observer)); - /* "_cdec.pyx":112 + /* "_cdec.pyx":114 * cdef decoder.BasicObserver observer = decoder.BasicObserver() - * self.dec.Decode(string(input_str), &observer) + * self.dec.Decode(input_str, &observer) * if observer.hypergraph == NULL: # <<<<<<<<<<<<<< * raise ParseFailed() * cdef Hypergraph hg = Hypergraph() @@ -19327,38 +19414,38 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_6translate(struct __pyx_obj_5_cdec_Dec __pyx_t_2 = (__pyx_v_observer.hypergraph == NULL); if (__pyx_t_2) { - /* "_cdec.pyx":113 - * self.dec.Decode(string(input_str), &observer) + /* "_cdec.pyx":115 + * self.dec.Decode(input_str, &observer) * if observer.hypergraph == NULL: * raise ParseFailed() # <<<<<<<<<<<<<< * cdef Hypergraph hg = Hypergraph() * hg.hg = new hypergraph.Hypergraph(observer.hypergraph[0]) */ - __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__ParseFailed); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__ParseFailed); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
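Besides picking up a docstring, translate() now routes both the input sentence and any grammar string through the new __pyx_convert_string_from_py_ helper defined above, which builds the std::string from an explicit (pointer, length) pair; unlike the old PyBytes_AsString/char* path, which stopped at the first NUL, this keeps embedded NUL bytes intact. A usage sketch matching the added docstring; the import name cdec and the file names are assumptions for illustration:

    import cdec

    d = cdec.Decoder(formalism='scfg')   # must be a formalism listed at _cdec.pyx:53-54
    d.read_weights('weights.txt')
    # grammar: a string of rules, or a list of rules (wrapped in TextGrammar)
    hg = d.translate('el gato .', grammar=open('grammar.scfg').read())
    print(hg.viterbi())                  # returns a Hypergraph, per the method table below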
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; - /* "_cdec.pyx":114 + /* "_cdec.pyx":116 * if observer.hypergraph == NULL: * raise ParseFailed() * cdef Hypergraph hg = Hypergraph() # <<<<<<<<<<<<<< * hg.hg = new hypergraph.Hypergraph(observer.hypergraph[0]) * return hg */ - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Hypergraph)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Hypergraph)), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_v_hg = ((struct __pyx_obj_5_cdec_Hypergraph *)__pyx_t_3); __pyx_t_3 = 0; - /* "_cdec.pyx":115 + /* "_cdec.pyx":117 * raise ParseFailed() * cdef Hypergraph hg = Hypergraph() * hg.hg = new hypergraph.Hypergraph(observer.hypergraph[0]) # <<<<<<<<<<<<<< @@ -19366,7 +19453,7 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_6translate(struct __pyx_obj_5_cdec_Dec */ __pyx_v_hg->hg = new Hypergraph((__pyx_v_observer.hypergraph[0])); - /* "_cdec.pyx":116 + /* "_cdec.pyx":118 * cdef Hypergraph hg = Hypergraph() * hg.hg = new hypergraph.Hypergraph(observer.hypergraph[0]) * return hg # <<<<<<<<<<<<<< @@ -19391,6 +19478,44 @@ static PyObject *__pyx_pf_5_cdec_7Decoder_6translate(struct __pyx_obj_5_cdec_Dec return __pyx_r; } +/* "string.from_py":11 + * + * @cname("__pyx_convert_string_from_py_") + * cdef string __pyx_convert_string_from_py_(object o) except *: # <<<<<<<<<<<<<< + * return string(<char*>o, len(o)) + * + */ + +static std::string __pyx_convert_string_from_py_(PyObject *__pyx_v_o) { + std::string __pyx_r; + __Pyx_RefNannyDeclarations + char *__pyx_t_1; + Py_ssize_t __pyx_t_2; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_string_from_py_", 0); + + /* "string.from_py":12 + * @cname("__pyx_convert_string_from_py_") + * cdef string __pyx_convert_string_from_py_(object o) except *: + * return string(<char*>o, len(o)) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = PyBytes_AsString(__pyx_v_o); if (unlikely((!__pyx_t_1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_2 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = std::string(((char *)__pyx_t_1), __pyx_t_2); + goto __pyx_L0; + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("string.from_py.__pyx_convert_string_from_py_", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + static PyObject *__pyx_tp_new_5_cdec_DenseVector(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { PyObject *o = (*t->tp_alloc)(t, 0); if (!o) return 0; @@ -19429,8 +19554,8 @@ static int __pyx_mp_ass_subscript_5_cdec_DenseVector(PyObject *o, PyObject *i, P } static PyMethodDef __pyx_methods_5_cdec_DenseVector[] 
= { - {__Pyx_NAMESTR("dot"), (PyCFunction)__pyx_pw_5_cdec_11DenseVector_14dot, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("tosparse"), (PyCFunction)__pyx_pw_5_cdec_11DenseVector_16tosparse, METH_NOARGS, __Pyx_DOCSTR(0)}, + {__Pyx_NAMESTR("dot"), (PyCFunction)__pyx_pw_5_cdec_11DenseVector_14dot, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_11DenseVector_13dot)}, + {__Pyx_NAMESTR("tosparse"), (PyCFunction)__pyx_pw_5_cdec_11DenseVector_16tosparse, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_11DenseVector_15tosparse)}, {0, 0, 0, 0} }; @@ -19626,8 +19751,8 @@ static int __pyx_mp_ass_subscript_5_cdec_SparseVector(PyObject *o, PyObject *i, } static PyMethodDef __pyx_methods_5_cdec_SparseVector[] = { - {__Pyx_NAMESTR("copy"), (PyCFunction)__pyx_pw_5_cdec_12SparseVector_5copy, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("dot"), (PyCFunction)__pyx_pw_5_cdec_12SparseVector_14dot, METH_O, __Pyx_DOCSTR(0)}, + {__Pyx_NAMESTR("copy"), (PyCFunction)__pyx_pw_5_cdec_12SparseVector_5copy, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_12SparseVector_4copy)}, + {__Pyx_NAMESTR("dot"), (PyCFunction)__pyx_pw_5_cdec_12SparseVector_14dot, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_12SparseVector_13dot)}, {0, 0, 0, 0} }; @@ -19796,7 +19921,7 @@ static PyObject *__pyx_tp_new_5_cdec_NT(PyTypeObject *t, CYTHON_UNUSED PyObject static void __pyx_tp_dealloc_5_cdec_NT(PyObject *o) { struct __pyx_obj_5_cdec_NT *p = (struct __pyx_obj_5_cdec_NT *)o; - Py_XDECREF(((PyObject *)p->cat)); + Py_CLEAR(p->cat); (*Py_TYPE(o)->tp_free)(o); } @@ -20824,9 +20949,6 @@ static PyTypeObject __pyx_type_5_cdec_Grammar = { static PyObject *__pyx_tp_new_5_cdec_TextGrammar(PyTypeObject *t, PyObject *a, PyObject *k) { PyObject *o = __pyx_tp_new_5_cdec_Grammar(t, a, k); if (!o) return 0; - if (__pyx_pw_5_cdec_11TextGrammar_1__cinit__(o, a, k) < 0) { - Py_DECREF(o); o = 0; - } return o; } @@ -20976,7 +21098,7 @@ static PyTypeObject __pyx_type_5_cdec_TextGrammar = { 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ - 0, /*tp_init*/ + __pyx_pw_5_cdec_11TextGrammar_1__init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_5_cdec_TextGrammar, /*tp_new*/ 0, /*tp_free*/ @@ -21032,21 +21154,21 @@ static PyObject *__pyx_getprop_5_cdec_10Hypergraph_npaths(PyObject *o, CYTHON_UN } static PyMethodDef __pyx_methods_5_cdec_Hypergraph[] = { - {__Pyx_NAMESTR("viterbi"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_3viterbi, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("viterbi_trees"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_5viterbi_trees, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("viterbi_features"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_7viterbi_features, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("viterbi_joshua"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_9viterbi_joshua, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("kbest"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_11kbest, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("kbest_trees"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_14kbest_trees, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("kbest_features"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_17kbest_features, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("sample"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_20sample, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("sample_trees"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_23sample_trees, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("intersect"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_26intersect, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("prune"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_28prune, 
METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("lattice"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_30lattice, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("plf"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_32plf, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("reweight"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_34reweight, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("inside_outside"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_36inside_outside, METH_NOARGS, __Pyx_DOCSTR(0)}, + {__Pyx_NAMESTR("viterbi"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_3viterbi, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_2viterbi)}, + {__Pyx_NAMESTR("viterbi_trees"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_5viterbi_trees, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_4viterbi_trees)}, + {__Pyx_NAMESTR("viterbi_features"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_7viterbi_features, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_6viterbi_features)}, + {__Pyx_NAMESTR("viterbi_joshua"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_9viterbi_joshua, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_8viterbi_joshua)}, + {__Pyx_NAMESTR("kbest"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_11kbest, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_10kbest)}, + {__Pyx_NAMESTR("kbest_trees"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_14kbest_trees, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_13kbest_trees)}, + {__Pyx_NAMESTR("kbest_features"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_17kbest_features, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_16kbest_features)}, + {__Pyx_NAMESTR("sample"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_20sample, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_19sample)}, + {__Pyx_NAMESTR("sample_trees"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_23sample_trees, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_22sample_trees)}, + {__Pyx_NAMESTR("intersect"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_26intersect, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_25intersect)}, + {__Pyx_NAMESTR("prune"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_28prune, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_27prune)}, + {__Pyx_NAMESTR("lattice"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_30lattice, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_29lattice)}, + {__Pyx_NAMESTR("plf"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_32plf, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_31plf)}, + {__Pyx_NAMESTR("reweight"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_34reweight, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_33reweight)}, + {__Pyx_NAMESTR("inside_outside"), (PyCFunction)__pyx_pw_5_cdec_10Hypergraph_36inside_outside, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_10Hypergraph_35inside_outside)}, {0, 0, 0, 0} }; @@ -21225,7 +21347,7 @@ static PyObject *__pyx_tp_new_5_cdec_HypergraphEdge(PyTypeObject *t, CYTHON_UNUS static void __pyx_tp_dealloc_5_cdec_HypergraphEdge(PyObject *o) { struct __pyx_obj_5_cdec_HypergraphEdge *p = (struct __pyx_obj_5_cdec_HypergraphEdge *)o; - Py_XDECREF(((PyObject *)p->trule)); + Py_CLEAR(p->trule); (*Py_TYPE(o)->tp_free)(o); } @@ -21686,8 +21808,8 @@ static int __pyx_mp_ass_subscript_5_cdec_Lattice(PyObject *o, PyObject *i, PyObj static PyMethodDef __pyx_methods_5_cdec_Lattice[] = { {__Pyx_NAMESTR("__unicode__"), (PyCFunction)__pyx_pw_5_cdec_7Lattice_15__unicode__, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("todot"), 
(PyCFunction)__pyx_pw_5_cdec_7Lattice_20todot, METH_NOARGS, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("as_hypergraph"), (PyCFunction)__pyx_pw_5_cdec_7Lattice_22as_hypergraph, METH_NOARGS, __Pyx_DOCSTR(0)}, + {__Pyx_NAMESTR("todot"), (PyCFunction)__pyx_pw_5_cdec_7Lattice_20todot, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_7Lattice_19todot)}, + {__Pyx_NAMESTR("as_hypergraph"), (PyCFunction)__pyx_pw_5_cdec_7Lattice_22as_hypergraph, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_7Lattice_21as_hypergraph)}, {0, 0, 0, 0} }; @@ -22270,7 +22392,7 @@ static PyObject *__pyx_sq_item_5_cdec_CandidateSet(PyObject *o, Py_ssize_t i) { } static PyMethodDef __pyx_methods_5_cdec_CandidateSet[] = { - {__Pyx_NAMESTR("add_kbest"), (PyCFunction)__pyx_pw_5_cdec_12CandidateSet_12add_kbest, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, + {__Pyx_NAMESTR("add_kbest"), (PyCFunction)__pyx_pw_5_cdec_12CandidateSet_12add_kbest, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5_cdec_12CandidateSet_11add_kbest)}, {0, 0, 0, 0} }; @@ -22448,8 +22570,8 @@ static void __pyx_tp_dealloc_5_cdec_SegmentEvaluator(PyObject *o) { } static PyMethodDef __pyx_methods_5_cdec_SegmentEvaluator[] = { - {__Pyx_NAMESTR("evaluate"), (PyCFunction)__pyx_pw_5_cdec_16SegmentEvaluator_3evaluate, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("candidate_set"), (PyCFunction)__pyx_pw_5_cdec_16SegmentEvaluator_5candidate_set, METH_NOARGS, __Pyx_DOCSTR(0)}, + {__Pyx_NAMESTR("evaluate"), (PyCFunction)__pyx_pw_5_cdec_16SegmentEvaluator_3evaluate, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_16SegmentEvaluator_2evaluate)}, + {__Pyx_NAMESTR("candidate_set"), (PyCFunction)__pyx_pw_5_cdec_16SegmentEvaluator_5candidate_set, METH_NOARGS, __Pyx_DOCSTR(__pyx_doc_5_cdec_16SegmentEvaluator_4candidate_set)}, {0, 0, 0, 0} }; @@ -22801,7 +22923,7 @@ static PyObject *__pyx_tp_new_5_cdec_Metric(PyTypeObject *t, CYTHON_UNUSED PyObj static void __pyx_tp_dealloc_5_cdec_Metric(PyObject *o) { struct __pyx_obj_5_cdec_Metric *p = (struct __pyx_obj_5_cdec_Metric *)o; - Py_XDECREF(((PyObject *)p->scorer)); + Py_CLEAR(p->scorer); (*Py_TYPE(o)->tp_free)(o); } @@ -22983,15 +23105,12 @@ static PyTypeObject __pyx_type_5_cdec_Metric = { #endif }; -static PyObject *__pyx_tp_new_5_cdec_Decoder(PyTypeObject *t, PyObject *a, PyObject *k) { +static PyObject *__pyx_tp_new_5_cdec_Decoder(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_obj_5_cdec_Decoder *p; PyObject *o = (*t->tp_alloc)(t, 0); if (!o) return 0; p = ((struct __pyx_obj_5_cdec_Decoder *)o); p->weights = ((struct __pyx_obj_5_cdec_DenseVector *)Py_None); Py_INCREF(Py_None); - if (__pyx_pw_5_cdec_7Decoder_1__cinit__(o, a, k) < 0) { - Py_DECREF(o); o = 0; - } return o; } @@ -23006,7 +23125,7 @@ static void __pyx_tp_dealloc_5_cdec_Decoder(PyObject *o) { --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } - Py_XDECREF(((PyObject *)p->weights)); + Py_CLEAR(p->weights); (*Py_TYPE(o)->tp_free)(o); } @@ -23047,8 +23166,8 @@ static PyObject *__pyx_getprop_5_cdec_7Decoder_formalism(PyObject *o, CYTHON_UNU } static PyMethodDef __pyx_methods_5_cdec_Decoder[] = { - {__Pyx_NAMESTR("read_weights"), (PyCFunction)__pyx_pw_5_cdec_7Decoder_5read_weights, METH_O, __Pyx_DOCSTR(0)}, - {__Pyx_NAMESTR("translate"), (PyCFunction)__pyx_pw_5_cdec_7Decoder_7translate, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}, + {__Pyx_NAMESTR("read_weights"), (PyCFunction)__pyx_pw_5_cdec_7Decoder_5read_weights, METH_O, __Pyx_DOCSTR(__pyx_doc_5_cdec_7Decoder_4read_weights)}, + {__Pyx_NAMESTR("translate"), 
(PyCFunction)__pyx_pw_5_cdec_7Decoder_7translate, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5_cdec_7Decoder_6translate)}, {0, 0, 0, 0} }; @@ -23196,7 +23315,7 @@ static PyTypeObject __pyx_type_5_cdec_Decoder = { 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ - 0, /*tp_init*/ + __pyx_pw_5_cdec_7Decoder_1__init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_5_cdec_Decoder, /*tp_new*/ 0, /*tp_free*/ @@ -23223,7 +23342,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct____iter__(PyTypeObject * static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct____iter__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct____iter__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct____iter__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -23414,7 +23533,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_1___iter__(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_1___iter__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_1___iter__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_1___iter__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -23605,7 +23724,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_2__phrase(PyTypeObject * static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_2__phrase(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_2__phrase *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_2__phrase *)o; - Py_XDECREF(p->__pyx_v_phrase); + Py_CLEAR(p->__pyx_v_phrase); (*Py_TYPE(o)->tp_free)(o); } @@ -23798,9 +23917,9 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_3_genexpr(PyTypeObject * static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_3_genexpr(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_3_genexpr *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_3_genexpr *)o; - Py_XDECREF(((PyObject *)p->__pyx_outer_scope)); - Py_XDECREF(p->__pyx_v_w); - Py_XDECREF(p->__pyx_t_0); + Py_CLEAR(p->__pyx_outer_scope); + Py_CLEAR(p->__pyx_v_w); + Py_CLEAR(p->__pyx_t_0); (*Py_TYPE(o)->tp_free)(o); } @@ -24003,7 +24122,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_4___get__(PyTypeObject * static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_4___get__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_4___get__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_4___get__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -24194,7 +24313,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_5___str__(PyTypeObject * static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_5___str__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_5___str__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_5___str__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -24387,9 +24506,9 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_6_genexpr(PyTypeObject * static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_6_genexpr(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_6_genexpr *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_6_genexpr *)o; - Py_XDECREF(((PyObject *)p->__pyx_outer_scope)); - Py_XDECREF(p->__pyx_v_feat); - Py_XDECREF(p->__pyx_t_0); + Py_CLEAR(p->__pyx_outer_scope); + Py_CLEAR(p->__pyx_v_feat); + Py_CLEAR(p->__pyx_t_0); (*Py_TYPE(o)->tp_free)(o); } @@ -24593,8 +24712,8 @@ static PyObject 
*__pyx_tp_new_5_cdec___pyx_scope_struct_7___iter__(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_7___iter__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_7___iter__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_7___iter__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); - Py_XDECREF(((PyObject *)p->__pyx_v_trule)); + Py_CLEAR(p->__pyx_v_self); + Py_CLEAR(p->__pyx_v_trule); (*Py_TYPE(o)->tp_free)(o); } @@ -24792,8 +24911,8 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_8_kbest(PyTypeObject *t, static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_8_kbest(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_8_kbest *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_8_kbest *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); - Py_XDECREF(p->__pyx_v_size); + Py_CLEAR(p->__pyx_v_self); + Py_CLEAR(p->__pyx_v_size); (*Py_TYPE(o)->tp_free)(o); } @@ -24993,10 +25112,10 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_9_kbest_trees(PyTypeObje static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_9_kbest_trees(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_9_kbest_trees *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_9_kbest_trees *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_e_tree)); - Py_XDECREF(((PyObject *)p->__pyx_v_f_tree)); - Py_XDECREF(((PyObject *)p->__pyx_v_self)); - Py_XDECREF(p->__pyx_v_size); + Py_CLEAR(p->__pyx_v_e_tree); + Py_CLEAR(p->__pyx_v_f_tree); + Py_CLEAR(p->__pyx_v_self); + Py_CLEAR(p->__pyx_v_size); (*Py_TYPE(o)->tp_free)(o); } @@ -25207,9 +25326,9 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_10_kbest_features(PyType static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_10_kbest_features(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_10_kbest_features *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_10_kbest_features *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_fmap)); - Py_XDECREF(((PyObject *)p->__pyx_v_self)); - Py_XDECREF(p->__pyx_v_size); + Py_CLEAR(p->__pyx_v_fmap); + Py_CLEAR(p->__pyx_v_self); + Py_CLEAR(p->__pyx_v_size); (*Py_TYPE(o)->tp_free)(o); } @@ -25412,7 +25531,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_11_sample(PyTypeObject * static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_11_sample(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_11_sample *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_11_sample *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -25603,7 +25722,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_12_sample_trees(PyTypeOb static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_12_sample_trees(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_12_sample_trees *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_12_sample_trees *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -25794,7 +25913,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_13___get__(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_13___get__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_13___get__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_13___get__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -25985,7 +26104,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_14___get__(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_14___get__(PyObject *o) { struct 
__pyx_obj_5_cdec___pyx_scope_struct_14___get__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_14___get__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -26176,7 +26295,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_15___get__(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_15___get__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_15___get__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_15___get__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -26367,7 +26486,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_16___get__(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_16___get__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_16___get__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_16___get__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -26558,7 +26677,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_17___get__(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_17___get__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_17___get__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_17___get__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -26749,7 +26868,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_18___iter__(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_18___iter__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_18___iter__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_18___iter__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -26940,7 +27059,7 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_19_todot(PyTypeObject *t static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_19_todot(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_19_todot *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_19_todot *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -27137,13 +27256,13 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_20_lines(PyTypeObject *t static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_20_lines(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_20_lines *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_20_lines *)o; - Py_XDECREF(((PyObject *)p->__pyx_outer_scope)); - Py_XDECREF(p->__pyx_v_delta); - Py_XDECREF(p->__pyx_v_i); - Py_XDECREF(p->__pyx_v_label); - Py_XDECREF(p->__pyx_v_weight); - Py_XDECREF(p->__pyx_t_1); - Py_XDECREF(p->__pyx_t_3); + Py_CLEAR(p->__pyx_outer_scope); + Py_CLEAR(p->__pyx_v_delta); + Py_CLEAR(p->__pyx_v_i); + Py_CLEAR(p->__pyx_v_label); + Py_CLEAR(p->__pyx_v_weight); + Py_CLEAR(p->__pyx_t_1); + Py_CLEAR(p->__pyx_t_3); (*Py_TYPE(o)->tp_free)(o); } @@ -27372,9 +27491,9 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_21___iter__(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_21___iter__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_21___iter__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_21___iter__ *)o; - Py_XDECREF(p->__pyx_v_i); - Py_XDECREF(((PyObject *)p->__pyx_v_self)); - Py_XDECREF(p->__pyx_t_1); + Py_CLEAR(p->__pyx_v_i); + Py_CLEAR(p->__pyx_v_self); + Py_CLEAR(p->__pyx_t_1); (*Py_TYPE(o)->tp_free)(o); } @@ -27577,7 +27696,7 
@@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_22___iter__(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_22___iter__(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_22___iter__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_22___iter__ *)o; - Py_XDECREF(((PyObject *)p->__pyx_v_self)); + Py_CLEAR(p->__pyx_v_self); (*Py_TYPE(o)->tp_free)(o); } @@ -27774,13 +27893,13 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_23__make_config(PyTypeOb static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_23__make_config(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_23__make_config *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_23__make_config *)o; - Py_XDECREF(p->__pyx_v_config); - Py_XDECREF(p->__pyx_v_info); - Py_XDECREF(p->__pyx_v_key); - Py_XDECREF(p->__pyx_v_name); - Py_XDECREF(p->__pyx_v_value); - Py_XDECREF(p->__pyx_t_0); - Py_XDECREF(p->__pyx_t_1); + Py_CLEAR(p->__pyx_v_config); + Py_CLEAR(p->__pyx_v_info); + Py_CLEAR(p->__pyx_v_key); + Py_CLEAR(p->__pyx_v_name); + Py_CLEAR(p->__pyx_v_value); + Py_CLEAR(p->__pyx_t_0); + Py_CLEAR(p->__pyx_t_1); (*Py_TYPE(o)->tp_free)(o); } @@ -27996,32 +28115,32 @@ static PyTypeObject __pyx_type_5_cdec___pyx_scope_struct_23__make_config = { #endif }; -static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_24___cinit__(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *p; +static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_24___init__(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { + struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *p; PyObject *o = (*t->tp_alloc)(t, 0); if (!o) return 0; - p = ((struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *)o); + p = ((struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *)o); p->__pyx_v_config = 0; return o; } -static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_24___cinit__(PyObject *o) { - struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *)o; - Py_XDECREF(p->__pyx_v_config); +static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_24___init__(PyObject *o) { + struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *)o; + Py_CLEAR(p->__pyx_v_config); (*Py_TYPE(o)->tp_free)(o); } -static int __pyx_tp_traverse_5_cdec___pyx_scope_struct_24___cinit__(PyObject *o, visitproc v, void *a) { +static int __pyx_tp_traverse_5_cdec___pyx_scope_struct_24___init__(PyObject *o, visitproc v, void *a) { int e; - struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *)o; + struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *)o; if (p->__pyx_v_config) { e = (*v)(p->__pyx_v_config, a); if (e) return e; } return 0; } -static int __pyx_tp_clear_5_cdec___pyx_scope_struct_24___cinit__(PyObject *o) { - struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *)o; +static int __pyx_tp_clear_5_cdec___pyx_scope_struct_24___init__(PyObject *o) { + struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *)o; PyObject* tmp; tmp = ((PyObject*)p->__pyx_v_config); p->__pyx_v_config = Py_None; Py_INCREF(Py_None); @@ -28029,11 +28148,11 @@ static int 
__pyx_tp_clear_5_cdec___pyx_scope_struct_24___cinit__(PyObject *o) { return 0; } -static PyMethodDef __pyx_methods_5_cdec___pyx_scope_struct_24___cinit__[] = { +static PyMethodDef __pyx_methods_5_cdec___pyx_scope_struct_24___init__[] = { {0, 0, 0, 0} }; -static PyNumberMethods __pyx_tp_as_number___pyx_scope_struct_24___cinit__ = { +static PyNumberMethods __pyx_tp_as_number___pyx_scope_struct_24___init__ = { 0, /*nb_add*/ 0, /*nb_subtract*/ 0, /*nb_multiply*/ @@ -28091,7 +28210,7 @@ static PyNumberMethods __pyx_tp_as_number___pyx_scope_struct_24___cinit__ = { #endif }; -static PySequenceMethods __pyx_tp_as_sequence___pyx_scope_struct_24___cinit__ = { +static PySequenceMethods __pyx_tp_as_sequence___pyx_scope_struct_24___init__ = { 0, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ @@ -28104,13 +28223,13 @@ static PySequenceMethods __pyx_tp_as_sequence___pyx_scope_struct_24___cinit__ = 0, /*sq_inplace_repeat*/ }; -static PyMappingMethods __pyx_tp_as_mapping___pyx_scope_struct_24___cinit__ = { +static PyMappingMethods __pyx_tp_as_mapping___pyx_scope_struct_24___init__ = { 0, /*mp_length*/ 0, /*mp_subscript*/ 0, /*mp_ass_subscript*/ }; -static PyBufferProcs __pyx_tp_as_buffer___pyx_scope_struct_24___cinit__ = { +static PyBufferProcs __pyx_tp_as_buffer___pyx_scope_struct_24___init__ = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif @@ -28131,12 +28250,12 @@ static PyBufferProcs __pyx_tp_as_buffer___pyx_scope_struct_24___cinit__ = { #endif }; -static PyTypeObject __pyx_type_5_cdec___pyx_scope_struct_24___cinit__ = { +static PyTypeObject __pyx_type_5_cdec___pyx_scope_struct_24___init__ = { PyVarObject_HEAD_INIT(0, 0) - __Pyx_NAMESTR("_cdec.__pyx_scope_struct_24___cinit__"), /*tp_name*/ - sizeof(struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__), /*tp_basicsize*/ + __Pyx_NAMESTR("_cdec.__pyx_scope_struct_24___init__"), /*tp_name*/ + sizeof(struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__), /*tp_basicsize*/ 0, /*tp_itemsize*/ - __pyx_tp_dealloc_5_cdec___pyx_scope_struct_24___cinit__, /*tp_dealloc*/ + __pyx_tp_dealloc_5_cdec___pyx_scope_struct_24___init__, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ @@ -28146,24 +28265,24 @@ static PyTypeObject __pyx_type_5_cdec___pyx_scope_struct_24___cinit__ = { 0, /*reserved*/ #endif 0, /*tp_repr*/ - &__pyx_tp_as_number___pyx_scope_struct_24___cinit__, /*tp_as_number*/ - &__pyx_tp_as_sequence___pyx_scope_struct_24___cinit__, /*tp_as_sequence*/ - &__pyx_tp_as_mapping___pyx_scope_struct_24___cinit__, /*tp_as_mapping*/ + &__pyx_tp_as_number___pyx_scope_struct_24___init__, /*tp_as_number*/ + &__pyx_tp_as_sequence___pyx_scope_struct_24___init__, /*tp_as_sequence*/ + &__pyx_tp_as_mapping___pyx_scope_struct_24___init__, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ - &__pyx_tp_as_buffer___pyx_scope_struct_24___cinit__, /*tp_as_buffer*/ + &__pyx_tp_as_buffer___pyx_scope_struct_24___init__, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ - __pyx_tp_traverse_5_cdec___pyx_scope_struct_24___cinit__, /*tp_traverse*/ - __pyx_tp_clear_5_cdec___pyx_scope_struct_24___cinit__, /*tp_clear*/ + __pyx_tp_traverse_5_cdec___pyx_scope_struct_24___init__, /*tp_traverse*/ + __pyx_tp_clear_5_cdec___pyx_scope_struct_24___init__, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ - __pyx_methods_5_cdec___pyx_scope_struct_24___cinit__, /*tp_methods*/ + 
__pyx_methods_5_cdec___pyx_scope_struct_24___init__, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ @@ -28173,7 +28292,7 @@ static PyTypeObject __pyx_type_5_cdec___pyx_scope_struct_24___cinit__ = { 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ - __pyx_tp_new_5_cdec___pyx_scope_struct_24___cinit__, /*tp_new*/ + __pyx_tp_new_5_cdec___pyx_scope_struct_24___init__, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ @@ -28200,9 +28319,9 @@ static PyObject *__pyx_tp_new_5_cdec___pyx_scope_struct_25_genexpr(PyTypeObject static void __pyx_tp_dealloc_5_cdec___pyx_scope_struct_25_genexpr(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_25_genexpr *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_25_genexpr *)o; - Py_XDECREF(((PyObject *)p->__pyx_outer_scope)); - Py_XDECREF(p->__pyx_v_kv); - Py_XDECREF(p->__pyx_t_0); + Py_CLEAR(p->__pyx_outer_scope); + Py_CLEAR(p->__pyx_v_kv); + Py_CLEAR(p->__pyx_t_0); (*Py_TYPE(o)->tp_free)(o); } @@ -28225,7 +28344,7 @@ static int __pyx_tp_clear_5_cdec___pyx_scope_struct_25_genexpr(PyObject *o) { struct __pyx_obj_5_cdec___pyx_scope_struct_25_genexpr *p = (struct __pyx_obj_5_cdec___pyx_scope_struct_25_genexpr *)o; PyObject* tmp; tmp = ((PyObject*)p->__pyx_outer_scope); - p->__pyx_outer_scope = ((struct __pyx_obj_5_cdec___pyx_scope_struct_24___cinit__ *)Py_None); Py_INCREF(Py_None); + p->__pyx_outer_scope = ((struct __pyx_obj_5_cdec___pyx_scope_struct_24___init__ *)Py_None); Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->__pyx_v_kv); p->__pyx_v_kv = Py_None; Py_INCREF(Py_None); @@ -28422,32 +28541,32 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_17, __pyx_k_17, sizeof(__pyx_k_17), 0, 0, 1, 0}, {&__pyx_kp_s_18, __pyx_k_18, sizeof(__pyx_k_18), 0, 0, 1, 0}, {&__pyx_kp_s_20, __pyx_k_20, sizeof(__pyx_k_20), 0, 0, 1, 0}, + {&__pyx_kp_s_22, __pyx_k_22, sizeof(__pyx_k_22), 0, 0, 1, 0}, {&__pyx_kp_s_23, __pyx_k_23, sizeof(__pyx_k_23), 0, 0, 1, 0}, - {&__pyx_kp_s_24, __pyx_k_24, sizeof(__pyx_k_24), 0, 0, 1, 0}, + {&__pyx_kp_s_26, __pyx_k_26, sizeof(__pyx_k_26), 0, 0, 1, 0}, + {&__pyx_kp_s_27, __pyx_k_27, sizeof(__pyx_k_27), 0, 0, 1, 0}, {&__pyx_kp_s_28, __pyx_k_28, sizeof(__pyx_k_28), 0, 0, 1, 0}, {&__pyx_kp_s_29, __pyx_k_29, sizeof(__pyx_k_29), 0, 0, 1, 0}, {&__pyx_kp_s_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 1, 0}, {&__pyx_kp_s_30, __pyx_k_30, sizeof(__pyx_k_30), 0, 0, 1, 0}, {&__pyx_kp_s_31, __pyx_k_31, sizeof(__pyx_k_31), 0, 0, 1, 0}, - {&__pyx_kp_s_32, __pyx_k_32, sizeof(__pyx_k_32), 0, 0, 1, 0}, {&__pyx_kp_s_33, __pyx_k_33, sizeof(__pyx_k_33), 0, 0, 1, 0}, - {&__pyx_kp_s_35, __pyx_k_35, sizeof(__pyx_k_35), 0, 0, 1, 0}, - {&__pyx_kp_s_36, __pyx_k_36, sizeof(__pyx_k_36), 0, 0, 1, 0}, - {&__pyx_kp_s_39, __pyx_k_39, sizeof(__pyx_k_39), 0, 0, 1, 0}, + {&__pyx_kp_s_34, __pyx_k_34, sizeof(__pyx_k_34), 0, 0, 1, 0}, + {&__pyx_kp_s_37, __pyx_k_37, sizeof(__pyx_k_37), 0, 0, 1, 0}, + {&__pyx_kp_s_38, __pyx_k_38, sizeof(__pyx_k_38), 0, 0, 1, 0}, {&__pyx_kp_s_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 0, 1, 0}, {&__pyx_kp_s_40, __pyx_k_40, sizeof(__pyx_k_40), 0, 0, 1, 0}, {&__pyx_kp_s_42, __pyx_k_42, sizeof(__pyx_k_42), 0, 0, 1, 0}, {&__pyx_kp_s_44, __pyx_k_44, sizeof(__pyx_k_44), 0, 0, 1, 0}, - {&__pyx_kp_s_46, __pyx_k_46, sizeof(__pyx_k_46), 0, 0, 1, 0}, + {&__pyx_kp_s_45, __pyx_k_45, sizeof(__pyx_k_45), 0, 0, 1, 0}, {&__pyx_kp_s_47, __pyx_k_47, sizeof(__pyx_k_47), 0, 0, 1, 0}, + {&__pyx_kp_s_48, __pyx_k_48, sizeof(__pyx_k_48), 0, 0, 1, 0}, {&__pyx_kp_s_49, __pyx_k_49, sizeof(__pyx_k_49), 0, 0, 1, 0}, - {&__pyx_kp_s_50, 
__pyx_k_50, sizeof(__pyx_k_50), 0, 0, 1, 0}, - {&__pyx_kp_s_51, __pyx_k_51, sizeof(__pyx_k_51), 0, 0, 1, 0}, - {&__pyx_kp_s_54, __pyx_k_54, sizeof(__pyx_k_54), 0, 0, 1, 0}, - {&__pyx_n_s_55, __pyx_k_55, sizeof(__pyx_k_55), 0, 0, 1, 1}, - {&__pyx_n_s_56, __pyx_k_56, sizeof(__pyx_k_56), 0, 0, 1, 1}, - {&__pyx_kp_s_59, __pyx_k_59, sizeof(__pyx_k_59), 0, 0, 1, 0}, - {&__pyx_kp_s_65, __pyx_k_65, sizeof(__pyx_k_65), 0, 0, 1, 0}, + {&__pyx_kp_s_52, __pyx_k_52, sizeof(__pyx_k_52), 0, 0, 1, 0}, + {&__pyx_n_s_53, __pyx_k_53, sizeof(__pyx_k_53), 0, 0, 1, 1}, + {&__pyx_n_s_54, __pyx_k_54, sizeof(__pyx_k_54), 0, 0, 1, 1}, + {&__pyx_kp_s_57, __pyx_k_57, sizeof(__pyx_k_57), 0, 0, 1, 0}, + {&__pyx_kp_s_63, __pyx_k_63, sizeof(__pyx_k_63), 0, 0, 1, 0}, {&__pyx_kp_s_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 1, 0}, {&__pyx_kp_s_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 0, 1, 0}, {&__pyx_kp_s_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 0, 1, 0}, @@ -28546,14 +28665,14 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = { static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_Exception = __Pyx_GetName(__pyx_b, __pyx_n_s__Exception); if (!__pyx_builtin_Exception) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_TypeError = __Pyx_GetName(__pyx_b, __pyx_n_s__TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_KeyError = __Pyx_GetName(__pyx_b, __pyx_n_s__KeyError); if (!__pyx_builtin_KeyError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_NotImplemented = __Pyx_GetName(__pyx_b, __pyx_n_s__NotImplemented); if (!__pyx_builtin_NotImplemented) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_super = __Pyx_GetName(__pyx_b, __pyx_n_s__super); if (!__pyx_builtin_super) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_basestring = __Pyx_GetName(__pyx_b, __pyx_n_s__basestring); if (!__pyx_builtin_basestring) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_eval = __Pyx_GetName(__pyx_b, __pyx_n_s__eval); if (!__pyx_builtin_eval) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_builtin_enumerate = __Pyx_GetName(__pyx_b, __pyx_n_s__enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_builtin_KeyError = __Pyx_GetName(__pyx_b, __pyx_n_s__KeyError); if (!__pyx_builtin_KeyError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_builtin_NotImplemented = __Pyx_GetName(__pyx_b, __pyx_n_s__NotImplemented); if (!__pyx_builtin_NotImplemented) {__pyx_filename 
= __pyx_f[1]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_builtin_super = __Pyx_GetName(__pyx_b, __pyx_n_s__super); if (!__pyx_builtin_super) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_builtin_basestring = __Pyx_GetName(__pyx_b, __pyx_n_s__basestring); if (!__pyx_builtin_basestring) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_builtin_eval = __Pyx_GetName(__pyx_b, __pyx_n_s__eval); if (!__pyx_builtin_eval) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_builtin_enumerate = __Pyx_GetName(__pyx_b, __pyx_n_s__enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_IndexError = __Pyx_GetName(__pyx_b, __pyx_n_s__IndexError); if (!__pyx_builtin_IndexError) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_open = __Pyx_GetName(__pyx_b, __pyx_n_s__open); if (!__pyx_builtin_open) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; @@ -28579,21 +28698,21 @@ static int __Pyx_InitCachedConstants(void) { __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2)); - /* "/Users/vchahun/Sandbox/cdec/python/src/vectors.pxi":89 + /* "/home/vchahune/tools/cdec/python/src/vectors.pxi":95 * elif op == 3: # != * return not (x == y) * raise NotImplemented('comparison not implemented for SparseVector') # <<<<<<<<<<<<<< * * def __len__(self): */ - __pyx_k_tuple_5 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_k_tuple_5 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_5); __Pyx_INCREF(((PyObject *)__pyx_kp_s_4)); PyTuple_SET_ITEM(__pyx_k_tuple_5, 0, ((PyObject *)__pyx_kp_s_4)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_4)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_5)); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":6 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":6 * * def _phrase(phrase): * return ' '.join(w.encode('utf8') if isinstance(w, unicode) else str(w) for w in phrase) # <<<<<<<<<<<<<< @@ -28607,200 +28726,172 @@ static int __Pyx_InitCachedConstants(void) { __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6)); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":209 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":226 * trule = convert_rule(trule) * elif not isinstance(trule, TRule): * raise ValueError('the grammar should contain TRule objects') # <<<<<<<<<<<<<< * _g.AddRule((<TRule> trule).rule[0]) */ - __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 209; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_14); 
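
Note on the dealloc hunks above: every one of them makes the same change, `Py_XDECREF(p->field)` becoming `Py_CLEAR(p->field)`. `Py_CLEAR` sets the struct slot to NULL *before* dropping the reference, so anything re-entered during the `DECREF` (a `__del__`, a weakref callback, the cyclic GC) sees a NULL slot instead of a pointer into freed memory. A minimal sketch of the difference, using a hypothetical `scope_object` rather than the generated `_cdec` scope structs:

    #include <Python.h>

    typedef struct {
        PyObject_HEAD
        PyObject *field;    /* an owned reference */
    } scope_object;

    /* Unsafe pattern: while Py_XDECREF runs, p->field still points at
     * the object being destroyed, so code re-entered by the DECREF can
     * touch freed memory through it. */
    static void scope_dealloc_unsafe(PyObject *o)
    {
        scope_object *p = (scope_object *)o;
        Py_XDECREF(p->field);
        (*Py_TYPE(o)->tp_free)(o);
    }

    /* Safe pattern: Py_CLEAR nulls the slot first, then decrefs a local
     * copy, so the struct never exposes a dangling pointer. */
    static void scope_dealloc_safe(PyObject *o)
    {
        scope_object *p = (scope_object *)o;
        Py_CLEAR(p->field);
        (*Py_TYPE(o)->tp_free)(o);
    }
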
__Pyx_INCREF(((PyObject *)__pyx_kp_s_13)); PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_s_13)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_13)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":214 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":235 * elif op == 3: # != * return not (x == y) * raise NotImplemented('comparison not implemented for HypergraphEdge') # <<<<<<<<<<<<<< * * cdef class HypergraphNode: */ - __pyx_k_tuple_19 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_k_tuple_19 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_19)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_19); __Pyx_INCREF(((PyObject *)__pyx_kp_s_18)); PyTuple_SET_ITEM(__pyx_k_tuple_19, 0, ((PyObject *)__pyx_kp_s_18)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_18)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_19)); - /* "/Users/vchahun/Sandbox/cdec/python/src/hypergraph.pxi":251 + /* "/home/vchahune/tools/cdec/python/src/hypergraph.pxi":272 * elif op == 3: # != * return not (x == y) * raise NotImplemented('comparison not implemented for HypergraphNode') # <<<<<<<<<<<<<< */ - __pyx_k_tuple_21 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_k_tuple_21 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_21)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_21); __Pyx_INCREF(((PyObject *)__pyx_kp_s_20)); PyTuple_SET_ITEM(__pyx_k_tuple_21, 0, ((PyObject *)__pyx_kp_s_20)); __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_20)); __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_21)); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":16 - * else: - * if isinstance(inp, unicode): - * inp = inp.encode('utf8') # <<<<<<<<<<<<<< - * if not isinstance(inp, str): - * raise TypeError('cannot create lattice from %s' % type(inp)) - */ - __pyx_k_tuple_22 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_22)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_22); - __Pyx_INCREF(((PyObject *)__pyx_n_s__utf8)); - PyTuple_SET_ITEM(__pyx_k_tuple_22, 0, ((PyObject *)__pyx_n_s__utf8)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_22)); - - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":26 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":26 * def __getitem__(self, int index): * if not 0 <= index < len(self): * raise IndexError('lattice index out of range') # <<<<<<<<<<<<<< * arcs = [] * cdef vector[lattice.LatticeArc] arc_vector = self.lattice[0][index] */ - __pyx_k_tuple_25 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_25); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_24)); - PyTuple_SET_ITEM(__pyx_k_tuple_25, 0, ((PyObject *)__pyx_kp_s_24)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_24)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_25)); + __pyx_k_tuple_24 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_24)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_24); + __Pyx_INCREF(((PyObject 
*)__pyx_kp_s_23)); + PyTuple_SET_ITEM(__pyx_k_tuple_24, 0, ((PyObject *)__pyx_kp_s_23)); + __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_23)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_24)); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":39 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":39 * def __setitem__(self, int index, tuple arcs): * if not 0 <= index < len(self): * raise IndexError('lattice index out of range') # <<<<<<<<<<<<<< * cdef lattice.LatticeArc* arc * for (label, cost, dist2next) in arcs: */ - __pyx_k_tuple_26 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_26)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_26); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_24)); - PyTuple_SET_ITEM(__pyx_k_tuple_26, 0, ((PyObject *)__pyx_kp_s_24)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_24)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_26)); - - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":43 - * for (label, cost, dist2next) in arcs: - * if isinstance(label, unicode): - * label = label.encode('utf8') # <<<<<<<<<<<<<< - * arc = new lattice.LatticeArc(TDConvert(<char *>label), cost, dist2next) - * self.lattice[0][index].push_back(arc[0]) - */ - __pyx_k_tuple_27 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_27)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_27); - __Pyx_INCREF(((PyObject *)__pyx_n_s__utf8)); - PyTuple_SET_ITEM(__pyx_k_tuple_27, 0, ((PyObject *)__pyx_n_s__utf8)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_27)); + __pyx_k_tuple_25 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_25); + __Pyx_INCREF(((PyObject *)__pyx_kp_s_23)); + PyTuple_SET_ITEM(__pyx_k_tuple_25, 0, ((PyObject *)__pyx_kp_s_23)); + __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_23)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_25)); - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":69 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":69 * for i in range(len(self)): * for label, weight, delta in self[i]: * yield '%d -> %d [label="%s"];' % (i, i+delta, label.replace('"', '\\"')) # <<<<<<<<<<<<<< * yield '%d [shape=doublecircle]' % len(self) * yield '}' */ - __pyx_k_tuple_34 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_34)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_34); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_32)); - PyTuple_SET_ITEM(__pyx_k_tuple_34, 0, ((PyObject *)__pyx_kp_s_32)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_32)); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_33)); - PyTuple_SET_ITEM(__pyx_k_tuple_34, 1, ((PyObject *)__pyx_kp_s_33)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_33)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_34)); - - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":63 - * + __pyx_k_tuple_32 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_32)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_32); + __Pyx_INCREF(((PyObject *)__pyx_kp_s_30)); + PyTuple_SET_ITEM(__pyx_k_tuple_32, 0, ((PyObject *)__pyx_kp_s_30)); + __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_30)); + __Pyx_INCREF(((PyObject *)__pyx_kp_s_31)); + PyTuple_SET_ITEM(__pyx_k_tuple_32, 1, ((PyObject *)__pyx_kp_s_31)); + 
__Pyx_GIVEREF(((PyObject *)__pyx_kp_s_31)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_32)); + + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":63 * def todot(self): + * """lattice.todot() -> Representation of the lattice in GraphViz dot format.""" * def lines(): # <<<<<<<<<<<<<< * yield 'digraph lattice {' * yield 'rankdir = LR;' */ - __pyx_k_tuple_37 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_37)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_37); + __pyx_k_tuple_35 = PyTuple_New(4); if (unlikely(!__pyx_k_tuple_35)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_35); __Pyx_INCREF(((PyObject *)__pyx_n_s__i)); - PyTuple_SET_ITEM(__pyx_k_tuple_37, 0, ((PyObject *)__pyx_n_s__i)); + PyTuple_SET_ITEM(__pyx_k_tuple_35, 0, ((PyObject *)__pyx_n_s__i)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__i)); __Pyx_INCREF(((PyObject *)__pyx_n_s__label)); - PyTuple_SET_ITEM(__pyx_k_tuple_37, 1, ((PyObject *)__pyx_n_s__label)); + PyTuple_SET_ITEM(__pyx_k_tuple_35, 1, ((PyObject *)__pyx_n_s__label)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__label)); __Pyx_INCREF(((PyObject *)__pyx_n_s__weight)); - PyTuple_SET_ITEM(__pyx_k_tuple_37, 2, ((PyObject *)__pyx_n_s__weight)); + PyTuple_SET_ITEM(__pyx_k_tuple_35, 2, ((PyObject *)__pyx_n_s__weight)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__weight)); __Pyx_INCREF(((PyObject *)__pyx_n_s__delta)); - PyTuple_SET_ITEM(__pyx_k_tuple_37, 3, ((PyObject *)__pyx_n_s__delta)); + PyTuple_SET_ITEM(__pyx_k_tuple_35, 3, ((PyObject *)__pyx_n_s__delta)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__delta)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_37)); - __pyx_k_codeobj_38 = (PyObject*)__Pyx_PyCode_New(0, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_37, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_39, __pyx_n_s__lines, 63, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_38)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_35)); + __pyx_k_codeobj_36 = (PyObject*)__Pyx_PyCode_New(0, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_35, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_37, __pyx_n_s__lines, 63, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_36)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/lattice.pxi":72 + /* "/home/vchahune/tools/cdec/python/src/lattice.pxi":72 * yield '%d [shape=doublecircle]' % len(self) * yield '}' * return '\n'.join(lines()).encode('utf8') # <<<<<<<<<<<<<< * * def as_hypergraph(self): */ - __pyx_k_tuple_41 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_41)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_41); + __pyx_k_tuple_39 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_39)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_39); __Pyx_INCREF(((PyObject *)__pyx_n_s__utf8)); - PyTuple_SET_ITEM(__pyx_k_tuple_41, 0, ((PyObject *)__pyx_n_s__utf8)); + PyTuple_SET_ITEM(__pyx_k_tuple_39, 0, ((PyObject *)__pyx_n_s__utf8)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__utf8)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_41)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_39)); - /* 
"/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":50 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":50 * def __getitem__(self, int index): * if not 0 <= index < len(self): * raise IndexError('sufficient stats vector index out of range') # <<<<<<<<<<<<<< * return self.stats[0][index] * */ - __pyx_k_tuple_43 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_43)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_43); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_42)); - PyTuple_SET_ITEM(__pyx_k_tuple_43, 0, ((PyObject *)__pyx_kp_s_42)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_42)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_43)); + __pyx_k_tuple_41 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_41)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_41); + __Pyx_INCREF(((PyObject *)__pyx_kp_s_40)); + PyTuple_SET_ITEM(__pyx_k_tuple_41, 0, ((PyObject *)__pyx_kp_s_40)); + __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_40)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_41)); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":84 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":84 * def __getitem__(self,int k): * if not 0 <= k < self.cs.size(): * raise IndexError('candidate set index out of range') # <<<<<<<<<<<<<< * cdef Candidate candidate = Candidate() * candidate.candidate = &self.cs[0][k] */ - __pyx_k_tuple_45 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_45)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_45); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_44)); - PyTuple_SET_ITEM(__pyx_k_tuple_45, 0, ((PyObject *)__pyx_kp_s_44)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_44)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_45)); + __pyx_k_tuple_43 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_43)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_43); + __Pyx_INCREF(((PyObject *)__pyx_kp_s_42)); + PyTuple_SET_ITEM(__pyx_k_tuple_43, 0, ((PyObject *)__pyx_kp_s_42)); + __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_42)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_43)); - /* "_cdec.pyx":53 - * """ + /* "_cdec.pyx":52 + * Create a decoder using a given configuration. 
Formalism is required.""" * if config_str is None: * formalism = config.get('formalism', None) # <<<<<<<<<<<<<< * if formalism not in ('scfg', 'fst', 'lextrans', 'pb', * 'csplit', 'tagger', 'lexalign'): */ - __pyx_k_tuple_48 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_48)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_48); + __pyx_k_tuple_46 = PyTuple_New(2); if (unlikely(!__pyx_k_tuple_46)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_46); __Pyx_INCREF(((PyObject *)__pyx_n_s__formalism)); - PyTuple_SET_ITEM(__pyx_k_tuple_48, 0, ((PyObject *)__pyx_n_s__formalism)); + PyTuple_SET_ITEM(__pyx_k_tuple_46, 0, ((PyObject *)__pyx_n_s__formalism)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__formalism)); __Pyx_INCREF(Py_None); - PyTuple_SET_ITEM(__pyx_k_tuple_48, 1, Py_None); + PyTuple_SET_ITEM(__pyx_k_tuple_46, 1, Py_None); __Pyx_GIVEREF(Py_None); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_48)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_46)); /* "_cdec.pyx":93 * with open(weights) as fp: @@ -28809,134 +28900,134 @@ static int __Pyx_InitCachedConstants(void) { * fname, value = line.split() * self.weights[fname.strip()] = float(value) */ - __pyx_k_tuple_52 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_52)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_52); - __Pyx_INCREF(((PyObject *)__pyx_kp_s_51)); - PyTuple_SET_ITEM(__pyx_k_tuple_52, 0, ((PyObject *)__pyx_kp_s_51)); - __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_51)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_52)); + __pyx_k_tuple_50 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_50)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_50); + __Pyx_INCREF(((PyObject *)__pyx_kp_s_49)); + PyTuple_SET_ITEM(__pyx_k_tuple_50, 0, ((PyObject *)__pyx_kp_s_49)); + __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_49)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_50)); /* "_cdec.pyx":91 - * * def read_weights(self, weights): + * """decoder.read_weights(filename): Read decoder weights from a file.""" * with open(weights) as fp: # <<<<<<<<<<<<<< * for line in fp: * if line.strip().startswith('#'): continue */ - __pyx_k_tuple_53 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_53)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_53); + __pyx_k_tuple_51 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_51)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_51); __Pyx_INCREF(Py_None); - PyTuple_SET_ITEM(__pyx_k_tuple_53, 0, Py_None); + PyTuple_SET_ITEM(__pyx_k_tuple_51, 0, Py_None); __Pyx_GIVEREF(Py_None); __Pyx_INCREF(Py_None); - PyTuple_SET_ITEM(__pyx_k_tuple_53, 1, Py_None); + PyTuple_SET_ITEM(__pyx_k_tuple_51, 1, Py_None); __Pyx_GIVEREF(Py_None); __Pyx_INCREF(Py_None); - PyTuple_SET_ITEM(__pyx_k_tuple_53, 2, Py_None); + PyTuple_SET_ITEM(__pyx_k_tuple_51, 2, Py_None); __Pyx_GIVEREF(Py_None); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_53)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_51)); - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":5 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":5 * import cdec.sa._sa as _sa * * def _phrase(phrase): # <<<<<<<<<<<<<< * return ' '.join(w.encode('utf8') if isinstance(w, 
unicode) else str(w) for w in phrase) * */ - __pyx_k_tuple_57 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_57)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_57); + __pyx_k_tuple_55 = PyTuple_New(3); if (unlikely(!__pyx_k_tuple_55)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_55); __Pyx_INCREF(((PyObject *)__pyx_n_s__phrase)); - PyTuple_SET_ITEM(__pyx_k_tuple_57, 0, ((PyObject *)__pyx_n_s__phrase)); + PyTuple_SET_ITEM(__pyx_k_tuple_55, 0, ((PyObject *)__pyx_n_s__phrase)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__phrase)); __Pyx_INCREF(((PyObject *)__pyx_n_s__genexpr)); - PyTuple_SET_ITEM(__pyx_k_tuple_57, 1, ((PyObject *)__pyx_n_s__genexpr)); + PyTuple_SET_ITEM(__pyx_k_tuple_55, 1, ((PyObject *)__pyx_n_s__genexpr)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__genexpr)); __Pyx_INCREF(((PyObject *)__pyx_n_s__genexpr)); - PyTuple_SET_ITEM(__pyx_k_tuple_57, 2, ((PyObject *)__pyx_n_s__genexpr)); + PyTuple_SET_ITEM(__pyx_k_tuple_55, 2, ((PyObject *)__pyx_n_s__genexpr)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__genexpr)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_57)); - __pyx_k_codeobj_58 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_57, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_59, __pyx_n_s___phrase, 5, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_58)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_55)); + __pyx_k_codeobj_56 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_55, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_57, __pyx_n_s___phrase, 5, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_56)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":189 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":194 * return [] * * BLEU = Scorer('IBM_BLEU') # <<<<<<<<<<<<<< * TER = Scorer('TER') * CER = Scorer('CER') */ - __pyx_k_tuple_60 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_60)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_60); + __pyx_k_tuple_58 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_58)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 194; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_58); __Pyx_INCREF(((PyObject *)__pyx_n_s__IBM_BLEU)); - PyTuple_SET_ITEM(__pyx_k_tuple_60, 0, ((PyObject *)__pyx_n_s__IBM_BLEU)); + PyTuple_SET_ITEM(__pyx_k_tuple_58, 0, ((PyObject *)__pyx_n_s__IBM_BLEU)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__IBM_BLEU)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_60)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_58)); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":190 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":195 * * BLEU = Scorer('IBM_BLEU') * TER = Scorer('TER') # <<<<<<<<<<<<<< * CER = Scorer('CER') */ - __pyx_k_tuple_61 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_61)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_61); + __pyx_k_tuple_59 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_59)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 195; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_59); __Pyx_INCREF(((PyObject *)__pyx_n_s__TER)); - PyTuple_SET_ITEM(__pyx_k_tuple_61, 0, ((PyObject *)__pyx_n_s__TER)); + PyTuple_SET_ITEM(__pyx_k_tuple_59, 0, ((PyObject *)__pyx_n_s__TER)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__TER)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_61)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_59)); - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":191 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":196 * BLEU = Scorer('IBM_BLEU') * TER = Scorer('TER') * CER = Scorer('CER') # <<<<<<<<<<<<<< */ - __pyx_k_tuple_62 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_62)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_62); + __pyx_k_tuple_60 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_60)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_60); __Pyx_INCREF(((PyObject *)__pyx_n_s__CER)); - PyTuple_SET_ITEM(__pyx_k_tuple_62, 0, ((PyObject *)__pyx_n_s__CER)); + PyTuple_SET_ITEM(__pyx_k_tuple_60, 0, ((PyObject *)__pyx_n_s__CER)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__CER)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_62)); + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_60)); /* "_cdec.pyx":28 * class ParseFailed(Exception): pass * * def set_silent(yn): # <<<<<<<<<<<<<< + * """set_silent(bool): Configure the verbosity of cdec.""" * SetSilent(yn) - * */ - __pyx_k_tuple_63 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_63)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_63); + __pyx_k_tuple_61 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_61)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GOTREF(__pyx_k_tuple_61); __Pyx_INCREF(((PyObject *)__pyx_n_s__yn)); - PyTuple_SET_ITEM(__pyx_k_tuple_63, 0, ((PyObject *)__pyx_n_s__yn)); + PyTuple_SET_ITEM(__pyx_k_tuple_61, 0, ((PyObject *)__pyx_n_s__yn)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__yn)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_63)); - __pyx_k_codeobj_64 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_63, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_65, __pyx_n_s__set_silent, 28, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_64)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_61)); + __pyx_k_codeobj_62 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_61, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_63, __pyx_n_s__set_silent, 28, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_62)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - /* "_cdec.pyx":31 + /* "_cdec.pyx":32 * SetSilent(yn) * * def _make_config(config): # <<<<<<<<<<<<<< * for key, value in config.items(): * if isinstance(value, dict): */ - __pyx_k_tuple_66 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_66)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __Pyx_GOTREF(__pyx_k_tuple_66); + __pyx_k_tuple_64 = PyTuple_New(5); if (unlikely(!__pyx_k_tuple_64)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + 
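
Two mechanical effects run through the `__Pyx_InitCachedBuiltins` and `__Pyx_InitCachedConstants` hunks above: the `__pyx_lineno` coordinates shift (22→23, 89→95, 176→191, 209→226, ...) because docstrings were added to the `.pyx`/`.pxi` sources, and the `__pyx_k_tuple_*` constants renumber because the `encode('utf8')` argument tuples for the removed `lattice.pxi` unicode checks dropped out of the table. The caching itself builds each constant once at import time so hot paths can reuse it; a simplified standalone version of the exception-argument case (hypothetical names, not the generated identifiers):

    #include <Python.h>

    /* Built once, at module init, by the analogue of
     * __Pyx_InitCachedConstants. */
    static PyObject *k_tuple_index_err;

    static int init_cached_constants(void)
    {
        k_tuple_index_err = Py_BuildValue("(s)", "lattice index out of range");
        return k_tuple_index_err ? 0 : -1;
    }

    /* Equivalent of `raise IndexError('lattice index out of range')`
     * without re-creating the argument tuple on every raise.  Assumes
     * init_cached_constants() already ran. */
    static PyObject *raise_cached_index_error(void)
    {
        PyObject *exc = PyObject_Call(PyExc_IndexError, k_tuple_index_err, NULL);
        if (exc) {
            PyErr_SetObject(PyExc_IndexError, exc);
            Py_DECREF(exc);
        }
        return NULL;
    }
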
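The type-init hunks that follow add one more recurring block: after each `PyType_Ready`, the generated code fetches the type's `__init__` slot wrapper, copies its shared `wrapperbase`, and swaps in a copy whose `doc` points at the Cython docstring, which is how `help(Decoder.__init__)` and friends pick up the newly added docstrings. A simplified version of that block (hypothetical helper name; the `PyWrapperDescr` types are CPython internals, exactly as the generated code uses them):

    #include <Python.h>

    /* One static copy per patched slot; the generated code likewise keeps
     * a __pyx_wrapperbase_* per __init__. */
    static struct wrapperbase patched_init_base;

    static int patch_init_doc(PyTypeObject *type, const char *doc)
    {
        PyObject *w = PyObject_GetAttrString((PyObject *)type, "__init__");
        if (!w) return -1;
        if (Py_TYPE(w) == &PyWrapperDescr_Type) {
            /* Copy the shared base, then point the descriptor at the
             * copy carrying our docstring. */
            patched_init_base = *((PyWrapperDescrObject *)w)->d_base;
            patched_init_base.doc = (char *)doc;
            ((PyWrapperDescrObject *)w)->d_base = &patched_init_base;
        }
        Py_DECREF(w);
        return 0;
    }
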
__Pyx_GOTREF(__pyx_k_tuple_64); __Pyx_INCREF(((PyObject *)__pyx_n_s__config)); - PyTuple_SET_ITEM(__pyx_k_tuple_66, 0, ((PyObject *)__pyx_n_s__config)); + PyTuple_SET_ITEM(__pyx_k_tuple_64, 0, ((PyObject *)__pyx_n_s__config)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__config)); __Pyx_INCREF(((PyObject *)__pyx_n_s__key)); - PyTuple_SET_ITEM(__pyx_k_tuple_66, 1, ((PyObject *)__pyx_n_s__key)); + PyTuple_SET_ITEM(__pyx_k_tuple_64, 1, ((PyObject *)__pyx_n_s__key)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__key)); __Pyx_INCREF(((PyObject *)__pyx_n_s__value)); - PyTuple_SET_ITEM(__pyx_k_tuple_66, 2, ((PyObject *)__pyx_n_s__value)); + PyTuple_SET_ITEM(__pyx_k_tuple_64, 2, ((PyObject *)__pyx_n_s__value)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__value)); __Pyx_INCREF(((PyObject *)__pyx_n_s__name)); - PyTuple_SET_ITEM(__pyx_k_tuple_66, 3, ((PyObject *)__pyx_n_s__name)); + PyTuple_SET_ITEM(__pyx_k_tuple_64, 3, ((PyObject *)__pyx_n_s__name)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__name)); __Pyx_INCREF(((PyObject *)__pyx_n_s__info)); - PyTuple_SET_ITEM(__pyx_k_tuple_66, 4, ((PyObject *)__pyx_n_s__info)); + PyTuple_SET_ITEM(__pyx_k_tuple_64, 4, ((PyObject *)__pyx_n_s__info)); __Pyx_GIVEREF(((PyObject *)__pyx_n_s__info)); - __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_66)); - __pyx_k_codeobj_67 = (PyObject*)__Pyx_PyCode_New(1, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_66, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_65, __pyx_n_s___make_config, 31, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_67)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_64)); + __pyx_k_codeobj_65 = (PyObject*)__Pyx_PyCode_New(1, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_k_tuple_64, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_63, __pyx_n_s___make_config, 32, __pyx_empty_bytes); if (unlikely(!__pyx_k_codeobj_65)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; @@ -28997,16 +29088,12 @@ PyMODINIT_FUNC PyInit__cdec(void) #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_cdec"), __pyx_methods, 0, 0, PYTHON_API_VERSION); + __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_cdec"), __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif - if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; - #if PY_MAJOR_VERSION < 3 - Py_INCREF(__pyx_m); - #endif - __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); - if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif @@ -29025,30 +29112,100 @@ PyMODINIT_FUNC PyInit__cdec(void) /*--- Function export code ---*/ /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type_5_cdec_DenseVector) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + #if CYTHON_COMPILING_IN_CPYTHON + { + 
PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5_cdec_DenseVector, "__init__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) { + __pyx_wrapperbase_5_cdec_11DenseVector___init__ = *((PyWrapperDescrObject *)wrapper)->d_base; + __pyx_wrapperbase_5_cdec_11DenseVector___init__.doc = __pyx_doc_5_cdec_11DenseVector___init__; + ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5_cdec_11DenseVector___init__; + } + } + #endif if (__Pyx_SetAttrString(__pyx_m, "DenseVector", (PyObject *)&__pyx_type_5_cdec_DenseVector) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_DenseVector = &__pyx_type_5_cdec_DenseVector; - if (PyType_Ready(&__pyx_type_5_cdec_SparseVector) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "SparseVector", (PyObject *)&__pyx_type_5_cdec_SparseVector) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_SparseVector) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + #if CYTHON_COMPILING_IN_CPYTHON + { + PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5_cdec_SparseVector, "__init__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) { + __pyx_wrapperbase_5_cdec_12SparseVector___init__ = *((PyWrapperDescrObject *)wrapper)->d_base; + __pyx_wrapperbase_5_cdec_12SparseVector___init__.doc = __pyx_doc_5_cdec_12SparseVector___init__; + ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5_cdec_12SparseVector___init__; + } + } + #endif + if (__Pyx_SetAttrString(__pyx_m, "SparseVector", (PyObject *)&__pyx_type_5_cdec_SparseVector) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_SparseVector = &__pyx_type_5_cdec_SparseVector; if (PyType_Ready(&__pyx_type_5_cdec_NT) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + #if CYTHON_COMPILING_IN_CPYTHON + { + PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5_cdec_NT, "__init__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) { + __pyx_wrapperbase_5_cdec_2NT___init__ = *((PyWrapperDescrObject *)wrapper)->d_base; + __pyx_wrapperbase_5_cdec_2NT___init__.doc = __pyx_doc_5_cdec_2NT___init__; + ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5_cdec_2NT___init__; + } + } + #endif if (__Pyx_SetAttrString(__pyx_m, "NT", (PyObject *)&__pyx_type_5_cdec_NT) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_NT = &__pyx_type_5_cdec_NT; - if (PyType_Ready(&__pyx_type_5_cdec_NTRef) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "NTRef", (PyObject *)&__pyx_type_5_cdec_NTRef) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_NTRef) < 0) {__pyx_filename = 
__pyx_f[2]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + #if CYTHON_COMPILING_IN_CPYTHON + { + PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5_cdec_NTRef, "__init__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) { + __pyx_wrapperbase_5_cdec_5NTRef___init__ = *((PyWrapperDescrObject *)wrapper)->d_base; + __pyx_wrapperbase_5_cdec_5NTRef___init__.doc = __pyx_doc_5_cdec_5NTRef___init__; + ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5_cdec_5NTRef___init__; + } + } + #endif + if (__Pyx_SetAttrString(__pyx_m, "NTRef", (PyObject *)&__pyx_type_5_cdec_NTRef) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_NTRef = &__pyx_type_5_cdec_NTRef; - if (PyType_Ready(&__pyx_type_5_cdec_TRule) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "TRule", (PyObject *)&__pyx_type_5_cdec_TRule) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_TRule) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + #if CYTHON_COMPILING_IN_CPYTHON + { + PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5_cdec_TRule, "__init__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) { + __pyx_wrapperbase_5_cdec_5TRule___init__ = *((PyWrapperDescrObject *)wrapper)->d_base; + __pyx_wrapperbase_5_cdec_5TRule___init__.doc = __pyx_doc_5_cdec_5TRule___init__; + ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5_cdec_5TRule___init__; + } + } + #endif + if (__Pyx_SetAttrString(__pyx_m, "TRule", (PyObject *)&__pyx_type_5_cdec_TRule) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_TRule = &__pyx_type_5_cdec_TRule; __pyx_type_5_cdec_MRule.tp_base = __pyx_ptype_5_cdec_TRule; - if (PyType_Ready(&__pyx_type_5_cdec_MRule) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "MRule", (PyObject *)&__pyx_type_5_cdec_MRule) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_MRule) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + #if CYTHON_COMPILING_IN_CPYTHON + { + PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5_cdec_MRule, "__init__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) { + __pyx_wrapperbase_5_cdec_5MRule___init__ = *((PyWrapperDescrObject *)wrapper)->d_base; + __pyx_wrapperbase_5_cdec_5MRule___init__.doc = __pyx_doc_5_cdec_5MRule___init__; + ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5_cdec_5MRule___init__; + } + } + #endif + if (__Pyx_SetAttrString(__pyx_m, "MRule", (PyObject *)&__pyx_type_5_cdec_MRule) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 177; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_MRule = &__pyx_type_5_cdec_MRule; - 
if (PyType_Ready(&__pyx_type_5_cdec_Grammar) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "Grammar", (PyObject *)&__pyx_type_5_cdec_Grammar) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_Grammar) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_SetAttrString(__pyx_m, "Grammar", (PyObject *)&__pyx_type_5_cdec_Grammar) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_Grammar = &__pyx_type_5_cdec_Grammar; __pyx_type_5_cdec_TextGrammar.tp_base = __pyx_ptype_5_cdec_Grammar; - if (PyType_Ready(&__pyx_type_5_cdec_TextGrammar) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "TextGrammar", (PyObject *)&__pyx_type_5_cdec_TextGrammar) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 201; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_TextGrammar) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + #if CYTHON_COMPILING_IN_CPYTHON + { + PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5_cdec_TextGrammar, "__init__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) { + __pyx_wrapperbase_5_cdec_11TextGrammar___init__ = *((PyWrapperDescrObject *)wrapper)->d_base; + __pyx_wrapperbase_5_cdec_11TextGrammar___init__.doc = __pyx_doc_5_cdec_11TextGrammar___init__; + ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5_cdec_11TextGrammar___init__; + } + } + #endif + if (__Pyx_SetAttrString(__pyx_m, "TextGrammar", (PyObject *)&__pyx_type_5_cdec_TextGrammar) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_TextGrammar = &__pyx_type_5_cdec_TextGrammar; __pyx_vtabptr_5_cdec_Hypergraph = &__pyx_vtable_5_cdec_Hypergraph; __pyx_vtable_5_cdec_Hypergraph._rng = (MT19937 *(*)(struct __pyx_obj_5_cdec_Hypergraph *))__pyx_f_5_cdec_10Hypergraph__rng; @@ -29058,17 +29215,27 @@ PyMODINIT_FUNC PyInit__cdec(void) __pyx_ptype_5_cdec_Hypergraph = &__pyx_type_5_cdec_Hypergraph; __pyx_vtabptr_5_cdec_HypergraphEdge = &__pyx_vtable_5_cdec_HypergraphEdge; __pyx_vtable_5_cdec_HypergraphEdge.init = (PyObject *(*)(struct __pyx_obj_5_cdec_HypergraphEdge *, Hypergraph *, unsigned int))__pyx_f_5_cdec_14HypergraphEdge_init; - if (PyType_Ready(&__pyx_type_5_cdec_HypergraphEdge) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetVtable(__pyx_type_5_cdec_HypergraphEdge.tp_dict, __pyx_vtabptr_5_cdec_HypergraphEdge) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "HypergraphEdge", (PyObject *)&__pyx_type_5_cdec_HypergraphEdge) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_HypergraphEdge) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_SetVtable(__pyx_type_5_cdec_HypergraphEdge.tp_dict, 
__pyx_vtabptr_5_cdec_HypergraphEdge) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_SetAttrString(__pyx_m, "HypergraphEdge", (PyObject *)&__pyx_type_5_cdec_HypergraphEdge) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_HypergraphEdge = &__pyx_type_5_cdec_HypergraphEdge; __pyx_vtabptr_5_cdec_HypergraphNode = &__pyx_vtable_5_cdec_HypergraphNode; __pyx_vtable_5_cdec_HypergraphNode.init = (PyObject *(*)(struct __pyx_obj_5_cdec_HypergraphNode *, Hypergraph *, unsigned int))__pyx_f_5_cdec_14HypergraphNode_init; - if (PyType_Ready(&__pyx_type_5_cdec_HypergraphNode) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetVtable(__pyx_type_5_cdec_HypergraphNode.tp_dict, __pyx_vtabptr_5_cdec_HypergraphNode) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "HypergraphNode", (PyObject *)&__pyx_type_5_cdec_HypergraphNode) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 216; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_HypergraphNode) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 237; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_SetVtable(__pyx_type_5_cdec_HypergraphNode.tp_dict, __pyx_vtabptr_5_cdec_HypergraphNode) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 237; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_SetAttrString(__pyx_m, "HypergraphNode", (PyObject *)&__pyx_type_5_cdec_HypergraphNode) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 237; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_HypergraphNode = &__pyx_type_5_cdec_HypergraphNode; if (PyType_Ready(&__pyx_type_5_cdec_Lattice) < 0) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + #if CYTHON_COMPILING_IN_CPYTHON + { + PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5_cdec_Lattice, "__init__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) { + __pyx_wrapperbase_5_cdec_7Lattice_2__init__ = *((PyWrapperDescrObject *)wrapper)->d_base; + __pyx_wrapperbase_5_cdec_7Lattice_2__init__.doc = __pyx_doc_5_cdec_7Lattice_2__init__; + ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5_cdec_7Lattice_2__init__; + } + } + #endif if (__Pyx_SetAttrString(__pyx_m, "Lattice", (PyObject *)&__pyx_type_5_cdec_Lattice) < 0) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_Lattice = &__pyx_type_5_cdec_Lattice; if (PyType_Ready(&__pyx_type_5_cdec_Candidate) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -29080,57 +29247,67 @@ PyMODINIT_FUNC PyInit__cdec(void) if (PyType_Ready(&__pyx_type_5_cdec_CandidateSet) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_SetAttrString(__pyx_m, "CandidateSet", (PyObject *)&__pyx_type_5_cdec_CandidateSet) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_CandidateSet = &__pyx_type_5_cdec_CandidateSet; - if (PyType_Ready(&__pyx_type_5_cdec_SegmentEvaluator) < 0) {__pyx_filename = __pyx_f[5]; 
__pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "SegmentEvaluator", (PyObject *)&__pyx_type_5_cdec_SegmentEvaluator) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_SegmentEvaluator) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_SetAttrString(__pyx_m, "SegmentEvaluator", (PyObject *)&__pyx_type_5_cdec_SegmentEvaluator) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 100; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_SegmentEvaluator = &__pyx_type_5_cdec_SegmentEvaluator; - if (PyType_Ready(&__pyx_type_5_cdec_Scorer) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "Scorer", (PyObject *)&__pyx_type_5_cdec_Scorer) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_Scorer) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_SetAttrString(__pyx_m, "Scorer", (PyObject *)&__pyx_type_5_cdec_Scorer) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_Scorer = &__pyx_type_5_cdec_Scorer; - if (PyType_Ready(&__pyx_type_5_cdec_Metric) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "Metric", (PyObject *)&__pyx_type_5_cdec_Metric) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_Metric) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_SetAttrString(__pyx_m, "Metric", (PyObject *)&__pyx_type_5_cdec_Metric) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_Metric = &__pyx_type_5_cdec_Metric; - if (PyType_Ready(&__pyx_type_5_cdec_Decoder) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - if (__Pyx_SetAttrString(__pyx_m, "Decoder", (PyObject *)&__pyx_type_5_cdec_Decoder) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec_Decoder) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + #if CYTHON_COMPILING_IN_CPYTHON + { + PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&__pyx_type_5_cdec_Decoder, "__init__"); if (unlikely(!wrapper)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) { + __pyx_wrapperbase_5_cdec_7Decoder___init__ = *((PyWrapperDescrObject *)wrapper)->d_base; + __pyx_wrapperbase_5_cdec_7Decoder___init__.doc = __pyx_doc_5_cdec_7Decoder___init__; + ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5_cdec_7Decoder___init__; + } + } + #endif + if (__Pyx_SetAttrString(__pyx_m, "Decoder", (PyObject *)&__pyx_type_5_cdec_Decoder) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec_Decoder = &__pyx_type_5_cdec_Decoder; - if 
(PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct____iter__) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct____iter__) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct____iter__ = &__pyx_type_5_cdec___pyx_scope_struct____iter__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_1___iter__) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_1___iter__) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_1___iter__ = &__pyx_type_5_cdec___pyx_scope_struct_1___iter__; if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_2__phrase) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_2__phrase = &__pyx_type_5_cdec___pyx_scope_struct_2__phrase; if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_3_genexpr) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_3_genexpr = &__pyx_type_5_cdec___pyx_scope_struct_3_genexpr; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_4___get__) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_4___get__) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_4___get__ = &__pyx_type_5_cdec___pyx_scope_struct_4___get__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_5___str__) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_5___str__) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_5___str__ = &__pyx_type_5_cdec___pyx_scope_struct_5___str__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_6_genexpr) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_6_genexpr) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_6_genexpr = &__pyx_type_5_cdec___pyx_scope_struct_6_genexpr; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_7___iter__) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 184; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_7___iter__) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 199; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_7___iter__ = &__pyx_type_5_cdec___pyx_scope_struct_7___iter__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_8_kbest) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_8_kbest) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__pyx_ptype_5_cdec___pyx_scope_struct_8_kbest = &__pyx_type_5_cdec___pyx_scope_struct_8_kbest; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_9_kbest_trees) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_9_kbest_trees) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_9_kbest_trees = &__pyx_type_5_cdec___pyx_scope_struct_9_kbest_trees; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_10_kbest_features) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_10_kbest_features) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_10_kbest_features = &__pyx_type_5_cdec___pyx_scope_struct_10_kbest_features; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_11_sample) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_11_sample) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_11_sample = &__pyx_type_5_cdec___pyx_scope_struct_11_sample; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_12_sample_trees) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_12_sample_trees) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_12_sample_trees = &__pyx_type_5_cdec___pyx_scope_struct_12_sample_trees; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_13___get__) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 136; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_13___get__) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_13___get__ = &__pyx_type_5_cdec___pyx_scope_struct_13___get__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_14___get__) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_14___get__) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_14___get__ = &__pyx_type_5_cdec___pyx_scope_struct_14___get__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_15___get__) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_15___get__) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_15___get__ = &__pyx_type_5_cdec___pyx_scope_struct_15___get__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_16___get__) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_16___get__) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 247; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_16___get__ = &__pyx_type_5_cdec___pyx_scope_struct_16___get__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_17___get__) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 232; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_17___get__) < 0) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_17___get__ = &__pyx_type_5_cdec___pyx_scope_struct_17___get__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_18___iter__) < 0) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_18___iter__) < 0) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_18___iter__ = &__pyx_type_5_cdec___pyx_scope_struct_18___iter__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_19_todot) < 0) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_19_todot) < 0) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_19_todot = &__pyx_type_5_cdec___pyx_scope_struct_19_todot; if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_20_lines) < 0) {__pyx_filename = __pyx_f[4]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_20_lines = &__pyx_type_5_cdec___pyx_scope_struct_20_lines; @@ -29138,21 +29315,21 @@ PyMODINIT_FUNC PyInit__cdec(void) __pyx_ptype_5_cdec___pyx_scope_struct_21___iter__ = &__pyx_type_5_cdec___pyx_scope_struct_21___iter__; if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_22___iter__) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_22___iter__ = &__pyx_type_5_cdec___pyx_scope_struct_22___iter__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_23__make_config) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_23__make_config) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5_cdec___pyx_scope_struct_23__make_config = &__pyx_type_5_cdec___pyx_scope_struct_23__make_config; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_24___cinit__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_5_cdec___pyx_scope_struct_24___cinit__ = &__pyx_type_5_cdec___pyx_scope_struct_24___cinit__; - if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_25_genexpr) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_24___init__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_ptype_5_cdec___pyx_scope_struct_24___init__ = &__pyx_type_5_cdec___pyx_scope_struct_24___init__; + if (PyType_Ready(&__pyx_type_5_cdec___pyx_scope_struct_25_genexpr) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__pyx_ptype_5_cdec___pyx_scope_struct_25_genexpr = &__pyx_type_5_cdec___pyx_scope_struct_25_genexpr; /*--- Type import code ---*/ - __pyx_ptype_4cdec_2sa_3_sa_FloatList = __Pyx_ImportType("cdec.sa._sa", "FloatList", sizeof(struct __pyx_obj_4cdec_2sa_3_sa_FloatList), 1); if (unlikely(!__pyx_ptype_4cdec_2sa_3_sa_FloatList)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_vtabptr_4cdec_2sa_3_sa_FloatList = (struct __pyx_vtabstruct_4cdec_2sa_3_sa_FloatList*)__Pyx_GetVtable(__pyx_ptype_4cdec_2sa_3_sa_FloatList->tp_dict); if (unlikely(!__pyx_vtabptr_4cdec_2sa_3_sa_FloatList)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_4cdec_2sa_3_sa_IntList = __Pyx_ImportType("cdec.sa._sa", "IntList", sizeof(struct __pyx_obj_4cdec_2sa_3_sa_IntList), 1); if (unlikely(!__pyx_ptype_4cdec_2sa_3_sa_IntList)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_vtabptr_4cdec_2sa_3_sa_IntList = (struct __pyx_vtabstruct_4cdec_2sa_3_sa_IntList*)__Pyx_GetVtable(__pyx_ptype_4cdec_2sa_3_sa_IntList->tp_dict); if (unlikely(!__pyx_vtabptr_4cdec_2sa_3_sa_IntList)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_4cdec_2sa_3_sa_FeatureVector = __Pyx_ImportType("cdec.sa._sa", "FeatureVector", sizeof(struct __pyx_obj_4cdec_2sa_3_sa_FeatureVector), 1); if (unlikely(!__pyx_ptype_4cdec_2sa_3_sa_FeatureVector)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_4cdec_2sa_3_sa_Phrase = __Pyx_ImportType("cdec.sa._sa", "Phrase", sizeof(struct __pyx_obj_4cdec_2sa_3_sa_Phrase), 1); if (unlikely(!__pyx_ptype_4cdec_2sa_3_sa_Phrase)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_vtabptr_4cdec_2sa_3_sa_Phrase = (struct __pyx_vtabstruct_4cdec_2sa_3_sa_Phrase*)__Pyx_GetVtable(__pyx_ptype_4cdec_2sa_3_sa_Phrase->tp_dict); if (unlikely(!__pyx_vtabptr_4cdec_2sa_3_sa_Phrase)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} - __pyx_ptype_4cdec_2sa_3_sa_Rule = __Pyx_ImportType("cdec.sa._sa", "Rule", sizeof(struct __pyx_obj_4cdec_2sa_3_sa_Rule), 1); if (unlikely(!__pyx_ptype_4cdec_2sa_3_sa_Rule)) {__pyx_filename = __pyx_f[6]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_ptype_4cdec_2sa_3_sa_FloatList = __Pyx_ImportType("cdec.sa._sa", "FloatList", sizeof(struct __pyx_obj_4cdec_2sa_3_sa_FloatList), 1); if (unlikely(!__pyx_ptype_4cdec_2sa_3_sa_FloatList)) {__pyx_filename = __pyx_f[7]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_vtabptr_4cdec_2sa_3_sa_FloatList = (struct __pyx_vtabstruct_4cdec_2sa_3_sa_FloatList*)__Pyx_GetVtable(__pyx_ptype_4cdec_2sa_3_sa_FloatList->tp_dict); if (unlikely(!__pyx_vtabptr_4cdec_2sa_3_sa_FloatList)) {__pyx_filename = __pyx_f[7]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_ptype_4cdec_2sa_3_sa_IntList = __Pyx_ImportType("cdec.sa._sa", "IntList", sizeof(struct __pyx_obj_4cdec_2sa_3_sa_IntList), 1); if (unlikely(!__pyx_ptype_4cdec_2sa_3_sa_IntList)) {__pyx_filename = __pyx_f[7]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_vtabptr_4cdec_2sa_3_sa_IntList = (struct __pyx_vtabstruct_4cdec_2sa_3_sa_IntList*)__Pyx_GetVtable(__pyx_ptype_4cdec_2sa_3_sa_IntList->tp_dict); if 
(unlikely(!__pyx_vtabptr_4cdec_2sa_3_sa_IntList)) {__pyx_filename = __pyx_f[7]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_ptype_4cdec_2sa_3_sa_FeatureVector = __Pyx_ImportType("cdec.sa._sa", "FeatureVector", sizeof(struct __pyx_obj_4cdec_2sa_3_sa_FeatureVector), 1); if (unlikely(!__pyx_ptype_4cdec_2sa_3_sa_FeatureVector)) {__pyx_filename = __pyx_f[7]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_ptype_4cdec_2sa_3_sa_Phrase = __Pyx_ImportType("cdec.sa._sa", "Phrase", sizeof(struct __pyx_obj_4cdec_2sa_3_sa_Phrase), 1); if (unlikely(!__pyx_ptype_4cdec_2sa_3_sa_Phrase)) {__pyx_filename = __pyx_f[7]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_vtabptr_4cdec_2sa_3_sa_Phrase = (struct __pyx_vtabstruct_4cdec_2sa_3_sa_Phrase*)__Pyx_GetVtable(__pyx_ptype_4cdec_2sa_3_sa_Phrase->tp_dict); if (unlikely(!__pyx_vtabptr_4cdec_2sa_3_sa_Phrase)) {__pyx_filename = __pyx_f[7]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_ptype_4cdec_2sa_3_sa_Rule = __Pyx_ImportType("cdec.sa._sa", "Rule", sizeof(struct __pyx_obj_4cdec_2sa_3_sa_Rule), 1); if (unlikely(!__pyx_ptype_4cdec_2sa_3_sa_Rule)) {__pyx_filename = __pyx_f[7]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ __pyx_t_1 = __Pyx_ImportModule("cdec.sa._sa"); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -29163,7 +29340,7 @@ PyMODINIT_FUNC PyInit__cdec(void) Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*--- Execution code ---*/ - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":3 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":3 * cimport grammar * cimport cdec.sa._sa as _sa * import cdec.sa._sa as _sa # <<<<<<<<<<<<<< @@ -29172,16 +29349,16 @@ PyMODINIT_FUNC PyInit__cdec(void) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(((PyObject *)__pyx_n_s_56)); - PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s_56)); - __Pyx_GIVEREF(((PyObject *)__pyx_n_s_56)); - __pyx_t_3 = __Pyx_Import(((PyObject *)__pyx_n_s_55), ((PyObject *)__pyx_t_2), -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __Pyx_INCREF(((PyObject *)__pyx_n_s_54)); + PyList_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_n_s_54)); + __Pyx_GIVEREF(((PyObject *)__pyx_n_s_54)); + __pyx_t_3 = __Pyx_Import(((PyObject *)__pyx_n_s_53), ((PyObject *)__pyx_t_2), -1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; if (PyObject_SetAttr(__pyx_m, __pyx_n_s___sa, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 3; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/grammar.pxi":5 + /* "/home/vchahune/tools/cdec/python/src/grammar.pxi":5 * import cdec.sa._sa as _sa * * def _phrase(phrase): # <<<<<<<<<<<<<< @@ -29193,37 +29370,37 @@ PyMODINIT_FUNC PyInit__cdec(void) if (PyObject_SetAttr(__pyx_m, __pyx_n_s___phrase, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - /* 
"/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":189 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":194 * return [] * * BLEU = Scorer('IBM_BLEU') # <<<<<<<<<<<<<< * TER = Scorer('TER') * CER = Scorer('CER') */ - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Scorer)), ((PyObject *)__pyx_k_tuple_60), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Scorer)), ((PyObject *)__pyx_k_tuple_58), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 194; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__BLEU, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(__pyx_m, __pyx_n_s__BLEU, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 194; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":190 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":195 * * BLEU = Scorer('IBM_BLEU') * TER = Scorer('TER') # <<<<<<<<<<<<<< * CER = Scorer('CER') */ - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Scorer)), ((PyObject *)__pyx_k_tuple_61), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Scorer)), ((PyObject *)__pyx_k_tuple_59), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__TER, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(__pyx_m, __pyx_n_s__TER, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 195; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - /* "/Users/vchahun/Sandbox/cdec/python/src/mteval.pxi":191 + /* "/home/vchahune/tools/cdec/python/src/mteval.pxi":196 * BLEU = Scorer('IBM_BLEU') * TER = Scorer('TER') * CER = Scorer('CER') # <<<<<<<<<<<<<< */ - __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Scorer)), ((PyObject *)__pyx_k_tuple_62), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_5_cdec_Scorer)), ((PyObject *)__pyx_k_tuple_60), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s__CER, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(__pyx_m, __pyx_n_s__CER, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[5]; __pyx_lineno = 196; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "_cdec.pyx":22 @@ -29290,24 +29467,24 @@ PyMODINIT_FUNC PyInit__cdec(void) * class ParseFailed(Exception): pass * * def set_silent(yn): # <<<<<<<<<<<<<< + * """set_silent(bool): Configure the verbosity of cdec.""" * SetSilent(yn) - * */ __pyx_t_3 = 
PyCFunction_NewEx(&__pyx_mdef_5_cdec_3set_silent, NULL, __pyx_n_s___cdec); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__set_silent, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - /* "_cdec.pyx":31 + /* "_cdec.pyx":32 * SetSilent(yn) * * def _make_config(config): # <<<<<<<<<<<<<< * for key, value in config.items(): * if isinstance(value, dict): */ - __pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_5_cdec_5_make_config, NULL, __pyx_n_s___cdec); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_t_3 = PyCFunction_NewEx(&__pyx_mdef_5_cdec_5_make_config, NULL, __pyx_n_s___cdec); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); - if (PyObject_SetAttr(__pyx_m, __pyx_n_s___make_config, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(__pyx_m, __pyx_n_s___make_config, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "_cdec.pyx":1 @@ -29319,6 +29496,14 @@ PyMODINIT_FUNC PyInit__cdec(void) __Pyx_GOTREF(((PyObject *)__pyx_t_3)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_3)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; + + /* "string.from_py":11 + * + * @cname("__pyx_convert_string_from_py_") + * cdef string __pyx_convert_string_from_py_(object o) except *: # <<<<<<<<<<<<<< + * return string(<char*>o, len(o)) + * + */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); @@ -29407,33 +29592,38 @@ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyOb static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); - Py_XINCREF(value); - Py_XINCREF(tb); - if (tb == Py_None) { - Py_DECREF(tb); - tb = 0; - } - else if (tb != NULL && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - if (value == NULL) { - value = Py_None; + if (!value || value == Py_None) + value = NULL; + else Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } } #if PY_VERSION_HEX < 0x02050000 - if (!PyClass_Check(type)) + if (PyClass_Check(type)) { #else - if (!PyType_Check(type)) + if (PyType_Check(type)) { #endif - { - if (value != Py_None) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } - Py_DECREF(value); value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { @@ -29466,6 +29656,7 @@ raise_error: } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) 
{ + PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { @@ -29483,12 +29674,36 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject } value = type; type = (PyObject*) Py_TYPE(value); - } else if (!PyExceptionClass_Check(type)) { + } else if (PyExceptionClass_Check(type)) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } + else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyEval_CallObject(type, args); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } - if (cause) { + if (cause && cause != Py_None) { PyObject *fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); @@ -29505,9 +29720,6 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject "BaseException"); goto bad; } - if (!value) { - value = PyObject_CallObject(type, NULL); - } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); @@ -29521,6 +29733,7 @@ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject } } bad: + Py_XDECREF(owned_instance); return; } #endif @@ -29545,7 +29758,7 @@ static void __Pyx_RaiseArgtupleInvalid( more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, - "%s() takes %s %" PY_FORMAT_SIZE_T "d positional argument%s (%" PY_FORMAT_SIZE_T "d given)", + "%s() takes %s %" CYTHON_FORMAT_SSIZE_T "d positional argument%s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } @@ -29565,10 +29778,9 @@ static CYTHON_INLINE int __Pyx_CheckKeywordStrings( while (PyDict_Next(kwdict, &pos, &key, 0)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) - #else - if (unlikely(!PyUnicode_Check(key))) #endif - goto invalid_keyword_type; + if (unlikely(!PyUnicode_Check(key))) + goto invalid_keyword_type; } if ((!kw_allowed) && unlikely(key)) goto invalid_keyword; @@ -29635,7 +29847,7 @@ static void __Pyx_RaiseDoubleKeywordsError( "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AS_STRING(kw_name)); + PyString_AsString(kw_name)); #endif } @@ -29656,48 +29868,72 @@ static int __Pyx_ParseOptionalKeywords( while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; - } else { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { - #else - if (unlikely(!PyUnicode_Check(key))) { - #endif - goto invalid_keyword_type; - } else { - for (name = first_kw_arg; *name; name++) { - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) break; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) break; - #endif - } - if (*name) { + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { values[name-argnames] = value; - } else { - for (name=argnames; name != first_kw_arg; name++) { - if (**name == key) goto arg_passed_twice; - #if PY_MAJOR_VERSION >= 3 - if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && - PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; - #else - if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && - _PyString_Eq(**name, key)) goto arg_passed_twice; - #endif - } - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; } + name++; } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; } } return 0; arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, **name); + __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, @@ -29718,12 +29954,12 @@ bad: static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %" PY_FORMAT_SIZE_T "d)", expected); + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, - "need more than %" PY_FORMAT_SIZE_T "d value%s to unpack", + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%s to unpack", index, (index == 1) ? "" : "s"); } @@ -30091,9 +30327,8 @@ static CYTHON_INLINE WordID __Pyx_PyInt_from_py_WordID(PyObject* x) { } } -static PyObject* __Pyx_Globals() { +static PyObject* __Pyx_Globals(void) { Py_ssize_t i; - /*PyObject *d;*/ PyObject *names = NULL; PyObject *globals = PyObject_GetAttrString(__pyx_m, "__dict__"); if (!globals) { @@ -30104,25 +30339,36 @@ static PyObject* __Pyx_Globals() { names = PyObject_Dir(__pyx_m); if (!names) goto bad; - for (i = 0; i < PyList_GET_SIZE(names); i++) { + for (i = PyList_GET_SIZE(names)-1; i >= 0; i--) { +#if CYTHON_COMPILING_IN_PYPY + PyObject* name = PySequence_GetItem(names, i); + if (!name) + goto bad; +#else PyObject* name = PyList_GET_ITEM(names, i); +#endif if (!PyDict_Contains(globals, name)) { - PyObject* value = PyObject_GetAttr(__pyx_m, PyList_GET_ITEM(names, i)); - if (!value) + PyObject* value = PyObject_GetAttr(__pyx_m, name); + if (!value) { +#if CYTHON_COMPILING_IN_PYPY + Py_DECREF(name); +#endif goto bad; + } if (PyDict_SetItem(globals, name, value) < 0) { +#if CYTHON_COMPILING_IN_PYPY + Py_DECREF(name); +#endif Py_DECREF(value); goto bad; } } +#if CYTHON_COMPILING_IN_PYPY + Py_DECREF(name); +#endif } Py_DECREF(names); return globals; - /* - d = PyDictProxy_New(globals); - Py_DECREF(globals); - return d; - */ bad: Py_XDECREF(names); Py_XDECREF(globals); @@ -30538,86 +30784,6 @@ static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyOb Py_INCREF(tuple); } -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY - return PyObject_RichCompareBool(s1, s2, equals); -#else - if (s1 == s2) { - return (equals == Py_EQ); - } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { - if (PyBytes_GET_SIZE(s1) != PyBytes_GET_SIZE(s2)) { - return (equals == Py_NE); - } else if (PyBytes_GET_SIZE(s1) == 1) { - if (equals == Py_EQ) - return (PyBytes_AS_STRING(s1)[0] == PyBytes_AS_STRING(s2)[0]); - else - return (PyBytes_AS_STRING(s1)[0] != PyBytes_AS_STRING(s2)[0]); - } else { - int result = memcmp(PyBytes_AS_STRING(s1), PyBytes_AS_STRING(s2), (size_t)PyBytes_GET_SIZE(s1)); - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -#endif -} - -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY - return PyObject_RichCompareBool(s1, s2, equals); -#else - if (s1 == s2) { - return (equals == Py_EQ); - } else if (PyUnicode_CheckExact(s1) & PyUnicode_CheckExact(s2)) { - #if CYTHON_PEP393_ENABLED - if ((PyUnicode_READY(s1) < 0) || (PyUnicode_READY(s2) < 0)) - return -1; - if (PyUnicode_GET_LENGTH(s1) != PyUnicode_GET_LENGTH(s2)) { - return (equals == Py_NE); - } else if (PyUnicode_GET_LENGTH(s1) == 1) { - Py_UCS4 ch1 = PyUnicode_READ_CHAR(s1, 0); - Py_UCS4 ch2 = PyUnicode_READ_CHAR(s2, 0); - return (equals == Py_EQ) ? (ch1 == ch2) : (ch1 != ch2); - #else - if (PyUnicode_GET_SIZE(s1) != PyUnicode_GET_SIZE(s2)) { - return (equals == Py_NE); - } else if (PyUnicode_GET_SIZE(s1) == 1) { - Py_UNICODE ch1 = PyUnicode_AS_UNICODE(s1)[0]; - Py_UNICODE ch2 = PyUnicode_AS_UNICODE(s2)[0]; - return (equals == Py_EQ) ? (ch1 == ch2) : (ch1 != ch2); - #endif - } else { - int result = PyUnicode_Compare(s1, s2); - if ((result == -1) && unlikely(PyErr_Occurred())) - return -1; - return (equals == Py_EQ) ? (result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyUnicode_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyUnicode_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -#endif -} - static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; @@ -31018,25 +31184,6 @@ static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* } } -static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, - CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename) { - PyObject *old_exc, *old_val, *old_tb; - PyObject *ctx; - __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); - #if PY_MAJOR_VERSION < 3 - ctx = PyString_FromString(name); - #else - ctx = PyUnicode_FromString(name); - #endif - __Pyx_ErrRestore(old_exc, old_val, old_tb); - if (!ctx) { - PyErr_WriteUnraisable(Py_None); - } else { - PyErr_WriteUnraisable(ctx); - Py_DECREF(ctx); - } -} - static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_COMPILING_IN_CPYTHON @@ -31459,7 +31606,7 @@ static PyMemberDef __pyx_Generator_memberlist[] = { #if PY_VERSION_HEX >= 0x02060000 T_BOOL, #else - T_INT, + T_BYTE, #endif offsetof(__pyx_GeneratorObject, is_running), READONLY, @@ -31557,6 +31704,25 @@ static int __pyx_Generator_init(void) { return 0; } +static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, + CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename) { + PyObject *old_exc, *old_val, *old_tb; + PyObject *ctx; + __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); + #if PY_MAJOR_VERSION < 3 + ctx = 
PyString_FromString(name); + #else + ctx = PyUnicode_FromString(name); + #endif + __Pyx_ErrRestore(old_exc, old_val, old_tb); + if (!ctx) { + PyErr_WriteUnraisable(Py_None); + } else { + PyErr_WriteUnraisable(ctx); + Py_DECREF(ctx); + } +} + static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); @@ -31593,6 +31759,23 @@ bad: return -1; } +#ifndef __PYX_HAVE_RT_ImportModule +#define __PYX_HAVE_RT_ImportModule +static PyObject *__Pyx_ImportModule(const char *name) { + PyObject *py_name = 0; + PyObject *py_module = 0; + py_name = __Pyx_PyIdentifier_FromString(name); + if (!py_name) + goto bad; + py_module = PyImport_Import(py_name); + Py_DECREF(py_name); + return py_module; +bad: + Py_XDECREF(py_name); + return 0; +} +#endif + #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, @@ -31645,23 +31828,6 @@ bad: } #endif -#ifndef __PYX_HAVE_RT_ImportModule -#define __PYX_HAVE_RT_ImportModule -static PyObject *__Pyx_ImportModule(const char *name) { - PyObject *py_name = 0; - PyObject *py_module = 0; - py_name = __Pyx_PyIdentifier_FromString(name); - if (!py_name) - goto bad; - py_module = PyImport_Import(py_name); - Py_DECREF(py_name); - return py_module; -bad: - Py_XDECREF(py_name); - return 0; -} -#endif - static void* __Pyx_GetVtable(PyObject *dict) { void* ptr; PyObject *ob = PyMapping_GetItemString(dict, (char *)"__pyx_vtable__"); @@ -31700,7 +31866,7 @@ static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (** PyModule_GetName(module), funcname); goto bad; } -#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0) +#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3 && PY_MINOR_VERSION==0) if (!PyCapsule_IsValid(cobj, sig)) { PyErr_Format(PyExc_TypeError, "C function %s.%s has wrong signature (expected %s, got %s)", diff --git a/python/src/_cdec.pyx b/python/src/_cdec.pyx index 6c6c8eee..71e083e4 100644 --- a/python/src/_cdec.pyx +++ b/python/src/_cdec.pyx @@ -3,7 +3,7 @@ from libcpp.vector cimport vector from utils cimport * cimport decoder -cdef char* as_str(data, char* error_msg='Cannot convert type %s to str'): +cdef bytes as_str(data, char* error_msg='Cannot convert type %s to str'): cdef bytes ret if isinstance(data, unicode): ret = data.encode('utf8') @@ -26,6 +26,7 @@ class InvalidConfig(Exception): pass class ParseFailed(Exception): pass def set_silent(yn): + """set_silent(bool): Configure the verbosity of cdec.""" SetSilent(yn) def _make_config(config): @@ -43,12 +44,10 @@ cdef class Decoder: cdef decoder.Decoder* dec cdef DenseVector weights - def __cinit__(self, config_str=None, **config): - """ Configuration can be given as a string: - Decoder('formalism = scfg') - or using keyword arguments: - Decoder(formalism='scfg') - """ + def __init__(self, config_str=None, **config): + """Decoder('formalism = scfg') -> initialize from configuration string + Decoder(formalism='scfg') -> initialize from named parameters + Create a decoder using a given configuration. 
Formalism is required.""" if config_str is None: formalism = config.get('formalism', None) if formalism not in ('scfg', 'fst', 'lextrans', 'pb', @@ -88,6 +87,7 @@ cdef class Decoder: return str(conf[0]['formalism'].as_str().c_str()) def read_weights(self, weights): + """decoder.read_weights(filename): Read decoder weights from a file.""" with open(weights) as fp: for line in fp: if line.strip().startswith('#'): continue @@ -95,6 +95,8 @@ cdef class Decoder: self.weights[fname.strip()] = float(value) def translate(self, sentence, grammar=None): + """decoder.translate(sentence, grammar=None) -> Hypergraph + Translate a sentence (string/Lattice) with a grammar (string/list of rules).""" cdef bytes input_str if isinstance(sentence, basestring): input_str = as_str(sentence.strip()) @@ -104,11 +106,11 @@ cdef class Decoder: raise TypeError('Cannot translate input type %s' % type(sentence)) if grammar: if isinstance(grammar, basestring): - self.dec.AddSupplementalGrammarFromString(string(as_str(grammar))) + self.dec.AddSupplementalGrammarFromString(as_str(grammar)) else: self.dec.AddSupplementalGrammar(TextGrammar(grammar).grammar[0]) cdef decoder.BasicObserver observer = decoder.BasicObserver() - self.dec.Decode(string(input_str), &observer) + self.dec.Decode(input_str, &observer) if observer.hypergraph == NULL: raise ParseFailed() cdef Hypergraph hg = Hypergraph() diff --git a/python/src/grammar.pxi b/python/src/grammar.pxi index b05d07a0..d523e4d2 100644 --- a/python/src/grammar.pxi +++ b/python/src/grammar.pxi @@ -9,6 +9,7 @@ cdef class NT: cdef public bytes cat cdef public unsigned ref def __init__(self, bytes cat, unsigned ref=0): + """NT(bytes cat, int ref=0) -> Non-terminal from category `cat`.""" self.cat = cat self.ref = ref @@ -20,6 +21,7 @@ cdef class NT: cdef class NTRef: cdef public unsigned ref def __init__(self, unsigned ref): + """NTRef(int ref) -> Non-terminal reference.""" self.ref = ref def __str__(self): @@ -48,6 +50,12 @@ cdef class TRule: cdef shared_ptr[grammar.TRule]* rule def __init__(self, lhs, f, e, scores, a=None): + """TRule(lhs, f, e, scores, a=None) -> Translation rule. 
+ lhs: left hand side non-terminal + f: source phrase (list of words/NT) + e: target phrase (list of words/NTRef) + scores: dictionary of feature scores + a: optional list of alignment points""" self.rule = new shared_ptr[grammar.TRule](new grammar.TRule()) self.lhs = lhs self.e = e @@ -87,9 +95,10 @@ cdef class TRule: cdef int idx = 0 for i in range(len(f)): if isinstance(f[i], NT): - f_[0][i] = -TDConvert(<char *>f[i].cat) + f_[0][i] = -TDConvert((<NT> f[i]).cat) else: - f_[0][i] = TDConvert(as_str(f[i])) + fi = as_str(f[i]) + f_[0][i] = TDConvert(fi) property e: def __get__(self): @@ -115,7 +124,8 @@ cdef class TRule: if isinstance(e[i], NTRef): e_[0][i] = 1-e[i].ref else: - e_[0][i] = TDConvert(as_str(e[i])) + ei = as_str(e[i]) + e_[0][i] = TDConvert(ei) property a: def __get__(self): @@ -145,7 +155,8 @@ cdef class TRule: cdef int fid cdef float fval for fname, fval in scores.items(): - fid = FDConvert(as_str(fname)) + fn = as_str(fname) + fid = FDConvert(fn) if fid < 0: raise KeyError(fname) scores_.set_value(fid, fval) @@ -156,7 +167,7 @@ cdef class TRule: def __set__(self, lhs): if not isinstance(lhs, NT): lhs = NT(lhs) - self.rule.get().lhs_ = -TDConvert(<char *>lhs.cat) + self.rule.get().lhs_ = -TDConvert((<NT> lhs).cat) def __str__(self): scores = ' '.join('%s=%s' % feat for feat in self.scores) @@ -164,7 +175,11 @@ cdef class TRule: _phrase(self.f), _phrase(self.e), scores) cdef class MRule(TRule): - def __init__(self, lhs, rhs, scores, a=None): + def __init__(self, lhs, rhs, scores): + """MRule(lhs, rhs, scores) -> Monolingual rule. + lhs: left hand side non-terminal + rhs: right hand side phrase (list of words/NT) + scores: dictionary of feature scores""" cdef unsigned i = 1 e = [] for s in rhs: @@ -173,7 +188,7 @@ cdef class MRule(TRule): i += 1 else: e.append(s) - super(MRule, self).__init__(lhs, rhs, e, scores, a) + super(MRule, self).__init__(lhs, rhs, e, scores, None) cdef class Grammar: cdef shared_ptr[grammar.Grammar]* grammar @@ -196,10 +211,12 @@ cdef class Grammar: str(self.grammar.get().GetGrammarName().c_str()) def __set__(self, name): - self.grammar.get().SetGrammarName(string(<char *>name)) + name = as_str(name) + self.grammar.get().SetGrammarName(name) cdef class TextGrammar(Grammar): - def __cinit__(self, rules): + def __init__(self, rules): + """TextGrammar(rules) -> SCFG Grammar containing the rules.""" self.grammar = new shared_ptr[grammar.Grammar](new grammar.TextGrammar()) cdef grammar.TextGrammar* _g = <grammar.TextGrammar*> self.grammar.get() for trule in rules: diff --git a/python/src/hypergraph.pxi b/python/src/hypergraph.pxi index bb6141df..5b675531 100644 --- a/python/src/hypergraph.pxi +++ b/python/src/hypergraph.pxi @@ -16,24 +16,33 @@ cdef class Hypergraph: return self.rng def viterbi(self): + """hg.viterbi() -> String for the best hypothesis in the hypergraph.""" cdef vector[WordID] trans hypergraph.ViterbiESentence(self.hg[0], &trans) return unicode(GetString(trans).c_str(), 'utf8') def viterbi_trees(self): + """hg.viterbi_trees() -> (f_tree, e_tree) + f_tree: Source tree for the best hypothesis in the hypergraph. + e_tree: Target tree for the best hypothesis in the hypergraph.
+ """ f_tree = unicode(hypergraph.ViterbiFTree(self.hg[0]).c_str(), 'utf8') e_tree = unicode(hypergraph.ViterbiETree(self.hg[0]).c_str(), 'utf8') return (f_tree, e_tree) def viterbi_features(self): + """hg.viterbi_features() -> SparseVector with the features corresponding + to the best derivation in the hypergraph.""" cdef SparseVector fmap = SparseVector.__new__(SparseVector) fmap.vector = new FastSparseVector[weight_t](hypergraph.ViterbiFeatures(self.hg[0])) return fmap def viterbi_joshua(self): + """hg.viterbi_joshua() -> Joshua representation of the best derivation.""" return unicode(hypergraph.JoshuaVisualizationString(self.hg[0]).c_str(), 'utf8') def kbest(self, size): + """hg.kbest(size) -> List of k-best hypotheses in the hypergraph.""" cdef kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal]* derivations = new kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal](self.hg[0], size) cdef kbest.KBestDerivations[vector[WordID], kbest.ESentenceTraversal].Derivation* derivation cdef unsigned k @@ -46,6 +55,7 @@ cdef class Hypergraph: del derivations def kbest_trees(self, size): + """hg.kbest_trees(size) -> List of k-best trees in the hypergraph.""" cdef kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal]* f_derivations = new kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal](self.hg[0], size) cdef kbest.KBestDerivations[vector[WordID], kbest.FTreeTraversal].Derivation* f_derivation cdef kbest.KBestDerivations[vector[WordID], kbest.ETreeTraversal]* e_derivations = new kbest.KBestDerivations[vector[WordID], kbest.ETreeTraversal](self.hg[0], size) @@ -64,6 +74,7 @@ cdef class Hypergraph: del e_derivations def kbest_features(self, size): + """hg.kbest_features(size) -> List of k-best feature vectors in the hypergraph.""" cdef kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal]* derivations = new kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal](self.hg[0], size) cdef kbest.KBestDerivations[FastSparseVector[weight_t], kbest.FeatureVectorTraversal].Derivation* derivation cdef SparseVector fmap @@ -79,6 +90,7 @@ cdef class Hypergraph: del derivations def sample(self, unsigned n): + """hg.sample(n) -> Sample of n hypotheses from the hypergraph.""" cdef vector[hypergraph.Hypothesis]* hypos = new vector[hypergraph.Hypothesis]() hypergraph.sample_hypotheses(self.hg[0], n, self._rng(), hypos) cdef unsigned k @@ -89,6 +101,7 @@ cdef class Hypergraph: del hypos def sample_trees(self, unsigned n): + """hg.sample_trees(n) -> Sample of n trees from the hypergraph.""" cdef vector[string]* trees = new vector[string]() hypergraph.sample_trees(self.hg[0], n, self._rng(), trees) cdef unsigned k @@ -99,6 +112,7 @@ cdef class Hypergraph: del trees def intersect(self, inp): + """hg.intersect(Lattice/string): Intersect the hypergraph with the provided reference.""" cdef Lattice lat if isinstance(inp, Lattice): lat = <Lattice> inp @@ -109,6 +123,9 @@ cdef class Hypergraph: return hypergraph.Intersect(lat.lattice[0], self.hg) def prune(self, beam_alpha=0, density=0, **kwargs): + """hg.prune(beam_alpha=0, density=0): Prune the hypergraph.
+ beam_alpha: use beam pruning + density: use density pruning""" cdef hypergraph.EdgeMask* preserve_mask = NULL if 'csplit_preserve_full_word' in kwargs: preserve_mask = new hypergraph.EdgeMask(self.hg.edges_.size()) @@ -118,13 +135,16 @@ cdef class Hypergraph: del preserve_mask def lattice(self): # TODO direct hg -> lattice conversion in cdec + """hg.lattice() -> Lattice corresponding to the hypergraph.""" cdef bytes plf = hypergraph.AsPLF(self.hg[0], True).c_str() return Lattice(eval(plf)) def plf(self): + """hg.plf() -> Lattice PLF representation corresponding to the hypergraph.""" return bytes(hypergraph.AsPLF(self.hg[0], True).c_str()) def reweight(self, weights): + """hg.reweight(SparseVector/DenseVector): Reweight the hypergraph with a new vector.""" if isinstance(weights, SparseVector): self.hg.Reweight((<SparseVector> weights).vector[0]) elif isinstance(weights, DenseVector): @@ -153,6 +173,7 @@ cdef class Hypergraph: return self.hg.NumberOfPaths() def inside_outside(self): + """hg.inside_outside() -> SparseVector with inside-outside scores for each feature.""" cdef FastSparseVector[prob_t]* result = new FastSparseVector[prob_t]() cdef prob_t z = hypergraph.InsideOutside(self.hg[0], result) result[0] /= z diff --git a/python/src/lattice.pxi b/python/src/lattice.pxi index 57e340d2..8000b61e 100644 --- a/python/src/lattice.pxi +++ b/python/src/lattice.pxi @@ -7,16 +7,16 @@ cdef class Lattice: self.lattice = new lattice.Lattice() def __init__(self, inp): + """Lattice(tuple) -> Lattice from node list. + Lattice(string) -> Lattice from PLF representation.""" if isinstance(inp, tuple): self.lattice.resize(len(inp)) for i, arcs in enumerate(inp): self[i] = arcs + elif isinstance(inp, basestring): + lattice.ConvertTextOrPLF(as_str(inp), self.lattice) else: - if isinstance(inp, unicode): - inp = inp.encode('utf8') - if not isinstance(inp, str): - raise TypeError('cannot create lattice from %s' % type(inp)) - lattice.ConvertTextOrPLF(string(<char *>inp), self.lattice) + raise TypeError('cannot create lattice from %s' % type(inp)) def __dealloc__(self): del self.lattice @@ -39,9 +39,8 @@ cdef class Lattice: raise IndexError('lattice index out of range') cdef lattice.LatticeArc* arc for (label, cost, dist2next) in arcs: - if isinstance(label, unicode): - label = label.encode('utf8') - arc = new lattice.LatticeArc(TDConvert(<char *>label), cost, dist2next) + label_str = as_str(label) + arc = new lattice.LatticeArc(TDConvert(label_str), cost, dist2next) self.lattice[0][index].push_back(arc[0]) del arc @@ -60,6 +59,7 @@ cdef class Lattice: yield self[i] def todot(self): + """lattice.todot() -> Representation of the lattice in GraphViz dot format.""" def lines(): yield 'digraph lattice {' yield 'rankdir = LR;' @@ -72,8 +72,9 @@ cdef class Lattice: return '\n'.join(lines()).encode('utf8') def as_hypergraph(self): + """lattice.as_hypergraph() -> Hypergraph representation of the lattice.""" cdef Hypergraph result = Hypergraph.__new__(Hypergraph) result.hg = new hypergraph.Hypergraph() cdef bytes plf = str(self) - hypergraph.ReadFromPLF(string(plf), result.hg) + hypergraph.ReadFromPLF(plf, result.hg) return result diff --git a/python/src/mteval.pxi b/python/src/mteval.pxi index 00355f96..f3bec393 100644 --- a/python/src/mteval.pxi +++ b/python/src/mteval.pxi @@ -93,6 +93,8 @@ cdef class CandidateSet: yield self[i] def add_kbest(self, Hypergraph hypergraph, unsigned k): + """cs.add_kbest(Hypergraph hypergraph, int k) -> Extract K-best hypotheses + from the hypergraph and add them to the candidate 
set.""" self.cs.AddKBestCandidates(hypergraph.hg[0], k, self.scorer.get()) cdef class SegmentEvaluator: @@ -103,15 +105,17 @@ cdef class SegmentEvaluator: del self.scorer def evaluate(self, sentence): + """se.evaluate(sentence) -> SufficientStats for the given hypothesis.""" cdef vector[WordID] hyp cdef SufficientStats sf = SufficientStats() sf.metric = self.metric sf.stats = new mteval.SufficientStats() - ConvertSentence(string(as_str(sentence.strip())), &hyp) + ConvertSentence(as_str(sentence.strip()), &hyp) self.scorer.get().Evaluate(hyp, sf.stats) return sf def candidate_set(self): + """se.candidate_set() -> Candidate set using this segment evaluator for scoring.""" return CandidateSet(self) cdef class Scorer: @@ -133,7 +137,7 @@ cdef class Scorer: cdef vector[WordID]* refv for ref in refs: refv = new vector[WordID]() - ConvertSentence(string(as_str(ref.strip())), refv) + ConvertSentence(as_str(ref.strip()), refv) refsv.push_back(refv[0]) del refv cdef unsigned i @@ -173,7 +177,8 @@ cdef class Metric: cdef Scorer scorer def __cinit__(self): self.scorer = Scorer() - self.scorer.name = new string(as_str(self.__class__.__name__)) + cdef bytes class_name = self.__class__.__name__ + self.scorer.name = new string(class_name) self.scorer.metric = mteval.PyMetricInstance(self.scorer.name[0], <void*> self, _compute_sufficient_stats, _compute_score) diff --git a/python/src/vectors.pxi b/python/src/vectors.pxi index 87780556..46f58fd4 100644 --- a/python/src/vectors.pxi +++ b/python/src/vectors.pxi @@ -5,6 +5,7 @@ cdef class DenseVector: cdef bint owned # if True, do not manage memory def __init__(self): + """DenseVector() -> Dense weight/feature vector.""" self.vector = new vector[weight_t]() self.owned = False @@ -22,7 +23,7 @@ cdef class DenseVector: raise KeyError(fname) def __setitem__(self, char* fname, float value): - cdef int fid = FDConvert(<char *>fname) + cdef int fid = FDConvert(fname) if fid < 0: raise KeyError(fname) if self.vector.size() <= fid: self.vector.resize(fid + 1) @@ -34,9 +35,11 @@ cdef class DenseVector: yield str(FDConvert(fid).c_str()), self.vector[0][fid] def dot(self, SparseVector other): + """vector.dot(SparseVector other) -> Dot product of the two vectors.""" return other.dot(self) def tosparse(self): + """vector.tosparse() -> Equivalent SparseVector.""" cdef SparseVector sparse = SparseVector.__new__(SparseVector) sparse.vector = new FastSparseVector[weight_t]() InitSparseVector(self.vector[0], sparse.vector) @@ -46,12 +49,14 @@ cdef class SparseVector: cdef FastSparseVector[weight_t]* vector def __init__(self): + """SparseVector() -> Sparse feature/weight vector.""" self.vector = new FastSparseVector[weight_t]() def __dealloc__(self): del self.vector def copy(self): + """vector.copy() -> SparseVector copy.""" return self * 1 def __getitem__(self, char* fname): @@ -60,7 +65,7 @@ cdef class SparseVector: return self.vector.value(fid) def __setitem__(self, char* fname, float value): - cdef int fid = FDConvert(<char *>fname) + cdef int fid = FDConvert(fname) if fid < 0: raise KeyError(fname) self.vector.set_value(fid, value) @@ -75,6 +80,7 @@ cdef class SparseVector: del it def dot(self, other): + """vector.dot(SparseVector/DenseVector other) -> Dot product of the two vectors.""" if isinstance(other, DenseVector): return self.vector.dot((<DenseVector> other).vector[0]) elif isinstance(other, SparseVector): diff --git a/rst_parser/Makefile.am b/rst_parser/Makefile.am deleted file mode 100644 index 8650cdab..00000000 --- a/rst_parser/Makefile.am +++ /dev/null @@ -1,20 
+0,0 @@ -bin_PROGRAMS = \ - mst_train rst_train rst_parse random_tree - -noinst_LIBRARIES = librst.a - -librst_a_SOURCES = arc_factored.cc arc_factored_marginals.cc rst.cc arc_ff.cc dep_training.cc global_ff.cc - -mst_train_SOURCES = mst_train.cc -mst_train_LDADD = librst.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a ../training/optimize.o -lz - -rst_train_SOURCES = rst_train.cc -rst_train_LDADD = librst.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz - -rst_parse_SOURCES = rst_parse.cc -rst_parse_LDADD = librst.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz - -random_tree_SOURCES = random_tree.cc -random_tree_LDADD = librst.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz - -AM_CPPFLAGS = -W -Wall -Wno-sign-compare $(GTEST_CPPFLAGS) -I$(top_srcdir)/decoder -I$(top_srcdir)/training -I$(top_srcdir)/utils -I$(top_srcdir)/mteval -I../klm diff --git a/rst_parser/arc_factored.cc b/rst_parser/arc_factored.cc deleted file mode 100644 index 74bf7516..00000000 --- a/rst_parser/arc_factored.cc +++ /dev/null @@ -1,151 +0,0 @@ -#include "arc_factored.h" - -#include <set> -#include <tr1/unordered_set> - -#include <boost/pending/disjoint_sets.hpp> -#include <boost/functional/hash.hpp> - -#include "arc_ff.h" - -using namespace std; -using namespace std::tr1; -using namespace boost; - -void EdgeSubset::ExtractFeatures(const TaggedSentence& sentence, - const ArcFeatureFunctions& ffs, - SparseVector<double>* features) const { - SparseVector<weight_t> efmap; - for (int j = 0; j < h_m_pairs.size(); ++j) { - efmap.clear(); - ffs.EdgeFeatures(sentence, h_m_pairs[j].first, - h_m_pairs[j].second, - &efmap); - (*features) += efmap; - } - for (int j = 0; j < roots.size(); ++j) { - efmap.clear(); - ffs.EdgeFeatures(sentence, -1, roots[j], &efmap); - (*features) += efmap; - } -} - -void ArcFactoredForest::ExtractFeatures(const TaggedSentence& sentence, - const ArcFeatureFunctions& ffs) { - for (int m = 0; m < num_words_; ++m) { - for (int h = 0; h < num_words_; ++h) { - ffs.EdgeFeatures(sentence, h, m, &edges_(h,m).features); - } - ffs.EdgeFeatures(sentence, -1, m, &root_edges_[m].features); - } -} - -void ArcFactoredForest::PickBestParentForEachWord(EdgeSubset* st) const { - for (int m = 0; m < num_words_; ++m) { - int best_head = -2; - prob_t best_score; - for (int h = -1; h < num_words_; ++h) { - const Edge& edge = (*this)(h,m); - if (best_head < -1 || edge.edge_prob > best_score) { - best_score = edge.edge_prob; - best_head = h; - } - } - assert(best_head >= -1); - if (best_head >= 0) - st->h_m_pairs.push_back(make_pair<short,short>(best_head, m)); - else - st->roots.push_back(m); - } -} - -struct WeightedEdge { - WeightedEdge() : h(), m(), weight() {} - WeightedEdge(short hh, short mm, float w) : h(hh), m(mm), weight(w) {} - short h, m; - float weight; - inline bool operator==(const WeightedEdge& o) const { - return h == o.h && m == o.m && weight == o.weight; - } - inline bool operator!=(const WeightedEdge& o) const { - return h != o.h || m != o.m || weight != o.weight; - } -}; -inline bool operator<(const WeightedEdge& l, const WeightedEdge& o) { return l.weight < o.weight; } -inline size_t hash_value(const WeightedEdge& e) 
{ return reinterpret_cast<const size_t&>(e); } - - -struct PriorityQueue { - void push(const WeightedEdge& e) {} - const WeightedEdge& top() const { - static WeightedEdge w(1,2,3); - return w; - } - void pop() {} - void increment_all(float p) {} -}; - -// based on Trajan 1977 -void ArcFactoredForest::MaximumSpanningTree(EdgeSubset* st) const { - typedef disjoint_sets_with_storage<identity_property_map, identity_property_map, - find_with_full_path_compression> DisjointSet; - DisjointSet strongly(num_words_ + 1); - DisjointSet weakly(num_words_ + 1); - set<unsigned> roots, rset; - unordered_set<WeightedEdge, boost::hash<WeightedEdge> > h; - vector<PriorityQueue> qs(num_words_ + 1); - vector<WeightedEdge> enter(num_words_ + 1); - vector<unsigned> mins(num_words_ + 1); - const WeightedEdge kDUMMY(0,0,0.0f); - for (unsigned i = 0; i <= num_words_; ++i) { - if (i > 0) { - // I(i) incidence on i -- all incoming edges - for (unsigned j = 0; j <= num_words_; ++j) { - qs[i].push(WeightedEdge(j, i, Weight(j,i))); - } - } - strongly.make_set(i); - weakly.make_set(i); - roots.insert(i); - enter[i] = kDUMMY; - mins[i] = i; - } - while(!roots.empty()) { - set<unsigned>::iterator it = roots.begin(); - const unsigned k = *it; - roots.erase(it); - cerr << "k=" << k << endl; - WeightedEdge ij = qs[k].top(); // MAX(k) - qs[k].pop(); - if (ij.weight <= 0) { - rset.insert(k); - } else { - if (strongly.find_set(ij.h) == k) { - roots.insert(k); - } else { - h.insert(ij); - if (weakly.find_set(ij.h) != weakly.find_set(ij.m)) { - weakly.union_set(ij.h, ij.m); - enter[k] = ij; - } else { - unsigned vertex = 0; - float val = 99999999999; - WeightedEdge xy = ij; - while(xy != kDUMMY) { - if (xy.weight < val) { - val = xy.weight; - vertex = strongly.find_set(xy.m); - } - xy = enter[strongly.find_set(xy.h)]; - } - qs[k].increment_all(val - ij.weight); - mins[k] = mins[vertex]; - xy = enter[strongly.find_set(ij.h)]; - while (xy != kDUMMY) { - } - } - } - } - } -} - diff --git a/rst_parser/arc_factored.h b/rst_parser/arc_factored.h deleted file mode 100644 index c5481d80..00000000 --- a/rst_parser/arc_factored.h +++ /dev/null @@ -1,124 +0,0 @@ -#ifndef _ARC_FACTORED_H_ -#define _ARC_FACTORED_H_ - -#include <iostream> -#include <cassert> -#include <vector> -#include <utility> -#include <boost/shared_ptr.hpp> -#include "array2d.h" -#include "sparse_vector.h" -#include "prob.h" -#include "weights.h" -#include "wordid.h" - -struct TaggedSentence { - std::vector<WordID> words; - std::vector<WordID> pos; -}; - -struct ArcFeatureFunctions; -struct EdgeSubset { - EdgeSubset() {} - std::vector<short> roots; // unless multiroot trees are supported, this - // will have a single member - std::vector<std::pair<short, short> > h_m_pairs; // h,m start at 0 - // assumes ArcFeatureFunction::PrepareForInput has already been called - void ExtractFeatures(const TaggedSentence& sentence, - const ArcFeatureFunctions& ffs, - SparseVector<double>* features) const; -}; - -class ArcFactoredForest { - public: - ArcFactoredForest() : num_words_() {} - explicit ArcFactoredForest(short num_words) : num_words_(num_words) { - resize(num_words); - } - - unsigned size() const { return num_words_; } - - void resize(unsigned num_words) { - num_words_ = num_words; - root_edges_.clear(); - edges_.clear(); - root_edges_.resize(num_words); - edges_.resize(num_words, num_words); - for (int h = 0; h < num_words; ++h) { - for (int m = 0; m < num_words; ++m) { - edges_(h, m).h = h; - edges_(h, m).m = m; - } - root_edges_[h].h = -1; - root_edges_[h].m = h; - } - } - 
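
    # A sketch of what the deleted PickBestParentForEachWord computes: each
    # word independently takes its highest-scoring head (h = -1 meaning the
    # root). As the header notes, this "may not return a tree" -- independent
    # argmax choices can form cycles. Names here are illustrative.
    def best_parent_per_word(score, n):
        # score(h, m) -> float, h in {-1, ..., n-1}, m in {0, ..., n-1}
        roots, h_m_pairs = [], []
        for m in range(n):
            heads = [h for h in range(-1, n) if h != m]
            best_h = max(heads, key=lambda h: score(h, m))
            if best_h < 0:
                roots.append(m)
            else:
                h_m_pairs.append((best_h, m))
        return roots, h_m_pairs
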
- // compute the maximum spanning tree based on the current weighting - // using the O(n^2) CLE algorithm - void MaximumSpanningTree(EdgeSubset* st) const; - - // Reweight edges so that edge_prob is the edge's marginals - // optionally returns log partition - void EdgeMarginals(prob_t* p_log_z = NULL); - - // This may not return a tree - void PickBestParentForEachWord(EdgeSubset* st) const; - - struct Edge { - Edge() : h(), m(), features(), edge_prob(prob_t::Zero()) {} - short h; - short m; - SparseVector<weight_t> features; - prob_t edge_prob; - }; - - // set eges_[*].features - void ExtractFeatures(const TaggedSentence& sentence, - const ArcFeatureFunctions& ffs); - - const Edge& operator()(short h, short m) const { - return h >= 0 ? edges_(h, m) : root_edges_[m]; - } - - Edge& operator()(short h, short m) { - return h >= 0 ? edges_(h, m) : root_edges_[m]; - } - - float Weight(short h, short m) const { - return log((*this)(h,m).edge_prob); - } - - template <class V> - void Reweight(const V& weights) { - for (int m = 0; m < num_words_; ++m) { - for (int h = 0; h < num_words_; ++h) { - if (h != m) { - Edge& e = edges_(h, m); - e.edge_prob.logeq(e.features.dot(weights)); - } - } - Edge& e = root_edges_[m]; - e.edge_prob.logeq(e.features.dot(weights)); - } - } - - private: - int num_words_; - std::vector<Edge> root_edges_; - Array2D<Edge> edges_; -}; - -inline std::ostream& operator<<(std::ostream& os, const ArcFactoredForest::Edge& edge) { - return os << "(" << edge.h << " < " << edge.m << ")"; -} - -inline std::ostream& operator<<(std::ostream& os, const EdgeSubset& ss) { - for (unsigned i = 0; i < ss.roots.size(); ++i) - os << "ROOT < " << ss.roots[i] << std::endl; - for (unsigned i = 0; i < ss.h_m_pairs.size(); ++i) - os << ss.h_m_pairs[i].first << " < " << ss.h_m_pairs[i].second << std::endl; - return os; -} - -#endif diff --git a/rst_parser/arc_factored_marginals.cc b/rst_parser/arc_factored_marginals.cc deleted file mode 100644 index 3e8c9f86..00000000 --- a/rst_parser/arc_factored_marginals.cc +++ /dev/null @@ -1,58 +0,0 @@ -#include "arc_factored.h" - -#include <iostream> - -#include "config.h" - -using namespace std; - -#if HAVE_EIGEN - -#include <Eigen/Dense> -typedef Eigen::Matrix<prob_t, Eigen::Dynamic, Eigen::Dynamic> ArcMatrix; -typedef Eigen::Matrix<prob_t, Eigen::Dynamic, 1> RootVector; - -void ArcFactoredForest::EdgeMarginals(prob_t *plog_z) { - ArcMatrix A(num_words_,num_words_); - RootVector r(num_words_); - for (int h = 0; h < num_words_; ++h) { - for (int m = 0; m < num_words_; ++m) { - if (h != m) - A(h,m) = edges_(h,m).edge_prob; - else - A(h,m) = prob_t::Zero(); - } - r(h) = root_edges_[h].edge_prob; - } - - ArcMatrix L = -A; - L.diagonal() = A.colwise().sum(); - L.row(0) = r; - ArcMatrix Linv = L.inverse(); - if (plog_z) *plog_z = Linv.determinant(); - RootVector rootMarginals = r.cwiseProduct(Linv.col(0)); - static const prob_t ZERO(0); - static const prob_t ONE(1); -// ArcMatrix T = Linv; - for (int h = 0; h < num_words_; ++h) { - for (int m = 0; m < num_words_; ++m) { - const prob_t marginal = (m == 0 ? ZERO : ONE) * A(h,m) * Linv(m,m) - - (h == 0 ? 
ZERO : ONE) * A(h,m) * Linv(m,h); - edges_(h,m).edge_prob = marginal; -// T(h,m) = marginal; - } - root_edges_[h].edge_prob = rootMarginals(h); - } -// cerr << "ROOT MARGINALS: " << rootMarginals.transpose() << endl; -// cerr << "M:\n" << T << endl; -} - -#else - -void ArcFactoredForest::EdgeMarginals(prob_t *) { - cerr << "EdgeMarginals() requires --with-eigen!\n"; - abort(); -} - -#endif - diff --git a/rst_parser/arc_ff.cc b/rst_parser/arc_ff.cc deleted file mode 100644 index c4e5aa17..00000000 --- a/rst_parser/arc_ff.cc +++ /dev/null @@ -1,183 +0,0 @@ -#include "arc_ff.h" - -#include <iostream> -#include <sstream> - -#include "stringlib.h" -#include "tdict.h" -#include "fdict.h" -#include "sentence_metadata.h" - -using namespace std; - -struct ArcFFImpl { - ArcFFImpl() : kROOT("ROOT"), kLEFT_POS("LEFT"), kRIGHT_POS("RIGHT") {} - const string kROOT; - const string kLEFT_POS; - const string kRIGHT_POS; - map<WordID, vector<int> > pcs; - - void PrepareForInput(const TaggedSentence& sent) { - pcs.clear(); - for (int i = 0; i < sent.pos.size(); ++i) - pcs[sent.pos[i]].resize(1, 0); - pcs[sent.pos[0]][0] = 1; - for (int i = 1; i < sent.pos.size(); ++i) { - const WordID posi = sent.pos[i]; - for (map<WordID, vector<int> >::iterator j = pcs.begin(); j != pcs.end(); ++j) { - const WordID posj = j->first; - vector<int>& cs = j->second; - cs.push_back(cs.back() + (posj == posi ? 1 : 0)); - } - } - } - - template <typename A> - static void Fire(SparseVector<weight_t>* v, const A& a) { - ostringstream os; - os << a; - v->set_value(FD::Convert(os.str()), 1); - } - - template <typename A, typename B> - static void Fire(SparseVector<weight_t>* v, const A& a, const B& b) { - ostringstream os; - os << a << ':' << b; - v->set_value(FD::Convert(os.str()), 1); - } - - template <typename A, typename B, typename C> - static void Fire(SparseVector<weight_t>* v, const A& a, const B& b, const C& c) { - ostringstream os; - os << a << ':' << b << '_' << c; - v->set_value(FD::Convert(os.str()), 1); - } - - template <typename A, typename B, typename C, typename D> - static void Fire(SparseVector<weight_t>* v, const A& a, const B& b, const C& c, const D& d) { - ostringstream os; - os << a << ':' << b << '_' << c << '_' << d; - v->set_value(FD::Convert(os.str()), 1); - } - - template <typename A, typename B, typename C, typename D, typename E> - static void Fire(SparseVector<weight_t>* v, const A& a, const B& b, const C& c, const D& d, const E& e) { - ostringstream os; - os << a << ':' << b << '_' << c << '_' << d << '_' << e; - v->set_value(FD::Convert(os.str()), 1); - } - - static void AddConjoin(const SparseVector<double>& v, const string& feat, SparseVector<double>* pf) { - for (SparseVector<double>::const_iterator it = v.begin(); it != v.end(); ++it) - pf->set_value(FD::Convert(FD::Convert(it->first) + "_" + feat), it->second); - } - - static inline string Fixup(const string& str) { - string res = LowercaseString(str); - if (res.size() < 6) return res; - return res.substr(0, 5) + "*"; - } - - static inline string Suffix(const string& str) { - if (str.size() < 4) return ""; else return str.substr(str.size() - 3); - } - - void EdgeFeatures(const TaggedSentence& sent, - short h, - short m, - SparseVector<weight_t>* features) const { - const bool is_root = (h == -1); - const string head_word = (is_root ? kROOT : Fixup(TD::Convert(sent.words[h]))); - int num_words = sent.words.size(); - const string& head_pos = (is_root ? 
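
    # The construction in the deleted EdgeMarginals above is the Matrix-Tree
    # theorem applied to dependency parsing (as in Koo et al., 2007): invert
    # a root-augmented Laplacian and read off arc marginals. A minimal numpy
    # rendering of the same arithmetic, with illustrative names:
    import numpy as np

    def edge_marginals(A, r):
        # A[h, m] = exp(arc score h -> m); r[m] = exp(score of m as root)
        n = A.shape[0]
        A = A.copy()
        np.fill_diagonal(A, 0.0)
        L = -A
        L[np.diag_indices(n)] = A.sum(axis=0)  # column sums on the diagonal
        L[0, :] = r                            # first row holds root scores
        Linv = np.linalg.inv(L)
        _, log_z = np.linalg.slogdet(L)        # log partition function
        # (the C++ keeps det(L^-1) instead, i.e. exp(-log_z))
        root_marg = r * Linv[:, 0]
        marg = np.zeros_like(A)
        for h in range(n):
            for m in range(n):
                marg[h, m] = ((0.0 if m == 0 else 1.0) * A[h, m] * Linv[m, m]
                              - (0.0 if h == 0 else 1.0) * A[h, m] * Linv[m, h])
        return marg, root_marg, log_z
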
kROOT : TD::Convert(sent.pos[h])); - const string mod_word = Fixup(TD::Convert(sent.words[m])); - const string& mod_pos = TD::Convert(sent.pos[m]); - const string& mod_pos_L = (m > 0 ? TD::Convert(sent.pos[m-1]) : kLEFT_POS); - const string& mod_pos_R = (m < sent.pos.size() - 1 ? TD::Convert(sent.pos[m]) : kRIGHT_POS); - const bool bdir = m < h; - const string dir = (bdir ? "MLeft" : "MRight"); - int v = m - h; - if (v < 0) { - v= -1 - int(log(-v) / log(1.6)); - } else { - v= int(log(v) / log(1.6)) + 1; - } - ostringstream os; - if (v < 0) os << "LenL" << -v; else os << "LenR" << v; - const string lenstr = os.str(); - Fire(features, dir); - Fire(features, lenstr); - // dir, lenstr - if (is_root) { - Fire(features, "wROOT", mod_word); - Fire(features, "pROOT", mod_pos); - Fire(features, "wpROOT", mod_word, mod_pos); - Fire(features, "DROOT", mod_pos, lenstr); - Fire(features, "LROOT", mod_pos_L); - Fire(features, "RROOT", mod_pos_R); - Fire(features, "LROOT", mod_pos_L, mod_pos); - Fire(features, "RROOT", mod_pos_R, mod_pos); - Fire(features, "LDist", m); - Fire(features, "RDist", num_words - m); - } else { // not root - const string& head_pos_L = (h > 0 ? TD::Convert(sent.pos[h-1]) : kLEFT_POS); - const string& head_pos_R = (h < sent.pos.size() - 1 ? TD::Convert(sent.pos[h]) : kRIGHT_POS); - SparseVector<double> fv; - SparseVector<double>* f = &fv; - Fire(f, "H", head_pos); - Fire(f, "M", mod_pos); - Fire(f, "HM", head_pos, mod_pos); - - // surrounders - Fire(f, "posLL", head_pos, mod_pos, head_pos_L, mod_pos_L); - Fire(f, "posRR", head_pos, mod_pos, head_pos_R, mod_pos_R); - Fire(f, "posLR", head_pos, mod_pos, head_pos_L, mod_pos_R); - Fire(f, "posRL", head_pos, mod_pos, head_pos_R, mod_pos_L); - - // between features - int left = min(h,m); - int right = max(h,m); - if (right - left >= 2) { - if (bdir) --right; else ++left; - for (map<WordID, vector<int> >::const_iterator it = pcs.begin(); it != pcs.end(); ++it) { - if (it->second[left] != it->second[right]) { - Fire(f, "BT", head_pos, TD::Convert(it->first), mod_pos); - } - } - } - - Fire(f, "wH", head_word); - Fire(f, "wM", mod_word); - Fire(f, "wpH", head_word, head_pos); - Fire(f, "wpM", mod_word, mod_pos); - Fire(f, "pHwM", head_pos, mod_word); - Fire(f, "wHpM", head_word, mod_pos); - - Fire(f, "wHM", head_word, mod_word); - Fire(f, "pHMwH", head_pos, mod_pos, head_word); - Fire(f, "pHMwM", head_pos, mod_pos, mod_word); - Fire(f, "wHMpH", head_word, mod_word, head_pos); - Fire(f, "wHMpM", head_word, mod_word, mod_pos); - Fire(f, "wHMpHM", head_word, mod_word, head_pos, mod_pos); - - AddConjoin(fv, dir, features); - AddConjoin(fv, lenstr, features); - (*features) += fv; - } - } -}; - -ArcFeatureFunctions::ArcFeatureFunctions() : pimpl(new ArcFFImpl) {} -ArcFeatureFunctions::~ArcFeatureFunctions() { delete pimpl; } - -void ArcFeatureFunctions::PrepareForInput(const TaggedSentence& sentence) { - pimpl->PrepareForInput(sentence); -} - -void ArcFeatureFunctions::EdgeFeatures(const TaggedSentence& sentence, - short h, - short m, - SparseVector<weight_t>* features) const { - pimpl->EdgeFeatures(sentence, h, m, features); -} - diff --git a/rst_parser/arc_ff.h b/rst_parser/arc_ff.h deleted file mode 100644 index 52f311d2..00000000 --- a/rst_parser/arc_ff.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _ARC_FF_H_ -#define _ARC_FF_H_ - -#include <string> -#include "sparse_vector.h" -#include "weights.h" -#include "arc_factored.h" - -struct TaggedSentence; -struct ArcFFImpl; -class ArcFeatureFunctions { - public: - ArcFeatureFunctions(); - 
~ArcFeatureFunctions(); - - // called once, per input, before any calls to EdgeFeatures - // used to initialize sentence-specific data structures - void PrepareForInput(const TaggedSentence& sentence); - - void EdgeFeatures(const TaggedSentence& sentence, - short h, - short m, - SparseVector<weight_t>* features) const; - private: - ArcFFImpl* pimpl; -}; - -#endif diff --git a/rst_parser/dep_training.cc b/rst_parser/dep_training.cc deleted file mode 100644 index ef97798b..00000000 --- a/rst_parser/dep_training.cc +++ /dev/null @@ -1,76 +0,0 @@ -#include "dep_training.h" - -#include <vector> -#include <iostream> - -#include "stringlib.h" -#include "filelib.h" -#include "tdict.h" -#include "picojson.h" - -using namespace std; - -static void ParseInstance(const string& line, int start, TrainingInstance* out, int lc = 0) { - picojson::value obj; - string err; - picojson::parse(obj, line.begin() + start, line.end(), &err); - if (err.size() > 0) { cerr << "JSON parse error in " << lc << ": " << err << endl; abort(); } - TrainingInstance& cur = *out; - TaggedSentence& ts = cur.ts; - EdgeSubset& tree = cur.tree; - ts.pos.clear(); - ts.words.clear(); - tree.roots.clear(); - tree.h_m_pairs.clear(); - assert(obj.is<picojson::object>()); - const picojson::object& d = obj.get<picojson::object>(); - const picojson::array& ta = d.find("tokens")->second.get<picojson::array>(); - for (unsigned i = 0; i < ta.size(); ++i) { - ts.words.push_back(TD::Convert(ta[i].get<picojson::array>()[0].get<string>())); - ts.pos.push_back(TD::Convert(ta[i].get<picojson::array>()[1].get<string>())); - } - if (d.find("deps") != d.end()) { - const picojson::array& da = d.find("deps")->second.get<picojson::array>(); - for (unsigned i = 0; i < da.size(); ++i) { - const picojson::array& thm = da[i].get<picojson::array>(); - // get dep type here - short h = thm[2].get<double>(); - short m = thm[1].get<double>(); - if (h < 0) - tree.roots.push_back(m); - else - tree.h_m_pairs.push_back(make_pair(h,m)); - } - } - //cerr << TD::GetString(ts.words) << endl << TD::GetString(ts.pos) << endl << tree << endl; -} - -bool TrainingInstance::ReadInstance(std::istream* in, TrainingInstance* instance) { - string line; - if (!getline(*in, line)) return false; - size_t pos = line.rfind('\t'); - assert(pos != string::npos); - static int lc = 0; ++lc; - ParseInstance(line, pos + 1, instance, lc); - return true; -} - -void TrainingInstance::ReadTrainingCorpus(const string& fname, vector<TrainingInstance>* corpus, int rank, int size) { - ReadFile rf(fname); - istream& in = *rf.stream(); - string line; - int lc = 0; - bool flag = false; - while(getline(in, line)) { - ++lc; - if ((lc-1) % size != rank) continue; - if (rank == 0 && lc % 10 == 0) { cerr << '.' 
<< flush; flag = true; } - if (rank == 0 && lc % 400 == 0) { cerr << " [" << lc << "]\n"; flag = false; } - size_t pos = line.rfind('\t'); - assert(pos != string::npos); - corpus->push_back(TrainingInstance()); - ParseInstance(line, pos + 1, &corpus->back(), lc); - } - if (flag) cerr << "\nRead " << lc << " training instances\n"; -} - diff --git a/rst_parser/dep_training.h b/rst_parser/dep_training.h deleted file mode 100644 index 3eeee22e..00000000 --- a/rst_parser/dep_training.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _DEP_TRAINING_H_ -#define _DEP_TRAINING_H_ - -#include <iostream> -#include <string> -#include <vector> -#include "arc_factored.h" -#include "weights.h" - -struct TrainingInstance { - TaggedSentence ts; - EdgeSubset tree; - SparseVector<weight_t> features; - // reads a "Jsent" formatted dependency file - static bool ReadInstance(std::istream* in, TrainingInstance* instance); // returns false at EOF - static void ReadTrainingCorpus(const std::string& fname, std::vector<TrainingInstance>* corpus, int rank = 0, int size = 1); -}; - -#endif diff --git a/rst_parser/global_ff.cc b/rst_parser/global_ff.cc deleted file mode 100644 index ae410875..00000000 --- a/rst_parser/global_ff.cc +++ /dev/null @@ -1,44 +0,0 @@ -#include "global_ff.h" - -#include <iostream> -#include <sstream> - -#include "tdict.h" - -using namespace std; - -struct GFFImpl { - void PrepareForInput(const TaggedSentence& sentence) { - } - void Features(const TaggedSentence& sentence, - const EdgeSubset& tree, - SparseVector<double>* feats) const { - const vector<WordID>& words = sentence.words; - const vector<WordID>& tags = sentence.pos; - const vector<pair<short,short> >& hms = tree.h_m_pairs; - assert(words.size() == tags.size()); - vector<int> mods(words.size()); - for (int i = 0; i < hms.size(); ++i) { - mods[hms[i].first]++; // first = head, second = modifier - } - for (int i = 0; i < mods.size(); ++i) { - ostringstream os; - os << "NM:" << TD::Convert(tags[i]) << "_" << mods[i]; - feats->add_value(FD::Convert(os.str()), 1.0); - } - } -}; - -GlobalFeatureFunctions::GlobalFeatureFunctions() : pimpl(new GFFImpl) {} -GlobalFeatureFunctions::~GlobalFeatureFunctions() { delete pimpl; } - -void GlobalFeatureFunctions::PrepareForInput(const TaggedSentence& sentence) { - pimpl->PrepareForInput(sentence); -} - -void GlobalFeatureFunctions::Features(const TaggedSentence& sentence, - const EdgeSubset& tree, - SparseVector<double>* feats) const { - pimpl->Features(sentence, tree, feats); -} - diff --git a/rst_parser/global_ff.h b/rst_parser/global_ff.h deleted file mode 100644 index d71d0fa1..00000000 --- a/rst_parser/global_ff.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _GLOBAL_FF_H_ -#define _GLOBAL_FF_H_ - -#include "arc_factored.h" - -struct GFFImpl; -struct GlobalFeatureFunctions { - GlobalFeatureFunctions(); - ~GlobalFeatureFunctions(); - void PrepareForInput(const TaggedSentence& sentence); - void Features(const TaggedSentence& sentence, - const EdgeSubset& tree, - SparseVector<double>* feats) const; - private: - GFFImpl* pimpl; -}; - -#endif diff --git a/rst_parser/mst_train.cc b/rst_parser/mst_train.cc deleted file mode 100644 index a78df600..00000000 --- a/rst_parser/mst_train.cc +++ /dev/null @@ -1,228 +0,0 @@ -#include "arc_factored.h" - -#include <vector> -#include <iostream> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> -// #define HAVE_THREAD 1 -#if HAVE_THREAD -#include <boost/thread.hpp> -#endif - -#include "arc_ff.h" -#include "stringlib.h" -#include 
"filelib.h" -#include "tdict.h" -#include "dep_training.h" -#include "optimize.h" -#include "weights.h" - -using namespace std; -namespace po = boost::program_options; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - string cfg_file; - opts.add_options() - ("training_data,t",po::value<string>()->default_value("-"), "File containing training data (jsent format)") - ("weights,w",po::value<string>(), "Optional starting weights") - ("output_every_i_iterations,I",po::value<unsigned>()->default_value(1), "Write weights every I iterations") - ("regularization_strength,C",po::value<double>()->default_value(1.0), "Regularization strength") -#ifdef HAVE_CMPH - ("cmph_perfect_feature_hash,h", po::value<string>(), "Load perfect hash function for features") -#endif -#if HAVE_THREAD - ("threads,T",po::value<unsigned>()->default_value(1), "Number of threads") -#endif - ("correction_buffers,m", po::value<int>()->default_value(10), "LBFGS correction buffers"); - po::options_description clo("Command line options"); - clo.add_options() - ("config,c", po::value<string>(&cfg_file), "Configuration file") - ("help,?", "Print this help message and exit"); - - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(dconfig_options).add(clo); - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (cfg_file.size() > 0) { - ReadFile rf(cfg_file); - po::store(po::parse_config_file(*rf.stream(), dconfig_options), *conf); - } - if (conf->count("help")) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -void AddFeatures(double prob, const SparseVector<double>& fmap, vector<double>* g) { - for (SparseVector<double>::const_iterator it = fmap.begin(); it != fmap.end(); ++it) - (*g)[it->first] += it->second * prob; -} - -double ApplyRegularizationTerms(const double C, - const vector<double>& weights, - vector<double>* g) { - assert(weights.size() == g->size()); - double reg = 0; - for (size_t i = 0; i < weights.size(); ++i) { -// const double prev_w_i = (i < prev_weights.size() ? prev_weights[i] : 0.0); - const double& w_i = weights[i]; - double& g_i = (*g)[i]; - reg += C * w_i * w_i; - g_i += 2 * C * w_i; - -// reg += T * (w_i - prev_w_i) * (w_i - prev_w_i); -// g_i += 2 * T * (w_i - prev_w_i); - } - return reg; -} - -struct GradientWorker { - GradientWorker(int f, - int t, - vector<double>* w, - vector<TrainingInstance>* c, - vector<ArcFactoredForest>* fs) : obj(), weights(*w), from(f), to(t), corpus(*c), forests(*fs), g(w->size()) {} - void operator()() { - int every = (to - from) / 20; - if (!every) every++; - for (int i = from; i < to; ++i) { - if ((from == 0) && (i + 1) % every == 0) cerr << '.' 
<< flush; - const int num_words = corpus[i].ts.words.size(); - forests[i].Reweight(weights); - prob_t z; - forests[i].EdgeMarginals(&z); - obj -= log(z); - //cerr << " O = " << (-corpus[i].features.dot(weights)) << " D=" << -lz << " OO= " << (-corpus[i].features.dot(weights) - lz) << endl; - //cerr << " ZZ = " << zz << endl; - for (int h = -1; h < num_words; ++h) { - for (int m = 0; m < num_words; ++m) { - if (h == m) continue; - const ArcFactoredForest::Edge& edge = forests[i](h,m); - const SparseVector<weight_t>& fmap = edge.features; - double prob = edge.edge_prob.as_float(); - if (prob < -0.000001) { cerr << "Prob < 0: " << prob << endl; prob = 0; } - if (prob > 1.000001) { cerr << "Prob > 1: " << prob << endl; prob = 1; } - AddFeatures(prob, fmap, &g); - //mfm += fmap * prob; // DE - } - } - } - } - double obj; - vector<double>& weights; - const int from, to; - vector<TrainingInstance>& corpus; - vector<ArcFactoredForest>& forests; - vector<double> g; // local gradient -}; - -int main(int argc, char** argv) { - int rank = 0; - int size = 1; - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - if (conf.count("cmph_perfect_feature_hash")) { - cerr << "Loading perfect hash function from " << conf["cmph_perfect_feature_hash"].as<string>() << " ...\n"; - FD::EnableHash(conf["cmph_perfect_feature_hash"].as<string>()); - cerr << " " << FD::NumFeats() << " features in map\n"; - } - ArcFeatureFunctions ffs; - vector<TrainingInstance> corpus; - TrainingInstance::ReadTrainingCorpus(conf["training_data"].as<string>(), &corpus, rank, size); - vector<weight_t> weights; - Weights::InitFromFile(conf["weights"].as<string>(), &weights); - vector<ArcFactoredForest> forests(corpus.size()); - SparseVector<double> empirical; - cerr << "Extracting features...\n"; - bool flag = false; - for (int i = 0; i < corpus.size(); ++i) { - TrainingInstance& cur = corpus[i]; - if (rank == 0 && (i+1) % 10 == 0) { cerr << '.' 
<< flush; flag = true; } - if (rank == 0 && (i+1) % 400 == 0) { cerr << " [" << (i+1) << "]\n"; flag = false; } - ffs.PrepareForInput(cur.ts); - SparseVector<weight_t> efmap; - for (int j = 0; j < cur.tree.h_m_pairs.size(); ++j) { - efmap.clear(); - ffs.EdgeFeatures(cur.ts, cur.tree.h_m_pairs[j].first, - cur.tree.h_m_pairs[j].second, - &efmap); - cur.features += efmap; - } - for (int j = 0; j < cur.tree.roots.size(); ++j) { - efmap.clear(); - ffs.EdgeFeatures(cur.ts, -1, cur.tree.roots[j], &efmap); - cur.features += efmap; - } - empirical += cur.features; - forests[i].resize(cur.ts.words.size()); - forests[i].ExtractFeatures(cur.ts, ffs); - } - if (flag) cerr << endl; - //cerr << "EMP: " << empirical << endl; //DE - weights.resize(FD::NumFeats(), 0.0); - vector<weight_t> g(FD::NumFeats(), 0.0); - cerr << "features initialized\noptimizing...\n"; - boost::shared_ptr<BatchOptimizer> o; -#if HAVE_THREAD - unsigned threads = conf["threads"].as<unsigned>(); - if (threads > corpus.size()) threads = corpus.size(); -#else - const unsigned threads = 1; -#endif - int chunk = corpus.size() / threads; - o.reset(new LBFGSOptimizer(g.size(), conf["correction_buffers"].as<int>())); - int iterations = 1000; - for (int iter = 0; iter < iterations; ++iter) { - cerr << "ITERATION " << iter << " " << flush; - fill(g.begin(), g.end(), 0.0); - for (SparseVector<double>::iterator it = empirical.begin(); it != empirical.end(); ++it) - g[it->first] = -it->second; - double obj = -empirical.dot(weights); - vector<boost::shared_ptr<GradientWorker> > jobs; - for (int from = 0; from < corpus.size(); from += chunk) { - int to = from + chunk; - if (to > corpus.size()) to = corpus.size(); - jobs.push_back(boost::shared_ptr<GradientWorker>(new GradientWorker(from, to, &weights, &corpus, &forests))); - } -#if HAVE_THREAD - boost::thread_group tg; - for (int i = 0; i < threads; ++i) - tg.create_thread(boost::ref(*jobs[i])); - tg.join_all(); -#else - (*jobs[0])(); -#endif - for (int i = 0; i < threads; ++i) { - obj += jobs[i]->obj; - vector<double>& tg = jobs[i]->g; - for (unsigned j = 0; j < g.size(); ++j) - g[j] += tg[j]; - } - // SparseVector<double> mfm; //DE - //cerr << endl << "E: " << empirical << endl; // DE - //cerr << "M: " << mfm << endl; // DE - double r = ApplyRegularizationTerms(conf["regularization_strength"].as<double>(), weights, &g); - double gnorm = 0; - for (int i = 0; i < g.size(); ++i) - gnorm += g[i]*g[i]; - ostringstream ll; - ll << "ITER=" << (iter+1) << "\tOBJ=" << (obj+r) << "\t[F=" << obj << " R=" << r << "]\tGnorm=" << sqrt(gnorm); - cerr << ' ' << ll.str().substr(ll.str().find('\t')+1) << endl; - obj += r; - assert(obj >= 0); - o->Optimize(obj, g, &weights); - Weights::ShowLargestFeatures(weights); - const bool converged = o->HasConverged(); - const char* ofname = converged ? "weights.final.gz" : "weights.cur.gz"; - if (converged || ((iter+1) % conf["output_every_i_iterations"].as<unsigned>()) == 0) { - cerr << "writing..." << flush; - const string sl = ll.str(); - Weights::WriteToFile(ofname, weights, true, &sl); - cerr << "done" << endl; - } - if (converged) { cerr << "CONVERGED\n"; break; } - } - return 0; -} - diff --git a/rst_parser/picojson.h b/rst_parser/picojson.h deleted file mode 100644 index bdb26057..00000000 --- a/rst_parser/picojson.h +++ /dev/null @@ -1,979 +0,0 @@ -/* - * Copyright 2009-2010 Cybozu Labs, Inc. 
- * Copyright 2011 Kazuho Oku - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY CYBOZU LABS, INC. ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO - * EVENT SHALL CYBOZU LABS, INC. OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are - * those of the authors and should not be interpreted as representing official - * policies, either expressed or implied, of Cybozu Labs, Inc. - * - */ -#ifndef picojson_h -#define picojson_h - -#include <cassert> -#include <cmath> -#include <cstdio> -#include <cstdlib> -#include <cstring> -#include <iostream> -#include <iterator> -#include <map> -#include <string> -#include <vector> - -#ifdef _MSC_VER - #define SNPRINTF _snprintf_s - #pragma warning(push) - #pragma warning(disable : 4244) // conversion from int to char -#else - #define SNPRINTF snprintf -#endif - -namespace picojson { - - enum { - null_type, - boolean_type, - number_type, - string_type, - array_type, - object_type - }; - - struct null {}; - - class value { - public: - typedef std::vector<value> array; - typedef std::map<std::string, value> object; - protected: - int type_; - union { - bool boolean_; - double number_; - std::string* string_; - array* array_; - object* object_; - }; - public: - value(); - value(int type, bool); - explicit value(bool b); - explicit value(double n); - explicit value(const std::string& s); - explicit value(const array& a); - explicit value(const object& o); - explicit value(const char* s); - value(const char* s, size_t len); - ~value(); - value(const value& x); - value& operator=(const value& x); - template <typename T> bool is() const; - template <typename T> const T& get() const; - template <typename T> T& get(); - bool evaluate_as_boolean() const; - const value& get(size_t idx) const; - const value& get(const std::string& key) const; - bool contains(size_t idx) const; - bool contains(const std::string& key) const; - std::string to_str() const; - template <typename Iter> void serialize(Iter os) const; - std::string serialize() const; - private: - template <typename T> value(const T*); // intentionally defined to block implicit conversion of pointer to bool - }; - - typedef value::array array; - typedef value::object object; - - inline value::value() : type_(null_type) {} - - inline value::value(int type, bool) : type_(type) { - switch (type) { -#define INIT(p, v) case p##type: p = v; break - INIT(boolean_, false); 
- INIT(number_, 0.0); - INIT(string_, new std::string()); - INIT(array_, new array()); - INIT(object_, new object()); -#undef INIT - default: break; - } - } - - inline value::value(bool b) : type_(boolean_type) { - boolean_ = b; - } - - inline value::value(double n) : type_(number_type) { - number_ = n; - } - - inline value::value(const std::string& s) : type_(string_type) { - string_ = new std::string(s); - } - - inline value::value(const array& a) : type_(array_type) { - array_ = new array(a); - } - - inline value::value(const object& o) : type_(object_type) { - object_ = new object(o); - } - - inline value::value(const char* s) : type_(string_type) { - string_ = new std::string(s); - } - - inline value::value(const char* s, size_t len) : type_(string_type) { - string_ = new std::string(s, len); - } - - inline value::~value() { - switch (type_) { -#define DEINIT(p) case p##type: delete p; break - DEINIT(string_); - DEINIT(array_); - DEINIT(object_); -#undef DEINIT - default: break; - } - } - - inline value::value(const value& x) : type_(x.type_) { - switch (type_) { -#define INIT(p, v) case p##type: p = v; break - INIT(boolean_, x.boolean_); - INIT(number_, x.number_); - INIT(string_, new std::string(*x.string_)); - INIT(array_, new array(*x.array_)); - INIT(object_, new object(*x.object_)); -#undef INIT - default: break; - } - } - - inline value& value::operator=(const value& x) { - if (this != &x) { - this->~value(); - new (this) value(x); - } - return *this; - } - -#define IS(ctype, jtype) \ - template <> inline bool value::is<ctype>() const { \ - return type_ == jtype##_type; \ - } - IS(null, null) - IS(bool, boolean) - IS(int, number) - IS(double, number) - IS(std::string, string) - IS(array, array) - IS(object, object) -#undef IS - -#define GET(ctype, var) \ - template <> inline const ctype& value::get<ctype>() const { \ - assert("type mismatch! call vis<type>() before get<type>()" \ - && is<ctype>()); \ - return var; \ - } \ - template <> inline ctype& value::get<ctype>() { \ - assert("type mismatch! call is<type>() before get<type>()" \ - && is<ctype>()); \ - return var; \ - } - GET(bool, boolean_) - GET(double, number_) - GET(std::string, *string_) - GET(array, *array_) - GET(object, *object_) -#undef GET - - inline bool value::evaluate_as_boolean() const { - switch (type_) { - case null_type: - return false; - case boolean_type: - return boolean_; - case number_type: - return number_ != 0; - case string_type: - return ! string_->empty(); - default: - return true; - } - } - - inline const value& value::get(size_t idx) const { - static value s_null; - assert(is<array>()); - return idx < array_->size() ? (*array_)[idx] : s_null; - } - - inline const value& value::get(const std::string& key) const { - static value s_null; - assert(is<object>()); - object::const_iterator i = object_->find(key); - return i != object_->end() ? i->second : s_null; - } - - inline bool value::contains(size_t idx) const { - assert(is<array>()); - return idx < array_->size(); - } - - inline bool value::contains(const std::string& key) const { - assert(is<object>()); - object::const_iterator i = object_->find(key); - return i != object_->end(); - } - - inline std::string value::to_str() const { - switch (type_) { - case null_type: return "null"; - case boolean_type: return boolean_ ? "true" : "false"; - case number_type: { - char buf[256]; - double tmp; - SNPRINTF(buf, sizeof(buf), modf(number_, &tmp) == 0 ? 
"%.f" : "%f", number_); - return buf; - } - case string_type: return *string_; - case array_type: return "array"; - case object_type: return "object"; - default: assert(0); -#ifdef _MSC_VER - __assume(0); -#endif - } - } - - template <typename Iter> void copy(const std::string& s, Iter oi) { - std::copy(s.begin(), s.end(), oi); - } - - template <typename Iter> void serialize_str(const std::string& s, Iter oi) { - *oi++ = '"'; - for (std::string::const_iterator i = s.begin(); i != s.end(); ++i) { - switch (*i) { -#define MAP(val, sym) case val: copy(sym, oi); break - MAP('"', "\\\""); - MAP('\\', "\\\\"); - MAP('/', "\\/"); - MAP('\b', "\\b"); - MAP('\f', "\\f"); - MAP('\n', "\\n"); - MAP('\r', "\\r"); - MAP('\t', "\\t"); -#undef MAP - default: - if ((unsigned char)*i < 0x20 || *i == 0x7f) { - char buf[7]; - SNPRINTF(buf, sizeof(buf), "\\u%04x", *i & 0xff); - copy(buf, buf + 6, oi); - } else { - *oi++ = *i; - } - break; - } - } - *oi++ = '"'; - } - - template <typename Iter> void value::serialize(Iter oi) const { - switch (type_) { - case string_type: - serialize_str(*string_, oi); - break; - case array_type: { - *oi++ = '['; - for (array::const_iterator i = array_->begin(); i != array_->end(); ++i) { - if (i != array_->begin()) { - *oi++ = ','; - } - i->serialize(oi); - } - *oi++ = ']'; - break; - } - case object_type: { - *oi++ = '{'; - for (object::const_iterator i = object_->begin(); - i != object_->end(); - ++i) { - if (i != object_->begin()) { - *oi++ = ','; - } - serialize_str(i->first, oi); - *oi++ = ':'; - i->second.serialize(oi); - } - *oi++ = '}'; - break; - } - default: - copy(to_str(), oi); - break; - } - } - - inline std::string value::serialize() const { - std::string s; - serialize(std::back_inserter(s)); - return s; - } - - template <typename Iter> class input { - protected: - Iter cur_, end_; - int last_ch_; - bool ungot_; - int line_; - public: - input(const Iter& first, const Iter& last) : cur_(first), end_(last), last_ch_(-1), ungot_(false), line_(1) {} - int getc() { - if (ungot_) { - ungot_ = false; - return last_ch_; - } - if (cur_ == end_) { - last_ch_ = -1; - return -1; - } - if (last_ch_ == '\n') { - line_++; - } - last_ch_ = *cur_++ & 0xff; - return last_ch_; - } - void ungetc() { - if (last_ch_ != -1) { - assert(! ungot_); - ungot_ = true; - } - } - Iter cur() const { return cur_; } - int line() const { return line_; } - void skip_ws() { - while (1) { - int ch = getc(); - if (! 
(ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r')) { - ungetc(); - break; - } - } - } - int expect(int expect) { - skip_ws(); - if (getc() != expect) { - ungetc(); - return false; - } - return true; - } - bool match(const std::string& pattern) { - for (std::string::const_iterator pi(pattern.begin()); - pi != pattern.end(); - ++pi) { - if (getc() != *pi) { - ungetc(); - return false; - } - } - return true; - } - }; - - template<typename Iter> inline int _parse_quadhex(input<Iter> &in) { - int uni_ch = 0, hex; - for (int i = 0; i < 4; i++) { - if ((hex = in.getc()) == -1) { - return -1; - } - if ('0' <= hex && hex <= '9') { - hex -= '0'; - } else if ('A' <= hex && hex <= 'F') { - hex -= 'A' - 0xa; - } else if ('a' <= hex && hex <= 'f') { - hex -= 'a' - 0xa; - } else { - in.ungetc(); - return -1; - } - uni_ch = uni_ch * 16 + hex; - } - return uni_ch; - } - - template<typename String, typename Iter> inline bool _parse_codepoint(String& out, input<Iter>& in) { - int uni_ch; - if ((uni_ch = _parse_quadhex(in)) == -1) { - return false; - } - if (0xd800 <= uni_ch && uni_ch <= 0xdfff) { - if (0xdc00 <= uni_ch) { - // a second 16-bit of a surrogate pair appeared - return false; - } - // first 16-bit of surrogate pair, get the next one - if (in.getc() != '\\' || in.getc() != 'u') { - in.ungetc(); - return false; - } - int second = _parse_quadhex(in); - if (! (0xdc00 <= second && second <= 0xdfff)) { - return false; - } - uni_ch = ((uni_ch - 0xd800) << 10) | ((second - 0xdc00) & 0x3ff); - uni_ch += 0x10000; - } - if (uni_ch < 0x80) { - out.push_back(uni_ch); - } else { - if (uni_ch < 0x800) { - out.push_back(0xc0 | (uni_ch >> 6)); - } else { - if (uni_ch < 0x10000) { - out.push_back(0xe0 | (uni_ch >> 12)); - } else { - out.push_back(0xf0 | (uni_ch >> 18)); - out.push_back(0x80 | ((uni_ch >> 12) & 0x3f)); - } - out.push_back(0x80 | ((uni_ch >> 6) & 0x3f)); - } - out.push_back(0x80 | (uni_ch & 0x3f)); - } - return true; - } - - template<typename String, typename Iter> inline bool _parse_string(String& out, input<Iter>& in) { - while (1) { - int ch = in.getc(); - if (ch < ' ') { - in.ungetc(); - return false; - } else if (ch == '"') { - return true; - } else if (ch == '\\') { - if ((ch = in.getc()) == -1) { - return false; - } - switch (ch) { -#define MAP(sym, val) case sym: out.push_back(val); break - MAP('"', '\"'); - MAP('\\', '\\'); - MAP('/', '/'); - MAP('b', '\b'); - MAP('f', '\f'); - MAP('n', '\n'); - MAP('r', '\r'); - MAP('t', '\t'); -#undef MAP - case 'u': - if (! _parse_codepoint(out, in)) { - return false; - } - break; - default: - return false; - } - } else { - out.push_back(ch); - } - } - return false; - } - - template <typename Context, typename Iter> inline bool _parse_array(Context& ctx, input<Iter>& in) { - if (! ctx.parse_array_start()) { - return false; - } - if (in.expect(']')) { - return true; - } - size_t idx = 0; - do { - if (! ctx.parse_array_item(in, idx)) { - return false; - } - idx++; - } while (in.expect(',')); - return in.expect(']'); - } - - template <typename Context, typename Iter> inline bool _parse_object(Context& ctx, input<Iter>& in) { - if (! ctx.parse_object_start()) { - return false; - } - if (in.expect('}')) { - return true; - } - do { - std::string key; - if (! in.expect('"') - || ! _parse_string(key, in) - || ! in.expect(':')) { - return false; - } - if (! 
ctx.parse_object_item(in, key)) { - return false; - } - } while (in.expect(',')); - return in.expect('}'); - } - - template <typename Iter> inline bool _parse_number(double& out, input<Iter>& in) { - std::string num_str; - while (1) { - int ch = in.getc(); - if (('0' <= ch && ch <= '9') || ch == '+' || ch == '-' || ch == '.' - || ch == 'e' || ch == 'E') { - num_str.push_back(ch); - } else { - in.ungetc(); - break; - } - } - char* endp; - out = strtod(num_str.c_str(), &endp); - return endp == num_str.c_str() + num_str.size(); - } - - template <typename Context, typename Iter> inline bool _parse(Context& ctx, input<Iter>& in) { - in.skip_ws(); - int ch = in.getc(); - switch (ch) { -#define IS(ch, text, op) case ch: \ - if (in.match(text) && op) { \ - return true; \ - } else { \ - return false; \ - } - IS('n', "ull", ctx.set_null()); - IS('f', "alse", ctx.set_bool(false)); - IS('t', "rue", ctx.set_bool(true)); -#undef IS - case '"': - return ctx.parse_string(in); - case '[': - return _parse_array(ctx, in); - case '{': - return _parse_object(ctx, in); - default: - if (('0' <= ch && ch <= '9') || ch == '-') { - in.ungetc(); - double f; - if (_parse_number(f, in)) { - ctx.set_number(f); - return true; - } else { - return false; - } - } - break; - } - in.ungetc(); - return false; - } - - class deny_parse_context { - public: - bool set_null() { return false; } - bool set_bool(bool) { return false; } - bool set_number(double) { return false; } - template <typename Iter> bool parse_string(input<Iter>&) { return false; } - bool parse_array_start() { return false; } - template <typename Iter> bool parse_array_item(input<Iter>&, size_t) { - return false; - } - bool parse_object_start() { return false; } - template <typename Iter> bool parse_object_item(input<Iter>&, const std::string&) { - return false; - } - }; - - class default_parse_context { - protected: - value* out_; - public: - default_parse_context(value* out) : out_(out) {} - bool set_null() { - *out_ = value(); - return true; - } - bool set_bool(bool b) { - *out_ = value(b); - return true; - } - bool set_number(double f) { - *out_ = value(f); - return true; - } - template<typename Iter> bool parse_string(input<Iter>& in) { - *out_ = value(string_type, false); - return _parse_string(out_->get<std::string>(), in); - } - bool parse_array_start() { - *out_ = value(array_type, false); - return true; - } - template <typename Iter> bool parse_array_item(input<Iter>& in, size_t) { - array& a = out_->get<array>(); - a.push_back(value()); - default_parse_context ctx(&a.back()); - return _parse(ctx, in); - } - bool parse_object_start() { - *out_ = value(object_type, false); - return true; - } - template <typename Iter> bool parse_object_item(input<Iter>& in, const std::string& key) { - object& o = out_->get<object>(); - default_parse_context ctx(&o[key]); - return _parse(ctx, in); - } - private: - default_parse_context(const default_parse_context&); - default_parse_context& operator=(const default_parse_context&); - }; - - class null_parse_context { - public: - struct dummy_str { - void push_back(int) {} - }; - public: - null_parse_context() {} - bool set_null() { return true; } - bool set_bool(bool) { return true; } - bool set_number(double) { return true; } - template <typename Iter> bool parse_string(input<Iter>& in) { - dummy_str s; - return _parse_string(s, in); - } - bool parse_array_start() { return true; } - template <typename Iter> bool parse_array_item(input<Iter>& in, size_t) { - return _parse(*this, in); - } - bool parse_object_start() { 
return true; } - template <typename Iter> bool parse_object_item(input<Iter>& in, const std::string&) { - return _parse(*this, in); - } - private: - null_parse_context(const null_parse_context&); - null_parse_context& operator=(const null_parse_context&); - }; - - // obsolete, use the version below - template <typename Iter> inline std::string parse(value& out, Iter& pos, const Iter& last) { - std::string err; - pos = parse(out, pos, last, &err); - return err; - } - - template <typename Context, typename Iter> inline Iter _parse(Context& ctx, const Iter& first, const Iter& last, std::string* err) { - input<Iter> in(first, last); - if (! _parse(ctx, in) && err != NULL) { - char buf[64]; - SNPRINTF(buf, sizeof(buf), "syntax error at line %d near: ", in.line()); - *err = buf; - while (1) { - int ch = in.getc(); - if (ch == -1 || ch == '\n') { - break; - } else if (ch >= ' ') { - err->push_back(ch); - } - } - } - return in.cur(); - } - - template <typename Iter> inline Iter parse(value& out, const Iter& first, const Iter& last, std::string* err) { - default_parse_context ctx(&out); - return _parse(ctx, first, last, err); - } - - inline std::string parse(value& out, std::istream& is) { - std::string err; - parse(out, std::istreambuf_iterator<char>(is.rdbuf()), - std::istreambuf_iterator<char>(), &err); - return err; - } - - template <typename T> struct last_error_t { - static std::string s; - }; - template <typename T> std::string last_error_t<T>::s; - - inline void set_last_error(const std::string& s) { - last_error_t<bool>::s = s; - } - - inline const std::string& get_last_error() { - return last_error_t<bool>::s; - } - - inline bool operator==(const value& x, const value& y) { - if (x.is<null>()) - return y.is<null>(); -#define PICOJSON_CMP(type) \ - if (x.is<type>()) \ - return y.is<type>() && x.get<type>() == y.get<type>() - PICOJSON_CMP(bool); - PICOJSON_CMP(double); - PICOJSON_CMP(std::string); - PICOJSON_CMP(array); - PICOJSON_CMP(object); -#undef PICOJSON_CMP - assert(0); -#ifdef _MSC_VER - __assume(0); -#endif - return false; - } - - inline bool operator!=(const value& x, const value& y) { - return ! (x == y); - } -} - -inline std::istream& operator>>(std::istream& is, picojson::value& x) -{ - picojson::set_last_error(std::string()); - std::string err = picojson::parse(x, is); - if (! err.empty()) { - picojson::set_last_error(err); - is.setstate(std::ios::failbit); - } - return is; -} - -inline std::ostream& operator<<(std::ostream& os, const picojson::value& x) -{ - x.serialize(std::ostream_iterator<char>(os)); - return os; -} -#ifdef _MSC_VER - #pragma warning(pop) -#endif - -#endif -#ifdef TEST_PICOJSON -#ifdef _MSC_VER - #pragma warning(disable : 4127) // conditional expression is constant -#endif - -using namespace std; - -static void plan(int num) -{ - printf("1..%d\n", num); -} - -static bool success = true; - -static void ok(bool b, const char* name = "") -{ - static int n = 1; - if (! b) - success = false; - printf("%s %d - %s\n", b ? 
"ok" : "ng", n++, name); -} - -template <typename T> void is(const T& x, const T& y, const char* name = "") -{ - if (x == y) { - ok(true, name); - } else { - ok(false, name); - } -} - -#include <algorithm> - -int main(void) -{ - plan(75); - - // constructors -#define TEST(expr, expected) \ - is(picojson::value expr .serialize(), string(expected), "picojson::value" #expr) - - TEST( (true), "true"); - TEST( (false), "false"); - TEST( (42.0), "42"); - TEST( (string("hello")), "\"hello\""); - TEST( ("hello"), "\"hello\""); - TEST( ("hello", 4), "\"hell\""); - -#undef TEST - -#define TEST(in, type, cmp, serialize_test) { \ - picojson::value v; \ - const char* s = in; \ - string err = picojson::parse(v, s, s + strlen(s)); \ - ok(err.empty(), in " no error"); \ - ok(v.is<type>(), in " check type"); \ - is<type>(v.get<type>(), cmp, in " correct output"); \ - is(*s, '\0', in " read to eof"); \ - if (serialize_test) { \ - is(v.serialize(), string(in), in " serialize"); \ - } \ - } - TEST("false", bool, false, true); - TEST("true", bool, true, true); - TEST("90.5", double, 90.5, false); - TEST("\"hello\"", string, string("hello"), true); - TEST("\"\\\"\\\\\\/\\b\\f\\n\\r\\t\"", string, string("\"\\/\b\f\n\r\t"), - true); - TEST("\"\\u0061\\u30af\\u30ea\\u30b9\"", string, - string("a\xe3\x82\xaf\xe3\x83\xaa\xe3\x82\xb9"), false); - TEST("\"\\ud840\\udc0b\"", string, string("\xf0\xa0\x80\x8b"), false); -#undef TEST - -#define TEST(type, expr) { \ - picojson::value v; \ - const char *s = expr; \ - string err = picojson::parse(v, s, s + strlen(s)); \ - ok(err.empty(), "empty " #type " no error"); \ - ok(v.is<picojson::type>(), "empty " #type " check type"); \ - ok(v.get<picojson::type>().empty(), "check " #type " array size"); \ - } - TEST(array, "[]"); - TEST(object, "{}"); -#undef TEST - - { - picojson::value v; - const char *s = "[1,true,\"hello\"]"; - string err = picojson::parse(v, s, s + strlen(s)); - ok(err.empty(), "array no error"); - ok(v.is<picojson::array>(), "array check type"); - is(v.get<picojson::array>().size(), size_t(3), "check array size"); - ok(v.contains(0), "check contains array[0]"); - ok(v.get(0).is<double>(), "check array[0] type"); - is(v.get(0).get<double>(), 1.0, "check array[0] value"); - ok(v.contains(1), "check contains array[1]"); - ok(v.get(1).is<bool>(), "check array[1] type"); - ok(v.get(1).get<bool>(), "check array[1] value"); - ok(v.contains(2), "check contains array[2]"); - ok(v.get(2).is<string>(), "check array[2] type"); - is(v.get(2).get<string>(), string("hello"), "check array[2] value"); - ok(!v.contains(3), "check not contains array[3]"); - } - - { - picojson::value v; - const char *s = "{ \"a\": true }"; - string err = picojson::parse(v, s, s + strlen(s)); - ok(err.empty(), "object no error"); - ok(v.is<picojson::object>(), "object check type"); - is(v.get<picojson::object>().size(), size_t(1), "check object size"); - ok(v.contains("a"), "check contains property"); - ok(v.get("a").is<bool>(), "check bool property exists"); - is(v.get("a").get<bool>(), true, "check bool property value"); - is(v.serialize(), string("{\"a\":true}"), "serialize object"); - ok(!v.contains("z"), "check not contains property"); - } - -#define TEST(json, msg) do { \ - picojson::value v; \ - const char *s = json; \ - string err = picojson::parse(v, s, s + strlen(s)); \ - is(err, string("syntax error at line " msg), msg); \ - } while (0) - TEST("falsoa", "1 near: oa"); - TEST("{]", "1 near: ]"); - TEST("\n\bbell", "2 near: bell"); - TEST("\"abc\nd\"", "1 near: "); -#undef TEST - - { - 
picojson::value v1, v2; - const char *s; - string err; - s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; - err = picojson::parse(v1, s, s + strlen(s)); - s = "{ \"d\": 2.0, \"b\": true, \"a\": [1,2,\"three\"] }"; - err = picojson::parse(v2, s, s + strlen(s)); - ok((v1 == v2), "check == operator in deep comparison"); - } - - { - picojson::value v1, v2; - const char *s; - string err; - s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; - err = picojson::parse(v1, s, s + strlen(s)); - s = "{ \"d\": 2.0, \"a\": [1,\"three\"], \"b\": true }"; - err = picojson::parse(v2, s, s + strlen(s)); - ok((v1 != v2), "check != operator for array in deep comparison"); - } - - { - picojson::value v1, v2; - const char *s; - string err; - s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; - err = picojson::parse(v1, s, s + strlen(s)); - s = "{ \"d\": 2.0, \"a\": [1,2,\"three\"], \"b\": false }"; - err = picojson::parse(v2, s, s + strlen(s)); - ok((v1 != v2), "check != operator for object in deep comparison"); - } - - { - picojson::value v1, v2; - const char *s; - string err; - s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; - err = picojson::parse(v1, s, s + strlen(s)); - picojson::object& o = v1.get<picojson::object>(); - o.erase("b"); - picojson::array& a = o["a"].get<picojson::array>(); - picojson::array::iterator i; - i = std::remove(a.begin(), a.end(), picojson::value(std::string("three"))); - a.erase(i, a.end()); - s = "{ \"a\": [1,2], \"d\": 2 }"; - err = picojson::parse(v2, s, s + strlen(s)); - ok((v1 == v2), "check erase()"); - } - - ok(picojson::value(3.0).serialize() == "3", - "integral number should be serialized as an integer"); - - { - const char* s = "{ \"a\": [1,2], \"d\": 2 }"; - picojson::null_parse_context ctx; - string err; - picojson::_parse(ctx, s, s + strlen(s), &err); - ok(err.empty(), "null_parse_context"); - } - - return success ? 0 : 1; -} - -#endif diff --git a/rst_parser/random_tree.cc b/rst_parser/random_tree.cc deleted file mode 100644 index 23e6e7f7..00000000 --- a/rst_parser/random_tree.cc +++ /dev/null @@ -1,36 +0,0 @@ -#include "arc_factored.h" - -#include <vector> -#include <iostream> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "timing_stats.h" -#include "arc_ff.h" -#include "dep_training.h" -#include "stringlib.h" -#include "filelib.h" -#include "tdict.h" -#include "weights.h" -#include "rst.h" -#include "global_ff.h" - -using namespace std; -namespace po = boost::program_options; - -int main(int argc, char** argv) { - if (argc != 2) { - cerr << argv[0] << " N\n" << endl; - return 1; - } - MT19937 rng; - unsigned n = atoi(argv[1]); - - ArcFactoredForest forest(n); - TreeSampler ts(forest); - EdgeSubset tree; - ts.SampleRandomSpanningTree(&tree, &rng); - cout << tree << endl; - return 0; -} - diff --git a/rst_parser/rst.cc b/rst_parser/rst.cc deleted file mode 100644 index bc91330b..00000000 --- a/rst_parser/rst.cc +++ /dev/null @@ -1,82 +0,0 @@ -#include "rst.h" - -using namespace std; - -// David B. Wilson. Generating Random Spanning Trees More Quickly than the Cover Time.
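rst.cc below implements that paper's algorithm directly: to sample a tree rooted at r, run a random walk from every node until it hits the already-built tree, letting a revisit overwrite next[u] so that cycles are erased implicitly, then retrace the now loop-free path and attach it. A distilled sketch of the same control flow, with a hypothetical RandomSuccessor(u) standing in for the per-node draws that the TreeSampler constructor precomputes:

    #include <vector>

    // Assumed helper: draws u's successor in proportion to its outgoing edge weights.
    int RandomSuccessor(int u);

    // Wilson's algorithm, sketched: next[u] ends up being u's parent in the
    // sampled spanning tree; next[root] stays -1.
    std::vector<int> SampleSpanningTree(int n, int root) {
      std::vector<int> next(n, -1);
      std::vector<bool> in_tree(n, false);
      in_tree[root] = true;
      for (int i = 0; i < n; ++i) {
        int u = i;
        // Walk until hitting the tree; overwriting next[u] on a revisit
        // performs the loop erasure implicitly.
        while (!in_tree[u]) { next[u] = RandomSuccessor(u); u = next[u]; }
        // Retrace the loop-free path and attach it to the tree.
        for (u = i; !in_tree[u]; u = next[u]) in_tree[u] = true;
      }
      return next;
    }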
-// this is an awesome algorithm -TreeSampler::TreeSampler(const ArcFactoredForest& af) : forest(af), usucc(af.size() + 1) { - // edges are directed from modifiers to heads, and finally to the root - vector<double> p; - for (int m = 1; m <= forest.size(); ++m) { -#if USE_ALIAS_SAMPLER - p.clear(); -#else - SampleSet<double>& ss = usucc[m]; -#endif - double z = 0; - for (int h = 0; h <= forest.size(); ++h) { - double u = forest(h-1,m-1).edge_prob.as_float(); - z += u; -#if USE_ALIAS_SAMPLER - p.push_back(u); -#else - ss.add(u); -#endif - } -#if USE_ALIAS_SAMPLER - for (int i = 0; i < p.size(); ++i) { p[i] /= z; } - usucc[m].Init(p); -#endif - } -} - -void TreeSampler::SampleRandomSpanningTree(EdgeSubset* tree, MT19937* prng) { - MT19937& rng = *prng; - const int r = 0; - bool success = false; - while (!success) { - int roots = 0; - tree->h_m_pairs.clear(); - tree->roots.clear(); - vector<int> next(forest.size() + 1, -1); - vector<char> in_tree(forest.size() + 1, 0); - in_tree[r] = 1; - //cerr << "Forest size: " << forest.size() << endl; - for (int i = 0; i <= forest.size(); ++i) { - //cerr << "Sampling starting at u=" << i << endl; - int u = i; - if (in_tree[u]) continue; - while(!in_tree[u]) { -#if USE_ALIAS_SAMPLER - next[u] = usucc[u].Draw(rng); -#else - next[u] = rng.SelectSample(usucc[u]); -#endif - u = next[u]; - } - u = i; - //cerr << (u-1); - int prev = u-1; - while(!in_tree[u]) { - in_tree[u] = true; - u = next[u]; - //cerr << " > " << (u-1); - if (u == r) { - ++roots; - tree->roots.push_back(prev); - } else { - tree->h_m_pairs.push_back(make_pair<short,short>(u-1,prev)); - } - prev = u-1; - } - //cerr << endl; - } - assert(roots > 0); - if (roots > 1) { - //cerr << "FAILURE\n"; - } else { - success = true; - } - } -}; - diff --git a/rst_parser/rst.h b/rst_parser/rst.h deleted file mode 100644 index 8bf389f7..00000000 --- a/rst_parser/rst.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef _RST_H_ -#define _RST_H_ - -#include <vector> -#include "sampler.h" -#include "arc_factored.h" -#include "alias_sampler.h" - -struct TreeSampler { - explicit TreeSampler(const ArcFactoredForest& af); - void SampleRandomSpanningTree(EdgeSubset* tree, MT19937* rng); - const ArcFactoredForest& forest; -#define USE_ALIAS_SAMPLER 1 -#if USE_ALIAS_SAMPLER - std::vector<AliasSampler> usucc; -#else - std::vector<SampleSet<double> > usucc; -#endif -}; - -#endif diff --git a/rst_parser/rst_parse.cc b/rst_parser/rst_parse.cc deleted file mode 100644 index 9c42a8f4..00000000 --- a/rst_parser/rst_parse.cc +++ /dev/null @@ -1,111 +0,0 @@ -#include "arc_factored.h" - -#include <vector> -#include <iostream> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "timing_stats.h" -#include "arc_ff.h" -#include "dep_training.h" -#include "stringlib.h" -#include "filelib.h" -#include "tdict.h" -#include "weights.h" -#include "rst.h" -#include "global_ff.h" - -using namespace std; -namespace po = boost::program_options; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - string cfg_file; - opts.add_options() - ("input,i",po::value<string>()->default_value("-"), "File containing test data (jsent format)") - ("q_weights,q",po::value<string>(), "Arc-factored weights for proposal distribution (mandatory)") - ("p_weights,p",po::value<string>(), "Weights for target distribution (optional)") - ("samples,n",po::value<unsigned>()->default_value(1000), "Number of samples"); - po::options_description 
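rst.h just above switches, at compile time, between two representations of those per-node successor distributions: a SampleSet scanned linearly per draw, and an AliasSampler with the same Init(p) / Draw(rng) shape. The alias method (Walker's, with Vose's O(K) table construction) gives O(1) draws, which matters because a successor is sampled at every step of every walk. A self-contained sketch of the technique; this illustrates the idea and is not the alias_sampler.h implementation included above:

    #include <vector>

    // Walker/Vose alias table: Init() is O(K), Draw() is O(1).
    struct AliasTable {
      std::vector<double> prob;     // probability of keeping column i
      std::vector<unsigned> alias;  // fallback outcome for column i
      void Init(const std::vector<double>& p) {  // p must sum to 1
        const unsigned K = p.size();
        prob.resize(K); alias.resize(K);
        std::vector<unsigned> small, large;
        std::vector<double> scaled(K);
        for (unsigned i = 0; i < K; ++i) {
          scaled[i] = p[i] * K;
          (scaled[i] < 1.0 ? small : large).push_back(i);
        }
        while (!small.empty() && !large.empty()) {
          const unsigned s = small.back(); small.pop_back();
          const unsigned l = large.back(); large.pop_back();
          prob[s] = scaled[s]; alias[s] = l;
          scaled[l] -= 1.0 - scaled[s];
          (scaled[l] < 1.0 ? small : large).push_back(l);
        }
        while (!large.empty()) { prob[large.back()] = 1.0; large.pop_back(); }
        while (!small.empty()) { prob[small.back()] = 1.0; small.pop_back(); }
      }
      unsigned Draw(double u1, double u2) const {  // independent uniforms in [0,1)
        const unsigned i = static_cast<unsigned>(u1 * prob.size());
        return u2 < prob[i] ? i : alias[i];
      }
    };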
clo("Command line options"); - clo.add_options() - ("config,c", po::value<string>(&cfg_file), "Configuration file") - ("help,?", "Print this help message and exit"); - - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(dconfig_options).add(clo); - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (cfg_file.size() > 0) { - ReadFile rf(cfg_file); - po::store(po::parse_config_file(*rf.stream(), dconfig_options), *conf); - } - if (conf->count("help") || conf->count("q_weights") == 0) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - vector<weight_t> qweights, pweights; - Weights::InitFromFile(conf["q_weights"].as<string>(), &qweights); - if (conf.count("p_weights")) - Weights::InitFromFile(conf["p_weights"].as<string>(), &pweights); - const bool global = pweights.size() > 0; - ArcFeatureFunctions ffs; - GlobalFeatureFunctions gff; - ReadFile rf(conf["input"].as<string>()); - istream* in = rf.stream(); - TrainingInstance sent; - MT19937 rng; - int samples = conf["samples"].as<unsigned>(); - int totroot = 0, root_right = 0, tot = 0, cor = 0; - while(TrainingInstance::ReadInstance(in, &sent)) { - ffs.PrepareForInput(sent.ts); - if (global) gff.PrepareForInput(sent.ts); - ArcFactoredForest forest(sent.ts.pos.size()); - forest.ExtractFeatures(sent.ts, ffs); - forest.Reweight(qweights); - TreeSampler ts(forest); - double best_score = -numeric_limits<double>::infinity(); - EdgeSubset best_tree; - for (int n = 0; n < samples; ++n) { - EdgeSubset tree; - ts.SampleRandomSpanningTree(&tree, &rng); - SparseVector<double> qfeats, gfeats; - tree.ExtractFeatures(sent.ts, ffs, &qfeats); - double score = 0; - if (global) { - gff.Features(sent.ts, tree, &gfeats); - score = (qfeats + gfeats).dot(pweights); - } else { - score = qfeats.dot(qweights); - } - if (score > best_score) { - best_tree = tree; - best_score = score; - } - } - cerr << "BEST SCORE: " << best_score << endl; - cout << best_tree << endl; - const bool sent_has_ref = sent.tree.h_m_pairs.size() > 0; - if (sent_has_ref) { - map<pair<short,short>, bool> ref; - for (int i = 0; i < sent.tree.h_m_pairs.size(); ++i) - ref[sent.tree.h_m_pairs[i]] = true; - int ref_root = sent.tree.roots.front(); - if (ref_root == best_tree.roots.front()) { ++root_right; } - ++totroot; - for (int i = 0; i < best_tree.h_m_pairs.size(); ++i) { - if (ref[best_tree.h_m_pairs[i]]) { - ++cor; - } - ++tot; - } - } - } - cerr << "F = " << (double(cor + root_right) / (tot + totroot)) << endl; - return 0; -} - diff --git a/rst_parser/rst_train.cc b/rst_parser/rst_train.cc deleted file mode 100644 index a8b8dd84..00000000 --- a/rst_parser/rst_train.cc +++ /dev/null @@ -1,144 +0,0 @@ -#include "arc_factored.h" - -#include <vector> -#include <iostream> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "timing_stats.h" -#include "arc_ff.h" -#include "dep_training.h" -#include "stringlib.h" -#include "filelib.h" -#include "tdict.h" -#include "weights.h" -#include "rst.h" -#include "global_ff.h" - -using namespace std; -namespace po = boost::program_options; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - string cfg_file; - opts.add_options() - ("training_data,t",po::value<string>()->default_value("-"), "File containing training data (jsent format)") - 
("q_weights,q",po::value<string>(), "Arc-factored weights for proposal distribution") - ("samples,n",po::value<unsigned>()->default_value(1000), "Number of samples"); - po::options_description clo("Command line options"); - clo.add_options() - ("config,c", po::value<string>(&cfg_file), "Configuration file") - ("help,?", "Print this help message and exit"); - - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(dconfig_options).add(clo); - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (cfg_file.size() > 0) { - ReadFile rf(cfg_file); - po::store(po::parse_config_file(*rf.stream(), dconfig_options), *conf); - } - if (conf->count("help")) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -int main(int argc, char** argv) { - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - vector<weight_t> qweights(FD::NumFeats(), 0.0); - Weights::InitFromFile(conf["q_weights"].as<string>(), &qweights); - vector<TrainingInstance> corpus; - ArcFeatureFunctions ffs; - GlobalFeatureFunctions gff; - TrainingInstance::ReadTrainingCorpus(conf["training_data"].as<string>(), &corpus); - vector<ArcFactoredForest> forests(corpus.size()); - vector<prob_t> zs(corpus.size()); - SparseVector<double> empirical; - bool flag = false; - for (int i = 0; i < corpus.size(); ++i) { - TrainingInstance& cur = corpus[i]; - if ((i+1) % 10 == 0) { cerr << '.' << flush; flag = true; } - if ((i+1) % 400 == 0) { cerr << " [" << (i+1) << "]\n"; flag = false; } - SparseVector<weight_t> efmap; - ffs.PrepareForInput(cur.ts); - gff.PrepareForInput(cur.ts); - for (int j = 0; j < cur.tree.h_m_pairs.size(); ++j) { - efmap.clear(); - ffs.EdgeFeatures(cur.ts, cur.tree.h_m_pairs[j].first, - cur.tree.h_m_pairs[j].second, - &efmap); - cur.features += efmap; - } - for (int j = 0; j < cur.tree.roots.size(); ++j) { - efmap.clear(); - ffs.EdgeFeatures(cur.ts, -1, cur.tree.roots[j], &efmap); - cur.features += efmap; - } - efmap.clear(); - gff.Features(cur.ts, cur.tree, &efmap); - cur.features += efmap; - empirical += cur.features; - forests[i].resize(cur.ts.words.size()); - forests[i].ExtractFeatures(cur.ts, ffs); - forests[i].Reweight(qweights); - forests[i].EdgeMarginals(&zs[i]); - zs[i] = prob_t::One() / zs[i]; - // cerr << zs[i] << endl; - forests[i].Reweight(qweights); // EdgeMarginals overwrites edge_prob - } - if (flag) cerr << endl; - MT19937 rng; - SparseVector<double> model_exp; - SparseVector<double> weights; - Weights::InitSparseVector(qweights, &weights); - int samples = conf["samples"].as<unsigned>(); - for (int i = 0; i < corpus.size(); ++i) { -#if 0 - forests[i].EdgeMarginals(); - model_exp.clear(); - for (int h = -1; h < num_words; ++h) { - for (int m = 0; m < num_words; ++m) { - if (h == m) continue; - const ArcFactoredForest::Edge& edge = forests[i](h,m); - const SparseVector<weight_t>& fmap = edge.features; - double prob = edge.edge_prob.as_float(); - model_exp += fmap * prob; - } - } - cerr << "TRUE EXP: " << model_exp << endl; - forests[i].Reweight(weights); -#endif - - TreeSampler ts(forests[i]); - prob_t zhat = prob_t::Zero(); - SparseVector<prob_t> sampled_exp; - for (int n = 0; n < samples; ++n) { - EdgeSubset tree; - ts.SampleRandomSpanningTree(&tree, &rng); - SparseVector<double> qfeats, gfeats; - tree.ExtractFeatures(corpus[i].ts, ffs, &qfeats); - prob_t u; u.logeq(qfeats.dot(qweights)); - const prob_t q = u / zs[i]; // proposal mass - gff.Features(corpus[i].ts, tree, &gfeats); - SparseVector<double> tot_feats = qfeats + 
gfeats; - u.logeq(tot_feats.dot(weights)); - prob_t w = u / q; - zhat += w; - for (SparseVector<double>::iterator it = tot_feats.begin(); it != tot_feats.end(); ++it) - sampled_exp.add_value(it->first, w * prob_t(it->second)); - } - sampled_exp /= zhat; - SparseVector<double> tot_m; - for (SparseVector<prob_t>::iterator it = sampled_exp.begin(); it != sampled_exp.end(); ++it) - tot_m.add_value(it->first, it->second.as_float()); - //cerr << "DIFF: " << (tot_m - corpus[i].features) << endl; - const double eta = 0.03; - weights -= (tot_m - corpus[i].features) * eta; - } - cerr << "WEIGHTS.\n"; - cerr << weights << endl; - return 0; -} - diff --git a/training/Jamfile b/training/Jamfile deleted file mode 100644 index 073451fa..00000000 --- a/training/Jamfile +++ /dev/null @@ -1,25 +0,0 @@ -import testing ; -import option ; - -lib training : - ..//utils - ..//mteval - ..//decoder - ../klm/lm//kenlm - ..//boost_program_options - ttables.cc - : <include>. - : : - <library>..//decoder - <library>../klm/lm//kenlm - <library>..//utils - <library>..//mteval - <library>..//boost_program_options - ; - -exe model1 : model1.cc : <include>../decoder ; - -# // all_tests [ glob *_test.cc ] : ..//decoder : <testing.arg>$(TOP)/decoder/test_data ; - -alias programs : model1 ; - diff --git a/training/Makefile.am b/training/Makefile.am index 4cef0d5b..5254333a 100644 --- a/training/Makefile.am +++ b/training/Makefile.am @@ -1,5 +1,5 @@ bin_PROGRAMS = \ - model1 \ + fast_align \ lbl_model \ test_ngram \ mr_em_map_adapter \ @@ -55,8 +55,8 @@ augment_grammar_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/lib test_ngram_SOURCES = test_ngram.cc test_ngram_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz -model1_SOURCES = model1.cc ttables.cc -model1_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz +fast_align_SOURCES = fast_align.cc ttables.cc +fast_align_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz lbl_model_SOURCES = lbl_model.cc lbl_model_LDADD = libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz diff --git a/training/cllh_observer.cc b/training/cllh_observer.cc index 58232769..4ec2fa65 100644 --- a/training/cllh_observer.cc +++ b/training/cllh_observer.cc @@ -45,7 +45,7 @@ void ConditionalLikelihoodObserver::NotifyAlignmentForest(const SentenceMetadata cerr << "DIFF. ERR! 
log_model_z < log_ref_z: " << cur_obj << " " << log_ref_z << endl; exit(1); } - assert(!isnan(log_ref_z)); + assert(!std::isnan(log_ref_z)); acc_obj += (cur_obj - log_ref_z); trg_words += smeta.GetReference().size(); } diff --git a/training/collapse_weights.cc b/training/collapse_weights.cc index dc480f6c..c03eb031 100644 --- a/training/collapse_weights.cc +++ b/training/collapse_weights.cc @@ -95,7 +95,7 @@ int main(int argc, char** argv) { if (line.empty()) continue; TRule tr(line, true); const double lp = tr.GetFeatureValues().dot(w); - if (isinf(lp)) { continue; } + if (std::isinf(lp)) { continue; } tr.scores_.clear(); cout << tr.AsString() << " ||| F_and_E=" << lp - log(tot); diff --git a/training/model1.cc b/training/fast_align.cc index 19692b9a..7492d26f 100644 --- a/training/model1.cc +++ b/training/fast_align.cc @@ -17,18 +17,21 @@ using namespace std; bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { po::options_description opts("Configuration options"); opts.add_options() - ("iterations,i",po::value<unsigned>()->default_value(5),"Number of iterations of EM training") - ("beam_threshold,t",po::value<double>()->default_value(-4),"log_10 of beam threshold (-10000 to include everything, 0 max)") - ("bidir,b", "Run bidirectional alignment") - ("no_null_word,N","Do not generate from the null token") - ("write_alignments,A", "Write alignments instead of parameters") + ("input,i",po::value<string>(),"Parallel corpus input file") + ("reverse,r","Reverse estimation (swap source and target during training)") + ("iterations,I",po::value<unsigned>()->default_value(5),"Number of iterations of EM training") + //("bidir,b", "Run bidirectional alignment") ("favor_diagonal,d", "Use a static alignment distribution that assigns higher probabilities to alignments near the diagonal") - ("diagonal_tension,T", po::value<double>()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (<1 = flat >1 = sharp)") ("prob_align_null", po::value<double>()->default_value(0.08), "When --favor_diagonal is set, what's the probability of a null alignment?") - ("variational_bayes,v","Add a symmetric Dirichlet prior and infer VB estimate of weights") - ("testset,x", po::value<string>(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model") + ("diagonal_tension,T", po::value<double>()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (<1 = flat >1 = sharp)") + ("variational_bayes,v","Infer VB estimate of parameters under a symmetric Dirichlet prior") ("alpha,a", po::value<double>()->default_value(0.01), "Hyperparameter for optional Dirichlet prior") - ("no_add_viterbi,V","Do not add Viterbi alignment points (may generate a grammar where some training sentence pairs are unreachable)"); + ("no_null_word,N","Do not generate from a null token") + ("output_parameters,p", "Write model parameters instead of alignments") + ("beam_threshold,t",po::value<double>()->default_value(-4),"When writing parameters, log_10 of beam threshold for writing parameter (-10000 to include everything, 0 max parameter only)") + ("hide_training_alignments,H", "Hide training alignments (only useful if you want to use -x option and just compute testset statistics)") + ("testset,x", po::value<string>(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model") + ("no_add_viterbi,V","When writing model parameters, do not add Viterbi alignment points 
(may generate a grammar where some training sentence pairs are unreachable)"); po::options_description clo("Command line options"); clo.add_options() ("config", po::value<string>(), "Configuration file") @@ -44,36 +47,29 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { } po::notify(*conf); - if (argc < 2 || conf->count("help")) { - cerr << "Usage " << argv[0] << " [OPTIONS] corpus.fr-en\n"; + if (conf->count("help") || conf->count("input") == 0) { + cerr << "Usage " << argv[0] << " [OPTIONS] -i corpus.fr-en\n"; cerr << dcmdline_options << endl; return false; } return true; } -// src and trg are source and target strings, respectively (not really lattices) -double PosteriorInference(const vector<WordID>& src, const vector<WordID>& trg) { - double llh = 0; - static vector<double> unnormed_a_i; - if (src.size() > unnormed_a_i.size()) - unnormed_a_i.resize(src.size()); - return llh; -} - int main(int argc, char** argv) { po::variables_map conf; if (!InitCommandLine(argc, argv, &conf)) return 1; - const string fname = argv[argc - 1]; + const string fname = conf["input"].as<string>(); + const bool reverse = conf.count("reverse") > 0; const int ITERATIONS = conf["iterations"].as<unsigned>(); const double BEAM_THRESHOLD = pow(10.0, conf["beam_threshold"].as<double>()); const bool use_null = (conf.count("no_null_word") == 0); const WordID kNULL = TD::Convert("<eps>"); const bool add_viterbi = (conf.count("no_add_viterbi") == 0); const bool variational_bayes = (conf.count("variational_bayes") > 0); - const bool write_alignments = (conf.count("write_alignments") > 0); + const bool write_alignments = (conf.count("output_parameters") == 0); const double diagonal_tension = conf["diagonal_tension"].as<double>(); const double prob_align_null = conf["prob_align_null"].as<double>(); + const bool hide_training_alignments = (conf.count("hide_training_alignments") > 0); string testset; if (conf.count("testset")) testset = conf["testset"].as<string>(); const double prob_align_not_null = 1.0 - prob_align_null; @@ -100,14 +96,16 @@ int main(int argc, char** argv) { bool flag = false; string line; string ssrc, strg; + vector<WordID> src, trg; while(true) { getline(in, line); if (!in) break; ++lc; if (lc % 1000 == 0) { cerr << '.'; flag = true; } if (lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; } - vector<WordID> src, trg; + src.clear(); trg.clear(); CorpusTools::ReadLine(line, &src, &trg); + if (reverse) swap(src, trg); if (src.size() == 0 || trg.size() == 0) { cerr << "Error: " << lc << "\n" << line << endl; return 1; @@ -160,10 +158,13 @@ int main(int argc, char** argv) { max_i = src[i-1]; } } - if (write_alignments) { + if (!hide_training_alignments && write_alignments) { if (max_index > 0) { if (first_al) first_al = false; else cout << ' '; - cout << (max_index - 1) << "-" << j; + if (reverse) + cout << j << '-' << (max_index - 1); + else + cout << (max_index - 1) << '-' << j; } } s2t_viterbi[max_i][f_j] = 1.0; @@ -176,7 +177,7 @@ int main(int argc, char** argv) { } likelihood += log(sum); } - if (write_alignments && final_iteration) cout << endl; + if (write_alignments && final_iteration && !hide_training_alignments) cout << endl; } // log(e) = 1.0 @@ -203,11 +204,13 @@ int main(int argc, char** argv) { istream& in = *rf.stream(); int lc = 0; double tlp = 0; - string ssrc, strg, line; + string line; while (getline(in, line)) { ++lc; vector<WordID> src, trg; CorpusTools::ReadLine(line, &src, &trg); + cout << TD::GetString(src) << " ||| " << TD::GetString(trg) 
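Both the EM E-step above and the --testset scorer below accumulate the same per-sentence quantity: a length-penalized IBM Model 1 marginal that sums each target word's translation probability over every source position, null word included. In the loop's variables (sum, prob_a_i, s2t):

    \[
      \log p(\mathbf{t} \mid \mathbf{s}) \;=\; \log p_{\mathrm{len}}\big(|\mathbf{t}|\big)
        \;+\; \sum_{j=1}^{|\mathbf{t}|} \log \sum_{i=0}^{|\mathbf{s}|}
          p(a_j{=}i)\; p(t_j \mid s_i),
    \]

where i = 0 is the <eps> null token (mass prob_align_null when --favor_diagonal is set), p(a_j = i) is uniform for plain Model 1 or the diagonal-favoring distribution otherwise, and the length term matches the Md::log_poisson call in the code. The per-position argmax of the same summands (a_j in this hunk) yields the Viterbi alignment that the new version now prints for test sentences as well.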
<< " |||"; + if (reverse) swap(src, trg); double log_prob = Md::log_poisson(trg.size(), 0.05 + src.size() * mean_srclen_multiplier); if (src.size() > unnormed_a_i.size()) unnormed_a_i.resize(src.size()); @@ -216,11 +219,14 @@ int main(int argc, char** argv) { for (int j = 0; j < trg.size(); ++j) { const WordID& f_j = trg[j]; double sum = 0; + int a_j = 0; + double max_pat = 0; const double j_over_ts = double(j) / trg.size(); double prob_a_i = 1.0 / (src.size() + use_null); // uniform (model 1) if (use_null) { if (favor_diagonal) prob_a_i = prob_align_null; - sum += s2t.prob(kNULL, f_j) * prob_a_i; + max_pat = s2t.prob(kNULL, f_j) * prob_a_i; + sum += max_pat; } double az = 0; if (favor_diagonal) { @@ -233,13 +239,24 @@ int main(int argc, char** argv) { for (int i = 1; i <= src.size(); ++i) { if (favor_diagonal) prob_a_i = unnormed_a_i[i-1] / az; - sum += s2t.prob(src[i-1], f_j) * prob_a_i; + double pat = s2t.prob(src[i-1], f_j) * prob_a_i; + if (pat > max_pat) { max_pat = pat; a_j = i; } + sum += pat; } log_prob += log(sum); + if (write_alignments) { + if (a_j > 0) { + cout << ' '; + if (reverse) + cout << j << '-' << (a_j - 1); + else + cout << (a_j - 1) << '-' << j; + } + } } tlp += log_prob; - cerr << ssrc << " ||| " << strg << " ||| " << log_prob << endl; - } + cout << " ||| " << log_prob << endl << flush; + } // loop over test set sentences cerr << "TOTAL LOG PROB " << tlp << endl; } diff --git a/training/liblbfgs/Jamfile b/training/liblbfgs/Jamfile deleted file mode 100644 index 49c82748..00000000 --- a/training/liblbfgs/Jamfile +++ /dev/null @@ -1,5 +0,0 @@ -import testing ; - -lib liblbfgs : lbfgs.c : <include>.. ; - -unit-test ll_test : ll_test.cc liblbfgs : <include>.. ; diff --git a/training/mpi_batch_optimize.cc b/training/mpi_batch_optimize.cc index 6432f4a2..2eff07e4 100644 --- a/training/mpi_batch_optimize.cc +++ b/training/mpi_batch_optimize.cc @@ -142,7 +142,7 @@ struct TrainingObserver : public DecoderObserver { cerr << "DIFF. ERR! log_model_z < log_ref_z: " << cur_obj << " " << log_ref_z << endl; exit(1); } - assert(!isnan(log_ref_z)); + assert(!std::isnan(log_ref_z)); ref_exp -= cur_model_exp; acc_grad -= ref_exp; acc_obj += (cur_obj - log_ref_z); diff --git a/training/mpi_online_optimize.cc b/training/mpi_online_optimize.cc index 993627f0..d6968848 100644 --- a/training/mpi_online_optimize.cc +++ b/training/mpi_online_optimize.cc @@ -143,7 +143,7 @@ struct TrainingObserver : public DecoderObserver { cerr << "DIFF. ERR! log_model_z < log_ref_z: " << cur_obj << " " << log_ref_z << endl; exit(1); } - assert(!isnan(log_ref_z)); + assert(!std::isnan(log_ref_z)); ref_exp -= cur_model_exp; acc_grad += ref_exp; acc_obj += (cur_obj - log_ref_z); @@ -330,7 +330,7 @@ int main(int argc, char** argv) { if (rank == 0) { converged = (iter == max_iteration); Weights::SanityCheck(lambdas); - Weights::ShowLargestFeatures(lambdas); + static int cc = 0; ++cc; if (cc > 1) { Weights::ShowLargestFeatures(lambdas); } string fname = "weights.cur.gz"; if (iter % write_weights_every_ith == 0) { ostringstream o; o << "weights.epoch_" << (ai+1) << '.' 
<< iter << ".gz"; diff --git a/training/mr_optimize_reduce.cc b/training/mr_optimize_reduce.cc index 461e6b5f..d490192f 100644 --- a/training/mr_optimize_reduce.cc +++ b/training/mr_optimize_reduce.cc @@ -19,8 +19,8 @@ namespace po = boost::program_options; void SanityCheck(const vector<double>& w) { for (int i = 0; i < w.size(); ++i) { - assert(!isnan(w[i])); - assert(!isinf(w[i])); + assert(!std::isnan(w[i])); + assert(!std::isinf(w[i])); } } diff --git a/utils/Jamfile b/utils/Jamfile deleted file mode 100644 index 4444b25f..00000000 --- a/utils/Jamfile +++ /dev/null @@ -1,32 +0,0 @@ -import testing ; -import option ; - -additional = ; -if [ option.get with-cmph ] { - additional += perfect_hash.cc ; -} - -lib utils : - alignment_io.cc - b64tools.cc - corpus_tools.cc - dict.cc - tdict.cc - fdict.cc - gzstream.cc - filelib.cc - stringlib.cc - sparse_vector.cc - timing_stats.cc - verbose.cc - weights.cc - $(additional) - ..//z - : <include>.. <include>. : : <include>.. <include>. ; - -exe atools : atools.cc utils ..//boost_program_options ; -exe reconstruct_weights : reconstruct_weights.cc utils ..//boost_program_options ; - -alias programs : reconstruct_weights atools ; - -all_tests [ glob *_test.cc phmt.cc ts.cc ] : utils : <testing.arg>$(TOP)/utils/test_data ; diff --git a/utils/Makefile.am b/utils/Makefile.am index 799ec879..3ad9d69e 100644 --- a/utils/Makefile.am +++ b/utils/Makefile.am @@ -3,16 +3,13 @@ bin_PROGRAMS = reconstruct_weights atools noinst_PROGRAMS = \ ts \ phmt \ - mfcr_test \ - crp_test \ dict_test \ m_test \ weights_test \ logval_test \ - small_vector_test \ - unigram_pyp_lm + small_vector_test -TESTS = ts mfcr_test crp_test small_vector_test logval_test weights_test dict_test m_test +TESTS = ts small_vector_test logval_test weights_test dict_test m_test noinst_LIBRARIES = libutils.a @@ -48,18 +45,12 @@ m_test_SOURCES = m_test.cc m_test_LDADD = libutils.a $(BOOST_UNIT_TEST_FRAMEWORK_LDFLAGS) $(BOOST_UNIT_TEST_FRAMEWORK_LIBS) -lz dict_test_SOURCES = dict_test.cc dict_test_LDADD = libutils.a $(BOOST_UNIT_TEST_FRAMEWORK_LDFLAGS) $(BOOST_UNIT_TEST_FRAMEWORK_LIBS) -lz -mfcr_test_SOURCES = mfcr_test.cc -mfcr_test_LDADD = libutils.a $(BOOST_UNIT_TEST_FRAMEWORK_LDFLAGS) $(BOOST_UNIT_TEST_FRAMEWORK_LIBS) -lz weights_test_SOURCES = weights_test.cc weights_test_LDADD = libutils.a $(BOOST_UNIT_TEST_FRAMEWORK_LDFLAGS) $(BOOST_UNIT_TEST_FRAMEWORK_LIBS) -lz -crp_test_SOURCES = crp_test.cc -crp_test_LDADD = libutils.a $(BOOST_UNIT_TEST_FRAMEWORK_LDFLAGS) $(BOOST_UNIT_TEST_FRAMEWORK_LIBS) -lz logval_test_SOURCES = logval_test.cc logval_test_LDADD = libutils.a $(BOOST_UNIT_TEST_FRAMEWORK_LDFLAGS) $(BOOST_UNIT_TEST_FRAMEWORK_LIBS) -lz small_vector_test_SOURCES = small_vector_test.cc small_vector_test_LDADD = libutils.a $(BOOST_UNIT_TEST_FRAMEWORK_LDFLAGS) $(BOOST_UNIT_TEST_FRAMEWORK_LIBS) -lz -unigram_pyp_lm_SOURCES = unigram_pyp_lm.cc -unigram_pyp_lm_LDADD = libutils.a -lz ################################################################ # do NOT NOT NOT add any other -I includes NO NO NO NO NO ###### diff --git a/utils/ccrp.h b/utils/ccrp.h deleted file mode 100644 index f5d3fc78..00000000 --- a/utils/ccrp.h +++ /dev/null @@ -1,270 +0,0 @@ -#ifndef _CCRP_H_ -#define _CCRP_H_ - -#include <numeric> -#include <cassert> -#include <cmath> -#include <list> -#include <iostream> -#include <vector> -#include <tr1/unordered_map> -#include <boost/functional/hash.hpp> -#include "sampler.h" -#include "slice_sampler.h" -#include "crp_table_manager.h" -#include "m.h" - -// Chinese restaurant process 
(Pitman-Yor parameters) with table tracking. - -template <typename Dish, typename DishHash = boost::hash<Dish> > -class CCRP { - public: - CCRP(double disc, double strength) : - num_tables_(), - num_customers_(), - discount_(disc), - strength_(strength), - discount_prior_strength_(std::numeric_limits<double>::quiet_NaN()), - discount_prior_beta_(std::numeric_limits<double>::quiet_NaN()), - strength_prior_shape_(std::numeric_limits<double>::quiet_NaN()), - strength_prior_rate_(std::numeric_limits<double>::quiet_NaN()) { - check_hyperparameters(); - } - - CCRP(double d_strength, double d_beta, double c_shape, double c_rate, double d = 0.9, double c = 1.0) : - num_tables_(), - num_customers_(), - discount_(d), - strength_(c), - discount_prior_strength_(d_strength), - discount_prior_beta_(d_beta), - strength_prior_shape_(c_shape), - strength_prior_rate_(c_rate) { - check_hyperparameters(); - } - - void check_hyperparameters() { - if (discount_ < 0.0 || discount_ >= 1.0) { - std::cerr << "Bad discount: " << discount_ << std::endl; - abort(); - } - if (strength_ <= -discount_) { - std::cerr << "Bad strength: " << strength_ << " (discount=" << discount_ << ")" << std::endl; - abort(); - } - } - - double discount() const { return discount_; } - double strength() const { return strength_; } - void set_hyperparameters(double d, double s) { - discount_ = d; strength_ = s; - check_hyperparameters(); - } - void set_discount(double d) { discount_ = d; check_hyperparameters(); } - void set_strength(double a) { strength_ = a; check_hyperparameters(); } - - bool has_discount_prior() const { - return !std::isnan(discount_prior_strength_); - } - - bool has_strength_prior() const { - return !std::isnan(strength_prior_shape_); - } - - void clear() { - num_tables_ = 0; - num_customers_ = 0; - dish_locs_.clear(); - } - - unsigned num_tables() const { - return num_tables_; - } - - unsigned num_tables(const Dish& dish) const { - const typename std::tr1::unordered_map<Dish, CRPTableManager, DishHash>::const_iterator it = dish_locs_.find(dish); - if (it == dish_locs_.end()) return 0; - return it->second.num_tables(); - } - - unsigned num_customers() const { - return num_customers_; - } - - unsigned num_customers(const Dish& dish) const { - const typename std::tr1::unordered_map<Dish, CRPTableManager, DishHash>::const_iterator it = dish_locs_.find(dish); - if (it == dish_locs_.end()) return 0; - return it->second.num_customers(); - } - - // returns +1 or 0 indicating whether a new table was opened - // p = probability with which the particular table was selected - // excluding p0 - template <typename T> - int increment(const Dish& dish, const T& p0, MT19937* rng, T* p = NULL) { - CRPTableManager& loc = dish_locs_[dish]; - bool share_table = false; - if (loc.num_customers()) { - const T p_empty = T(strength_ + num_tables_ * discount_) * p0; - const T p_share = T(loc.num_customers() - loc.num_tables() * discount_); - share_table = rng->SelectSample(p_empty, p_share); - } - if (share_table) { - loc.share_table(discount_, rng); - } else { - loc.create_table(); - ++num_tables_; - } - ++num_customers_; - return (share_table ? 
0 : 1); - } - - // returns -1 or 0, indicating whether a table was closed - int decrement(const Dish& dish, MT19937* rng) { - CRPTableManager& loc = dish_locs_[dish]; - assert(loc.num_customers()); - if (loc.num_customers() == 1) { - dish_locs_.erase(dish); - --num_tables_; - --num_customers_; - return -1; - } else { - int delta = loc.remove_customer(rng); - --num_customers_; - if (delta) --num_tables_; - return delta; - } - } - - template <typename T> - T prob(const Dish& dish, const T& p0) const { - const typename std::tr1::unordered_map<Dish, CRPTableManager, DishHash>::const_iterator it = dish_locs_.find(dish); - const T r = T(num_tables_ * discount_ + strength_); - if (it == dish_locs_.end()) { - return r * p0 / T(num_customers_ + strength_); - } else { - return (T(it->second.num_customers() - discount_ * it->second.num_tables()) + r * p0) / - T(num_customers_ + strength_); - } - } - - double log_crp_prob() const { - return log_crp_prob(discount_, strength_); - } - - // taken from http://en.wikipedia.org/wiki/Chinese_restaurant_process - // does not include P_0's - double log_crp_prob(const double& discount, const double& strength) const { - double lp = 0.0; - if (has_discount_prior()) - lp = Md::log_beta_density(discount, discount_prior_strength_, discount_prior_beta_); - if (has_strength_prior()) - lp += Md::log_gamma_density(strength + discount, strength_prior_shape_, strength_prior_rate_); - assert(lp <= 0.0); - if (num_customers_) { - if (discount > 0.0) { - const double r = lgamma(1.0 - discount); - if (strength) - lp += lgamma(strength) - lgamma(strength / discount); - lp += - lgamma(strength + num_customers_) - + num_tables_ * log(discount) + lgamma(strength / discount + num_tables_); - assert(std::isfinite(lp)); - for (typename std::tr1::unordered_map<Dish, CRPTableManager, DishHash>::const_iterator it = dish_locs_.begin(); - it != dish_locs_.end(); ++it) { - const CRPTableManager& cur = it->second; // TODO check - for (CRPTableManager::const_iterator ti = cur.begin(); ti != cur.end(); ++ti) { - lp += (lgamma(ti->first - discount) - r) * ti->second; - } - } - } else if (!discount) { // discount == 0.0 - lp += lgamma(strength) + num_tables_ * log(strength) - lgamma(strength + num_tables_); - assert(std::isfinite(lp)); - for (typename std::tr1::unordered_map<Dish, CRPTableManager, DishHash>::const_iterator it = dish_locs_.begin(); - it != dish_locs_.end(); ++it) { - const CRPTableManager& cur = it->second; - lp += lgamma(cur.num_tables()); - } - } else { - assert(!"discount less than 0 detected!"); - } - } - assert(std::isfinite(lp)); - return lp; - } - - void resample_hyperparameters(MT19937* rng, const unsigned nloop = 5, const unsigned niterations = 10) { - assert(has_discount_prior() || has_strength_prior()); - if (num_customers() == 0) return; - DiscountResampler dr(*this); - StrengthResampler sr(*this); - for (unsigned iter = 0; iter < nloop; ++iter) { - if (has_strength_prior()) { - strength_ = slice_sampler1d(sr, strength_, *rng, -discount_ + std::numeric_limits<double>::min(), - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - } - if (has_discount_prior()) { - double min_discount = std::numeric_limits<double>::min(); - if (strength_ < 0.0) min_discount -= strength_; - discount_ = slice_sampler1d(dr, discount_, *rng, min_discount, - 1.0, 0.0, niterations, 100*niterations); - } - } - strength_ = slice_sampler1d(sr, strength_, *rng, -discount_, - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - } - - struct 
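prob() above is the standard Pitman-Yor CRP predictive rule. With c_w customers eating dish w at t_w tables, c and T the grand totals, discount d and strength theta, it computes

    \[
      p(w \mid \text{seating}) \;=\; \frac{c_w - d\,t_w \;+\; (\theta + d\,T)\,p_0(w)}{c + \theta},
    \]

which for an unseen dish reduces to the new-table mass (theta + dT) p_0(w) / (c + theta); increment() splits the same two terms to decide whether a customer shares an existing table or opens a new one.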
DiscountResampler { - DiscountResampler(const CCRP& crp) : crp_(crp) {} - const CCRP& crp_; - double operator()(const double& proposed_discount) const { - return crp_.log_crp_prob(proposed_discount, crp_.strength_); - } - }; - - struct StrengthResampler { - StrengthResampler(const CCRP& crp) : crp_(crp) {} - const CCRP& crp_; - double operator()(const double& proposed_strength) const { - return crp_.log_crp_prob(crp_.discount_, proposed_strength); - } - }; - - void Print(std::ostream* out) const { - std::cerr << "PYP(d=" << discount_ << ",c=" << strength_ << ") customers=" << num_customers_ << std::endl; - for (typename std::tr1::unordered_map<Dish, CRPTableManager, DishHash>::const_iterator it = dish_locs_.begin(); - it != dish_locs_.end(); ++it) { - (*out) << it->first << " : " << it->second << std::endl; - } - } - - typedef typename std::tr1::unordered_map<Dish, CRPTableManager, DishHash>::const_iterator const_iterator; - const_iterator begin() const { - return dish_locs_.begin(); - } - const_iterator end() const { - return dish_locs_.end(); - } - - unsigned num_tables_; - unsigned num_customers_; - std::tr1::unordered_map<Dish, CRPTableManager, DishHash> dish_locs_; - - double discount_; - double strength_; - - // optional beta prior on discount_ (NaN if no prior) - double discount_prior_strength_; - double discount_prior_beta_; - - // optional gamma prior on strength_ (NaN if no prior) - double strength_prior_shape_; - double strength_prior_rate_; -}; - -template <typename T,typename H> -std::ostream& operator<<(std::ostream& o, const CCRP<T,H>& c) { - c.Print(&o); - return o; -} - -#endif diff --git a/utils/ccrp_nt.h b/utils/ccrp_nt.h deleted file mode 100644 index 724b11bd..00000000 --- a/utils/ccrp_nt.h +++ /dev/null @@ -1,164 +0,0 @@ -#ifndef _CCRP_NT_H_ -#define _CCRP_NT_H_ - -#include <numeric> -#include <cassert> -#include <cmath> -#include <list> -#include <iostream> -#include <vector> -#include <tr1/unordered_map> -#include <boost/functional/hash.hpp> -#include "sampler.h" -#include "slice_sampler.h" -#include "m.h" - -// Chinese restaurant process (1 parameter) -template <typename Dish, typename DishHash = boost::hash<Dish> > -class CCRP_NoTable { - public: - explicit CCRP_NoTable(double conc) : - num_customers_(), - alpha_(conc), - alpha_prior_shape_(std::numeric_limits<double>::quiet_NaN()), - alpha_prior_rate_(std::numeric_limits<double>::quiet_NaN()) {} - - CCRP_NoTable(double c_shape, double c_rate, double c = 10.0) : - num_customers_(), - alpha_(c), - alpha_prior_shape_(c_shape), - alpha_prior_rate_(c_rate) {} - - double alpha() const { return alpha_; } - void set_alpha(const double& alpha) { alpha_ = alpha; assert(alpha_ > 0.0); } - - bool has_alpha_prior() const { - return !std::isnan(alpha_prior_shape_); - } - - void clear() { - num_customers_ = 0; - custs_.clear(); - } - - unsigned num_customers() const { - return num_customers_; - } - - unsigned num_customers(const Dish& dish) const { - const typename std::tr1::unordered_map<Dish, unsigned, DishHash>::const_iterator it = custs_.find(dish); - if (it == custs_.end()) return 0; - return it->second; - } - - int increment(const Dish& dish) { - int table_diff = 0; - if (++custs_[dish] == 1) - table_diff = 1; - ++num_customers_; - return table_diff; - } - - int decrement(const Dish& dish) { - int table_diff = 0; - int nc = --custs_[dish]; - if (nc == 0) { - custs_.erase(dish); - table_diff = -1; - } else if (nc < 0) { - std::cerr << "Dish counts dropped below zero for: " << dish << std::endl; - abort(); - } - 
--num_customers_; - return table_diff; - } - - template <typename F> - F prob(const Dish& dish, const F& p0) const { - const unsigned at_table = num_customers(dish); - return (F(at_table) + p0 * F(alpha_)) / F(num_customers_ + alpha_); - } - - double logprob(const Dish& dish, const double& logp0) const { - const unsigned at_table = num_customers(dish); - return log(at_table + exp(logp0 + log(alpha_))) - log(num_customers_ + alpha_); - } - - double log_crp_prob() const { - return log_crp_prob(alpha_); - } - - // taken from http://en.wikipedia.org/wiki/Chinese_restaurant_process - // does not include P_0's - double log_crp_prob(const double& alpha) const { - double lp = 0.0; - if (has_alpha_prior()) - lp += Md::log_gamma_density(alpha, alpha_prior_shape_, alpha_prior_rate_); - assert(lp <= 0.0); - if (num_customers_) { - lp += lgamma(alpha) - lgamma(alpha + num_customers_) + - custs_.size() * log(alpha); - assert(std::isfinite(lp)); - for (typename std::tr1::unordered_map<Dish, unsigned, DishHash>::const_iterator it = custs_.begin(); - it != custs_.end(); ++it) { - lp += lgamma(it->second); - } - } - assert(std::isfinite(lp)); - return lp; - } - - void resample_hyperparameters(MT19937* rng, const unsigned nloop = 5, const unsigned niterations = 10) { - assert(has_alpha_prior()); - ConcentrationResampler cr(*this); - for (unsigned iter = 0; iter < nloop; ++iter) { - alpha_ = slice_sampler1d(cr, alpha_, *rng, 0.0, - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - } - } - - struct ConcentrationResampler { - ConcentrationResampler(const CCRP_NoTable& crp) : crp_(crp) {} - const CCRP_NoTable& crp_; - double operator()(const double& proposed_alpha) const { - return crp_.log_crp_prob(proposed_alpha); - } - }; - - void Print(std::ostream* out) const { - (*out) << "DP(alpha=" << alpha_ << ") customers=" << num_customers_ << std::endl; - int cc = 0; - for (typename std::tr1::unordered_map<Dish, unsigned, DishHash>::const_iterator it = custs_.begin(); - it != custs_.end(); ++it) { - (*out) << " " << it->first << "(" << it->second << " eating)"; - ++cc; - if (cc > 10) { (*out) << " ..."; break; } - } - (*out) << std::endl; - } - - unsigned num_customers_; - std::tr1::unordered_map<Dish, unsigned, DishHash> custs_; - - typedef typename std::tr1::unordered_map<Dish, unsigned, DishHash>::const_iterator const_iterator; - const_iterator begin() const { - return custs_.begin(); - } - const_iterator end() const { - return custs_.end(); - } - - double alpha_; - - // optional gamma prior on alpha_ (NaN if no prior) - double alpha_prior_shape_; - double alpha_prior_rate_; -}; - -template <typename T,typename H> -std::ostream& operator<<(std::ostream& o, const CCRP_NoTable<T,H>& c) { - c.Print(&o); - return o; -} - -#endif diff --git a/utils/ccrp_onetable.h b/utils/ccrp_onetable.h deleted file mode 100644 index abe399ea..00000000 --- a/utils/ccrp_onetable.h +++ /dev/null @@ -1,253 +0,0 @@ -#ifndef _CCRP_ONETABLE_H_ -#define _CCRP_ONETABLE_H_ - -#include <numeric> -#include <cassert> -#include <cmath> -#include <list> -#include <iostream> -#include <tr1/unordered_map> -#include <boost/functional/hash.hpp> -#include "sampler.h" -#include "slice_sampler.h" - -// Chinese restaurant process (Pitman-Yor parameters) with one table approximation - -template <typename Dish, typename DishHash = boost::hash<Dish> > -class CCRP_OneTable { - typedef std::tr1::unordered_map<Dish, unsigned, DishHash> DishMapType; - public: - CCRP_OneTable(double disc, double conc) : - num_tables_(), - 
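CCRP_NoTable::prob() above is the one-parameter (discount d = 0) special case of the Pitman-Yor rule: with no table tracking, the predictive collapses to the familiar Dirichlet-process form

    \[
      p(w \mid \text{counts}) \;=\; \frac{n_w + \alpha\, p_0(w)}{n + \alpha},
    \]

with n_w the count for dish w, n the total count, and alpha the concentration; log_crp_prob() is the corresponding exchangeable joint probability, computed in closed form from the lgamma terms.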
num_customers_(), - discount_(disc), - alpha_(conc), - discount_prior_alpha_(std::numeric_limits<double>::quiet_NaN()), - discount_prior_beta_(std::numeric_limits<double>::quiet_NaN()), - alpha_prior_shape_(std::numeric_limits<double>::quiet_NaN()), - alpha_prior_rate_(std::numeric_limits<double>::quiet_NaN()) {} - - CCRP_OneTable(double d_alpha, double d_beta, double c_shape, double c_rate, double d = 0.9, double c = 1.0) : - num_tables_(), - num_customers_(), - discount_(d), - alpha_(c), - discount_prior_alpha_(d_alpha), - discount_prior_beta_(d_beta), - alpha_prior_shape_(c_shape), - alpha_prior_rate_(c_rate) {} - - double discount() const { return discount_; } - double alpha() const { return alpha_; } - void set_alpha(double c) { alpha_ = c; } - void set_discount(double d) { discount_ = d; } - - bool has_discount_prior() const { - return !std::isnan(discount_prior_alpha_); - } - - bool has_alpha_prior() const { - return !std::isnan(alpha_prior_shape_); - } - - void clear() { - num_tables_ = 0; - num_customers_ = 0; - dish_counts_.clear(); - } - - unsigned num_tables() const { - return num_tables_; - } - - unsigned num_tables(const Dish& dish) const { - const typename DishMapType::const_iterator it = dish_counts_.find(dish); - if (it == dish_counts_.end()) return 0; - return 1; - } - - unsigned num_customers() const { - return num_customers_; - } - - unsigned num_customers(const Dish& dish) const { - const typename DishMapType::const_iterator it = dish_counts_.find(dish); - if (it == dish_counts_.end()) return 0; - return it->second; - } - - // returns +1 or 0 indicating whether a new table was opened - int increment(const Dish& dish) { - unsigned& dc = dish_counts_[dish]; - ++dc; - ++num_customers_; - if (dc == 1) { - ++num_tables_; - return 1; - } else { - return 0; - } - } - - // returns -1 or 0, indicating whether a table was closed - int decrement(const Dish& dish) { - unsigned& dc = dish_counts_[dish]; - assert(dc > 0); - if (dc == 1) { - dish_counts_.erase(dish); - --num_tables_; - --num_customers_; - return -1; - } else { - assert(dc > 1); - --dc; - --num_customers_; - return 0; - } - } - - double prob(const Dish& dish, const double& p0) const { - const typename DishMapType::const_iterator it = dish_counts_.find(dish); - const double r = num_tables_ * discount_ + alpha_; - if (it == dish_counts_.end()) { - return r * p0 / (num_customers_ + alpha_); - } else { - return (it->second - discount_ + r * p0) / - (num_customers_ + alpha_); - } - } - - template <typename T> - T probT(const Dish& dish, const T& p0) const { - const typename DishMapType::const_iterator it = dish_counts_.find(dish); - const T r(num_tables_ * discount_ + alpha_); - if (it == dish_counts_.end()) { - return r * p0 / T(num_customers_ + alpha_); - } else { - return (T(it->second - discount_) + r * p0) / - T(num_customers_ + alpha_); - } - } - - double log_crp_prob() const { - return log_crp_prob(discount_, alpha_); - } - - static double log_beta_density(const double& x, const double& alpha, const double& beta) { - assert(x > 0.0); - assert(x < 1.0); - assert(alpha > 0.0); - assert(beta > 0.0); - const double lp = (alpha-1)*log(x)+(beta-1)*log(1-x)+lgamma(alpha+beta)-lgamma(alpha)-lgamma(beta); - return lp; - } - - static double log_gamma_density(const double& x, const double& shape, const double& rate) { - assert(x >= 0.0); - assert(shape > 0.0); - assert(rate > 0.0); - const double lp = (shape-1)*log(x) - shape*log(rate) - x/rate - lgamma(shape); - return lp; - } - - // taken from 
http://en.wikipedia.org/wiki/Chinese_restaurant_process - // does not include P_0's - double log_crp_prob(const double& discount, const double& alpha) const { - double lp = 0.0; - if (has_discount_prior()) - lp = log_beta_density(discount, discount_prior_alpha_, discount_prior_beta_); - if (has_alpha_prior()) - lp += log_gamma_density(alpha, alpha_prior_shape_, alpha_prior_rate_); - assert(lp <= 0.0); - if (num_customers_) { - if (discount > 0.0) { - const double r = lgamma(1.0 - discount); - lp += lgamma(alpha) - lgamma(alpha + num_customers_) - + num_tables_ * log(discount) + lgamma(alpha / discount + num_tables_) - - lgamma(alpha / discount); - assert(std::isfinite(lp)); - for (typename DishMapType::const_iterator it = dish_counts_.begin(); - it != dish_counts_.end(); ++it) { - const unsigned& cur = it->second; - lp += lgamma(cur - discount) - r; - } - } else { - assert(!"not implemented yet"); - } - } - assert(std::isfinite(lp)); - return lp; - } - - void resample_hyperparameters(MT19937* rng, const unsigned nloop = 5, const unsigned niterations = 10) { - assert(has_discount_prior() || has_alpha_prior()); - DiscountResampler dr(*this); - ConcentrationResampler cr(*this); - for (unsigned iter = 0; iter < nloop; ++iter) { - if (has_alpha_prior()) { - alpha_ = slice_sampler1d(cr, alpha_, *rng, 0.0, - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - } - if (has_discount_prior()) { - discount_ = slice_sampler1d(dr, discount_, *rng, std::numeric_limits<double>::min(), - 1.0, 0.0, niterations, 100*niterations); - } - } - alpha_ = slice_sampler1d(cr, alpha_, *rng, 0.0, - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - } - - struct DiscountResampler { - DiscountResampler(const CCRP_OneTable& crp) : crp_(crp) {} - const CCRP_OneTable& crp_; - double operator()(const double& proposed_discount) const { - return crp_.log_crp_prob(proposed_discount, crp_.alpha_); - } - }; - - struct ConcentrationResampler { - ConcentrationResampler(const CCRP_OneTable& crp) : crp_(crp) {} - const CCRP_OneTable& crp_; - double operator()(const double& proposed_alpha) const { - return crp_.log_crp_prob(crp_.discount_, proposed_alpha); - } - }; - - void Print(std::ostream* out) const { - (*out) << "PYP(d=" << discount_ << ",c=" << alpha_ << ") customers=" << num_customers_ << std::endl; - for (typename DishMapType::const_iterator it = dish_counts_.begin(); it != dish_counts_.end(); ++it) { - (*out) << " " << it->first << " = " << it->second << std::endl; - } - } - - typedef typename DishMapType::const_iterator const_iterator; - const_iterator begin() const { - return dish_counts_.begin(); - } - const_iterator end() const { - return dish_counts_.end(); - } - - unsigned num_tables_; - unsigned num_customers_; - DishMapType dish_counts_; - - double discount_; - double alpha_; - - // optional beta prior on discount_ (NaN if no prior) - double discount_prior_alpha_; - double discount_prior_beta_; - - // optional gamma prior on alpha_ (NaN if no prior) - double alpha_prior_shape_; - double alpha_prior_rate_; -}; - -template <typename T,typename H> -std::ostream& operator<<(std::ostream& o, const CCRP_OneTable<T,H>& c) { - c.Print(&o); - return o; -} - -#endif diff --git a/utils/crp_table_manager.h b/utils/crp_table_manager.h deleted file mode 100644 index 753e721f..00000000 --- a/utils/crp_table_manager.h +++ /dev/null @@ -1,114 +0,0 @@ -#ifndef _CRP_TABLE_MANAGER_H_ -#define _CRP_TABLE_MANAGER_H_ - -#include <iostream> -#include "sparse_vector.h" -#include 
"sampler.h" - -// these are helper classes for implementing token-based CRP samplers -// basically the data structures recommended by Blunsom et al. in the Note. - -struct CRPHistogram { - //typedef std::map<unsigned, unsigned> MAPTYPE; - typedef SparseVector<unsigned> MAPTYPE; - typedef MAPTYPE::const_iterator const_iterator; - - inline void increment(unsigned bin, unsigned delta = 1u) { - data[bin] += delta; - } - inline void decrement(unsigned bin, unsigned delta = 1u) { - unsigned r = data[bin] -= delta; - if (!r) data.erase(bin); - } - inline void move(unsigned from_bin, unsigned to_bin, unsigned delta = 1u) { - decrement(from_bin, delta); - increment(to_bin, delta); - } - inline const_iterator begin() const { return data.begin(); } - inline const_iterator end() const { return data.end(); } - - private: - MAPTYPE data; -}; - -// A CRPTableManager tracks statistics about all customers -// and tables serving some dish in a CRP and can correctly sample what -// table to remove a customer from and what table to join -struct CRPTableManager { - CRPTableManager() : customers(), tables() {} - - inline unsigned num_tables() const { - return tables; - } - - inline unsigned num_customers() const { - return customers; - } - - inline void create_table() { - h.increment(1); - ++tables; - ++customers; - } - - // seat a customer at a table proportional to the number of customers seated at a table, less the discount - // *new tables are never created by this function! - inline void share_table(const double discount, MT19937* rng) { - const double z = customers - discount * num_tables(); - double r = z * rng->next(); - const CRPHistogram::const_iterator end = h.end(); - CRPHistogram::const_iterator it = h.begin(); - for (; it != end; ++it) { - // it->first = number of customers at table - // it->second = number of such tables - double thresh = (it->first - discount) * it->second; - if (thresh > r) break; - r -= thresh; - } - h.move(it->first, it->first + 1); - ++customers; - } - - // randomly sample a customer - // *tables may be removed - // returns -1 if a table is removed, 0 otherwise - inline int remove_customer(MT19937* rng) { - int r = rng->next() * num_customers(); - const CRPHistogram::const_iterator end = h.end(); - CRPHistogram::const_iterator it = h.begin(); - for (; it != end; ++it) { - int thresh = it->first * it->second; - if (thresh > r) break; - r -= thresh; - } - --customers; - const unsigned tc = it->first; - if (tc == 1) { - h.decrement(1); - --tables; - return -1; - } else { - h.move(tc, tc - 1); - return 0; - } - } - - typedef CRPHistogram::const_iterator const_iterator; - const_iterator begin() const { return h.begin(); } - const_iterator end() const { return h.end(); } - - unsigned customers; - unsigned tables; - CRPHistogram h; -}; - -std::ostream& operator<<(std::ostream& os, const CRPTableManager& tm) { - os << '[' << tm.num_customers() << " total customers at " << tm.num_tables() << " tables ||| "; - for (CRPHistogram::const_iterator it = tm.begin(); it != tm.end(); ++it) { - if (it != tm.h.begin()) os << " -- "; - os << '(' << it->first << ") x " << it->second; - } - return os << ']'; -} - -#endif diff --git a/utils/crp_test.cc b/utils/crp_test.cc deleted file mode 100644 index 0cdb7afd..00000000 --- a/utils/crp_test.cc +++ /dev/null @@ -1,91 +0,0 @@ -#include <iostream> -#include <vector> -#include <string> - -#define BOOST_TEST_MODULE CrpTest -#include <boost/test/unit_test.hpp> -#include <boost/test/floating_point_comparison.hpp> - -#include "ccrp.h" -#include 
"sampler.h" - -using namespace std; - -MT19937 rng; - -BOOST_AUTO_TEST_CASE(Dist) { - CCRP<string> crp(0.1, 5); - double un = 0.25; - int tt = 0; - tt += crp.increment("hi", un, &rng); - tt += crp.increment("foo", un, &rng); - tt += crp.increment("bar", un, &rng); - tt += crp.increment("bar", un, &rng); - tt += crp.increment("bar", un, &rng); - tt += crp.increment("bar", un, &rng); - tt += crp.increment("bar", un, &rng); - tt += crp.increment("bar", un, &rng); - tt += crp.increment("bar", un, &rng); - cout << "tt=" << tt << endl; - cout << crp << endl; - cout << " P(bar)=" << crp.prob("bar", un) << endl; - cout << " P(hi)=" << crp.prob("hi", un) << endl; - cout << " P(baz)=" << crp.prob("baz", un) << endl; - cout << " P(foo)=" << crp.prob("foo", un) << endl; - double x = crp.prob("bar", un) + crp.prob("hi", un) + crp.prob("baz", un) + crp.prob("foo", un); - cout << " tot=" << x << endl; - BOOST_CHECK_CLOSE(1.0, x, 1e-6); - tt += crp.decrement("hi", &rng); - tt += crp.decrement("bar", &rng); - cout << crp << endl; - tt += crp.decrement("bar", &rng); - cout << crp << endl; - cout << "tt=" << tt << endl; -} - -BOOST_AUTO_TEST_CASE(Exchangability) { - double tot = 0; - double xt = 0; - CCRP<int> crp(0.5, 1.0); - int cust = 10; - vector<int> hist(cust + 1, 0); - for (int i = 0; i < cust; ++i) { crp.increment(1, 1.0, &rng); } - const int samples = 100000; - const bool simulate = true; - for (int k = 0; k < samples; ++k) { - if (!simulate) { - crp.clear(); - for (int i = 0; i < cust; ++i) { crp.increment(1, 1.0, &rng); } - } else { - int da = rng.next() * cust; - bool a = rng.next() < 0.5; - if (a) { - for (int i = 0; i < da; ++i) { crp.increment(1, 1.0, &rng); } - for (int i = 0; i < da; ++i) { crp.decrement(1, &rng); } - xt += 1.0; - } else { - for (int i = 0; i < da; ++i) { crp.decrement(1, &rng); } - for (int i = 0; i < da; ++i) { crp.increment(1, 1.0, &rng); } - } - } - int c = crp.num_tables(1); - ++hist[c]; - tot += c; - } - BOOST_CHECK_EQUAL(cust, crp.num_customers()); - cerr << "P(a) = " << (xt / samples) << endl; - cerr << "E[num tables] = " << (tot / samples) << endl; - double error = fabs((tot / samples) - 5.4); - cerr << " error = " << error << endl; - BOOST_CHECK_MESSAGE(error < 0.1, "error is too big = " << error); // it's possible for this to fail, but - // very, very unlikely - for (int i = 1; i <= cust; ++i) - cerr << i << ' ' << (hist[i]) << endl; -} - -BOOST_AUTO_TEST_CASE(LP) { - CCRP<string> crp(1,1,1,1,0.1,50.0); - crp.increment("foo", 1.0, &rng); - cerr << crp.log_crp_prob() << endl; -} - diff --git a/utils/fast_sparse_vector.h b/utils/fast_sparse_vector.h index 9fe00459..590a60c4 100644 --- a/utils/fast_sparse_vector.h +++ b/utils/fast_sparse_vector.h @@ -522,7 +522,7 @@ const FastSparseVector<T> operator-(const FastSparseVector<T>& x, const FastSpar } template <class T> -std::size_t hash_value(FastSparseVector<T> const& x) { +std::size_t hash_value(FastSparseVector<T> const&) { assert(!"not implemented"); return 0; } diff --git a/utils/gamma_poisson.h b/utils/gamma_poisson.h deleted file mode 100644 index fec763f6..00000000 --- a/utils/gamma_poisson.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _GAMMA_POISSON_H_ -#define _GAMMA_POISSON_H_ - -#include <m.h> - -// http://en.wikipedia.org/wiki/Conjugate_prior -struct GammaPoisson { - GammaPoisson(double shape, double rate) : - a(shape), b(rate), n(), marginal() {} - - double prob(unsigned x) const { - return exp(Md::log_negative_binom(x, a + marginal, 1.0 - (b + n) / (1 + b + n))); - } - - void increment(unsigned x) { - ++n; - 
marginal += x; - } - - void decrement(unsigned x) { - --n; - marginal -= x; - } - - double log_likelihood() const { - return 0; - } - - double a, b; - int n, marginal; -}; - -#endif diff --git a/utils/mfcr.h b/utils/mfcr.h deleted file mode 100644 index 4aacb567..00000000 --- a/utils/mfcr.h +++ /dev/null @@ -1,370 +0,0 @@ -#ifndef _MFCR_H_ -#define _MFCR_H_ - -#include <algorithm> -#include <numeric> -#include <cassert> -#include <cmath> -#include <list> -#include <iostream> -#include <vector> -#include <iterator> -#include <tr1/unordered_map> -#include <boost/functional/hash.hpp> -#include "sampler.h" -#include "slice_sampler.h" -#include "m.h" - -struct TableCount { - TableCount() : count(), floor() {} - TableCount(int c, int f) : count(c), floor(f) { - assert(f >= 0); - } - int count; // count or delta (may be 0, <0, or >0) - unsigned char floor; // from which floor? -}; - -std::ostream& operator<<(std::ostream& o, const TableCount& tc) { - return o << "[c=" << tc.count << " floor=" << static_cast<unsigned int>(tc.floor) << ']'; -} - -// Multi-Floor Chinese Restaurant as proposed by Wood & Teh (AISTATS, 2009) to simulate -// graphical Pitman-Yor processes. -// http://jmlr.csail.mit.edu/proceedings/papers/v5/wood09a/wood09a.pdf -// -// Implementation is based on Blunsom, Cohn, Goldwater, & Johnson (ACL 2009) and code -// referenced therein. -// http://www.aclweb.org/anthology/P/P09/P09-2085.pdf -// -template <unsigned Floors, typename Dish, typename DishHash = boost::hash<Dish> > -class MFCR { - public: - - MFCR(double d, double strength) : - num_tables_(), - num_customers_(), - discount_(d), - strength_(strength), - discount_prior_strength_(std::numeric_limits<double>::quiet_NaN()), - discount_prior_beta_(std::numeric_limits<double>::quiet_NaN()), - strength_prior_shape_(std::numeric_limits<double>::quiet_NaN()), - strength_prior_rate_(std::numeric_limits<double>::quiet_NaN()) { check_hyperparameters(); } - - MFCR(double discount_strength, double discount_beta, double strength_shape, double strength_rate, double d = 0.9, double strength = 10.0) : - num_tables_(), - num_customers_(), - discount_(d), - strength_(strength), - discount_prior_strength_(discount_strength), - discount_prior_beta_(discount_beta), - strength_prior_shape_(strength_shape), - strength_prior_rate_(strength_rate) { check_hyperparameters(); } - - void check_hyperparameters() { - if (discount_ < 0.0 || discount_ >= 1.0) { - std::cerr << "Bad discount: " << discount_ << std::endl; - abort(); - } - if (strength_ <= -discount_) { - std::cerr << "Bad strength: " << strength_ << " (discount=" << discount_ << ")" << std::endl; - abort(); - } - } - - double discount() const { return discount_; } - double strength() const { return strength_; } - void set_hyperparameters(double d, double s) { - discount_ = d; strength_ = s; - check_hyperparameters(); - } - void set_discount(double d) { discount_ = d; check_hyperparameters(); } - void set_strength(double a) { strength_ = a; check_hyperparameters(); } - - bool has_discount_prior() const { - return !std::isnan(discount_prior_strength_); - } - - bool has_strength_prior() const { - return !std::isnan(strength_prior_shape_); - } - - void clear() { - num_tables_ = 0; - num_customers_ = 0; - dish_locs_.clear(); - } - - unsigned num_tables() const { - return num_tables_; - } - - unsigned num_tables(const Dish& dish) const { - const typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.find(dish); - if (it == dish_locs_.end()) return 0; - return 
it->second.table_counts_.size(); - } - - // this is not terribly efficient but it should not typically be necessary to execute this query - unsigned num_tables(const Dish& dish, const unsigned floor) const { - const typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.find(dish); - if (it == dish_locs_.end()) return 0; - unsigned c = 0; - for (typename std::list<TableCount>::const_iterator i = it->second.table_counts_.begin(); - i != it->second.table_counts_.end(); ++i) { - if (i->floor == floor) ++c; - } - return c; - } - - unsigned num_customers() const { - return num_customers_; - } - - unsigned num_customers(const Dish& dish) const { - const typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.find(dish); - if (it == dish_locs_.end()) return 0; - return it->total_dish_count_; - } - - // returns (delta, floor) indicating whether a new table (delta) was opened and on which floor - template <class InputIterator, class InputIterator2> - TableCount increment(const Dish& dish, InputIterator p0s, InputIterator2 lambdas, MT19937* rng) { - DishLocations& loc = dish_locs_[dish]; - // marg_p0 = marginal probability of opening a new table on any floor with label dish - typedef typename std::iterator_traits<InputIterator>::value_type F; - const F marg_p0 = std::inner_product(p0s, p0s + Floors, lambdas, F(0.0)); - assert(marg_p0 <= F(1.0001)); - int floor = -1; - bool share_table = false; - if (loc.total_dish_count_) { - const F p_empty = F(strength_ + num_tables_ * discount_) * marg_p0; - const F p_share = F(loc.total_dish_count_ - loc.table_counts_.size() * discount_); - share_table = rng->SelectSample(p_empty, p_share); - } - if (share_table) { - // this can be done with doubles since P0 (which may be tiny) is not involved - double r = rng->next() * (loc.total_dish_count_ - loc.table_counts_.size() * discount_); - for (typename std::list<TableCount>::iterator ti = loc.table_counts_.begin(); - ti != loc.table_counts_.end(); ++ti) { - r -= ti->count - discount_; - if (r <= 0.0) { - ++ti->count; - floor = ti->floor; - break; - } - } - if (r > 0.0) { - std::cerr << "Serious error: r=" << r << std::endl; - Print(&std::cerr); - assert(r <= 0.0); - } - } else { // sit at currently empty table -- must sample what floor - if (Floors == 1) { - floor = 0; - } else { - F r = F(rng->next()) * marg_p0; - for (unsigned i = 0; i < Floors; ++i) { - r -= (*p0s) * (*lambdas); - ++p0s; - ++lambdas; - if (r <= F(0.0)) { - floor = i; - break; - } - } - } - assert(floor >= 0); - loc.table_counts_.push_back(TableCount(1, floor)); - ++num_tables_; - } - ++loc.total_dish_count_; - ++num_customers_; - return (share_table ? TableCount(0, floor) : TableCount(1, floor)); - } - - // returns first = -1 or 0, indicating whether a table was closed, and on what floor (second) - TableCount decrement(const Dish& dish, MT19937* rng) { - DishLocations& loc = dish_locs_[dish]; - assert(loc.total_dish_count_); - int floor = -1; - int delta = 0; - if (loc.total_dish_count_ == 1) { - floor = loc.table_counts_.front().floor; - dish_locs_.erase(dish); - --num_tables_; - --num_customers_; - delta = -1; - } else { - // sample customer to remove UNIFORMLY. that is, do NOT use the d - // here. if you do, it will introduce (unwanted) bias! 
- double r = rng->next() * loc.total_dish_count_; - --loc.total_dish_count_; - --num_customers_; - for (typename std::list<TableCount>::iterator ti = loc.table_counts_.begin(); - ti != loc.table_counts_.end(); ++ti) { - r -= ti->count; - if (r <= 0.0) { - floor = ti->floor; - if ((--ti->count) == 0) { - --num_tables_; - delta = -1; - loc.table_counts_.erase(ti); - } - break; - } - } - if (r > 0.0) { - std::cerr << "Serious error: r=" << r << std::endl; - Print(&std::cerr); - assert(r <= 0.0); - } - } - return TableCount(delta, floor); - } - - template <class InputIterator, class InputIterator2> - typename std::iterator_traits<InputIterator>::value_type prob(const Dish& dish, InputIterator p0s, InputIterator2 lambdas) const { - typedef typename std::iterator_traits<InputIterator>::value_type F; - const F marg_p0 = std::inner_product(p0s, p0s + Floors, lambdas, F(0.0)); - assert(marg_p0 <= F(1.0001)); - const typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.find(dish); - const F r = F(num_tables_ * discount_ + strength_); - if (it == dish_locs_.end()) { - return r * marg_p0 / F(num_customers_ + strength_); - } else { - return (F(it->second.total_dish_count_ - discount_ * it->second.table_counts_.size()) + F(r * marg_p0)) / - F(num_customers_ + strength_); - } - } - - double log_crp_prob() const { - return log_crp_prob(discount_, strength_); - } - - // taken from http://en.wikipedia.org/wiki/Chinese_restaurant_process - // does not include draws from G_w's - double log_crp_prob(const double& discount, const double& strength) const { - double lp = 0.0; - if (has_discount_prior()) - lp = Md::log_beta_density(discount, discount_prior_strength_, discount_prior_beta_); - if (has_strength_prior()) - lp += Md::log_gamma_density(strength + discount, strength_prior_shape_, strength_prior_rate_); - assert(lp <= 0.0); - if (num_customers_) { - if (discount > 0.0) { - const double r = lgamma(1.0 - discount); - if (strength) - lp += lgamma(strength) - lgamma(strength / discount); - lp += - lgamma(strength + num_customers_) - + num_tables_ * log(discount) + lgamma(strength / discount + num_tables_); - assert(std::isfinite(lp)); - for (typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.begin(); - it != dish_locs_.end(); ++it) { - const DishLocations& cur = it->second; - for (std::list<TableCount>::const_iterator ti = cur.table_counts_.begin(); ti != cur.table_counts_.end(); ++ti) { - lp += lgamma(ti->count - discount) - r; - } - } - } else if (!discount) { // discount == 0.0 - lp += lgamma(strength) + num_tables_ * log(strength) - lgamma(strength + num_tables_); - assert(std::isfinite(lp)); - for (typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.begin(); - it != dish_locs_.end(); ++it) { - const DishLocations& cur = it->second; - lp += lgamma(cur.table_counts_.size()); - } - } else { - assert(!"discount less than 0 detected!"); - } - } - assert(std::isfinite(lp)); - return lp; - } - - void resample_hyperparameters(MT19937* rng, const unsigned nloop = 5, const unsigned niterations = 10) { - assert(has_discount_prior() || has_strength_prior()); - DiscountResampler dr(*this); - StrengthResampler sr(*this); - for (int iter = 0; iter < nloop; ++iter) { - if (has_strength_prior()) { - strength_ = slice_sampler1d(sr, strength_, *rng, -discount_, - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - } - if (has_discount_prior()) { - double min_discount 
= std::numeric_limits<double>::min(); - if (strength_ < 0.0) min_discount -= strength_; - discount_ = slice_sampler1d(dr, discount_, *rng, min_discount, - 1.0, 0.0, niterations, 100*niterations); - } - } - strength_ = slice_sampler1d(sr, strength_, *rng, -discount_, - std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations); - } - - struct DiscountResampler { - DiscountResampler(const MFCR& crp) : crp_(crp) {} - const MFCR& crp_; - double operator()(const double& proposed_d) const { - return crp_.log_crp_prob(proposed_d, crp_.strength_); - } - }; - - struct StrengthResampler { - StrengthResampler(const MFCR& crp) : crp_(crp) {} - const MFCR& crp_; - double operator()(const double& proposediscount_strength) const { - return crp_.log_crp_prob(crp_.discount_, proposediscount_strength); - } - }; - - struct DishLocations { - DishLocations() : total_dish_count_() {} - unsigned total_dish_count_; // customers at all tables with this dish - std::list<TableCount> table_counts_; // list<> gives O(1) deletion and insertion, which we want - // .size() is the number of tables for this dish - }; - - void Print(std::ostream* out) const { - (*out) << "MFCR<" << Floors << ">(d=" << discount_ << ",strength=" << strength_ << ") customers=" << num_customers_ << std::endl; - for (typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator it = dish_locs_.begin(); - it != dish_locs_.end(); ++it) { - (*out) << it->first << " (" << it->second.total_dish_count_ << " on " << it->second.table_counts_.size() << " tables): "; - for (typename std::list<TableCount>::const_iterator i = it->second.table_counts_.begin(); - i != it->second.table_counts_.end(); ++i) { - (*out) << " " << *i; - } - (*out) << std::endl; - } - } - - typedef typename std::tr1::unordered_map<Dish, DishLocations, DishHash>::const_iterator const_iterator; - const_iterator begin() const { - return dish_locs_.begin(); - } - const_iterator end() const { - return dish_locs_.end(); - } - - unsigned num_tables_; - unsigned num_customers_; - std::tr1::unordered_map<Dish, DishLocations, DishHash> dish_locs_; - - double discount_; - double strength_; - - // optional beta prior on discount_ (NaN if no prior) - double discount_prior_strength_; - double discount_prior_beta_; - - // optional gamma prior on strength_ (NaN if no prior) - double strength_prior_shape_; - double strength_prior_rate_; -}; - -template <unsigned N,typename T,typename H> -std::ostream& operator<<(std::ostream& o, const MFCR<N,T,H>& c) { - c.Print(&o); - return o; -} - -#endif diff --git a/utils/mfcr_test.cc b/utils/mfcr_test.cc deleted file mode 100644 index 29a1a2ce..00000000 --- a/utils/mfcr_test.cc +++ /dev/null @@ -1,72 +0,0 @@ -#include "mfcr.h" - -#include <iostream> -#include <cassert> -#include <cmath> - -#define BOOST_TEST_MODULE MFCRTest -#include <boost/test/unit_test.hpp> -#include <boost/test/floating_point_comparison.hpp> - -#include "sampler.h" - -using namespace std; - -BOOST_AUTO_TEST_CASE(Exchangability) { - MT19937 r; - MT19937* rng = &r; - MFCR<2, int> crp(0.5, 3.0); - vector<double> lambdas(2); - vector<double> p0s(2); - lambdas[0] = 0.2; - lambdas[1] = 0.8; - p0s[0] = 1.0; - p0s[1] = 1.0; - - double tot = 0; - double tot2 = 0; - double xt = 0; - int cust = 10; - vector<int> hist(cust + 1, 0), hist2(cust + 1, 0); - for (int i = 0; i < cust; ++i) { crp.increment(1, p0s.begin(), lambdas.begin(), rng); } - const int samples = 100000; - const bool simulate = true; - for (int k = 0; k < samples; ++k) { - if (!simulate) { - 
crp.clear(); - for (int i = 0; i < cust; ++i) { crp.increment(1, p0s.begin(), lambdas.begin(), rng); } - } else { - int da = rng->next() * cust; - bool a = rng->next() < 0.45; - if (a) { - for (int i = 0; i < da; ++i) { crp.increment(1, p0s.begin(), lambdas.begin(), rng); } - for (int i = 0; i < da; ++i) { crp.decrement(1, rng); } - xt += 1.0; - } else { - for (int i = 0; i < da; ++i) { crp.decrement(1, rng); } - for (int i = 0; i < da; ++i) { crp.increment(1, p0s.begin(), lambdas.begin(), rng); } - } - } - int c = crp.num_tables(1); - ++hist[c]; - tot += c; - int c2 = crp.num_tables(1,0); // tables on floor 0 with dish 1 - ++hist2[c2]; - tot2 += c2; - } - cerr << cust << " = " << crp.num_customers() << endl; - cerr << "P(a) = " << (xt / samples) << endl; - cerr << "E[num tables] = " << (tot / samples) << endl; - double error = fabs((tot / samples) - 6.894); - cerr << " error = " << error << endl; - for (int i = 1; i <= cust; ++i) - cerr << i << ' ' << (hist[i]) << endl; - cerr << "E[num tables on floor 0] = " << (tot2 / samples) << endl; - double error2 = fabs((tot2 / samples) - 1.379); - cerr << " error2 = " << error2 << endl; - for (int i = 1; i <= cust; ++i) - cerr << i << ' ' << (hist2[i]) << endl; - assert(error < 0.05); // these can fail with very low probability - assert(error2 < 0.05); -}; - diff --git a/utils/sampler.h b/utils/sampler.h index 3e4a4086..88e1856c 100644 --- a/utils/sampler.h +++ b/utils/sampler.h @@ -19,7 +19,7 @@ #include "prob.h" -template <typename F> struct SampleSet; +template <typename F> class SampleSet; template <typename RNG> struct RandomNumberGenerator { diff --git a/utils/slice_sampler.h b/utils/slice_sampler.h deleted file mode 100644 index aa48a169..00000000 --- a/utils/slice_sampler.h +++ /dev/null @@ -1,191 +0,0 @@ -//! slice-sampler.h is an MCMC slice sampler -//! -//! Mark Johnson, 1st August 2008 - -#ifndef SLICE_SAMPLER_H -#define SLICE_SAMPLER_H - -#include <algorithm> -#include <cassert> -#include <cmath> -#include <iostream> -#include <limits> - -//! slice_sampler_rfc_type{} returns the value of a user-specified -//! function if the argument is within range, or - infinity otherwise -// -template <typename F, typename Fn, typename U> -struct slice_sampler_rfc_type { - F min_x, max_x; - const Fn& f; - U max_nfeval, nfeval; - slice_sampler_rfc_type(F min_x, F max_x, const Fn& f, U max_nfeval) - : min_x(min_x), max_x(max_x), f(f), max_nfeval(max_nfeval), nfeval(0) { } - - F operator() (F x) { - if (min_x < x && x < max_x) { - assert(++nfeval <= max_nfeval); - F fx = f(x); - assert(std::isfinite(fx)); - return fx; - } - return -std::numeric_limits<F>::infinity(); - } -}; // slice_sampler_rfc_type{} - -//! slice_sampler1d() implements the univariate "range doubling" slice sampler -//! described in Neal (2003) "Slice Sampling", The Annals of Statistics 31(3), 705-767. 
-// -template <typename F, typename LogF, typename Uniform01> -F slice_sampler1d(const LogF& logF0, //!< log of function to sample - F x, //!< starting point - Uniform01& u01, //!< uniform [0,1) random number generator - F min_x = -std::numeric_limits<F>::infinity(), //!< minimum value of support - F max_x = std::numeric_limits<F>::infinity(), //!< maximum value of support - F w = 0.0, //!< guess at initial width - unsigned nsamples=1, //!< number of samples to draw - unsigned max_nfeval=200) //!< max number of function evaluations -{ - typedef unsigned U; - slice_sampler_rfc_type<F,LogF,U> logF(min_x, max_x, logF0, max_nfeval); - - assert(std::isfinite(x)); - - if (w <= 0.0) { // set w to a default width - if (min_x > -std::numeric_limits<F>::infinity() && max_x < std::numeric_limits<F>::infinity()) - w = (max_x - min_x)/4; - else - w = std::max(((x < 0.0) ? -x : x)/4, (F) 0.1); - } - assert(std::isfinite(w)); - - F logFx = logF(x); - for (U sample = 0; sample < nsamples; ++sample) { - F logY = logFx + log(u01()+1e-100); //! slice logFx at this value - assert(std::isfinite(logY)); - - F xl = x - w*u01(); //! lower bound on slice interval - F logFxl = logF(xl); - F xr = xl + w; //! upper bound on slice interval - F logFxr = logF(xr); - - while (logY < logFxl || logY < logFxr) // doubling procedure - if (u01() < 0.5) - logFxl = logF(xl -= xr - xl); - else - logFxr = logF(xr += xr - xl); - - F xl1 = xl; - F xr1 = xr; - while (true) { // shrinking procedure - F x1 = xl1 + u01()*(xr1 - xl1); - if (logY < logF(x1)) { - F xl2 = xl; // acceptance procedure - F xr2 = xr; - bool d = false; - while (xr2 - xl2 > 1.1*w) { - F xm = (xl2 + xr2)/2; - if ((x < xm && x1 >= xm) || (x >= xm && x1 < xm)) - d = true; - if (x1 < xm) - xr2 = xm; - else - xl2 = xm; - if (d && logY >= logF(xl2) && logY >= logF(xr2)) - goto unacceptable; - } - x = x1; - goto acceptable; - } - goto acceptable; - unacceptable: - if (x1 < x) // rest of shrinking procedure - xl1 = x1; - else - xr1 = x1; - } - acceptable: - w = (4*w + (xr1 - xl1))/5; // update width estimate - } - return x; -} - -/* -//! slice_sampler1d() implements a 1-d MCMC slice sampler. -//! It should be correct for unimodal distributions, but -//! not for multimodal ones. -// -template <typename F, typename LogP, typename Uniform01> -F slice_sampler1d(const LogP& logP, //!< log of distribution to sample - F x, //!< initial sample - Uniform01& u01, //!< uniform random number generator - F min_x = -std::numeric_limits<F>::infinity(), //!< minimum value of support - F max_x = std::numeric_limits<F>::infinity(), //!< maximum value of support - F w = 0.0, //!< guess at initial width - unsigned nsamples=1, //!< number of samples to draw - unsigned max_nfeval=200) //!< max number of function evaluations -{ - typedef unsigned U; - assert(std::isfinite(x)); - if (w <= 0.0) { - if (min_x > -std::numeric_limits<F>::infinity() && max_x < std::numeric_limits<F>::infinity()) - w = (max_x - min_x)/4; - else - w = std::max(((x < 0.0) ? 
-x : x)/4, 0.1); - } - // TRACE4(x, min_x, max_x, w); - F logPx = logP(x); - assert(std::isfinite(logPx)); - U nfeval = 1; - for (U sample = 0; sample < nsamples; ++sample) { - F x0 = x; - F logU = logPx + log(u01()+1e-100); - assert(std::isfinite(logU)); - F r = u01(); - F xl = std::max(min_x, x - r*w); - F xr = std::min(max_x, x + (1-r)*w); - // TRACE3(x, logPx, logU); - while (xl > min_x && logP(xl) > logU) { - xl -= w; - w *= 2; - ++nfeval; - if (nfeval >= max_nfeval) - std::cerr << "## Error: nfeval = " << nfeval << ", max_nfeval = " << max_nfeval << ", sample = " << sample << ", nsamples = " << nsamples << ", r = " << r << ", w = " << w << ", xl = " << xl << std::endl; - assert(nfeval < max_nfeval); - } - xl = std::max(xl, min_x); - while (xr < max_x && logP(xr) > logU) { - xr += w; - w *= 2; - ++nfeval; - if (nfeval >= max_nfeval) - std::cerr << "## Error: nfeval = " << nfeval << ", max_nfeval = " << max_nfeval << ", sample = " << sample << ", nsamples = " << nsamples << ", r = " << r << ", w = " << w << ", xr = " << xr << std::endl; - assert(nfeval < max_nfeval); - } - xr = std::min(xr, max_x); - while (true) { - r = u01(); - x = r*xl + (1-r)*xr; - assert(std::isfinite(x)); - logPx = logP(x); - // TRACE4(logPx, x, xl, xr); - assert(std::isfinite(logPx)); - ++nfeval; - if (nfeval >= max_nfeval) - std::cerr << "## Error: nfeval = " << nfeval << ", max_nfeval = " << max_nfeval << ", sample = " << sample << ", nsamples = " << nsamples << ", r = " << r << ", w = " << w << ", xl = " << xl << ", xr = " << xr << ", x = " << x << std::endl; - assert(nfeval < max_nfeval); - if (logPx > logU) - break; - else if (x > x0) - xr = x; - else - xl = x; - } - // w = (4*w + (xr-xl))/5; // gradually adjust w - } - // TRACE2(logPx, x); - return x; -} // slice_sampler1d() -*/ - -#endif // SLICE_SAMPLER_H diff --git a/utils/small_vector.h b/utils/small_vector.h index 894b1b32..c8a69927 100644 --- a/utils/small_vector.h +++ b/utils/small_vector.h @@ -66,7 +66,7 @@ class SmallVector { //TODO: figure out iterator traits to allow this to be selcted for any iterator range template <class I> SmallVector(I const* begin,I const* end) { - int s=end-begin; + unsigned s=end-begin; Alloc(s); if (s <= SV_MAX) { for (unsigned i = 0; i < s; ++i,++begin) data_.vals[i] = *begin; diff --git a/utils/stringlib.h b/utils/stringlib.h index 75772c4d..ff5dc89d 100644 --- a/utils/stringlib.h +++ b/utils/stringlib.h @@ -86,7 +86,7 @@ bool match_begin(Str const& str,Prefix const& prefix) // source will be returned as a string, target must be a sentence or // a lattice (in PLF format) and will be returned as a Lattice object void ParseTranslatorInput(const std::string& line, std::string* input, std::string* ref); -struct Lattice; +class Lattice; void ParseTranslatorInputLattice(const std::string& line, std::string* input, Lattice* ref); inline std::string Trim(const std::string& str, const std::string& dropChars = " \t") { diff --git a/utils/unigram_pyp_lm.cc b/utils/unigram_pyp_lm.cc deleted file mode 100644 index 30b9fde1..00000000 --- a/utils/unigram_pyp_lm.cc +++ /dev/null @@ -1,214 +0,0 @@ -#include <iostream> -#include <tr1/memory> -#include <queue> - -#include <boost/functional.hpp> -#include <boost/program_options.hpp> -#include <boost/program_options/variables_map.hpp> - -#include "corpus_tools.h" -#include "m.h" -#include "tdict.h" -#include "sampler.h" -#include "ccrp.h" -#include "gamma_poisson.h" - -// A not very memory-efficient implementation of an 1-gram LM based on PYPs -// as described in Y.-W. Teh. 
(2006) A Hierarchical Bayesian Language Model -// based on Pitman-Yor Processes. In Proc. ACL. - -using namespace std; -using namespace tr1; -namespace po = boost::program_options; - -boost::shared_ptr<MT19937> prng; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - opts.add_options() - ("samples,n",po::value<unsigned>()->default_value(50),"Number of samples") - ("train,i",po::value<string>(),"Training data file") - ("test,T",po::value<string>(),"Test data file") - ("discount_prior_a,a",po::value<double>()->default_value(1.0), "discount ~ Beta(a,b): a=this") - ("discount_prior_b,b",po::value<double>()->default_value(1.0), "discount ~ Beta(a,b): b=this") - ("strength_prior_s,s",po::value<double>()->default_value(1.0), "strength ~ Gamma(s,r): s=this") - ("strength_prior_r,r",po::value<double>()->default_value(1.0), "strength ~ Gamma(s,r): r=this") - ("random_seed,S",po::value<uint32_t>(), "Random seed"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value<string>(), "Configuration file") - ("help", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as<string>().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || (conf->count("train") == 0)) { - cerr << dcmdline_options << endl; - exit(1); - } -} - -struct Histogram { - void increment(unsigned bin, unsigned delta = 1u) { - data[bin] += delta; - } - void decrement(unsigned bin, unsigned delta = 1u) { - data[bin] -= delta; - } - void move(unsigned from_bin, unsigned to_bin, unsigned delta = 1u) { - decrement(from_bin, delta); - increment(to_bin, delta); - } - map<unsigned, unsigned> data; - // SparseVector<unsigned> data; -}; - -// Lord Rothschild. 1986. THE DISTRIBUTION OF ENGLISH DICTIONARY WORD LENGTHS. 
-// Journal of Statistical Planning and Inference 14 (1986) 311-322 -struct PoissonLengthUniformCharWordModel { - explicit PoissonLengthUniformCharWordModel(unsigned vocab_size) : plen(5,5), uc(-log(50)), llh() {} - void increment(WordID w, MT19937*) { - llh += log(prob(w)); // this isn't quite right - plen.increment(TD::Convert(w).size() - 1); - } - void decrement(WordID w, MT19937*) { - plen.decrement(TD::Convert(w).size() - 1); - llh -= log(prob(w)); // this isn't quite right - } - double prob(WordID w) const { - size_t len = TD::Convert(w).size(); - return plen.prob(len - 1) * exp(uc * len); - } - double log_likelihood() const { return llh; } - void resample_hyperparameters(MT19937*) {} - GammaPoisson plen; - const double uc; - double llh; -}; - -// uniform base distribution (0-gram model) -struct UniformWordModel { - explicit UniformWordModel(unsigned vocab_size) : p0(1.0 / vocab_size), draws() {} - void increment(WordID, MT19937*) { ++draws; } - void decrement(WordID, MT19937*) { --draws; assert(draws >= 0); } - double prob(WordID) const { return p0; } // all words have equal prob - double log_likelihood() const { return draws * log(p0); } - void resample_hyperparameters(MT19937*) {} - const double p0; - int draws; -}; - -// represents an Unigram LM -template <class BaseGenerator> -struct UnigramLM { - UnigramLM(unsigned vs, double da, double db, double ss, double sr) : - base(vs), - crp(da, db, ss, sr, 0.8, 1.0) {} - void increment(WordID w, MT19937* rng) { - const double backoff = base.prob(w); - if (crp.increment(w, backoff, rng)) - base.increment(w, rng); - } - void decrement(WordID w, MT19937* rng) { - if (crp.decrement(w, rng)) - base.decrement(w, rng); - } - double prob(WordID w) const { - const double backoff = base.prob(w); - return crp.prob(w, backoff); - } - - double log_likelihood() const { - double llh = base.log_likelihood(); - llh += crp.log_crp_prob(); - return llh; - } - - void resample_hyperparameters(MT19937* rng) { - crp.resample_hyperparameters(rng); - base.resample_hyperparameters(rng); - } - - double discount_a, discount_b, strength_s, strength_r; - double d, strength; - BaseGenerator base; - CCRP<WordID> crp; -}; - -int main(int argc, char** argv) { - po::variables_map conf; - - InitCommandLine(argc, argv, &conf); - const unsigned samples = conf["samples"].as<unsigned>(); - if (conf.count("random_seed")) - prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); - else - prng.reset(new MT19937); - MT19937& rng = *prng; - vector<vector<WordID> > corpuse; - set<WordID> vocabe; - const WordID kEOS = TD::Convert("</s>"); - cerr << "Reading corpus...\n"; - CorpusTools::ReadFromFile(conf["train"].as<string>(), &corpuse, &vocabe); - cerr << "E-corpus size: " << corpuse.size() << " sentences\t (" << vocabe.size() << " word types)\n"; - vector<vector<WordID> > test; - if (conf.count("test")) - CorpusTools::ReadFromFile(conf["test"].as<string>(), &test); - else - test = corpuse; -#if 1 - UnigramLM<PoissonLengthUniformCharWordModel> lm(vocabe.size(), -#else - UnigramLM<UniformWordModel> lm(vocabe.size(), -#endif - conf["discount_prior_a"].as<double>(), - conf["discount_prior_b"].as<double>(), - conf["strength_prior_s"].as<double>(), - conf["strength_prior_r"].as<double>()); - for (unsigned SS=0; SS < samples; ++SS) { - for (unsigned ci = 0; ci < corpuse.size(); ++ci) { - const vector<WordID>& s = corpuse[ci]; - for (unsigned i = 0; i <= s.size(); ++i) { - WordID w = (i < s.size() ? 
s[i] : kEOS); - if (SS > 0) lm.decrement(w, &rng); - lm.increment(w, &rng); - } - if (SS > 0) lm.decrement(kEOS, &rng); - lm.increment(kEOS, &rng); - } - cerr << "LLH=" << lm.log_likelihood() << "\t tables=" << lm.crp.num_tables() << " " << endl; - if (SS % 10 == 9) lm.resample_hyperparameters(&rng); - } - double llh = 0; - unsigned cnt = 0; - unsigned oovs = 0; - for (unsigned ci = 0; ci < test.size(); ++ci) { - const vector<WordID>& s = test[ci]; - for (unsigned i = 0; i <= s.size(); ++i) { - WordID w = (i < s.size() ? s[i] : kEOS); - double lp = log(lm.prob(w)) / log(2); - if (i < s.size() && vocabe.count(w) == 0) { - cerr << "**OOV "; - ++oovs; - //lp = 0; - } - cerr << "p(" << TD::Convert(w) << ") = " << lp << endl; - llh -= lp; - cnt++; - } - } - cerr << " Log_10 prob: " << (-llh * log(2) / log(10)) << endl; - cerr << " Count: " << cnt << endl; - cerr << " OOVs: " << oovs << endl; - cerr << "Cross-entropy: " << (llh / cnt) << endl; - cerr << " Perplexity: " << pow(2, llh / cnt) << endl; - return 0; -} - diff --git a/utils/weights.cc b/utils/weights.cc index f56e2a20..575877b6 100644 --- a/utils/weights.cc +++ b/utils/weights.cc @@ -34,7 +34,7 @@ void Weights::InitFromFile(const string& filename, int weight_count = 0; bool fl = false; string buf; - weight_t val = 0; + double val = 0; while (in) { getline(in, buf); if (buf.size() == 0) continue; @@ -53,7 +53,7 @@ void Weights::InitFromFile(const string& filename, if (feature_list) { feature_list->push_back(buf.substr(start, end - start)); } while(end < buf.size() && buf[end] == ' ') ++end; val = strtod(&buf.c_str()[end], NULL); - if (isnan(val)) { + if (std::isnan(val)) { cerr << FD::Convert(fid) << " has weight NaN!\n"; abort(); } @@ -127,8 +127,8 @@ void Weights::InitSparseVector(const vector<weight_t>& dv, void Weights::SanityCheck(const vector<weight_t>& w) { for (unsigned i = 0; i < w.size(); ++i) { - assert(!isnan(w[i])); - assert(!isinf(w[i])); + assert(!std::isnan(w[i])); + assert(!std::isinf(w[i])); } } diff --git a/word-aligner/makefiles/makefile.grammars b/word-aligner/makefiles/makefile.grammars index 1a069abf..08ff33e1 100644 --- a/word-aligner/makefiles/makefile.grammars +++ b/word-aligner/makefiles/makefile.grammars @@ -16,7 +16,7 @@ STEM_E = $(SCRIPT_DIR)/stemmers/$(E_LANG).pl CLASSIFY = $(SUPPORT_DIR)/classify.pl MAKE_LEX_GRAMMAR = $(SUPPORT_DIR)/make_lex_grammar.pl -MODEL1 = $(TRAINING_DIR)/model1 +MODEL1 = $(TRAINING_DIR)/fast_align MERGE_CORPUS = $(SUPPORT_DIR)/merge_corpus.pl e.voc: corpus.e @@ -66,16 +66,16 @@ corpus.e-f: corpus.f corpus.e $(MERGE_CORPUS) corpus.e corpus.f > $@ corpus.f-e.model1: corpus.f-e - $(MODEL1) -v corpus.f-e > $@ + $(MODEL1) -p -v -i corpus.f-e > $@ corpus.e-f.model1: corpus.e-f - $(MODEL1) -v -V corpus.e-f > $@ + $(MODEL1) -p -v -V -i corpus.e-f > $@ corpus.f-e.full-model1: corpus.f-e - $(MODEL1) -t -999999 -v -V corpus.f-e > $@ + $(MODEL1) -p -t -999999 -v -V -i corpus.f-e > $@ corpus.e-f.full-model1: corpus.e-f - $(MODEL1) -t -999999 -v -V corpus.e-f > $@ + $(MODEL1) -p -t -999999 -v -V -i corpus.e-f > $@ corpus.f-e.lex-grammar.gz: corpus.f-e corpus.f-e.model1 corpus.e-f.model1 $(MAKE_LEX_GRAMMAR) corpus.f-e corpus.f-e.model1 corpus.e-f.model1 | $(GZIP) -9 > $@ |
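The exchangeability tests removed above (utils/ccrp_test.cc and utils/mfcr_test.cc) compare a simulated mean table count against the constants 5.4 and 6.894. Both follow from the standard Pitman-Yor expectation for the number of occupied tables after n customers; the 1.379 figure in mfcr_test.cc is just 0.2 * 6.894, since a new table opens on floor f with probability proportional to lambda_f * p0_f (the test uses lambdas {0.2, 0.8} with equal p0s). A minimal standalone check of those constants (not cdec code; the formula is the textbook result for discount d > 0):

    #include <cmath>
    #include <cstdio>

    // E[tables after n customers] in a Pitman-Yor CRP(discount d, strength s):
    //   E[T_n] = (s/d) * (Gamma(s+d+n) Gamma(s) / (Gamma(s+d) Gamma(s+n)) - 1)
    double expected_tables(unsigned n, double d, double s) {
      const double log_ratio = std::lgamma(s + d + n) - std::lgamma(s + d)
                             + std::lgamma(s) - std::lgamma(s + n);
      return (s / d) * (std::exp(log_ratio) - 1.0);
    }

    int main() {
      std::printf("CCRP(d=0.5, s=1.0), n=10: %.3f\n", expected_tables(10, 0.5, 1.0));  // 5.400
      std::printf("MFCR(d=0.5, s=3.0), n=10: %.3f\n", expected_tables(10, 0.5, 3.0));  // 6.894
      return 0;
    }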
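utils/gamma_poisson.h (deleted above) wraps the Gamma-Poisson conjugate pair: with a Gamma(a, b) prior on the Poisson rate and n observations summing to marginal, the posterior predictive is negative binomial with r = a + marginal and p = 1/(1 + b + n), which is exactly what its prob() computed via Md::log_negative_binom. A self-contained sketch with that helper inlined through lgamma (hypothetical standalone code, not the cdec Md:: API):

    #include <cmath>
    #include <cstdio>

    // log NB(x; r, p) = lgamma(x+r) - lgamma(r) - lgamma(x+1)
    //                 + r*log(1-p) + x*log(p)
    static double log_negative_binom(unsigned x, double r, double p) {
      return std::lgamma(x + r) - std::lgamma(r) - std::lgamma(x + 1.0)
           + r * std::log(1.0 - p) + x * std::log(p);
    }

    struct GammaPoisson {
      GammaPoisson(double shape, double rate) : a(shape), b(rate), n(0), marginal(0) {}
      // posterior predictive: NB(a + marginal, 1/(1 + b + n))
      double prob(unsigned x) const {
        return std::exp(log_negative_binom(x, a + marginal, 1.0 - (b + n) / (1.0 + b + n)));
      }
      void increment(unsigned x) { ++n; marginal += x; }
      void decrement(unsigned x) { --n; marginal -= x; }
      double a, b;
      int n, marginal;
    };

    int main() {
      GammaPoisson plen(5, 5);  // the prior the deleted word-length model used
      unsigned obs[] = {3, 4, 4, 5};
      for (int i = 0; i < 4; ++i) plen.increment(obs[i]);
      double total = 0.0;
      for (unsigned x = 0; x < 100; ++x) total += plen.prob(x);  // should be ~1
      std::printf("P(4 | data) = %.4f, predictive sums to %.4f\n", plen.prob(4), total);
      return 0;
    }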
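The heart of the removed utils/mfcr.h is the Pitman-Yor seating rule inside increment(): a customer joins an existing table serving the dish with probability proportional to (table count - d), or opens a new table with probability proportional to (s + d * total tables) * p0; the multi-floor variant then assigns the new table to floor f with probability proportional to lambda_f * p0_f. A single-floor reduction as a runnable sketch (hypothetical names, not the cdec CCRP/MFCR interface):

    #include <cstddef>
    #include <cstdio>
    #include <map>
    #include <random>
    #include <string>
    #include <vector>

    struct TinyPYCRP {
      TinyPYCRP(double d, double s) : d_(d), s_(s), tables_(0), customers_(0) {}
      // Seat one customer eating `dish` with base probability p0; returns true
      // iff a new table was opened (the event a hierarchical model propagates
      // to its base distribution as a fresh draw).
      bool increment(const std::string& dish, double p0, std::mt19937& rng) {
        std::uniform_real_distribution<double> u(0.0, 1.0);
        std::vector<int>& t = seating_[dish];
        int cw = 0;
        for (std::size_t i = 0; i < t.size(); ++i) cw += t[i];
        const double p_new = (s_ + d_ * tables_) * p0;  // open a new table
        const double p_old = cw - d_ * t.size();        // share an existing one
        ++customers_;
        if (u(rng) * (p_new + p_old) < p_new) {
          t.push_back(1);
          ++tables_;
          return true;
        }
        double r = u(rng) * p_old;  // pick a table with prob. prop. to (count - d)
        for (std::size_t i = 0; i < t.size(); ++i) {
          r -= t[i] - d_;
          if (r <= 0.0) { ++t[i]; break; }
        }
        return false;
      }
      double d_, s_;
      int tables_, customers_;
      std::map<std::string, std::vector<int> > seating_;
    };

    int main() {
      std::mt19937 rng(42);
      TinyPYCRP crp(0.5, 1.0);
      for (int i = 0; i < 10; ++i) crp.increment("dish", 0.01, rng);
      std::printf("10 customers seated at %d tables\n", crp.tables_);
      return 0;
    }

Note that removal goes the other way: as the comment in the deleted decrement() stresses, the customer to remove must be sampled uniformly by raw counts, without the discount, or the sampler becomes biased.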
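utils/slice_sampler.h implemented Neal's range-doubling variant, and cdec used it (via resample_hyperparameters) to slice-sample PYP discount and strength values under their Beta/Gamma priors. For reference, the simpler stepping-out variant from the same paper fits in a few lines; this is a standalone sketch assuming a well-behaved target with a finite log-density at the current point, not a drop-in replacement for the doubling procedure above:

    #include <cmath>
    #include <cstdio>
    #include <limits>
    #include <random>

    // One draw from log-density logf starting at x, via stepping-out + shrink
    // (Neal 2003, "Slice Sampling", The Annals of Statistics 31(3)).
    template <class LogF>
    double slice_sample1d(const LogF& logf, double x, std::mt19937& rng,
                          double w = 1.0, unsigned max_steps = 100) {
      std::uniform_real_distribution<double> u01(0.0, 1.0);
      const double logy = logf(x) + std::log(u01(rng) + 1e-100);  // slice height
      double xl = x - w * u01(rng);  // randomly positioned initial interval
      double xr = xl + w;
      for (unsigned i = 0; i < max_steps && logf(xl) > logy; ++i) xl -= w;
      for (unsigned i = 0; i < max_steps && logf(xr) > logy; ++i) xr += w;
      for (;;) {  // shrink until a point under the curve is accepted
        const double x1 = xl + u01(rng) * (xr - xl);
        if (logf(x1) > logy) return x1;
        (x1 < x ? xl : xr) = x1;
      }
    }

    int main() {
      std::mt19937 rng(42);
      // Target: Gamma(shape=2, rate=1), log-density log(x) - x for x > 0.
      struct LogGamma { double operator()(double x) const {
        return x > 0 ? std::log(x) - x : -std::numeric_limits<double>::infinity();
      } } loggamma;
      double x = 1.0, sum = 0.0;
      const int n = 100000;
      for (int i = 0; i < n; ++i) { x = slice_sample1d(loggamma, x, rng); sum += x; }
      std::printf("sample mean = %.3f (true mean = 2)\n", sum / n);
      return 0;
    }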
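Finally, the deleted utils/unigram_pyp_lm.cc queries its model through crp.prob(w, p0), which for a Pitman-Yor CRP is the usual predictive: with c_w customers and t_w tables labeled w, and c customers on T tables overall, P(w) = (c_w - d*t_w + (s + d*T) * p0(w)) / (c + s). A worked instance (hypothetical counts; d = 0.8 and s = 1.0 are the initial values the UnigramLM constructor passed to its CCRP):

    #include <cstdio>

    double pyp_prob(int cw, int tw, int c, int T, double d, double s, double p0) {
      return (cw - d * tw + (s + d * T) * p0) / (c + s);
    }

    int main() {
      // Word seen 5 times on 2 tables; 8 customers on 3 tables in total;
      // uniform base distribution over a 1000-word vocabulary.
      std::printf("P(w) = %.4f\n", pyp_prob(5, 2, 8, 3, 0.8, 1.0, 0.001));  // ~0.378
      return 0;
    }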