| author | graehl@gmail.com <graehl@gmail.com@ec762483-ff6d-05da-a07a-a48fb63a330f> | 2010-08-07 08:35:19 +0000 |
|---|---|---|
| committer | graehl@gmail.com <graehl@gmail.com@ec762483-ff6d-05da-a07a-a48fb63a330f> | 2010-08-07 08:35:19 +0000 |
| commit | 78a5f4297dcfce6c4a904322c2e989b612a6105a (patch) | |
| tree | 245edf663fc1e7b9b9486da42d6a78c6196f401c /decoder | |
| parent | 7da354b48459db5ad22120b4fcb38f5c7db468ed (diff) | |
cdec -A "LanguageModelFsa lm.gz" works
git-svn-id: https://ws10smt.googlecode.com/svn/trunk@490 ec762483-ff6d-05da-a07a-a48fb63a330f
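For context: the commit message above gives the command-line form (`cdec -A "LanguageModelFsa lm.gz"`); the config-file equivalent is the `fsa_feature_function` line added to `decoder/fsa-hiero.ini` in the diff below. A minimal sketch — the grammar, weights, and the `lm.gz` path come from the rest of your cdec config:

```ini
formalism=scfg
fsa_feature_function=LanguageModelFsa debug lm.gz -n LM
```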
Diffstat (limited to 'decoder')
| mode | file | lines |
|---|---|---|
| -rwxr-xr-x | decoder/apply_fsa_models.cc | 5 |
| -rwxr-xr-x | decoder/apply_fsa_models.h | 5 |
| -rw-r--r-- | decoder/apply_models.cc | 4 |
| -rw-r--r-- | decoder/cdec.cc | 22 |
| -rw-r--r-- | decoder/cdec_ff.cc | 16 |
| -rw-r--r-- | decoder/ff.cc | 46 |
| -rw-r--r-- | decoder/ff.h | 52 |
| -rw-r--r-- | decoder/ff_factory.cc | 11 |
| -rwxr-xr-x | decoder/ff_from_fsa.h | 10 |
| -rwxr-xr-x | decoder/ff_fsa.h | 6 |
| -rwxr-xr-x | decoder/ff_fsa_data.h | 5 |
| -rwxr-xr-x | decoder/ff_fsa_dynamic.h | 87 |
| -rwxr-xr-x | decoder/ff_register.h | 8 |
| -rw-r--r-- | decoder/filelib.h | 8 |
| -rwxr-xr-x | decoder/fsa-hiero.ini | 3 |
| -rwxr-xr-x | decoder/null_deleter.h | 9 |
16 files changed, 213 insertions, 84 deletions
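Before the full diff: the central new usage pattern, condensed from the `apply_fsa_models.cc` and `ff_fsa_dynamic.h` hunks below. `FsaFeatureFunctionFwd` (a typedef for `FsaFeatureFunctionPimpl<FsaFeatureFunction>`) is a non-owning facade over an existing type-erased FSA feature, which `FeatureFunctionFromFsa` then turns into an ordinary `FeatureFunction` usable by `ModelSet`. This is a fragment, not a standalone program; `fsa` and `weights` stand for the caller's FSA feature and weight vector:

```cpp
#include "ff_from_fsa.h"
#include "ff_fsa_dynamic.h"

// Wrap the dynamic FSA feature so the regular bottom-up rescoring
// machinery (ModelSet / ApplyModelSet) can score with it.
FeatureFunctionFromFsa<FsaFeatureFunctionFwd> buff(&fsa);
buff.Init();  // mandatory; normally the factory would call this
std::vector<const FeatureFunction*> ffs(1, &buff);
ModelSet models(weights, ffs);
```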
```diff
diff --git a/decoder/apply_fsa_models.cc b/decoder/apply_fsa_models.cc
index 27773b0d..01de62d3 100755
--- a/decoder/apply_fsa_models.cc
+++ b/decoder/apply_fsa_models.cc
@@ -1,6 +1,7 @@
 #include "apply_fsa_models.h"
 #include "hg.h"
 #include "ff_fsa_dynamic.h"
+#include "ff_from_fsa.h"
 #include "feature_vector.h"
 #include "stringlib.h"
 #include "apply_models.h"
@@ -27,7 +28,9 @@ struct ApplyFsa {
   }
   void ApplyBottomUp() {
     assert(cfg.IsBottomUp());
-    vector<const FeatureFunction*> ffs;
+    FeatureFunctionFromFsa<FsaFeatureFunctionFwd> buff(&fsa);
+    buff.Init(); // mandatory to call this (normally factory would do it)
+    vector<const FeatureFunction*> ffs(1,&buff);
     ModelSet models(weights, ffs);
     IntersectionConfiguration i(cfg.BottomUpAlgorithm(),cfg.pop_limit);
     ApplyModelSet(ih,smeta,models,i,oh);
diff --git a/decoder/apply_fsa_models.h b/decoder/apply_fsa_models.h
index 64ebab39..3dce5e82 100755
--- a/decoder/apply_fsa_models.h
+++ b/decoder/apply_fsa_models.h
@@ -23,7 +23,10 @@ struct ApplyFsaBy {
   int algorithm;
   std::string name() const;
   friend inline std::ostream &operator << (std::ostream &o,ApplyFsaBy const& c) {
-    return o << c.name();
+    o << c.name();
+    if (c.algorithm==BU_CUBE)
+      o << "("<<c.pop_limit<<")";
+    return o;
   }
   explicit ApplyFsaBy(int alg, int poplimit=200);
   ApplyFsaBy(std::string const& name, int poplimit=200);
diff --git a/decoder/apply_models.cc b/decoder/apply_models.cc
index 700296da..635f1a9c 100644
--- a/decoder/apply_models.cc
+++ b/decoder/apply_models.cc
@@ -405,10 +405,10 @@ void ApplyModelSet(const Hypergraph& in,
                    const IntersectionConfiguration& config,
                    Hypergraph* out) {
   //force exhaustive if there's no state req. for model
-  if (models.stateless() || config.algorithm == 0) {
+  if (models.stateless() || config.algorithm == IntersectionConfiguration::FULL) {
     NoPruningRescorer ma(models, smeta, in, out); // avoid overhead of best-first when no state
     ma.Apply();
-  } else if (config.algorithm == 1) {
+  } else if (config.algorithm == IntersectionConfiguration::CUBE) {
     int pl = config.pop_limit;
     const int max_pl_for_large=50;
     if (pl > max_pl_for_large && in.nodes_.size() > 80000) {
diff --git a/decoder/cdec.cc b/decoder/cdec.cc
index 72f0b95e..29070a69 100644
--- a/decoder/cdec.cc
+++ b/decoder/cdec.cc
@@ -68,7 +68,7 @@ inline string str(char const* name,po::variables_map const& conf) {
 shared_ptr<FeatureFunction> make_ff(string const& ffp,bool verbose_feature_functions,char const* pre="") {
   string ff, param;
   SplitCommandAndParam(ffp, &ff, &param);
-  cerr << "Feature: " << ff;
+  cerr << pre << "feature: " << ff;
   if (param.size() > 0) cerr << " (with config parameters '" << param << "')\n";
   else cerr << " (no config parameters)\n";
   shared_ptr<FeatureFunction> pf = ff_registry.Create(ff, param);
@@ -470,7 +470,9 @@ int main(int argc, char** argv) {
   vector<shared_ptr<FeatureFunction> > pffs,prelm_only_ffs;
   vector<const FeatureFunction*> late_ffs,prelm_ffs;
   if (conf.count("feature_function") > 0) {
-    const vector<string>& add_ffs = conf["feature_function"].as<vector<string> >();
+    vector<string> add_ffs;
+//    const vector<string>& add_ffs = conf["feature_function"].as<vector<string> >();
+    store_conf(conf,"feature_function",&add_ffs);
     for (int i = 0; i < add_ffs.size(); ++i) {
       pffs.push_back(make_ff(add_ffs[i],verbose_feature_functions));
       FeatureFunction const* p=pffs.back().get();
@@ -484,7 +486,9 @@ int main(int argc, char** argv) {
     }
   }
   if (conf.count("prelm_feature_function") > 0) {
-    const vector<string>& add_ffs = conf["prelm_feature_function"].as<vector<string> >();
+    vector<string> add_ffs;
+    store_conf(conf,"prelm_feature_function",&add_ffs);
+//    const vector<string>& add_ffs = conf["prelm_feature_function"].as<vector<string> >();
     for (int i = 0; i < add_ffs.size(); ++i) {
       prelm_only_ffs.push_back(make_ff(add_ffs[i],verbose_feature_functions,"prelm-only "));
       prelm_ffs.push_back(prelm_only_ffs.back().get());
@@ -494,12 +498,16 @@ int main(int argc, char** argv) {
   vector<shared_ptr<FsaFeatureFunction> > fsa_ffs;
   vector<string> fsa_names;
   store_conf(conf,"fsa_feature_function",&fsa_names);
+  for (int i=0;i<fsa_names.size();++i)
+    fsa_ffs.push_back(make_fsa_ff(fsa_names[i],verbose_feature_functions,"FSA "));
   if (fsa_ffs.size()>1) {
     //FIXME: support N fsa ffs.
     cerr<<"Only the first fsa FF will be used (FIXME).\n";
-    fsa_names.resize(1);
-    for (int i=0;i<fsa_names.size();++i)
-      fsa_ffs.push_back(make_fsa_ff(fsa_names[i],verbose_feature_functions,"FSA "));
+    fsa_ffs.resize(1);
+  }
+  if (!fsa_ffs.empty()) {
+    cerr<<"FSA: ";
+    show_all_features(fsa_ffs,feature_weights,cerr,cerr,true,true);
   }
 
   if (late_freeze) {
@@ -646,7 +654,6 @@ int main(int argc, char** argv) {
 
     maybe_prune(forest,conf,"beam_prune","density_prune","+LM",srclen);
 
-
     if (!fsa_ffs.empty()) {
       Timer t("Target FSA rescoring:");
       if (!has_late_models)
@@ -654,6 +661,7 @@ int main(int argc, char** argv) {
       Hypergraph fsa_forest;
       assert(fsa_ffs.size()==1);
       ApplyFsaBy cfg(str("apply_fsa_by",conf),pop_limit);
+      cerr << "FSA rescoring with "<<cfg<<" "<<fsa_ffs[0]->describe()<<endl;
       ApplyFsaModels(forest,smeta,*fsa_ffs[0],feature_weights,cfg,&fsa_forest);
       forest.swap(fsa_forest);
       forest.Reweight(feature_weights);
diff --git a/decoder/cdec_ff.cc b/decoder/cdec_ff.cc
index 1ef50eb1..98d4711f 100644
--- a/decoder/cdec_ff.cc
+++ b/decoder/cdec_ff.cc
@@ -13,21 +13,23 @@
 #include "ff_register.h"
 
 void register_feature_functions() {
-  RegisterFsaImpl<SameFirstLetter>(true,false);
-  RegisterFsaImpl<LongerThanPrev>(true,false);
-  RegisterFF<LanguageModel>();
-  RegisterFsaImpl<LanguageModelFsa>(true,false); // same as LM but using fsa wrapper
-  ff_registry.Register("LanguageModelFsaDynamic",new FFFactory<FeatureFunctionFromFsa<FsaFeatureFunctionDynamic<LanguageModelFsa> > >); // test correctness of FsaFeatureFunctionDynamic erasure
+  //TODO: these are worthless example target FSA ffs. remove later
+  RegisterFsaImpl<SameFirstLetter>(true);
+  RegisterFsaImpl<LongerThanPrev>(true);
+  RegisterFsaImpl<ShorterThanPrev>(true);
+//  ff_registry.Register("LanguageModelFsaDynamic",new FFFactory<FeatureFunctionFromFsa<FsaFeatureFunctionDynamic<LanguageModelFsa> > >); // to test correctness of FsaFeatureFunctionDynamic erasure
+  RegisterFsaDynToFF<LanguageModelFsa>();
+  RegisterFsaImpl<LanguageModelFsa>(true); // same as LM but using fsa wrapper
   RegisterFsaDynToFF<SameFirstLetter>();
+  RegisterFF<LanguageModel>();
+
+  RegisterFF<WordPenalty>();
   RegisterFF<SourceWordPenalty>();
   RegisterFF<ArityPenalty>();
   RegisterFF<BLEUModel>();
-  //TODO: worthless example target FSA ffs. remove later
   ff_registry.Register(new FFFactory<WordPenaltyFromFsa>); // same as WordPenalty, but implemented using ff_fsa
-  ff_registry.Register(new FFFactory<FeatureFunctionFromFsa<ShorterThanPrev> >);
   //TODO: use for all features the new Register which requires static FF::usage(false,false) give name
 
 #ifdef HAVE_RANDLM
diff --git a/decoder/ff.cc b/decoder/ff.cc
index 5c52ca2b..68249b42 100644
--- a/decoder/ff.cc
+++ b/decoder/ff.cc
@@ -36,47 +36,10 @@ Features FeatureFunction::single_feature(WordID feat) {
 }
 
 Features ModelSet::all_features(std::ostream *warn,bool warn0) {
-  typedef Features FFS;
-  FFS ffs;
-#define WARNFF(x) do { if (warn) { *warn << "WARNING: "<< x ; *warn<<endl; } } while(0)
-  typedef std::map<WordID,string> FFM;
-  FFM ff_from;
-  for (unsigned i=0;i<models_.size();++i) {
-    FeatureFunction const& ff=*models_[i];
-    string const& ffname=ff.name_;
-    FFS si=ff.features();
-    if (si.empty()) {
-      WARNFF(ffname<<" doesn't yet report any feature IDs - either supply feature weight, or use --no_freeze_feature_set, or implement features() method");
-    }
-    unsigned n0=0;
-    for (unsigned j=0;j<si.size();++j) {
-      WordID fid=si[j];
-      if (!fid) ++n0;
-      if (fid >= weights_.size())
-        weights_.resize(fid+1);
-      if (warn0 || fid) {
-        pair<FFM::iterator,bool> i_new=ff_from.insert(FFM::value_type(fid,ffname));
-        if (i_new.second) {
-          if (fid)
-            ffs.push_back(fid);
-          else
-            WARNFF("Feature id 0 for "<<ffname<<" (models["<<i<<"]) - probably no weight provided. Don't freeze feature ids to see the name");
-        } else {
-          WARNFF(ffname<<" (models["<<i<<"]) tried to define feature "<<FD::Convert(fid)<<" already defined earlier by "<<i_new.first->second);
-        }
-      }
-    }
-    if (n0)
-      WARNFF(ffname<<" (models["<<i<<"]) had "<<n0<<" unused features (--no_freeze_feature_set to see them)");
-  }
-  return ffs;
-#undef WARNFF
+  return ::all_features(models_,weights_,warn,warn0);
 }
 
-void ModelSet::show_features(std::ostream &out,std::ostream &warn,bool warn_zero_wt)
-{
-  typedef Features FFS;
-  FFS ffs=all_features(&warn,warn_zero_wt);
+void show_features(Features const& ffs,DenseWeightVector const& weights_,std::ostream &out,std::ostream &warn,bool warn_zero_wt) {
   out << "Weight Feature\n";
   for (unsigned i=0;i<ffs.size();++i) {
     WordID fid=ffs[i];
@@ -86,7 +49,12 @@ void ModelSet::show_features(std::ostream &out,std::ostream &warn,bool warn_zero
     warn<<"WARNING: "<<fname<<" has 0 weight."<<endl;
     out << wt << " " << fname<<endl;
   }
+}
+void ModelSet::show_features(std::ostream &out,std::ostream &warn,bool warn_zero_wt)
+{
+//  ::show_features(all_features(),weights_,out,warn,warn_zero_wt);
+  show_all_features(models_,weights_,out,warn,warn_zero_wt,warn_zero_wt);
 }
 
 // Hiero and Joshua use log_10(e) as the value, so I do to
diff --git a/decoder/ff.h b/decoder/ff.h
index e3bfe392..fe4411cd 100644
--- a/decoder/ff.h
+++ b/decoder/ff.h
@@ -13,6 +13,7 @@
 #include <cstring>
 #include "fdict.h"
 #include "hg.h"
+#include "feature_vector.h"
 
 class SentenceMetadata;
 class FeatureFunction;  // see definition below
@@ -54,7 +55,7 @@ public:
   // returns the number of bytes of context that this feature function will
   // (maximally) use.  By default, 0 ("stateless" models in Hiero/Joshua).
   // NOTE: this value is fixed for the instance of your class, you cannot
-  // use different amounts of memory for different nodes in the forest.
+  // use different amounts of memory for different nodes in the forest.  this will be read as soon as you create a ModelSet, then fixed forever on
   inline int NumBytesContext() const { return state_size_; }

   // Compute the feature values and (if this applies) the estimates of the
@@ -196,6 +197,52 @@ class ArityPenalty : public FeatureFunction {
   const double value_;
 };
 
+void show_features(Features const& features,DenseWeightVector const& weights,std::ostream &out,std::ostream &warn,bool warn_zero_wt=true); //show features and weights
+
+template <class FFp>
+Features all_features(std::vector<FFp> const& models_,DenseWeightVector &weights_,std::ostream *warn=0,bool warn_fid_0=false) {
+  using namespace std;
+  Features ffs;
+#define WARNFF(x) do { if (warn) { *warn << "WARNING: "<< x << endl; } } while(0)
+  typedef map<WordID,string> FFM;
+  FFM ff_from;
+  for (unsigned i=0;i<models_.size();++i) {
+    string const& ffname=models_[i]->name_;
+    Features si=models_[i]->features();
+    if (si.empty()) {
+      WARNFF(ffname<<" doesn't yet report any feature IDs - either supply feature weight, or use --no_freeze_feature_set, or implement features() method");
+    }
+    unsigned n0=0;
+    for (unsigned j=0;j<si.size();++j) {
+      WordID fid=si[j];
+      if (!fid) ++n0;
+      if (fid >= weights_.size())
+        weights_.resize(fid+1);
+      if (warn_fid_0 || fid) {
+        pair<FFM::iterator,bool> i_new=ff_from.insert(FFM::value_type(fid,ffname));
+        if (i_new.second) {
+          if (fid)
+            ffs.push_back(fid);
+          else
+            WARNFF("Feature id 0 for "<<ffname<<" (models["<<i<<"]) - probably no weight provided. Don't freeze feature ids to see the name");
+        } else {
+          WARNFF(ffname<<" (models["<<i<<"]) tried to define feature "<<FD::Convert(fid)<<" already defined earlier by "<<i_new.first->second);
+        }
+      }
+    }
+    if (n0)
+      WARNFF(ffname<<" (models["<<i<<"]) had "<<n0<<" unused features (--no_freeze_feature_set to see them)");
+  }
+  return ffs;
+#undef WARNFF
+}
+
+template <class FFp>
+void show_all_features(std::vector<FFp> const& models_,DenseWeightVector &weights_,std::ostream &out,std::ostream &warn,bool warn_fid_0=true,bool warn_zero_wt=true) {
+  return show_features(all_features(models_,weights_,&warn,warn_fid_0),weights_,out,warn,warn_zero_wt);
+}
+
+
 // this class is a set of FeatureFunctions that can be used to score, rescore,
 // etc. a (translation?) forest
 class ModelSet {
@@ -224,7 +271,8 @@ class ModelSet {
   bool stateless() const { return !state_size_; }
 
   Features all_features(std::ostream *warnings=0,bool warn_fid_zero=false); // this will warn about duplicate features as well (one function overwrites the feature of another). also resizes weights_ so it is large enough to hold the (0) weight for the largest reported feature id.  since 0 is a NULL feature id, it's never included.  if warn_fid_zero, then even the first 0 id is
-  void show_features(std::ostream &out,std::ostream &warn,bool warn_zero_wt=true); //show features and weights
+  void show_features(std::ostream &out,std::ostream &warn,bool warn_zero_wt=true);
+
  private:
   std::vector<const FeatureFunction*> models_;
   std::vector<double> weights_;
diff --git a/decoder/ff_factory.cc b/decoder/ff_factory.cc
index 767cc675..25d37648 100644
--- a/decoder/ff_factory.cc
+++ b/decoder/ff_factory.cc
@@ -17,10 +17,9 @@ void UntypedFactoryRegistry::clear() {
   reg_.clear();
 }
 
-bool UntypedFactoryRegistry::parse_debug(std::string & param) {
+bool UntypedFactoryRegistry::parse_debug(std::string & p) {
   int pl=debug_pre.size();
   bool space=false;
-  std::string p=param;
   bool debug=match_begin(p,debug_pre)&&
     (p.size()==pl || (space=(p[pl]==' ')));
   if (debug)
@@ -70,13 +69,7 @@ FsaFFRegistry fsa_ff_registry;
 FFRegistry ff_registry;
 
 /*
-namespace {
-struct null_deleter
-{
-  template <class F>
-  void operator()(F const& f) const { }
-};
-
+#include "null_deleter.h"
 boost::shared_ptr<FsaFFRegistry> global_fsa_ff_registry(&fsa_ff_registry,null_deleter());
 boost::shared_ptr<FFRegistry> global_ff_registry(&ff_registry,null_deleter());
 */
diff --git a/decoder/ff_from_fsa.h b/decoder/ff_from_fsa.h
index ec1a28fa..26aca048 100755
--- a/decoder/ff_from_fsa.h
+++ b/decoder/ff_from_fsa.h
@@ -28,7 +28,12 @@ class FeatureFunctionFromFsa : public FeatureFunction {
   typedef WordID *W;
   typedef WordID const* WP;
 public:
-  FeatureFunctionFromFsa(std::string const& param) : ff(param) {
+  template <class I>
+  FeatureFunctionFromFsa(I const& param) : ff(param) {
+    debug_=true; // because factory won't set until after we construct.
+  }
+  template <class I>
+  FeatureFunctionFromFsa(I & param) : ff(param) {
     debug_=true; // because factory won't set until after we construct.
   }
 
@@ -234,7 +239,7 @@ public:
     assert(left_end(w2,w2+2)==w2+1);
   }
 
-  // override from FeatureFunction; should be called by factory after constructor.
+  // override from FeatureFunction; should be called by factory after constructor.  we'll also call in our own ctor
   void Init() {
     ff.Init();
     ff.sync();
@@ -246,6 +251,7 @@ public:
     SetStateSize(ssz+state_offset);
     assert(!ssz == !M); // no fsa state <=> markov order 0
   }
+
 private:
   Impl ff;
   int M; // markov order (ctx len)
diff --git a/decoder/ff_fsa.h b/decoder/ff_fsa.h
index 6416151c..18e90bf1 100755
--- a/decoder/ff_fsa.h
+++ b/decoder/ff_fsa.h
@@ -91,6 +91,12 @@ struct FsaFeatureFunctionBase : public FsaFeatureFunctionData {
 public:
   int fid_; // you can have more than 1 feature of course.
 
+  std::string describe() const {
+    std::ostringstream o;
+    o<<*this;
+    return o.str();
+  }
+
   // can override to different return type, e.g. just return feats:
   Featval describe_features(FeatureVector const& feats) const {
     return feats.get(fid_);
diff --git a/decoder/ff_fsa_data.h b/decoder/ff_fsa_data.h
index 2f0c55b8..e60bce45 100755
--- a/decoder/ff_fsa_data.h
+++ b/decoder/ff_fsa_data.h
@@ -20,7 +20,6 @@ struct FsaFeatureFunctionData
   FsaFeatureFunctionData *sync_to_;
 
   void sync() const { // call this if you modify any fields after your constructor is done
-
     if (sync_to_) {
       DBGINIT("sync to "<<*sync_to_);
       *sync_to_=*this;
@@ -31,7 +30,7 @@ struct FsaFeatureFunctionData
   }
 
   friend std::ostream &operator<<(std::ostream &o,FsaFeatureFunctionData const& d) {
-    o << "[FSA "<<d.name_<<" features="<<FD::Convert(d.features_)<<" start=";
+    o << "[FSA "<<d.name_<<" features="<<FD::Convert(d.features_)<<" state_bytes="<<d.state_bytes()<<" end='"<<d.end_phrase()<<"' start=";
     d.print_state(o,d.start_state());
     o<<"]";
     return o;
@@ -62,7 +61,7 @@ struct FsaFeatureFunctionData
   int n_features() const {
     return features_.size();
   }
-  int state_bytes() const { return ssz; } // or override this
+  int state_bytes() const { return ssz; }
   void const* start_state() const {
     return start.begin();
   }
diff --git a/decoder/ff_fsa_dynamic.h b/decoder/ff_fsa_dynamic.h
index d03fddee..6f75bbe5 100755
--- a/decoder/ff_fsa_dynamic.h
+++ b/decoder/ff_fsa_dynamic.h
@@ -38,6 +38,8 @@ struct FsaFeatureFunction : public FsaFeatureFunctionData {
   virtual void print_state(std::ostream &o,void const*state) const {
     FsaFeatureFunctionData::print_state(o,state);
   }
+  virtual std::string describe() const { return "[FSA unnamed_dynamic_fsa_feature]"; }
+
   //end_phrase()
   virtual ~FsaFeatureFunction() {}
 
@@ -62,6 +64,10 @@ struct FsaFeatureFunctionDynamic : public FsaFeatureFunction {
   }
   int markov_order() const { return d().markov_order(); }
 
+  std::string describe() const {
+    return d().describe();
+  }
+
   virtual void ScanAccum(SentenceMetadata const& smeta,Hypergraph::Edge const& edge,
                          WordID w,void const* state,void *next_state,Accum *a) const {
     return d().ScanAccum(smeta,edge,w,state,next_state,a);
@@ -105,19 +111,96 @@ struct FsaFeatureFunctionDynamic : public FsaFeatureFunction {
   }
 
   virtual void Init() {
-    d().sync_to_=(FsaFeatureFunction*)this;
+    d().sync_to_=(FsaFeatureFunctionData*)this;
     d().Init();
     d().sync();
   }
 
-  FsaFeatureFunctionDynamic(std::string const& param) : impl(param) {
+  template <class I>
+  FsaFeatureFunctionDynamic(I const& param) : impl(param) {
     Init();
   }
 private:
   Impl impl;
+};
+
+// constructor takes ptr or shared_ptr to Impl, otherwise same as above - note: not virtual
+template <class Impl>
+struct FsaFeatureFunctionPimpl : public FsaFeatureFunctionData {
+  typedef boost::shared_ptr<Impl const> Pimpl;
+  static const bool simple_phrase_score=Impl::simple_phrase_score;
+  Impl const& d() const { return *p_; }
+  int markov_order() const { return d().markov_order(); }
+
+  std::string describe() const {
+    return d().describe();
+  }
+
+  void ScanAccum(SentenceMetadata const& smeta,Hypergraph::Edge const& edge,
+                 WordID w,void const* state,void *next_state,Accum *a) const {
+    return d().ScanAccum(smeta,edge,w,state,next_state,a);
+  }
+
+  void ScanPhraseAccum(SentenceMetadata const& smeta,Hypergraph::Edge const & edge,
+                       WordID const* i, WordID const* end,
+                       void const* state,void *next_state,Accum *a) const {
+    return d().ScanPhraseAccum(smeta,edge,i,end,state,next_state,a);
+  }
+
+  void ScanPhraseAccumOnly(SentenceMetadata const& smeta,Hypergraph::Edge const& edge,
+                           WordID const* i, WordID const* end,
+                           void const* state,Accum *a) const {
+    return d().ScanPhraseAccumOnly(smeta,edge,i,end,state,a);
+  }
+
+  void *ScanPhraseAccumBounce(SentenceMetadata const& smeta,Hypergraph::Edge const& edge,WordID const* i, WordID const* end,void *cs,void *ns,Accum *a) const {
+    return d().ScanPhraseAccumBounce(smeta,edge,i,end,cs,ns,a);
+  }
+  int early_score_words(SentenceMetadata const& smeta,Hypergraph::Edge const& edge,WordID const* i, WordID const* end,Accum *accum) const {
+    return d().early_score_words(smeta,edge,i,end,accum);
+  }
+
+  static std::string usage(bool param,bool verbose) {
+    return Impl::usage(param,verbose);
+  }
+
+  std::string usage_v(bool param,bool verbose) const {
+    return Impl::usage(param,verbose);
+  }
+
+  void print_state(std::ostream &o,void const*state) const {
+    return d().print_state(o,state);
+  }
+
+#if 0
+  // this and Init() don't touch p_ because we want to leave the original alone.
+  void init_name_debug(std::string const& n,bool debug) {
+    FsaFeatureFunctionData::init_name_debug(n,debug);
+  }
+#endif
+  void Init() {
+    p_=hold_pimpl_.get();
+#if 0
+    d().sync_to_=static_cast<FsaFeatureFunctionData*>(this);
+    d().Init();
+#endif
+    *static_cast<FsaFeatureFunctionData*>(this)=d();
+  }
+
+  FsaFeatureFunctionPimpl(Impl const* const p) : hold_pimpl_(p,null_deleter()) {
+    Init();
+  }
+  FsaFeatureFunctionPimpl(Pimpl const& p) : hold_pimpl_(p) {
+    Init();
+  }
+private:
+  Impl const* p_;
+  Pimpl hold_pimpl_;
 };
 
+typedef FsaFeatureFunctionPimpl<FsaFeatureFunction> FsaFeatureFunctionFwd; // allow ff_from_fsa for an existing dynamic-type ff (as opposed to usual register a wrapped known-type FSA in ff_register, which is more efficient)
+//typedef FsaFeatureFunctionDynamic<FsaFeatureFunctionFwd> DynamicFsaFeatureFunctionFwd; //if you really need to have a dynamic fsa facade that's also a dynamic fsa
+
 //TODO: combine 2 (or N) FsaFeatureFunction (type erased)
diff --git a/decoder/ff_register.h b/decoder/ff_register.h
index f0828ca3..70e1c1c6 100755
--- a/decoder/ff_register.h
+++ b/decoder/ff_register.h
@@ -36,11 +36,15 @@ inline void RegisterFF() {
 }
 
 template <class FsaImpl>
-inline void RegisterFsaDynToFF(bool prefix=true) {
+inline void RegisterFsaDynToFF(std::string name,bool prefix=true) {
   typedef FsaFeatureFunctionDynamic<FsaImpl> DynFsa;
-  std::string name=FsaImpl::usage(false,false);
   ff_registry.Register(prefix?"DynamicFsa"+name:name,new FFFactory<FeatureFunctionFromFsa<DynFsa> >);
 }
 
+template <class FsaImpl>
+inline void RegisterFsaDynToFF(bool prefix=true) {
+  RegisterFsaDynToFF<FsaImpl>(FsaImpl::usage(false,false),prefix);
+}
+
 #endif
diff --git a/decoder/filelib.h b/decoder/filelib.h
index af66dd05..4da4bc4f 100644
--- a/decoder/filelib.h
+++ b/decoder/filelib.h
@@ -8,6 +8,7 @@
 #include <boost/shared_ptr.hpp>
 #include <stdexcept>
 #include "gzstream.h"
+#include "null_deleter.h"
 
 bool FileExists(const std::string& file_name);
 bool DirectoryExists(const std::string& dir_name);
@@ -15,9 +16,6 @@ bool DirectoryExists(const std::string& dir_name);
 // reads from standard in if filename is -
 // uncompresses if file ends with .gz
 // otherwise, reads from a normal file
-struct file_null_deleter {
-  void operator()(void*) const {}
-};
 
 template <class Stream>
 struct BaseFile {
@@ -57,7 +55,7 @@ class ReadFile : public BaseFile<std::istream> {
   void Init(const std::string& filename) {
     filename_=filename;
     if (is_std()) {
-      ps_=PS(&std::cin,file_null_deleter());
+      ps_=PS(&std::cin,null_deleter());
     } else {
       if (!FileExists(filename)) {
        std::cerr << "File does not exist: " << filename << std::endl;
@@ -85,7 +83,7 @@ class WriteFile : public BaseFile<std::ostream> {
   void Init(const std::string& filename) {
     filename_=filename;
     if (is_std()) {
-      ps_=PS(&std::cout,file_null_deleter());
+      ps_=PS(&std::cout,null_deleter());
     } else {
      char const* file=filename_.c_str(); // just in case the gzstream keeps using the filename for longer than the constructor, e.g. inflateReset2. warning in valgrind that I'm hoping will disappear - it makes no sense.
       ps_=PS(EndsWith(filename, ".gz") ?
diff --git a/decoder/fsa-hiero.ini b/decoder/fsa-hiero.ini
index 19f88421..3eb8b3d2 100755
--- a/decoder/fsa-hiero.ini
+++ b/decoder/fsa-hiero.ini
@@ -1,7 +1,6 @@
 formalism=scfg
+fsa_feature_function=LanguageModelFsa debug lm.gz -n LM
 scfg_extra_glue_grammar=glue-lda.scfg
-feature_function=ShorterThanPrev debug
-feature_function=LongerThanPrev
 grammar=grammar.hiero
 show_tree_structure=true
 weights=weights.hiero
diff --git a/decoder/null_deleter.h b/decoder/null_deleter.h
new file mode 100755
index 00000000..082ab453
--- /dev/null
+++ b/decoder/null_deleter.h
@@ -0,0 +1,9 @@
+#ifndef NULL_DELETER_H
+#define NULL_DELETER_H
+
+struct null_deleter {
+  void operator()(void*) const {}
+  void operator()(void const*) const {}
+};
+
+#endif
```
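The new `null_deleter.h` above replaces the local `file_null_deleter` in `filelib.h` and the anonymous one in `ff_factory.cc`. A small self-contained sketch of the pattern it supports (a standalone example, not part of the commit): wrapping an object you do not own in a `boost::shared_ptr` without transferring ownership, exactly as `filelib.h` does with `std::cin`/`std::cout`.

```cpp
#include <iostream>
#include <boost/shared_ptr.hpp>
#include "null_deleter.h"

int main() {
  // shared_ptr will invoke null_deleter() instead of delete, so the
  // non-owned std::cout is left alone when the pointer goes out of scope.
  boost::shared_ptr<std::ostream> out(&std::cout, null_deleter());
  *out << "writing through a non-owning shared_ptr\n";
  return 0;
}
```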