author     graehl <graehl@ec762483-ff6d-05da-a07a-a48fb63a330f>  2010-08-27 19:26:31 +0000
committer  graehl <graehl@ec762483-ff6d-05da-a07a-a48fb63a330f>  2010-08-27 19:26:31 +0000
commit     1eab70e16f0e0d5531f3babfea2062c82f6362e1 (patch)
tree       a1ea1930e585b9356c6e089465baa969bb6919bb
parent     6ddaff1341e565dd91dca7ea763d0ea4d897f4c7 (diff)
compiles
git-svn-id: https://ws10smt.googlecode.com/svn/trunk@626 ec762483-ff6d-05da-a07a-a48fb63a330f
-rwxr-xr-x  decoder/apply_fsa_models.cc  342
-rwxr-xr-x  decoder/cfg.h                 22
-rw-r--r--  decoder/ff.h                   3
-rwxr-xr-x  decoder/ff_fsa_data.h          6
-rwxr-xr-x  utils/agenda.h                86
-rwxr-xr-x  utils/best.h                  15
-rw-r--r--  utils/d_ary_heap.h           107
-rwxr-xr-x  utils/hash.h                  12
-rwxr-xr-x  utils/intern_pool.h           54
-rw-r--r--  utils/logval.h                 8
-rwxr-xr-x  utils/lvalue_pmap.h            8
-rwxr-xr-x  utils/max_plus.h              13
-rwxr-xr-x  utils/show.h                  25
-rwxr-xr-x  utils/utoa.h                   2
-rwxr-xr-x  utils/value_array.h           24
-rw-r--r--  utils/warning_compiler.h       3
-rw-r--r--  utils/warning_push.h           2
17 files changed, 563 insertions, 169 deletions
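For orientation before the full patch: a minimal, self-contained sketch of the best-first agenda contract this commit introduces (add / improve / pop, where a better probability wins). This is not the patch's code — the real Agenda sits on an intrusive d_ary_heap_indirect with property maps and an intern_pool, and its best_t reverses operator< so that "less" means "better" — and the ToyAgenda/Item names below are invented purely for illustration, using plain doubles with higher = better.

    // Simplified stand-in for the agenda semantics added in utils/agenda.h:
    // items are canonicalized by key, improve() only requeues when the new
    // priority beats the remembered best, and pop() returns the best item.
    #include <iostream>
    #include <queue>
    #include <string>
    #include <unordered_map>
    #include <utility>

    struct Item {
      std::string key;   // canonical identity (plays the role of ItemKey)
      double priority;   // best probability seen so far (plays the role of best_t)
    };

    struct ToyAgenda {
      // canonical best priority per key; the heap may hold stale duplicates
      std::unordered_map<std::string, double> best;
      typedef std::pair<double, std::string> Entry;  // (priority, key)
      std::priority_queue<Entry> q;                  // max-heap: best first

      void add(Item const& i) {
        best[i.key] = i.priority;
        q.push({i.priority, i.key});
      }

      // returns true if i was new or improved the canonical priority
      bool improve(Item const& i) {
        auto f = best.find(i.key);
        if (f == best.end() || i.priority > f->second) { add(i); return true; }
        return false;  // not better: reject this more expensive way of reaching it
      }

      // pop the best remaining item, skipping stale heap entries
      bool pop(Item& out) {
        while (!q.empty()) {
          Entry e = q.top();
          q.pop();
          auto f = best.find(e.second);
          if (f != best.end() && f->second == e.first) {
            out = {e.second, e.first};
            best.erase(f);  // (the real agenda keeps the canonical item to reject later, worse paths)
            return true;
          }
        }
        return false;
      }
    };

    int main() {
      ToyAgenda a;
      a.add({"S -> . NP VP", 0.5});
      a.improve({"S -> . NP VP", 0.8});  // better derivation of the same item
      a.improve({"NP -> . DT NN", 0.6});
      Item i;
      while (a.pop(i)) std::cout << i.key << " @ " << i.priority << "\n";
    }

The patch's version avoids stale duplicates entirely: maybe_improve() on the d-ary heap does an in-place decrease-key using the item's stored heap location, which is what the LocationMap/PriorityMap lvalue property maps are for.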
diff --git a/decoder/apply_fsa_models.cc b/decoder/apply_fsa_models.cc index 2c10762b..5493d29f 100755 --- a/decoder/apply_fsa_models.cc +++ b/decoder/apply_fsa_models.cc @@ -1,4 +1,4 @@ -#include "maybe_update_bound.h" +#include <queue> #include "apply_fsa_models.h" #include "hg.h" #include "ff_fsa_dynamic.h" @@ -15,9 +15,24 @@ #include "value_array.h" #include "d_ary_heap.h" #include "agenda.h" +#include "show.h" +#include <stdint.h> #define DFSA(x) x +//fsa earley chart + #define DPFSA(x) x +//prefix trie + +#define PRINT_PREFIX 1 +#if PRINT_PREFIX +# define IF_PRINT_PREFIX(x) x +#else +# define IF_PRINT_PREFIX(x) +#endif +// keep backpointers in prefix trie so you can print a meaningful node id + +static const unsigned FSA_AGENDA_RESERVE=10; // TODO: increase to 1<<24 (16M) using namespace std; @@ -51,10 +66,9 @@ vs. 3) * -> x . * with 3, we predict all sorts of useless items - that won't give us our goal A and may not partcipate in any parse. this is not a good option at all. -*/ -#define TRIE_START_LHS 1 -// 1 is option 1) above. 0 would be option 3), which is dumb +I'm using option 1. +*/ // if we don't greedy-binarize, we want to encode recognized prefixes p (X -> p . rest) efficiently. if we're doing this, we may as well also push costs so we can best-first select rules in a lazy fashion. this is effectively left-branching binarization, of course. template <class K,class V,class Hash> @@ -73,14 +87,33 @@ struct get_second { }; struct PrefixTrieNode; +typedef PrefixTrieNode *NodeP; +typedef PrefixTrieNode const *NodePc; + +// for debugging prints only +struct TrieBackP { + WordID w; + NodePc from; + TrieBackP(WordID w=0,NodePc from=0) : w(w),from(from) { } +}; + +FsaFeatureFunction const* print_fsa=0; +CFG const* print_cfg=0; +inline void print_cfg_rhs(std::ostream &o,WordID w) { + if (print_cfg) + print_cfg->print_rhs_name(o,w); + else + CFG::static_print_rhs_name(o,w); +} + struct PrefixTrieEdge { // PrefixTrieEdge() { } -// explicit PrefixTrieEdge(prob_t p) : p(p),dest(0) { } - prob_t p;// viterbi additional prob, i.e. product over path incl. p_final = total rule prob +// explicit PrefixTrieEdge(best_t p) : p(p),dest(0) { } + best_t p;// viterbi additional prob, i.e. product over path incl. p_final = total rule prob //DPFSA() // we can probably just store deltas, but for debugging remember the full p - // prob_t delta; // - PrefixTrieNode *dest; + // best_t delta; // + NodeP dest; bool is_final() const { return dest==0; } WordID w; // for lhs, this will be nonneg NTHandle instead. // not set if is_final() // actually, set to lhs nt index @@ -88,23 +121,61 @@ struct PrefixTrieEdge { inline bool operator <(PrefixTrieEdge const& o) const { return o.p<p; } + PRINT_SELF(PrefixTrieEdge) + void print(std::ostream &o) const { + print_cfg_rhs(o,w); + o<<"{"<<p<<"}->"<<dest; + } + }; + +//note: ending a rule is handled with a special final edge, so that possibility can be explored in best-first order along with the rest (alternative: always finish a final rule by putting it on queue). this edge has no symbol on it. struct PrefixTrieNode { - prob_t p; // viterbi (max prob) of rule this node leads to - when building. telescope later onto edges for best-first. -#if TRIE_START_LHS -// bool final; // may also have successors, of course. we don't really need to track this; a null dest edge in the adj list lets us encounter the fact in best first order. -// prob_t p_final; // additional prob beyond what we already paid. 
while building, this is the total prob + best_t p; // viterbi (max prob) of rule this node leads to - when building. telescope later onto edges for best-first. + bool final; // may also have successors, of course. we don't really need to track this; a null dest edge in the adj list lets us encounter the fact in best first order. + typedef TrieBackP BP; + typedef std::vector<BP> BPs; + void back_vec(BPs &ns) const { + IF_PRINT_PREFIX(if(backp.from) { ns.push_back(backp); backp.from->back_vec(ns); }) + } + + BPs back_vec() const { + BPs ret; + back_vec(ret); + return ret; + } + + void print_back_str(std::ostream &o) const { + BPs back=back_vec(); + unsigned i=back.size(); + if (!i) { + o<<"PrefixTrieNode@"<<(uintptr_t)this; + return; + } + bool first=true; + while (i--<=0) { + if (!first) o<<','; + first=false; + WordID w=back[i].w; + print_cfg_rhs(o,w); + } + } + std::string back_str() const { + std::ostringstream o; + print_back_str(o); + return o.str(); + } + +// best_t p_final; // additional prob beyond what we already paid. while building, this is the total prob // instead of storing final, we'll say that an edge with a NULL dest is a final edge. this way it gets sorted into the list of adj. // instead of completed map, we have trie start w/ lhs. NTHandle lhs; // nonneg. - instead of storing this in Item. -#else - typedef FSA_MAP(LHS,RuleHandle) Completed; // can only have one rule w/ a given signature (duplicates should be collapsed when making CFG). but there may be multiple rules, with different LHS - Completed completed; -#endif + IF_PRINT_PREFIX(BP backp;) + enum { ROOT=-1 }; - explicit PrefixTrieNode(NTHandle lhs=ROOT,prob_t p=1) : p(p),lhs(lhs) { + explicit PrefixTrieNode(NTHandle lhs=ROOT,best_t p=1) : p(p),lhs(lhs),IF_PRINT_PREFIX(backp()) { //final=false; } bool is_root() const { return lhs==ROOT; } // means adj are the nonneg lhs indices, and we have the index edge_for still available @@ -134,12 +205,13 @@ public: } } template <class PV> - void index_root(PV &v) { - v.resize(adj.size()); + void index_lhs(PV &v) { for (int i=0,e=adj.size();i!=e;++i) { PrefixTrieEdge const& e=adj[i]; // assert(e.p.is_1()); // actually, after done_building, e will have telescoped dest->p/p. - v[e.w]=e.dest; + NTHandle n=e.w; + assert(n>=0); + v[n]=e.dest; } } @@ -172,43 +244,47 @@ public: typedef WordID W; // let's compute p_min so that every rule reachable from the created node has p at least this low. - PrefixTrieNode *improve_edge(PrefixTrieEdge const& e,prob_t rulep) { - PrefixTrieNode *d=e.dest; - maybe_increase_max(d->p,rulep); + NodeP improve_edge(PrefixTrieEdge const& e,best_t rulep) { + NodeP d=e.dest; + maybe_improve(d->p,rulep); return d; } - inline PrefixTrieNode *build(W w,prob_t rulep) { + inline NodeP build(W w,best_t rulep) { return build(lhs,w,rulep); } - inline PrefixTrieNode *build_lhs(NTHandle w,prob_t rulep) { + inline NodeP build_lhs(NTHandle w,best_t rulep) { return build(w,w,rulep); } - PrefixTrieNode *build(NTHandle lhs_,W w,prob_t rulep) { + NodeP build(NTHandle lhs_,W w,best_t rulep) { PrefixTrieEdgeFor::iterator i=edge_for.find(w); if (i!=edge_for.end()) return improve_edge(i->second,rulep); PrefixTrieEdge &e=edge_for[w]; - return e.dest=new PrefixTrieNode(lhs_,rulep); + NodeP r=new PrefixTrieNode(lhs_,rulep); + IF_PRINT_PREFIX(r->backp=BP(w,this)); + return e.dest=r; } - void set_final(NTHandle lhs_,prob_t pf) { + void set_final(NTHandle lhs_,best_t pf) { assert(no_adj()); -// final=true; // don't really need to track this. 
+ final=true; + /* PrefixTrieEdge &e=edge_for[-1]; e.p=pf; e.dest=0; e.w=lhs_; if (pf>p) p=pf; + */ } private: void destroy_children() { assert(adj.size()>=edge_for.size()); for (int i=0,e=adj.size();i<e;++i) { - PrefixTrieNode *c=adj[i].dest; + NodeP c=adj[i].dest; if (c) { // final state has no end delete c; } @@ -220,31 +296,33 @@ public: } }; -#if TRIE_START_LHS -//Trie starts with lhs (nonneg index), then continues w/ rhs (mixed >0 word, else NT) -#else -// just rhs. i think item names should exclude lhs if possible (most sharing). get prefix cost w/ forward = viterbi (global best-first admissable h only) and it should be ok? -#endif +//Trie starts with lhs (nonneg index), then continues w/ rhs (mixed >0 word, else NT) +// trie ends with final edge, which points to a per-lhs prefix node struct PrefixTrie { CFG *cfgp; Rules const* rulesp; Rules const& rules() const { return *rulesp; } CFG const& cfg() const { return *cfgp; } PrefixTrieNode root; - - PrefixTrie(CFG &cfg) : cfgp(&cfg),rulesp(&cfg.rules) { + typedef std::vector<NodeP> LhsToTrie; // will have to check lhs2[lhs].p for best cost of some rule with that lhs, then use edge deltas after? they're just caching a very cheap computation, really + LhsToTrie lhs2; // no reason to use a map or hash table; every NT in the CFG will have some rule rhses. lhs_to_trie[i]=root.edge_for[i], i.e. we still have a root trie node conceptually, we just access through this since it's faster. + typedef LhsToTrie LhsToComplete; + LhsToComplete lhs2complete; // the sentinel "we're completing" node (dot at end) for that lhs. special case of suffix-set=same trie minimization (aka right branching binarization) // these will be used to track kbest completions, along with a l state (r state will be in the list) + PrefixTrie(CFG &cfg) : cfgp(&cfg),rulesp(&cfg.rules),lhs2(cfg.nts.size(),0),lhs2complete(cfg.nts.size()) { // cfg.SortLocalBestFirst(); // instead we'll sort in done_building_r + print_cfg=cfgp; cfg.VisitRuleIds(*this); root.done_building_r(); root.index_adj(); // maybe the index we use for building trie should be replaced by a much larger/faster table since we look up by lhs many times in parsing? -//TODO: + root.index_lhs(lhs2); } + void operator()(int ri) const { Rule const& r=rules()[ri]; NTHandle lhs=r.lhs; - prob_t p=r.p; - PrefixTrieNode *n=const_cast<PrefixTrieNode&>(root).build_lhs(lhs,p); + best_t p=r.p; + NodeP n=const_cast<PrefixTrieNode&>(root).build_lhs(lhs,p); for (RHS::const_iterator i=r.rhs.begin(),e=r.rhs.end();;++i) { if (i==e) { n->set_final(lhs,p); @@ -254,60 +332,167 @@ struct PrefixTrie { } // root.build(lhs,r.p)->build(r.rhs,r.p); } + inline NodeP lhs2_ex(NTHandle n) const { + NodeP r=lhs2[n]; + if (!r) throw std::runtime_error("PrefixTrie: no CFG rule w/ lhs "+cfgp->nt_name(n)); + return r; + } +}; + + +// these should go in a global best-first queue +struct ItemPrio { + // NOTE: sum = viterbi (max) + ItemPrio() : priority(init_0()),inner(init_0()) { } + best_t priority; // includes inner prob. (forward) + /* The forward probability alpha_i(X[k]->x.y) is the sum of the probabilities of all + constrained paths of length i that end in state X[k]->x.y*/ + best_t inner; + /* The inner probability beta_i(X[k]->x.y) is the sum of the probabilities of all + paths of length i-k that start in state X[k,k]->.xy and end in X[k,i]->x.y, and generate the input symbols x[k,...,i-1] */ + template<class O> + void print(O &o) const { + o<<priority; // TODO: show/use inner? 
+ } + typedef ItemPrio self_type; + SELF_TYPE_PRINT }; typedef std::size_t ItemHash; -struct Item { - explicit Item(PrefixTrieNode *dot,int next=0) : dot(dot),next(next) { } - PrefixTrieNode *dot; // dot is a function of the stuff already recognized, and gives a set of suffixes y to complete to finish a rhs for lhs() -> dot y. for a lhs A -> . *, this will point to lh2[A] - int next; // index of dot->adj to complete (if dest==0), or predict (if NT), or scan (if word) - NTHandle lhs() const { return dot->lhs; } + +struct ItemKey { + explicit ItemKey(NodeP start,Bytes const& start_state) : dot(start),q(start_state),r(start_state) { } + explicit ItemKey(NodeP dot) : dot(dot) { } + NodeP dot; // dot is a function of the stuff already recognized, and gives a set of suffixes y to complete to finish a rhs for lhs() -> dot y. for a lhs A -> . *, this will point to lh2[A] + Bytes q,r; // (q->r are the fsa states; if r is empty it means + bool operator==(ItemKey const& o) const { + return dot==o.dot && q==o.q && r==o.r; + } inline ItemHash hash() const { - return GOLDEN_MEAN_FRACTION*next^((ItemHash)dot>>4); // / sizeof(PrefixTrieNode), approx., i.e. lower order bits of ptr are nonrandom + ItemHash h=GOLDEN_MEAN_FRACTION*(ItemHash)(dot-NULL); // i.e. lower order bits of ptr are nonrandom + using namespace boost; + hash_combine(h,q); + hash_combine(h,r); + return h; + } + template<class O> + void print(O &o) const { + o<<"lhs="<<lhs(); + if (dot) + dot->print_back_str(o); + if (print_fsa) { + o<<'/'; + print_fsa->print_state(o,&q[0]); + o<<"->"; + print_fsa->print_state(o,&r[0]); + } } + NTHandle lhs() const { return dot->lhs; } + typedef ItemKey self_type; + SELF_TYPE_PRINT }; - -inline ItemHash hash_value(Item const& x) { +inline ItemHash hash_value(ItemKey const& x) { return x.hash(); } +ItemKey null_item((PrefixTrieNode*)0); + +struct Item; +typedef Item *ItemP; + +/* we use a single type of item so it can live in a single best-first queue. we hold them by pointer so they can have mutable state, e.g. priority/location, but also lists of predictions and kbest completions (i.e. completions[L,r] = L -> * (r,s), by 1best for each possible s. we may discover more s later. we could use different subtypes since we hold by pointer, but for now everything will be packed as variants of Item */ +struct Item : ItemPrio,ItemKey { + explicit Item(NodeP dot,int next=0) : ItemKey(dot),next(next),from(0) { } + explicit Item(NodeP dot,FFState const& state,int next=0) : ItemKey(dot,state),next(next),from(0) { } + unsigned location; + typedef std::queue<ItemP> Predicted; + Predicted predicted; // this is empty, unless this is a predicted L -> .asdf item, or a to-complete L -> asdf . + int next; // index of dot->adj to complete (if dest==0), or predict (if NT), or scan (if word). note: we could store pointer inside adj since it and trie are @ fixed addrs. less pointer arith, more space. + ItemP from; //backpointer - 0 for L -> . asdf for the rest; L -> a .sdf, it's the L -> .asdf item. + ItemP predicted_from() const { + ItemP p=(ItemP)this; + while(p->from) p=p->from; + return p; + } + template<class O> + void print(O &o) const { + o<< '['; + ItemKey::print(o); + o<<' '; + ItemPrio::print(o); + o<<" next="<<next; + o<< ']'; + } + typedef Item self_type; + SELF_TYPE_PRINT +}; -Item null_item((PrefixTrieNode*)0); - -// these should go in a global best-first queue -struct ItemP { - ItemP() : forward(init_0()),inner(init_0()) { } - prob_t forward; // includes inner prob. 
- // NOTE: sum = viterbi (max) - /* The forward probability alpha_i(X[k]->x.y) is the sum of the probabilities of all - constrained paths of length that end in state X[k]->x.y*/ - prob_t inner; - /* The inner probability beta_i(X[k]->x.y) is the sum of the probabilities of all - paths of length i-k that start in state X[k,k]->.xy and end in X[k,i]->x.y, and generate the input symbols x[k,...,i-1] */ +struct GetItemKey { + typedef Item argument_type; + typedef ItemKey result_type; + result_type const& operator()(Item const& i) const { return i; } + template <class T> + T const& operator()(T const& t) const { return t; } }; +/* here's what i imagine (best first): + all of these are looked up in a chart which includes the fsa states as part of the identity + + perhaps some items are ephemeral and never reused (e.g. edge items of a cube, where we delay traversing trie based on probabilities), but in all ohter cases we make entirely new objects derived from the original one (memoizing). let's ignore lazier edge items for now and always push all successors onto heap. + + initial item (predicted): GOAL_NT -> . * (trie root for that lhs), start, start (fsa start states). has a list of + + completing item ( L -> * . needs to queue all the completions immediately. when predicting before a completion happens, add to prediction list. after it happens, immediately use the completed bests. this is confusing to me: the completions for an original NT w/ a given r state may end up in several different ones. we don't only care about the 1 best cost r item but all the different r. + + the prediction's left/right uses the predictor's right + + */ +template <class FsaFF=FsaFeatureFunction> struct Chart { - //Agenda<Item> a; //typedef HASH_MAP<Item,ItemP,boost::hash<Item> > Items; //typedef Items::iterator FindItem; //typedef std::pair<FindItem,bool> InsertItem; // Items items; CFG &cfg; // TODO: remove this from Chart + SentenceMetadata const& smeta; + FsaFF const& fsa; NTHandle goal_nt; PrefixTrie trie; - typedef std::vector<PrefixTrieNode *> LhsToTrie; // will have to check lhs2[lhs].p for best cost of some rule with that lhs, then use edge deltas after? they're just caching a very cheap computation, really - LhsToTrie lhs2; // no reason to use a map or hash table; every NT in the CFG will have some rule rhses. lhs_to_trie[i]=root.edge_for[i], i.e. we still have a root trie node conceptually, we just access through this since it's faster. + typedef Agenda<Item,BetterP,GetItemKey> A; + A a; + void best_first(unsigned kbest=1) { + BetterP better; + assert(kbest==1); //TODO: k-best via best-first requires revisiting best things again and adjusting desc. tricky. 
+ while(!a.empty()) { + ItemP top=a.pop(); + best_t b=a.best(); // remember; best_t a<b means a better than (higher prob) than b + best_t topb=top->priority; + best_t trie_stop_p=topb/b; + NodeP d=top->dot; + PrefixTrieNode::Adj const& adj=d->adj; + int n=top->next; + for (int m=adj.size();n<m;++n) { // cube corner + PrefixTrieEdge const& te=adj[m]; + if (better(te.p,trie_stop_p)) { + SHOWM2(DFSA,"trying adj ",m,te) + } else { + break; + } - void enqueue(Item const& item,ItemP const& p) { -// FindItem f=items.find(item); -// if (f==items.end()) ; + } + + + } } - Chart(CFG &cfg) :cfg(cfg),trie(cfg) { + Chart(CFG &cfg,SentenceMetadata const& smeta,FsaFF const& fsa,unsigned reserve=FSA_AGENDA_RESERVE) + : cfg(cfg),smeta(smeta),fsa(fsa),trie(cfg),a(reserve) { + assert(fsa.state_bytes()); + print_fsa=&fsa; goal_nt=cfg.goal_nt; - trie.root.index_root(lhs2); + a.add(a.construct(trie.lhs2_ex(goal_nt),fsa.start)); } }; @@ -317,6 +502,7 @@ struct Chart { DEFINE_NAMED_ENUM(FSA_BY) +template <class FsaFF=FsaFeatureFunction> struct ApplyFsa { ApplyFsa(HgCFG &i, const SentenceMetadata& smeta, @@ -327,9 +513,10 @@ struct ApplyFsa { ) :hgcfg(i),smeta(smeta),fsa(fsa),weights(weights),by(by),oh(oh) { + stateless=!fsa.state_bytes(); } void Compute() { - if (by.IsBottomUp()) + if (by.IsBottomUp() || stateless) ApplyBottomUp(); else ApplyEarley(); @@ -340,30 +527,33 @@ struct ApplyFsa { private: CFG cfg; HgCFG &hgcfg; - const SentenceMetadata& smeta; - const FsaFeatureFunction& fsa; + SentenceMetadata const& smeta; + FsaFF const& fsa; // WeightVector weight_vector; DenseWeightVector weights; ApplyFsaBy by; Hypergraph* oh; std::string cfg_out; + bool stateless; }; -void ApplyFsa::ApplyBottomUp() +template <class F> +void ApplyFsa<F>::ApplyBottomUp() { assert(by.IsBottomUp()); FeatureFunctionFromFsa<FsaFeatureFunctionFwd> buff(&fsa); buff.Init(); // mandatory to call this (normally factory would do it) vector<const FeatureFunction*> ffs(1,&buff); ModelSet models(weights, ffs); - IntersectionConfiguration i(by.BottomUpAlgorithm(),by.pop_limit); + IntersectionConfiguration i(stateless ? 
BU_FULL : by.BottomUpAlgorithm(),by.pop_limit); ApplyModelSet(hgcfg.ih,smeta,models,i,oh); } -void ApplyFsa::ApplyEarley() +template <class F> +void ApplyFsa<F>::ApplyEarley() { hgcfg.GiveCFG(cfg); - Chart chart(cfg); + Chart<F> chart(cfg,smeta,fsa); // don't need to uniq - option to do that already exists in cfg_options //TODO: } @@ -376,7 +566,7 @@ void ApplyFsaModels(HgCFG &i, ApplyFsaBy const& by, Hypergraph* oh) { - ApplyFsa a(i,smeta,fsa,weight_vector,by,oh); + ApplyFsa<FsaFeatureFunction> a(i,smeta,fsa,weight_vector,by,oh); a.Compute(); } diff --git a/decoder/cfg.h b/decoder/cfg.h index 9be0926d..79ae6f33 100755 --- a/decoder/cfg.h +++ b/decoder/cfg.h @@ -62,6 +62,28 @@ struct CFG { void print_nt_name(std::ostream &o,NTHandle n) const { o << nts[n].from << n; } + std::string nt_name(NTHandle n) const { + std::ostringstream o; + print_nt_name(o,n); + return o.str(); + } + void print_rhs_name(std::ostream &o,WordID w) const { + if (w<=0) print_nt_name(o,-w); + else o<<TD::Convert(w); + } + std::string rhs_name(WordID w) const { + if (w<=0) return nt_name(-w); + else return TD::Convert(w); + } + static void static_print_rhs_name(std::ostream &o,WordID w) { + if (w<=0) o<<'['<<-w<<']'; + else o<<TD::Convert(w); + } + static std::string static_rhs_name(WordID w) { + std::ostringstream o; + static_print_rhs_name(o,w); + return o.str(); + } typedef std::pair<WordID,WordID> BinRhs; diff --git a/decoder/ff.h b/decoder/ff.h index 80a880d8..904b9eb8 100644 --- a/decoder/ff.h +++ b/decoder/ff.h @@ -9,6 +9,7 @@ # define DBGINIT(a) #endif +#include <stdint.h> #include <vector> #include <cstring> #include "fdict.h" @@ -243,7 +244,7 @@ void show_all_features(std::vector<FFp> const& models_,DenseWeightVector &weight return show_features(all_features(models_,weights_,&warn,warn_fid_0),weights_,out,warn,warn_zero_wt); } -typedef ValueArray<char> FFState; // this is about 10% faster than string. +typedef ValueArray<uint8_t> FFState; // this is about 10% faster than string. //typedef std::string FFState; //FIXME: only context.data() is required to be contiguous, and it becomes invalid after next string operation. use ValueArray instead? (higher performance perhaps, save a word due to fixed size) diff --git a/decoder/ff_fsa_data.h b/decoder/ff_fsa_data.h index e60bce45..d215e940 100755 --- a/decoder/ff_fsa_data.h +++ b/decoder/ff_fsa_data.h @@ -36,7 +36,7 @@ struct FsaFeatureFunctionData return o; } - FsaFeatureFunctionData(int statesz=0,Sentence const& end_sentence_phrase=Sentence()) : ssz(statesz),start(statesz),h_start(statesz),end_phrase_(end_sentence_phrase) { + FsaFeatureFunctionData(int statesz=0,Sentence const& end_sentence_phrase=Sentence()) : start(statesz),h_start(statesz),end_phrase_(end_sentence_phrase),ssz(statesz) { debug_=true; sync_to_=0; } @@ -86,10 +86,10 @@ struct FsaFeatureFunctionData } Features features_; -protected: - int ssz; // don't forget to set this. default 0 (it may depend on params of course) Bytes start,h_start; // start state and estimated-features (heuristic) start state. set these. default empty. Sentence end_phrase_; // words appended for final traversal (final state cost is assessed using Scan) e.g. "</s>" for lm. +protected: + int ssz; // don't forget to set this. 
default 0 (it may depend on params of course) // this can be called instead or after constructor (also set bytes and end_phrase_) void set_state_bytes(int sb=0) { if (start.size()!=sb) start.resize(sb); diff --git a/utils/agenda.h b/utils/agenda.h index 639f35a9..1937ad1a 100755 --- a/utils/agenda.h +++ b/utils/agenda.h @@ -1,6 +1,7 @@ #ifndef AGENDA_H #define AGENDA_H +#define DBG_AGENDA(x) x /* a priority queue where you expect to queue the same item at different priorities several times before finally popping it. higher priority = better. @@ -48,54 +49,71 @@ template <class P> struct priority_traits { typedef typename P::priority_type priority_type; }; - -// P p has priority_traits<P>::type &p->agenda_priority() and unsigned &p->agenda_location(), and bool & p->agenda_done() -// this is really 4 functors in 1 (or lvalue property maps); you can supply your own as the Prio type in Agenda<...> below ; state is allowed. -template <class P> -struct AdjustablePriority { - typedef AdjustablePriority<P> Self; - typedef typename priority_traits<P>::priority_type Priority; - Priority & priority(P const &x) { - return x->agenda_priority(); - } - unsigned & location(P const &x) { // this gets updated by push, pop, and adjust - return x->agenda_location(); - } - void is_done(P const& x) const { - return x->agenda_done(); - } - void set_done(P const& x) const { - x->agenda_done()=true; - } -}; */ typedef best_t agenda_best_t; +typedef unsigned agenda_location_t; -PMAP_MEMBER_INDIRECT(LocationMap,unsigned,location) -PMAP_MEMBER_INDIRECT(PriorityMap,best_t,priority) +PMAP_MEMBER_INDIRECT(LocationMap,agenda_location_t,location) +PMAP_MEMBER_INDIRECT(PriorityMap,agenda_best_t,priority) + +struct Less { + typedef bool result_type; + template <class A,class B> + bool operator()(A const& a,B const& b) const { return a<b; } +}; // LocMap and PrioMap are boost property maps put(locmap,key,size_t), Better(get(priomap,k1),get(priomap,k2)) means k1 should be above k2 (be popped first). 
Locmap and PrioMap may have state; the rest are assumed stateless functors -template <class Item,class Better=std::less<Item>, /* intern_pool args */ class KeyF=get_key<Item>,class HashKey=boost::hash<typename KeyF::return_type>,class EqKey=std::equal_to<typename KeyF::return_type>, class Pool=boost::object_pool<Item> > -struct Agenda : intern_pool<Item,KeyF,EqKey,Pool> { +template <class Item,class Better=Less, /* intern_pool args */ class KeyF=get_key<Item>,class HashKey=boost::hash<typename KeyF::result_type>,class EqKey=std::equal_to<typename KeyF::result_type>, class Pool=boost::object_pool<Item> > +struct Agenda : intern_pool<Item,KeyF,HashKey,EqKey,Pool> { + typedef intern_pool<Item,KeyF,HashKey,EqKey,Pool> Intern; // inherited because I want to use construct() /* this is less generic than it could be, because I want to use a single hash mapping to intern to canonical mutable object pointers, where the property maps are just lvalue accessors */ - typedef intern_pool<Item,KeyF,EqKey,Pool> Intern; // inherited because I want to use construct() + typedef typename KeyF::result_type Key; typedef Item * Handle; typedef LocationMap<Handle> LocMap; typedef PriorityMap<Handle> PrioMap; LocMap locmap; - PrioMap priomap; - //NOT NEEDED: initialize function object state (there is none) + PrioMap priomap; // note: priomap[item] is set by caller before giving us the item; then tracks best (for canonicalized item) thereafter + Better better; + //NOT NEEDED: initialize function object state (there is none) -/* - typedef Item *CanonItem; + typedef Item *ItemC; //canonicalized pointer + typedef Item *ItemP; static const std::size_t heap_arity=4; // might be fastest possible (depends on key size probably - cache locality is bad w/ arity=2) - typedef std::vector<CanonItem> HeapStorage; - typedef boost::detail::d_ary_heap_indirect<Key,heap_arity,LocMap,PrioMap,Better,HeapStorage> Heap; - HASH_SET<Key,Hash,Equal> queued; - Agenda(LocMap const& lm=LocMap(),PrioMap const& pm=PrioMap()) : locmap(lm), priomap(pm) { } -*/ + typedef std::vector<ItemC> HeapStorage; + typedef d_ary_heap_indirect<Handle,heap_arity,LocMap,PrioMap,Better,HeapStorage,agenda_location_t> Heap; + Heap q; + + // please don't call q.push etc. directly. + void add(ItemP i) { + bool fresh=interneq(i); + DBG_AGENDA(assert(fresh && !q.contains(i))); + q.push(i); + } + bool improve(ItemP i) { + ItemP c=i; + bool fresh=interneq(c); + if (fresh) + return add(c); + DBG_AGENDA(assert(q.contains(c))); + return q.maybe_improve(priomap[i]); + } + inline bool empty() { + return q.empty(); + } + // no need to destroy the canon. item because we want to remember the best cost and reject more expensive ways of using it). + ItemC pop() { + DBG_AGENDA(assert(!empty())); + ItemC r=q.top(); + q.pop(); + return r; + } + agenda_best_t best() const { + return priomap[q.top()]; //TODO: cache/track the global best? + } + + Agenda(unsigned reserve=1000000,LocMap const& lm=LocMap(),PrioMap const& pm=PrioMap(),EqKey const& eq=EqKey(),Better const& better=Better()) : locmap(lm), priomap(pm), better(better), q(priomap,locmap,better,reserve) { } }; #endif diff --git a/utils/best.h b/utils/best.h index 8ff896bb..689e7600 100755 --- a/utils/best.h +++ b/utils/best.h @@ -8,6 +8,21 @@ typedef MaxPlus<double> best_t; inline bool operator <(best_t const& a,best_t const& b) { return a.v_>b.v_; // intentionally reversed, so default min-heap, sort, etc. put best first. 
} +struct BetterP { + inline bool operator ()(best_t const& a,best_t const& b) const { + return a.v_>b.v_; // intentionally reversed, so default min-heap, sort, etc. put best first. + } +}; +inline void maybe_improve(best_t &a,best_t const& b) { + if (a.v_>b.v_) + a.v_=b.v_; +} + +template <class O> +inline void maybe_improve(best_t &a,O const& b) { + if (a.v_>b.v_) + a.v_=b.v_; +} #endif diff --git a/utils/d_ary_heap.h b/utils/d_ary_heap.h index 15bde583..da99902c 100644 --- a/utils/d_ary_heap.h +++ b/utils/d_ary_heap.h @@ -106,7 +106,7 @@ std::size_t Arity, typename IndexInHeapPropertyMap, typename DistanceMap, - typename Compare = std::less<Value>, + typename Better = std::less<Value>, typename Container = std::vector<Value>, typename Size = typename Container::size_type, typename Equal = std::equal_to<Value> > @@ -120,16 +120,16 @@ typedef Value value_type; typedef typename Container::const_iterator const_iterator; typedef const_iterator iterator; - // The distances being compared using compare and that are stored in the + // The distances being compared using better and that are stored in the // distance map typedef typename boost::property_traits<DistanceMap>::value_type distance_type; - d_ary_heap_indirect(DistanceMap distance, - IndexInHeapPropertyMap index_in_heap, - const Compare& compare = Compare(), + d_ary_heap_indirect(DistanceMap const& distance, + IndexInHeapPropertyMap const& index_in_heap, + const Better& better = Better(), size_type container_reserve = 100000, Equal const& equal = Equal() ) - : compare(compare), data(), distance(distance), + : better(better), data(), distance(distance), index_in_heap(index_in_heap) { data.reserve(container_reserve); } @@ -210,8 +210,9 @@ void clear() { #if D_ARY_TRACK_OUT_OF_HEAP - for (typename Container::iterator i=data.begin(),e=data.end();i!=e;++i) - put(index_in_heap,*i,D_ARY_HEAP_NULL_INDEX); + using boost::put; + for (typename Container::iterator i=data.begin(),e=data.end();i!=e;++i) + put(index_in_heap,*i,D_ARY_HEAP_NULL_INDEX); #endif data.clear(); } @@ -224,6 +225,7 @@ } else { size_type index = data.size(); data.push_back(v); + using boost::put; put(index_in_heap, v, index); preserve_heap_property_up(index); } @@ -239,6 +241,7 @@ } void pop() { + using boost::put; if(D_ARY_TRACK_OUT_OF_HEAP) put(index_in_heap, data[0], D_ARY_HEAP_NULL_INDEX); if (data.size() != 1) { @@ -261,22 +264,40 @@ // (distance has become smaller, so it may need to rise toward top(). // i.e. decrease-key in a min-heap void update(const Value& v) { + using boost::get; size_type index = get(index_in_heap, v); - preserve_heap_property_up(index); + preserve_heap_property_up(v,index); verify_heap(); } + // return true if improved. + bool maybe_improve(const Value& v,distance_type dbetter) { + using boost::get; + if (better(dbetter,get(distance,v))) { + preserve_heap_property_up_dist(v,dbetter); + return true; + } + return false; + } + + +#include "warning_push.h" +#pragma GCC diagnostic ignored "-Wtype-limits" + // because maybe size_type is signed or unsigned inline bool contains(const Value &v,size_type i) const { return D_ARY_TRACK_OUT_OF_HEAP ? (i != D_ARY_HEAP_NULL_INDEX) : i>=0 && i<data.size() && equal(v,data[i]); // note: size_type may be signed (don't recommend it, though) - thus i>=0 check to catch uninit. 
data } +#include "warning_pop.h" inline bool contains(const Value& v) const { + using boost::get; return contains(v,get(index_in_heap, v)); } void push_or_update(const Value& v) { /* insert if not present, else update */ + using boost::get; size_type index = get(index_in_heap, v); if (D_ARY_PUSH_GRAEHL) { if (contains(v,index)) @@ -287,6 +308,7 @@ if (!contains(v,index)) { index = data.size(); data.push_back(v); + using boost::put; put(index_in_heap, v, index); } preserve_heap_property_up(index); @@ -296,7 +318,7 @@ private: Equal equal; - Compare compare; + Better better; Container data; DistanceMap distance; IndexInHeapPropertyMap index_in_heap; @@ -318,11 +340,13 @@ Value value_b = data[index_b]; data[index_a] = value_b; data[index_b] = value_a; + using boost::put; put(index_in_heap, value_a, index_b); put(index_in_heap, value_b, index_a); } inline void move_heap_element(Value const& v,size_type ito) { + using boost::put; put(index_in_heap,v,ito); data[ito]=v; //todo: move assign? } @@ -332,8 +356,9 @@ // This is a very expensive test so it should be disabled even when // NDEBUG is not defined #if D_ARY_VERIFY_HEAP + using boost::get; for (size_t i = 1; i < data.size(); ++i) { - if (compare(get(distance,data[i]), get(distance,data[parent(i)]))) { + if (better(get(distance,data[i]), get(distance,data[parent(i)]))) { assert (!"Element is smaller than its parent"); } } @@ -341,28 +366,47 @@ } // we have a copy of the key, so we don't need to do that stupid find # of levels to move then move. we act as though data[index]=currently_being_moved, but in fact it's an uninitialized "hole", which we fill at the very end - void preserve_heap_property_up(Value const& currently_being_moved,size_type index) { - size_type orig_index = index; - distance_type currently_being_moved_dist = - get(distance, currently_being_moved); - for (;;) { - if (index == 0) break; // Stop at root - size_type parent_index = parent(index); - Value const& parent_value = data[parent_index]; - if (compare(currently_being_moved_dist, get(distance, parent_value))) { - move_heap_element(parent_value,index); - index = parent_index; - } else { - break; // Heap property satisfied + inline void preserve_heap_property_up(Value const& currently_being_moved,size_type index) { + using boost::get; + preserve_heap_property_up(currently_being_moved,index,get(distance,currently_being_moved)); + } + + inline void preserve_heap_property_up_set_dist(Value const& currently_being_moved,distance_type dbetter) { + using boost::get; + using boost::put; + put(distance,currently_being_moved,dbetter); + preserve_heap_property_up(currently_being_moved,get(index_in_heap,currently_being_moved),dbetter); + verify_heap(); + } + + void preserve_heap_property_up(Value const& currently_being_moved,size_type index,distance_type currently_being_moved_dist) { + using boost::put; + using boost::get; + if (D_ARY_UP_GRAEHL) { + for (;;) { + if (index == 0) break; // Stop at root + size_type parent_index = parent(index); + Value const& parent_value = data[parent_index]; + if (better(currently_being_moved_dist, get(distance, parent_value))) { + move_heap_element(parent_value,index); + index = parent_index; + } else { + break; // Heap property satisfied + } } + //finish "swap chain" by filling hole w/ currently_being_moved + move_heap_element(currently_being_moved,index); // note: it's ok not to return early on index==0 at start, even if self-assignment isn't supported by Value - because currently_being_moved is a copy. 
+ } else { + put(index_in_heap,currently_being_moved,index); + put(distance,currently_being_moved,currently_being_moved_dist); + preserve_heap_property_up(index); } - //finish "swap chain" by filling hole w/ currently_being_moved - move_heap_element(currently_being_moved,index); // note: it's ok not to return early on index==0 at start, even if self-assignment isn't supported by Value - because currently_being_moved is a copy. } // Starting at a node, move up the tree swapping elements to preserve the // heap property. doesn't actually use swap; uses hole void preserve_heap_property_up(size_type index) { + using boost::get; if (index == 0) return; // Do nothing on root if (D_ARY_UP_GRAEHL) { Value copyi=data[index]; @@ -381,7 +425,7 @@ if (index == 0) break; // Stop at root size_type parent_index = parent(index); Value parent_value = data[parent_index]; - if (compare(currently_being_moved_dist, get(distance, parent_value))) { + if (better(currently_being_moved_dist, get(distance, parent_value))) { ++num_levels_moved; index = parent_index; continue; @@ -392,6 +436,7 @@ // Actually do the moves -- move num_levels_moved elements down in the // tree, then put currently_being_moved at the top index = orig_index; + using boost::put; for (size_type i = 0; i < num_levels_moved; ++i) { size_type parent_index = parent(index); Value parent_value = data[parent_index]; @@ -409,6 +454,7 @@ // are any parent-child pairs that violate the heap property. v is placed at data[i], but then pushed down (note: data[i] won't be read explicitly; it will instead be overwritten by percolation). this also means that v must be a copy of data[i] if it was already at i. // e.g. v=data.back(), i=0, sz=data.size()-1 for pop(), implicitly swapping data[i], data.back(), and doing data.pop_back(), then adjusting from 0 down w/ swaps. updates index_in_heap for v. inline void preserve_heap_property_down(Value const& currently_being_moved,size_type i,size_type heap_size) { + using boost::get; distance_type currently_being_moved_dist=get(distance,currently_being_moved); Value* data_ptr = &data[0]; size_type index = 0; // hole at index - currently_being_moved to be put here when we find the final hole spot @@ -423,7 +469,7 @@ #undef D_ARY_MAYBE_IMPROVE_CHILD_I #define D_ARY_MAYBE_IMPROVE_CHILD_I \ distance_type i_dist = get(distance, child_base_ptr[i]); \ - if (compare(i_dist, smallest_child_dist)) { \ + if (better(i_dist, smallest_child_dist)) { \ smallest_child_index = i; \ smallest_child_dist = i_dist; \ } @@ -439,7 +485,7 @@ } //end: know best child - if (compare(smallest_child_dist, currently_being_moved_dist)) { + if (better(smallest_child_dist, currently_being_moved_dist)) { // instead of swapping, move. move_heap_element(child_base_ptr[smallest_child_index],index); // move up index=first_child_index+smallest_child_index; // descend - hole is now here @@ -456,6 +502,7 @@ } void preserve_heap_property_down() { + using boost::get; if (data.empty()) return; if (D_ARY_DOWN_GRAEHL) { // this *should* be more efficient because i avoid swaps. 
Value copy0=data[0]; @@ -484,7 +531,7 @@ D_ARY_MAYBE_IMPROVE_CHILD_I } } - if (compare(smallest_child_dist, currently_being_moved_dist)) { + if (better(smallest_child_dist, currently_being_moved_dist)) { swap_heap_elements(smallest_child_index + first_child_index, index); index = smallest_child_index + first_child_index; continue; diff --git a/utils/hash.h b/utils/hash.h index 17f150ff..2062578f 100755 --- a/utils/hash.h +++ b/utils/hash.h @@ -32,9 +32,9 @@ const unsigned GOLDEN_MEAN_FRACTION=2654435769U; template <class C> struct murmur_hash { - typedef MurmurInt return_type; + typedef MurmurInt result_type; typedef C /*const&*/ argument_type; - return_type operator()(argument_type const& c) const { + result_type operator()(argument_type const& c) const { return MurmurHash((void*)&c,sizeof(c)); } }; @@ -43,9 +43,9 @@ struct murmur_hash template <> struct murmur_hash<std::string> { - typedef MurmurInt return_type; + typedef MurmurInt result_type; typedef std::string /*const&*/ argument_type; - return_type operator()(argument_type const& c) const { + result_type operator()(argument_type const& c) const { return MurmurHash(c.data(),c.size()); } }; @@ -54,9 +54,9 @@ struct murmur_hash<std::string> template <class C> struct murmur_hash_array { - typedef MurmurInt return_type; + typedef MurmurInt result_type; typedef C /*const&*/ argument_type; - return_type operator()(argument_type const& c) const { + result_type operator()(argument_type const& c) const { return MurmurHash(&*c.begin(),c.size()*sizeof(*c.begin())); } }; diff --git a/utils/intern_pool.h b/utils/intern_pool.h index c85c9881..d9890ae6 100755 --- a/utils/intern_pool.h +++ b/utils/intern_pool.h @@ -14,26 +14,48 @@ template <class I> struct get_key { // default accessor for I = like pair<key,val> - typedef typename I::first_type const& return_type; + typedef typename I::first_type const& result_type; typedef I const& argument_type; - return_type operator()(I const& i) const { + result_type operator()(I const& i) const { return i.first; } }; +// Arg type should be the non-pointer version. this saves me from using boost type traits to remove_pointer. 
f may be binary or unary template <class KeyF,class F,class Arg=typename KeyF::argument_type> struct compose_indirect { - typedef Arg *argument_type; // also Arg + typedef Arg *argument_type; // we also accept Arg & KeyF kf; F f; - typedef typename F::return_type return_type; - return_type operator()(Arg p) const { + typedef typename F::result_type result_type; + result_type operator()(Arg const& p) const { return f(kf(p)); } - template <class V> - return_type operator()(Arg *p) const { + result_type operator()(Arg & p) const { + return f(kf(p)); + } + result_type operator()(Arg * p) const { return f(kf(*p)); } + template <class V> + result_type operator()(V const& v) const { + return f(kf(*v)); + } + + result_type operator()(Arg const& a1,Arg const& a2) const { + return f(kf(a1),kf(a2)); + } + result_type operator()(Arg & a1,Arg & a2) const { + return f(kf(a1),kf(a2)); + } + result_type operator()(Arg * a1,Arg * a2) const { + return f(kf(*a1),kf(*a2)); + } + template <class V,class W> + result_type operator()(V const& v,W const&w) const { + return f(kf(*v),kf(*w)); + } + }; @@ -43,27 +65,29 @@ template <class F> struct indirect_function { F f; explicit indirect_function(F const& f=F()) : f(f) {} - typedef typename F::return_type return_type; + typedef typename F::result_type result_type; template <class V> - return_type operator()(V *p) const { + result_type operator()(V *p) const { return f(*p); } }; */ -template <class Item,class KeyF=get_key<Item>,class HashKey=boost::hash<typename KeyF::return_type>,class EqKey=std::equal_to<typename KeyF::return_type>, class Pool=boost::object_pool<Item> > +template <class Item,class KeyF=get_key<Item>,class HashKey=boost::hash<typename KeyF::result_type>,class EqKey=std::equal_to<typename KeyF::result_type>, class Pool=boost::object_pool<Item> > struct intern_pool : Pool { KeyF key; - typedef typename KeyF::return_type Key; + typedef typename KeyF::result_type Key; typedef Item *Handle; - typedef compose_indirect<KeyF,HashKey,Handle> HashDeep; - typedef compose_indirect<KeyF,EqKey,Handle> EqDeep; + typedef compose_indirect<KeyF,HashKey,Item> HashDeep; + typedef compose_indirect<KeyF,EqKey,Item> EqDeep; typedef HASH_SET<Handle,HashDeep,EqDeep> Canonical; typedef typename Canonical::iterator CFind; typedef std::pair<CFind,bool> CInsert; Canonical canonical; - void interneq(Handle &i) { - i=intern(i); + bool interneq(Handle &i) { // returns true if i is newly interned, false if it already existed + CInsert i_new=canonical.insert(i); + i=*i_new.first; + return i_new.second; } // inherited: Handle construct(...) 
Handle construct_fresh() { return Pool::construct(); } diff --git a/utils/logval.h b/utils/logval.h index 4605d919..da0aa2b0 100644 --- a/utils/logval.h +++ b/utils/logval.h @@ -10,11 +10,18 @@ #include <limits> #include <cassert> #include "semiring.h" +#include "show.h" //TODO: template for supporting negation or not - most uses are for nonnegative "probs" only; probably some 10-20% speedup available template <class T> class LogVal { public: + void print(std::ostream &o) const { + if (s_) o<<"(-)"; + o<<v_; + } + PRINT_SELF(LogVal<T>) + typedef LogVal<T> Self; LogVal() : s_(), v_(LOGVAL_LOG0) {} @@ -143,6 +150,7 @@ class LogVal { bool s_; T v_; + }; template <class T> diff --git a/utils/lvalue_pmap.h b/utils/lvalue_pmap.h index 03ddf5e7..5b9403c0 100755 --- a/utils/lvalue_pmap.h +++ b/utils/lvalue_pmap.h @@ -3,7 +3,7 @@ #include <boost/property_map/property_map.hpp> -// i checked: boost provides get and put given [] +// i checked: boost provides get and put given [] - but it's not being found by ADL so instead i define them myself // lvalue property map pmapname<P> that is: P p; valtype &v=p->name; #define PMAP_MEMBER_INDIRECT(pmapname,valtype,name) template <class P> struct pmapname { \ @@ -12,6 +12,9 @@ typedef value_type & reference; \ typedef boost::lvalue_property_map_tag category; \ reference operator[](key_type p) const { return p->name; } \ + typedef pmapname<P> self_type; \ + friend inline value_type const& get(self_type const&,key_type p) { return p->name; } \ + friend inline void put(self_type &,key_type p,value_type const& v) { p->name = v; } \ }; #define PMAP_MEMBER_INDIRECT_2(pmapname,name) template <class P,class R> struct pmapname { \ @@ -20,6 +23,9 @@ typedef value_type & reference; \ typedef boost::lvalue_property_map_tag category; \ reference operator[](key_type p) const { return p->name; } \ + typedef pmapname<P,R> self_type; \ + friend inline value_type const& get(self_type const&,key_type p) { return p->name; } \ + friend inline void put(self_type &,key_type p,value_type const& v) { p->name = v; } \ }; #endif diff --git a/utils/max_plus.h b/utils/max_plus.h index 2c55b33c..2e56f85e 100755 --- a/utils/max_plus.h +++ b/utils/max_plus.h @@ -19,10 +19,23 @@ #include <cassert> #include <limits> #include "semiring.h" +#include "show.h" +//#include "logval.h" template <class T> class MaxPlus { public: + void print(std::ostream &o) const { + o<<v_; + } + PRINT_SELF(MaxPlus<T>) + template <class O> + void operator=(O const& o) { + v_=o.v_; + } + template <class O> + MaxPlus(O const& o) : v_(o.v_) { } + typedef MaxPlus<T> Self; MaxPlus() : v_(LOGVAL_LOG0) {} explicit MaxPlus(double x) : v_(std::log(x)) {} diff --git a/utils/show.h b/utils/show.h index 6f601d47..1b645c83 100755 --- a/utils/show.h +++ b/utils/show.h @@ -6,6 +6,29 @@ #define SHOWS std::cerr #endif + +#define SELF_TYPE_PRINT \ + template <class Char,class Traits> \ + inline friend std::basic_ostream<Char,Traits> & operator <<(std::basic_ostream<Char,Traits> &o, self_type const& me) \ + { me.print(o);return o; } \ + typedef self_type has_print; + +#define SELF_TYPE_PRINT_ANY_STREAM \ + template <class O> \ + friend inline O & operator <<(O &o, self_type const& me) \ + { me.print(o);return o; } \ + typedef self_type has_print; + +#define SELF_TYPE_PRINT_OSTREAM \ + friend inline std::ostream & operator <<(std::ostream &o, self_type const& me) \ + { me.print(o);return o; } \ + typedef self_type has_print; + +#define PRINT_SELF(self) typedef self self_type; SELF_TYPE_PRINT_OSTREAM + +#undef SHOWALWAYS +#define 
SHOWALWAYS(x) x + /* usage: #if DEBUG # define IFD(x) x @@ -36,6 +59,7 @@ careful: none of this is wrapped in a block. so you can't use one of these macr #define SHOW4(IF,x,y0,y1,y2) SHOW1(IF,x) SHOW3(IF,y0,y1,y2) #define SHOW5(IF,x,y0,y1,y2,y3) SHOW1(IF,x) SHOW4(IF,y0,y1,y2,y3) #define SHOW6(IF,x,y0,y1,y2,y3,y4) SHOW1(IF,x) SHOW5(IF,y0,y1,y2,y3,y4) +#define SHOW7(IF,x,y0,y1,y2,y3,y4,y5) SHOW1(IF,x) SHOW6(IF,y0,y1,y2,y3,y4,y5) #define SHOWM(IF,m,x) SHOWP(IF,m<<": ") SHOW(IF,x) #define SHOWM2(IF,m,x0,x1) SHOWP(IF,m<<": ") SHOW2(IF,x0,x1) @@ -43,5 +67,6 @@ careful: none of this is wrapped in a block. so you can't use one of these macr #define SHOWM4(IF,m,x0,x1,x2,x3) SHOWP(IF,m<<": ") SHOW4(IF,x0,x1,x2,x3) #define SHOWM5(IF,m,x0,x1,x2,x3,x4) SHOWP(IF,m<<": ") SHOW5(IF,x,x0,x1,x2,x3,x4) #define SHOWM6(IF,m,x0,x1,x2,x3,x4,x5) SHOWP(IF,m<<": ") SHOW6(IF,x0,x1,x2,x3,x4,x5) +#define SHOWM7(IF,m,x0,x1,x2,x3,x4,x5,x6) SHOWP(IF,m<<": ") SHOW7(IF,x0,x1,x2,x3,x4,x5,x6) #endif diff --git a/utils/utoa.h b/utils/utoa.h index 5de490ba..8b54987b 100755 --- a/utils/utoa.h +++ b/utils/utoa.h @@ -137,7 +137,7 @@ char *utoa_drop_trailing_0(char *buf,Uint_ n_, unsigned &n_skipped) { } //#include "warning_push.h" -//#pragma GCC diagnostic ignore "-Wtype-limits" // because sign check on itoa<unsigned> is annoying +//#pragma GCC diagnostic ignored "-Wtype-limits" // because sign check on itoa<unsigned> is annoying // desired feature: itoa(unsigned) = utoa(unsigned) // positive sign: 0 -> +0, 1-> +1. obviously -n -> -n diff --git a/utils/value_array.h b/utils/value_array.h index a10f754f..f1cc2a84 100755 --- a/utils/value_array.h +++ b/utils/value_array.h @@ -305,6 +305,21 @@ bool operator==(ValueArray<T,A> const& v1, ValueArray<T,A> const& v2) std::equal(v1.begin(),v1.end(),v2.begin()); } +template <class A> +bool operator==(ValueArray<char,A> const& v1, ValueArray<char,A> const& v2) +{ + typename ValueArray<char,A>::size_type sz=v1.size(); + return sz == v2.size() && + 0==std::memcmp(v1.begin(),v2.begin(),sizeof(char)*sz); +} + +template <class A> +bool operator==(ValueArray<unsigned char,A> const& v1, ValueArray<unsigned char,A> const& v2) +{ + typename ValueArray<char,A>::size_type sz=v1.size(); + return sz == v2.size() && + 0==std::memcmp(v1.begin(),v2.begin(),sizeof(char)*sz); +} template <class T,class A> bool operator< (ValueArray<T,A> const& v1, ValueArray<T,A> const& v2) @@ -315,6 +330,15 @@ bool operator< (ValueArray<T,A> const& v1, ValueArray<T,A> const& v2) , v2.end() ); } +template <class A> +bool operator<(ValueArray<unsigned char,A> const& v1, ValueArray<unsigned char,A> const& v2) +{ + typename ValueArray<char,A>::size_type sz=v1.size(); + return sz == v2.size() && + std::memcmp(v1.begin(),v2.begin(),sizeof(char)*sz)<0; +} + + template <class T,class A> void memcpy(void *out,ValueArray<T,A> const& v) { std::memcpy(out,v.begin(),v.size()*sizeof(T)); diff --git a/utils/warning_compiler.h b/utils/warning_compiler.h index 2052cff3..b981ac0f 100644 --- a/utils/warning_compiler.h +++ b/utils/warning_compiler.h @@ -5,7 +5,8 @@ #undef HAVE_DIAGNOSTIC_PUSH #if __GNUC__ > 4 || __GNUC__==4 && __GNUC_MINOR__ > 3 # define HAVE_GCC_4_4 1 -# define HAVE_DIAGNOSTIC_PUSH 1 +# define HAVE_DIAGNOSTIC_PUSH 0 +// weird, they took it out of 4.5? 
#else # define HAVE_GCC_4_4 0 # define HAVE_DIAGNOSTIC_PUSH 0 #endif diff --git a/utils/warning_push.h b/utils/warning_push.h index 086fd524..29d04e76 100644 --- a/utils/warning_push.h +++ b/utils/warning_push.h @@ -3,6 +3,6 @@ #else #include "warning_compiler.h" #if HAVE_DIAGNOSTIC_PUSH -# pragma GCC diagnostic push +#pragma GCC diagnostic push #endif #endif
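
The apply_fsa_models.cc changes above revolve around a prefix trie over rule right-hand sides whose nodes carry the best (Viterbi) probability of any rule reachable through them, so rule continuations can be expanded best-first. A hedged sketch of just that costing idea follows; the RuleTrie/Node names and layout are invented for illustration, while the patch's PrefixTrieNode/PrefixTrieEdge additionally key the root by LHS and mark a rule's end with a symbol-less final edge so finishing a rule competes in the same best-first order as extending it.

    // Toy "prefix trie with pushed Viterbi costs": rules sharing a RHS prefix
    // share trie nodes, and each node remembers the max probability of any
    // rule passing through it.
    #include <algorithm>
    #include <iostream>
    #include <map>
    #include <vector>

    struct Node {
      double best;                // max prob of any rule whose RHS passes through here
      bool final;                 // true if some rule's RHS ends exactly at this node
      std::map<int, Node*> next;  // symbol (>0 terminal, <=0 negated NT) -> child
      Node() : best(0.0), final(false) {}
    };

    struct RuleTrie {
      Node root;
      void add(std::vector<int> const& rhs, double p) {
        Node* n = &root;
        n->best = std::max(n->best, p);
        for (int sym : rhs) {            // walk/extend the shared prefix
          Node*& child = n->next[sym];
          if (!child) child = new Node();
          n = child;
          n->best = std::max(n->best, p);  // push this rule's prob onto the prefix
        }
        n->final = true;                 // a complete rule ends here
      }
      // (child nodes are leaked in this toy; the real code has destroy_children())
    };

    int main() {
      RuleTrie t;
      t.add(std::vector<int>{-1, 5}, 0.7);  // e.g. X -> NP "saw"
      t.add(std::vector<int>{-1, 6}, 0.2);  // e.g. X -> NP "ran"
      // After recognizing the shared prefix symbol -1, the best continuation
      // still promises 0.7, so best-first expansion tries that edge first.
      std::cout << t.root.next[-1]->best << "\n";  // prints 0.7
    }

In the patch this "best reachable probability" is what best_first() divides by the agenda's current global best to get trie_stop_p, the cutoff that decides how far along a node's sorted adjacency list the cube-corner expansion is allowed to go.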