Diffstat (limited to 'decoder')
 decoder/bottom_up_parser.cc |  26
 decoder/cfg.cc              |   8
 decoder/ff_bleu.h           |   1
 decoder/ff_dwarf.cc         |   6
 decoder/ff_klm.cc           |  12
 decoder/ff_lm.h             |   2
 decoder/hg.cc               | 132
 decoder/hg.h                |   8
 decoder/hg_intersect.cc     |  28
 decoder/hg_io.cc            |  13
 decoder/inside_outside.h    |  12
 decoder/kbest.h             |  22
 decoder/maxtrans_blunsom.cc |  28
 decoder/scfg_translator.cc  |  14
 decoder/trule.cc            |  26
 decoder/trule.h             |   4
 16 files changed, 169 insertions(+), 173 deletions(-)
diff --git a/decoder/bottom_up_parser.cc b/decoder/bottom_up_parser.cc
index 63939221..ed79aaf0 100644
--- a/decoder/bottom_up_parser.cc
+++ b/decoder/bottom_up_parser.cc
@@ -84,7 +84,7 @@ class ActiveChart {
const GrammarIter* ni = gptr_->Extend(symbol);
if (!ni) return;
Hypergraph::TailNodeVector na(ant_nodes_.size() + 1);
- for (int i = 0; i < ant_nodes_.size(); ++i)
+ for (unsigned i = 0; i < ant_nodes_.size(); ++i)
na[i] = ant_nodes_[i];
na[ant_nodes_.size()] = node_index;
out_cell->push_back(ActiveItem(ni, na, lattice_cost));
@@ -154,7 +154,7 @@ PassiveChart::PassiveChart(const string& goal,
goal_idx_(-1),
lc_fid_(FD::Convert("LatticeCost")) {
act_chart_.resize(grammars_.size());
- for (int i = 0; i < grammars_.size(); ++i)
+ for (unsigned i = 0; i < grammars_.size(); ++i)
act_chart_[i] = new ActiveChart(forest, *this);
if (!kGOAL) kGOAL = TD::Convert("Goal") * -1;
if (!SILENT) cerr << " Goal category: [" << goal << ']' << endl;
@@ -204,12 +204,12 @@ void PassiveChart::ApplyRules(const int i,
void PassiveChart::ApplyUnaryRules(const int i, const int j) {
const vector<int>& nodes = chart_(i,j); // reference is important!
- for (int gi = 0; gi < grammars_.size(); ++gi) {
+ for (unsigned gi = 0; gi < grammars_.size(); ++gi) {
if (!grammars_[gi]->HasRuleForSpan(i,j,input_.Distance(i,j))) continue;
- for (int di = 0; di < nodes.size(); ++di) {
+ for (unsigned di = 0; di < nodes.size(); ++di) {
const WordID& cat = forest_->nodes_[nodes[di]].cat_;
const vector<TRulePtr>& unaries = grammars_[gi]->GetUnaryRulesForRHS(cat);
- for (int ri = 0; ri < unaries.size(); ++ri) {
+ for (unsigned ri = 0; ri < unaries.size(); ++ri) {
// cerr << "At (" << i << "," << j << "): applying " << unaries[ri]->AsString() << endl;
const Hypergraph::TailNodeVector ant(1, nodes[di]);
ApplyRule(i, j, unaries[ri], ant, 0); // may update nodes
@@ -224,15 +224,15 @@ bool PassiveChart::Parse() {
size_t res = min(static_cast<size_t>(2000000), static_cast<size_t>(in_size_2 * 1000));
forest_->edges_.reserve(res);
goal_idx_ = -1;
- for (int gi = 0; gi < grammars_.size(); ++gi)
+ for (unsigned gi = 0; gi < grammars_.size(); ++gi)
act_chart_[gi]->SeedActiveChart(*grammars_[gi]);
if (!SILENT) cerr << " ";
- for (int l=1; l<input_.size()+1; ++l) {
+ for (unsigned l=1; l<input_.size()+1; ++l) {
if (!SILENT) cerr << '.';
- for (int i=0; i<input_.size() + 1 - l; ++i) {
- int j = i + l;
- for (int gi = 0; gi < grammars_.size(); ++gi) {
+ for (unsigned i=0; i<input_.size() + 1 - l; ++i) {
+ unsigned j = i + l;
+ for (unsigned gi = 0; gi < grammars_.size(); ++gi) {
const Grammar& g = *grammars_[gi];
if (g.HasRuleForSpan(i, j, input_.Distance(i, j))) {
act_chart_[gi]->AdvanceDotsForAllItemsInCell(i, j, input_);
@@ -248,7 +248,7 @@ bool PassiveChart::Parse() {
}
ApplyUnaryRules(i,j);
- for (int gi = 0; gi < grammars_.size(); ++gi) {
+ for (unsigned gi = 0; gi < grammars_.size(); ++gi) {
const Grammar& g = *grammars_[gi];
// deal with non-terminals that were just proved
if (g.HasRuleForSpan(i, j, input_.Distance(i,j)))
@@ -256,7 +256,7 @@ bool PassiveChart::Parse() {
}
}
const vector<int>& dh = chart_(0, input_.size());
- for (int di = 0; di < dh.size(); ++di) {
+ for (unsigned di = 0; di < dh.size(); ++di) {
const Hypergraph::Node& node = forest_->nodes_[dh[di]];
if (node.cat_ == goal_cat_) {
Hypergraph::TailNodeVector ant(1, node.id_);
@@ -272,7 +272,7 @@ bool PassiveChart::Parse() {
}
PassiveChart::~PassiveChart() {
- for (int i = 0; i < act_chart_.size(); ++i)
+ for (unsigned i = 0; i < act_chart_.size(); ++i)
delete act_chart_[i];
}
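
Nearly every hunk in this file (and in most of the commit) is the same mechanical fix: loop counters compared against a container's size() are retyped from int to unsigned, because size() returns an unsigned type and the mixed comparison trips -Wsign-compare. A minimal standalone sketch of the warning and the fix (not cdec code):

    #include <cstdio>
    #include <vector>

    // g++ -Wall -Wsign-compare: the int version warns, the unsigned one is clean.
    void scan(const std::vector<int>& v) {
      // for (int i = 0; i < v.size(); ++i)   // warning: comparison between
      //                                      // signed and unsigned integer
      for (unsigned i = 0; i < v.size(); ++i) // index signedness now matches
        std::printf("%d\n", v[i]);            // what size() returns
    }
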
diff --git a/decoder/cfg.cc b/decoder/cfg.cc
index cd7e66e9..d6ee651a 100644
--- a/decoder/cfg.cc
+++ b/decoder/cfg.cc
@@ -229,13 +229,13 @@ template <>
struct null_for<RHS> {
static RHS null;
};
-*/
template <>
-BinRhs null_traits<BinRhs>::null(std::numeric_limits<int>::min(),std::numeric_limits<int>::min());
+BinRhs null_traits<BinRhs>::xnull(std::numeric_limits<int>::min(),std::numeric_limits<int>::min());
template <>
-RHS null_traits<RHS>::null(1,std::numeric_limits<int>::min());
+RHS null_traits<RHS>::xnull(1,std::numeric_limits<int>::min());
+*/
template <class Rhs>
struct add_virtual_rules {
@@ -250,7 +250,7 @@ struct add_virtual_rules {
R2L rhs2lhs; // an rhs maps to this -virtntid, or original id if length 1
bool name_nts;
add_virtual_rules(CFG &cfg,bool name_nts=false) : nts(cfg.nts),rules(cfg.rules),newnt(-nts.size()),newruleid(rules.size()),name_nts(name_nts) {
- HASH_MAP_EMPTY(rhs2lhs,null_traits<Rhs>::null);
+ HASH_MAP_EMPTY(rhs2lhs,null_traits<Rhs>::xnull);
}
NTHandle get_virt(Rhs const& r) {
NTHandle nt=get_default(rhs2lhs,r,newnt);
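
Context for the xnull rename: HASH_MAP_EMPTY designates a sentinel key that the hash map treats as an empty slot (in cdec this wraps sparsehash's set_empty_key when that backend is enabled, and the INT_MIN-based value is one no real RHS can take); renaming null to xnull presumably sidesteps a clash with another null identifier. A sketch of the sentinel-key idiom, assuming Google sparsehash is installed:

    #include <google/dense_hash_map>  // sparsehash; newer installs use <sparsehash/...>
    #include <limits>

    int main() {
      google::dense_hash_map<int, int> rhs2lhs;
      // dense_hash_map reserves one key value to mean "empty slot"; it must be
      // a key that is never actually inserted, hence the INT_MIN sentinel.
      rhs2lhs.set_empty_key(std::numeric_limits<int>::min());
      rhs2lhs[42] = 7;
      return rhs2lhs.count(42) == 1 ? 0 : 1;
    }
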
diff --git a/decoder/ff_bleu.h b/decoder/ff_bleu.h
index e93731c3..5544920e 100644
--- a/decoder/ff_bleu.h
+++ b/decoder/ff_bleu.h
@@ -6,7 +6,6 @@
#include "hg.h"
#include "ff.h"
-#include "config.h"
class BLEUModelImpl;
diff --git a/decoder/ff_dwarf.cc b/decoder/ff_dwarf.cc
index 3daa85ac..43528405 100644
--- a/decoder/ff_dwarf.cc
+++ b/decoder/ff_dwarf.cc
@@ -519,7 +519,7 @@ void Dwarf::neighboringFWs(const Lattice& l, const int& i, const int& j, const m
while (idx>=0) {
if (l[idx].size()>0) {
if (fw_hash.find(l[idx][0].label)!=fw_hash.end()) {
- *lfw++;
+ lfw++;
}
}
idx-=l[idx][0].dist2next;
@@ -528,7 +528,7 @@ void Dwarf::neighboringFWs(const Lattice& l, const int& i, const int& j, const m
while (idx<l.size()) {
if (l[idx].size()>0) {
if (fw_hash.find(l[idx][0].label)!=fw_hash.end()) {
- *rfw++;
+ rfw++;
}
}
idx+=l[idx][0].dist2next;
@@ -787,7 +787,7 @@ bool Dwarf::generalizeOrientation(CountTable* table, const std::map<WordID,WordI
}
}
}
-
+ return false; // no idea if this is right
}
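
The *lfw++ → lfw++ change silences an "expression result unused" warning: postfix ++ binds tighter than unary *, so *lfw++ means *(lfw++), advancing the pointer and discarding the loaded value. Whether the intent was to advance the pointer or to bump the pointed-to counter is not visible in this hunk; if the latter, (*lfw)++ would be the spelling. A standalone illustration of the precedence:

    #include <cassert>

    int main() {
      int counts[2] = {0, 0};
      int* p = counts;
      *p++;       // parses as *(p++): the pointer moves, nothing is counted
      assert(p == counts + 1 && counts[0] == 0);
      p = counts;
      (*p)++;     // increments the counter the pointer refers to
      assert(p == counts && counts[0] == 1);
      return 0;
    }
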
diff --git a/decoder/ff_klm.cc b/decoder/ff_klm.cc
index a4b26f7c..7a84add7 100644
--- a/decoder/ff_klm.cc
+++ b/decoder/ff_klm.cc
@@ -373,15 +373,17 @@ boost::shared_ptr<FeatureFunction> KLanguageModelFactory::Create(std::string par
if (!RecognizeBinary(filename.c_str(), m)) m = HASH_PROBING;
switch (m) {
- case HASH_PROBING:
+ case PROBING:
return CreateModel<ProbingModel>(param);
- case TRIE_SORTED:
+ case REST_PROBING:
+ return CreateModel<RestProbingModel>(param);
+ case TRIE:
return CreateModel<TrieModel>(param);
- case ARRAY_TRIE_SORTED:
+ case ARRAY_TRIE:
return CreateModel<ArrayTrieModel>(param);
- case QUANT_TRIE_SORTED:
+ case QUANT_TRIE:
return CreateModel<QuantTrieModel>(param);
- case QUANT_ARRAY_TRIE_SORTED:
+ case QUANT_ARRAY_TRIE:
return CreateModel<QuantArrayTrieModel>(param);
default:
UTIL_THROW(util::Exception, "Unrecognized kenlm binary file type " << (unsigned)m);
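
The new case labels track a rename of kenlm's ModelType enumerators (HASH_PROBING → PROBING, TRIE_SORTED → TRIE, and so on) plus the added REST_PROBING model. A reduced sketch of the same dispatch, assuming a kenlm checkout recent enough to have the renamed enum (lm/model_type.hh) and RecognizeBinary (lm/binary_format.hh):

    #include "lm/binary_format.hh"
    #include "lm/model.hh"
    #include <iostream>

    // Report which kenlm model class a binary file would dispatch to.
    void ReportType(const char* filename) {
      lm::ngram::ModelType m;
      if (!lm::ngram::RecognizeBinary(filename, m)) {
        std::cout << "not a kenlm binary (e.g. ARPA text)\n";
        return;
      }
      switch (m) {
        case lm::ngram::PROBING:          std::cout << "ProbingModel\n"; break;
        case lm::ngram::REST_PROBING:     std::cout << "RestProbingModel\n"; break;
        case lm::ngram::TRIE:             std::cout << "TrieModel\n"; break;
        case lm::ngram::ARRAY_TRIE:       std::cout << "ArrayTrieModel\n"; break;
        case lm::ngram::QUANT_TRIE:       std::cout << "QuantTrieModel\n"; break;
        case lm::ngram::QUANT_ARRAY_TRIE: std::cout << "QuantArrayTrieModel\n"; break;
        default:                          std::cout << "unrecognized type\n"; break;
      }
    }
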
diff --git a/decoder/ff_lm.h b/decoder/ff_lm.h
index 8885efce..ccee4268 100644
--- a/decoder/ff_lm.h
+++ b/decoder/ff_lm.h
@@ -6,7 +6,9 @@
#include "hg.h"
#include "ff.h"
+#ifdef HAVE_CONFIG_H
#include "config.h"
+#endif
// everything in this file is deprecated and may be broken.
// Chris Dyer, Mar 2011
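
Guarding the include with HAVE_CONFIG_H is the standard autoconf idiom: config.h exists only in autotools builds (which pass -DHAVE_CONFIG_H), so the guard keeps the header usable in builds that never generate one; ff_bleu.h above simply drops the include instead. The idiom in isolation:

    // -DHAVE_CONFIG_H is defined by automake-driven builds only, so other
    // build systems compile this header without an (absent) config.h.
    #ifdef HAVE_CONFIG_H
    #include "config.h"
    #endif
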
diff --git a/decoder/hg.cc b/decoder/hg.cc
index 180986d7..0dcbe91f 100644
--- a/decoder/hg.cc
+++ b/decoder/hg.cc
@@ -56,7 +56,7 @@ struct less_ve {
Hypergraph::Edge const* Hypergraph::ViterbiSortInEdges(EdgeProbs const& ev)
{
- for (int i=0;i<nodes_.size();++i) {
+ for (unsigned i=0;i<nodes_.size();++i) {
EdgesVector &ie=nodes_[i].in_edges_;
std::sort(ie.begin(),ie.end(),less_ve(ev));
}
@@ -70,9 +70,9 @@ prob_t Hypergraph::ComputeEdgeViterbi(EdgeProbs *ev) const {
}
prob_t Hypergraph::ComputeEdgeViterbi(NodeProbs const& nv,EdgeProbs *ev) const {
- int ne=edges_.size();
+ unsigned ne=edges_.size();
ev->resize(ne);
- for (int i=0;i<ne;++i) {
+ for (unsigned i=0;i<ne;++i) {
Edge const& e=edges_[i];
prob_t r=e.edge_prob_;
TailNodeVector const& t=e.tail_nodes_;
@@ -162,7 +162,7 @@ prob_t Hypergraph::ComputeEdgePosteriors(double scale, vector<prob_t>* posts) co
SparseVector<prob_t>,
ScaledTransitionEventWeightFunction>(*this, &pv, weight, w2);
posts->resize(edges_.size());
- for (int i = 0; i < edges_.size(); ++i)
+ for (unsigned i = 0; i < edges_.size(); ++i)
(*posts)[i] = prob_t(pv.value(i));
return inside;
}
@@ -175,7 +175,7 @@ prob_t Hypergraph::ComputeBestPathThroughEdges(vector<prob_t>* post) const {
SparseVector<TropicalValue>,
ViterbiTransitionEventWeightFunction>(*this, &pv);
post->resize(edges_.size());
- for (int i = 0; i < edges_.size(); ++i)
+ for (unsigned i = 0; i < edges_.size(); ++i)
(*post)[i] = pv.value(i).v_;
return viterbi_weight.v_;
}
@@ -183,12 +183,12 @@ prob_t Hypergraph::ComputeBestPathThroughEdges(vector<prob_t>* post) const {
void Hypergraph::PushWeightsToSource(double scale) {
vector<prob_t> posts;
ComputeEdgePosteriors(scale, &posts);
- for (int i = 0; i < nodes_.size(); ++i) {
+ for (unsigned i = 0; i < nodes_.size(); ++i) {
const Hypergraph::Node& node = nodes_[i];
prob_t z = prob_t::Zero();
- for (int j = 0; j < node.out_edges_.size(); ++j)
+ for (unsigned j = 0; j < node.out_edges_.size(); ++j)
z += posts[node.out_edges_[j]];
- for (int j = 0; j < node.out_edges_.size(); ++j) {
+ for (unsigned j = 0; j < node.out_edges_.size(); ++j) {
edges_[node.out_edges_[j]].edge_prob_ = posts[node.out_edges_[j]] / z;
}
}
@@ -201,7 +201,7 @@ struct vpusher : public vector<TropicalValue> {
void operator()(int n,int /*ei*/,Hypergraph::Edge &e) const {
Hypergraph::TailNodeVector const& t=e.tail_nodes_;
prob_t p=e.edge_prob_;
- for (int i=0;i<t.size();++i)
+ for (unsigned i=0;i<t.size();++i)
p*=(*this)[t[i]].v_;
e.feature_values_.set_value(fid,log(e.edge_prob_=p/(*this)[n].v_));
}
@@ -229,12 +229,12 @@ prob_t Hypergraph::PushViterbiWeightsToGoal(int fid) {
prob_t Hypergraph::PushWeightsToGoal(double scale) {
vector<prob_t> posts;
const prob_t inside_z = ComputeEdgePosteriors(scale, &posts);
- for (int i = 0; i < nodes_.size(); ++i) {
+ for (unsigned i = 0; i < nodes_.size(); ++i) {
const Hypergraph::Node& node = nodes_[i];
prob_t z = prob_t::Zero();
- for (int j = 0; j < node.in_edges_.size(); ++j)
+ for (unsigned j = 0; j < node.in_edges_.size(); ++j)
z += posts[node.in_edges_[j]];
- for (int j = 0; j < node.in_edges_.size(); ++j) {
+ for (unsigned j = 0; j < node.in_edges_.size(); ++j) {
edges_[node.in_edges_[j]].edge_prob_ = posts[node.in_edges_[j]] / z;
}
}
@@ -257,7 +257,7 @@ void Hypergraph::PruneEdges(const EdgeMask& prune_edge, bool run_inside_algorith
if (run_inside_algorithm) {
const EdgeExistsWeightFunction wf(prune_edge);
vector<Boolean> reachable;
- bool goal_derivable = Inside/* <Boolean, EdgeExistsWeightFunction> */(*this, &reachable, wf);
+ bool goal_derivable = Inside<Boolean, EdgeExistsWeightFunction>(*this, &reachable, wf);
if (!goal_derivable) {
edges_.clear();
nodes_.clear();
@@ -266,11 +266,11 @@ void Hypergraph::PruneEdges(const EdgeMask& prune_edge, bool run_inside_algorith
}
assert(reachable.size() == nodes_.size());
- for (int i = 0; i < edges_.size(); ++i) {
+ for (unsigned i = 0; i < edges_.size(); ++i) {
bool prune = prune_edge[i];
if (!prune) {
const Edge& edge = edges_[i];
- for (int j = 0; j < edge.tail_nodes_.size(); ++j) {
+ for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j) {
if (!reachable[edge.tail_nodes_[j]]) {
prune = true;
break;
@@ -299,7 +299,7 @@ void Hypergraph::MarginPrune(vector<prob_t> const& io,prob_t cutoff,vector<bool>
cerr<<"Finishing prune for "<<prune.size()<<" edges; CUTOFF=" << cutoff << endl;
}
unsigned pc = 0;
- for (int i = 0; i < io.size(); ++i) {
+ for (unsigned i = 0; i < io.size(); ++i) {
cutoff*=creep; // start more permissive, then become less generous. this is barely more than 1. we want to do this because it's a disaster if something lower in a derivation tree is deleted, but the higher thing remains (unless safe_inside)
const bool prune_edge = (io[i] < cutoff);
if (prune_edge) {
@@ -325,11 +325,11 @@ bool Hypergraph::PruneInsideOutside(double alpha,double density,const EdgeMask*
assert(!use_beam||alpha>0);
assert(!use_density||density>=1);
assert(!use_sum_prod_semiring||scale>0);
- int rnum=edges_.size();
+ unsigned rnum=edges_.size();
if (use_density) {
const int plen = ViterbiPathLength(*this);
vector<WordID> bp;
- rnum = min(rnum, static_cast<int>(density * static_cast<double>(plen)));
+ rnum = min(rnum, static_cast<unsigned>(density * plen));
cerr << "Density pruning: keep "<<rnum<<" of "<<edges_.size()<<" edges (viterbi = "<<plen<<" edges)"<<endl;
if (rnum == edges_.size()) {
cerr << "No pruning required: denisty already sufficient\n";
@@ -357,7 +357,7 @@ bool Hypergraph::PruneInsideOutside(double alpha,double density,const EdgeMask*
if (use_beam) {
prob_t best=prob_t::One();
if (use_sum_prod_semiring) {
- for (int i = 0; i < mm.size(); ++i)
+ for (unsigned i = 0; i < mm.size(); ++i)
if (mm[i] > best) best = mm[i];
}
prob_t beam_cut=best*prob_t::exp(-alpha);
@@ -386,10 +386,10 @@ void Hypergraph::PrintGraphviz() const {
<< "\" shape=\"rect\"];\n";
Hypergraph::TailNodeVector indorder(edge.tail_nodes_.size(), 0);
int ntc = 0;
- for (int i = 0; i < edge.rule_->e_.size(); ++i) {
+ for (unsigned i = 0; i < edge.rule_->e_.size(); ++i) {
if (edge.rule_->e_[i] <= 0) indorder[ntc++] = 1 + (-1 * edge.rule_->e_[i]);
}
- for (int i = 0; i < edge.tail_nodes_.size(); ++i) {
+ for (unsigned i = 0; i < edge.tail_nodes_.size(); ++i) {
cerr << " " << edge.tail_nodes_[i] << " -> A_" << ei;
if (edge.tail_nodes_.size() > 1) {
cerr << " [label=\"" << indorder[i] << "\"]";
@@ -414,8 +414,8 @@ void Hypergraph::PrintGraphviz() const {
void Hypergraph::Union(const Hypergraph& other) {
if (&other == this) return;
if (nodes_.empty()) { nodes_ = other.nodes_; edges_ = other.edges_; return; }
- int noff = nodes_.size();
- int eoff = edges_.size();
+ unsigned noff = nodes_.size();
+ unsigned eoff = edges_.size();
int ogoal = other.nodes_.size() - 1;
int cgoal = noff - 1;
// keep a single goal node, so add nodes.size - 1
@@ -428,15 +428,15 @@ void Hypergraph::Union(const Hypergraph& other) {
Node& cn = nodes_[i + noff];
cn.id_ = i + noff;
cn.in_edges_.resize(on.in_edges_.size());
- for (int j = 0; j < on.in_edges_.size(); ++j)
+ for (unsigned j = 0; j < on.in_edges_.size(); ++j)
cn.in_edges_[j] = on.in_edges_[j] + eoff;
cn.out_edges_.resize(on.out_edges_.size());
- for (int j = 0; j < on.out_edges_.size(); ++j)
+ for (unsigned j = 0; j < on.out_edges_.size(); ++j)
cn.out_edges_[j] = on.out_edges_[j] + eoff;
}
- for (int i = 0; i < other.edges_.size(); ++i) {
+ for (unsigned i = 0; i < other.edges_.size(); ++i) {
const Edge& oe = other.edges_[i];
Edge& ce = edges_[i + eoff];
ce.id_ = i + eoff;
@@ -449,7 +449,7 @@ void Hypergraph::Union(const Hypergraph& other) {
ce.head_node_ = oe.head_node_ + noff;
}
ce.tail_nodes_.resize(oe.tail_nodes_.size());
- for (int j = 0; j < oe.tail_nodes_.size(); ++j)
+ for (unsigned j = 0; j < oe.tail_nodes_.size(); ++j)
ce.tail_nodes_[j] = oe.tail_nodes_[j] + noff;
}
@@ -460,16 +460,6 @@ void Hypergraph::PruneUnreachable(int goal_node_id) {
TopologicallySortNodesAndEdges(goal_node_id, NULL);
}
-void Hypergraph::RemoveNoncoaccessibleStates(int goal_node_id) {
- if (goal_node_id < 0) goal_node_id += nodes_.size();
- assert(goal_node_id >= 0);
- assert(goal_node_id < nodes_.size());
-
- // I don't get it: does TopologicallySortNodesAndEdges not remove things that don't connect to goal_index? it uses goal_index just to order things? InsideOutside pruning can do this anyway (nearly infinite beam, viterbi semiring)
- // TODO finish implementation
- abort();
-}
-
struct DFSContext {
int node;
int edge_iter;
@@ -559,7 +549,7 @@ void Hypergraph::TopologicallySortNodesAndEdges(int goal_index,
}
#ifndef HG_EDGES_TOPO_SORTED
int ec = 0;
- for (int i = 0; i < reloc_edge.size(); ++i) {
+ for (unsigned i = 0; i < reloc_edge.size(); ++i) {
int& cp = reloc_edge[i];
if (cp >= 0) { cp = ec++; }
}
@@ -576,34 +566,34 @@ void Hypergraph::TopologicallySortNodesAndEdges(int goal_index,
cerr << endl;
#endif
bool no_op = true;
- for (int i = 0; i < reloc_node.size() && no_op; ++i)
- if (reloc_node[i] != i) no_op = false;
- for (int i = 0; i < reloc_edge.size() && no_op; ++i)
- if (reloc_edge[i] != i) no_op = false;
+ for (unsigned i = 0; i < reloc_node.size() && no_op; ++i)
+ if (reloc_node[i] != static_cast<int>(i)) no_op = false;
+ for (unsigned i = 0; i < reloc_edge.size() && no_op; ++i)
+ if (reloc_edge[i] != static_cast<int>(i)) no_op = false;
if (no_op) return;
- for (int i = 0; i < reloc_node.size(); ++i) {
+ for (unsigned i = 0; i < reloc_node.size(); ++i) {
Node& node = nodes_[i];
node.id_ = reloc_node[i];
int c = 0;
- for (int j = 0; j < node.in_edges_.size(); ++j) {
+ for (unsigned j = 0; j < node.in_edges_.size(); ++j) {
const int new_index = reloc_edge[node.in_edges_[j]];
if (new_index >= 0)
node.in_edges_[c++] = new_index;
}
node.in_edges_.resize(c);
c = 0;
- for (int j = 0; j < node.out_edges_.size(); ++j) {
+ for (unsigned j = 0; j < node.out_edges_.size(); ++j) {
const int new_index = reloc_edge[node.out_edges_[j]];
if (new_index >= 0)
node.out_edges_[c++] = new_index;
}
node.out_edges_.resize(c);
}
- for (int i = 0; i < reloc_edge.size(); ++i) {
+ for (unsigned i = 0; i < reloc_edge.size(); ++i) {
Edge& edge = edges_[i];
edge.id_ = reloc_edge[i];
edge.head_node_ = reloc_node[edge.head_node_];
- for (int j = 0; j < edge.tail_nodes_.size(); ++j)
+ for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j)
edge.tail_nodes_[j] = reloc_node[edge.tail_nodes_[j]];
}
edges_.erase(remove_if(edges_.begin(), edges_.end(), BadId<Edge>()), edges_.end());
@@ -623,7 +613,7 @@ void Hypergraph::EpsilonRemove(WordID eps) {
kUnaryRule.reset(new TRule("[X] ||| [X,1] ||| [X,1]"));
}
vector<bool> kill(edges_.size(), false);
- for (int i = 0; i < edges_.size(); ++i) {
+ for (unsigned i = 0; i < edges_.size(); ++i) {
const Edge& edge = edges_[i];
if (edge.tail_nodes_.empty() &&
edge.rule_->f_.size() == 1 &&
@@ -637,7 +627,7 @@ void Hypergraph::EpsilonRemove(WordID eps) {
// same sequence via different paths through the input forest
// this needs to be investigated and fixed
} else {
- for (int j = 0; j < node.out_edges_.size(); ++j)
+ for (unsigned j = 0; j < node.out_edges_.size(); ++j)
edges_[node.out_edges_[j]].feature_values_ += edge.feature_values_;
// cerr << "PROMOTED " << edge.feature_values_ << endl;
}
@@ -646,19 +636,19 @@ void Hypergraph::EpsilonRemove(WordID eps) {
}
bool created_eps = false;
PruneEdges(kill);
- for (int i = 0; i < nodes_.size(); ++i) {
+ for (unsigned i = 0; i < nodes_.size(); ++i) {
const Node& node = nodes_[i];
if (node.in_edges_.empty()) {
- for (int j = 0; j < node.out_edges_.size(); ++j) {
+ for (unsigned j = 0; j < node.out_edges_.size(); ++j) {
Edge& edge = edges_[node.out_edges_[j]];
if (edge.rule_->Arity() == 2) {
assert(edge.rule_->f_.size() == 2);
assert(edge.rule_->e_.size() == 2);
edge.rule_ = kUnaryRule;
- int cur = node.id_;
+ unsigned cur = node.id_;
int t = -1;
assert(edge.tail_nodes_.size() == 2);
- for (int i = 0; i < 2; ++i) if (edge.tail_nodes_[i] != cur) { t = edge.tail_nodes_[i]; }
+ for (unsigned i = 0; i < 2u; ++i) if (edge.tail_nodes_[i] != cur) { t = edge.tail_nodes_[i]; }
assert(t != -1);
edge.tail_nodes_.resize(1);
edge.tail_nodes_[0] = t;
@@ -712,14 +702,14 @@ HypergraphP Hypergraph::CreateEdgeSubset(EdgeMask &keep_edges) const {
HypergraphP Hypergraph::CreateEdgeSubset(EdgeMask &keep_edges,NodeMask &kn) const {
kn.clear();
kn.resize(nodes_.size());
- for (int n=0;n<nodes_.size();++n) { // this nested iteration gives us edges in topo order too
+ for (unsigned n=0;n<nodes_.size();++n) { // this nested iteration gives us edges in topo order too
EdgesVector const& es=nodes_[n].in_edges_;
- for (int i=0;i<es.size();++i) {
+ for (unsigned i=0;i<es.size();++i) {
int ei=es[i];
if (keep_edges[ei]) {
const Edge& e = edges_[ei];
TailNodeVector const& tails=e.tail_nodes_;
- for (int j=0;j<e.tail_nodes_.size();++j) {
+ for (unsigned j=0;j<e.tail_nodes_.size();++j) {
if (!kn[tails[j]]) {
keep_edges[ei]=false;
goto next_edge;
@@ -738,11 +728,11 @@ HypergraphP Hypergraph::CreateNodeEdgeSubset(NodeMask const& keep_nodes,EdgeMask
indices_after e2(keep_edges);
HypergraphP ret(new Hypergraph(n2.n_kept, e2.n_kept, is_linear_chain_));
Nodes &rn=ret->nodes_;
- for (int i=0;i<nodes_.size();++i)
+ for (unsigned i=0;i<nodes_.size();++i)
if (n2.keeping(i))
rn[n2[i]].copy_reindex(nodes_[i],n2,e2);
Edges &re=ret->edges_;
- for (int i=0;i<edges_.size();++i)
+ for (unsigned i=0;i<edges_.size();++i)
if (e2.keeping(i))
re[e2[i]].copy_reindex(edges_[i],n2,e2);
return ret;
@@ -750,11 +740,11 @@ HypergraphP Hypergraph::CreateNodeEdgeSubset(NodeMask const& keep_nodes,EdgeMask
void Hypergraph::TightenEdgeMask(EdgeMask &ke,NodeMask const& kn) const
{
- for (int i = 0; i < edges_.size(); ++i) {
+ for (unsigned i = 0; i < edges_.size(); ++i) {
if (ke[i]) {
const Edge& edge = edges_[i];
TailNodeVector const& tails=edge.tail_nodes_;
- for (int j = 0; j < edge.tail_nodes_.size(); ++j) {
+ for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j) {
if (!kn[tails[j]]) {
ke[i]=false;
goto next_edge;
@@ -766,18 +756,18 @@ void Hypergraph::TightenEdgeMask(EdgeMask &ke,NodeMask const& kn) const
}
void Hypergraph::set_ids() {
- for (int i = 0; i < edges_.size(); ++i)
+ for (unsigned i = 0; i < edges_.size(); ++i)
edges_[i].id_=i;
- for (int i = 0; i < nodes_.size(); ++i)
+ for (unsigned i = 0; i < nodes_.size(); ++i)
nodes_[i].id_=i;
}
void Hypergraph::check_ids() const
{
- for (int i = 0; i < edges_.size(); ++i)
- assert(edges_[i].id_==i);
- for (int i = 0; i < nodes_.size(); ++i)
- assert(nodes_[i].id_==i);
+ for (unsigned i = 0; i < edges_.size(); ++i)
+ assert(edges_[i].id_==static_cast<int>(i));
+ for (unsigned i = 0; i < nodes_.size(); ++i)
+ assert(nodes_[i].id_==static_cast<int>(i));
}
HypergraphP Hypergraph::CreateViterbiHypergraph(const vector<bool>* edges) const {
@@ -796,15 +786,15 @@ HypergraphP Hypergraph::CreateViterbiHypergraph(const vector<bool>* edges) const
set_ids();
# endif
EdgeMask used(edges_.size());
- for (int i = 0; i < vit_edges.size(); ++i)
+ for (unsigned i = 0; i < vit_edges.size(); ++i)
used[vit_edges[i]->id_]=true;
return CreateEdgeSubset(used);
#else
map<int, int> old2new_node;
int num_new_nodes = 0;
- for (int i = 0; i < vit_edges.size(); ++i) {
+ for (unsigned i = 0; i < vit_edges.size(); ++i) {
const Edge& edge = *vit_edges[i];
- for (int j = 0; j < edge.tail_nodes_.size(); ++j) assert(old2new_node.count(edge.tail_nodes_[j]) > 0);
+ for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j) assert(old2new_node.count(edge.tail_nodes_[j]) > 0);
if (old2new_node.count(edge.head_node_) == 0) {
old2new_node[edge.head_node_] = num_new_nodes;
++num_new_nodes;
@@ -820,7 +810,7 @@ HypergraphP Hypergraph::CreateViterbiHypergraph(const vector<bool>* edges) const
new_node.id_ = it->second;
}
- for (int i = 0; i < vit_edges.size(); ++i) {
+ for (unsigned i = 0; i < vit_edges.size(); ++i) {
const Edge& old_edge = *vit_edges[i];
Edge& new_edge = out->edges_[i];
new_edge = old_edge;
@@ -828,7 +818,7 @@ HypergraphP Hypergraph::CreateViterbiHypergraph(const vector<bool>* edges) const
const int new_head_node = old2new_node[old_edge.head_node_];
new_edge.head_node_ = new_head_node;
out->nodes_[new_head_node].in_edges_.push_back(i);
- for (int j = 0; j < old_edge.tail_nodes_.size(); ++j) {
+ for (unsigned j = 0; j < old_edge.tail_nodes_.size(); ++j) {
const int new_tail_node = old2new_node[old_edge.tail_nodes_[j]];
new_edge.tail_nodes_[j] = new_tail_node;
out->nodes_[new_tail_node].out_edges_.push_back(i);
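
Where an unsigned index must be compared against a signed field that can legitimately be negative (ids and reloc entries use -1 for "removed"), the commit casts the index to int, as in reloc_node[i] != static_cast<int>(i), rather than casting the signed side: converting -1 to unsigned yields a huge value and can silently change ordered comparisons. A small illustration:

    #include <cassert>

    int main() {
      int id = -1;       // -1 marks a removed node/edge in the reloc tables
      unsigned i = 0;
      assert(id != static_cast<int>(i));  // compares -1 != 0, as intended
      // Casting the signed side instead turns -1 into 4294967295; equality
      // still fails here, but any < or > test against the index would flip.
      return 0;
    }
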
diff --git a/decoder/hg.h b/decoder/hg.h
index dfa4ac6d..91d25f01 100644
--- a/decoder/hg.h
+++ b/decoder/hg.h
@@ -43,7 +43,7 @@ public:
Hypergraph() : is_linear_chain_(false) {}
// SmallVector is a fast, small vector<int> implementation for sizes <= 2
- typedef SmallVectorInt TailNodeVector; // indices in nodes_
+ typedef SmallVectorUnsigned TailNodeVector; // indices in nodes_
typedef std::vector<int> EdgesVector; // indices in edges_
// TODO get rid of cat_?
@@ -396,7 +396,7 @@ public:
// (inner product) to set the edge probabilities
template <class V>
void Reweight(const V& weights) {
- for (int i = 0; i < edges_.size(); ++i) {
+ for (unsigned i = 0; i < edges_.size(); ++i) {
Edge& e = edges_[i];
e.edge_prob_.logeq(e.feature_values_.dot(weights));
}
@@ -457,8 +457,6 @@ public:
void PruneUnreachable(int goal_node_id); // DEPRECATED
- void RemoveNoncoaccessibleStates(int goal_node_id = -1);
-
// remove edges from the hypergraph if prune_edge[edge_id] is true
// note: if run_inside_algorithm is false, then consumers may be unhappy if you pruned nodes that are built on by nodes that are kept.
void PruneEdges(const EdgeMask& prune_edge, bool run_inside_algorithm = false);
@@ -524,7 +522,7 @@ public:
template <class V>
void visit_edges(V &v) {
- for (int i=0;i<edges_.size();++i)
+ for (unsigned i=0;i<edges_.size();++i)
v(edges_[i].head_node_,i,edges_[i]);
}
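
Retyping TailNodeVector from SmallVectorInt to SmallVectorUnsigned changes the element type at every use site, which is why hg_io.cc's CreateEdge parameter (see that file's hunks below) moves in lockstep. A toy sketch with std::vector stand-ins for cdec's small-vector types:

    #include <vector>

    // Stand-ins for cdec's small-vector types:
    typedef std::vector<int>      SmallVectorInt;
    typedef std::vector<unsigned> SmallVectorUnsigned;
    typedef SmallVectorUnsigned   TailNodeVector;      // the hg.h change

    // void CreateEdge(const SmallVectorInt& tail);    // old signature: would
    //                                                 // no longer accept a
    //                                                 // TailNodeVector
    void CreateEdge(const SmallVectorUnsigned& tail) { (void)tail; }

    int main() { CreateEdge(TailNodeVector(2, 0u)); return 0; }
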
diff --git a/decoder/hg_intersect.cc b/decoder/hg_intersect.cc
index 8752838f..6e3bfee6 100644
--- a/decoder/hg_intersect.cc
+++ b/decoder/hg_intersect.cc
@@ -19,12 +19,12 @@ using namespace std;
struct RuleFilter {
unordered_map<vector<WordID>, bool, boost::hash<vector<WordID> > > exists_;
bool true_lattice;
- RuleFilter(const Lattice& target, int max_phrase_size) {
+ RuleFilter(const Lattice& target, unsigned max_phrase_size) {
true_lattice = false;
- for (int i = 0; i < target.size(); ++i) {
+ for (unsigned i = 0; i < target.size(); ++i) {
vector<WordID> phrase;
- int lim = min(static_cast<int>(target.size()), i + max_phrase_size);
- for (int j = i; j < lim; ++j) {
+ const unsigned lim = min(static_cast<unsigned>(target.size()), i + max_phrase_size);
+ for (unsigned j = i; j < lim; ++j) {
if (target[j].size() > 1) { true_lattice = true; break; }
phrase.push_back(target[j][0].label);
exists_[phrase] = true;
@@ -37,10 +37,10 @@ struct RuleFilter {
// TODO do some smarter filtering for lattices
if (true_lattice) return false; // don't filter "true lattice" input
const vector<WordID>& e = r.e();
- for (int i = 0; i < e.size(); ++i) {
+ for (unsigned i = 0; i < e.size(); ++i) {
if (e[i] <= 0) continue;
vector<WordID> phrase;
- for (int j = i; j < e.size(); ++j) {
+ for (unsigned j = i; j < e.size(); ++j) {
if (e[j] <= 0) break;
phrase.push_back(e[j]);
if (exists_.count(phrase) == 0) return true;
@@ -55,7 +55,7 @@ static bool FastLinearIntersect(const Lattice& target, Hypergraph* hg) {
vector<bool> prune(hg->edges_.size(), false);
set<int> cov;
map<const TRule*, TRulePtr> inverted_rules;
- for (int i = 0; i < prune.size(); ++i) {
+ for (unsigned i = 0; i < prune.size(); ++i) {
Hypergraph::Edge& edge = hg->edges_[i];
if (edge.Arity() == 0) {
const int trg_index = edge.prev_i_;
@@ -87,12 +87,12 @@ bool HG::Intersect(const Lattice& target, Hypergraph* hg) {
vector<bool> rem(hg->edges_.size(), false);
const RuleFilter filter(target, 15); // TODO make configurable
- for (int i = 0; i < rem.size(); ++i)
+ for (unsigned i = 0; i < rem.size(); ++i)
rem[i] = filter(*hg->edges_[i].rule_);
hg->PruneEdges(rem, true);
- const int nedges = hg->edges_.size();
- const int nnodes = hg->nodes_.size();
+ const unsigned nedges = hg->edges_.size();
+ const unsigned nnodes = hg->nodes_.size();
TextGrammar* g = new TextGrammar;
GrammarPtr gp(g);
@@ -100,7 +100,7 @@ bool HG::Intersect(const Lattice& target, Hypergraph* hg) {
// each node in the translation forest becomes a "non-terminal" in the new
// grammar, create the labels here
const string kSEP = "_";
- for (int i = 0; i < nnodes; ++i) {
+ for (unsigned i = 0; i < nnodes; ++i) {
const char* pstr = "CAT";
if (hg->nodes_[i].cat_ < 0)
pstr = TD::Convert(-hg->nodes_[i].cat_);
@@ -108,7 +108,7 @@ bool HG::Intersect(const Lattice& target, Hypergraph* hg) {
}
// construct the grammar
- for (int i = 0; i < nedges; ++i) {
+ for (unsigned i = 0; i < nedges; ++i) {
const Hypergraph::Edge& edge = hg->edges_[i];
const vector<WordID>& tgt = edge.rule_->e();
const vector<WordID>& src = edge.rule_->f();
@@ -122,7 +122,7 @@ bool HG::Intersect(const Lattice& target, Hypergraph* hg) {
e.resize(src.size()); // parses using the source side!
Hypergraph::TailNodeVector tn(edge.tail_nodes_.size());
int ntc = 0;
- for (int j = 0; j < tgt.size(); ++j) {
+ for (unsigned j = 0; j < tgt.size(); ++j) {
const WordID& cur = tgt[j];
if (cur > 0) {
f[j] = cur;
@@ -133,7 +133,7 @@ bool HG::Intersect(const Lattice& target, Hypergraph* hg) {
}
}
ntc = 0;
- for (int j = 0; j < src.size(); ++j) {
+ for (unsigned j = 0; j < src.size(); ++j) {
const WordID& cur = src[j];
if (cur > 0) {
e[j] = cur;
diff --git a/decoder/hg_io.cc b/decoder/hg_io.cc
index 734c2ce8..bfb2fb80 100644
--- a/decoder/hg_io.cc
+++ b/decoder/hg_io.cc
@@ -28,7 +28,7 @@ struct HGReader : public JSONParser {
hg.ConnectEdgeToHeadNode(&hg.edges_[in_edges[i]], node);
}
}
- void CreateEdge(const TRulePtr& rule, FeatureVector* feats, const SmallVectorInt& tail) {
+ void CreateEdge(const TRulePtr& rule, FeatureVector* feats, const SmallVectorUnsigned& tail) {
Hypergraph::Edge* edge = hg.AddEdge(rule, tail);
feats->swap(edge->feature_values_);
edge->i_ = spans[0];
@@ -229,7 +229,7 @@ struct HGReader : public JSONParser {
}
string rp;
string cat;
- SmallVectorInt tail;
+ SmallVectorUnsigned tail;
vector<int> in_edges;
TRulePtr cur_rule;
map<int, TRulePtr> rules;
@@ -488,13 +488,13 @@ int getInt(const std::string& in, int &c)
#define MAX_NODES 100000000
// parse ('foo', 0.23)
void ReadPLFEdge(const std::string& in, int &c, int cur_node, Hypergraph* hg) {
- if (get(in,c++) != '(') { assert(!"PCN/PLF parse error: expected ( at start of cn alt block\n"); }
+ if (get(in,c++) != '(') { cerr << "PCN/PLF parse error: expected (\n"; abort(); }
vector<WordID> ewords(2, 0);
ewords[1] = TD::Convert(getEscapedString(in,c));
TRulePtr r(new TRule(ewords));
r->ComputeArity();
// cerr << "RULE: " << r->AsString() << endl;
- if (get(in,c++) != ',') { cerr << in << endl; assert(!"PCN/PLF parse error: expected , after string\n"); }
+ if (get(in,c++) != ',') { cerr << in << endl; cerr << "PCN/PLF parse error: expected , after string\n"; abort(); }
size_t cnNext = 1;
std::vector<float> probs;
probs.push_back(getFloat(in,c));
@@ -508,10 +508,9 @@ void ReadPLFEdge(const std::string& in, int &c, int cur_node, Hypergraph* hg) {
if (probs.size()>1) {
cnNext = static_cast<size_t>(probs.back());
probs.pop_back();
- if (cnNext < 1) { cerr << cnNext << endl;
- assert(!"PCN/PLF parse error: bad link length at last element of cn alt block\n"); }
+ if (cnNext < 1) { cerr << cnNext << endl << "PCN/PLF parse error: bad link length at last element of cn alt block\n"; abort(); }
}
- if (get(in,c++) != ')') { assert(!"PCN/PLF parse error: expected ) at end of cn alt block\n"); }
+ if (get(in,c++) != ')') { cerr << "PCN/PLF parse error: expected ) at end of cn alt block\n"; abort(); }
eatws(in,c);
Hypergraph::TailNodeVector tail(1, cur_node);
Hypergraph::Edge* edge = hg->AddEdge(r, tail);
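
The assert(!"message") idiom aborts with a readable message in debug builds but compiles away entirely under -DNDEBUG, so a release-mode decoder would march on past a malformed PLF lattice; the replacement cerr-plus-abort() fails loudly in every build. A minimal sketch:

    #include <cstdlib>
    #include <iostream>

    void expect_open_paren(char c) {
      // Old style: assert(!"PCN/PLF parse error: ...") vanishes under -DNDEBUG.
      // New style: report and stop in every build.
      if (c != '(') {
        std::cerr << "PCN/PLF parse error: expected (\n";
        std::abort();
      }
    }

    int main() { expect_open_paren('('); return 0; }
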
diff --git a/decoder/inside_outside.h b/decoder/inside_outside.h
index dc96f1a9..bb7f9fcc 100644
--- a/decoder/inside_outside.h
+++ b/decoder/inside_outside.h
@@ -31,24 +31,24 @@ template<class WeightType, class WeightFunction>
WeightType Inside(const Hypergraph& hg,
std::vector<WeightType>* result = NULL,
const WeightFunction& weight = WeightFunction()) {
- const int num_nodes = hg.nodes_.size();
+ const unsigned num_nodes = hg.nodes_.size();
std::vector<WeightType> dummy;
std::vector<WeightType>& inside_score = result ? *result : dummy;
inside_score.clear();
inside_score.resize(num_nodes);
// std::fill(inside_score.begin(), inside_score.end(), WeightType()); // clear handles
- for (int i = 0; i < num_nodes; ++i) {
+ for (unsigned i = 0; i < num_nodes; ++i) {
WeightType* const cur_node_inside_score = &inside_score[i];
Hypergraph::EdgesVector const& in=hg.nodes_[i].in_edges_;
- const int num_in_edges = in.size();
+ const unsigned num_in_edges = in.size();
if (num_in_edges == 0) {
*cur_node_inside_score = WeightType(1); //FIXME: why not call weight(edge) instead?
continue;
}
- for (int j = 0; j < num_in_edges; ++j) {
+ for (unsigned j = 0; j < num_in_edges; ++j) {
const Hypergraph::Edge& edge = hg.edges_[in[j]];
WeightType score = weight(edge);
- for (int k = 0; k < edge.tail_nodes_.size(); ++k) {
+ for (unsigned k = 0; k < edge.tail_nodes_.size(); ++k) {
const int tail_node_index = edge.tail_nodes_[k];
score *= inside_score[tail_node_index];
}
@@ -67,7 +67,7 @@ void Outside(const Hypergraph& hg,
) {
assert(result);
const int num_nodes = hg.nodes_.size();
- assert(inside_score.size() == num_nodes);
+ assert(static_cast<int>(inside_score.size()) == num_nodes);
std::vector<WeightType>& outside_score = *result;
outside_score.clear();
outside_score.resize(num_nodes);
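
For reference, the loops being retyped here implement the standard inside recursion: with nodes in topological order, a node with no incoming edges scores 1, and otherwise inside(v) is the sum over in-edges e of weight(e) times the product of the inside scores of e's tail nodes. A scalar toy version (double in place of the templated semiring):

    #include <cstdio>
    #include <vector>

    struct Edge { double w; std::vector<int> tails; };
    struct Node { std::vector<int> in; };  // indices of incoming edges

    double Inside(const std::vector<Node>& nodes, const std::vector<Edge>& edges) {
      std::vector<double> inside(nodes.size(), 0.0);
      for (unsigned i = 0; i < nodes.size(); ++i) {
        if (nodes[i].in.empty()) { inside[i] = 1.0; continue; }  // leaf/axiom
        for (unsigned j = 0; j < nodes[i].in.size(); ++j) {
          const Edge& e = edges[nodes[i].in[j]];
          double score = e.w;
          for (unsigned k = 0; k < e.tails.size(); ++k)
            score *= inside[e.tails[k]];  // tails precede i in topo order
          inside[i] += score;
        }
      }
      return inside.back();  // goal node is last in topo order
    }

    int main() {
      // two leaves combined by one binary edge of weight 0.5
      std::vector<Edge> edges(1);
      edges[0].w = 0.5;
      edges[0].tails.push_back(0);
      edges[0].tails.push_back(1);
      std::vector<Node> nodes(3);
      nodes[2].in.push_back(0);
      std::printf("%g\n", Inside(nodes, edges));  // prints 0.5
      return 0;
    }
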
diff --git a/decoder/kbest.h b/decoder/kbest.h
index 03a8311c..9af3a20e 100644
--- a/decoder/kbest.h
+++ b/decoder/kbest.h
@@ -43,7 +43,7 @@ namespace KBest {
traverse(tf), w(wf), g(hg), nds(g.nodes_.size()), k_prime(k) {}
~KBestDerivations() {
- for (int i = 0; i < freelist.size(); ++i)
+ for (unsigned i = 0; i < freelist.size(); ++i)
delete freelist[i];
}
@@ -86,7 +86,7 @@ namespace KBest {
// Hypergraph::Edge const * operator ->() const { return d->edge; }
};
- EdgeHandle operator()(int t,int taili,EdgeHandle const& parent) const {
+ EdgeHandle operator()(unsigned t,unsigned taili,EdgeHandle const& parent) const {
return EdgeHandle(nds[t].D[parent.d->j[taili]]);
}
@@ -98,7 +98,7 @@ namespace KBest {
size_t operator()(const Derivation* d) const {
size_t x = 5381;
x = ((x << 5) + x) ^ d->edge->id_;
- for (int i = 0; i < d->j.size(); ++i)
+ for (unsigned i = 0; i < d->j.size(); ++i)
x = ((x << 5) + x) ^ d->j[i];
return x;
}
@@ -121,7 +121,7 @@ namespace KBest {
explicit NodeDerivationState(const DerivationFilter& f = DerivationFilter()) : filter(f) {}
};
- Derivation* LazyKthBest(int v, int k) {
+ Derivation* LazyKthBest(unsigned v, unsigned k) {
NodeDerivationState& s = GetCandidates(v);
CandidateHeap& cand = s.cand;
DerivationList& D = s.D;
@@ -139,7 +139,7 @@ namespace KBest {
Derivation* d = cand.back();
cand.pop_back();
std::vector<const T*> ants(d->edge->Arity());
- for (int j = 0; j < ants.size(); ++j)
+ for (unsigned j = 0; j < ants.size(); ++j)
ants[j] = &LazyKthBest(d->edge->tail_nodes_[j], d->j[j])->yield;
traverse(*d->edge, ants, &d->yield);
if (!filter(d->yield)) {
@@ -171,12 +171,12 @@ namespace KBest {
return freelist.back();
}
- NodeDerivationState& GetCandidates(int v) {
+ NodeDerivationState& GetCandidates(unsigned v) {
NodeDerivationState& s = nds[v];
if (!s.D.empty() || !s.cand.empty()) return s;
const Hypergraph::Node& node = g.nodes_[v];
- for (int i = 0; i < node.in_edges_.size(); ++i) {
+ for (unsigned i = 0; i < node.in_edges_.size(); ++i) {
const Hypergraph::Edge& edge = g.edges_[node.in_edges_[i]];
SmallVectorInt jv(edge.Arity(), 0);
Derivation* d = CreateDerivation(edge, jv);
@@ -184,7 +184,7 @@ namespace KBest {
s.cand.push_back(d);
}
- const int effective_k = std::min(k_prime, s.cand.size());
+ const unsigned effective_k = std::min(k_prime, s.cand.size());
const typename CandidateHeap::iterator kth = s.cand.begin() + effective_k;
std::nth_element(s.cand.begin(), kth, s.cand.end(), DerivationCompare());
s.cand.resize(effective_k);
@@ -194,7 +194,7 @@ namespace KBest {
}
void LazyNext(const Derivation* d, CandidateHeap* cand, UniqueDerivationSet* ds) {
- for (int i = 0; i < d->j.size(); ++i) {
+ for (unsigned i = 0; i < d->j.size(); ++i) {
SmallVectorInt j = d->j;
++j[i];
const Derivation* ant = LazyKthBest(d->edge->tail_nodes_[i], j[i]);
@@ -205,8 +205,12 @@ namespace KBest {
if (new_d) {
cand->push_back(new_d);
std::push_heap(cand->begin(), cand->end(), HeapCompare());
+#ifdef NDEBUG
+ ds->insert(new_d).second; // insert into uniqueness set
+#else
bool inserted = ds->insert(new_d).second; // insert into uniqueness set
assert(inserted);
+#endif
}
}
}
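
The #ifdef NDEBUG split is needed because assert(inserted) compiles away under NDEBUG, which would leave inserted set but unused (another warning), while the insertion itself must still run for the uniqueness set to work. The hazard in isolation:

    #include <cassert>
    #include <set>

    void add_unique(std::set<int>& s, int v) {
    #ifdef NDEBUG
      s.insert(v);                         // the side effect must still happen
    #else
      bool inserted = s.insert(v).second;  // checked only in debug builds
      assert(inserted);
    #endif
    }

    int main() { std::set<int> s; add_unique(s, 1); return 0; }
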
diff --git a/decoder/maxtrans_blunsom.cc b/decoder/maxtrans_blunsom.cc
index 6efab454..774e4170 100644
--- a/decoder/maxtrans_blunsom.cc
+++ b/decoder/maxtrans_blunsom.cc
@@ -73,7 +73,7 @@ struct Candidate {
prob_t p = prob_t::One();
// cerr << "\nEstimating application of " << in_edge.rule_->AsString() << endl;
vector<const vector<WordID>* > ants(tail.size());
- for (int i = 0; i < tail.size(); ++i) {
+ for (unsigned i = 0; i < tail.size(); ++i) {
const Candidate& ant = *D[in_edge.tail_nodes_[i]][j_[i]];
ants[i] = &ant.state_;
assert(ant.IsIncorporatedIntoHypergraph());
@@ -99,7 +99,7 @@ ostream& operator<<(ostream& os, const Candidate& cand) {
else { os << "+LM_node=" << cand.node_index_; }
os << " edge=" << cand.in_edge_->id_;
os << " j=<";
- for (int i = 0; i < cand.j_.size(); ++i)
+ for (unsigned i = 0; i < cand.j_.size(); ++i)
os << (i==0 ? "" : " ") << cand.j_[i];
os << "> vit=" << log(cand.inside_prob_);
os << " est=" << log(cand.est_prob_);
@@ -127,7 +127,7 @@ struct CandidateUniquenessHash {
size_t operator()(const Candidate* c) const {
size_t x = 5381;
x = ((x << 5) + x) ^ c->in_edge_->id_;
- for (int i = 0; i < c->j_.size(); ++i)
+ for (unsigned i = 0; i < c->j_.size(); ++i)
x = ((x << 5) + x) ^ c->j_[i];
return x;
}
@@ -154,12 +154,12 @@ public:
}
void Apply() {
- int num_nodes = in.nodes_.size();
- int goal_id = num_nodes - 1;
- int pregoal = goal_id - 1;
+ const unsigned num_nodes = in.nodes_.size();
+ const unsigned goal_id = num_nodes - 1;
+ const unsigned pregoal = goal_id - 1;
assert(in.nodes_[pregoal].out_edges_.size() == 1);
cerr << " ";
- for (int i = 0; i < in.nodes_.size(); ++i) {
+ for (unsigned i = 0; i < in.nodes_.size(); ++i) {
cerr << '.';
KBest(i, i == goal_id);
}
@@ -174,9 +174,9 @@ public:
private:
void FreeAll() {
- for (int i = 0; i < D.size(); ++i) {
+ for (unsigned i = 0; i < D.size(); ++i) {
CandidateList& D_i = D[i];
- for (int j = 0; j < D_i.size(); ++j)
+ for (unsigned j = 0; j < D_i.size(); ++j)
delete D_i[j];
}
D.clear();
@@ -216,7 +216,7 @@ public:
CandidateList freelist;
cand.reserve(in_edges.size());
UniqueCandidateSet unique_cands;
- for (int i = 0; i < in_edges.size(); ++i) {
+ for (unsigned i = 0; i < in_edges.size(); ++i) {
const Hypergraph::Edge& edge = in.edges_[in_edges[i]];
const JVector j(edge.tail_nodes_.size(), 0);
cand.push_back(new Candidate(edge, j, D, is_goal));
@@ -242,20 +242,20 @@ public:
sort(D_v.begin(), D_v.end(), EstProbSorter());
// cerr << " expanded to " << D_v.size() << " nodes\n";
- for (int i = 0; i < cand.size(); ++i)
+ for (unsigned i = 0; i < cand.size(); ++i)
delete cand[i];
// freelist is necessary since even after an item merged, it still stays in
// the unique set so it can't be deleted til now
- for (int i = 0; i < freelist.size(); ++i)
+ for (unsigned i = 0; i < freelist.size(); ++i)
delete freelist[i];
}
void PushSucc(const Candidate& item, const bool is_goal, CandidateHeap* pcand, UniqueCandidateSet* cs) {
CandidateHeap& cand = *pcand;
- for (int i = 0; i < item.j_.size(); ++i) {
+ for (unsigned i = 0; i < item.j_.size(); ++i) {
JVector j = item.j_;
++j[i];
- if (j[i] < D[item.in_edge_->tail_nodes_[i]].size()) {
+ if (static_cast<unsigned>(j[i]) < D[item.in_edge_->tail_nodes_[i]].size()) {
Candidate query_unique(*item.in_edge_, j);
if (cs->count(&query_unique) == 0) {
Candidate* new_cand = new Candidate(*item.in_edge_, j, D, is_goal);
diff --git a/decoder/scfg_translator.cc b/decoder/scfg_translator.cc
index 15abb600..185f979a 100644
--- a/decoder/scfg_translator.cc
+++ b/decoder/scfg_translator.cc
@@ -33,7 +33,7 @@ struct SCFGTranslatorImpl {
{
if(conf.count("grammar")){
vector<string> gfiles = conf["grammar"].as<vector<string> >();
- for (int i = 0; i < gfiles.size(); ++i) {
+ for (unsigned i = 0; i < gfiles.size(); ++i) {
if (!SILENT) cerr << "Reading SCFG grammar from " << gfiles[i] << endl;
TextGrammar* g = new TextGrammar(gfiles[i]);
g->SetMaxSpan(max_span_limit);
@@ -132,7 +132,7 @@ struct SCFGTranslatorImpl {
g->SetGrammarName("PassThrough");
glist.push_back(GrammarPtr(g));
}
- for (int gi = 0; gi < glist.size(); ++gi) {
+ for (unsigned gi = 0; gi < glist.size(); ++gi) {
if(printGrammarsUsed)
cerr << "Using grammar::" << glist[gi]->GetGrammarName() << endl;
}
@@ -147,7 +147,7 @@ struct SCFGTranslatorImpl {
forest->Reweight(weights);
if (use_ctf_) {
Hypergraph::Node& goal_node = *(forest->nodes_.end()-1);
- foreach(int edge_id, goal_node.in_edges_)
+ foreach(unsigned edge_id, goal_node.in_edges_)
RefineRule(forest->edges_[edge_id].rule_, ctf_iterations_);
double alpha = ctf_alpha_;
bool found_parse=false;
@@ -155,7 +155,7 @@ struct SCFGTranslatorImpl {
cerr << "Coarse-to-fine source parse, alpha=" << alpha << endl;
found_parse = true;
Hypergraph refined_forest = *forest;
- for (int j=0; j < ctf_iterations_; ++j) {
+ for (unsigned j=0; j < ctf_iterations_; ++j) {
cerr << viterbi_stats(refined_forest," Coarse forest",true,show_tree_structure_);
cerr << " Iteration " << (j+1) << ": Pruning forest... ";
refined_forest.BeamPruneInsideOutside(1.0, false, alpha, NULL);
@@ -178,7 +178,7 @@ struct SCFGTranslatorImpl {
if (!found_parse){
if (ctf_exhaustive_){
cerr << "Last resort: refining coarse forest without pruning...";
- for (int j=0; j < ctf_iterations_; ++j) {
+ for (unsigned j=0; j < ctf_iterations_; ++j) {
if (RefineForest(forest)){
cerr << " Refinement succeeded." << endl;
forest->Reweight(weights);
@@ -213,7 +213,7 @@ struct SCFGTranslatorImpl {
Hypergraph::Edge& edge = forest->edges_[edge_id];
std::vector<int> nt_positions;
TRulePtr& coarse_rule_ptr = edge.rule_;
- for(int i=0; i< coarse_rule_ptr->f_.size(); ++i){
+ for(unsigned i=0; i< coarse_rule_ptr->f_.size(); ++i){
if (coarse_rule_ptr->f_[i] < 0)
nt_positions.push_back(i);
}
@@ -225,7 +225,7 @@ struct SCFGTranslatorImpl {
// fine rules apply only if state splits on tail nodes match fine rule nonterminals
foreach(TRulePtr& fine_rule_ptr, *(coarse_rule_ptr->fine_rules_)) {
Hypergraph::TailNodeVector tail;
- for (int pos_i=0; pos_i<nt_positions.size(); ++pos_i){
+ for (unsigned pos_i=0; pos_i<nt_positions.size(); ++pos_i){
WordID fine_cat = fine_rule_ptr->f_[nt_positions[pos_i]];
Split2Node::iterator it =
s2n.find(StateSplit(edge.tail_nodes_[pos_i], fine_cat));
diff --git a/decoder/trule.cc b/decoder/trule.cc
index 141b8faa..187a003d 100644
--- a/decoder/trule.cc
+++ b/decoder/trule.cc
@@ -18,7 +18,7 @@ bool TRule::IsGoal() const {
}
static WordID ConvertTrgString(const string& w) {
- int len = w.size();
+ const unsigned len = w.size();
WordID id = 0;
// [X,0] or [0]
// for target rules, we ignore the category, just keep the index
@@ -33,7 +33,7 @@ static WordID ConvertTrgString(const string& w) {
}
static WordID ConvertSrcString(const string& w, bool mono = false) {
- int len = w.size();
+ const unsigned len = w.size();
// [X,0]
// for source rules, we keep the category and ignore the index (source rules are
// always numbered 1, 2, 3...
@@ -60,7 +60,7 @@ static WordID ConvertSrcString(const string& w, bool mono = false) {
static WordID ConvertLHS(const string& w) {
if (w[0] == '[') {
- int len = w.size();
+ const unsigned len = w.size();
if (len < 3) { cerr << "Format error: " << w << endl; exit(1); }
return TD::Convert(w.substr(1, len-2)) * -1;
} else {
@@ -100,6 +100,8 @@ namespace {
// callback for lexer
int n_assigned=0;
void assign_trule(const TRulePtr& new_rule, const unsigned int ctf_level, const TRulePtr& coarse_rule, void* extra) {
+ (void) ctf_level;
+ (void) coarse_rule;
TRule *assignto=(TRule *)extra;
*assignto=*new_rule;
++n_assigned;
@@ -143,15 +145,15 @@ bool TRule::ReadFromString(const string& line, bool strict, bool mono) {
string ss;
getline(is, ss);
//cerr << "L: " << ss << endl;
- int start = 0;
- int len = ss.size();
+ unsigned start = 0;
+ unsigned len = ss.size();
const size_t ppos = ss.find(" |||");
if (ppos != string::npos) { len = ppos; }
while (start < len) {
while(start < len && (ss[start] == ' ' || ss[start] == ';'))
++start;
if (start == len) break;
- int end = start + 1;
+ unsigned end = start + 1;
while(end < len && (ss[end] != '=' && ss[end] != ' ' && ss[end] != ';'))
++end;
if (end == len || ss[end] == ' ' || ss[end] == ';') {
@@ -188,7 +190,7 @@ bool TRule::ReadFromString(const string& line, bool strict, bool mono) {
while(is>>w && w!="|||") { e_.push_back(ConvertTrgString(w)); }
f_ = e_;
int x = ConvertLHS("[X]");
- for (int i = 0; i < f_.size(); ++i)
+ for (unsigned i = 0; i < f_.size(); ++i)
if (f_[i] <= 0) { f_[i] = x; }
} else {
cerr << "F: " << format << endl;
@@ -197,7 +199,7 @@ bool TRule::ReadFromString(const string& line, bool strict, bool mono) {
if (mono) {
e_ = f_;
int ci = 0;
- for (int i = 0; i < e_.size(); ++i)
+ for (unsigned i = 0; i < e_.size(); ++i)
if (e_[i] < 0)
e_[i] = ci--;
}
@@ -208,7 +210,7 @@ bool TRule::ReadFromString(const string& line, bool strict, bool mono) {
bool TRule::SanityCheck() const {
vector<int> used(f_.size(), 0);
int ac = 0;
- for (int i = 0; i < e_.size(); ++i) {
+ for (unsigned i = 0; i < e_.size(); ++i) {
int ind = e_[i];
if (ind > 0) continue;
ind = -ind;
@@ -238,7 +240,7 @@ string TRule::AsString(bool verbose) const {
if (lhs_ && verbose) {
os << '[' << TD::Convert(lhs_ * -1) << "] |||";
}
- for (int i = 0; i < f_.size(); ++i) {
+ for (unsigned i = 0; i < f_.size(); ++i) {
const WordID& w = f_[i];
if (w < 0) {
int wi = w * -1;
@@ -249,7 +251,7 @@ string TRule::AsString(bool verbose) const {
}
}
os << " ||| ";
- for (int i =0; i<e_.size(); ++i) {
+ for (unsigned i =0; i<e_.size(); ++i) {
if (i) os << ' ';
const WordID& w = e_[i];
if (w < 1)
@@ -261,7 +263,7 @@ string TRule::AsString(bool verbose) const {
os << " ||| " << scores_;
if (!a_.empty()) {
os << " |||";
- for (int i = 0; i < a_.size(); ++i)
+ for (unsigned i = 0; i < a_.size(); ++i)
os << ' ' << a_[i];
}
}
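
The (void) casts added to assign_trule mark parameters as deliberately unused: the lexer callback has to keep the full signature, and the casts keep -Wunused-parameter quiet without changing behavior. The idiom on a hypothetical callback:

    // Hypothetical callback: the signature is fixed by the caller, so unused
    // parameters are cast to void to silence -Wunused-parameter.
    void assign_cb(int value, int ctf_level, void* extra) {
      (void) ctf_level;                  // intentionally unused
      *static_cast<int*>(extra) = value;
    }
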
diff --git a/decoder/trule.h b/decoder/trule.h
index 8eb2a059..6a33d052 100644
--- a/decoder/trule.h
+++ b/decoder/trule.h
@@ -76,7 +76,7 @@ class TRule {
void ESubstitute(const std::vector<const std::vector<WordID>* >& var_values,
std::vector<WordID>* result) const {
- int vc = 0;
+ unsigned vc = 0;
result->clear();
for (std::vector<WordID>::const_iterator i = e_.begin(); i != e_.end(); ++i) {
const WordID& c = *i;
@@ -95,7 +95,7 @@ class TRule {
void FSubstitute(const std::vector<const std::vector<WordID>* >& var_values,
std::vector<WordID>* result) const {
- int vc = 0;
+ unsigned vc = 0;
result->clear();
for (std::vector<WordID>::const_iterator i = f_.begin(); i != f_.end(); ++i) {
const WordID& c = *i;