| author | Kenneth Heafield <github@kheafield.com> | 2014-01-11 21:02:04 -0800 |
|---|---|---|
| committer | Kenneth Heafield <github@kheafield.com> | 2014-01-11 21:02:04 -0800 |
| commit | c148f8429c66103a401ba4c3a029e349cd11aa8a (patch) | |
| tree | a0239922e00ac9ae5027069f2846ae96c5e768b4 /training | |
| parent | b5c7cb3b9a1bfd917235fc79d67169b9d7d98ab0 (diff) | |
Fix compilation with ancient gcc
Diffstat (limited to 'training')
-rw-r--r-- | training/crf/mpi_adagrad_optimize.cc | 14 |
1 file changed, 7 insertions, 7 deletions
```diff
diff --git a/training/crf/mpi_adagrad_optimize.cc b/training/crf/mpi_adagrad_optimize.cc
index 39bd763e..bac57324 100644
--- a/training/crf/mpi_adagrad_optimize.cc
+++ b/training/crf/mpi_adagrad_optimize.cc
@@ -157,11 +157,11 @@ struct TrainingObserver : public DecoderObserver {
 
   void GetGradient(SparseVector<double>* g) const {
     g->clear();
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& gi : acc_grad) {
 #else
     for (FastSparseVector<prob_t>::const_iterator it = acc_grad.begin(); it != acc_grad.end(); ++it) {
-      pair<unsigned, double>& gi = *it;
+      const pair<unsigned, prob_t>& gi = *it;
 #endif
       g->set_value(gi.first, -gi.second.as_float());
     }
@@ -190,7 +190,7 @@ class AdaGradOptimizer {
     G() {}
   void update(const SparseVector<double>& g, vector<double>* x, SparseVector<double>* sx) {
     if (x->size() > G.size()) G.resize(x->size(), 0.0);
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& gi : g) {
 #else
     for (SparseVector<double>::const_iterator it = g.begin(); it != g.end(); ++it) {
@@ -220,7 +220,7 @@ class AdaGradL1Optimizer {
       G.resize(x->size(), 0.0);
       u.resize(x->size(), 0.0);
     }
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& gi : g) {
 #else
     for (SparseVector<double>::const_iterator it = g.begin(); it != g.end(); ++it) {
@@ -236,11 +236,11 @@ class AdaGradL1Optimizer {
     // compute updates (avoid invalidating iterators by putting them all
     // in the vector vupdate and applying them after this)
     vector<pair<unsigned, double>> vupdate;
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& xi : *sx) {
 #else
-    for (SparseVector<double>::const_iterator it = sx->begin(); it != sx->end(); ++it) {
-      const pair<unsigned,double>& gi = *it;
+    for (SparseVector<double>::iterator it = sx->begin(); it != sx->end(); ++it) {
+      const pair<unsigned,double>& xi = *it;
 #endif
       double z = fabs(u[xi.first] / t) - lambda;
       double s = 1;
```
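The pattern applied throughout the patch is to enable the C++11 range-based for loop only when the compiler is a GCC release newer than 4.4, and to fall back to an explicit iterator loop (binding a correctly typed `const` reference to `*it`) otherwise. The following is a minimal, self-contained sketch of that pattern, not code from this repository: `std::map` stands in for the `SparseVector`/`FastSparseVector` containers, and `HAVE_CXX11` is assumed to be supplied by the build system as it is in this codebase.

```cpp
#include <cstdio>
#include <map>
#include <utility>

// HAVE_CXX11 is assumed to be defined by the build system when compiling in
// C++11 mode; default to 0 here so this sketch is self-contained.
#ifndef HAVE_CXX11
#define HAVE_CXX11 0
#endif

// Sum the values in a map. The range-based for loop is used only when
// HAVE_CXX11 is set AND the compiler is GCC newer than 4.4; a preprocessor
// that does not define __GNUC__/__GNUC_MINOR__ treats them as 0 in the #if,
// so such compilers also take the explicit-iterator fallback.
double Sum(const std::map<unsigned, double>& m) {
  double total = 0.0;
#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
  for (auto& kv : m) {  // kv deduces to const pair<const unsigned, double>&
#else
  for (std::map<unsigned, double>::const_iterator it = m.begin();
       it != m.end(); ++it) {
    // Bind a const reference of the container's exact value_type to *it.
    // A non-const reference or a mismatched element type will not compile,
    // which is the kind of error the patch fixes in its fallback branches.
    const std::pair<const unsigned, double>& kv = *it;
#endif
    total += kv.second;
  }
  return total;
}

int main() {
  std::map<unsigned, double> m;
  m[1] = 2.5;
  m[7] = 4.5;
  std::printf("%g\n", Sum(m));  // prints 7
  return 0;
}
```

Keeping both branches inside a single `#if`/`#else`/`#endif` around just the loop header, as the patch does, means the loop body is shared; only the iteration syntax differs between the two compilation paths.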