| author | Chris Dyer <redpony@gmail.com> | 2014-01-15 20:33:07 -0500 |
|---|---|---|
| committer | Chris Dyer <redpony@gmail.com> | 2014-01-15 20:33:07 -0500 |
| commit | aa5520768619eb15e0c1c70addbfaff0f150c345 (patch) | |
| tree | b81fa2e3d1ae3e79ece18024aae6a499f4d3f3ea /training/crf | |
| parent | b1628d8bedb8a4145c77ed5917999379fc99a5d5 (diff) | |
| parent | 0ddc951bc8990c1253e6a873848ed69321615e0a (diff) | |
Merge branch 'master' of https://github.com/redpony/cdec
Diffstat (limited to 'training/crf')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | training/crf/mpi_adagrad_optimize.cc | 14 |

1 file changed, 7 insertions, 7 deletions
```diff
diff --git a/training/crf/mpi_adagrad_optimize.cc b/training/crf/mpi_adagrad_optimize.cc
index 39bd763e..bac57324 100644
--- a/training/crf/mpi_adagrad_optimize.cc
+++ b/training/crf/mpi_adagrad_optimize.cc
@@ -157,11 +157,11 @@ struct TrainingObserver : public DecoderObserver {
   void GetGradient(SparseVector<double>* g) const {
     g->clear();
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& gi : acc_grad) {
 #else
     for (FastSparseVector<prob_t>::const_iterator it = acc_grad.begin(); it != acc_grad.end(); ++it) {
-      pair<unsigned, double>& gi = *it;
+      const pair<unsigned, prob_t>& gi = *it;
 #endif
       g->set_value(gi.first, -gi.second.as_float());
     }
@@ -190,7 +190,7 @@ class AdaGradOptimizer {
       G() {}
   void update(const SparseVector<double>& g, vector<double>* x, SparseVector<double>* sx) {
     if (x->size() > G.size()) G.resize(x->size(), 0.0);
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& gi : g) {
 #else
     for (SparseVector<double>::const_iterator it = g.begin(); it != g.end(); ++it) {
@@ -220,7 +220,7 @@ class AdaGradL1Optimizer {
       G.resize(x->size(), 0.0);
       u.resize(x->size(), 0.0);
     }
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& gi : g) {
 #else
     for (SparseVector<double>::const_iterator it = g.begin(); it != g.end(); ++it) {
@@ -236,11 +236,11 @@ class AdaGradL1Optimizer {
     // compute updates (avoid invalidating iterators by putting them all
     // in the vector vupdate and applying them after this)
     vector<pair<unsigned, double>> vupdate;
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& xi : *sx) {
 #else
-    for (SparseVector<double>::const_iterator it = sx->begin(); it != sx->end(); ++it) {
-      const pair<unsigned,double>& gi = *it;
+    for (SparseVector<double>::iterator it = sx->begin(); it != sx->end(); ++it) {
+      const pair<unsigned,double>& xi = *it;
 #endif
       double z = fabs(u[xi.first] / t) - lambda;
       double s = 1;
```
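The substantive change, repeated across all four hunks, tightens the `#if HAVE_CXX11` guard so the range-based `for` branch is only compiled when the compiler is GCC newer than 4.4; older compilers fall through to the explicit-iterator branch (the remaining changes fix the type and name of the pair bound in that fallback). Below is a minimal sketch of the same dual-path pattern, using `std::map` instead of cdec's `SparseVector` so it stands alone; `HAVE_CXX11` is assumed to be defined by the build system, as in cdec:

```cpp
#include <iostream>
#include <map>
#include <utility>

// Same guard as the patch: take the range-based-for branch only when C++11
// is enabled AND the compiler is GCC newer than 4.4; otherwise fall back to
// explicit iterators. Both branches bind the element to the same name (kv),
// so the shared loop body compiles either way.
void print_features(const std::map<unsigned, double>& m) {
#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
  for (auto& kv : m) {
#else
  for (std::map<unsigned, double>::const_iterator it = m.begin();
       it != m.end(); ++it) {
    const std::pair<const unsigned, double>& kv = *it;
#endif
    std::cout << kv.first << "\t" << kv.second << "\n";
  }
}
```

Note that the guard keys on GCC's version macros, which are undefined (and thus 0 in `#if`) on other compilers, so non-GCC builds always take the iterator branch; the patched code behaves the same way.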

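For context on what the patched `update()` methods compute: `G` holds AdaGrad's per-coordinate sum of squared gradients, and the L1 variant additionally keeps `u`, the running sum of raw gradients, so each weight can be rewritten in closed form every step; the `z = fabs(u[xi.first] / t) - lambda` line at the bottom of the last hunk is the start of that closed-form test. A hedged, dense-vector sketch of both updates follows, in the dual-averaging form of Duchi et al.'s AdaGrad; `eta` and the function names here are illustrative, not cdec's API:

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Plain AdaGrad step (sketch): each coordinate gets its own learning rate
// eta / sqrt(G_i), where G_i accumulates squared gradients over time.
void adagrad_step(const std::vector<double>& g, std::vector<double>& G,
                  std::vector<double>& x, double eta) {
  for (std::size_t i = 0; i < g.size(); ++i) {
    G[i] += g[i] * g[i];                        // accumulate squared gradient
    if (G[i] > 0.0) x[i] -= eta * g[i] / std::sqrt(G[i]);
  }
}

// AdaGrad with L1 (dual-averaging form, sketch): u_i is the running sum of
// raw gradients after t steps. The weight is clipped to exactly zero while
// the average gradient magnitude |u_i / t| stays below lambda, which is the
// z = fabs(u[i] / t) - lambda test visible in the patch.
double adagrad_l1_weight(double u_i, double G_i, double t, double lambda,
                         double eta) {
  double z = std::fabs(u_i / t) - lambda;
  if (z <= 0.0 || G_i == 0.0) return 0.0;       // feature stays at zero
  double s = (u_i > 0.0) ? 1.0 : -1.0;
  return -s * eta * t * z / std::sqrt(G_i);     // move opposite the gradient sum
}
```

The clipping step is why the L1 variant yields sparse models: a feature's weight stays exactly zero until its average gradient magnitude exceeds `lambda`, and the `vupdate` buffer in the patch exists so these weight rewrites can be applied after iteration, avoiding invalidated `SparseVector` iterators.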