diff options
author | Chris Dyer <cdyer@cs.cmu.edu> | 2011-04-14 22:22:22 -0400 |
---|---|---|
committer | Chris Dyer <cdyer@cs.cmu.edu> | 2011-04-14 22:22:22 -0400 |
commit | 2aa002bee551b5c61b3b20290fd2b1339a791b27 (patch) | |
tree | 8987a224bc77b159bd4770464290b360bda9067f | |
parent | c8e8640664295868ce0efd7530b33b21f22453d5 (diff) |
mpi optimizations
-rw-r--r-- | training/mpi_batch_optimize.cc | 11 |
1 file changed, 7 insertions, 4 deletions
diff --git a/training/mpi_batch_optimize.cc b/training/mpi_batch_optimize.cc
index 11be8bbe..5a6bf301 100644
--- a/training/mpi_batch_optimize.cc
+++ b/training/mpi_batch_optimize.cc
@@ -4,6 +4,7 @@
 #include <cassert>
 #include <cmath>
+#include "config.h"
 #ifdef HAVE_MPI
 #include <boost/mpi/timer.hpp>
 #include <boost/mpi.hpp>
@@ -333,20 +334,23 @@ int main(int argc, char** argv) {
   TrainingObserver observer;
   while (!converged) {
     observer.Reset();
+#ifdef HAVE_MPI
+    world.barrier();
+#endif
     if (rank == 0) { cerr << "Starting decoding... (~" << corpus.size() << " sentences / proc)\n"; }
     decoder->SetWeights(lambdas);
     for (int i = 0; i < corpus.size(); ++i)
       decoder->Decode(corpus[i], &observer);
-
+    cerr << " process " << rank << '/' << size << " done\n";
     fill(gradient.begin(), gradient.end(), 0);
     fill(rcv_grad.begin(), rcv_grad.end(), 0);
     observer.SetLocalGradientAndObjective(&gradient, &objective);
     double to = 0;
 #ifdef HAVE_MPI
-    mpi::reduce(world, &gradient[0], &rcv_grad[0], gradient.size(), plus<double>(), 0);
+    mpi::reduce(world, &gradient[0], gradient.size(), &rcv_grad[0], plus<double>(), 0);
     mpi::reduce(world, objective, to, plus<double>(), 0);
     swap(gradient, rcv_grad);
     objective = to;
@@ -398,9 +402,8 @@ int main(int argc, char** argv) {
   } // rank == 0
   int cint = converged;
 #ifdef HAVE_MPI
-  mpi::broadcast(world, lambdas, 0);
+  mpi::broadcast(world, &lambdas[0], lambdas.size(), 0);
   mpi::broadcast(world, cint, 0);
-  world.barrier();
 #endif
   converged = cint;
 }