From 2aa002bee551b5c61b3b20290fd2b1339a791b27 Mon Sep 17 00:00:00 2001
From: Chris Dyer
Date: Thu, 14 Apr 2011 22:22:22 -0400
Subject: mpi optimizations

---
 training/mpi_batch_optimize.cc | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/training/mpi_batch_optimize.cc b/training/mpi_batch_optimize.cc
index 11be8bbe..5a6bf301 100644
--- a/training/mpi_batch_optimize.cc
+++ b/training/mpi_batch_optimize.cc
@@ -4,6 +4,7 @@
 #include
 #include

+#include "config.h"
 #ifdef HAVE_MPI
 #include <boost/mpi/timer.hpp>
 #include <boost/mpi.hpp>
@@ -333,20 +334,23 @@ int main(int argc, char** argv) {
   TrainingObserver observer;
   while (!converged) {
     observer.Reset();
+#ifdef HAVE_MPI
+    world.barrier();
+#endif
     if (rank == 0) {
       cerr << "Starting decoding... (~" << corpus.size() << " sentences / proc)\n";
     }
     decoder->SetWeights(lambdas);
     for (int i = 0; i < corpus.size(); ++i)
       decoder->Decode(corpus[i], &observer);
-
+    cerr << " process " << rank << '/' << size << " done\n";
     fill(gradient.begin(), gradient.end(), 0);
     fill(rcv_grad.begin(), rcv_grad.end(), 0);
     observer.SetLocalGradientAndObjective(&gradient, &objective);
     double to = 0;
 #ifdef HAVE_MPI
-    mpi::reduce(world, &gradient[0], &rcv_grad[0], gradient.size(), plus<double>(), 0);
+    mpi::reduce(world, &gradient[0], gradient.size(), &rcv_grad[0], plus<double>(), 0);
     mpi::reduce(world, objective, to, plus<double>(), 0);
     swap(gradient, rcv_grad);
     objective = to;
@@ -398,9 +402,8 @@ int main(int argc, char** argv) {
     } // rank == 0
     int cint = converged;
 #ifdef HAVE_MPI
-    mpi::broadcast(world, lambdas, 0);
+    mpi::broadcast(world, &lambdas[0], lambdas.size(), 0);
     mpi::broadcast(world, cint, 0);
-    world.barrier();
 #endif
     converged = cint;
   }
--
cgit v1.2.3
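
Note on the argument-order fix above: Boost.MPI's pointer overloads take the element count immediately after the input buffer, i.e. reduce(comm, in_values, n, out_values, op, root) and broadcast(comm, values, n, root), which is what the patched calls use. Below is a minimal standalone sketch of those calls; it is not part of cdec or of this patch, and the vector names and sizes are illustrative only.

/*
 * Sketch (assumed, not cdec code) of the Boost.MPI array-form calls the
 * patch switches to:
 *   reduce(comm, in_values, n, out_values, op, root)
 *   broadcast(comm, values, n, root)
 */
#include <functional>
#include <iostream>
#include <vector>
#include <boost/mpi.hpp>
namespace mpi = boost::mpi;

int main(int argc, char** argv) {
  mpi::environment env(argc, argv);
  mpi::communicator world;

  std::vector<double> gradient(4, world.rank() + 1.0);  // per-process partial gradient
  std::vector<double> rcv_grad(gradient.size(), 0.0);   // receive buffer (used on the root)
  double objective = 1.0, summed_objective = 0.0;

  // Element-wise sum of every rank's gradient onto rank 0.
  mpi::reduce(world, &gradient[0], gradient.size(), &rcv_grad[0],
              std::plus<double>(), 0);
  // Scalar sum of the per-process objectives onto rank 0.
  mpi::reduce(world, objective, summed_objective, std::plus<double>(), 0);

  // Rank 0 would update the weights here; broadcast the result back to all ranks.
  mpi::broadcast(world, &rcv_grad[0], rcv_grad.size(), 0);

  if (world.rank() == 0)
    std::cout << "summed objective = " << summed_objective << "\n";
  return 0;
}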