X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/f35db13138682b1be530a509d1eeadeafff84ca7..6d004c352f7b26fba38486001f874e65466b5bee:/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp

diff --git a/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp b/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp
index 77baf42248..e4dc8a0c75 100644
--- a/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp
+++ b/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2019. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
@@ -37,15 +37,15 @@
  * copyright file COPYRIGHT in the top level MVAPICH2 directory.
  *
  */
-
+
 //correct on stampede
 #define MV2_ALLTOALL_THROTTLE_FACTOR 4
-
-#include "../colls_private.h"
+
+#include "../colls_private.hpp"
 namespace simgrid{
 namespace smpi{
 int Coll_alltoall_mvapich2_scatter_dest::alltoall(
-                            void *sendbuf,
+                            const void *sendbuf,
                             int sendcount,
                             MPI_Datatype sendtype,
                             void *recvbuf,
@@ -57,18 +57,16 @@ int Coll_alltoall_mvapich2_scatter_dest::alltoall(
     MPI_Aint sendtype_extent = 0, recvtype_extent = 0;
     int mpi_errno=MPI_SUCCESS;
     int dst, rank;
-    MPI_Request *reqarray;
-    MPI_Status *starray;
-
+
     if (recvcount == 0) return MPI_SUCCESS;
-
+
     comm_size =  comm->size();
     rank = comm->rank();
-
+
     /* Get extent of send and recv types */
     recvtype_extent = recvtype->get_extent();
     sendtype_extent = sendtype->get_extent();
-
+
     /* Medium-size message. Use isend/irecv with scattered
      destinations. Use Tony Ladd's modification to post only
      a small number of isends/irecvs at a time. */
@@ -83,20 +81,20 @@
     there are only a few isend/irecvs left)
     */
        int ii, ss, bblock;
-
+
        //Stampede is configured with
        bblock = MV2_ALLTOALL_THROTTLE_FACTOR;//mv2_coll_param.alltoall_throttle_factor;
-
+
        if (bblock >= comm_size) bblock = comm_size;
        /* If throttle_factor is n, each process posts n pairs of isend/irecv
         in each iteration. */
-
+
        /* FIXME: This should use the memory macros (there are storage
        leaks here if there is an error, for example) */
-       reqarray= (MPI_Request*)xbt_malloc(2*bblock*sizeof(MPI_Request));
-
-       starray=(MPI_Status *)xbt_malloc(2*bblock*sizeof(MPI_Status));
-
+       MPI_Request* reqarray = new MPI_Request[2 * bblock];
+
+       MPI_Status* starray = new MPI_Status[2 * bblock];
+
        for (ii=0; ii<comm_size; ii+=bblock) {
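
Context for this patch: the function body above (the diff is cut off at the `for` loop in this view) is MVAPICH2's throttled, scattered-destination alltoall. Each iteration posts at most bblock isend/irecv pairs, and each rank sweeps the peer list starting from a different offset so that no single destination is flooded. Below is a minimal standalone sketch of that pattern in plain MPI C++, not SimGrid's internal Coll_* API; the function name throttled_alltoall, the tag value 0, and the use of MPI_STATUSES_IGNORE (the real code keeps a starray so it can inspect per-request errors) are illustrative assumptions.

#include <mpi.h>

#define THROTTLE_FACTOR 4 /* mirrors MV2_ALLTOALL_THROTTLE_FACTOR above */

int throttled_alltoall(const void* sendbuf, int sendcount, MPI_Datatype sendtype,
                       void* recvbuf, int recvcount, MPI_Datatype recvtype,
                       MPI_Comm comm)
{
  int comm_size, rank;
  MPI_Comm_size(comm, &comm_size);
  MPI_Comm_rank(comm, &rank);

  if (recvcount == 0)
    return MPI_SUCCESS;

  MPI_Aint lb, sendtype_extent, recvtype_extent;
  MPI_Type_get_extent(sendtype, &lb, &sendtype_extent);
  MPI_Type_get_extent(recvtype, &lb, &recvtype_extent);

  int bblock = THROTTLE_FACTOR;
  if (bblock >= comm_size)
    bblock = comm_size;

  /* One irecv plus one isend per peer of the current block. */
  MPI_Request* reqarray = new MPI_Request[2 * bblock];

  for (int ii = 0; ii < comm_size; ii += bblock) {
    int ss = (comm_size - ii < bblock) ? comm_size - ii : bblock;

    /* Post receives first so matching sends find them already pending. */
    for (int i = 0; i < ss; i++) {
      int src = (rank + i + ii) % comm_size; /* scattered source */
      MPI_Irecv((char*)recvbuf + (MPI_Aint)src * recvcount * recvtype_extent,
                recvcount, recvtype, src, /*tag=*/0, comm, &reqarray[i]);
    }
    /* Scatter destinations: rank r starts its sweep at a different peer
       than rank r+1, so no process is hit by everyone at once. */
    for (int i = 0; i < ss; i++) {
      int dst = (rank - i - ii + comm_size) % comm_size;
      MPI_Isend((const char*)sendbuf + (MPI_Aint)dst * sendcount * sendtype_extent,
                sendcount, sendtype, dst, /*tag=*/0, comm, &reqarray[ss + i]);
    }
    /* Draining the whole block before posting the next one caps the
       number of outstanding requests at 2*bblock. */
    MPI_Waitall(2 * ss, reqarray, MPI_STATUSES_IGNORE);
  }

  delete[] reqarray;
  return MPI_SUCCESS;
}

Bounding outstanding requests to 2*bblock (here 8, with the Stampede-tuned factor of 4) trades some communication overlap for protection against unexpected-message flooding at large communicator sizes, which is why the patch keeps the throttle while modernizing the allocations from xbt_malloc to new[].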