X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/def940c56dde95a61d9ca67677ee73547c541ff9..64d0f9debf71f140a2faa41d2ee6e4892ba37bb9:/src/smpi/colls/alltoallv-pair-mpi-barrier.c

diff --git a/src/smpi/colls/alltoallv-pair-mpi-barrier.c b/src/smpi/colls/alltoallv-pair-mpi-barrier.c
new file mode 100644
index 0000000000..9944493844
--- /dev/null
+++ b/src/smpi/colls/alltoallv-pair-mpi-barrier.c
@@ -0,0 +1,50 @@
+#include "colls_private.h"
+/*****************************************************************************
+
+ * Function: alltoallv_pair_mpi_barrier
+
+ * Return: int
+
+ * Inputs:
+    send_buff: send input buffer
+    send_counts, send_disps: per-destination element counts and displacements
+    send_type: data type of elements being sent
+    recv_buff: receive output buffer
+    recv_counts, recv_disps: per-source element counts and displacements
+    recv_type: data type of elements being received
+    comm: communicator
+
+ * Descrp: Function works when P is a power of two. In each of the P
+           phases, nodes paired by XOR exchange their data. An MPI barrier
+           is inserted between every two consecutive phases.
+
+ * Author: Ahmad Faraj
+
+ ****************************************************************************/
+int
+smpi_coll_tuned_alltoallv_pair_mpi_barrier(void *send_buff, int *send_counts, int *send_disps,
+                                           MPI_Datatype send_type,
+                                           void *recv_buff, int *recv_counts, int *recv_disps,
+                                           MPI_Datatype recv_type, MPI_Comm comm)
+{
+  MPI_Status s;
+  MPI_Aint send_chunk, recv_chunk;
+  int i, src, dst, rank, num_procs;
+  int tag = 101;
+  char *send_ptr = (char *) send_buff;
+  char *recv_ptr = (char *) recv_buff;
+
+  rank = smpi_comm_rank(comm);
+  num_procs = smpi_comm_size(comm);
+  send_chunk = smpi_datatype_get_extent(send_type);
+  recv_chunk = smpi_datatype_get_extent(recv_type);
+
+  for (i = 0; i < num_procs; i++) {
+    /* XOR pairing: in phase i, rank r exchanges with rank r ^ i */
+    src = dst = rank ^ i;
+    smpi_mpi_barrier(comm);
+    smpi_mpi_sendrecv(send_ptr + send_disps[dst] * send_chunk, send_counts[dst], send_type, dst,
+                      tag, recv_ptr + recv_disps[src] * recv_chunk, recv_counts[src], recv_type,
+                      src, tag, comm, &s);
+  }
+  return MPI_SUCCESS;
+}
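
The routine added by this patch is the variable-count (alltoallv) form of the pairwise exchange algorithm: in phase i every rank exchanges data with rank ^ i, with a barrier between phases. The XOR pairing is an involution ((r ^ i) ^ i == r), so each phase is a clean pairing; it only stays inside the communicator when the number of processes is a power of two, which is why the header comment restricts P. Below is a minimal sketch, not part of the patch, of an MPI program whose MPI_Alltoallv call this implementation can serve inside SMPI. The uniform one-int-per-peer layout and the --cfg=smpi/alltoallv:pair_mpi_barrier selector name are assumptions for illustration, not taken from the commit; check the SimGrid version you run against.

/* Minimal MPI_Alltoallv driver (illustrative sketch, not part of the patch).
 * Each rank sends exactly one int to every rank, so counts are all 1 and
 * displacements are just the peer indices. Run under smpirun, e.g. with
 * --cfg=smpi/alltoallv:pair_mpi_barrier (option name assumed).            */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
  int rank, size, i;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int *sendbuf = malloc(size * sizeof(int));
  int *recvbuf = malloc(size * sizeof(int));
  int *counts  = malloc(size * sizeof(int));
  int *disps   = malloc(size * sizeof(int));

  for (i = 0; i < size; i++) {
    sendbuf[i] = rank * 100 + i;  /* value destined for rank i            */
    counts[i]  = 1;               /* one int per peer                     */
    disps[i]   = i;               /* contiguous layout in both buffers    */
  }

  MPI_Alltoallv(sendbuf, counts, disps, MPI_INT,
                recvbuf, counts, disps, MPI_INT, MPI_COMM_WORLD);

  for (i = 0; i < size; i++)
    printf("rank %d got %d from rank %d\n", rank, recvbuf[i], i);

  free(sendbuf); free(recvbuf); free(counts); free(disps);
  MPI_Finalize();
  return 0;
}

With power-of-two process counts, each MPI_Alltoallv in such a program maps onto num_procs barrier-separated sendrecv phases in the patched routine; the barriers serialize the phases, which reduces network contention at the cost of extra synchronization.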