X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/d155fd69fa99c97b3a9c86bb7f2e472c2e7332df..cd9affe6152d6bbec19a72ea6fe26ab9407b51b7:/src/smpi/colls/allgather-rhv.c

diff --git a/src/smpi/colls/allgather-rhv.c b/src/smpi/colls/allgather-rhv.c
index dab0e6ee64..c2e0cb43ff 100644
--- a/src/smpi/colls/allgather-rhv.c
+++ b/src/smpi/colls/allgather-rhv.c
@@ -1,4 +1,10 @@
-#include "colls.h"
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "colls_private.h"
 
 // now only works with a power-of-two number of processes
 
@@ -15,25 +21,32 @@ smpi_coll_tuned_allgather_rhv(void *sbuf, int send_count,
   int i, dst, send_base_offset, recv_base_offset, send_chunk, recv_chunk,
       send_offset, recv_offset;
   int rank, num_procs;
-  int tag = 50;
+  int tag = COLL_TAG_ALLGATHER;
   int mask;
   int curr_count;
 
   // get size of the communicator, followed by rank
-  MPI_Comm_size(comm, &num_procs);
-  MPI_Comm_rank(comm, &rank);
+  num_procs = smpi_comm_size(comm);
+
+  if (num_procs & (num_procs - 1))
+    THROWF(arg_error, 0, "allgather rhv algorithm can't be used with a non-power-of-two number of processes!");
+
+  rank = smpi_comm_rank(comm);
 
   // get size of single element's type for send buffer and recv buffer
-  MPI_Type_extent(send_type, &s_extent);
-  MPI_Type_extent(recv_type, &r_extent);
+  s_extent = smpi_datatype_get_extent(send_type);
+  r_extent = smpi_datatype_get_extent(recv_type);
 
   // multiply size of each element by number of elements to send or recv
   send_chunk = s_extent * send_count;
   recv_chunk = r_extent * recv_count;
 
-  if (send_chunk != recv_chunk)
-    return MPI_Allgather(sbuf, send_count, send_type, rbuf, recv_count,
-                         recv_type, comm);
+  if (send_chunk != recv_chunk) {
+    XBT_WARN("MPI_allgather_rhv: falling back to the default MPI_allgather.");
+    smpi_mpi_allgather(sbuf, send_count, send_type, rbuf, recv_count,
+                       recv_type, comm);
+    return MPI_SUCCESS;
+  }
 
   // compute starting offset location to perform local copy
   int size = num_procs / 2;
@@ -52,7 +65,7 @@ smpi_coll_tuned_allgather_rhv(void *sbuf, int send_count,
 
   // perform a remote copy
   dst = base_offset;
-  MPI_Sendrecv(sbuf, send_count, send_type, dst, tag,
+  smpi_mpi_sendrecv(sbuf, send_count, send_type, dst, tag,
                (char *)rbuf + base_offset * recv_chunk, recv_count, recv_type,
                dst, tag, comm, &status);
 
@@ -78,7 +91,7 @@ smpi_coll_tuned_allgather_rhv(void *sbuf, int send_count,
 
     // printf("node %d send to %d in phase %d s_offset = %d r_offset = %d count = %d\n", rank, dst, phase, send_base_offset, recv_base_offset, curr_count);
 
-    MPI_Sendrecv((char *)rbuf + send_offset, curr_count, recv_type, dst, tag,
+    smpi_mpi_sendrecv((char *)rbuf + send_offset, curr_count, recv_type, dst, tag,
                  (char *)rbuf + recv_offset, curr_count, recv_type, dst, tag,
                  comm, &status);
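
A note on the guard this patch adds: num_procs & (num_procs - 1) clears the
lowest set bit of num_procs, so the expression is non-zero exactly when
num_procs is not a power of two. A minimal stand-alone sketch of the same
test (the helper name is hypothetical, not part of the patch):

    #include <stdio.h>

    /* Hypothetical helper illustrating the bit trick used in the patch:
     * n & (n - 1) clears the lowest set bit, so the result is zero
     * exactly when n has a single bit set, i.e. is a power of two. */
    static int is_power_of_two(int n)
    {
      return n > 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
      for (int n = 1; n <= 9; n++)
        printf("%d -> %s\n", n, is_power_of_two(n) ? "power of two" : "not");
      return 0;
    }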
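For context on why the power-of-two restriction arises at all: the mask and
curr_count variables visible in the hunks suggest an XOR-pairing exchange
schedule in which each rank swaps its accumulated blocks with rank ^ mask
over log2(P) phases, doubling the exchanged volume each phase; XOR pairing
only forms a perfect matching when P is a power of two. The sketch below
prints that classic schedule as an illustration only -- it is not the
literal SimGrid code, whose offset bookkeeping is more involved:

    #include <stdio.h>

    int main(void)
    {
      int num_procs = 8;                  /* must be a power of two */
      for (int rank = 0; rank < num_procs; rank++) {
        int curr_count = 1;               /* data blocks held so far */
        for (int mask = 1; mask < num_procs; mask <<= 1) {
          int dst = rank ^ mask;          /* partner for this phase */
          printf("rank %d: exchange %d block(s) with rank %d (mask %d)\n",
                 rank, curr_count, dst, mask);
          curr_count *= 2;                /* holdings double each phase */
        }
      }
      return 0;
    }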