X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/488dda0c43d26f0a770e1d5b47f73148e153949b..f89671e0bd7450461d70d5ced6079123e73c2a63:/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp

diff --git a/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp b/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp
index a9a573941b..12f64483fa 100644
--- a/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp
+++ b/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp
@@ -6,12 +6,11 @@
 #include "../colls_private.hpp"
 
 /* IMPLEMENTED BY PITCH PATARASUK
-   Non-topoloty-specific (however, number of cores/node need to be changed)
+   Non-topology-specific (however, number of cores/node need to be changed)
    all-reduce operation designed for smp clusters
    It uses 2-layer communication: binomial for intra-communication
    and rdb for inter-communication*/
-
 
 /* ** NOTE **
    Use -DMPICH2 if this code does not compile.
    MPICH1 code also work on MPICH2 on our cluster and the performance are similar.
@@ -33,7 +32,6 @@ int Coll_allreduce_smp_rdb::allreduce(const void *send_buf, void *recv_buf, int
                                       MPI_Comm comm)
 {
   int comm_size, rank;
-  void *tmp_buf;
   int tag = COLL_TAG_ALLREDUCE;
   int mask, src, dst;
   MPI_Status status;
@@ -58,7 +56,7 @@ int Coll_allreduce_smp_rdb::allreduce(const void *send_buf, void *recv_buf, int
   rank = comm->rank();
   MPI_Aint extent;
   extent = dtype->get_extent();
-  tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent);
+  unsigned char* tmp_buf = smpi_get_tmp_sendbuffer(count * extent);
 
   /* compute intra and inter ranking */
   int intra_rank, inter_rank;
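
The header comment in the hunk above describes a two-layer allreduce: binomial trees inside each SMP node and recursive doubling (rdb) between nodes. The sketch below is a hypothetical, standalone illustration of that structure written against plain MPI, not SimGrid's SMPI internals: the function name allreduce_smp_rdb_sketch, the NUM_CORE constant, and the restriction to MPI_DOUBLE/MPI_SUM with a power-of-two number of nodes are assumptions made for brevity; the real routine is generic over datatype and operation.

/* Hypothetical sketch (not SimGrid's code) of the two-layer scheme:
   binomial reduce inside each node, recursive-doubling allreduce across
   node leaders, then binomial broadcast inside each node.
   Assumes: MPI_DOUBLE/MPI_SUM, comm size a multiple of NUM_CORE,
   and a power-of-two number of nodes. */
#include <mpi.h>
#include <stdlib.h>
#include <string.h>

static const int NUM_CORE = 4; /* cores per node; must match the platform */

static void allreduce_smp_rdb_sketch(const double* send_buf, double* recv_buf,
                                     int count, MPI_Comm comm)
{
  const int tag = 42;
  int rank, size;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  int intra_rank = rank % NUM_CORE; /* position inside the node */
  int inter_rank = rank / NUM_CORE; /* index of the node        */
  int num_nodes  = size / NUM_CORE;

  double* tmp = (double*)malloc(count * sizeof(double));
  memcpy(recv_buf, send_buf, count * sizeof(double));

  /* Layer 1: binomial reduce inside each node; the partial sum ends up on intra_rank 0. */
  for (int mask = 1; mask < NUM_CORE; mask <<= 1) {
    if (intra_rank & mask) {
      MPI_Send(recv_buf, count, MPI_DOUBLE, rank - mask, tag, comm);
      break; /* this core has handed off its contribution */
    } else if (intra_rank + mask < NUM_CORE) {
      MPI_Recv(tmp, count, MPI_DOUBLE, rank + mask, tag, comm, MPI_STATUS_IGNORE);
      for (int i = 0; i < count; i++)
        recv_buf[i] += tmp[i];
    }
  }

  /* Layer 2: recursive doubling among the node leaders (intra_rank 0). Each round
     exchanges the partial result with the leader whose node index differs in one
     bit, so after log2(num_nodes) rounds every leader holds the full sum. */
  if (intra_rank == 0) {
    for (int mask = 1; mask < num_nodes; mask <<= 1) {
      int partner = (inter_rank ^ mask) * NUM_CORE;
      MPI_Sendrecv(recv_buf, count, MPI_DOUBLE, partner, tag,
                   tmp, count, MPI_DOUBLE, partner, tag, comm, MPI_STATUS_IGNORE);
      for (int i = 0; i < count; i++)
        recv_buf[i] += tmp[i];
    }
  }

  /* Layer 1 again: binomial broadcast of the final result inside each node. */
  int mask = 1;
  while (mask < NUM_CORE) {
    if (intra_rank & mask) {
      MPI_Recv(recv_buf, count, MPI_DOUBLE, rank - mask, tag, comm, MPI_STATUS_IGNORE);
      break;
    }
    mask <<= 1;
  }
  mask >>= 1;
  while (mask > 0) {
    if (intra_rank + mask < NUM_CORE)
      MPI_Send(recv_buf, count, MPI_DOUBLE, rank + mask, tag, comm);
    mask >>= 1;
  }

  free(tmp);
}

The appeal of this layering is that the latency-bound inter-node phase takes only log2(num_nodes) exchanges between node leaders, while all other traffic stays within a node.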