diff --git a/src/smpi/colls/allreduce-smp-rsag-rab.c b/src/smpi/colls/allreduce-smp-rsag-rab.c
index 8ccb479e18..1ac827e5ed 100644
--- a/src/smpi/colls/allreduce-smp-rsag-rab.c
+++ b/src/smpi/colls/allreduce-smp-rsag-rab.c
@@ -1,14 +1,15 @@
-#include "colls.h"
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
 /*
  * implemented by Pitch Patarasuk, 07/01/2007
  */
+#include "colls_private.h"
 //#include <star-reduction.c>
 
-/* change number of core per smp-node
-   we assume that number of core per process will be the same for all implementations */
-#ifndef NUM_CORE
-#define NUM_CORE 8
-#endif
 
 /*
 This fucntion performs all-reduce operation as follow.
@@ -23,16 +24,26 @@ int smpi_coll_tuned_allreduce_smp_rsag_rab(void *sbuf, void *rbuf, int count,
 {
   int comm_size, rank;
   void *tmp_buf;
-  int tag = 50;
+  int tag = COLL_TAG_ALLREDUCE;
   int mask, src, dst;
   MPI_Status status;
-  int num_core = NUM_CORE;
+  if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){
+    smpi_comm_init_smp(comm);
+  }
+  int num_core=1;
+  if (smpi_comm_is_uniform(comm)){
+    num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm));
+  }
 
-  MPI_Comm_size(comm, &comm_size);
-  MPI_Comm_rank(comm, &rank);
+  comm_size = smpi_comm_size(comm);
+
+  if((comm_size&(comm_size-1)))
+    THROWF(arg_error,0, "allreduce smp rsag rab algorithm can't be used with non power of two number of processes ! ");
+
+  rank = smpi_comm_rank(comm);
   MPI_Aint extent;
-  MPI_Type_extent(dtype, &extent);
-  tmp_buf = (void *) malloc(count * extent);
+  extent = smpi_datatype_get_extent(dtype);
+  tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent);
 
   int intra_rank, inter_rank;
   intra_rank = rank % num_core;
@@ -42,7 +53,7 @@ int smpi_coll_tuned_allreduce_smp_rsag_rab(void *sbuf, void *rbuf, int count,
 
   int inter_comm_size = (comm_size + num_core - 1) / num_core;
 
-  MPI_Sendrecv(sbuf, count, dtype, rank, tag,
+  smpi_mpi_sendrecv(sbuf, count, dtype, rank, tag,
                rbuf, count, dtype, rank, tag, comm, &status);
 
   // SMP_binomial_reduce
@@ -52,14 +63,14 @@ int smpi_coll_tuned_allreduce_smp_rsag_rab(void *sbuf, void *rbuf, int count,
       src = (inter_rank * num_core) + (intra_rank | mask);
       //      if (src < ((inter_rank + 1) * num_core)) {
       if (src < comm_size) {
-        MPI_Recv(tmp_buf, count, dtype, src, tag, comm, &status);
-        star_reduction(op, tmp_buf, rbuf, &count, &dtype);
+        smpi_mpi_recv(tmp_buf, count, dtype, src, tag, comm, &status);
+        smpi_op_apply(op, tmp_buf, rbuf, &count, &dtype);
         //printf("Node %d recv from node %d when mask is %d\n", rank, src, mask);
       }
     } else {
       dst = (inter_rank * num_core) + (intra_rank & (~mask));
-      MPI_Send(rbuf, count, dtype, dst, tag, comm);
+      smpi_mpi_send(rbuf, count, dtype, dst, tag, comm);
       //printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
       break;
     }
@@ -78,19 +89,14 @@ int smpi_coll_tuned_allreduce_smp_rsag_rab(void *sbuf, void *rbuf, int count,
   recv_chunk = extent * count / (comm_size / num_core);
 
   mask = 1;
-  i = 0;
   curr_count = count / 2;
   int phase = 0;
   base_offset = 0;
-  send_base_offset = 0;
-  recv_base_offset = 0;
 
   while (mask < (comm_size / num_core)) {
     dst = inter_rank ^ mask;
 
     // compute offsets
-    send_base_offset = base_offset;
-
     // right-handside
     if (inter_rank & mask) {
       recv_base_offset = base_offset + curr_count;
@@ -108,11 +114,11 @@ int smpi_coll_tuned_allreduce_smp_rsag_rab(void *sbuf, void *rbuf, int count,
     //    if (rank==7)
     //    printf("node %d send to %d in phase %d s_offset = %d r_offset = %d count = %d\n",rank,dst,phase, send_offset, recv_offset, curr_count);
 
-    MPI_Sendrecv((char *)rbuf + send_offset, curr_count, dtype, (dst * num_core), tag,
+    smpi_mpi_sendrecv((char *)rbuf + send_offset, curr_count, dtype, (dst * num_core), tag,
                  tmp_buf, curr_count, dtype, (dst * num_core), tag, comm, &status);
 
-    star_reduction(op, tmp_buf, (char *)rbuf + recv_offset, &curr_count, &dtype);
+    smpi_op_apply(op, tmp_buf, (char *)rbuf + recv_offset, &curr_count, &dtype);
 
     mask *= 2;
     curr_count /= 2;
@@ -155,7 +161,7 @@ int smpi_coll_tuned_allreduce_smp_rsag_rab(void *sbuf, void *rbuf, int count,
     //    if (rank==7)
     //printf("node %d send to %d in phase %d s_offset = %d r_offset = %d count = %d\n",rank,dst,phase, send_offset, recv_offset, curr_count);
 
-    MPI_Sendrecv((char *)rbuf + send_offset, curr_count, dtype, (dst * num_core), tag,
+    smpi_mpi_sendrecv((char *)rbuf + send_offset, curr_count, dtype, (dst * num_core), tag,
                  (char *)rbuf + recv_offset, curr_count, dtype, (dst * num_core), tag,
                  comm, &status);
 
@@ -181,7 +187,7 @@ int smpi_coll_tuned_allreduce_smp_rsag_rab(void *sbuf, void *rbuf, int count,
     if (intra_rank & mask) {
       src = (inter_rank * num_core) + (intra_rank - mask);
       //printf("Node %d recv from node %d when mask is %d\n", rank, src, mask);
-      MPI_Recv(rbuf, count, dtype, src, tag, comm, &status);
+      smpi_mpi_recv(rbuf, count, dtype, src, tag, comm, &status);
       break;
     }
     mask <<= 1;
@@ -194,12 +200,12 @@ int smpi_coll_tuned_allreduce_smp_rsag_rab(void *sbuf, void *rbuf, int count,
       dst = (inter_rank * num_core) + (intra_rank + mask);
       if (dst < comm_size) {
         //printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
-        MPI_Send(rbuf, count, dtype, dst, tag, comm);
+        smpi_mpi_send(rbuf, count, dtype, dst, tag, comm);
       }
       mask >>= 1;
     }
 
-  free(tmp_buf);
+  smpi_free_tmp_buffer(tmp_buf);
   return MPI_SUCCESS;
 }
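
Note (illustration, not part of the patch): the guard added above rejects communicators whose size is not a power of two via the classic n & (n - 1) bit test, and the SMP mapping splits the global rank into an on-node and a node-level rank with a plain divide/modulo. Below is a minimal standalone C sketch of both ideas; the num_core value of 4 and all names here are assumed for the example and do not come from SimGrid.

#include <stdio.h>

/* n > 0 is a power of two exactly when clearing its lowest set bit
 * leaves zero; this is the same test as the patch's
 * if((comm_size&(comm_size-1))) guard (which throws when nonzero). */
static int is_power_of_two(int n)
{
  return n > 0 && (n & (n - 1)) == 0;
}

int main(void)
{
  int num_core = 4;                      /* assumed cores per SMP node */
  for (int rank = 0; rank < 8; rank++) {
    int intra_rank = rank % num_core;    /* position inside the node  */
    int inter_rank = rank / num_core;    /* which node                */
    printf("rank %d -> node %d, core %d\n", rank, inter_rank, intra_rank);
  }
  printf("8 is %sa power of two\n", is_power_of_two(8) ? "" : "not ");
  return 0;
}

The bit test works because a power of two has exactly one set bit, so n & (n - 1) can only be zero for such values; the algorithm needs this property for its recursive halving/doubling phases across nodes.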