+/* $Id$ */
+
+/* smpi_mpi.c --
+ *
+ * Will eventually contain the user-level MPI primitives and their corresponding
+ * internal wrappers. The implementations of these primitives should go in
+ * specific files. For example, SMPI_MPI_Bcast() in this file should call the
+ * wrapper smpi_mpi_bcast(), which decides which implementation to call.
+ * Currently, it calls nary_tree_bcast() in smpi_coll.c. (Stéphane Genaud).
+ */
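+
+/* Call chain, using bcast as the example from above:
+ *
+ *   SMPI_MPI_Bcast()           user entry point (suspends benchmarking)
+ *     -> smpi_mpi_bcast()      internal wrapper (selects an implementation)
+ *       -> nary_tree_bcast()   actual algorithm, in smpi_coll.c
+ */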
+
+
+
#include "private.h"
+#include "smpi_coll_private.h"
+#include "smpi_mpi_dt_private.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi,
"Logging specific to SMPI (mpi)");
int SMPI_MPI_Init(int *argc, char ***argv)
{
- smpi_process_init(argc,argv);
+ smpi_process_init(argc, argv);
smpi_bench_begin();
return MPI_SUCCESS;
}
return retval;
}
-int SMPI_MPI_Type_size(MPI_Datatype datatype, size_t * size)
-{
- int retval = MPI_SUCCESS;
-
- smpi_bench_end();
- if (NULL == datatype) {
- retval = MPI_ERR_TYPE;
- } else if (NULL == size) {
- retval = MPI_ERR_ARG;
- } else {
- *size = datatype->size;
- }
-
- smpi_bench_begin();
-
- return retval;
-}
+/**
+ * Barrier
+ **/
int SMPI_MPI_Barrier(MPI_Comm comm)
{
int retval = MPI_SUCCESS;
+  int arity = 4;
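+  /* arity = fan-out of the n-ary tree used by nary_tree_barrier() below */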
smpi_bench_end();
if (NULL == comm) {
retval = MPI_ERR_COMM;
} else {
-    retval = smpi_mpi_barrier(comm);
+
+    /*
+     * Original implementation:
+     *   retval = smpi_mpi_barrier(comm);
+     * It is unrealistic: it just cond_waits, so it costs no simulated time.
+     */
+    retval = nary_tree_barrier(comm, arity);
}
smpi_bench_begin();
return retval;
}
+
+
int SMPI_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src,
int tag, MPI_Comm comm, MPI_Request * request)
{
int retval = MPI_SUCCESS;
+ int rank;
smpi_bench_end();
-
- retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
+ rank = smpi_mpi_comm_rank(comm);
+ retval = smpi_create_request(buf, count, datatype, src, rank, tag, comm,
request);
if (NULL != *request && MPI_SUCCESS == retval) {
retval = smpi_mpi_irecv(*request);
int tag, MPI_Comm comm, MPI_Status * status)
{
int retval = MPI_SUCCESS;
+ int rank;
smpi_mpi_request_t request;
smpi_bench_end();
- retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
+ rank = smpi_mpi_comm_rank(comm);
+ retval = smpi_create_request(buf, count, datatype, src, rank, tag, comm,
&request);
if (NULL != request && MPI_SUCCESS == retval) {
retval = smpi_mpi_irecv(request);
int tag, MPI_Comm comm, MPI_Request * request)
{
int retval = MPI_SUCCESS;
+ int rank;
smpi_bench_end();
- retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
+ rank = smpi_mpi_comm_rank(comm);
+ retval = smpi_create_request(buf, count, datatype, rank, dst, tag, comm,
request);
if (NULL != *request && MPI_SUCCESS == retval) {
retval = smpi_mpi_isend(*request);
return retval;
}
+/**
+ * MPI_Send user level
+ **/
int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
int tag, MPI_Comm comm)
{
int retval = MPI_SUCCESS;
+ int rank;
smpi_mpi_request_t request;
smpi_bench_end();
- retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
+ rank = smpi_mpi_comm_rank(comm);
+ retval = smpi_create_request(buf, count, datatype, rank, dst, tag, comm,
&request);
if (NULL != request && MPI_SUCCESS == retval) {
retval = smpi_mpi_isend(request);
return retval;
}
+
+/**
+ * MPI_Sendrecv internal level
+ **/
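+/* Both transfers are posted as non-blocking requests before either wait,
+ * so a symmetric exchange between two ranks cannot deadlock. */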
+int smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
+ MPI_Comm comm, MPI_Status *status)
+{
+  int rank;
+  int retval = MPI_SUCCESS;
+  smpi_mpi_request_t srequest;
+  smpi_mpi_request_t rrequest;
+
+ rank = smpi_mpi_comm_rank(comm);
+
+  /* send */
+  retval = smpi_create_request(sendbuf, sendcount, sendtype,
+                               rank, dest, sendtag,
+                               comm, &srequest);
+  smpi_mpi_isend(srequest);
+
+  /* recv */
+  retval = smpi_create_request(recvbuf, recvcount, recvtype,
+                               source, rank, recvtag,
+                               comm, &rrequest);
+  smpi_mpi_irecv(rrequest);
+
+  smpi_mpi_wait(srequest, MPI_STATUS_IGNORE);
+  smpi_mpi_wait(rrequest, status); /* the receive fills the user's status */
+
+  /* release the requests, as done after every local wait in this file */
+  xbt_mallocator_release(smpi_global->request_mallocator, srequest);
+  xbt_mallocator_release(smpi_global->request_mallocator, rrequest);
+
+  return retval;
+}
+/**
+ * MPI_Sendrecv user entry point
+ **/
+int SMPI_MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
+ MPI_Comm comm, MPI_Status *status)
+{
+  int retval = MPI_SUCCESS;
+
+ smpi_bench_end();
+  retval = smpi_mpi_sendrecv(sendbuf, sendcount, sendtype, dest, sendtag,
+                             recvbuf, recvcount, recvtype, source, recvtag,
+                             comm, status);
+ smpi_bench_begin();
+
+ return retval;
+}
+
+/**
+ * MPI_Wait and friends
+ **/
int SMPI_MPI_Wait(MPI_Request * request, MPI_Status * status)
{
- return smpi_mpi_wait(*request, status);
+ int retval;
+
+ smpi_bench_end();
+ retval = smpi_mpi_wait(*request, status);
+ smpi_bench_begin();
+ return retval;
}
-int SMPI_MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[]) {
- return smpi_mpi_waitall(count, requests,status);
+int SMPI_MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
+{
+ int retval;
+
+ smpi_bench_end();
+ retval = smpi_mpi_waitall(count, requests, status);
+ smpi_bench_begin();
+ return retval;
}
-int SMPI_MPI_Waitany(int count, MPI_Request requests[], int *index, MPI_Status status[]) {
- return smpi_mpi_waitany(count, requests, index,status);
+
+int SMPI_MPI_Waitany(int count, MPI_Request requests[], int *index,
+ MPI_Status status[])
+{
+ int retval;
+
+ smpi_bench_end();
+ retval = smpi_mpi_waitany(count, requests, index, status);
+ smpi_bench_begin();
+ return retval;
}
+
/**
* MPI_Bcast
**/
+
+/**
+ * flat bcast
+ **/
+int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
+int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
+ MPI_Comm comm)
+{
+ int rank;
+ int retval = MPI_SUCCESS;
+ smpi_mpi_request_t request;
+
+ rank = smpi_mpi_comm_rank(comm);
+ if (rank == root) {
+ retval = smpi_create_request(buf, count, datatype, root,
+ (root + 1) % comm->size, 0, comm, &request);
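+    /* 'forward' makes the runtime propagate this send to the size-1
+     * remaining ranks (cf. the note on request->forward in
+     * smpi_mpi_reduce() below). */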
+ request->forward = comm->size - 1;
+ smpi_mpi_isend(request);
+ } else {
+ retval = smpi_create_request(buf, count, datatype, MPI_ANY_SOURCE, rank,
+ 0, comm, &request);
+ smpi_mpi_irecv(request);
+ }
+
+ smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+
+  return retval;
+}
+/**
+ * Bcast internal level
+ **/
+int smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
+ MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int rank = smpi_mpi_comm_rank(comm);
+
+  DEBUG1("<%d> entered smpi_mpi_bcast(). Calls nary_tree_bcast()", rank);
+  //retval = flat_tree_bcast(buf, count, datatype, root, comm);
+  retval = nary_tree_bcast(buf, count, datatype, root, comm, 2);
+ return retval;
+}
+
+/**
+ * Bcast user entry point
+ **/
int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
MPI_Comm comm)
{
+ int retval = MPI_SUCCESS;
+
+ smpi_bench_end();
+  retval = smpi_mpi_bcast(buf, count, datatype, root, comm);
+ smpi_bench_begin();
+
+ return retval;
+}
+
+#ifdef DEBUG_REDUCE
+/**
+ * debugging helpers, compiled in only when DEBUG_REDUCE is defined
+ **/
+static void print_buffer_int(void *buf, int len, char *msg, int rank)
+{
+  int tmp;
+  int *v = buf;
+  printf("**[%d] %s: ", rank, msg);
+  for (tmp = 0; tmp < len; tmp++) {
+    printf("[%d]", v[tmp]);
+  }
+  printf("\n");
+  free(msg);
+}
+static void print_buffer_double(void *buf, int len, char *msg, int rank)
+{
+  int tmp;
+  double *v = buf;
+  printf("**[%d] %s: ", rank, msg);
+  for (tmp = 0; tmp < len; tmp++) {
+    printf("[%lf]", v[tmp]);
+  }
+  printf("\n");
+  free(msg);
+}
+
+
+#endif
+/**
+ * MPI_Reduce internal level
+ **/
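+/* Non-root ranks send their buffer to the root; the root receives into
+ * size-1 temporary buffers and folds each one into recvbuf with 'op' in
+ * arrival order (waitany), which assumes a commutative MPI_Op. */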
+int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
+ MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int rank;
+ int size;
+ int i;
+ int system_tag = 666;
+ smpi_mpi_request_t *requests;
+ smpi_mpi_request_t request;
+
+ rank = smpi_mpi_comm_rank(comm);
+ size = comm->size;
+  DEBUG1("<%d> entered smpi_mpi_reduce()", rank);
+
+ if (rank != root) { // if i am not ROOT, simply send my buffer to root
+
+#ifdef DEBUG_REDUCE
+ print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
+#endif
+ retval = smpi_create_request(sendbuf, count, datatype, rank, root, system_tag, comm,
+ &request);
+ smpi_mpi_isend(request);
+ smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+
+ } else {
+ // i am the ROOT: wait for all buffers by creating one request by sender
+ int src;
+ requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+
+ void **tmpbufs = xbt_malloc((size-1) * sizeof(void *));
+ for (i = 0; i < size-1; i++) {
+ // we need 1 buffer per request to store intermediate receptions
+ tmpbufs[i] = xbt_malloc(count * datatype->size);
+ }
+    // root: initialize recv buf with my own snd buf
+ memcpy(recvbuf, sendbuf, count * datatype->size * sizeof(char));
+
+    // We cannot use 'request->forward = size-1;' (which would propagate size-1
+    // receive requests) since we must apply 'op' as soon as one receive completes.
+ for (i = 0; i < size-1; i++) {
+ // reminder: for smpi_create_request() the src is always the process sending.
+ src = i < root ? i : i + 1;
+ retval = smpi_create_request(tmpbufs[i], count, datatype,
+ src, root, system_tag, comm, &(requests[i]));
+      if (NULL != requests[i] && MPI_SUCCESS == retval) {
+        smpi_mpi_irecv(requests[i]);
+      }
+ }
+ // now, wait for completion of all irecv's.
+ for (i = 0; i < size-1; i++) {
+ int index = MPI_UNDEFINED;
+      smpi_mpi_waitany(size-1, requests, &index, MPI_STATUS_IGNORE);
+      DEBUG3("<%d> waitany() unblocked by reception (completes request[%d]) (%d reqs remaining)",
+             rank, index, size-i-2);
+#ifdef DEBUG_REDUCE
+ print_buffer_int(tmpbufs[index], count, bprintf("tmpbufs[index=%d] (value received)", index),
+ rank);
+#endif
+
+ // arg 2 is modified
+ op->func(tmpbufs[index], recvbuf, &count, &datatype);
+#ifdef DEBUG_REDUCE
+ print_buffer_int(recvbuf, count, xbt_strdup("rcvbuf"), rank);
+#endif
+ xbt_free(tmpbufs[index]);
+      /* FIXME: the following line used to raise
+       * [xbt_ex/CRITICAL] "Conditional list not empty". Fixed?
+       */
+ xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+ }
+ xbt_free(requests);
+ xbt_free(tmpbufs);
+ }
+ return retval;
+}
+
+/**
+ * MPI_Reduce user entry point
+ **/
+int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
+ MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
+{
+  int retval = MPI_SUCCESS;
+
+ smpi_bench_end();
+
+ retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+
+ smpi_bench_begin();
+ return retval;
+}
+
+/**
+ * MPI_Allreduce
+ *
+ * Same as MPI_Reduce except that the result appears in the receive buffer of all the group members.
+ **/
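+/* Implemented as a reduce toward an arbitrary root (0) followed by a
+ * broadcast of the result; a dedicated allreduce algorithm could avoid
+ * the intermediate hop. */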
+int SMPI_MPI_Allreduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
+ MPI_Op op, MPI_Comm comm )
+{
+  int retval = MPI_SUCCESS;
+  int root = 0;                 // arbitrary choice
+
+  smpi_bench_end();
+
+  retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+  if (MPI_SUCCESS == retval)    // broadcast the *result*, i.e. recvbuf
+    retval = smpi_mpi_bcast(recvbuf, count, datatype, root, comm);
+
+  smpi_bench_begin();
+  return retval;
+}
+
+
+/**
+ * MPI_Scatter user entry point
+ **/
+int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype,
+ int root, MPI_Comm comm)
+{
int retval = MPI_SUCCESS;
+ int i;
+  int cnt = 0;
int rank;
+  int tag = 0;
+ char *cptr; // to manipulate the void * buffers
+ smpi_mpi_request_t *requests;
smpi_mpi_request_t request;
+ smpi_mpi_status_t status;
+
smpi_bench_end();
rank = smpi_mpi_comm_rank(comm);
+ requests = xbt_malloc((comm->size-1) * sizeof(smpi_mpi_request_t));
if (rank == root) {
- retval = smpi_create_request(buf, count, datatype, root,
- (root + 1) % comm->size, 0, comm, &request);
- request->forward = comm->size - 1;
- smpi_mpi_isend(request);
- } else {
- retval = smpi_create_request(buf, count, datatype, MPI_ANY_SOURCE, rank,
- 0, comm, &request);
- smpi_mpi_irecv(request);
- }
+ // i am the root: distribute my sendbuf
+ cptr = sendbuf;
+ for (i=0; i < comm->size; i++) {
+      if (i != root) { // send to processes ...
+
+        retval = smpi_create_request((void *)cptr, sendcount,
+                                     sendtype, root, i, tag, comm, &(requests[cnt]));
+        if (NULL != requests[cnt] && MPI_SUCCESS == retval) {
+          smpi_mpi_isend(requests[cnt]);
+        }
+ cnt++;
+ }
+      else { // ... except if it's me.
+        memcpy(recvbuf, (void *)cptr, recvcount * recvtype->size * sizeof(char));
+      }
+      cptr += sendcount * sendtype->size;
+ }
+    for (i = 0; i < cnt; i++) { // wait for the sends to complete
+ /* FIXME: waitall() should be slightly better */
+ smpi_mpi_wait(requests[i], &status);
+ xbt_mallocator_release(smpi_global->request_mallocator, requests[i]);
- smpi_mpi_wait(request, MPI_STATUS_IGNORE);
- xbt_mallocator_release(smpi_global->request_mallocator, request);
+ }
+ }
+  else { // i am a non-root process: wait for data from the root
+ retval = smpi_create_request(recvbuf,recvcount,
+ recvtype, root, rank, tag, comm, &request);
+    if (NULL != request && MPI_SUCCESS == retval) {
+      smpi_mpi_irecv(request);
+    }
+ smpi_mpi_wait(request, &status);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ }
+ xbt_free(requests);
smpi_bench_begin();
return retval;
}
+
/**
- * MPI_Reduce
+ * MPI_Alltoall user entry point
+ *
+ * Follows the algorithm-selection logic of Open MPI (1.2.7 and later),
+ * see ompi/mca/coll/tuned/coll_tuned_module.c
**/
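+/* Algorithm selection, mirroring the tuned Open MPI thresholds used below:
+ *   block < 200 bytes and comm size > 12  -> bruck
+ *   block < 3000 bytes                    -> basic linear
+ *   otherwise                             -> pairwise
+ */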
-
-int SMPI_MPI_Reduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
- MPI_Op op, int root, MPI_Comm comm )
+int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype,
+ MPI_Comm comm)
{
int retval = MPI_SUCCESS;
+ int block_dsize;
int rank;
- int size;
- int i;
- smpi_mpi_request_t *tabrequest;
smpi_bench_end();
rank = smpi_mpi_comm_rank(comm);
- size = comm->size;
+  block_dsize = sendtype->size * sendcount;
+  DEBUG2("<%d> optimized alltoall() called. Block size sent to each rank: %d bytes.", rank, block_dsize);
- tabrequest = xbt_malloc((size)*sizeof(smpi_mpi_request_t));
+ if ((block_dsize < 200) && (comm->size > 12)) {
+    retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype, comm);
- if (rank != root) { // if i am not root, simply send my buffer to root
- retval = smpi_create_request(sendbuf, count, datatype,
- rank, root, 0, comm, &(tabrequest[rank]));
- smpi_mpi_isend(tabrequest[rank]);
- smpi_mpi_wait(tabrequest[rank], MPI_STATUS_IGNORE);
- //printf("DEBUG: rank %d sent my sendbuf to root (rank %d)\n",rank,root);
+ } else if (block_dsize < 3000) {
+    retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype, comm);
} else {
- // i am the root: wait for all buffers by creating requests
- // i can not use: 'request->forward = size-1;' (which would progagate size-1 receive reqs)
- // since we should op values as soon as one receiving request matches.
- for (i=0; i<comm->size; i++) {
- if ( rank != i ) { // except for me
- // reminder: for smpi_create_request() the src is always the process sending.
- retval = smpi_create_request(recvbuf, count, datatype, MPI_ANY_SOURCE, root,
- 0, comm, &(tabrequest[i]));
- if (NULL != tabrequest[i] && MPI_SUCCESS == retval) {
- if (MPI_SUCCESS == retval) {
- smpi_mpi_irecv(tabrequest[i]);
- }
- }
- }
- }
- // now, wait for completion of all irecv's.
- // FIXME: we should implement smpi_wait_all for a more asynchronous behavior
- for (i=0; i<comm->size; i++) {
- if ( rank != i ) { // except for me
- smpi_mpi_wait(tabrequest[i], MPI_STATUS_IGNORE);
-
- // FIXME: the core part is here. To be written ...
-
- fprintf(stderr,"[smpi] %s:%d : MPI_Reduce *Not yet implemented*.\n",__FILE__,__LINE__);
- }
- }
+    retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype, comm);
}
- for (i=0; i<comm->size; i++)
- xbt_mallocator_release(smpi_global->request_mallocator, tabrequest[i]);
smpi_bench_begin();
return retval;
}
+/**
+ * MPI_Alltoallv user entry point
+ *
+ * As in OpenMPI, alltoallv is not optimized
+ * ompi/mca/coll/basic/coll_basic_alltoallv.c
+ **/
+int SMPI_MPI_Alltoallv(void *sendbuf, int *scounts, int *sdisps, MPI_Datatype sendtype,
+ void *recvbuf, int *rcounts, int *rdisps, MPI_Datatype recvtype,
+ MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int rank;
+
+ smpi_bench_end();
+ rank = smpi_mpi_comm_rank(comm);
+  DEBUG1("<%d> basic alltoallv() called.", rank);
+
+  retval = smpi_coll_basic_alltoallv(sendbuf, scounts, sdisps, sendtype,
+ recvbuf, rcounts, rdisps, recvtype,
+ comm);
+ smpi_bench_begin();
+ return retval;
+}
+
// used by comm_split to sort ranks based on key values
int smpi_compare_rankkeys(const void *a, const void *b);
int smpi_compare_rankkeys(const void *a, const void *b)
return retval;
}
-double SMPI_MPI_Wtime( void )
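+/* Returns the simulated time (SIMIX clock); benchmarking of the calling
+ * code is suspended while the clock is read. */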
+double SMPI_MPI_Wtime(void)
+{
+ double time;
+
+ smpi_bench_end();
+ time = SIMIX_get_clock();
+ smpi_bench_begin();
+ return time;
+}
+
+int SMPI_MPI_Gather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype,
+ int root, MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int system_tag = 666;
+ int rank, size;
+
+ smpi_bench_end();
+ rank = smpi_mpi_comm_rank(comm);
+ size = comm->size;
+ if(rank != root) {
+ // Send buffer to root
+ smpi_mpi_request_t request;
+
+ retval = smpi_create_request(sendbuf, sendcount, sendtype,
+ rank, root, system_tag, comm, &request);
+ smpi_mpi_isend(request);
+ smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ } else {
+ // Receive buffers from senders
+ int src;
+ smpi_mpi_request_t* requests;
+
+ requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+ for(src = 0; src < size; src++) {
+ if(src == root) {
+ // Local copy from root
+ memcpy(&((char*)recvbuf)[src*recvcount*recvtype->size],
+ sendbuf, sendcount*sendtype->size*sizeof(char));
+ } else {
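+        // requests[] has only size-1 slots: the index skips over root,
+        // which contributes via the local memcpy above instead.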
+ int index = src < root ? src : src - 1;
+ retval = smpi_create_request(&((char*)recvbuf)[src*recvcount*recvtype->size],
+ recvcount, recvtype, src, root, system_tag,
+ comm, &requests[index]);
+ if(NULL != requests[index] && MPI_SUCCESS == retval) {
+ smpi_mpi_irecv(requests[index]);
+ }
+ }
+ }
+ // Wait for completion of irecv's.
+ for(src = 0; src < size - 1; src++) {
+ int index = MPI_UNDEFINED;
+ smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+ }
+ xbt_free(requests);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int SMPI_MPI_Gatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ void* recvbuf, int* recvcounts, int* displs, MPI_Datatype recvtype,
+ int root, MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int system_tag = 666;
+ int rank, size;
+
+ smpi_bench_end();
+ rank = smpi_mpi_comm_rank(comm);
+ size = comm->size;
+ if(rank != root) {
+ // Send buffer to root
+ smpi_mpi_request_t request;
+
+ retval = smpi_create_request(sendbuf, sendcount, sendtype,
+ rank, root, system_tag, comm, &request);
+ smpi_mpi_isend(request);
+ smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ } else {
+ // Receive buffers from senders
+ int src;
+ smpi_mpi_request_t* requests;
+
+ requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+ for(src = 0; src < size; src++) {
+ if(src == root) {
+ // Local copy from root
+ memcpy(&((char*)recvbuf)[displs[src]],
+ sendbuf, sendcount*sendtype->size*sizeof(char));
+ } else {
+ int index = src < root ? src : src - 1;
+ retval = smpi_create_request(&((char*)recvbuf)[displs[src]],
+ recvcounts[src], recvtype, src, root, system_tag,
+ comm, &requests[index]);
+ if(NULL != requests[index] && MPI_SUCCESS == retval) {
+ smpi_mpi_irecv(requests[index]);
+ }
+ }
+ }
+ // Wait for completion of irecv's.
+ for(src = 0; src < size - 1; src++) {
+ int index = MPI_UNDEFINED;
+ smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+ }
+ xbt_free(requests);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int SMPI_MPI_Scatterv(void* sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype,
+ int root, MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int system_tag = 666;
+ int rank, size;
+
+ smpi_bench_end();
+ rank = smpi_mpi_comm_rank(comm);
+ size = comm->size;
+ if(rank != root) {
+ // Receive buffer from root
+ smpi_mpi_request_t request;
+
+ retval = smpi_create_request(recvbuf, recvcount, recvtype,
+ root, rank, system_tag, comm, &request);
+    smpi_mpi_irecv(request);
+ smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ } else {
+ // Send buffers to receivers
+ int dst;
+ smpi_mpi_request_t* requests;
+
+ requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+ for(dst = 0; dst < size; dst++) {
+ if(dst == root) {
+ // Local copy from root
+ memcpy(recvbuf, &((char*)sendbuf)[displs[dst]],
+ sendcounts[dst]*sendtype->size*sizeof(char));
+ } else {
+ int index = dst < root ? dst : dst - 1;
+ retval = smpi_create_request(&((char*)sendbuf)[displs[dst]], sendcounts[dst], sendtype,
+ root, dst, system_tag, comm, &requests[index]);
+ if(NULL != requests[index] && MPI_SUCCESS == retval) {
+ smpi_mpi_isend(requests[index]);
+ }
+ }
+ }
+ // Wait for completion of isend's.
+ for(dst = 0; dst < size - 1; dst++) {
+ int index = MPI_UNDEFINED;
+ smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+ }
+ xbt_free(requests);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int SMPI_MPI_Reduce_scatter(void* sendbuf, void* recvbuf, int *recvcounts,
+ MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+ // FIXME: Suboptimal implementation
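+  // Composition: reduce the whole vector to root 0, then Scatterv hands
+  // rank i its recvcounts[i] slice at offset displs[i].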
+ int retval = MPI_SUCCESS;
+ int count = 0;
+ int root = 0;
+ int i, rank;
+ int* displs;
+
+ smpi_bench_end();
+ rank = smpi_mpi_comm_rank(comm);
+ displs = xbt_new(int, comm->size);
+  for(i = 0; i < comm->size; i++) {
+    displs[i] = count;   // each rank's slice starts after the previous ones
+    count += recvcounts[i];
+  }
+  retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+  if (retval == MPI_SUCCESS) {
+    retval = SMPI_MPI_Scatterv(recvbuf, recvcounts, displs, datatype,
+                               recvbuf, recvcounts[rank], datatype, root, comm);
+  }
+ xbt_free(displs);
+ smpi_bench_begin();
+ return retval;
+}
+
+int SMPI_MPI_Allgather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+{
+ // FIXME: Suboptimal implementation
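+  // Composition: gather everything on root 0, then broadcast the
+  // assembled receive buffer.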
+ int root = 0;
+ int retval;
+
+ smpi_bench_end();
+ retval = SMPI_MPI_Gather(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype, root, comm);
+ if(retval == MPI_SUCCESS) {
+ retval = SMPI_MPI_Bcast(recvbuf, recvcount, recvtype, root, comm);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int SMPI_MPI_Allgatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ void* recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype,
+ MPI_Comm comm)
{
- return ( SIMIX_get_clock() );
+ // FIXME: Suboptimal implementation
+ int root = 0;
+ int last, retval;
+
+ smpi_bench_end();
+ retval = SMPI_MPI_Gatherv(sendbuf, sendcount, sendtype,
+ recvbuf, recvcounts, displs, recvtype, root, comm);
+ if(retval == MPI_SUCCESS) {
+ last = comm->size - 1;
+ retval = SMPI_MPI_Bcast(recvbuf, displs[last] + recvcounts[last], recvtype, root, comm);
+ }
+ smpi_bench_begin();
+ return retval;
}