X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/f0aa7a97261661fc3bf5c5ab9cb81128a3742bff..3c072dea92bbf0e4df46b1d8d56cd62e68883b33:/src/smpi/smpi_mpi.c

diff --git a/src/smpi/smpi_mpi.c b/src/smpi/smpi_mpi.c
index 4ee1d8a864..22f9ae52d8 100644
--- a/src/smpi/smpi_mpi.c
+++ b/src/smpi/smpi_mpi.c
@@ -1,7 +1,19 @@
+/* $Id$tag */
+
+/* smpi_mpi.c --
+ *
+ * Will eventually contain the user-level MPI primitives and their
+ * corresponding internal wrappers. The implementations of these primitives
+ * should go to specific files. For example, SMPI_MPI_Bcast() in this file
+ * should call the wrapper smpi_mpi_bcast(), which decides which
+ * implementation to call. Currently, it calls nary_tree_bcast() in
+ * smpi_coll.c. (Stéphane Genaud)
+ * */
+
 #include "private.h"
 #include "smpi_coll_private.h"
+#include "smpi_mpi_dt_private.h"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi,
                                 "Logging specific to SMPI (mpi)");
@@ -66,25 +78,11 @@ int SMPI_MPI_Comm_rank(MPI_Comm comm, int *rank)
   return retval;
 }
 
-int SMPI_MPI_Type_size(MPI_Datatype datatype, size_t * size)
-{
-  int retval = MPI_SUCCESS;
-
-  smpi_bench_end();
-
-  if (NULL == datatype) {
-    retval = MPI_ERR_TYPE;
-  } else if (NULL == size) {
-    retval = MPI_ERR_ARG;
-  } else {
-    *size = datatype->size;
-  }
-
-  smpi_bench_begin();
-
-  return retval;
-}
-
+/**
+ * Barrier
+ **/
 int SMPI_MPI_Barrier(MPI_Comm comm)
 {
   int retval = MPI_SUCCESS;
@@ -109,6 +107,8 @@ int SMPI_MPI_Barrier(MPI_Comm comm)
   return retval;
 }
 
+
+
 int SMPI_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src,
                    int tag, MPI_Comm comm, MPI_Request * request)
 {
@@ -168,6 +168,9 @@ int SMPI_MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst,
   return retval;
 }
 
+/**
+ * MPI_Send user level
+ **/
 int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm)
 {
@@ -191,10 +194,11 @@ int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
   return retval;
 }
 
+
 /**
- * MPI_Sendrecv
+ * MPI_Sendrecv internal level
 **/
-int SMPI_MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
+int smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
                     MPI_Comm comm, MPI_Status *status)
 {
@@ -206,36 +210,43 @@ smpi_mpi_request_t rrequest;
   rank = smpi_mpi_comm_rank(comm);
 
   /* send */
-  /* -------------*/
-  retval = smpi_create_request(sendbuf, sendcount, sendtype,
+  retval = smpi_create_request(sendbuf, sendcount, sendtype,
                         rank, dest, sendtag,
                         comm, &srequest);
-  printf("[%d] isend request src=%d -> dst=%d (retval=%d)\n",rank,rank,dest,retval);
   smpi_mpi_isend(srequest);
-
-  //retval = MPI_Isend( sendbuf, sendcount, sendtype, dest, sendtag, MPI_COMM_WORLD, &srequest);
-
-
   /* recv */
   retval = smpi_create_request(recvbuf, recvcount, recvtype,
                         source, rank, recvtag,
                         comm, &rrequest);
-  printf("[%d] irecv request src=%d -> dst=%d (retval=%d)\n",rank,source,rank,retval);
   smpi_mpi_irecv(rrequest);
-  //retval = MPI_Irecv( recvbuf, recvcount, recvtype, source, recvtag, MPI_COMM_WORLD, &rrequest);
-
-
   smpi_mpi_wait(srequest, MPI_STATUS_IGNORE);
-  printf("[%d] isend request src=%d dst=%d tag=%d COMPLETED (retval=%d) \n",rank,rank,dest,sendtag,retval);
-
+  //printf("[%d] isend request src=%d dst=%d tag=%d COMPLETED (retval=%d) \n",rank,rank,dest,sendtag,retval);
   smpi_mpi_wait(rrequest, MPI_STATUS_IGNORE);
-  printf("[%d] irecv request src=%d -> dst=%d tag=%d COMPLETED (retval=%d)\n",rank,source,rank,recvtag,retval);
+  //printf("[%d] irecv request src=%d -> dst=%d tag=%d COMPLETED (retval=%d)\n",rank,source,rank,recvtag,retval);
 
   return(retval);
 }
 
+/**
+ * MPI_Sendrecv user entry point
+ **/
+int SMPI_MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
+                    void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
+                    MPI_Comm comm, MPI_Status *status)
+{
+  int retval = MPI_SUCCESS;
+
+  smpi_bench_end();
+  smpi_mpi_sendrecv(sendbuf, sendcount, sendtype, dest, sendtag,
+                    recvbuf, recvcount, recvtype, source, recvtag,
+                    comm, status);
+  smpi_bench_begin();
+
+  return retval;
+}
 
 /**
  * MPI_Wait and friends
 **/
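The sendrecv split above (post the isend and the irecv, then wait on both) is what makes a symmetric exchange safe: no rank blocks in a send while its partner blocks in a matching send. A minimal caller sketch, assuming the SMPI entry points mirror the standard MPI signatures and that SMPI_MPI_Comm_size() and MPI_DOUBLE are available (neither appears in this diff):

    /* Hypothetical ring exchange: each rank sends to its right neighbour and
     * receives from its left one in a single call. Deadlock-free because the
     * implementation posts both requests before waiting on either. */
    int rank, size, left, right, i;
    double out[64], in[64];
    SMPI_MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    SMPI_MPI_Comm_size(MPI_COMM_WORLD, &size);      /* assumed to exist */
    for (i = 0; i < 64; i++)
      out[i] = rank;                                /* payload to ship right */
    left  = (rank - 1 + size) % size;
    right = (rank + 1) % size;
    SMPI_MPI_Sendrecv(out, 64, MPI_DOUBLE, right, 0,
                      in,  64, MPI_DOUBLE, left,  0,
                      MPI_COMM_WORLD, MPI_STATUS_IGNORE);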
@@ -296,6 +307,9 @@ int smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                    MPI_Comm comm)
 {
   int retval = MPI_SUCCESS;
+  int rank = smpi_mpi_comm_rank(comm);
+
+  DEBUG1("<%d> entered smpi_mpi_bcast(). Calls nary_tree_bcast()",rank);
   //retval = flat_tree_bcast(buf, count, datatype, root, comm);
   retval = nary_tree_bcast(buf, count, datatype, root, comm, 2 );
   return retval;
@@ -349,7 +363,7 @@ static void print_buffer_double(void *buf, int len, char *msg, int rank)
 #endif
 
 /**
- * MPI_Reduce
+ * MPI_Reduce internal level
 **/
 int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
                     MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
@@ -358,7 +372,7 @@ int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
   int rank;
   int size;
   int i;
-  int tag = 0;
+  int system_tag = 666;
   smpi_mpi_request_t *requests;
   smpi_mpi_request_t request;
 
@@ -366,13 +380,14 @@ int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
 
   rank = smpi_mpi_comm_rank(comm);
   size = comm->size;
+  DEBUG1("<%d> entered smpi_mpi_reduce()",rank);
 
   if (rank != root) {           // if i am not ROOT, simply send my buffer to root
 
 #ifdef DEBUG_REDUCE
     print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
 #endif
-    retval = smpi_create_request(sendbuf, count, datatype, rank, root, tag, comm,
+    retval = smpi_create_request(sendbuf, count, datatype, rank, root, system_tag, comm,
                                  &request);
     smpi_mpi_isend(request);
     smpi_mpi_wait(request, MPI_STATUS_IGNORE);
@@ -397,7 +412,7 @@ int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
       // reminder: for smpi_create_request() the src is always the process sending.
       src = i < root ? i : i + 1;
       retval = smpi_create_request(tmpbufs[i], count, datatype,
-                                   src, root, tag, comm, &(requests[i]));
+                                   src, root, system_tag, comm, &(requests[i]));
       if (NULL != requests[i] && MPI_SUCCESS == retval) {
         if (MPI_SUCCESS == retval) {
           smpi_mpi_irecv(requests[i]);
@@ -408,8 +423,9 @@ int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
     for (i = 0; i < size-1; i++) {
       int index = MPI_UNDEFINED;
       smpi_mpi_waitany( size-1, requests, &index, MPI_STATUS_IGNORE);
+      DEBUG3("<%d> waitany() unblocked by reception (completes request[%d]) (%d reqs remaining)",
+             rank, index, size-i-2);
 #ifdef DEBUG_REDUCE
-      printf ("MPI_Waitany() unblocked: root received (completes req[index=%d])\n",index);
       print_buffer_int(tmpbufs[index], count,
                        bprintf("tmpbufs[index=%d] (value received)", index),
                        rank);
@@ -452,7 +468,7 @@ int retval = MPI_SUCCESS;
 
 /**
  * MPI_Allreduce
  *
- * Same as MPI_REDUCE except that the result appears in the receive buffer of all the group members.
+ * Same as MPI_Reduce except that the result appears in the receive buffer of all the group members.
 **/
 int SMPI_MPI_Allreduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                     MPI_Op op, MPI_Comm comm )
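Judging from the "int root=0;  // arbitrary choice" context line in the next hunk, SMPI_MPI_Allreduce() composes the two internal wrappers defined in this file: a reduce to an arbitrary root followed by a broadcast of the result. A sketch of that composition, using the smpi_mpi_reduce() and smpi_mpi_bcast() signatures above (the name my_allreduce is illustrative, not part of the source):

    /* Reduce-then-broadcast: after smpi_mpi_reduce(), only root holds the
     * combined value in recvbuf; smpi_mpi_bcast() then copies it to every
     * rank, which is exactly the Allreduce postcondition. */
    int my_allreduce(void *sendbuf, void *recvbuf, int count,
                     MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
    {
      int root = 0;             /* arbitrary choice, as in the source */
      int retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
      if (MPI_SUCCESS == retval)
        retval = smpi_mpi_bcast(recvbuf, count, datatype, root, comm);
      return retval;
    }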
@@ -476,9 +492,6 @@ int root=0;  // arbitrary choice
 /**
  * MPI_Scatter user entry point
 **/
-//int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
-//                     void *recvbuf, int recvcount, MPI_Datatype recvtype, int root,
-//                     MPI_Comm comm);
 int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      int root, MPI_Comm comm)
@@ -553,8 +566,8 @@ int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
  *     ompi/mca/coll/tuned/coll_tuned_module.c
 **/
 int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
-                     void *recvbuf, int recvcount, MPI_Datatype recvtype,
-                     MPI_Comm comm)
+                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
+                      MPI_Comm comm)
 {
   int retval = MPI_SUCCESS;
   int block_dsize;
@@ -564,16 +577,14 @@ int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
 
   rank = smpi_mpi_comm_rank(comm);
   block_dsize = datatype->size * sendcount;
+  DEBUG2("<%d> optimized alltoall() called. Block size sent to each rank: %d bytes.", rank, block_dsize);
 
   if ((block_dsize < 200) && (comm->size > 12)) {
     retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, datatype,
                                             recvbuf, recvcount, recvtype, comm);
 
   } else if (block_dsize < 3000) {
-/* use this one !!
-    retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, datatype,
-                                                   recvbuf, recvcount, recvtype, comm);
-    */
-    retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, datatype,
+    retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, datatype,
                                                    recvbuf, recvcount, recvtype, comm);
 
   } else {
@@ -586,6 +597,29 @@ int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
   return retval;
 }
 
+/**
+ * MPI_Alltoallv user entry point
+ *
+ * As in OpenMPI, alltoallv is not optimized; see
+ * ompi/mca/coll/basic/coll_basic_alltoallv.c
+ **/
+int SMPI_MPI_Alltoallv(void *sendbuf, int *scounts, int *sdisps, MPI_Datatype datatype,
+                       void *recvbuf, int *rcounts, int *rdisps, MPI_Datatype recvtype,
+                       MPI_Comm comm)
+{
+  int retval = MPI_SUCCESS;
+  int rank;
+
+  rank = smpi_mpi_comm_rank(comm);
+  DEBUG1("<%d> basic alltoallv() called.", rank);
+
+  retval = smpi_coll_basic_alltoallv(sendbuf, scounts, sdisps, datatype,
+                                     recvbuf, rcounts, rdisps, recvtype,
+                                     comm);
+
+  return retval;
+}
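The new SMPI_MPI_Alltoallv() follows the standard counts/displacements convention: scounts[i] and sdisps[i] describe the slice of sendbuf bound for rank i, while rcounts[i] and rdisps[i] describe the slice of recvbuf reserved for data arriving from rank i. A hypothetical caller, again assuming SMPI_MPI_Comm_size() and MPI_INT beyond what this diff shows (needs <stdlib.h> for malloc()):

    /* Rank r sends (r+1) ints to every peer and receives (i+1) ints from
     * each peer i, so counts differ per source and per destination, which
     * is precisely the case alltoallv exists for. */
    int rank, size, i;
    SMPI_MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    SMPI_MPI_Comm_size(MPI_COMM_WORLD, &size);            /* assumed to exist */
    int *scounts = malloc(size * sizeof(int)), *sdisps = malloc(size * sizeof(int));
    int *rcounts = malloc(size * sizeof(int)), *rdisps = malloc(size * sizeof(int));
    int *sendbuf = malloc(size * (rank + 1) * sizeof(int));       /* fill omitted */
    int *recvbuf = malloc(size * (size + 1) / 2 * sizeof(int));
    for (i = 0; i < size; i++) {
      scounts[i] = rank + 1;   sdisps[i] = i * (rank + 1);
      rcounts[i] = i + 1;      rdisps[i] = i * (i + 1) / 2;  /* sum of (j+1), j<i */
    }
    SMPI_MPI_Alltoallv(sendbuf, scounts, sdisps, MPI_INT,
                       recvbuf, rcounts, rdisps, MPI_INT, MPI_COMM_WORLD);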