X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/99e8fb90a14e543c1a4b2feaf22a5c0670c2951a..ae2d6a900ad8864d500f3e6fb63b6074519cb364:/src/smpi/smpi_mpi.c

diff --git a/src/smpi/smpi_mpi.c b/src/smpi/smpi_mpi.c
index c1f52536d6..20ff68f150 100644
--- a/src/smpi/smpi_mpi.c
+++ b/src/smpi/smpi_mpi.c
@@ -1,7 +1,19 @@
+/* $Id$tag */
+
+/* smpi_mpi.c --
+ *
+ * This file will eventually contain the user-level MPI primitives and their
+ * corresponding internal wrappers. The implementations of these primitives
+ * should go in specific files. For example, SMPI_MPI_Bcast() in this file
+ * should call the wrapper smpi_mpi_bcast(), which decides which implementation
+ * to call. Currently, it calls nary_tree_bcast() in smpi_coll.c. (Stéphane Genaud)
+ * */
+
 #include "private.h"
 #include "smpi_coll_private.h"
+#include "smpi_mpi_dt_private.h"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi,
                                 "Logging specific to SMPI (mpi)");
@@ -66,25 +78,11 @@ int SMPI_MPI_Comm_rank(MPI_Comm comm, int *rank)
   return retval;
 }
 
-int SMPI_MPI_Type_size(MPI_Datatype datatype, size_t * size)
-{
-  int retval = MPI_SUCCESS;
-
-  smpi_bench_end();
-
-  if (NULL == datatype) {
-    retval = MPI_ERR_TYPE;
-  } else if (NULL == size) {
-    retval = MPI_ERR_ARG;
-  } else {
-    *size = datatype->size;
-  }
-
-  smpi_bench_begin();
-  return retval;
-}
 
+/**
+ * Barrier
+ **/
 int SMPI_MPI_Barrier(MPI_Comm comm)
 {
   int retval = MPI_SUCCESS;
 
@@ -109,14 +107,17 @@ int SMPI_MPI_Barrier(MPI_Comm comm)
   return retval;
 }
 
+
+
 int SMPI_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src,
                    int tag, MPI_Comm comm, MPI_Request * request)
 {
   int retval = MPI_SUCCESS;
+  int rank;
 
   smpi_bench_end();
-
-  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
+  rank = smpi_mpi_comm_rank(comm);
+  retval = smpi_create_request(buf, count, datatype, src, rank, tag, comm,
                                request);
   if (NULL != *request && MPI_SUCCESS == retval) {
     retval = smpi_mpi_irecv(*request);
@@ -131,11 +132,13 @@ int SMPI_MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Status * status)
 {
   int retval = MPI_SUCCESS;
+  int rank;
   smpi_mpi_request_t request;
 
   smpi_bench_end();
 
-  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
+  rank = smpi_mpi_comm_rank(comm);
+  retval = smpi_create_request(buf, count, datatype, src, rank, tag, comm,
                                &request);
   if (NULL != request && MPI_SUCCESS == retval) {
     retval = smpi_mpi_irecv(request);
@@ -154,10 +157,12 @@ int SMPI_MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst,
                    int tag, MPI_Comm comm, MPI_Request * request)
 {
   int retval = MPI_SUCCESS;
+  int rank;
 
   smpi_bench_end();
 
-  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
+  rank = smpi_mpi_comm_rank(comm);
+  retval = smpi_create_request(buf, count, datatype, rank, dst, tag, comm,
                                request);
   if (NULL != *request && MPI_SUCCESS == retval) {
     retval = smpi_mpi_isend(*request);
@@ -168,15 +173,20 @@ int SMPI_MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst,
   return retval;
 }
 
+/**
+ * MPI_Send user-level entry point
+ **/
 int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm)
 {
   int retval = MPI_SUCCESS;
+  int rank;
   smpi_mpi_request_t request;
 
   smpi_bench_end();
 
-  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
+  rank = smpi_mpi_comm_rank(comm);
+  retval = smpi_create_request(buf, count, datatype, rank, dst, tag, comm,
                                &request);
   if (NULL != request && MPI_SUCCESS == retval) {
     retval = smpi_mpi_isend(request);
@@ -250,18 +260,33 @@ int retval = MPI_SUCCESS;
  **/
 int SMPI_MPI_Wait(MPI_Request * request, MPI_Status * status)
 {
-  return smpi_mpi_wait(*request, status);
+  int retval;
+
+  smpi_bench_end();
+  retval = smpi_mpi_wait(*request, status);
+  smpi_bench_begin();
+  return retval;
 }
 
 int SMPI_MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
 {
-  return smpi_mpi_waitall(count, requests, status);
+  int retval;
+
+  smpi_bench_end();
+  retval = smpi_mpi_waitall(count, requests, status);
+  smpi_bench_begin();
+  return retval;
 }
 
 int SMPI_MPI_Waitany(int count, MPI_Request requests[], int *index,
                      MPI_Status status[])
 {
-  return smpi_mpi_waitany(count, requests, index, status);
+  int retval;
+
+  smpi_bench_end();
+  retval = smpi_mpi_waitany(count, requests, index, status);
+  smpi_bench_begin();
+  return retval;
 }
 
 /**
@@ -304,6 +329,9 @@ int smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                    MPI_Comm comm)
 {
   int retval = MPI_SUCCESS;
+  int rank = smpi_mpi_comm_rank(comm);
+
+  DEBUG1("<%d> entered smpi_mpi_bcast(). Calls nary_tree_bcast()",rank);
   //retval = flat_tree_bcast(buf, count, datatype, root, comm);
   retval = nary_tree_bcast(buf, count, datatype, root, comm, 2 );
   return retval;
@@ -366,7 +394,7 @@ int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
   int rank;
   int size;
   int i;
-  int tag = 0;
+  int system_tag = 666;
   smpi_mpi_request_t *requests;
   smpi_mpi_request_t request;
@@ -374,13 +402,14 @@ int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
 
   rank = smpi_mpi_comm_rank(comm);
   size = comm->size;
+  DEBUG1("<%d> entered smpi_mpi_reduce()",rank);
 
   if (rank != root) {           // if i am not ROOT, simply send my buffer to root
 
 #ifdef DEBUG_REDUCE
     print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
 #endif
-    retval = smpi_create_request(sendbuf, count, datatype, rank, root, tag, comm,
+    retval = smpi_create_request(sendbuf, count, datatype, rank, root, system_tag, comm,
                                  &request);
     smpi_mpi_isend(request);
     smpi_mpi_wait(request, MPI_STATUS_IGNORE);
@@ -405,7 +434,7 @@ int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
       // reminder: for smpi_create_request() the src is always the process sending.
       src = i < root ? i : i + 1;
       retval = smpi_create_request(tmpbufs[i], count, datatype,
-                                   src, root, tag, comm, &(requests[i]));
+                                   src, root, system_tag, comm, &(requests[i]));
       if (NULL != requests[i] && MPI_SUCCESS == retval) {
         if (MPI_SUCCESS == retval) {
           smpi_mpi_irecv(requests[i]);
@@ -416,8 +445,9 @@ int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
     for (i = 0; i < size-1; i++) {
       int index = MPI_UNDEFINED;
       smpi_mpi_waitany( size-1, requests, &index, MPI_STATUS_IGNORE);
+      DEBUG3("<%d> waitany() unblocked by reception (completes request[%d]) (%d reqs remaining)",
+             rank,index,size-i-2);
 #ifdef DEBUG_REDUCE
-      printf ("MPI_Waitany() unblocked: root received (completes req[index=%d])\n",index);
       print_buffer_int(tmpbufs[index], count, bprintf("tmpbufs[index=%d] (value received)", index),
                        rank);
 #endif
@@ -430,8 +460,9 @@ int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
       xbt_free(tmpbufs[index]);
       /* FIXME: with the following line, it generates an
        * [xbt_ex/CRITICAL] Conditional list not empty 162518800.
+       * Fixed ?
        */
-      // xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+      xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
     }
     xbt_free(requests);
     xbt_free(tmpbufs);
@@ -460,7 +491,7 @@ int retval = MPI_SUCCESS;
 /**
  * MPI_Allreduce
  *
- * Same as MPI_REDUCE except that the result appears in the receive buffer of all the group members.
+ * Same as MPI_Reduce except that the result appears in the receive buffer of all the group members.
  **/
 int SMPI_MPI_Allreduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                         MPI_Op op, MPI_Comm comm )
@@ -558,8 +589,8 @@ int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
  * ompi/mca/coll/tuned/coll_tuned_module.c
  **/
 int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
-                  void *recvbuf, int recvcount, MPI_Datatype recvtype,
-                  MPI_Comm comm)
+                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
+                      MPI_Comm comm)
 {
   int retval = MPI_SUCCESS;
   int block_dsize;
@@ -569,16 +600,14 @@ int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
 
   rank = smpi_mpi_comm_rank(comm);
   block_dsize = datatype->size * sendcount;
+  DEBUG2("<%d> optimized alltoall() called. Block size sent to each rank: %d bytes.",rank,block_dsize);
 
   if ((block_dsize < 200) && (comm->size > 12)) {
     retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, datatype,
                                             recvbuf, recvcount, recvtype, comm);
 
   } else if (block_dsize < 3000) {
-/* use this one !!  retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, datatype,
-                                              recvbuf, recvcount, recvtype, comm);
- */
-    retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, datatype,
+    retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, datatype,
                                                recvbuf, recvcount, recvtype, comm);
 
   } else {
@@ -591,6 +620,30 @@ int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
   return retval;
 }
 
+/**
+ * MPI_Alltoallv user entry point
+ *
+ * As in OpenMPI, alltoallv is not optimized
+ * ompi/mca/coll/basic/coll_basic_alltoallv.c
+ **/
+int SMPI_MPI_Alltoallv(void *sendbuf, int *scounts, int *sdisps, MPI_Datatype datatype,
+                       void *recvbuf, int *rcounts, int *rdisps, MPI_Datatype recvtype,
+                       MPI_Comm comm)
+{
+  int retval = MPI_SUCCESS;
+  int rank;
+
+  smpi_bench_end();
+  rank = smpi_mpi_comm_rank(comm);
+  DEBUG1("<%d> basic alltoallv() called.",rank);
+
+  retval = smpi_coll_basic_alltoallv(sendbuf, scounts, sdisps, datatype,
+                                     recvbuf, rcounts, rdisps, recvtype,
+                                     comm);
+  smpi_bench_begin();
+  return retval;
+}
+
@@ -728,7 +781,228 @@ int SMPI_MPI_Comm_split(MPI_Comm comm, int color, int key,
 
 double SMPI_MPI_Wtime(void)
 {
-  return (SIMIX_get_clock());
+  double time;
+
+  smpi_bench_end();
+  time = SIMIX_get_clock();
+  smpi_bench_begin();
+  return time;
+}
+
+int SMPI_MPI_Gather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+                    void* recvbuf, int recvcount, MPI_Datatype recvtype,
+                    int root, MPI_Comm comm)
+{
+  int retval = MPI_SUCCESS;
+  int system_tag = 666;
+  int rank, size;
+
+  smpi_bench_end();
+  rank = smpi_mpi_comm_rank(comm);
+  size = comm->size;
+  if(rank != root) {
+    // Send buffer to root
+    smpi_mpi_request_t request;
+
+    retval = smpi_create_request(sendbuf, sendcount, sendtype,
+                                 rank, root, system_tag, comm, &request);
+    smpi_mpi_isend(request);
+    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+    xbt_mallocator_release(smpi_global->request_mallocator, request);
+  } else {
+    // Receive buffers from senders
+    int src;
+    smpi_mpi_request_t* requests;
+
+    requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+    for(src = 0; src < size; src++) {
+      if(src == root) {
+        // Local copy from root
+        memcpy(&((char*)recvbuf)[src*recvcount*recvtype->size],
+               sendbuf, sendcount*sendtype->size*sizeof(char));
+      } else {
+        int index = src < root ? src : src - 1;
+        retval = smpi_create_request(&((char*)recvbuf)[src*recvcount*recvtype->size],
+                                     recvcount, recvtype, src, root, system_tag,
+                                     comm, &requests[index]);
+        if(NULL != requests[index] && MPI_SUCCESS == retval) {
+          smpi_mpi_irecv(requests[index]);
+        }
+      }
+    }
+    // Wait for completion of irecv's.
+    for(src = 0; src < size - 1; src++) {
+      int index = MPI_UNDEFINED;
+      smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
+      xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+    }
+    xbt_free(requests);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int SMPI_MPI_Gatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+                     void* recvbuf, int* recvcounts, int* displs, MPI_Datatype recvtype,
+                     int root, MPI_Comm comm)
+{
+  int retval = MPI_SUCCESS;
+  int system_tag = 666;
+  int rank, size;
+
+  smpi_bench_end();
+  rank = smpi_mpi_comm_rank(comm);
+  size = comm->size;
+  if(rank != root) {
+    // Send buffer to root
+    smpi_mpi_request_t request;
+
+    retval = smpi_create_request(sendbuf, sendcount, sendtype,
+                                 rank, root, system_tag, comm, &request);
+    smpi_mpi_isend(request);
+    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+    xbt_mallocator_release(smpi_global->request_mallocator, request);
+  } else {
+    // Receive buffers from senders
+    int src;
+    smpi_mpi_request_t* requests;
+
+    requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+    for(src = 0; src < size; src++) {
+      if(src == root) {
+        // Local copy from root
+        memcpy(&((char*)recvbuf)[displs[src]],
+               sendbuf, sendcount*sendtype->size*sizeof(char));
+      } else {
+        int index = src < root ? src : src - 1;
+        retval = smpi_create_request(&((char*)recvbuf)[displs[src]],
+                                     recvcounts[src], recvtype, src, root, system_tag,
+                                     comm, &requests[index]);
+        if(NULL != requests[index] && MPI_SUCCESS == retval) {
+          smpi_mpi_irecv(requests[index]);
+        }
+      }
+    }
+    // Wait for completion of irecv's.
+    for(src = 0; src < size - 1; src++) {
+      int index = MPI_UNDEFINED;
+      smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
+      xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+    }
+    xbt_free(requests);
+  }
+  smpi_bench_begin();
+  return retval;
 }
 
+int SMPI_MPI_Scatterv(void* sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype,
+                      void* recvbuf, int recvcount, MPI_Datatype recvtype,
+                      int root, MPI_Comm comm)
+{
+  int retval = MPI_SUCCESS;
+  int system_tag = 666;
+  int rank, size;
+
+  smpi_bench_end();
+  rank = smpi_mpi_comm_rank(comm);
+  size = comm->size;
+  if(rank != root) {
+    // Receive buffer from root
+    smpi_mpi_request_t request;
+
+    retval = smpi_create_request(recvbuf, recvcount, recvtype,
+                                 root, rank, system_tag, comm, &request);
+    smpi_mpi_irecv(request);
+    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+    xbt_mallocator_release(smpi_global->request_mallocator, request);
+  } else {
+    // Send buffers to receivers
+    int dst;
+    smpi_mpi_request_t* requests;
+
+    requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+    for(dst = 0; dst < size; dst++) {
+      if(dst == root) {
+        // Local copy from root
+        memcpy(recvbuf, &((char*)sendbuf)[displs[dst]],
+               sendcounts[dst]*sendtype->size*sizeof(char));
+      } else {
+        int index = dst < root ? dst : dst - 1;
+        retval = smpi_create_request(&((char*)sendbuf)[displs[dst]], sendcounts[dst], sendtype,
+                                     root, dst, system_tag, comm, &requests[index]);
+        if(NULL != requests[index] && MPI_SUCCESS == retval) {
+          smpi_mpi_isend(requests[index]);
+        }
+      }
+    }
+    // Wait for completion of isend's.
+    for(dst = 0; dst < size - 1; dst++) {
+      int index = MPI_UNDEFINED;
+      smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
+      xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+    }
+    xbt_free(requests);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int SMPI_MPI_Reduce_scatter(void* sendbuf, void* recvbuf, int *recvcounts,
+                            MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+  // FIXME: Suboptimal implementation
+  int retval = MPI_SUCCESS;
+  int count = 0;
+  int root = 0;
+  int i, rank;
+  int* displs;
+
+  smpi_bench_end();
+  rank = smpi_mpi_comm_rank(comm);
+  displs = xbt_new(int, comm->size);
+  for(i = 0; i < comm->size; i++) {
+    count += recvcounts[i];
+    displs[i] = 0;
+  }
+  retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+  retval = SMPI_MPI_Scatterv(recvbuf, recvcounts, displs, datatype, recvbuf, recvcounts[rank], datatype, root, comm);
+  xbt_free(displs);
+  smpi_bench_begin();
+  return retval;
+}
+
+int SMPI_MPI_Allgather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+                       void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+{
+  // FIXME: Suboptimal implementation
+  int root = 0;
+  int retval;
+
+  smpi_bench_end();
+  retval = SMPI_MPI_Gather(sendbuf, sendcount, sendtype,
+                           recvbuf, recvcount, recvtype, root, comm);
+  if(retval == MPI_SUCCESS) {
+    retval = SMPI_MPI_Bcast(recvbuf, recvcount, recvtype, root, comm);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int SMPI_MPI_Allgatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+                        void* recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype,
+                        MPI_Comm comm)
+{
+  // FIXME: Suboptimal implementation
+  int root = 0;
+  int last, retval;
+
+  smpi_bench_end();
+  retval = SMPI_MPI_Gatherv(sendbuf, sendcount, sendtype,
+                            recvbuf, recvcounts, displs, recvtype, root, comm);
+  if(retval == MPI_SUCCESS) {
+    last = comm->size - 1;
+    retval = SMPI_MPI_Bcast(recvbuf, displs[last] + recvcounts[last], recvtype, root, comm);
+  }
+  smpi_bench_begin();
+  return retval;
+}
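
The header comment added at the top of this file describes a three-level layering: a user-level SMPI_MPI_* entry point that brackets the call with smpi_bench_end()/smpi_bench_begin(), an internal smpi_mpi_* wrapper that selects an algorithm, and the algorithm itself (here nary_tree_bcast() in smpi_coll.c). Below is a minimal sketch of that layering for broadcast, assuming the SimGrid-internal headers and helpers already used in this diff (private.h, smpi_coll_private.h, smpi_bench_end/begin, nary_tree_bcast); the SMPI_MPI_Bcast() body is an illustration of the pattern, not the code of this commit.

#include "private.h"            /* smpi_bench_end(), smpi_bench_begin(), ... */
#include "smpi_coll_private.h"  /* nary_tree_bcast() */

/* Internal wrapper: decides which broadcast implementation to call.
 * This commit makes it call an n-ary tree broadcast with arity 2. */
int smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
{
  return nary_tree_bcast(buf, count, datatype, root, comm, 2 /* arity */);
}

/* User-level entry point (illustrative sketch): stop the benchmarking clock,
 * delegate to the internal wrapper, then resume timing user computation. */
int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
{
  int retval;

  smpi_bench_end();
  retval = smpi_mpi_bcast(buf, count, datatype, root, comm);
  smpi_bench_begin();
  return retval;
}

Every user-level function touched by this diff (Wait, Waitall, Waitany, Wtime, and the new Gather/Gatherv/Scatterv/Alltoallv family) follows the same smpi_bench_end()/smpi_bench_begin() bracketing, so that time spent inside MPI calls is simulated rather than measured.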
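The new SMPI_MPI_Scatterv() and SMPI_MPI_Gatherv() index their buffers directly with displs[i] interpreted as a byte offset, so internal callers such as SMPI_MPI_Reduce_scatter() need displacement tables expressed in bytes. A hypothetical helper, not part of this commit, showing how such a table could be built from per-rank element counts (type_size standing in for datatype->size):

/* Hypothetical helper (illustration only): fill displs[] with the byte offset
 * of each rank's block, given per-rank element counts and the datatype size. */
static void smpi_build_byte_displs(int size, const int *counts, int type_size,
                                   int *displs)
{
  int i;
  int offset = 0;

  for (i = 0; i < size; i++) {
    displs[i] = offset;               /* start of rank i's block, in bytes */
    offset += counts[i] * type_size;  /* next block starts after this one  */
  }
}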