+ }
+ smpi_mpi_wait(request, &status);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ }
+ xbt_free(requests);
+
+ smpi_bench_begin();
+
+ return retval;
+}
+
+/**
+ * MPI_Alltoall user entry point
+ *
+ * Uses the algorithm selection logic of OpenMPI (version 1.2.7 and later); see
+ * ompi/mca/coll/tuned/coll_tuned_module.c
+ **/
+int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype,
+ MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int block_dsize;
+ int rank;
+
+ smpi_bench_end();
+
+ rank = smpi_mpi_comm_rank(comm);
+ block_dsize = datatype->size * sendcount;
+ DEBUG2("<%d> optimized alltoall() called. Block size sent to each rank: %d bytes.",rank,block_dsize);
+
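+ // Heuristic borrowed from OpenMPI's tuned module: Bruck for small blocks
+ // on communicators larger than 12 ranks, basic linear send/recv for
+ // medium blocks, pairwise exchange for large blocks.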
+ if ((block_dsize < 200) && (comm->size > 12)) {
+ retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, datatype,
+ recvbuf, recvcount, recvtype, comm);
+
+ } else if (block_dsize < 3000) {
+ retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, datatype,
+ recvbuf, recvcount, recvtype, comm);
+ } else {
+ retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, datatype,
+ recvbuf, recvcount, recvtype, comm);
+ }
+
+ smpi_bench_begin();
+
+ return retval;
+}
+
+/**
+ * MPI_Alltoallv user entry point
+ *
+ * As in OpenMPI, alltoallv is not optimized
+ * ompi/mca/coll/basic/coll_basic_alltoallv.c
+ **/
+int SMPI_MPI_Alltoallv(void *sendbuf, int *scounts, int *sdisps, MPI_Datatype datatype,
+ void *recvbuf, int *rcounts, int *rdisps, MPI_Datatype recvtype,
+ MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int rank;
+
+ smpi_bench_end();
+ rank = smpi_mpi_comm_rank(comm);
+ DEBUG1("<%d> basic alltoallv() called.",rank);
+
+ retval = smpi_coll_basic_alltoallv(sendbuf, scounts, sdisps, datatype,
+ recvbuf, rcounts, rdisps, recvtype,
+ comm);
+ smpi_bench_begin();
+ return retval;
+}
+
+ // used by comm_split to sort (rank, key) pairs: ascending by key, ties broken by rank
+int smpi_compare_rankkeys(const void *a, const void *b);
+int smpi_compare_rankkeys(const void *a, const void *b)
+{
+ int *x = (int *) a;
+ int *y = (int *) b;
+
+ if (x[1] < y[1])
+ return -1;
+
+ if (x[1] == y[1]) {
+ if (x[0] < y[0])
+ return -1;
+ if (x[0] == y[0])
+ return 0;
+ return 1;
+ }
+
+ return 1;
+}
+
+int SMPI_MPI_Comm_split(MPI_Comm comm, int color, int key,
+ MPI_Comm * comm_out)
+{
+ int retval = MPI_SUCCESS;
+
+ int index, rank;
+ smpi_mpi_request_t request;
+ int colorkey[2];
+ smpi_mpi_status_t status;
+
+ smpi_bench_end();
+
+ // FIXME: need to test parameters
+
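+ // Protocol: each non-root rank sends its (color, key) pair to rank 0;
+ // rank 0 groups ranks by color, sorts each group by key, builds one new
+ // communicator per group, and sends every member its new rank.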
+ index = smpi_process_index();
+ rank = comm->index_to_rank_map[index];
+
+ // default output
+ *comm_out = NULL;
+
+ // root node does most of the real work
+ if (0 == rank) {
+ int colormap[comm->size];
+ int keymap[comm->size];
+ int rankkeymap[comm->size * 2];
+ int i, j;
+ smpi_mpi_communicator_t tempcomm = NULL;
+ int count;
+ int indextmp;
+ int current_color;
+
+ colormap[0] = color;
+ keymap[0] = key;
+
+ // FIXME: use scatter/gather or similar instead of individual comms
+ for (i = 1; i < comm->size; i++) {
+ retval = smpi_create_request(colorkey, 2, MPI_INT, MPI_ANY_SOURCE,
+ rank, MPI_ANY_TAG, comm, &request);
+ smpi_mpi_irecv(request);
+ smpi_mpi_wait(request, &status);
+ colormap[status.MPI_SOURCE] = colorkey[0];
+ keymap[status.MPI_SOURCE] = colorkey[1];
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ }
+
+ for (i = 0; i < comm->size; i++) {
+ if (MPI_UNDEFINED == colormap[i]) {
+ continue;
+ }
+ // make a list of the nodes with the current color and sort it by keys;
+ // save the color first, since colormap[i] itself is cleared just below
+ current_color = colormap[i];
+ count = 0;
+ for (j = i; j < comm->size; j++) {
+ if (current_color == colormap[j]) {
+ colormap[j] = MPI_UNDEFINED;
+ rankkeymap[count * 2] = j;
+ rankkeymap[count * 2 + 1] = keymap[j];
+ count++;
+ }
+ }
+ qsort(rankkeymap, count, sizeof(int) * 2, &smpi_compare_rankkeys);
+
+ // new communicator
+ tempcomm = xbt_new(s_smpi_mpi_communicator_t, 1);
+ tempcomm->barrier_count = 0;
+ tempcomm->size = count;
+ tempcomm->barrier_mutex = SIMIX_mutex_init();
+ tempcomm->barrier_cond = SIMIX_cond_init();
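+ // two-way mapping between ranks in the new communicator and global
+ // process indices; -1 marks processes that are not members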
+ tempcomm->rank_to_index_map = xbt_new(int, count);
+ tempcomm->index_to_rank_map = xbt_new(int, smpi_global->process_count);
+ for (j = 0; j < smpi_global->process_count; j++) {
+ tempcomm->index_to_rank_map[j] = -1;
+ }
+ for (j = 0; j < count; j++) {
+ indextmp = comm->rank_to_index_map[rankkeymap[j * 2]];
+ tempcomm->rank_to_index_map[j] = indextmp;
+ tempcomm->index_to_rank_map[indextmp] = j;
+ }
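+ // send each member its rank in the new communicator; original rank 0
+ // (this process) installs tempcomm directly instead of messaging itself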
+ for (j = 0; j < count; j++) {
+ if (rankkeymap[j * 2]) {
+ retval = smpi_create_request(&j, 1, MPI_INT, 0,
+ rankkeymap[j * 2], 0, comm, &request);
+ request->data = tempcomm;
+ smpi_mpi_isend(request);
+ smpi_mpi_wait(request, &status);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ } else {
+ *comm_out = tempcomm;
+ }
+ }
+ }
+ } else {
+ colorkey[0] = color;
+ colorkey[1] = key;
+ retval = smpi_create_request(colorkey, 2, MPI_INT, rank, 0, 0, comm,
+ &request);
+ smpi_mpi_isend(request);
+ smpi_mpi_wait(request, &status);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ if (MPI_UNDEFINED != color) {
+ retval = smpi_create_request(colorkey, 1, MPI_INT, 0, rank, 0, comm,
+ &request);
+ smpi_mpi_irecv(request);
+ smpi_mpi_wait(request, &status);
+ *comm_out = request->data;
+ }
+ }
+
+ smpi_bench_begin();
+
+ return retval;
+}
+
+double SMPI_MPI_Wtime(void)
+{
+ double time;
+
+ smpi_bench_end();
+ time = SIMIX_get_clock();
+ smpi_bench_begin();
+ return time;
+}
+
+int SMPI_MPI_Gather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype,
+ int root, MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int system_tag = 666;
+ int rank, size;
+
+ smpi_bench_end();
+ rank = smpi_mpi_comm_rank(comm);
+ size = comm->size;
+ if(rank != root) {
+ // Send buffer to root
+ smpi_mpi_request_t request;
+
+ retval = smpi_create_request(sendbuf, sendcount, sendtype,
+ rank, root, system_tag, comm, &request);
+ smpi_mpi_isend(request);
+ smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ } else {
+ // Receive buffers from senders
+ int src;
+ smpi_mpi_request_t* requests;
+
+ requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+ for(src = 0; src < size; src++) {
+ if(src == root) {
+ // Local copy from root
+ memcpy(&((char*)recvbuf)[src*recvcount*recvtype->size],
+ sendbuf, sendcount*sendtype->size*sizeof(char));
+ } else {
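+ // requests has size-1 entries: ranks above root shift down one slot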
+ int index = src < root ? src : src - 1;
+ retval = smpi_create_request(&((char*)recvbuf)[src*recvcount*recvtype->size],
+ recvcount, recvtype, src, root, system_tag,
+ comm, &requests[index]);
+ if(NULL != requests[index] && MPI_SUCCESS == retval) {
+ smpi_mpi_irecv(requests[index]);
+ }
+ }
+ }
+ // Wait for completion of irecv's.
+ for(src = 0; src < size - 1; src++) {
+ int index = MPI_UNDEFINED;
+ smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+ }
+ xbt_free(requests);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int SMPI_MPI_Gatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ void* recvbuf, int* recvcounts, int* displs, MPI_Datatype recvtype,
+ int root, MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int system_tag = 666;
+ int rank, size;
+
+ smpi_bench_end();
+ rank = smpi_mpi_comm_rank(comm);
+ size = comm->size;
+ if(rank != root) {
+ // Send buffer to root
+ smpi_mpi_request_t request;
+
+ retval = smpi_create_request(sendbuf, sendcount, sendtype,
+ rank, root, system_tag, comm, &request);
+ smpi_mpi_isend(request);
+ smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ } else {
+ // Receive buffers from senders
+ int src;
+ smpi_mpi_request_t* requests;
+
+ requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+ for(src = 0; src < size; src++) {
+ if(src == root) {
+ // Local copy from root
+ memcpy(&((char*)recvbuf)[displs[src]*recvtype->size],
+ sendbuf, sendcount*sendtype->size*sizeof(char));
+ } else {
+ int index = src < root ? src : src - 1;
+ retval = smpi_create_request(&((char*)recvbuf)[displs[src]*recvtype->size],
+ recvcounts[src], recvtype, src, root, system_tag,
+ comm, &requests[index]);
+ if(NULL != requests[index] && MPI_SUCCESS == retval) {
+ smpi_mpi_irecv(requests[index]);
+ }
+ }
+ }
+ // Wait for completion of irecv's.
+ for(src = 0; src < size - 1; src++) {
+ int index = MPI_UNDEFINED;
+ smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+ }
+ xbt_free(requests);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int SMPI_MPI_Scatterv(void* sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype,
+ int root, MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int system_tag = 666;
+ int rank, size;
+
+ smpi_bench_end();
+ rank = smpi_mpi_comm_rank(comm);
+ size = comm->size;
+ if(rank != root) {
+ // Receive buffer from root
+ smpi_mpi_request_t request;
+
+ retval = smpi_create_request(recvbuf, recvcount, recvtype,
+ root, rank, system_tag, comm, &request);
+ smpi_mpi_irecv(request);
+ smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ } else {
+ // Send buffers to receivers
+ int dst;
+ smpi_mpi_request_t* requests;
+
+ requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+ for(dst = 0; dst < size; dst++) {
+ if(dst == root) {
+ // Local copy from root
+ memcpy(recvbuf, &((char*)sendbuf)[displs[dst]*sendtype->size],
+ sendcounts[dst]*sendtype->size*sizeof(char));
+ } else {
+ int index = dst < root ? dst : dst - 1;
+ retval = smpi_create_request(&((char*)sendbuf)[displs[dst]*sendtype->size], sendcounts[dst], sendtype,
+ root, dst, system_tag, comm, &requests[index]);
+ if(NULL != requests[index] && MPI_SUCCESS == retval) {
+ smpi_mpi_isend(requests[index]);
+ }
+ }
+ }
+ // Wait for completion of isend's.
+ for(dst = 0; dst < size - 1; dst++) {
+ int index = MPI_UNDEFINED;
+ smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+ }
+ xbt_free(requests);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int SMPI_MPI_Reduce_scatter(void* sendbuf, void* recvbuf, int *recvcounts,
+ MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+{
+ // FIXME: Suboptimal implementation
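+ // reduce the complete vector onto root, then scatter each rank its block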
+ int retval = MPI_SUCCESS;
+ int count = 0;
+ int root = 0;
+ int i, rank;
+ int* displs;
+
+ smpi_bench_end();
+ rank = smpi_mpi_comm_rank(comm);
+ displs = xbt_new(int, comm->size);
+ // displacements are the prefix sums of recvcounts, in elements
+ for(i = 0; i < comm->size; i++) {
+ displs[i] = count;
+ count += recvcounts[i];
+ }
+ retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+ if(retval == MPI_SUCCESS) {
+ retval = SMPI_MPI_Scatterv(recvbuf, recvcounts, displs, datatype, recvbuf, recvcounts[rank], datatype, root, comm);
+ }
+ xbt_free(displs);
+ smpi_bench_begin();
+ return retval;
+}
+
+int SMPI_MPI_Allgather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+{
+ // FIXME: Suboptimal implementation
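+ // gather every block onto rank 0, then broadcast the assembled buffer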
+ int root = 0;
+ int retval;
+
+ smpi_bench_end();
+ retval = SMPI_MPI_Gather(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype, root, comm);
+ if(retval == MPI_SUCCESS) {
+ retval = SMPI_MPI_Bcast(recvbuf, comm->size * recvcount, recvtype, root, comm);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int SMPI_MPI_Allgatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
+ void* recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype,
+ MPI_Comm comm)
+{
+ // FIXME: Suboptimal implementation
+ int root = 0;
+ int last, retval;
+
+ smpi_bench_end();
+ retval = SMPI_MPI_Gatherv(sendbuf, sendcount, sendtype,
+ recvbuf, recvcounts, displs, recvtype, root, comm);
+ if(retval == MPI_SUCCESS) {
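+ // the end of the last block gives the total number of gathered elements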
+ last = comm->size - 1;
+ retval = SMPI_MPI_Bcast(recvbuf, displs[last] + recvcounts[last], recvtype, root, comm);
+ }
+ smpi_bench_begin();
+ return retval;