+/**
+ * Bcast internal level
+ **/
+int smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
+ MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int rank = smpi_mpi_comm_rank(comm);
+
+ DEBUG1("<%d> entered smpi_mpi_bcast(). Calls nary_tree_bcast()",rank);
+ //retval = flat_tree_bcast(buf, count, datatype, root, comm);
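+ // arity 2 selects a binary broadcast tree; the flat-tree variant above is kept commented out for reference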
+ retval = nary_tree_bcast(buf, count, datatype, root, comm, 2 );
+ return retval;
+}
+
+/**
+ * Bcast user entry point
+ **/
+int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
+ MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+
+ smpi_bench_end();
+ retval = smpi_mpi_bcast(buf, count, datatype, root, comm);
+ smpi_bench_begin();
+
+ return retval;
+}
+
+
+
+#ifdef DEBUG_REDUCE
+/**
+ * debugging helper function
+ **/
+static void print_buffer_int(void *buf, int len, char *msg, int rank)
+{
+ int tmp;
+ int *v = buf;
+ printf("**[%d] %s: ", rank, msg);
+ for (tmp = 0; tmp < len; tmp++) {
+ printf("[%d]", v[tmp]);
+ }
+ printf("\n");
+ free(msg);
+}
+static void print_buffer_double(void *buf, int len, char *msg, int rank)
+{
+ int tmp;
+ double *v = buf;
+ printf("**[%d] %s: ", rank, msg);
+ for (tmp = 0; tmp < len; tmp++) {
+ printf("[%lf]", v[tmp]);
+ }
+ printf("\n");
+ free(msg);
+}
+
+
+#endif
+/**
+ * MPI_Reduce internal level
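+ *
+ * Root posts one non-blocking receive per non-root rank into a temporary
+ * buffer and applies 'op' as each reception completes (via waitany), so
+ * values are combined in message-arrival order rather than rank order.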
+ **/
+int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
+ MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int rank;
+ int size;
+ int i;
+ int system_tag = 666;
+ smpi_mpi_request_t *requests;
+ smpi_mpi_request_t request;
+
+ rank = smpi_mpi_comm_rank(comm);
+ size = comm->size;
+ DEBUG1("<%d> entered smpi_mpi_reduce()",rank);
+
+ if (rank != root) { // if i am not ROOT, simply send my buffer to root
+
+#ifdef DEBUG_REDUCE
+ print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
+#endif
+ retval = smpi_create_request(sendbuf, count, datatype, rank, root, system_tag, comm,
+ &request);
+ smpi_mpi_isend(request);
+ smpi_mpi_wait(request, MPI_STATUS_IGNORE);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+
+ } else {
+ // I am the ROOT: wait for all buffers by creating one request per sender
+ int src;
+ requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
+
+ void **tmpbufs = xbt_malloc((size-1) * sizeof(void *));
+ for (i = 0; i < size-1; i++) {
+ // we need 1 buffer per request to store intermediate receptions
+ tmpbufs[i] = xbt_malloc(count * datatype->size);
+ }
+ // root: initialize recv buf with my own snd buf
+ memcpy(recvbuf, sendbuf, count * datatype->size * sizeof(char));
+
+ // I cannot use 'request->forward = size-1;' (which would propagate size-1 receive reqs)
+ // because we must apply 'op' as soon as any one receive request completes.
+ for (i = 0; i < size-1; i++) {
+ // reminder: for smpi_create_request() the src is always the process sending.
+ src = i < root ? i : i + 1;
+ retval = smpi_create_request(tmpbufs[i], count, datatype,
+ src, root, system_tag, comm, &(requests[i]));
+ if (NULL != requests[i] && MPI_SUCCESS == retval) {
+ smpi_mpi_irecv(requests[i]);
+ }
+ }
+ // now, wait for completion of all irecv's.
+ for (i = 0; i < size-1; i++) {
+ int index = MPI_UNDEFINED;
+ smpi_mpi_waitany( size-1, requests, &index, MPI_STATUS_IGNORE);
+ DEBUG3("<%d> waitany() unblocked by reception (completes request[%d]) (%d reqs remaining)",
+ rank,index,size-i-2);
+#ifdef DEBUG_REDUCE
+ print_buffer_int(tmpbufs[index], count, bprintf("tmpbufs[index=%d] (value received)", index),
+ rank);
+#endif
+
+ // apply the reduction: arg 2 (recvbuf) is the accumulator and is modified in place
+ op->func(tmpbufs[index], recvbuf, &count, &datatype);
+#ifdef DEBUG_REDUCE
+ print_buffer_int(recvbuf, count, xbt_strdup("rcvbuf"), rank);
+#endif
+ xbt_free(tmpbufs[index]);
+ /* FIXME: with the following line, it generates an
+ * [xbt_ex/CRITICAL] Conditional list not empty 162518800.
+ */
+ // xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
+ }
+ xbt_free(requests);
+ xbt_free(tmpbufs);
+ }
+ return retval;
+}
+
+/**
+ * MPI_Reduce user entry point
+ **/
+int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
+ MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+
+ smpi_bench_end();
+
+ retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+
+ smpi_bench_begin();
+ return retval;
+}
+
+
+
+/**
+ * MPI_Allreduce
+ *
+ * Same as MPI_REDUCE except that the result appears in the receive buffer of all the group members.
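+ *
+ * Implemented as a reduce to rank 0 followed by a broadcast of the result.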
+ **/
+int SMPI_MPI_Allreduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
+ MPI_Op op, MPI_Comm comm )
+{
+ int retval = MPI_SUCCESS;
+ int root = 0; // arbitrary choice
+
+ smpi_bench_end();
+
+ retval = smpi_mpi_reduce( sendbuf, recvbuf, count, datatype, op, root, comm);
+ if (MPI_SUCCESS == retval) {
+ // the reduced result is in recvbuf on root: broadcast it to every rank
+ retval = smpi_mpi_bcast( recvbuf, count, datatype, root, comm);
+ }
+
+ smpi_bench_begin();
+ return( retval );
+}
+
+
+/**
+ * MPI_Scatter user entry point
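+ *
+ * Root sends the i-th block of sendcount elements to rank i (copying its own
+ * block directly into recvbuf); every other rank posts a single receive from root.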
+ **/
+int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype,
+ int root, MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int i;
+ int cnt=0;
+ int rank;
+ int tag=0;
+ char *cptr; // to manipulate the void * buffers
+ smpi_mpi_request_t *requests;
+ smpi_mpi_request_t request;
+ smpi_mpi_status_t status;
+
+
+ smpi_bench_end();
+
+ rank = smpi_mpi_comm_rank(comm);
+
+ requests = xbt_malloc((comm->size-1) * sizeof(smpi_mpi_request_t));
+ if (rank == root) {
+ // i am the root: distribute my sendbuf
+ //print_buffer_int(sendbuf, comm->size, xbt_strdup("rcvbuf"), rank);
+ cptr = sendbuf;
+ for (i=0; i < comm->size; i++) {
+ if ( i!=root ) { // send to processes ...
+
+ retval = smpi_create_request((void *)cptr, sendcount,
+ datatype, root, i, tag, comm, &(requests[cnt]));
+ if (NULL != requests[cnt] && MPI_SUCCESS == retval) {
+ smpi_mpi_isend(requests[cnt]);
+ }
+ cnt++;
+ }
+ else { // ... except if it's me.
+ memcpy(recvbuf, (void *)cptr, recvcount*recvtype->size*sizeof(char));
+ }
+ cptr += sendcount*datatype->size;
+ }
+ for(i=0; i<cnt; i++) { // wait for send to complete
+ /* FIXME: waitall() should be slightly better */
+ smpi_mpi_wait(requests[i], &status);
+ xbt_mallocator_release(smpi_global->request_mallocator, requests[i]);
+
+ }
+ }
+ else { // I am a non-root process: wait for data from the root
+ retval = smpi_create_request(recvbuf,recvcount,
+ recvtype, root, rank, tag, comm, &request);
+ if (NULL != request && MPI_SUCCESS == retval) {
+ smpi_mpi_irecv(request);
+ }
+ smpi_mpi_wait(request, &status);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ }
+ xbt_free(requests);
+
+ smpi_bench_begin();
+
+ return retval;
+}
+
+
+/**
+ * MPI_Alltoall user entry point
+ *
+ * Uses the algorithm selection logic of Open MPI (version 1.2.7 or later), see
+ * ompi/mca/coll/tuned/coll_tuned_module.c
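+ *
+ * Algorithm selection, mirroring the thresholds below: Bruck for blocks under
+ * 200 bytes on communicators larger than 12 ranks, basic linear for blocks
+ * under 3000 bytes, pairwise exchange otherwise.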
+ **/
+int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype,
+ MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int block_dsize;
+ int rank;
+
+ smpi_bench_end();
+
+ rank = smpi_mpi_comm_rank(comm);
+ block_dsize = datatype->size * sendcount;
+ DEBUG2("<%d> optimized alltoall() called. Block size sent to each rank: %d bytes.",rank,block_dsize);
+
+ if ((block_dsize < 200) && (comm->size > 12)) {
+ retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, datatype,
+ recvbuf, recvcount, recvtype, comm);
+
+ } else if (block_dsize < 3000) {
+ retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, datatype,
+ recvbuf, recvcount, recvtype, comm);
+ } else {
+
+ retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, datatype,
+ recvbuf, recvcount, recvtype, comm);
+ }
+
+ smpi_bench_begin();
+
+ return retval;
+}
+
+
+
+
+// used by comm_split to sort ranks based on key values
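+// Entries are (rank, key) pairs of ints: sort by key, break ties by original rank.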
+int smpi_compare_rankkeys(const void *a, const void *b);
+int smpi_compare_rankkeys(const void *a, const void *b)
+{
+ int *x = (int *) a;
+ int *y = (int *) b;
+
+ if (x[1] < y[1])
+ return -1;
+
+ if (x[1] == y[1]) {
+ if (x[0] < y[0])
+ return -1;
+ if (x[0] == y[0])
+ return 0;
+ return 1;
+ }
+
+ return 1;
+}
+
+int SMPI_MPI_Comm_split(MPI_Comm comm, int color, int key,
+ MPI_Comm * comm_out)
+{
+ int retval = MPI_SUCCESS;
+
+ int index, rank;
+ smpi_mpi_request_t request;
+ int colorkey[2];
+ smpi_mpi_status_t status;
+
+ smpi_bench_end();
+
+ // FIXME: need to test parameters
+
+ index = smpi_process_index();
+ rank = comm->index_to_rank_map[index];
+
+ // default output
+ *comm_out = NULL;
+
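+ // Protocol: every non-root rank sends its (color, key) pair to rank 0;
+ // rank 0 groups ranks by color, sorts each group by key, builds each new
+ // communicator and sends every member its new rank (the communicator
+ // itself is carried in request->data).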
+ // root node does most of the real work
+ if (0 == rank) {
+ int colormap[comm->size];
+ int keymap[comm->size];
+ int rankkeymap[comm->size * 2];
+ int i, j;
+ smpi_mpi_communicator_t tempcomm = NULL;
+ int count;
+ int indextmp;
+
+ colormap[0] = color;
+ keymap[0] = key;
+
+ // FIXME: use scatter/gather or similar instead of individual comms
+ for (i = 1; i < comm->size; i++) {
+ retval = smpi_create_request(colorkey, 2, MPI_INT, MPI_ANY_SOURCE,
+ rank, MPI_ANY_TAG, comm, &request);
+ smpi_mpi_irecv(request);
+ smpi_mpi_wait(request, &status);
+ colormap[status.MPI_SOURCE] = colorkey[0];
+ keymap[status.MPI_SOURCE] = colorkey[1];
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ }
+
+ for (i = 0; i < comm->size; i++) {
+ if (MPI_UNDEFINED == colormap[i]) {
+ continue;
+ }
+ // make a list of nodes with current color and sort by keys
+ count = 0;
+ for (j = i; j < comm->size; j++) {
+ if (colormap[i] == colormap[j]) {
+ colormap[j] = MPI_UNDEFINED;
+ rankkeymap[count * 2] = j;
+ rankkeymap[count * 2 + 1] = keymap[j];
+ count++;
+ }
+ }
+ qsort(rankkeymap, count, sizeof(int) * 2, &smpi_compare_rankkeys);
+
+ // new communicator
+ tempcomm = xbt_new(s_smpi_mpi_communicator_t, 1);
+ tempcomm->barrier_count = 0;
+ tempcomm->size = count;
+ tempcomm->barrier_mutex = SIMIX_mutex_init();
+ tempcomm->barrier_cond = SIMIX_cond_init();
+ tempcomm->rank_to_index_map = xbt_new(int, count);
+ tempcomm->index_to_rank_map = xbt_new(int, smpi_global->process_count);
+ for (j = 0; j < smpi_global->process_count; j++) {
+ tempcomm->index_to_rank_map[j] = -1;
+ }
+ for (j = 0; j < count; j++) {
+ indextmp = comm->rank_to_index_map[rankkeymap[j * 2]];
+ tempcomm->rank_to_index_map[j] = indextmp;
+ tempcomm->index_to_rank_map[indextmp] = j;
+ }
+ for (j = 0; j < count; j++) {
+ if (rankkeymap[j * 2]) {
+ retval = smpi_create_request(&j, 1, MPI_INT, 0,
+ rankkeymap[j * 2], 0, comm, &request);
+ request->data = tempcomm;
+ smpi_mpi_isend(request);
+ smpi_mpi_wait(request, &status);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ } else {
+ *comm_out = tempcomm;
+ }
+ }
+ }
+ } else {
+ colorkey[0] = color;
+ colorkey[1] = key;
+ retval = smpi_create_request(colorkey, 2, MPI_INT, rank, 0, 0, comm,
+ &request);
+ smpi_mpi_isend(request);
+ smpi_mpi_wait(request, &status);
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ if (MPI_UNDEFINED != color) {
+ retval = smpi_create_request(colorkey, 1, MPI_INT, 0, rank, 0, comm,
+ &request);
+ smpi_mpi_irecv(request);
+ smpi_mpi_wait(request, &status);
+ *comm_out = request->data;
+ xbt_mallocator_release(smpi_global->request_mallocator, request);
+ }
+ }
+
+ smpi_bench_begin();
+
+ return retval;
+}
+
+double SMPI_MPI_Wtime(void)
+{
+ return (SIMIX_get_clock());
+}
+
+