- int retval = MPI_SUCCESS;
- //retval = flat_tree_bcast(buf, count, datatype, root, comm);
- retval = nary_tree_bcast(buf, count, datatype, root, comm, 2 );
- return retval;
-}
-
-/**
- * Bcast user entry point
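- * Delegates to smpi_mpi_bcast(), which currently uses an n-ary broadcast tree of arity 2.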
- **/
-int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
- MPI_Comm comm)
-{
- int retval = MPI_SUCCESS;
-
- smpi_bench_end();
- retval = smpi_mpi_bcast(buf, count, datatype, root, comm);
- smpi_bench_begin();
-
- return retval;
-}
-
-
-
-#ifdef DEBUG_REDUCE
-/**
- * debugging helper function
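- * (note: msg is freed by the callee, hence the xbt_strdup()/bprintf() calls at the call sites)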
- **/
-static void print_buffer_int(void *buf, int len, char *msg, int rank)
-{
- int tmp, *v;
- printf("**[%d] %s: ", rank, msg);
- for (tmp = 0; tmp < len; tmp++) {
- v = buf;
- printf("[%d]", v[tmp]);
- }
- printf("\n");
- free(msg);
-}
-static void print_buffer_double(void *buf, int len, char *msg, int rank)
-{
- int tmp;
- double *v;
- printf("**[%d] %s: ", rank, msg);
- for (tmp = 0; tmp < len; tmp++) {
- v = buf;
- printf("[%lf]", v[tmp]);
- }
- printf("\n");
- free(msg);
-}
-
-
-#endif
-/**
- * MPI_Reduce internal level
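- * Linear algorithm: every non-root rank sends its buffer to the root; the root
- * posts one irecv per sender into a temporary buffer and folds each received
- * buffer into recvbuf with 'op' as soon as the corresponding request completes.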
- **/
-int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
- MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
-{
- int retval = MPI_SUCCESS;
- int rank;
- int size;
- int i;
- int tag = 0;
- smpi_mpi_request_t *requests;
- smpi_mpi_request_t request;
-
- smpi_bench_end();
-
- rank = smpi_mpi_comm_rank(comm);
- size = comm->size;
-
- if (rank != root) { // if i am not ROOT, simply send my buffer to root
-
-#ifdef DEBUG_REDUCE
- print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
-#endif
- retval = smpi_create_request(sendbuf, count, datatype, rank, root, tag, comm,
- &request);
- smpi_mpi_isend(request);
- smpi_mpi_wait(request, MPI_STATUS_IGNORE);
- xbt_mallocator_release(smpi_global->request_mallocator, request);
-
- } else {
- // i am the ROOT: wait for all buffers by creating one request per sender
- int src;
- requests = xbt_malloc((size-1) * sizeof(smpi_mpi_request_t));
-
- void **tmpbufs = xbt_malloc((size-1) * sizeof(void *));
- for (i = 0; i < size-1; i++) {
- // we need 1 buffer per request to store intermediate receptions
- tmpbufs[i] = xbt_malloc(count * datatype->size);
- }
- // root: initialize the recv buffer with my own send buffer
- memcpy(recvbuf, sendbuf, count * datatype->size * sizeof(char));
-
- // I cannot use 'request->forward = size-1;' (which would propagate size-1 receive requests),
- // because the reduction operation must be applied as soon as any receive request completes.
- for (i = 0; i < size-1; i++) {
- // reminder: for smpi_create_request() the src is always the process sending.
- src = i < root ? i : i + 1;
- retval = smpi_create_request(tmpbufs[i], count, datatype,
- src, root, tag, comm, &(requests[i]));
- if (NULL != requests[i] && MPI_SUCCESS == retval) {
- smpi_mpi_irecv(requests[i]);
- }
- }
- // now, wait for completion of all irecv's.
- for (i = 0; i < size-1; i++) {
- int index = MPI_UNDEFINED;
- smpi_mpi_waitany( size-1, requests, &index, MPI_STATUS_IGNORE);
-#ifdef DEBUG_REDUCE
- printf ("MPI_Waitany() unblocked: root received (completes req[index=%d])\n",index);
- print_buffer_int(tmpbufs[index], count, bprintf("tmpbufs[index=%d] (value received)", index),
- rank);
-#endif
-
- // arg 2 is modified
- op->func(tmpbufs[index], recvbuf, &count, &datatype);
-#ifdef DEBUG_REDUCE
- print_buffer_int(recvbuf, count, xbt_strdup("rcvbuf"), rank);
-#endif
- xbt_free(tmpbufs[index]);
- /* FIXME: with the following line, it generates an
- * [xbt_ex/CRITICAL] Conditional list not empty 162518800.
- */
- // xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
- }
- xbt_free(requests);
- xbt_free(tmpbufs);
- }
- return retval;
-}
-
-/**
- * MPI_Reduce user entry point
- **/
-int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
- MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
-{
- int retval = MPI_SUCCESS;
-
- smpi_bench_end();
-
- retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
-
- smpi_bench_begin();
- return retval;
-}
-
-
-
-/**
- * MPI_Allreduce
- *
- * Same as MPI_REDUCE except that the result appears in the receive buffer of all the group members.
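- * Implemented as a reduce towards an arbitrary root (rank 0) followed by a broadcast of the result.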
- **/
-int SMPI_MPI_Allreduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
- MPI_Op op, MPI_Comm comm )
-{
- int retval = MPI_SUCCESS;
- int root = 0; // arbitrary choice
-
- smpi_bench_end();
-
- retval = smpi_mpi_reduce( sendbuf, recvbuf, count, datatype, op, root, comm);
- if (MPI_SUCCESS != retval)
- return(retval);
-
- retval = smpi_mpi_bcast( recvbuf, count, datatype, root, comm);
-
- smpi_bench_begin();
- return( retval );
-}
-
-
-/**
- * MPI_Scatter user entry point
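- * The root isends one sendcount-element chunk of sendbuf to every other rank,
- * copies its own chunk directly into recvbuf, and then waits for all sends to complete.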
- **/
-int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
- void *recvbuf, int recvcount, MPI_Datatype recvtype,
- int root, MPI_Comm comm)
-{
- int retval = MPI_SUCCESS;
- int i;
- int cnt=0;
- int rank;
- int tag=0;
- char *cptr; // to manipulate the void * buffers
- smpi_mpi_request_t *requests;
- smpi_mpi_request_t request;
- smpi_mpi_status_t status;
-
-
- smpi_bench_end();
-
- rank = smpi_mpi_comm_rank(comm);
-
- requests = xbt_malloc((comm->size-1) * sizeof(smpi_mpi_request_t));
- if (rank == root) {
- // i am the root: distribute my sendbuf
- //print_buffer_int(sendbuf, comm->size, xbt_strdup("rcvbuf"), rank);
- cptr = sendbuf;
- for (i=0; i < comm->size; i++) {
- if ( i!=root ) { // send to processes ...
-
- retval = smpi_create_request((void *)cptr, sendcount,
- datatype, root, i, tag, comm, &(requests[cnt]));
- if (NULL != requests[cnt] && MPI_SUCCESS == retval) {
- smpi_mpi_isend(requests[cnt]);
- }
- cnt++;
- }
- else { // ... except if it's me.
- memcpy(recvbuf, (void *)cptr, recvcount*recvtype->size*sizeof(char));
- }
- cptr += sendcount*datatype->size;
- }
- for(i=0; i<cnt; i++) { // wait for send to complete
- /* FIXME: using waitall() here would be slightly better */
- smpi_mpi_wait(requests[i], &status);
- xbt_mallocator_release(smpi_global->request_mallocator, requests[i]);
+ return PMPI_Get_count(status, datatype, count);
+}
+
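+/* The MPI_* functions below are thin wrappers that simply forward to their
+ * PMPI_* counterparts, following the standard MPI profiling-interface layering. */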
+int MPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, int* size) {
+ return PMPI_Pack_size(incount, datatype, comm, size);
+}
+
+int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) {
+ return PMPI_Cart_coords(comm, rank, maxdims, coords);
+}
+
+int MPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periods, int reorder, MPI_Comm* comm_cart) {
+ return PMPI_Cart_create(comm_old, ndims, dims, periods, reorder, comm_cart);
+}
+
+int MPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) {
+ return PMPI_Cart_get(comm, maxdims, dims, periods, coords);
+}
+
+int MPI_Cart_map(MPI_Comm comm_old, int ndims, int* dims, int* periods, int* newrank) {
+ return PMPI_Cart_map(comm_old, ndims, dims, periods, newrank);
+}
+
+int MPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) {
+ return PMPI_Cart_rank(comm, coords, rank);
+}
+
+int MPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* dest) {
+ return PMPI_Cart_shift(comm, direction, displ, source, dest);
+}
+
+int MPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) {
+ return PMPI_Cart_sub(comm, remain_dims, comm_new);
+}
+
+int MPI_Cartdim_get(MPI_Comm comm, int* ndims) {
+ return PMPI_Cartdim_get(comm, ndims);
+}
+
+int MPI_Graph_create(MPI_Comm comm_old, int nnodes, int* index, int* edges, int reorder, MPI_Comm* comm_graph) {
+ return PMPI_Graph_create(comm_old, nnodes, index, edges, reorder, comm_graph);
+}
+
+int MPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, int* index, int* edges) {
+ return PMPI_Graph_get(comm, maxindex, maxedges, index, edges);
+}
+
+int MPI_Graph_map(MPI_Comm comm_old, int nnodes, int* index, int* edges, int* newrank) {
+ return PMPI_Graph_map(comm_old, nnodes, index, edges, newrank);
+}
+
+int MPI_Graph_neighbors(MPI_Comm comm, int rank, int maxneighbors, int* neighbors) {
+ return PMPI_Graph_neighbors(comm, rank, maxneighbors, neighbors);
+}
+
+int MPI_Graph_neighbors_count(MPI_Comm comm, int rank, int* nneighbors) {
+ return PMPI_Graph_neighbors_count(comm, rank, nneighbors);
+}
+
+int MPI_Graphdims_get(MPI_Comm comm, int* nnodes, int* nedges) {
+ return PMPI_Graphdims_get(comm, nnodes, nedges);
+}
+
+int MPI_Topo_test(MPI_Comm comm, int* top_type) {
+ return PMPI_Topo_test(comm, top_type);
+}
+
+int MPI_Error_class(int errorcode, int* errorclass) {
+ return PMPI_Error_class(errorcode, errorclass);
+}
+
+int MPI_Errhandler_create(MPI_Handler_function* function, MPI_Errhandler* errhandler) {
+ return PMPI_Errhandler_create(function, errhandler);
+}
+
+int MPI_Errhandler_free(MPI_Errhandler* errhandler) {
+ return PMPI_Errhandler_free(errhandler);
+}
+
+int MPI_Errhandler_get(MPI_Comm comm, MPI_Errhandler* errhandler) {
+ return PMPI_Errhandler_get(comm, errhandler);
+}
+
+int MPI_Error_string(int errorcode, char* string, int* resultlen) {
+ return PMPI_Error_string(errorcode, string, resultlen);
+}
+
+int MPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler errhandler) {
+ return PMPI_Errhandler_set(comm, errhandler);
+}
+
+int MPI_Type_contiguous(int count, MPI_Datatype old_type, MPI_Datatype* newtype) {
+ return PMPI_Type_contiguous(count, old_type, newtype);
+}
+
+int MPI_Cancel(MPI_Request* request) {
+ return PMPI_Cancel(request);
+}
+
+int MPI_Buffer_attach(void* buffer, int size) {
+ return PMPI_Buffer_attach(buffer, size);
+}
+
+int MPI_Buffer_detach(void* buffer, int* size) {
+ return PMPI_Buffer_detach(buffer, size);
+}
+
+int MPI_Testsome(int incount, MPI_Request* requests, int* outcount, int* indices, MPI_Status* statuses) {
+ return PMPI_Testsome(incount, requests, outcount, indices, statuses);
+}
+
+int MPI_Comm_test_inter(MPI_Comm comm, int* flag) {
+ return PMPI_Comm_test_inter(comm, flag);
+}