return retval;
}
+
+/**
+ * MPI_Sendrecv internal level
+ **/
+int smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
+ MPI_Comm comm, MPI_Status *status)
+{
+int rank;
+int retval = MPI_SUCCESS;
+smpi_mpi_request_t srequest;
+smpi_mpi_request_t rrequest;
+
+ rank = smpi_mpi_comm_rank(comm);
+
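+  /* both operations are posted as nonblocking requests before either
+   * wait, so two ranks doing a Sendrecv towards each other cannot
+   * deadlock */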
+ /* send */
+  retval = smpi_create_request(sendbuf, sendcount, sendtype,
+                        rank, dest, sendtag,
+                        comm, &srequest);
+  if (MPI_SUCCESS != retval)
+            return(retval);
+  smpi_mpi_isend(srequest);
+
+ /* recv */
+  retval = smpi_create_request(recvbuf, recvcount, recvtype,
+                        source, rank, recvtag,
+                        comm, &rrequest);
+  if (MPI_SUCCESS != retval)
+            return(retval);
+  smpi_mpi_irecv(rrequest);
+
+  smpi_mpi_wait(srequest, MPI_STATUS_IGNORE);
+  /* the status returned by MPI_Sendrecv describes the receive */
+  smpi_mpi_wait(rrequest, status);
+
+ return(retval);
+}
+/**
+ * MPI_Sendrecv user entry point
+ **/
+int SMPI_MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
+ MPI_Comm comm, MPI_Status *status)
+{
+int retval = MPI_SUCCESS;
+
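+  /* suspend the timing of the application's own code while the
+     communication is simulated; smpi_bench_begin() resumes it */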
+ smpi_bench_end();
+  retval = smpi_mpi_sendrecv( sendbuf, sendcount, sendtype, dest, sendtag,
+                        recvbuf, recvcount, recvtype, source, recvtag,
+                        comm, status);
+  smpi_bench_begin();
+
+  return retval;
+}
+
/**
* MPI_Wait and friends
**/
return(retval);
}
+/**
+ * Bcast internal level
+ **/
+int smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
+ MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
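+  /* an arity of 2 turns nary_tree_bcast() into a binary-tree broadcast;
+     flat_tree_bcast() is the simpler linear alternative */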
+  retval = nary_tree_bcast(buf, count, datatype, root, comm, 2);
+ return retval;
+}
/**
* Bcast user entry point
int retval = MPI_SUCCESS;
smpi_bench_end();
-
- //retval = flat_tree_bcast(buf, count, datatype, root, comm);
- retval = nary_tree_bcast(buf, count, datatype, root, comm, 2 );
-
+   retval = smpi_mpi_bcast(buf, count, datatype, root, comm);
smpi_bench_begin();
return retval;
/**
* debugging helper function
**/
-static void print_buffer_int(void *buf, int len, const char *msg, int rank)
+static void print_buffer_int(void *buf, int len, char *msg, int rank)
{
int tmp, *v;
printf("**[%d] %s: ", rank, msg);
printf("\n");
free(msg);
}
-#endif
+static void print_buffer_double(void *buf, int len, char *msg, int rank)
+{
+  int tmp;
+  double *v = buf;    /* view the untyped buffer as an array of doubles */
+  printf("**[%d] %s: ", rank, msg);
+  for (tmp = 0; tmp < len; tmp++) {
+    printf("[%lf]", v[tmp]);
+  }
+ printf("\n");
+ free(msg);
+}
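+/* Note: both print_buffer_* helpers free(msg), so callers must pass a
+   heap-allocated string, e.g. xbt_strdup("sndbuf") as done below. */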
+
+#endif
/**
- * MPI_Reduce
+ * MPI_Reduce internal level
**/
-int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
+int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{
int retval = MPI_SUCCESS;
#ifdef DEBUG_REDUCE
print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
#endif
- retval =
- smpi_create_request(sendbuf, count, datatype, rank, root, tag, comm,
+ retval = smpi_create_request(sendbuf, count, datatype, rank, root, tag, comm,
&request);
smpi_mpi_isend(request);
smpi_mpi_wait(request, MPI_STATUS_IGNORE);
xbt_free(requests);
xbt_free(tmpbufs);
}
- smpi_bench_begin();
return retval;
}
+/**
+ * MPI_Reduce user entry point
+ **/
+int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
+ MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
+{
+int retval = MPI_SUCCESS;
+
+ smpi_bench_end();
+
+ retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+
+ smpi_bench_begin();
+ return retval;
+}
+
/**
* MPI_Allreduce
*
* Same as MPI_REDUCE except that the result appears in the receive buffer of all the group members.
**/
int SMPI_MPI_Allreduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
- MPI_Op op, MPI_Comm comm );
-int SMPI_MPI_Allreduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
- MPI_Op op, MPI_Comm comm )
+ MPI_Op op, MPI_Comm comm )
{
- int retval = MPI_SUCCESS;
- int root=1; // arbitrary choice
+int retval = MPI_SUCCESS;
+int root=0; // arbitrary choice
- smpi_bench_end();
+ smpi_bench_end();
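+
+  /* Allreduce = Reduce to an arbitrary root, then Bcast of the reduced
+     result from that root to every rank */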
- retval = SMPI_MPI_Reduce( sendbuf, recvbuf, count, datatype, op, root, comm);
- if (MPI_SUCCESS != retval)
- return(retval);
+  retval = smpi_mpi_reduce( sendbuf, recvbuf, count, datatype, op, root, comm);
+  if (MPI_SUCCESS != retval) {
+            smpi_bench_begin();  /* resume benchmarking even on failure */
+            return(retval);
+  }
- retval = SMPI_MPI_Bcast( sendbuf, count, datatype, root, comm);
- smpi_bench_begin();
- return( retval );
+  /* broadcast from recvbuf, where the reduced result is, not sendbuf */
+  retval = smpi_mpi_bcast( recvbuf, count, datatype, root, comm);
+
+  smpi_bench_begin();
+  return( retval );
}
/**
* MPI_Scatter user entry point
**/
-//int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
-// void *recvbuf, int recvcount, MPI_Datatype recvtype,int root,
-// MPI_Comm comm);
int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm)
int cnt=0;
int rank;
int tag=0;
- char *cbuf; // to manipulate the void * buffers
+ char *cptr; // to manipulate the void * buffers
smpi_mpi_request_t *requests;
smpi_mpi_request_t request;
smpi_mpi_status_t status;
requests = xbt_malloc((comm->size-1) * sizeof(smpi_mpi_request_t));
if (rank == root) {
- // i am the root: distribute my sendbuf
- for (i=0; i < comm->size; i++) {
- cbuf = sendbuf;
- cbuf += i*sendcount*datatype->size;
- if ( i!=root ) { // send to processes ...
-
- retval = smpi_create_request((void *)cbuf, sendcount,
- datatype, root, i, tag, comm, &(requests[cnt++]));
- if (NULL != requests[cnt] && MPI_SUCCESS == retval) {
- if (MPI_SUCCESS == retval) {
- smpi_mpi_isend(requests[cnt]);
- }
+ // i am the root: distribute my sendbuf
+ cptr = sendbuf;
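+           /* cptr walks sendbuf one block of sendcount elements per
+              destination rank */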
+ for (i=0; i < comm->size; i++) {
+ if ( i!=root ) { // send to processes ...
+
+ retval = smpi_create_request((void *)cptr, sendcount,
+ datatype, root, i, tag, comm, &(requests[cnt]));
+             if (NULL != requests[cnt] && MPI_SUCCESS == retval) {
+                   smpi_mpi_isend(requests[cnt]);
}
cnt++;
}
else { // ... except if it's me.
- memcpy(recvbuf, (void *)cbuf, recvcount*recvtype->size*sizeof(char));
+ memcpy(recvbuf, (void *)cptr, recvcount*recvtype->size*sizeof(char));
}
+ cptr += sendcount*datatype->size;
}
for(i=0; i<cnt; i++) { // wait for send to complete
/* FIXME: waitall() should be slightly better */
}
+/**
+ * MPI_Alltoall user entry point
+ *
+ * Algorithm selection follows the tuned collectives of Open MPI
+ * (as of v1.2.7), see ompi/mca/coll/tuned/coll_tuned_module.c
+ **/
+int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
+ void *recvbuf, int recvcount, MPI_Datatype recvtype,
+ MPI_Comm comm)
+{
+ int retval = MPI_SUCCESS;
+ int block_dsize;
+
+ smpi_bench_end();
+
+   block_dsize = sendtype->size * sendcount;
+
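+   /* selection heuristic from Open MPI's tuned collectives: Bruck's
+    * algorithm for small blocks on larger communicators, pairwise
+    * exchange otherwise */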
+ if ((block_dsize < 200) && (comm->size > 12)) {
+      retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype, comm);
+
+   } else if (block_dsize < 3000) {
+      /* TODO: Open MPI uses a basic linear algorithm in this range;
+         switch to smpi_coll_tuned_alltoall_basic_linear() once it is
+         usable. Fall back to pairwise exchange for now. */
+      retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, sendtype,
+                        recvbuf, recvcount, recvtype, comm);
+ } else {
+
+      retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype, comm);
+ }
+
+ smpi_bench_begin();
+
+ return retval;
+}