tree = xbt_new(struct s_proc_tree, 1);
tree->PROCTREE_A = arity;
tree->isRoot = 0;
tree->numChildren = 0;
tree->child = xbt_new(int, arity);
for(i = 0; i < arity; i++) {
  tree->child[i] = -1; /* -1 marks an unused child slot */
}
if(!tree->isRoot) { /* wait for the data from my parent in the tree */
  smpi_mpi_recv(buf, count, datatype, tree->parent, system_tag + rank, comm, MPI_STATUS_IGNORE);
}
requests = xbt_new(MPI_Request, tree->numChildren);
- DEBUG2("<%d> creates %d requests (1 per child)\n", rank, tree->numChildren);
+ DEBUG2("<%d> creates %d requests (1 per child)", rank, tree->numChildren);
/* initiates sends to ranks lower in the tree */
for(i = 0; i < tree->numChildren; i++) {
  if(tree->child[i] == -1) {
    requests[i] = MPI_REQUEST_NULL;
  } else {
    requests[i] = smpi_mpi_isend(buf, count, datatype, tree->child[i], system_tag + tree->child[i], comm);
  }
}
// everyone receives as many messages as it has children
requests = xbt_new(MPI_Request, tree->numChildren);
- DEBUG2("<%d> creates %d requests (1 per child)\n", rank, tree->numChildren);
+ DEBUG2("<%d> creates %d requests (1 per child)", rank, tree->numChildren);
for(i = 0; i < tree->numChildren; i++) {
  if(tree->child[i] == -1) {
    requests[i] = MPI_REQUEST_NULL;
  } else {
    requests[i] = smpi_mpi_irecv(buf, count, datatype, tree->child[i], system_tag + tree->child[i], comm);
  }
}
smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
xbt_free(requests);
}
/**
* bcast over a binary, ternary, or arbitrary n-ary tree
**/
void nary_tree_bcast(void* buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm, int arity) {
proc_tree_t tree = alloc_tree(arity);
int rank, size;
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
}
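/* The wrapper above is truncated in this excerpt. As an illustration of the
 * scheme its doc comment describes, here is a minimal, self-contained n-ary
 * tree broadcast built only from the SMPI calls already used in this patch.
 * The function name, the tag value, and the virtual-rank numbering are
 * assumptions for the sketch, not code from this patch. */
static void nary_tree_bcast_sketch(void* buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm, int arity) {
  int system_tag = 999; /* assumed tag value */
  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  int vrank = (rank - root + size) % size; /* renumber the ranks so the root is 0 */
  int i;
  MPI_Request* requests;

  if(vrank != 0) {
    /* wait for the data from my parent in the tree */
    smpi_mpi_recv(buf, count, datatype, (root + (vrank - 1) / arity) % size, system_tag, comm, MPI_STATUS_IGNORE);
  }
  /* forward the data to my (at most 'arity') children */
  requests = xbt_new(MPI_Request, arity);
  for(i = 0; i < arity; i++) {
    int child = vrank * arity + i + 1;
    requests[i] = (child < size) ? smpi_mpi_isend(buf, count, datatype, (root + child) % size, system_tag, comm) : MPI_REQUEST_NULL;
  }
  smpi_mpi_waitall(arity, requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}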
/**
* barrier over a binary, ternary, or arbitrary n-ary tree
**/
void nary_tree_barrier(MPI_Comm comm, int arity) {
proc_tree_t tree = alloc_tree(arity);
int rank, size;
char dummy = '$';
}
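/* Likewise truncated above. A minimal sketch of an n-ary tree barrier: an
 * upward "anti-broadcast" of one dummy byte (every node waits for all of its
 * children, then signals its parent) followed by a broadcast back down. It
 * assumes a blocking smpi_mpi_send() counterpart to the smpi_mpi_recv() used
 * in this patch; the tag value and the rank-0 root are assumptions as well. */
static void nary_tree_barrier_sketch(MPI_Comm comm, int arity) {
  int system_tag = 999; /* assumed tag value */
  char dummy = '$';
  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  int parent = (rank - 1) / arity;
  int i, child;

  /* upward phase: collect one byte from each child, then notify the parent */
  for(i = 0; i < arity; i++) {
    child = rank * arity + i + 1;
    if(child < size)
      smpi_mpi_recv(&dummy, 1, MPI_CHAR, child, system_tag, comm, MPI_STATUS_IGNORE);
  }
  if(rank != 0)
    smpi_mpi_send(&dummy, 1, MPI_CHAR, parent, system_tag, comm);
  /* downward phase: wait for the release from the parent, then release the children */
  if(rank != 0)
    smpi_mpi_recv(&dummy, 1, MPI_CHAR, parent, system_tag, comm, MPI_STATUS_IGNORE);
  for(i = 0; i < arity; i++) {
    child = rank * arity + i + 1;
    if(child < size)
      smpi_mpi_send(&dummy, 1, MPI_CHAR, child, system_tag, comm);
  }
}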
/**
* Alltoall Bruck
*
* OpenMPI calls this routine when the message size sent to each rank is under 2000 bytes and the communicator size is under 12
**/
int smpi_coll_tuned_alltoall_bruck(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) {
- DEBUG0("coll:tuned:alltoall_intra_bruck ** NOT IMPLEMENTED YET**");
+ int system_tag = 777;
+ int i, rank, size, err, count;
+ MPI_Aint lb, sendextent, recvextent;
+ MPI_Request* requests;
+
+ // FIXME: check implementation
+ rank = smpi_comm_rank(comm);
+ size = smpi_comm_size(comm);
+ DEBUG1("<%d> algorithm alltoall_bruck() called.", rank);
+ err = smpi_datatype_extent(sendtype, &lb, &sendextent);
+ err = smpi_datatype_extent(recvtype, &lb, &recvextent);
+ /* Local copy from self */
+ err = smpi_datatype_copy(&((char*)sendbuf)[rank * sendcount * sendextent], sendcount, sendtype, &((char*)recvbuf)[rank * recvcount * recvextent], recvcount, recvtype);
+ if(err == MPI_SUCCESS && size > 1) {
+ /* Initiate all send/recv to/from others. */
+ requests = xbt_new(MPI_Request, 2 * (size - 1));
+ count = 0;
+ /* Create all receives that will be posted first */
+ for(i = 0; i < size; ++i) {
+ if(i == rank) {
+ DEBUG3("<%d> skip request creation [src = %d, recvcount = %d]", rank, i, recvcount);
+ continue;
+ }
+ requests[count] = smpi_mpi_irecv(&((char*)recvbuf)[i * recvcount * recvextent], recvcount, recvtype, i, system_tag, comm);
+ count++;
+ }
+ /* Now create all sends */
+ for(i = 0; i < size; ++i) {
+ if(i == rank) {
+ DEBUG3("<%d> skip request creation [dst = %d, sendcount = %d]", rank, i, sendcount);
+ continue;
+ }
+ requests[count] = smpi_mpi_isend(&((char*)sendbuf)[i * sendcount * sendextent], sendcount, sendtype, i, system_tag, comm);
+ count++;
+ }
+ /* Wait for them all. If there's an error, note that we don't
+ * care what the error was -- just that there *was* an error. The
+ * PML will finish all requests, even if one or more of them fail.
+ * i.e., by the end of this call, all the requests are free-able.
+ * So free them anyway -- even if there was an error, and return
+ * the error after we free everything.
+ */
+ DEBUG2("<%d> wait for %d requests", rank, count);
+ smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
+ xbt_free(requests);
+ }
return MPI_SUCCESS;
}
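/* The doc comments in this file quote OpenMPI's selection heuristics: Bruck for
 * per-rank messages under 2000 bytes on fewer than 12 ranks, pairwise for
 * messages over 3000 bytes. A hypothetical dispatcher making that policy
 * explicit -- the function name, the fallback choice, and the use of the type
 * extent as the per-rank message size are assumptions, not part of this patch: */
int smpi_coll_tuned_alltoall_pairwise(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm); /* forward declaration; defined below */

static int alltoall_select_sketch(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) {
  int size = smpi_comm_size(comm);
  MPI_Aint lb, extent;

  smpi_datatype_extent(sendtype, &lb, &extent);
  if(sendcount * extent < 2000 && size < 12) {
    return smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
  }
  return smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
}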
requests[count] = smpi_mpi_irecv(&((char*)recvbuf)[i * recvinc], recvcount, recvtype, i, system_tag, comm);
count++;
}
/* Now post all sends in reverse order
* - We would like to minimize the search time through the message queue
*   when messages actually arrive in the order in which they were posted.
* TODO: check the previous assertion
*/
/**
* Alltoall pairwise
*
* this algorithm performs size steps (1<=s<=size) and
* at each step s, each process p sends to and receives from a distinct remote process
* size=5 : s=1: 4->0->1, 0->1->2, 1->2->3, ...
* s=2: 3->0->2, 4->1->3, 0->2->4, 1->3->0 , 2->4->1
- * ....
+ * ....
* OpenMPI calls this routine when the message size sent to each rank exceeds 3000 bytes
**/
int smpi_coll_tuned_alltoall_pairwise(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) {
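  /* The body is truncated in this excerpt. A minimal sketch of the pairwise
   * exchange described above, built from the calls used elsewhere in this
   * patch (the tag value is reused from alltoall_bruck; the real
   * implementation may differ): at step s, rank p sends its block for
   * (p + s) % size while receiving the block destined to it from
   * (p - s + size) % size. */
  int system_tag = 777; /* assumed: same tag scheme as alltoall_bruck above */
  int rank, size, step, sendto, recvfrom, err;
  MPI_Aint lb, sendextent, recvextent;
  MPI_Request request;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  err = smpi_datatype_extent(sendtype, &lb, &sendextent);
  err = smpi_datatype_extent(recvtype, &lb, &recvextent);
  /* step 0 is the local copy; steps 1..size-1 pair each rank with one distinct partner in each direction */
  err = smpi_datatype_copy(&((char*)sendbuf)[rank * sendcount * sendextent], sendcount, sendtype, &((char*)recvbuf)[rank * recvcount * recvextent], recvcount, recvtype);
  for(step = 1; step < size && err == MPI_SUCCESS; step++) {
    sendto = (rank + step) % size;
    recvfrom = (rank - step + size) % size;
    /* post the send, block on the matching receive, then complete the send */
    request = smpi_mpi_isend(&((char*)sendbuf)[sendto * sendcount * sendextent], sendcount, sendtype, sendto, system_tag, comm);
    smpi_mpi_recv(&((char*)recvbuf)[recvfrom * recvcount * recvextent], recvcount, recvtype, recvfrom, system_tag, comm, MPI_STATUS_IGNORE);
    smpi_mpi_waitall(1, &request, MPI_STATUS_IGNORE);
  }
  return err;
}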