/* smpi_coll.c -- various optimized routines for collectives */

/* Copyright (c) 2009, 2010. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "private.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi,
                                "Logging specific to SMPI (coll)");
struct s_proc_tree {
  int PROCTREE_A;      /* arity of the tree */
  int numChildren;
  int *child;          /* ranks of the children; -1 marks an unused slot */
  int parent;          /* rank of the parent; meaningless on the root */
  int isRoot;
};
typedef struct s_proc_tree *proc_tree_t;
/* allocate and initialize a tree descriptor for the local process */
static proc_tree_t alloc_tree(int arity)
{
  proc_tree_t tree = xbt_new(struct s_proc_tree, 1);
  int i;

  tree->PROCTREE_A = arity;
  tree->isRoot = 0;
  tree->numChildren = 0;
  tree->child = xbt_new(int, arity);
  for (i = 0; i < arity; i++)
    tree->child[i] = -1;      /* no child in this slot yet */
  tree->parent = -1;
  return tree;
}
static void free_tree(proc_tree_t tree)
{
  xbt_free(tree->child);
  xbt_free(tree);
}
/**
 * Build the tree depending on a process rank (index) and the group size (extent)
 * @param root the rank of the tree root
 * @param rank the rank of the calling process
 * @param size the total number of processes
 **/
static void build_tree(int root, int rank, int size, proc_tree_t * tree)
{
  int i;
  int index = (rank - root + size) % size;   /* my position relative to the root */
  int firstChildIdx = index * (*tree)->PROCTREE_A + 1;

  for (i = 0; i < (*tree)->PROCTREE_A && firstChildIdx + i < size; i++) {
    (*tree)->child[i] = (firstChildIdx + i + root) % size;
    (*tree)->numChildren++;
  }
  (*tree)->isRoot = (rank == root);
  if (!(*tree)->isRoot)
    (*tree)->parent = (((index - 1) / (*tree)->PROCTREE_A) + root) % size;
}
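/* Worked example (added for illustration, not part of the original code):
 * with root = 0, arity = 2 and size = 7, build_tree() yields the usual
 * binary layout:
 *   rank 0 (index 0, firstChildIdx 1) -> children 1, 2
 *   rank 1 (index 1, firstChildIdx 3) -> children 3, 4
 *   rank 2 (index 2, firstChildIdx 5) -> children 5, 6
 *   ranks 3..6 have firstChildIdx >= size and are leaves
 * A non-zero root rotates the same shape: with root = 2, rank 2 becomes the
 * root and its children are ranks 3 and 4.
 */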
/* broadcast: receive the data from my parent, then forward it to my children */
static void tree_bcast(void *buf, int count, MPI_Datatype datatype,
                       MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = 999;  // used negative int but smpi_create_request() declares this illegal (to be checked)
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  /* wait for data from my parent in the tree */
  if (!tree->isRoot) {
    XBT_DEBUG("<%d> tree_bcast(): I am not root: recv from %d, tag=%d",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_recv(buf, count, datatype, tree->parent, system_tag + rank,
                  comm, MPI_STATUS_IGNORE);
  }
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  /* initiate sends to ranks lower in the tree */
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> send to <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_isend_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  for (i = 0; i < tree->numChildren; i++) {
    xbt_free(requests[i]);
  }
  xbt_free(requests);
}
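/* Note on the tag scheme used above: every message of this downward walk is
 * tagged system_tag + <rank of the receiver>, so sends aimed at different
 * children, and at different levels of the tree, can never be confused even
 * though they all share the same base tag.
 */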
/* anti-broadcast: the reverse walk, from the leaves up towards the root */
static void tree_antibcast(void *buf, int count, MPI_Datatype datatype,
                           MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = 999;  // used negative int but smpi_create_request() declares this illegal (to be checked)
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  // everyone sends to its parent, except root
  if (!tree->isRoot) {
    XBT_DEBUG("<%d> tree_antibcast(): I am not root: send to %d, tag=%d",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_send(buf, count, datatype, tree->parent, system_tag + rank,
                  comm);
  }
  // everyone receives as many messages as it has children
  // (they all land in the same buffer, which is fine for the barrier below)
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> recv from <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_irecv_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  for (i = 0; i < tree->numChildren; i++) {
    xbt_free(requests[i]);
  }
  xbt_free(requests);
}
/**
 * bcast with a binary, ternary, or whatever tree ..
 **/
void nary_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                     MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(root, rank, size, &tree);
  tree_bcast(buf, count, datatype, comm, tree);
  free_tree(tree);
}
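/* Usage sketch (illustrative only; this call site is an assumption, not code
 * from this file): a bcast entry point could select this implementation with
 * a fixed arity, e.g.
 *
 *   nary_tree_bcast(buf, count, datatype, root, comm, 4);   // 4-ary tree
 *
 * A larger arity flattens the tree (fewer hops from the root), at the price
 * of more serialized sends per inner node.
 */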
/**
 * barrier with a binary, ternary, or whatever tree ..
 **/
void nary_tree_barrier(MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;
  char dummy = '$';            /* token payload; its value is irrelevant */

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(0, rank, size, &tree);
  tree_antibcast(&dummy, 1, MPI_CHAR, comm, tree);
  tree_bcast(&dummy, 1, MPI_CHAR, comm, tree);
  free_tree(tree);
}
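/* The barrier composes the two walks above: tree_antibcast() sends a dummy
 * token up towards rank 0, then tree_bcast() propagates the release back
 * down. Note that tree_antibcast() signals its parent *before* waiting for
 * its own children, so only direct children are truly accounted for; a
 * strict barrier would receive from the children first.
 * Illustrative call (not from this file): nary_tree_barrier(comm, 4);
 */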
/**
 * Alltoall Bruck
 *
 * Openmpi calls this routine when the message size sent to each rank is smaller than 2000 bytes and size < 12
 * FIXME: uh, check smpi_pmpi again, but this routine is called for size > 12, not
 * less...
 **/
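/* Note (added observation): despite its name, the implementation below does
 * not perform the logarithmic number of communication rounds of Bruck's
 * algorithm; it simply posts one irecv and one isend per remote peer, which
 * is presumably what the FIXME above is about.
 */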
int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int system_tag = 777;
  int i, rank, size, err, count;
  MPI_Aint lb = 0;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  // FIXME: check implementation
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                         sendcount, sendtype,
                         (char *)recvbuf + rank * recvcount * recvext,
                         recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]",
                  rank, i, recvcount);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]",
                  rank, i, sendcount);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      xbt_free(requests[i]);   /* release the persistent requests, as tree_bcast() does */
    }
    xbt_free(requests);
  }
  return MPI_SUCCESS;
}
/**
 * Alltoall basic_linear
 **/
int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
                                          MPI_Datatype sendtype,
                                          void *recvbuf, int recvcount,
                                          MPI_Datatype recvtype,
                                          MPI_Comm comm)
{
  int system_tag = 888;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  /* Initialize. */
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* simple optimization: the block for myself is copied locally */
  err = smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                           sendcount, sendtype,
                           (char *)recvbuf + rank * recvcount * recvext,
                           recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    /* Post all receives first -- a simple optimization */
    count = 0;
    for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now post all sends in reverse order
     * - We would like to minimize the search time through the message queue
     *   when messages actually arrive in the order in which they were posted.
     * TODO: check the previous assertion
     */
    for (i = (rank + size - 1) % size; i != rank; i = (i + size - 1) % size) {
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      xbt_free(requests[i]);   /* release the persistent requests, as tree_bcast() does */
    }
    xbt_free(requests);
  }
  return err;
}
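/* Worked example of the posting order (added for illustration): with
 * size = 4, rank 1 posts its receives from ranks 2, 3, 0, in that order.
 * On the send side, rank 2 sends to 1 first, rank 3 sends to 1 second and
 * rank 0 third, so messages should reach rank 1 in the very order its
 * receives were posted -- the matching-queue argument made in the comment
 * above.
 */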
/**
 * Alltoall pairwise
 *
 * this algorithm performs size steps (1 <= s <= size) and
 * at each step s, a process p sends to and receives from a unique, distinct remote process
 * size = 5 : s = 1:  4->0->1, 0->1->2, 1->2->3, ...
 *            s = 2:  3->0->2, 4->1->3, 0->2->4, 1->3->0, 2->4->1
 *            ...
 * Openmpi calls this routine when the message size sent to each rank is greater than 3000 bytes
 **/
int smpi_coll_tuned_alltoall_pairwise(void *sendbuf, int sendcount,
                                      MPI_Datatype sendtype, void *recvbuf,
                                      int recvcount, MPI_Datatype recvtype,
                                      MPI_Comm comm)
{
  int system_tag = 999;
  int rank, size, step, sendto, recvfrom, sendsize, recvsize;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_pairwise() called.", rank);
  sendsize = smpi_datatype_size(sendtype);
  recvsize = smpi_datatype_size(recvtype);
  /* Perform pairwise exchange - starting from 1 so the local copy is last */
  for (step = 1; step < size + 1; step++) {
    /* who do we talk to in this step? */
    sendto = (rank + step) % size;
    recvfrom = (rank + size - step) % size;
    /* send and receive */
    smpi_mpi_sendrecv(&((char *) sendbuf)[sendto * sendsize * sendcount],
                      sendcount, sendtype, sendto, system_tag,
                      &((char *) recvbuf)[recvfrom * recvsize * recvcount],
                      recvcount, recvtype, recvfrom, system_tag, comm,
                      MPI_STATUS_IGNORE);
  }
  return MPI_SUCCESS;
}
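/* Step-by-step view for one rank (added for illustration): with size = 4,
 * rank 2 does
 *   step 1: send to 3, recv from 1
 *   step 2: send to 0, recv from 0
 *   step 3: send to 1, recv from 3
 *   step 4: send to 2, recv from 2   <- self sendrecv, i.e. the local copy
 */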
/**
 * Alltoallv basic
 **/
int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts,
                              int *senddisps, MPI_Datatype sendtype,
                              void *recvbuf, int *recvcounts,
                              int *recvdisps, MPI_Datatype recvtype,
                              MPI_Comm comm)
{
  int system_tag = 889;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  /* Initialize. */
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + senddisps[rank] * sendext,
                         sendcounts[rank], sendtype,
                         (char *)recvbuf + recvdisps[rank] * recvext,
                         recvcounts[rank], recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank || recvcounts[i] == 0) {
        XBT_DEBUG
            ("<%d> skip request creation [src = %d, recvcounts[src] = %d]",
             rank, i, recvcounts[i]);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + recvdisps[i] * recvext,
                          recvcounts[i], recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank || sendcounts[i] == 0) {
        XBT_DEBUG
            ("<%d> skip request creation [dst = %d, sendcounts[dst] = %d]",
             rank, i, sendcounts[i]);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + senddisps[i] * sendext,
                          sendcounts[i], sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      xbt_free(requests[i]);   /* release the persistent requests, as tree_bcast() does */
    }
    xbt_free(requests);
  }
  return err;
}
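/* Note on units (added): as with MPI_Alltoallv, the displacements count
 * elements of the corresponding datatype -- the code above scales them by
 * the datatype extent -- not bytes.
 */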