/* smpi_coll.c -- various optimized routines for collectives */

/* Copyright (c) 2009, 2010. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include <stdio.h>
#include <string.h>

#include "private.h"
#include "colls/colls.h"
s_mpi_coll_description_t mpi_coll_allgather_description[] = {
  {"default",
   "allgather default collective",
   smpi_mpi_allgather},
  COLL_ALLGATHERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_allgatherv_description[] = {
  {"default",
   "allgatherv default collective",
   smpi_mpi_allgatherv},
  COLL_ALLGATHERVS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_allreduce_description[] = {
  {"default",
   "allreduce default collective",
   smpi_mpi_allreduce},
  COLL_ALLREDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_alltoall_description[] = {
  {"ompi",
   "Ompi alltoall default collective",
   smpi_coll_tuned_alltoall_ompi},
  COLL_ALLTOALLS(COLL_DESCRIPTION, COLL_COMMA),
  {"bruck",
   "Alltoall Bruck (SG) collective",
   smpi_coll_tuned_alltoall_bruck},
  {"basic_linear",
   "Alltoall basic linear (SG) collective",
   smpi_coll_tuned_alltoall_basic_linear},
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_alltoallv_description[] = {
  {"default",
   "Ompi alltoallv default collective",
   smpi_coll_basic_alltoallv},
  COLL_ALLTOALLVS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_bcast_description[] = {
  {"default",
   "bcast default collective",
   smpi_mpi_bcast},
  COLL_BCASTS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_reduce_description[] = {
  {"default",
   "reduce default collective",
   smpi_mpi_reduce},
  COLL_REDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
/** Displays the long description of all registered models */
void coll_help(const char *category, s_mpi_coll_description_t * table)
{
  int i;

  printf("Long description of the %s models accepted by this simulator:\n",
         category);
  for (i = 0; table[i].name; i++)
    printf("  %s: %s\n", table[i].name, table[i].description);
}
/** Returns the index of the model with the given name in the table,
 *  or dies while listing the valid ones */
int find_coll_description(s_mpi_coll_description_t * table,
                          const char *name)
{
  int i;
  char *name_list = NULL;

  for (i = 0; table[i].name; i++)
    if (!strcmp(name, table[i].name)) {
      return i;
    }
  name_list = strdup(table[0].name);
  for (i = 1; table[i].name; i++) {
    /* keep the assignment: xbt_realloc may move the buffer */
    name_list = xbt_realloc(name_list,
                            strlen(name_list) + strlen(table[i].name) + 3);
    strcat(name_list, ", ");
    strcat(name_list, table[i].name);
  }
  xbt_die("Model '%s' is invalid! Valid models are: %s.", name, name_list);
}
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi,
                                "Logging specific to SMPI (coll)");
int (*mpi_coll_allgather_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allgatherv_fun)(void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allreduce_fun)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_alltoall_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_alltoallv_fun)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm);
int (*mpi_coll_bcast_fun)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com);
int (*mpi_coll_reduce_fun)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
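/* How these pieces fit together, as a minimal sketch: the selector (which
 * lives outside this file) looks the user-chosen name up in a description
 * table and stores the matching implementation in the corresponding global
 * function pointer. Assuming the function-pointer field of
 * s_mpi_coll_description_t is named coll, selecting "bruck" for alltoall
 * would look like:
 *
 *   int idx = find_coll_description(mpi_coll_alltoall_description, "bruck");
 *   mpi_coll_alltoall_fun =
 *       (int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype,
 *                MPI_Comm)) mpi_coll_alltoall_description[idx].coll;
 *
 * MPI_Alltoall() can then dispatch through mpi_coll_alltoall_fun without
 * knowing which algorithm was picked. */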
struct s_proc_tree {
  int PROCTREE_A;               /* arity of the tree */
  int numChildren;
  int *child;                   /* child[i] == -1 means no such child */
  int parent;
  int root;
  int isRoot;
};
typedef struct s_proc_tree *proc_tree_t;

/**
 * Alloc and init a new tree of the given arity
 */
static proc_tree_t alloc_tree(int arity)
{
  proc_tree_t tree;
  int i;

  tree = xbt_new(struct s_proc_tree, 1);
  tree->PROCTREE_A = arity;
  tree->isRoot = 0;
  tree->numChildren = 0;
  tree->child = xbt_new(int, arity);
  for (i = 0; i < arity; i++) {
    tree->child[i] = -1;
  }
  tree->root = -1;
  tree->parent = -1;
  return tree;
}
/**
 * Free the tree and its children array
 */
static void free_tree(proc_tree_t tree)
{
  xbt_free(tree->child);
  xbt_free(tree);
}
/**
 * Build the tree depending on a process rank (index) and the group size (extent)
 * @param root the rank of the tree root
 * @param rank the rank of the calling process
 * @param size the total number of processes
 **/
static void build_tree(int root, int rank, int size, proc_tree_t * tree)
{
  int index = (rank - root + size) % size;
  int firstChildIdx = index * (*tree)->PROCTREE_A + 1;
  int i;

  (*tree)->root = root;
  for (i = 0; i < (*tree)->PROCTREE_A && firstChildIdx + i < size; i++) {
    (*tree)->child[i] = (firstChildIdx + i + root) % size;
    (*tree)->numChildren++;
  }
  if (rank == root) {
    (*tree)->isRoot = 1;
  } else {
    (*tree)->isRoot = 0;
    (*tree)->parent = (((index - 1) / (*tree)->PROCTREE_A) + root) % size;
  }
}
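/* For instance, with arity = 2, root = 0 and size = 7, build_tree() lays the
 * indices out as a 1-based binary heap (child k of index i is i * 2 + k + 1):
 *
 *              0
 *            /   \
 *           1     2
 *          / \   / \
 *         3   4 5   6
 *
 * With a nonzero root, every rank above is simply shifted by root, modulo
 * size, since index = (rank - root + size) % size. */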
/**
 * Bcast internal level: push the data down the tree
 **/
static void tree_bcast(void *buf, int count, MPI_Datatype datatype,
                       MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = 999;  // used a negative int before, but smpi_create_request() declares this illegal (to be checked)
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  /* wait for data from my parent in the tree */
  if (!tree->isRoot) {
    XBT_DEBUG("<%d> tree_bcast(): i am not root: recv from %d, tag=%d",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_recv(buf, count, datatype, tree->parent, system_tag + rank,
                  comm, MPI_STATUS_IGNORE);
  }
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  /* initiate sends to ranks lower in the tree */
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> send to <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_isend_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
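/* The tag of every transfer above is system_tag + <rank of the receiver>:
 * the parent tags its send to child[i] with system_tag + tree->child[i],
 * while the child posts its receive with system_tag + rank, so both sides
 * agree on the tag without further negotiation. */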
/**
 * Anti-bcast internal level: push a token from the leaves up to the root
 **/
static void tree_antibcast(void *buf, int count, MPI_Datatype datatype,
                           MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = 999;  // same tag caveat as in tree_bcast() above
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  /* everyone sends to its parent, except root. */
  if (!tree->isRoot) {
    XBT_DEBUG("<%d> tree_antibcast(): i am not root: send to %d, tag=%d",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_send(buf, count, datatype, tree->parent, system_tag + rank,
                  comm);
  }
  /* everyone receives as many messages as it has children */
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> recv from <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_irecv_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
/**
 * Bcast with a binary, ternary, or whatever tree...
 **/
void nary_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                     MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(root, rank, size, &tree);
  tree_bcast(buf, count, datatype, comm, tree);
  free_tree(tree);
}
/**
 * Barrier with a binary, ternary, or whatever tree...
 **/
void nary_tree_barrier(MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;
  char dummy = '$';

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(0, rank, size, &tree);
  tree_antibcast(&dummy, 1, MPI_CHAR, comm, tree);
  tree_bcast(&dummy, 1, MPI_CHAR, comm, tree);
  free_tree(tree);
}
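/* Usage note: the barrier is the composition of the two phases above. Every
 * rank first pushes a dummy byte toward the root (tree_antibcast), then the
 * root broadcasts it back down (tree_bcast); a non-root rank thus only
 * returns once it has been released by its parent in the bcast phase. */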
/**
 * Alltoall Ompi: pick an algorithm from the per-rank message size, mimicking
 * Open MPI's tuned decision
 **/
int smpi_coll_tuned_alltoall_ompi(void *sendbuf, int sendcount,
                                  MPI_Datatype sendtype, void *recvbuf,
                                  int recvcount, MPI_Datatype recvtype,
                                  MPI_Comm comm)
{
  int size, sendsize;

  size = smpi_comm_size(comm);
  sendsize = smpi_datatype_size(sendtype) * sendcount;
  if (sendsize < 200 && size > 12) {
    return
        smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype,
                                       recvbuf, recvcount, recvtype,
                                       comm);
  } else if (sendsize < 3000) {
    return
        smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, sendtype,
                                              recvbuf, recvcount, recvtype,
                                              comm);
  } else {
    return
        smpi_coll_tuned_alltoall_ring(sendbuf, sendcount, sendtype,
                                      recvbuf, recvcount, recvtype,
                                      comm);
  }
}
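/* Worked through: 25 MPI_INTs (4 bytes each) make sendsize 100 bytes, so a
 * 16-rank communicator takes the Bruck variant (sendsize < 200 and
 * size > 12); the same payload on 8 ranks falls through to basic linear
 * (sendsize < 3000); and 1000 MPI_INTs (4000 bytes) always go through the
 * ring implementation. */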
/**
 * Alltoall Bruck
 *
 * Openmpi calls this routine when the message size sent to each rank < 2000 bytes and size < 12
 * FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not
 * less...
 **/
int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int system_tag = 777;
  int i, rank, size, err, count;
  MPI_Aint lb;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  // FIXME: check implementation
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                         sendcount, sendtype,
                         (char *)recvbuf + rank * recvcount * recvext,
                         recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]",
                  rank, i, recvcount);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext,
                          recvcount, recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]",
                  rank, i, sendcount);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext,
                          sendcount, sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
  return MPI_SUCCESS;
}
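/* Note that, as the FIXME above hints, this routine currently posts one
 * receive and one send per peer (2 * (size - 1) requests overall), so it
 * behaves as a plain linear exchange rather than the log(size)-step Bruck
 * algorithm. */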
/**
 * Alltoall basic_linear (STARMPI: alltoall-simple)
 **/
int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
                                          MPI_Datatype sendtype,
                                          void *recvbuf, int recvcount,
                                          MPI_Datatype recvtype,
                                          MPI_Comm comm)
{
  int system_tag = 888;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  /* Initialize. */
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* simple optimization: copy our own chunk locally */
  err = smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                           sendcount, sendtype,
                           (char *)recvbuf + rank * recvcount * recvext,
                           recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    /* Post all receives first -- a simple optimization */
    count = 0;
    for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext,
                          recvcount, recvtype, i, system_tag, comm);
      count++;
    }
    /* Now post all sends in reverse order
     *   - We would like to minimize the search time through the message queue
     *     when messages actually arrive in the order in which they were posted.
     * TODO: check the previous assertion
     */
    for (i = (rank + size - 1) % size; i != rank; i = (i + size - 1) % size) {
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext,
                          sendcount, sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
  return err;
}
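/* For example, with size = 4, rank 1 posts its receives from ranks 2, 3, 0
 * (ascending from itself) and its sends to ranks 0, 3, 2 (descending). Its
 * first send targets rank 0, and rank 0 posts its receive from rank 1 first
 * as well, so early messages tend to match receives sitting at the front of
 * the destination's queue. */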
/**
 * Alltoallv basic
 **/
int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts,
                              int *senddisps, MPI_Datatype sendtype,
                              void *recvbuf, int *recvcounts,
                              int *recvdisps, MPI_Datatype recvtype,
                              MPI_Comm comm)
{
  int system_tag = 889;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  /* Initialize. */
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + senddisps[rank] * sendext,
                         sendcounts[rank], sendtype,
                         (char *)recvbuf + recvdisps[rank] * recvext,
                         recvcounts[rank], recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank || recvcounts[i] == 0) {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcounts[src] = %d]",
                  rank, i, recvcounts[i]);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + recvdisps[i] * recvext,
                          recvcounts[i], recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank || sendcounts[i] == 0) {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcounts[dst] = %d]",
                  rank, i, sendcounts[i]);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + senddisps[i] * sendext,
                          sendcounts[i], sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
  return err;
}
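/* A usage sketch with hypothetical counts (not part of this file): on a
 * 3-rank communicator, a rank can send 1, 2 and 3 elements to ranks 0, 1 and
 * 2 from one packed buffer with
 *
 *   int sendcounts[3] = {1, 2, 3};
 *   int senddisps[3] = {0, 1, 3};   // displacements in sendtype extents
 *
 * so that chunk i starts at sendbuf + senddisps[i] * sendext, which is
 * exactly the address arithmetic used above. */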