/* smpi_coll.c -- various optimized routines for collectives */

/* Copyright (c) 2009, 2010. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include <stdio.h>              /* printf */
#include <string.h>             /* strcmp, strlen, strcat, strdup */

#include "private.h"            /* SMPI internals: smpi_comm_*, smpi_mpi_*, ... */
#include "colls/colls.h"
s_mpi_coll_description_t mpi_coll_alltoall_description[] = {
  {"ompi",
   "Ompi alltoall default collective",
   smpi_coll_tuned_alltoall_ompi},
  {"2dmesh",
   "Alltoall 2dmesh collective",
   smpi_coll_tuned_alltoall_2dmesh},
  {"3dmesh",
   "Alltoall 3dmesh collective",
   smpi_coll_tuned_alltoall_3dmesh},
/*  {"bruck",
   "Alltoall Bruck collective",
   smpi_coll_tuned_alltoall_bruck},*/
  {"pair",
   "Alltoall pair collective",
   smpi_coll_tuned_alltoall_pair},
  {"pair_light_barrier",
   "Alltoall pair_light_barrier collective",
   smpi_coll_tuned_alltoall_pair_light_barrier},
  {"pair_mpi_barrier",
   "Alltoall pair_mpi_barrier collective",
   smpi_coll_tuned_alltoall_pair_mpi_barrier},
  {"rdb",
   "Alltoall rdb collective",
   smpi_coll_tuned_alltoall_rdb},
  {"ring",
   "Alltoall ring collective",
   smpi_coll_tuned_alltoall_ring},
  {"ring_light_barrier",
   "Alltoall ring_light_barrier collective",
   smpi_coll_tuned_alltoall_ring_light_barrier},
  {"ring_mpi_barrier",
   "Alltoall ring_mpi_barrier collective",
   smpi_coll_tuned_alltoall_ring_mpi_barrier},
  {"ring_one_barrier",
   "Alltoall ring_one_barrier collective",
   smpi_coll_tuned_alltoall_ring_one_barrier},
  {"simple",
   "Alltoall simple collective",
   smpi_coll_tuned_alltoall_simple},
  {"bruck",
   "Alltoall Bruck (SG) collective",
   smpi_coll_tuned_alltoall_bruck},
  {"basic_linear",
   "Alltoall basic linear (SG) collective",
   smpi_coll_tuned_alltoall_basic_linear},
  {"pairwise",
   "Alltoall pairwise (SG) collective",
   smpi_coll_tuned_alltoall_pairwise},

  {NULL, NULL, NULL}            /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_allgather_description[] = {
  {"default",
   "allgather default collective",
   smpi_mpi_allgather},

  {NULL, NULL, NULL}            /* this array must be NULL terminated */
};
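/* Both tables above follow the same convention: iteration stops at the first
 * entry whose name is NULL, which is exactly how coll_help() and
 * find_coll_description() below walk them. A hypothetical new implementation
 * would be registered by adding one {"name", "description", function} row
 * before the NULL terminator. */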
/** Displays the long description of all registered models */
void coll_help(const char *category, s_mpi_coll_description_t * table)
{
  int i;

  printf("Long description of the %s models accepted by this simulator:\n",
         category);
  for (i = 0; table[i].name; i++)
    printf("  %s: %s\n", table[i].name, table[i].description);
}
int find_coll_description(s_mpi_coll_description_t * table,
                          const char *name)
{
  int i;
  char *name_list = NULL;

  for (i = 0; table[i].name; i++)
    if (!strcmp(name, table[i].name)) {
      return i;
    }
  name_list = strdup(table[0].name);
  for (i = 1; table[i].name; i++) {
    name_list = xbt_realloc(name_list,
                            strlen(name_list) + strlen(table[i].name) + 3);
    strcat(name_list, ", ");
    strcat(name_list, table[i].name);
  }
  xbt_die("Model '%s' is invalid! Valid models are: %s.", name, name_list);
  return -1;                    /* never reached: xbt_die() aborts */
}
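/* Usage sketch (illustrative only; the struct field name "coll" and the cast
 * are assumptions, since s_mpi_coll_description_t is defined in a header):
 *
 *   int idx = find_coll_description(mpi_coll_alltoall_description, "pairwise");
 *   mpi_coll_alltoall_fun =
 *       (int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype,
 *                MPI_Comm)) mpi_coll_alltoall_description[idx].coll;
 *
 * On an unknown name the call never returns: xbt_die() aborts after listing
 * every valid model. */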
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi,
                                "Logging specific to SMPI (coll)");

int (*mpi_coll_alltoall_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allgather_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
struct s_proc_tree {
  int PROCTREE_A;               /* arity of the tree */
  int numChildren;
  int *child;                   /* ranks of the children; -1 marks an unused slot */
  int parent;                   /* rank of our parent in the tree */
  int root;                     /* rank of the tree root */
};
typedef struct s_proc_tree *proc_tree_t;
static proc_tree_t alloc_tree(int arity)
{
  proc_tree_t tree = xbt_new(struct s_proc_tree, 1);
  int i;

  tree->PROCTREE_A = arity;
  tree->numChildren = 0;
  tree->child = xbt_new(int, arity);
  for (i = 0; i < arity; i++) {
    tree->child[i] = -1;
  }
  return tree;
}

static void free_tree(proc_tree_t tree)
{
  xbt_free(tree->child);
  xbt_free(tree);
}
 * Build the tree topology for the calling process, given its rank and the
 * total number of processes in the group
 * @param root the rank of the tree root
 * @param rank the rank of the calling process
 * @param size the total number of processes
 */
static void build_tree(int root, int rank, int size, proc_tree_t * tree)
{
  int index = (rank - root + size) % size;
  int firstChildIdx = index * (*tree)->PROCTREE_A + 1;
  int i;

  (*tree)->root = root;
  for (i = 0; i < (*tree)->PROCTREE_A && firstChildIdx + i < size; i++) {
    (*tree)->child[i] = (firstChildIdx + i + root) % size;
    (*tree)->numChildren++;
  }
  if (index > 0) {              /* everyone but the root has a parent */
    (*tree)->parent = (((index - 1) / (*tree)->PROCTREE_A) + root) % size;
  }
}
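/* Worked example of build_tree(): arity 2, size 7, root 0. Then index equals
 * rank, so rank 0 gets children 1 and 2 (firstChildIdx = 1), rank 1 gets 3
 * and 4, rank 2 gets 5 and 6, and ranks 3 to 6 are leaves. With root 5 every
 * position shifts by 5 modulo 7: rank 5 becomes the root, with children 6
 * and 0. */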
static void tree_bcast(void *buf, int count, MPI_Datatype datatype,
                       MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = 999;         // a negative tag would stay outside the user tag space, but smpi_create_request() rejects it (to be checked)
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  /* wait for data from my parent in the tree */
  if (rank != tree->root) {
    XBT_DEBUG("<%d> tree_bcast(): I am not root: recv from %d, tag=%d",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_recv(buf, count, datatype, tree->parent, system_tag + rank,
                  comm, MPI_STATUS_IGNORE);
  }
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  /* initiate sends to the children (ranks deeper in the tree) */
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> send to <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_isend_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
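/* Inverse flow of tree_bcast(): every process sends one message up to its
 * parent, so data travels from the leaves toward the root with no reduction
 * applied. Combined with tree_bcast(), this yields the n-ary tree barrier
 * further below. */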
static void tree_antibcast(void *buf, int count, MPI_Datatype datatype,
                           MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = 999;         // same tag constraint as in tree_bcast()
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  // everyone sends to its parent, except the root
  if (rank != tree->root) {
    XBT_DEBUG("<%d> tree_antibcast(): I am not root: send to %d, tag=%d",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_send(buf, count, datatype, tree->parent, system_tag + rank,
                  comm);
  }
  // everyone receives as many messages as it has children
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> recv from <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_irecv_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
 * bcast over an n-ary tree (binary, ternary, or whatever arity)
 */
void nary_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                     MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(root, rank, size, &tree);
  tree_bcast(buf, count, datatype, comm, tree);
  free_tree(tree);
}
 * barrier over an n-ary tree: a dummy token flows leaves-to-root, then
 * root-to-leaves, so nobody leaves before everyone has entered
 */
void nary_tree_barrier(MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;
  char dummy = '$';

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(0, rank, size, &tree);
  tree_antibcast(&dummy, 1, MPI_CHAR, comm, tree);
  tree_bcast(&dummy, 1, MPI_CHAR, comm, tree);
  free_tree(tree);
}
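/* Usage sketch (illustrative; the communicator and values are examples):
 * a ternary-tree broadcast of one int from rank 0, then the matching barrier:
 *
 *   int token = 42;
 *   nary_tree_bcast(&token, 1, MPI_INT, 0, MPI_COMM_WORLD, 3);
 *   nary_tree_barrier(MPI_COMM_WORLD, 3);
 *
 * Arity trades depth for fan-out: arity 2 gives a deeper tree (about
 * log2(size) levels) with two sends per parent, while a larger arity
 * flattens the tree at the cost of more requests per parent. */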
int smpi_coll_tuned_alltoall_ompi(void *sendbuf, int sendcount,
                                  MPI_Datatype sendtype, void *recvbuf,
                                  int recvcount, MPI_Datatype recvtype,
                                  MPI_Comm comm)
{
  int size, sendsize;

  size = smpi_comm_size(comm);
  sendsize = smpi_datatype_size(sendtype) * sendcount;
  if (sendsize < 200 && size > 12) {
    return
        smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype,
                                       recvbuf, recvcount, recvtype,
                                       comm);
  } else if (sendsize < 3000) {
    return
        smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount,
                                              sendtype, recvbuf,
                                              recvcount, recvtype, comm);
  } else {
    return
        smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, sendtype,
                                          recvbuf, recvcount, recvtype,
                                          comm);
  }
}
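/* Worked example of the thresholds above: 16 ranks each sending 25 MPI_INT
 * (100 bytes) per peer gives sendsize = 100 < 200 with size = 16 > 12, so
 * Bruck is selected; 500 ints (2000 bytes) per peer fall through to
 * basic_linear; 1000 ints (4000 bytes) or more per peer go to pairwise. */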
/**
 * Alltoall Bruck
 *
 * The dispatcher above selects this routine for small messages on large
 * communicators: less than 200 bytes per rank and more than 12 processes.
 */
int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int system_tag = 777;
  int i, rank, size, err, count;
  MPI_Aint lb;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  // FIXME: not Bruck's log-step algorithm yet: this posts one send and one receive per peer
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                         sendcount, sendtype,
                         (char *)recvbuf + rank * recvcount * recvext,
                         recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]",
                  rank, i, recvcount);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]",
                  rank, i, sendcount);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
  return err;
}
 * Alltoall basic_linear (STARMPI: alltoall-simple)
 *
 * The dispatcher above uses it for the mid-sized case: messages under 3000
 * bytes per rank that do not take the Bruck path.
 */
int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
                                          MPI_Datatype sendtype,
                                          void *recvbuf, int recvcount,
                                          MPI_Datatype recvtype,
                                          MPI_Comm comm)
{
  int system_tag = 888;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  /* Initialize. */
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* simple optimization: copy the local block first */
  err = smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                           sendcount, sendtype,
                           (char *)recvbuf + rank * recvcount * recvext,
                           recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    /* Post all receives first -- a simple optimization */
    count = 0;
    for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now post all sends in reverse order
     * - We would like to minimize the search time through message queue
     *   when messages actually arrive in the order in which they were posted.
     * TODO: check the previous assertion
     */
    for (i = (rank + size - 1) % size; i != rank; i = (i + size - 1) % size) {
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
  return err;
}
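/* Posting order in the loops above, e.g. size = 4, rank = 1: receives are
 * posted from ranks 2, 3, 0 (ascending from rank+1) and sends to ranks
 * 0, 3, 2 (descending from rank-1). Rank 0 posts its receives from 1, 2, 3
 * in that order, so the first message rank 1 sends is the first one rank 0
 * expects, which is the matching-order effect the comment above aims for. */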
 * this algorithm performs size steps (1 <= s <= size) and
 * at each step s, a process p sends to and receives from a unique distinct remote process.
 * Reading x->p->y as "p receives from x and sends to y", size=5 gives:
 *   s=1:  4->0->1, 0->1->2, 1->2->3, ...
 *   s=2:  3->0->2, 4->1->3, 0->2->4, 1->3->0, 2->4->1
 *   ...
 * The dispatcher above selects this routine when the message sent to each
 * rank is 3000 bytes or more.
 */
int smpi_coll_tuned_alltoall_pairwise(void *sendbuf, int sendcount,
                                      MPI_Datatype sendtype, void *recvbuf,
                                      int recvcount, MPI_Datatype recvtype,
                                      MPI_Comm comm)
{
  int system_tag = 999;
  int rank, size, step, sendto, recvfrom, sendsize, recvsize;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_pairwise() called.", rank);
  sendsize = smpi_datatype_size(sendtype);
  recvsize = smpi_datatype_size(recvtype);
  /* Perform pairwise exchange - starting from 1 so the local copy is last */
  for (step = 1; step < size + 1; step++) {
    /* who do we talk to in this step? */
    sendto = (rank + step) % size;
    recvfrom = (rank + size - step) % size;
    /* send and receive */
    smpi_mpi_sendrecv(&((char *) sendbuf)[sendto * sendsize * sendcount],
                      sendcount, sendtype, sendto, system_tag,
                      &((char *) recvbuf)[recvfrom * recvsize * recvcount],
                      recvcount, recvtype, recvfrom, system_tag, comm,
                      MPI_STATUS_IGNORE);
  }
  return MPI_SUCCESS;
}
int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts,
                              int *senddisps, MPI_Datatype sendtype,
                              void *recvbuf, int *recvcounts,
                              int *recvdisps, MPI_Datatype recvtype,
                              MPI_Comm comm)
{
  int system_tag = 889;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  /* Initialize. */
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + senddisps[rank] * sendext,
                         sendcounts[rank], sendtype,
                         (char *)recvbuf + recvdisps[rank] * recvext,
                         recvcounts[rank], recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank || recvcounts[i] == 0) {
        XBT_DEBUG
            ("<%d> skip request creation [src = %d, recvcounts[src] = %d]",
             rank, i, recvcounts[i]);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + recvdisps[i] * recvext,
                          recvcounts[i], recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank || sendcounts[i] == 0) {
        XBT_DEBUG
            ("<%d> skip request creation [dst = %d, sendcounts[dst] = %d]",
             rank, i, sendcounts[i]);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + senddisps[i] * sendext,
                          sendcounts[i], sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
  return err;
}
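/* Displacement example for the alltoallv above: with 3 ranks and, on rank 0,
 * sendcounts = {2, 0, 1} and senddisps = {0, 2, 2} (counted in units of the
 * send type's extent), elements [0,1] are copied locally, the zero-count
 * message to rank 1 is skipped by the guard above, and element [2] goes to
 * rank 2. */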