/* smpi_coll.c -- various optimized routines for collectives */

/* Copyright (c) 2009, 2010. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "private.h"     /* SMPI internals: smpi_comm_rank(), smpi_mpi_send(), xbt_* helpers, ... */
#include "colls/colls.h"
s_mpi_coll_description_t mpi_coll_allgather_description[] = {
  {"default",
   "allgather default collective",
   smpi_mpi_allgather},
  COLL_ALLGATHERS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_allreduce_description[] = {
  {"default",
   "allreduce default collective",
   smpi_mpi_allreduce},
  COLL_ALLREDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_alltoall_description[] = {
  {"default",
   "Ompi alltoall default collective",
   smpi_coll_tuned_alltoall_ompi},
  COLL_ALLTOALLS(COLL_DESCRIPTION, COLL_COMMA),
  {"bruck",
   "Alltoall Bruck (SG) collective",
   smpi_coll_tuned_alltoall_bruck},
  {"basic_linear",
   "Alltoall basic linear (SG) collective",
   smpi_coll_tuned_alltoall_basic_linear},
  {"pairwise",
   "Alltoall pairwise (SG) collective",
   smpi_coll_tuned_alltoall_pairwise},
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_alltoallv_description[] = {
  {"default",
   "Ompi alltoallv default collective",
   smpi_coll_basic_alltoallv},
  COLL_ALLTOALLVS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_bcast_description[] = {
  {"default",
   "bcast default collective",
   smpi_mpi_bcast},
  COLL_BCASTS(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
s_mpi_coll_description_t mpi_coll_reduce_description[] = {
  {"default",
   "reduce default collective",
   smpi_mpi_reduce},
  COLL_REDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {NULL, NULL, NULL}      /* this array must be NULL terminated */
};
/** Displays the long description of all registered models */
void coll_help(const char *category, s_mpi_coll_description_t * table)
{
  int i;

  printf("Long description of the %s models accepted by this simulator:\n",
         category);
  for (i = 0; table[i].name; i++)
    printf("  %s: %s\n", table[i].name, table[i].description);
}
int find_coll_description(s_mpi_coll_description_t * table,
                          const char *name)
{
  int i;
  char *name_list = NULL;

  for (i = 0; table[i].name; i++)
    if (!strcmp(name, table[i].name)) {
      return i;
    }
  /* not found: die with the list of valid names */
  name_list = strdup(table[0].name);
  for (i = 1; table[i].name; i++) {
    name_list =
        xbt_realloc(name_list,
                    strlen(name_list) + strlen(table[i].name) + 3);
    strcat(name_list, ", ");
    strcat(name_list, table[i].name);
  }
  xbt_die("Model '%s' is invalid! Valid models are: %s.", name, name_list);
  return -1;
}
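/* A minimal usage sketch (editor's addition, not original code): look up an
 * alltoall implementation by name and install it in the matching hook below.
 * The function-pointer field name `coll` is an assumption about
 * s_mpi_coll_description_t, which is declared elsewhere:
 *
 *   int idx = find_coll_description(mpi_coll_alltoall_description, "pairwise");
 *   mpi_coll_alltoall_fun =
 *       (int (*)(void *, int, MPI_Datatype, void *, int, MPI_Datatype,
 *                MPI_Comm)) mpi_coll_alltoall_description[idx].coll;
 */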
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi,
                                "Logging specific to SMPI (coll)");
int (*mpi_coll_allgather_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allreduce_fun)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_alltoall_fun)(void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_alltoallv_fun)(void *, int *, int *, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm);
int (*mpi_coll_bcast_fun)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com);
int (*mpi_coll_reduce_fun)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
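/* Sketch of the intended use of these hooks (editor's assumption, not code
 * from this file): the MPI entry points presumably dispatch through them,
 * e.g.
 *
 *   mpi_coll_alltoall_fun(sendbuf, sendcount, sendtype,
 *                         recvbuf, recvcount, recvtype, comm);
 */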
struct s_proc_tree {
  int PROCTREE_A;               /* arity of the tree */
  int numChildren;
  int *child;                   /* ranks of the children, -1 if absent */
  int parent;                   /* rank of the parent */
  int root;
  int isRoot;
};
typedef struct s_proc_tree *proc_tree_t;
/**
 * Alloc and init a process tree of the given arity.
 */
static proc_tree_t alloc_tree(int arity)
{
  proc_tree_t tree;
  int i;

  tree = xbt_new(struct s_proc_tree, 1);
  tree->PROCTREE_A = arity;
  tree->isRoot = 0;
  tree->numChildren = 0;
  tree->child = xbt_new(int, arity);
  for (i = 0; i < arity; i++) {
    tree->child[i] = -1;        /* -1 means "no child in this slot" */
  }
  tree->root = -1;
  tree->parent = -1;
  return tree;
}
/**
 * Free the tree and its children array.
 */
static void free_tree(proc_tree_t tree)
{
  xbt_free(tree->child);
  xbt_free(tree);
}
/**
 * Build the tree for the calling process, given the rank of the root and the
 * group size.
 * @param root the rank of the tree root
 * @param rank the rank of the calling process
 * @param size the total number of processes
 * @param tree the tree to fill in
 */
static void build_tree(int root, int rank, int size, proc_tree_t * tree)
{
  int index = (rank - root + size) % size;
  int firstChildIdx = index * (*tree)->PROCTREE_A + 1;
  int i;

  (*tree)->root = root;
  for (i = 0; i < (*tree)->PROCTREE_A && firstChildIdx + i < size; i++) {
    (*tree)->child[i] = (firstChildIdx + i + root) % size;
    (*tree)->numChildren++;
  }
  if (index == 0) {
    (*tree)->isRoot = 1;
  } else {
    (*tree)->isRoot = 0;
    (*tree)->parent = (((index - 1) / (*tree)->PROCTREE_A) + root) % size;
  }
}
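/* Worked example (editor's sketch): with arity = 2, root = 0 and size = 7,
 * index == rank and firstChildIdx == 2 * rank + 1, which yields the classical
 * binary-heap layout:
 *
 *              0
 *            /   \
 *           1     2
 *          / \   / \
 *         3   4 5   6
 *
 * With root = 2 the same shape is rotated by the "+ root) % size" shifts:
 * rank 2 becomes the root and its children are ranks 3 and 4.
 */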
/**
 * Broadcast along the tree: receive from the parent, then forward to the
 * children.
 */
static void tree_bcast(void *buf, int count, MPI_Datatype datatype,
                       MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = 999;  // FIXME: a negative tag would be preferable, but smpi_create_request() declares it illegal (to be checked)
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  /* wait for data from my parent in the tree */
  if (!tree->isRoot) {
    XBT_DEBUG("<%d> tree_bcast(): i am not root: recv from %d, tag=%d",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_recv(buf, count, datatype, tree->parent, system_tag + rank,
                  comm, MPI_STATUS_IGNORE);
  }
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  /* initiate sends to ranks lower in the tree */
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> send to <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_isend_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
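/* Tag arithmetic example (editor's note): a receiver waits on
 * system_tag + its own rank and each sender tags the message with
 * system_tag + the child's rank, so with root = 0, arity = 2 and size = 3,
 * rank 0 sends with tags 1000 and 1001, matched by rank 1 receiving with tag
 * 1000 and rank 2 with tag 1001.
 */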
/**
 * Anti-broadcast along the tree: each process sends to its parent and
 * receives from its children.
 */
static void tree_antibcast(void *buf, int count, MPI_Datatype datatype,
                           MPI_Comm comm, proc_tree_t tree)
{
  int system_tag = 999;  // FIXME: same remark as in tree_bcast about negative tags
  int rank, i;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  /* everyone sends to its parent, except the root */
  if (!tree->isRoot) {
    XBT_DEBUG("<%d> tree_antibcast(): i am not root: send to %d, tag=%d",
              rank, tree->parent, system_tag + rank);
    smpi_mpi_send(buf, count, datatype, tree->parent, system_tag + rank,
                  comm);
  }
  /* everyone receives as many messages as it has children */
  requests = xbt_new(MPI_Request, tree->numChildren);
  XBT_DEBUG("<%d> creates %d requests (1 per child)", rank,
            tree->numChildren);
  for (i = 0; i < tree->numChildren; i++) {
    if (tree->child[i] == -1) {
      requests[i] = MPI_REQUEST_NULL;
    } else {
      XBT_DEBUG("<%d> recv from <%d>, tag=%d", rank, tree->child[i],
                system_tag + tree->child[i]);
      requests[i] =
          smpi_irecv_init(buf, count, datatype, tree->child[i],
                          system_tag + tree->child[i], comm);
    }
  }
  smpi_mpi_startall(tree->numChildren, requests);
  smpi_mpi_waitall(tree->numChildren, requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
/**
 * Broadcast with a binary, ternary, or whatever tree...
 */
void nary_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                     MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(root, rank, size, &tree);
  tree_bcast(buf, count, datatype, comm, tree);
  free_tree(tree);
}
/**
 * Barrier with a binary, ternary, or whatever tree...
 */
void nary_tree_barrier(MPI_Comm comm, int arity)
{
  proc_tree_t tree = alloc_tree(arity);
  int rank, size;
  char dummy = '$';             /* 1-byte payload; the value is irrelevant */

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  build_tree(0, rank, size, &tree);
  tree_antibcast(&dummy, 1, MPI_CHAR, comm, tree);
  tree_bcast(&dummy, 1, MPI_CHAR, comm, tree);
  free_tree(tree);
}
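/* Usage sketch (editor's addition): a 4-ary tree barrier over MPI_COMM_WORLD
 * would be
 *
 *   nary_tree_barrier(MPI_COMM_WORLD, 4);
 *
 * The up phase (tree_antibcast) followed by the down phase (tree_bcast) plays
 * the role of the classical gather-then-broadcast barrier.
 */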
int smpi_coll_tuned_alltoall_ompi(void *sendbuf, int sendcount,
                                  MPI_Datatype sendtype, void *recvbuf,
                                  int recvcount, MPI_Datatype recvtype,
                                  MPI_Comm comm)
{
  int size, sendsize;

  size = smpi_comm_size(comm);
  sendsize = smpi_datatype_size(sendtype) * sendcount;
  if (sendsize < 200 && size > 12) {
    return smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype,
                                          recvbuf, recvcount, recvtype,
                                          comm);
  } else if (sendsize < 3000) {
    return smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount,
                                                 sendtype, recvbuf,
                                                 recvcount, recvtype,
                                                 comm);
  } else {
    return smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, sendtype,
                                             recvbuf, recvcount, recvtype,
                                             comm);
  }
}
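/* Worked examples of the dispatch above (editor's sketch): 16 ranks sending
 * 100 bytes each fall in the first branch (sendsize < 200 and size > 12), so
 * bruck is used; 4 ranks sending 1000 bytes each take the second branch
 * (basic_linear); anything with sendsize >= 3000 bytes ends up in pairwise.
 */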
/**
 * Alltoall Bruck
 *
 * Openmpi calls this routine when the message size sent to each rank is less
 * than 2000 bytes and the communicator size is less than 12.
 * FIXME: uh, check smpi_pmpi again, but this routine is called for size > 12,
 * not less (and the dispatcher above uses a 200-byte threshold, not 2000).
 */
int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int system_tag = 777;
  int i, rank, size, err, count;
  MPI_Aint lb;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  // FIXME: check implementation
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                         sendcount, sendtype,
                         (char *)recvbuf + rank * recvcount * recvext,
                         recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]",
                  rank, i, recvcount);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]",
                  rank, i, sendcount);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
  return err;
}
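/* Request bookkeeping (editor's note): each rank posts size - 1 receives and
 * size - 1 sends, so for size = 8 the loops above fill all count = 14 of the
 * 2 * (size - 1) preallocated request slots. Note that, despite the name,
 * this variant posts every pairwise send/recv at once rather than performing
 * the log(size)-step Bruck exchange (see the FIXME above).
 */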
/**
 * Alltoall basic_linear (STARMPI: alltoall-simple)
 */
int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
                                          MPI_Datatype sendtype,
                                          void *recvbuf, int recvcount,
                                          MPI_Datatype recvtype,
                                          MPI_Comm comm)
{
  int system_tag = 888;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  /* Initialize. */
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* simple optimization: start with the local copy */
  err = smpi_datatype_copy((char *)sendbuf + rank * sendcount * sendext,
                           sendcount, sendtype,
                           (char *)recvbuf + rank * recvcount * recvext,
                           recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Post all receives first -- a simple optimization */
    for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
      requests[count] =
          smpi_irecv_init((char *)recvbuf + i * recvcount * recvext, recvcount,
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now post all sends in reverse order
     *   - We would like to minimize the search time through the message queue
     *     when messages actually arrive in the order in which they were posted.
     * TODO: check the previous assertion
     */
    for (i = (rank + size - 1) % size; i != rank; i = (i + size - 1) % size) {
      requests[count] =
          smpi_isend_init((char *)sendbuf + i * sendcount * sendext, sendcount,
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
  return err;
}
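/* Posting-order example (editor's sketch): for rank = 2 and size = 4, the
 * receive loop visits i = 3, 0, 1 while the send loop visits i = 1, 0, 3;
 * sends thus walk the ranks in the direction opposite to the receives, which
 * is the ordering the comment above hopes keeps message-queue searches short.
 */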
/**
 * Alltoall pairwise
 *
 * This algorithm performs size steps (1 <= s <= size) and
 * at each step s, a process p sends to and receives from a unique, distinct
 * remote process.
 * size = 5 : s = 1:  4->0->1, 0->1->2, 1->2->3, ...
 *            s = 2:  3->0->2, 4->1->3, 0->2->4, 1->3->0, 2->4->1
 *
 * Openmpi calls this routine when the message size sent to each rank is
 * greater than 3000 bytes.
 */
int smpi_coll_tuned_alltoall_pairwise(void *sendbuf, int sendcount,
                                      MPI_Datatype sendtype, void *recvbuf,
                                      int recvcount, MPI_Datatype recvtype,
                                      MPI_Comm comm)
{
  int system_tag = 999;
  int rank, size, step, sendto, recvfrom, sendsize, recvsize;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_pairwise() called.", rank);
  sendsize = smpi_datatype_size(sendtype);
  recvsize = smpi_datatype_size(recvtype);
  /* Perform pairwise exchange - starting from 1 so the local copy is last */
  for (step = 1; step < size + 1; step++) {
    /* who do we talk to in this step? */
    sendto = (rank + step) % size;
    recvfrom = (rank + size - step) % size;
    /* send and receive */
    smpi_mpi_sendrecv(&((char *) sendbuf)[sendto * sendsize * sendcount],
                      sendcount, sendtype, sendto, system_tag,
                      &((char *) recvbuf)[recvfrom * recvsize * recvcount],
                      recvcount, recvtype, recvfrom, system_tag, comm,
                      MPI_STATUS_IGNORE);
  }
  return MPI_SUCCESS;
}
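/* Schedule example (editor's sketch): for size = 4, rank 0 exchanges with
 *
 *   step 1: sendto = 1, recvfrom = 3
 *   step 2: sendto = 2, recvfrom = 2
 *   step 3: sendto = 3, recvfrom = 1
 *   step 4: sendto = 0, recvfrom = 0   (the local copy, done last)
 */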
int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts,
                              int *senddisps, MPI_Datatype sendtype,
                              void *recvbuf, int *recvcounts,
                              int *recvdisps, MPI_Datatype recvtype,
                              MPI_Comm comm)
{
  int system_tag = 889;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  /* Initialize. */
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
  err = smpi_datatype_extent(sendtype, &lb, &sendext);
  err = smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + senddisps[rank] * sendext,
                         sendcounts[rank], sendtype,
                         (char *)recvbuf + recvdisps[rank] * recvext,
                         recvcounts[rank], recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank || recvcounts[i] == 0) {
        XBT_DEBUG
            ("<%d> skip request creation [src = %d, recvcounts[src] = %d]",
             rank, i, recvcounts[i]);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + recvdisps[i] * recvext,
                          recvcounts[i], recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i == rank || sendcounts[i] == 0) {
        XBT_DEBUG
            ("<%d> skip request creation [dst = %d, sendcounts[dst] = %d]",
             rank, i, sendcounts[i]);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + senddisps[i] * sendext,
                          sendcounts[i], sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
  return err;
}
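/* Displacement example (editor's sketch): with size = 3,
 * sendcounts = {1, 2, 3} and the usual exclusive prefix sums
 * senddisps = {0, 1, 3}, rank r's block for peer i starts at
 * (char *)sendbuf + senddisps[i] * sendext, i.e. at element offsets 0, 1 and
 * 3 of the send buffer.
 */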