/* smpi_coll.c -- various optimized routines for collectives                */
/* Copyright (c) 2009-2015. The SimGrid Team.
 * All rights reserved.                                                     */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "private.h"   /* SMPI internals: smpi_comm_*, smpi_datatype_*, request helpers */
#include "colls/colls.h"
#include "simgrid/sg_config.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI (coll)");

s_mpi_coll_description_t mpi_coll_gather_description[] = {
  {"default", "gather default collective", reinterpret_cast<void*>(&smpi_mpi_gather)},
  COLL_GATHERS(COLL_DESCRIPTION, COLL_COMMA), {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_allgather_description[] = {
  {"default",
   "allgather default collective",
   reinterpret_cast<void*>(&smpi_mpi_allgather)},
  COLL_ALLGATHERS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_allgatherv_description[] = {
  {"default",
   "allgatherv default collective",
   reinterpret_cast<void*>(&smpi_mpi_allgatherv)},
  COLL_ALLGATHERVS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_allreduce_description[] = {
  {"default",
   "allreduce default collective",
   reinterpret_cast<void*>(&smpi_mpi_allreduce)},
  COLL_ALLREDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_reduce_scatter_description[] = {
  {"default",
   "reduce_scatter default collective",
   reinterpret_cast<void*>(&smpi_mpi_reduce_scatter)},
  COLL_REDUCE_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_scatter_description[] = {
  {"default",
   "scatter default collective",
   reinterpret_cast<void*>(&smpi_mpi_scatter)},
  COLL_SCATTERS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_barrier_description[] = {
  {"default",
   "barrier default collective",
   reinterpret_cast<void*>(&smpi_mpi_barrier)},
  COLL_BARRIERS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_alltoall_description[] = {
  {"default",
   "Ompi alltoall default collective",
   reinterpret_cast<void*>(&smpi_coll_tuned_alltoall_ompi2)},
  COLL_ALLTOALLS(COLL_DESCRIPTION, COLL_COMMA),
  {"bruck",
   "Alltoall Bruck (SG) collective",
   reinterpret_cast<void*>(&smpi_coll_tuned_alltoall_bruck)},
  {"basic_linear",
   "Alltoall basic linear (SG) collective",
   reinterpret_cast<void*>(&smpi_coll_tuned_alltoall_basic_linear)},
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_alltoallv_description[] = {
  {"default",
   "Ompi alltoallv default collective",
   reinterpret_cast<void*>(&smpi_coll_basic_alltoallv)},
  COLL_ALLTOALLVS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_bcast_description[] = {
  {"default",
   "bcast default collective",
   reinterpret_cast<void*>(&smpi_mpi_bcast)},
  COLL_BCASTS(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

s_mpi_coll_description_t mpi_coll_reduce_description[] = {
  {"default",
   "reduce default collective",
   reinterpret_cast<void*>(&smpi_mpi_reduce)},
  COLL_REDUCES(COLL_DESCRIPTION, COLL_COMMA),
  {nullptr, nullptr, nullptr} /* this array must be nullptr terminated */
};

/** Displays the long description of all registered collective algorithms of the given category */
void coll_help(const char *category, s_mpi_coll_description_t * table)
{
  printf("Long description of the %s models accepted by this simulator:\n", category);
  for (int i = 0; table[i].name; i++)
    printf("  %s: %s\n", table[i].name, table[i].description);
}

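/* Usage sketch (illustrative only, no such call appears in this file):
 *   coll_help("gather", mpi_coll_gather_description);
 * prints one "name: description" line per gather algorithm registered above. */
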
/** Returns the index of the collective algorithm 'name' in 'table', or dies if none matches */
int find_coll_description(s_mpi_coll_description_t * table,
                          char *name, const char *desc)
{
  char *name_list = nullptr;
  int selector_on = 0;

  if (name == nullptr || name[0] == '\0') {
    // no argument provided, use active selector's algorithm
    name = static_cast<char*>(xbt_cfg_get_string("smpi/coll-selector"));
    selector_on = 1;
  }

  for (int i = 0; table[i].name; i++)
    if (!strcmp(name, table[i].name)) {
      if (strcmp(table[i].name, "default"))
        XBT_INFO("Switch to algorithm %s for collective %s", table[i].name, desc);
      return i;
    }

  if (selector_on) {
    // collective seems not handled by the active selector, try with default one
    name = const_cast<char*>("default");
    for (int i = 0; table[i].name; i++)
      if (!strcmp(name, table[i].name)) {
        return i;
      }
  }

  if (!table[0].name)
    xbt_die("No collective is valid for '%s'! This is a bug.", name);
  name_list = xbt_strdup(table[0].name);
  for (int i = 1; table[i].name; i++) {
    name_list = static_cast<char*>(xbt_realloc(name_list, strlen(name_list) + strlen(table[i].name) + 3));
    strncat(name_list, ", ", 2);
    strncat(name_list, table[i].name, strlen(table[i].name));
  }
  xbt_die("Collective '%s' is invalid! Valid collectives are: %s.", name, name_list);
  return -1; /* not reached */
}

int (*mpi_coll_gather_fun)(void *, int, MPI_Datatype, void*, int, MPI_Datatype, int root, MPI_Comm);
int (*mpi_coll_allgather_fun)(void *, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allgatherv_fun)(void *, int, MPI_Datatype, void*, int*, int*, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allreduce_fun)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_alltoall_fun)(void *, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_alltoallv_fun)(void *, int*, int*, MPI_Datatype, void*, int*, int*, MPI_Datatype, MPI_Comm);
int (*mpi_coll_bcast_fun)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com);
int (*mpi_coll_reduce_fun)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
int (*mpi_coll_reduce_scatter_fun)(void *sbuf, void *rbuf, int *rcounts, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_scatter_fun)(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm);
int (*mpi_coll_barrier_fun)(MPI_Comm comm);
void (*smpi_coll_cleanup_callback)();

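/* How these pointers typically get filled in (sketch only; the real call site
 * lives elsewhere in SMPI, and the "smpi/gather" option name and the 'coll'
 * field of s_mpi_coll_description_t are assumptions here, inferred from the
 * surrounding code):
 *
 *   int id = find_coll_description(mpi_coll_gather_description,
 *                                  static_cast<char*>(xbt_cfg_get_string("smpi/gather")),
 *                                  "gather");
 *   mpi_coll_gather_fun = reinterpret_cast<int (*)(void*, int, MPI_Datatype, void*, int,
 *                                                  MPI_Datatype, int, MPI_Comm)>(
 *       mpi_coll_gather_description[id].coll);
 */
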
int smpi_coll_tuned_alltoall_ompi2(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
  int size = smpi_comm_size(comm);
  int sendsize = smpi_datatype_size(sendtype) * sendcount;
  if (sendsize < 200 && size > 12) {
    return smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
  } else if (sendsize < 3000) {
    return smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
  } else {
    return smpi_coll_tuned_alltoall_ring(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
  }
}

/* Alltoall Bruck
 * Openmpi calls this routine when the message size sent to each rank is < 2000 bytes and size < 12.
 * FIXME: uh, check smpi_pmpi again, but this routine is called for size > 12, not less... */
int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int recvcount, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int system_tag = 777;
  int i, rank, size, err, count;
  MPI_Aint lb = 0;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  // FIXME: check implementation
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);
  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err = smpi_datatype_copy(static_cast<char *>(sendbuf) + rank * sendcount * sendext,
                           sendcount, sendtype,
                           static_cast<char *>(recvbuf) + rank * recvcount * recvext,
                           recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i != rank) {
        requests[count] = smpi_irecv_init(static_cast<char *>(recvbuf) + i * recvcount * recvext, recvcount,
                                          recvtype, i, system_tag, comm);
        count++;
      } else {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]", rank, i, recvcount);
      }
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i != rank) {
        requests[count] = smpi_isend_init(static_cast<char *>(sendbuf) + i * sendcount * sendext, sendcount,
                                          sendtype, i, system_tag, comm);
        count++;
      } else {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]", rank, i, sendcount);
      }
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}

/* Alltoall basic_linear (STARMPI:alltoall-simple) */
int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount,
                                          MPI_Datatype sendtype,
                                          void *recvbuf, int recvcount,
                                          MPI_Datatype recvtype,
                                          MPI_Comm comm)
{
  int system_tag = 888;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  /* Initialize. */
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank);
  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* simple optimization: local copy from self */
  err = smpi_datatype_copy(static_cast<char *>(sendbuf) + rank * sendcount * sendext,
                           sendcount, sendtype,
                           static_cast<char *>(recvbuf) + rank * recvcount * recvext,
                           recvcount, recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    /* Post all receives first -- a simple optimization */
    count = 0;
    for (i = (rank + 1) % size; i != rank; i = (i + 1) % size) {
      requests[count] = smpi_irecv_init(static_cast<char *>(recvbuf) + i * recvcount * recvext, recvcount,
                                        recvtype, i, system_tag, comm);
      count++;
    }
    /* Now post all sends in reverse order
     * - We would like to minimize the search time through message queue
     *   when messages actually arrive in the order in which they were posted.
     * TODO: check the previous assertion
     */
    for (i = (rank + size - 1) % size; i != rank; i = (i + size - 1) % size) {
      requests[count] = smpi_isend_init(static_cast<char *>(sendbuf) + i * sendcount * sendext, sendcount,
                                        sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}

int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts,
                              int *senddisps, MPI_Datatype sendtype,
                              void *recvbuf, int *recvcounts,
                              int *recvdisps, MPI_Datatype recvtype,
                              MPI_Comm comm)
{
  int system_tag = 889;
  int i, rank, size, err, count;
  MPI_Aint lb = 0, sendext = 0, recvext = 0;
  MPI_Request *requests;

  /* Initialize. */
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err = smpi_datatype_copy(static_cast<char *>(sendbuf) + senddisps[rank] * sendext, sendcounts[rank], sendtype,
                           static_cast<char *>(recvbuf) + recvdisps[rank] * recvext, recvcounts[rank], recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i != rank && recvcounts[i] != 0) {
        requests[count] = smpi_irecv_init(static_cast<char *>(recvbuf) + recvdisps[i] * recvext,
                                          recvcounts[i], recvtype, i, system_tag, comm);
        count++;
      } else {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcounts[src] = %d]", rank, i, recvcounts[i]);
      }
    }
    /* Now create all sends */
    for (i = 0; i < size; ++i) {
      if (i != rank && sendcounts[i] != 0) {
        requests[count] = smpi_isend_init(static_cast<char *>(sendbuf) + senddisps[i] * sendext,
                                          sendcounts[i], sendtype, i, system_tag, comm);
        count++;
      } else {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcounts[dst] = %d]", rank, i, sendcounts[i]);
      }
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUS_IGNORE);
    for (i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL)
        smpi_mpi_request_free(&requests[i]);
    }
    xbt_free(requests);
  }
  return err;
}