/* smpi_coll.c -- various optimized routines for collectives                 */

/* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved.           */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_coll.hpp"

#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_op.hpp"
#include "smpi_request.hpp"
#include "xbt/config.hpp"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI (coll)");
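/* COLL_SETTER generates, for each collective category (gather, bcast, ...), the
 * definition of the Colls::<cat> function pointer plus a Colls::set_<cat>() setter
 * that looks the requested algorithm up by name in the matching description table
 * and installs it. */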
#define COLL_SETTER(cat, ret, args, args2)                                        \
  int(*Colls::cat) args;                                                          \
  void Colls::set_##cat(const std::string& name)                                  \
  {                                                                               \
    int id = find_coll_description(mpi_coll_##cat##_description, name, #cat);     \
    cat = reinterpret_cast<ret(*) args>(mpi_coll_##cat##_description[id].coll);   \
    if (cat == nullptr)                                                           \
      xbt_die("Collective " #cat " set to nullptr!");                             \
  }
namespace simgrid {
namespace smpi {

void (*Colls::smpi_coll_cleanup_callback)();
/* these arrays must be nullptr-terminated */
s_mpi_coll_description_t Colls::mpi_coll_gather_description[] = {
    COLL_GATHERS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
s_mpi_coll_description_t Colls::mpi_coll_allgather_description[] = {
    COLL_ALLGATHERS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
s_mpi_coll_description_t Colls::mpi_coll_allgatherv_description[] = {
    COLL_ALLGATHERVS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
s_mpi_coll_description_t Colls::mpi_coll_allreduce_description[] = {
    COLL_ALLREDUCES(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
s_mpi_coll_description_t Colls::mpi_coll_reduce_scatter_description[] = {
    COLL_REDUCE_SCATTERS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
s_mpi_coll_description_t Colls::mpi_coll_scatter_description[] = {
    COLL_SCATTERS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
s_mpi_coll_description_t Colls::mpi_coll_barrier_description[] = {
    COLL_BARRIERS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
s_mpi_coll_description_t Colls::mpi_coll_alltoall_description[] = {
    COLL_ALLTOALLS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
s_mpi_coll_description_t Colls::mpi_coll_alltoallv_description[] = {
    COLL_ALLTOALLVS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
s_mpi_coll_description_t Colls::mpi_coll_bcast_description[] = {
    COLL_BCASTS(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
s_mpi_coll_description_t Colls::mpi_coll_reduce_description[] = {
    COLL_REDUCES(COLL_DESCRIPTION, COLL_COMMA), {"", "", nullptr} };
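/* A sketch of what one table holds after macro expansion (illustrative, not the
 * exact COLL_DESCRIPTION output): each entry pairs an algorithm name with a
 * human-readable description and the pointer to its implementation, e.g.
 *   { "ompi_binomial", "gather ompi_binomial collective", (void*)gather_fn },
 * and the {"", "", nullptr} sentinel marks the end of the table. */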
/** Displays the long description of all the models registered in the given table */
void Colls::coll_help(const char* category, s_mpi_coll_description_t* table)
{
  XBT_WARN("Long description of the %s models accepted by this simulator:\n", category);
  for (int i = 0; not table[i].name.empty(); i++)
    XBT_WARN("  %s: %s\n", table[i].name.c_str(), table[i].description.c_str());
}
int Colls::find_coll_description(s_mpi_coll_description_t* table, const std::string& name, const char* desc)
{
  // Return the index of the algorithm called 'name' in 'table'
  for (int i = 0; not table[i].name.empty(); i++)
    if (name == table[i].name) {
      if (table[i].name != "default")
        XBT_INFO("Switch to algorithm %s for collective %s", table[i].name.c_str(), desc);
      return i;
    }

  // Not found: die, listing the valid names
  if (table[0].name.empty())
    xbt_die("No collective is valid for '%s'! This is a bug.", name.c_str());
  std::string name_list = table[0].name;
  for (int i = 1; not table[i].name.empty(); i++)
    name_list = name_list + ", " + table[i].name;

  xbt_die("Collective '%s' is invalid! Valid collectives are: %s.", name.c_str(), name_list.c_str());
}
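/* Instantiate COLL_SETTER once per collective category: each line below emits the
 * function pointer definition and the set_*() method for that category. */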
COLL_APPLY(COLL_SETTER, COLL_GATHER_SIG, "");
COLL_APPLY(COLL_SETTER, COLL_ALLGATHER_SIG, "");
COLL_APPLY(COLL_SETTER, COLL_ALLGATHERV_SIG, "");
COLL_APPLY(COLL_SETTER, COLL_REDUCE_SIG, "");
COLL_APPLY(COLL_SETTER, COLL_ALLREDUCE_SIG, "");
COLL_APPLY(COLL_SETTER, COLL_REDUCE_SCATTER_SIG, "");
COLL_APPLY(COLL_SETTER, COLL_SCATTER_SIG, "");
COLL_APPLY(COLL_SETTER, COLL_BARRIER_SIG, "");
COLL_APPLY(COLL_SETTER, COLL_BCAST_SIG, "");
COLL_APPLY(COLL_SETTER, COLL_ALLTOALL_SIG, "");
COLL_APPLY(COLL_SETTER, COLL_ALLTOALLV_SIG, "");
void Colls::set_collectives()
{
  std::string selector_name = simgrid::config::get_value<std::string>("smpi/coll-selector");
  if (selector_name.empty())
    selector_name = "default";

  std::pair<std::string, std::function<void(std::string)>> setter_callbacks[] = {
      {"gather", &Colls::set_gather},         {"allgather", &Colls::set_allgather},
      {"allgatherv", &Colls::set_allgatherv}, {"allreduce", &Colls::set_allreduce},
      {"alltoall", &Colls::set_alltoall},     {"alltoallv", &Colls::set_alltoallv},
      {"reduce", &Colls::set_reduce},         {"reduce_scatter", &Colls::set_reduce_scatter},
      {"scatter", &Colls::set_scatter},       {"bcast", &Colls::set_bcast},
      {"barrier", &Colls::set_barrier}};

  for (auto& elem : setter_callbacks) {
    // A per-collective setting (e.g. "smpi/alltoall") overrides the global selector
    std::string name = simgrid::config::get_value<std::string>(("smpi/" + elem.first).c_str());
    if (name.empty())
      name = selector_name;

    (elem.second)(name);
  }
}
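/* Usage sketch (hypothetical command lines, assuming the usual --cfg syntax):
 *   smpirun --cfg=smpi/coll-selector:mpich ./mysim   # pick a whole selector
 *   smpirun --cfg=smpi/alltoall:pair ./mysim         # force one algorithm
 * The per-collective option, when set, takes precedence over the selector. */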
int Colls::finish_nbc_request(MPI_Request request)
{
  // Wait for all sub-requests of a non-blocking collective, then release them
  MPI_Request* requests = request->get_nbc_requests();
  int count             = request->get_nbc_requests_size();
  Request::waitall(count, requests, MPI_STATUS_IGNORE);
  for (int i = 0; i < count; i++) {
    if (requests[i] != MPI_REQUEST_NULL)
      Request::unref(&requests[i]);
  }
  Request::unref(&request);
  return MPI_SUCCESS;
}
// Implementations of the single-algorithm collectives
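/* gatherv, scatterv and alltoallw below follow the same pattern: start the
 * non-blocking variant, then hand the resulting request to finish_nbc_request(),
 * which waits on the sub-requests and releases them, making the call blocking. */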
int Colls::gatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int* recvcounts, int* displs,
                   MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  MPI_Request request;
  Colls::igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm, &request);
  return Colls::finish_nbc_request(request);
}
int Colls::scatterv(void* sendbuf, int* sendcounts, int* displs, MPI_Datatype sendtype, void* recvbuf, int recvcount,
                    MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  MPI_Request request;
  Colls::iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, &request);
  return Colls::finish_nbc_request(request);
}
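/* scan: rank r ends up with sendbuf(0) op sendbuf(1) op ... op sendbuf(r).
 * Every rank posts receives from all lower ranks and sends to all higher ranks;
 * commutative operations fold contributions in completion order, while
 * non-commutative ones must fold them in rank order. */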
int Colls::scan(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag   = -888;
  MPI_Aint lb      = 0;
  MPI_Aint dataext = 0;

  int rank = comm->rank();
  int size = comm->size();

  datatype->extent(&lb, &dataext);

  // Local copy from self
  Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  MPI_Request* requests = xbt_new(MPI_Request, size - 1);
  void** tmpbufs        = xbt_new(void*, rank);
  int index             = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index]  = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = Request::irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = Request::isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Start all comms; their completion is awaited below
  Request::startall(size - 1, requests);

  if (op != MPI_OP_NULL && op->is_commutative()) {
    for (int other = 0; other < size - 1; other++) {
      index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if (index == MPI_UNDEFINED) {
        break;
      }
      if (index < rank) {
        // Request index is below rank: it's an irecv
        op->apply(tmpbufs[index], recvbuf, &count, datatype);
      }
    }
  } else {
    // non-commutative case: wait and apply in rank order
    for (int other = 0; other < size - 1; other++) {
      Request::wait(&(requests[other]), MPI_STATUS_IGNORE);
      if (other < rank && op != MPI_OP_NULL) {
        op->apply(tmpbufs[other], recvbuf, &count, datatype);
      }
    }
  }
  for (index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for (index = 0; index < size - 1; index++) {
    Request::unref(&requests[index]);
  }
  xbt_free(tmpbufs);
  xbt_free(requests);
  return MPI_SUCCESS;
}
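/* exscan: same communication scheme as scan, but rank r combines only the
 * contributions of ranks 0..r-1 (its own value is excluded), so the first
 * received contribution is copied into recvbuf instead of being combined. */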
int Colls::exscan(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag       = -888;
  MPI_Aint lb          = 0;
  MPI_Aint dataext     = 0;
  int recvbuf_is_empty = 1;
  int rank = comm->rank();
  int size = comm->size();

  datatype->extent(&lb, &dataext);

  // Send/Recv buffers to/from others
  MPI_Request* requests = xbt_new(MPI_Request, size - 1);
  void** tmpbufs        = xbt_new(void*, rank);
  int index             = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index]  = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = Request::irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = Request::isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Start all comms; their completion is awaited below
  Request::startall(size - 1, requests);

  if (op != MPI_OP_NULL && op->is_commutative()) {
    for (int other = 0; other < size - 1; other++) {
      index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if (index == MPI_UNDEFINED) {
        break;
      }
      if (index < rank) {
        if (recvbuf_is_empty) {
          Datatype::copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        } else {
          // Request index is below rank: it's an irecv
          op->apply(tmpbufs[index], recvbuf, &count, datatype);
        }
      }
    }
  } else {
    // non-commutative case: wait and apply in rank order
    for (int other = 0; other < size - 1; other++) {
      Request::wait(&(requests[other]), MPI_STATUS_IGNORE);
      if (other < rank) {
        if (recvbuf_is_empty) {
          Datatype::copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        } else if (op != MPI_OP_NULL) {
          op->apply(tmpbufs[other], recvbuf, &count, datatype);
        }
      }
    }
  }
  for (index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for (index = 0; index < size - 1; index++) {
    Request::unref(&requests[index]);
  }
  xbt_free(tmpbufs);
  xbt_free(requests);
  return MPI_SUCCESS;
}
int Colls::alltoallw(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype* sendtypes, void* recvbuf,
                     int* recvcounts, int* recvdisps, MPI_Datatype* recvtypes, MPI_Comm comm)
{
  MPI_Request request;
  Colls::ialltoallw(sendbuf, sendcounts, senddisps, sendtypes, recvbuf, recvcounts, recvdisps, recvtypes, comm,
                    &request);
  return Colls::finish_nbc_request(request);
}

} // namespace smpi
} // namespace simgrid