simgrid.git: src/smpi/colls/smpi_coll.cpp
/* smpi_coll.cpp -- various optimized routines for collectives               */

/* Copyright (c) 2009-2022. The SimGrid Team. All rights reserved.           */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_coll.hpp"
#include "private.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_op.hpp"
#include "smpi_request.hpp"
#include "xbt/config.hpp"

#include <map>

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI collectives.");

namespace simgrid::smpi {

std::map<std::string, std::vector<s_mpi_coll_description_t>, std::less<>> smpi_coll_descriptions(
    {{std::string("gather"),
      {{"default", "gather default collective", (void*)gather__default},
       {"ompi", "gather ompi collective", (void*)gather__ompi},
       {"ompi_basic_linear", "gather ompi_basic_linear collective", (void*)gather__ompi_basic_linear},
       {"ompi_binomial", "gather ompi_binomial collective", (void*)gather__ompi_binomial},
       {"ompi_linear_sync", "gather ompi_linear_sync collective", (void*)gather__ompi_linear_sync},
       {"mpich", "gather mpich collective", (void*)gather__mpich},
       {"mvapich2", "gather mvapich2 collective", (void*)gather__mvapich2},
       {"mvapich2_two_level", "gather mvapich2_two_level collective", (void*)gather__mvapich2_two_level},
       {"impi", "gather impi collective", (void*)gather__impi},
       {"automatic", "gather automatic collective", (void*)gather__automatic}}},

     {"allgather",
      {{"default", "allgather default collective", (void*)allgather__default},
       {"2dmesh", "allgather 2dmesh collective", (void*)allgather__2dmesh},
       {"3dmesh", "allgather 3dmesh collective", (void*)allgather__3dmesh},
       {"bruck", "allgather bruck collective", (void*)allgather__bruck},
       {"GB", "allgather GB collective", (void*)allgather__GB},
       {"loosely_lr", "allgather loosely_lr collective", (void*)allgather__loosely_lr},
       {"NTSLR", "allgather NTSLR collective", (void*)allgather__NTSLR},
       {"NTSLR_NB", "allgather NTSLR_NB collective", (void*)allgather__NTSLR_NB},
       {"pair", "allgather pair collective", (void*)allgather__pair},
       {"rdb", "allgather rdb collective", (void*)allgather__rdb},
       {"rhv", "allgather rhv collective", (void*)allgather__rhv},
       {"ring", "allgather ring collective", (void*)allgather__ring},
       {"SMP_NTS", "allgather SMP_NTS collective", (void*)allgather__SMP_NTS},
       {"smp_simple", "allgather smp_simple collective", (void*)allgather__smp_simple},
       {"spreading_simple", "allgather spreading_simple collective", (void*)allgather__spreading_simple},
       {"ompi", "allgather ompi collective", (void*)allgather__ompi},
       {"ompi_neighborexchange", "allgather ompi_neighborexchange collective", (void*)allgather__ompi_neighborexchange},
       {"mvapich2", "allgather mvapich2 collective", (void*)allgather__mvapich2},
       {"mvapich2_smp", "allgather mvapich2_smp collective", (void*)allgather__mvapich2_smp},
       {"mpich", "allgather mpich collective", (void*)allgather__mpich},
       {"impi", "allgather impi collective", (void*)allgather__impi},
       {"automatic", "allgather automatic collective", (void*)allgather__automatic}}},

     {"allgatherv",
      {{"default", "allgatherv default collective", (void*)allgatherv__default},
       {"GB", "allgatherv GB collective", (void*)allgatherv__GB},
       {"pair", "allgatherv pair collective", (void*)allgatherv__pair},
       {"ring", "allgatherv ring collective", (void*)allgatherv__ring},
       {"ompi", "allgatherv ompi collective", (void*)allgatherv__ompi},
       {"ompi_neighborexchange", "allgatherv ompi_neighborexchange collective",
        (void*)allgatherv__ompi_neighborexchange},
       {"ompi_bruck", "allgatherv ompi_bruck collective", (void*)allgatherv__ompi_bruck},
       {"mpich", "allgatherv mpich collective", (void*)allgatherv__mpich},
       {"mpich_rdb", "allgatherv mpich_rdb collective", (void*)allgatherv__mpich_rdb},
       {"mpich_ring", "allgatherv mpich_ring collective", (void*)allgatherv__mpich_ring},
       {"mvapich2", "allgatherv mvapich2 collective", (void*)allgatherv__mvapich2},
       {"impi", "allgatherv impi collective", (void*)allgatherv__impi},
       {"automatic", "allgatherv automatic collective", (void*)allgatherv__automatic}}},

     {"allreduce",
      {{"default", "allreduce default collective", (void*)allreduce__default},
       {"lr", "allreduce lr collective", (void*)allreduce__lr},
       {"rab1", "allreduce rab1 collective", (void*)allreduce__rab1},
       {"rab2", "allreduce rab2 collective", (void*)allreduce__rab2},
       {"rab_rdb", "allreduce rab_rdb collective", (void*)allreduce__rab_rdb},
       {"rdb", "allreduce rdb collective", (void*)allreduce__rdb},
       {"smp_binomial", "allreduce smp_binomial collective", (void*)allreduce__smp_binomial},
       {"smp_binomial_pipeline", "allreduce smp_binomial_pipeline collective", (void*)allreduce__smp_binomial_pipeline},
       {"smp_rdb", "allreduce smp_rdb collective", (void*)allreduce__smp_rdb},
       {"smp_rsag", "allreduce smp_rsag collective", (void*)allreduce__smp_rsag},
       {"smp_rsag_lr", "allreduce smp_rsag_lr collective", (void*)allreduce__smp_rsag_lr},
       {"smp_rsag_rab", "allreduce smp_rsag_rab collective", (void*)allreduce__smp_rsag_rab},
       {"redbcast", "allreduce redbcast collective", (void*)allreduce__redbcast},
       {"ompi", "allreduce ompi collective", (void*)allreduce__ompi},
       {"ompi_ring_segmented", "allreduce ompi_ring_segmented collective", (void*)allreduce__ompi_ring_segmented},
       {"mpich", "allreduce mpich collective", (void*)allreduce__mpich},
       {"mvapich2", "allreduce mvapich2 collective", (void*)allreduce__mvapich2},
       {"mvapich2_rs", "allreduce mvapich2_rs collective", (void*)allreduce__mvapich2_rs},
       {"mvapich2_two_level", "allreduce mvapich2_two_level collective", (void*)allreduce__mvapich2_two_level},
       {"impi", "allreduce impi collective", (void*)allreduce__impi},
       {"rab", "allreduce rab collective", (void*)allreduce__rab},
       {"automatic", "allreduce automatic collective", (void*)allreduce__automatic}}},

     {"reduce_scatter",
      {{"default", "reduce_scatter default collective", (void*)reduce_scatter__default},
       {"ompi", "reduce_scatter ompi collective", (void*)reduce_scatter__ompi},
       {"ompi_basic_recursivehalving", "reduce_scatter ompi_basic_recursivehalving collective",
        (void*)reduce_scatter__ompi_basic_recursivehalving},
       {"ompi_ring", "reduce_scatter ompi_ring collective", (void*)reduce_scatter__ompi_ring},
       {"ompi_butterfly", "reduce_scatter ompi_butterfly collective", (void*)reduce_scatter__ompi_butterfly},
       {"mpich", "reduce_scatter mpich collective", (void*)reduce_scatter__mpich},
       {"mpich_pair", "reduce_scatter mpich_pair collective", (void*)reduce_scatter__mpich_pair},
       {"mpich_rdb", "reduce_scatter mpich_rdb collective", (void*)reduce_scatter__mpich_rdb},
       {"mpich_noncomm", "reduce_scatter mpich_noncomm collective", (void*)reduce_scatter__mpich_noncomm},
       {"mvapich2", "reduce_scatter mvapich2 collective", (void*)reduce_scatter__mvapich2},
       {"impi", "reduce_scatter impi collective", (void*)reduce_scatter__impi},
       {"automatic", "reduce_scatter automatic collective", (void*)reduce_scatter__automatic}}},

     {"scatter",
      {{"default", "scatter default collective", (void*)scatter__default},
       {"ompi", "scatter ompi collective", (void*)scatter__ompi},
       {"ompi_basic_linear", "scatter ompi_basic_linear collective", (void*)scatter__ompi_basic_linear},
       {"ompi_linear_nb", "scatter ompi_linear nonblocking collective", (void*)scatter__ompi_linear_nb},
       {"ompi_binomial", "scatter ompi_binomial collective", (void*)scatter__ompi_binomial},
       {"mpich", "scatter mpich collective", (void*)scatter__mpich},
       {"mvapich2", "scatter mvapich2 collective", (void*)scatter__mvapich2},
       {"mvapich2_two_level_binomial", "scatter mvapich2_two_level_binomial collective",
        (void*)scatter__mvapich2_two_level_binomial},
       {"mvapich2_two_level_direct", "scatter mvapich2_two_level_direct collective",
        (void*)scatter__mvapich2_two_level_direct},
       {"impi", "scatter impi collective", (void*)scatter__impi},
       {"automatic", "scatter automatic collective", (void*)scatter__automatic}}},

     {"barrier",
      {{"default", "barrier default collective", (void*)barrier__default},
       {"ompi", "barrier ompi collective", (void*)barrier__ompi},
       {"ompi_basic_linear", "barrier ompi_basic_linear collective", (void*)barrier__ompi_basic_linear},
       {"ompi_two_procs", "barrier ompi_two_procs collective", (void*)barrier__ompi_two_procs},
       {"ompi_tree", "barrier ompi_tree collective", (void*)barrier__ompi_tree},
       {"ompi_bruck", "barrier ompi_bruck collective", (void*)barrier__ompi_bruck},
       {"ompi_recursivedoubling", "barrier ompi_recursivedoubling collective", (void*)barrier__ompi_recursivedoubling},
       {"ompi_doublering", "barrier ompi_doublering collective", (void*)barrier__ompi_doublering},
       {"mpich_smp", "barrier mpich_smp collective", (void*)barrier__mpich_smp},
       {"mpich", "barrier mpich collective", (void*)barrier__mpich},
       {"mvapich2_pair", "barrier mvapich2_pair collective", (void*)barrier__mvapich2_pair},
       {"mvapich2", "barrier mvapich2 collective", (void*)barrier__mvapich2},
       {"impi", "barrier impi collective", (void*)barrier__impi},
       {"automatic", "barrier automatic collective", (void*)barrier__automatic}}},

     {"alltoall",
      {{"default", "alltoall default collective", (void*)alltoall__default},
       {"2dmesh", "alltoall 2dmesh collective", (void*)alltoall__2dmesh},
       {"3dmesh", "alltoall 3dmesh collective", (void*)alltoall__3dmesh},
       {"basic_linear", "alltoall basic_linear collective", (void*)alltoall__basic_linear},
       {"bruck", "alltoall bruck collective", (void*)alltoall__bruck},
       {"pair", "alltoall pair collective", (void*)alltoall__pair},
       {"pair_rma", "alltoall pair_rma collective", (void*)alltoall__pair_rma},
       {"pair_light_barrier", "alltoall pair_light_barrier collective", (void*)alltoall__pair_light_barrier},
       {"pair_mpi_barrier", "alltoall pair_mpi_barrier collective", (void*)alltoall__pair_mpi_barrier},
       {"pair_one_barrier", "alltoall pair_one_barrier collective", (void*)alltoall__pair_one_barrier},
       {"rdb", "alltoall rdb collective", (void*)alltoall__rdb},
       {"ring", "alltoall ring collective", (void*)alltoall__ring},
       {"ring_light_barrier", "alltoall ring_light_barrier collective", (void*)alltoall__ring_light_barrier},
       {"ring_mpi_barrier", "alltoall ring_mpi_barrier collective", (void*)alltoall__ring_mpi_barrier},
       {"ring_one_barrier", "alltoall ring_one_barrier collective", (void*)alltoall__ring_one_barrier},
       {"mvapich2", "alltoall mvapich2 collective", (void*)alltoall__mvapich2},
       {"mvapich2_scatter_dest", "alltoall mvapich2_scatter_dest collective", (void*)alltoall__mvapich2_scatter_dest},
       {"ompi", "alltoall ompi collective", (void*)alltoall__ompi},
       {"mpich", "alltoall mpich collective", (void*)alltoall__mpich},
       {"impi", "alltoall impi collective", (void*)alltoall__impi},
       {"automatic", "alltoall automatic collective", (void*)alltoall__automatic}}},

     {"alltoallv",
      {{"default", "alltoallv default collective", (void*)alltoallv__default},
       {"bruck", "alltoallv bruck collective", (void*)alltoallv__bruck},
       {"pair", "alltoallv pair collective", (void*)alltoallv__pair},
       {"pair_light_barrier", "alltoallv pair_light_barrier collective", (void*)alltoallv__pair_light_barrier},
       {"pair_mpi_barrier", "alltoallv pair_mpi_barrier collective", (void*)alltoallv__pair_mpi_barrier},
       {"pair_one_barrier", "alltoallv pair_one_barrier collective", (void*)alltoallv__pair_one_barrier},
       {"ring", "alltoallv ring collective", (void*)alltoallv__ring},
       {"ring_light_barrier", "alltoallv ring_light_barrier collective", (void*)alltoallv__ring_light_barrier},
       {"ring_mpi_barrier", "alltoallv ring_mpi_barrier collective", (void*)alltoallv__ring_mpi_barrier},
       {"ring_one_barrier", "alltoallv ring_one_barrier collective", (void*)alltoallv__ring_one_barrier},
       {"ompi", "alltoallv ompi collective", (void*)alltoallv__ompi},
       {"mpich", "alltoallv mpich collective", (void*)alltoallv__mpich},
       {"ompi_basic_linear", "alltoallv ompi_basic_linear collective", (void*)alltoallv__ompi_basic_linear},
       {"mvapich2", "alltoallv mvapich2 collective", (void*)alltoallv__mvapich2},
       {"impi", "alltoallv impi collective", (void*)alltoallv__impi},
       {"automatic", "alltoallv automatic collective", (void*)alltoallv__automatic}}},

     {"bcast",
      {{"default", "bcast default collective", (void*)bcast__default},
       {"arrival_pattern_aware", "bcast arrival_pattern_aware collective", (void*)bcast__arrival_pattern_aware},
       {"arrival_pattern_aware_wait", "bcast arrival_pattern_aware_wait collective",
        (void*)bcast__arrival_pattern_aware_wait},
       {"arrival_scatter", "bcast arrival_scatter collective", (void*)bcast__arrival_scatter},
       {"binomial_tree", "bcast binomial_tree collective", (void*)bcast__binomial_tree},
       {"flattree", "bcast flattree collective", (void*)bcast__flattree},
       {"flattree_pipeline", "bcast flattree_pipeline collective", (void*)bcast__flattree_pipeline},
       {"NTSB", "bcast NTSB collective", (void*)bcast__NTSB},
       {"NTSL", "bcast NTSL collective", (void*)bcast__NTSL},
       {"NTSL_Isend", "bcast NTSL_Isend collective", (void*)bcast__NTSL_Isend},
       {"scatter_LR_allgather", "bcast scatter_LR_allgather collective", (void*)bcast__scatter_LR_allgather},
       {"scatter_rdb_allgather", "bcast scatter_rdb_allgather collective", (void*)bcast__scatter_rdb_allgather},
       {"SMP_binary", "bcast SMP_binary collective", (void*)bcast__SMP_binary},
       {"SMP_binomial", "bcast SMP_binomial collective", (void*)bcast__SMP_binomial},
       {"SMP_linear", "bcast SMP_linear collective", (void*)bcast__SMP_linear},
       {"ompi", "bcast ompi collective", (void*)bcast__ompi},
       {"ompi_split_bintree", "bcast ompi_split_bintree collective", (void*)bcast__ompi_split_bintree},
       {"ompi_pipeline", "bcast ompi_pipeline collective", (void*)bcast__ompi_pipeline},
       {"mpich", "bcast mpich collective", (void*)bcast__mpich},
       {"mvapich2", "bcast mvapich2 collective", (void*)bcast__mvapich2},
       {"mvapich2_inter_node", "bcast mvapich2_inter_node collective", (void*)bcast__mvapich2_inter_node},
       {"mvapich2_intra_node", "bcast mvapich2_intra_node collective", (void*)bcast__mvapich2_intra_node},
       {"mvapich2_knomial_intra_node", "bcast mvapich2_knomial_intra_node collective",
        (void*)bcast__mvapich2_knomial_intra_node},
       {"impi", "bcast impi collective", (void*)bcast__impi},
       {"automatic", "bcast automatic collective", (void*)bcast__automatic}}},

     {"reduce",
      {{"default", "reduce default collective", (void*)reduce__default},
       {"arrival_pattern_aware", "reduce arrival_pattern_aware collective", (void*)reduce__arrival_pattern_aware},
       {"binomial", "reduce binomial collective", (void*)reduce__binomial},
       {"flat_tree", "reduce flat_tree collective", (void*)reduce__flat_tree},
       {"NTSL", "reduce NTSL collective", (void*)reduce__NTSL},
       {"scatter_gather", "reduce scatter_gather collective", (void*)reduce__scatter_gather},
       {"ompi", "reduce ompi collective", (void*)reduce__ompi},
       {"ompi_chain", "reduce ompi_chain collective", (void*)reduce__ompi_chain},
       {"ompi_pipeline", "reduce ompi_pipeline collective", (void*)reduce__ompi_pipeline},
       {"ompi_basic_linear", "reduce ompi_basic_linear collective", (void*)reduce__ompi_basic_linear},
       {"ompi_in_order_binary", "reduce ompi_in_order_binary collective", (void*)reduce__ompi_in_order_binary},
       {"ompi_binary", "reduce ompi_binary collective", (void*)reduce__ompi_binary},
       {"ompi_binomial", "reduce ompi_binomial collective", (void*)reduce__ompi_binomial},
       {"mpich", "reduce mpich collective", (void*)reduce__mpich},
       {"mvapich2", "reduce mvapich2 collective", (void*)reduce__mvapich2},
       {"mvapich2_knomial", "reduce mvapich2_knomial collective", (void*)reduce__mvapich2_knomial},
       {"mvapich2_two_level", "reduce mvapich2_two_level collective", (void*)reduce__mvapich2_two_level},
       {"impi", "reduce impi collective", (void*)reduce__impi},
       {"rab", "reduce rab collective", (void*)reduce__rab},
       {"automatic", "reduce automatic collective", (void*)reduce__automatic}}}});

// Needed by the unusual implementation of the "automatic" selector
std::vector<s_mpi_coll_description_t>* colls::get_smpi_coll_descriptions(const std::string& name)
{
  auto iter = smpi_coll_descriptions.find(name);
  xbt_assert(iter != smpi_coll_descriptions.end(), "No collective named %s. This is a bug.", name.c_str());
  return &iter->second;
}

static s_mpi_coll_description_t* find_coll_description(const std::string& collective, const std::string& algo)
{
  std::vector<s_mpi_coll_description_t>* table = colls::get_smpi_coll_descriptions(collective);
  xbt_assert(not table->empty(), "No registered algorithm for collective '%s'! This is a bug.", collective.c_str());

  for (auto& desc : *table) {
    if (algo == desc.name) {
      if (desc.name != "default")
        XBT_INFO("Switch to algorithm %s for collective %s", desc.name.c_str(), collective.c_str());
      return &desc;
    }
  }

  std::string name_list = table->at(0).name;
  for (unsigned long i = 1; i < table->size(); i++)
    name_list = name_list + ", " + table->at(i).name;
  xbt_die("Collective '%s' has no algorithm '%s'! Valid algorithms: %s.", collective.c_str(), algo.c_str(),
          name_list.c_str());
}

int (*colls::gather)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                     MPI_Datatype recv_type, int root, MPI_Comm comm);
int (*colls::allgather)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                        MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::allgatherv)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff,
                         const int* recv_count, const int* recv_disps, MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::alltoall)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                       MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::alltoallv)(const void* send_buff, const int* send_counts, const int* send_disps, MPI_Datatype send_type,
                        void* recv_buff, const int* recv_counts, const int* recv_disps, MPI_Datatype recv_type,
                        MPI_Comm comm);
int (*colls::bcast)(void* buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
int (*colls::reduce)(const void* buf, void* rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
int (*colls::allreduce)(const void* sbuf, void* rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*colls::reduce_scatter)(const void* sbuf, void* rbuf, const int* rcounts, MPI_Datatype dtype, MPI_Op op,
                             MPI_Comm comm);
int (*colls::scatter)(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
                      MPI_Datatype recvtype, int root, MPI_Comm comm);
int (*colls::barrier)(MPI_Comm comm);

void (*colls::smpi_coll_cleanup_callback)();

#define COLL_SETTER(cat, ret, args, args2)                                                                             \
  void colls::_XBT_CONCAT(set_, cat)(const std::string& name)                                                          \
  {                                                                                                                    \
    auto desc = find_coll_description(_XBT_STRINGIFY(cat), name);                                                      \
    cat       = reinterpret_cast<ret(*) args>(desc->coll);                                                             \
    xbt_assert(cat != nullptr, "Collective " _XBT_STRINGIFY(cat) " set to nullptr!");                                  \
  }
COLL_APPLY(COLL_SETTER, COLL_GATHER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLGATHER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLGATHERV_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_REDUCE_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLREDUCE_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_REDUCE_SCATTER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_SCATTER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_BARRIER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_BCAST_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLTOALL_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLTOALLV_SIG, "")
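// Illustration only (not compiled): the COLL_*_SIG tuples are defined in smpi_coll.hpp and supply
// the (cat, ret, args, args2) arguments consumed by COLL_SETTER above. Assuming COLL_BARRIER_SIG
// provides roughly (barrier, int, (MPI_Comm comm), (comm)), consistent with the barrier function
// pointer declared earlier in this file, its COLL_APPLY line would expand to something like:
//
//   void colls::set_barrier(const std::string& name)
//   {
//     auto desc = find_coll_description("barrier", name);
//     barrier   = reinterpret_cast<int (*)(MPI_Comm comm)>(desc->coll);
//     xbt_assert(barrier != nullptr, "Collective barrier set to nullptr!");
//   }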

void colls::set_collectives()
{
  std::string selector_name = simgrid::config::get_value<std::string>("smpi/coll-selector");
  if (selector_name.empty())
    selector_name = "default";

  std::pair<std::string, std::function<void(std::string)>> setter_callbacks[] = {
      {"gather", &colls::set_gather},         {"allgather", &colls::set_allgather},
      {"allgatherv", &colls::set_allgatherv}, {"allreduce", &colls::set_allreduce},
      {"alltoall", &colls::set_alltoall},     {"alltoallv", &colls::set_alltoallv},
      {"reduce", &colls::set_reduce},         {"reduce_scatter", &colls::set_reduce_scatter},
      {"scatter", &colls::set_scatter},       {"bcast", &colls::set_bcast},
      {"barrier", &colls::set_barrier}};

  for (auto& elem : setter_callbacks) {
    std::string name = simgrid::config::get_value<std::string>(("smpi/" + elem.first).c_str());
    if (name.empty())
      name = selector_name;

    (elem.second)(name);
  }
}
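// Usage sketch (illustrative, not part of the compiled file): the configuration items read above,
// "smpi/coll-selector" and "smpi/<collective>", are ordinary SimGrid config options, so a
// simulation can pick algorithms from the tables at the top of this file on the command line,
// for instance:
//
//   smpirun --cfg=smpi/coll-selector:mpich ...               # use the MPICH selector everywhere
//   smpirun --cfg=smpi/alltoall:pair --cfg=smpi/bcast:binomial_tree ...
//
// Any collective that is not overridden individually falls back to the selector chosen by
// "smpi/coll-selector", or to "default" when that option is left empty.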

// Implementations of the single-algorithm collectives

int colls::gatherv(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, const int* recvcounts,
                   const int* displs, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  MPI_Request request;
  colls::igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm, &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

int colls::scatterv(const void* sendbuf, const int* sendcounts, const int* displs, MPI_Datatype sendtype, void* recvbuf,
                    int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  MPI_Request request;
  colls::iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

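// colls::scan computes, on each rank r, the reduction of the send buffers of ranks 0..r
// (inclusive) into recvbuf using a simple linear algorithm: the local contribution is copied
// first, each lower rank's buffer is received directly, and our own buffer is sent to every
// higher rank. For commutative operations the incoming buffers are folded in completion order;
// for non-commutative ones they are folded in rank order.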
int colls::scan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag   = -888;
  MPI_Aint lb      = 0;
  MPI_Aint dataext = 0;

  int rank = comm->rank();
  int size = comm->size();

  datatype->extent(&lb, &dataext);

  // Local copy from self
  Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  auto* requests = new MPI_Request[size - 1];
  auto** tmpbufs = new unsigned char*[rank];
  int index = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index]  = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = Request::irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = Request::isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Start all communications; completion is handled below.
  Request::startall(size - 1, requests);

  if (op != MPI_OP_NULL && op->is_commutative()) {
    for (int other = 0; other < size - 1; other++) {
      index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if (index == MPI_UNDEFINED) {
        break;
      }
      if (index < rank) {
        // Request index is below rank: it's an irecv, so fold the received data into recvbuf.
        op->apply(tmpbufs[index], recvbuf, &count, datatype);
      }
    }
  } else {
    // Non-commutative case: wait and fold in rank order.
    for (int other = 0; other < size - 1; other++) {
      Request::wait(&(requests[other]), MPI_STATUS_IGNORE);
      if (other < rank && op != MPI_OP_NULL) {
        op->apply(tmpbufs[other], recvbuf, &count, datatype);
      }
    }
  }
  for (index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for (index = 0; index < size - 1; index++) {
    Request::unref(&requests[index]);
  }
  delete[] tmpbufs;
  delete[] requests;
  return MPI_SUCCESS;
}

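// colls::exscan is the exclusive variant: rank r ends up with the reduction of ranks 0..r-1
// only, so its own contribution is excluded and rank 0's recvbuf is left untouched. The
// communication pattern mirrors colls::scan above; the first buffer received simply initializes
// recvbuf instead of being combined with the local data.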
int colls::exscan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag       = -888;
  MPI_Aint lb          = 0;
  MPI_Aint dataext     = 0;
  int recvbuf_is_empty = 1;
  int rank = comm->rank();
  int size = comm->size();

  datatype->extent(&lb, &dataext);

  // Send/Recv buffers to/from others
  auto* requests = new MPI_Request[size - 1];
  auto** tmpbufs = new unsigned char*[rank];
  int index = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index]  = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = Request::irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = Request::isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Start all communications; completion is handled below.
  Request::startall(size - 1, requests);

  if (op != MPI_OP_NULL && op->is_commutative()) {
    for (int other = 0; other < size - 1; other++) {
      index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if (index == MPI_UNDEFINED) {
        break;
      }
      if (index < rank) {
        if (recvbuf_is_empty) {
          // First received buffer: initialize recvbuf with it.
          Datatype::copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        } else
          // Request index is below rank: it's an irecv, so fold the received data into recvbuf.
          op->apply(tmpbufs[index], recvbuf, &count, datatype);
      }
    }
  } else {
    // Non-commutative case: wait and fold in rank order.
    for (int other = 0; other < size - 1; other++) {
      Request::wait(&(requests[other]), MPI_STATUS_IGNORE);
      if (other < rank) {
        if (recvbuf_is_empty) {
          Datatype::copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        } else if (op != MPI_OP_NULL)
          op->apply(tmpbufs[other], recvbuf, &count, datatype);
      }
    }
  }
  for (index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for (index = 0; index < size - 1; index++) {
    Request::unref(&requests[index]);
  }
  delete[] tmpbufs;
  delete[] requests;
  return MPI_SUCCESS;
}

int colls::alltoallw(const void* sendbuf, const int* sendcounts, const int* senddisps, const MPI_Datatype* sendtypes,
                     void* recvbuf, const int* recvcounts, const int* recvdisps, const MPI_Datatype* recvtypes,
                     MPI_Comm comm)
{
  MPI_Request request;
  colls::ialltoallw(sendbuf, sendcounts, senddisps, sendtypes, recvbuf, recvcounts, recvdisps, recvtypes, comm,
                    &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

} // namespace simgrid::smpi