AND (Algorithmique Numérique Distribuée)

Public GIT Repository
smpi colls: use C++ containers instead of null-terminated C vectors
[simgrid.git] / src / smpi / colls / smpi_coll.cpp
/* smpi_coll.cpp -- various optimized routines for collectives              */

/* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved.          */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_coll.hpp"
#include "private.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_op.hpp"
#include "smpi_request.hpp"
#include "xbt/config.hpp"

#include <map>

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI collectives.");

namespace simgrid {
namespace smpi {

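// Table of the collective algorithms known to SMPI: for each collective (gather, allgather, ...)
// it maps an algorithm name to a human-readable description and to the function implementing it,
// stored as a type-erased void* and cast back to the proper signature by the COLL_SETTER macro below.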
std::map<std::string, std::vector<s_mpi_coll_description_t>> smpi_coll_descriptions(
    {{std::string("gather"),
      {{"default", "gather default collective", (void*)gather__default},
       {"ompi", "gather ompi collective", (void*)gather__ompi},
       {"ompi_basic_linear", "gather ompi_basic_linear collective", (void*)gather__ompi_basic_linear},
       {"ompi_binomial", "gather ompi_binomial collective", (void*)gather__ompi_binomial},
       {"ompi_linear_sync", "gather ompi_linear_sync collective", (void*)gather__ompi_linear_sync},
       {"mpich", "gather mpich collective", (void*)gather__mpich},
       {"mvapich2", "gather mvapich2 collective", (void*)gather__mvapich2},
       {"mvapich2_two_level", "gather mvapich2_two_level collective", (void*)gather__mvapich2_two_level},
       {"impi", "gather impi collective", (void*)gather__impi},
       {"automatic", "gather automatic collective", (void*)gather__automatic}}},

     {"allgather",
      {{"default", "allgather default collective", (void*)allgather__default},
       {"2dmesh", "allgather 2dmesh collective", (void*)allgather__2dmesh},
       {"3dmesh", "allgather 3dmesh collective", (void*)allgather__3dmesh},
       {"bruck", "allgather bruck collective", (void*)allgather__bruck},
       {"GB", "allgather GB collective", (void*)allgather__GB},
       {"loosely_lr", "allgather loosely_lr collective", (void*)allgather__loosely_lr},
       {"NTSLR", "allgather NTSLR collective", (void*)allgather__NTSLR},
       {"NTSLR_NB", "allgather NTSLR_NB collective", (void*)allgather__NTSLR_NB},
       {"pair", "allgather pair collective", (void*)allgather__pair},
       {"rdb", "allgather rdb collective", (void*)allgather__rdb},
       {"rhv", "allgather rhv collective", (void*)allgather__rhv},
       {"ring", "allgather ring collective", (void*)allgather__ring},
       {"SMP_NTS", "allgather SMP_NTS collective", (void*)allgather__SMP_NTS},
       {"smp_simple", "allgather smp_simple collective", (void*)allgather__smp_simple},
       {"spreading_simple", "allgather spreading_simple collective", (void*)allgather__spreading_simple},
       {"ompi", "allgather ompi collective", (void*)allgather__ompi},
       {"ompi_neighborexchange", "allgather ompi_neighborexchange collective", (void*)allgather__ompi_neighborexchange},
       {"mvapich2", "allgather mvapich2 collective", (void*)allgather__mvapich2},
       {"mvapich2_smp", "allgather mvapich2_smp collective", (void*)allgather__mvapich2_smp},
       {"mpich", "allgather mpich collective", (void*)allgather__mpich},
       {"impi", "allgather impi collective", (void*)allgather__impi},
       {"automatic", "allgather automatic collective", (void*)allgather__automatic}}},

     {"allgatherv",
      {{"default", "allgatherv default collective", (void*)allgatherv__default},
       {"GB", "allgatherv GB collective", (void*)allgatherv__GB},
       {"pair", "allgatherv pair collective", (void*)allgatherv__pair},
       {"ring", "allgatherv ring collective", (void*)allgatherv__ring},
       {"ompi", "allgatherv ompi collective", (void*)allgatherv__ompi},
       {"ompi_neighborexchange", "allgatherv ompi_neighborexchange collective",
        (void*)allgatherv__ompi_neighborexchange},
       {"ompi_bruck", "allgatherv ompi_bruck collective", (void*)allgatherv__ompi_bruck},
       {"mpich", "allgatherv mpich collective", (void*)allgatherv__mpich},
       {"mpich_rdb", "allgatherv mpich_rdb collective", (void*)allgatherv__mpich_rdb},
       {"mpich_ring", "allgatherv mpich_ring collective", (void*)allgatherv__mpich_ring},
       {"mvapich2", "allgatherv mvapich2 collective", (void*)allgatherv__mvapich2},
       {"impi", "allgatherv impi collective", (void*)allgatherv__impi},
       {"automatic", "allgatherv automatic collective", (void*)allgatherv__automatic}}},

     {"allreduce",
      {{"default", "allreduce default collective", (void*)allreduce__default},
       {"lr", "allreduce lr collective", (void*)allreduce__lr},
       {"rab1", "allreduce rab1 collective", (void*)allreduce__rab1},
       {"rab2", "allreduce rab2 collective", (void*)allreduce__rab2},
       {"rab_rdb", "allreduce rab_rdb collective", (void*)allreduce__rab_rdb},
       {"rdb", "allreduce rdb collective", (void*)allreduce__rdb},
       {"smp_binomial", "allreduce smp_binomial collective", (void*)allreduce__smp_binomial},
       {"smp_binomial_pipeline", "allreduce smp_binomial_pipeline collective", (void*)allreduce__smp_binomial_pipeline},
       {"smp_rdb", "allreduce smp_rdb collective", (void*)allreduce__smp_rdb},
       {"smp_rsag", "allreduce smp_rsag collective", (void*)allreduce__smp_rsag},
       {"smp_rsag_lr", "allreduce smp_rsag_lr collective", (void*)allreduce__smp_rsag_lr},
       {"smp_rsag_rab", "allreduce smp_rsag_rab collective", (void*)allreduce__smp_rsag_rab},
       {"redbcast", "allreduce redbcast collective", (void*)allreduce__redbcast},
       {"ompi", "allreduce ompi collective", (void*)allreduce__ompi},
       {"ompi_ring_segmented", "allreduce ompi_ring_segmented collective", (void*)allreduce__ompi_ring_segmented},
       {"mpich", "allreduce mpich collective", (void*)allreduce__mpich},
       {"mvapich2", "allreduce mvapich2 collective", (void*)allreduce__mvapich2},
       {"mvapich2_rs", "allreduce mvapich2_rs collective", (void*)allreduce__mvapich2_rs},
       {"mvapich2_two_level", "allreduce mvapich2_two_level collective", (void*)allreduce__mvapich2_two_level},
       {"impi", "allreduce impi collective", (void*)allreduce__impi},
       {"rab", "allreduce rab collective", (void*)allreduce__rab},
       {"automatic", "allreduce automatic collective", (void*)allreduce__automatic}}},

     {"reduce_scatter",
      {{"default", "reduce_scatter default collective", (void*)reduce_scatter__default},
       {"ompi", "reduce_scatter ompi collective", (void*)reduce_scatter__ompi},
       {"ompi_basic_recursivehalving", "reduce_scatter ompi_basic_recursivehalving collective",
        (void*)reduce_scatter__ompi_basic_recursivehalving},
       {"ompi_ring", "reduce_scatter ompi_ring collective", (void*)reduce_scatter__ompi_ring},
       {"mpich", "reduce_scatter mpich collective", (void*)reduce_scatter__mpich},
       {"mpich_pair", "reduce_scatter mpich_pair collective", (void*)reduce_scatter__mpich_pair},
       {"mpich_rdb", "reduce_scatter mpich_rdb collective", (void*)reduce_scatter__mpich_rdb},
       {"mpich_noncomm", "reduce_scatter mpich_noncomm collective", (void*)reduce_scatter__mpich_noncomm},
       {"mvapich2", "reduce_scatter mvapich2 collective", (void*)reduce_scatter__mvapich2},
       {"impi", "reduce_scatter impi collective", (void*)reduce_scatter__impi},
       {"automatic", "reduce_scatter automatic collective", (void*)reduce_scatter__automatic}}},

     {"scatter",
      {{"default", "scatter default collective", (void*)scatter__default},
       {"ompi", "scatter ompi collective", (void*)scatter__ompi},
       {"ompi_basic_linear", "scatter ompi_basic_linear collective", (void*)scatter__ompi_basic_linear},
       {"ompi_binomial", "scatter ompi_binomial collective", (void*)scatter__ompi_binomial},
       {"mpich", "scatter mpich collective", (void*)scatter__mpich},
       {"mvapich2", "scatter mvapich2 collective", (void*)scatter__mvapich2},
       {"mvapich2_two_level_binomial", "scatter mvapich2_two_level_binomial collective",
        (void*)scatter__mvapich2_two_level_binomial},
       {"mvapich2_two_level_direct", "scatter mvapich2_two_level_direct collective",
        (void*)scatter__mvapich2_two_level_direct},
       {"impi", "scatter impi collective", (void*)scatter__impi},
       {"automatic", "scatter automatic collective", (void*)scatter__automatic}}},

     {"barrier",
      {{"default", "barrier default collective", (void*)barrier__default},
       {"ompi", "barrier ompi collective", (void*)barrier__ompi},
       {"ompi_basic_linear", "barrier ompi_basic_linear collective", (void*)barrier__ompi_basic_linear},
       {"ompi_two_procs", "barrier ompi_two_procs collective", (void*)barrier__ompi_two_procs},
       {"ompi_tree", "barrier ompi_tree collective", (void*)barrier__ompi_tree},
       {"ompi_bruck", "barrier ompi_bruck collective", (void*)barrier__ompi_bruck},
       {"ompi_recursivedoubling", "barrier ompi_recursivedoubling collective", (void*)barrier__ompi_recursivedoubling},
       {"ompi_doublering", "barrier ompi_doublering collective", (void*)barrier__ompi_doublering},
       {"mpich_smp", "barrier mpich_smp collective", (void*)barrier__mpich_smp},
       {"mpich", "barrier mpich collective", (void*)barrier__mpich},
       {"mvapich2_pair", "barrier mvapich2_pair collective", (void*)barrier__mvapich2_pair},
       {"mvapich2", "barrier mvapich2 collective", (void*)barrier__mvapich2},
       {"impi", "barrier impi collective", (void*)barrier__impi},
       {"automatic", "barrier automatic collective", (void*)barrier__automatic}}},

     {"alltoall",
      {{"default", "alltoall default collective", (void*)alltoall__default},
       {"2dmesh", "alltoall 2dmesh collective", (void*)alltoall__2dmesh},
       {"3dmesh", "alltoall 3dmesh collective", (void*)alltoall__3dmesh},
       {"basic_linear", "alltoall basic_linear collective", (void*)alltoall__basic_linear},
       {"bruck", "alltoall bruck collective", (void*)alltoall__bruck},
       {"pair", "alltoall pair collective", (void*)alltoall__pair},
       {"pair_rma", "alltoall pair_rma collective", (void*)alltoall__pair_rma},
       {"pair_light_barrier", "alltoall pair_light_barrier collective", (void*)alltoall__pair_light_barrier},
       {"pair_mpi_barrier", "alltoall pair_mpi_barrier collective", (void*)alltoall__pair_mpi_barrier},
       {"pair_one_barrier", "alltoall pair_one_barrier collective", (void*)alltoall__pair_one_barrier},
       {"rdb", "alltoall rdb collective", (void*)alltoall__rdb},
       {"ring", "alltoall ring collective", (void*)alltoall__ring},
       {"ring_light_barrier", "alltoall ring_light_barrier collective", (void*)alltoall__ring_light_barrier},
       {"ring_mpi_barrier", "alltoall ring_mpi_barrier collective", (void*)alltoall__ring_mpi_barrier},
       {"ring_one_barrier", "alltoall ring_one_barrier collective", (void*)alltoall__ring_one_barrier},
       {"mvapich2", "alltoall mvapich2 collective", (void*)alltoall__mvapich2},
       {"mvapich2_scatter_dest", "alltoall mvapich2_scatter_dest collective", (void*)alltoall__mvapich2_scatter_dest},
       {"ompi", "alltoall ompi collective", (void*)alltoall__ompi},
       {"mpich", "alltoall mpich collective", (void*)alltoall__mpich},
       {"impi", "alltoall impi collective", (void*)alltoall__impi},
       {"automatic", "alltoall automatic collective", (void*)alltoall__automatic}}},

     {"alltoallv",
      {{"default", "alltoallv default collective", (void*)alltoallv__default},
       {"bruck", "alltoallv bruck collective", (void*)alltoallv__bruck},
       {"pair", "alltoallv pair collective", (void*)alltoallv__pair},
       {"pair_light_barrier", "alltoallv pair_light_barrier collective", (void*)alltoallv__pair_light_barrier},
       {"pair_mpi_barrier", "alltoallv pair_mpi_barrier collective", (void*)alltoallv__pair_mpi_barrier},
       {"pair_one_barrier", "alltoallv pair_one_barrier collective", (void*)alltoallv__pair_one_barrier},
       {"ring", "alltoallv ring collective", (void*)alltoallv__ring},
       {"ring_light_barrier", "alltoallv ring_light_barrier collective", (void*)alltoallv__ring_light_barrier},
       {"ring_mpi_barrier", "alltoallv ring_mpi_barrier collective", (void*)alltoallv__ring_mpi_barrier},
       {"ring_one_barrier", "alltoallv ring_one_barrier collective", (void*)alltoallv__ring_one_barrier},
       {"ompi", "alltoallv ompi collective", (void*)alltoallv__ompi},
       {"mpich", "alltoallv mpich collective", (void*)alltoallv__mpich},
       {"ompi_basic_linear", "alltoallv ompi_basic_linear collective", (void*)alltoallv__ompi_basic_linear},
       {"mvapich2", "alltoallv mvapich2 collective", (void*)alltoallv__mvapich2},
       {"impi", "alltoallv impi collective", (void*)alltoallv__impi},
       {"automatic", "alltoallv automatic collective", (void*)alltoallv__automatic}}},

     {"bcast",
      {{"default", "bcast default collective", (void*)bcast__default},
       {"arrival_pattern_aware", "bcast arrival_pattern_aware collective", (void*)bcast__arrival_pattern_aware},
       {"arrival_pattern_aware_wait", "bcast arrival_pattern_aware_wait collective",
        (void*)bcast__arrival_pattern_aware_wait},
       {"arrival_scatter", "bcast arrival_scatter collective", (void*)bcast__arrival_scatter},
       {"binomial_tree", "bcast binomial_tree collective", (void*)bcast__binomial_tree},
       {"flattree", "bcast flattree collective", (void*)bcast__flattree},
       {"flattree_pipeline", "bcast flattree_pipeline collective", (void*)bcast__flattree_pipeline},
       {"NTSB", "bcast NTSB collective", (void*)bcast__NTSB},
       {"NTSL", "bcast NTSL collective", (void*)bcast__NTSL},
       {"NTSL_Isend", "bcast NTSL_Isend collective", (void*)bcast__NTSL_Isend},
       {"scatter_LR_allgather", "bcast scatter_LR_allgather collective", (void*)bcast__scatter_LR_allgather},
       {"scatter_rdb_allgather", "bcast scatter_rdb_allgather collective", (void*)bcast__scatter_rdb_allgather},
       {"SMP_binary", "bcast SMP_binary collective", (void*)bcast__SMP_binary},
       {"SMP_binomial", "bcast SMP_binomial collective", (void*)bcast__SMP_binomial},
       {"SMP_linear", "bcast SMP_linear collective", (void*)bcast__SMP_linear},
       {"ompi", "bcast ompi collective", (void*)bcast__ompi},
       {"ompi_split_bintree", "bcast ompi_split_bintree collective", (void*)bcast__ompi_split_bintree},
       {"ompi_pipeline", "bcast ompi_pipeline collective", (void*)bcast__ompi_pipeline},
       {"mpich", "bcast mpich collective", (void*)bcast__mpich},
       {"mvapich2", "bcast mvapich2 collective", (void*)bcast__mvapich2},
       {"mvapich2_inter_node", "bcast mvapich2_inter_node collective", (void*)bcast__mvapich2_inter_node},
       {"mvapich2_intra_node", "bcast mvapich2_intra_node collective", (void*)bcast__mvapich2_intra_node},
       {"mvapich2_knomial_intra_node", "bcast mvapich2_knomial_intra_node collective",
        (void*)bcast__mvapich2_knomial_intra_node},
       {"impi", "bcast impi collective", (void*)bcast__impi},
       {"automatic", "bcast automatic collective", (void*)bcast__automatic}}},

     {"reduce",
      {{"default", "reduce default collective", (void*)reduce__default},
       {"arrival_pattern_aware", "reduce arrival_pattern_aware collective", (void*)reduce__arrival_pattern_aware},
       {"binomial", "reduce binomial collective", (void*)reduce__binomial},
       {"flat_tree", "reduce flat_tree collective", (void*)reduce__flat_tree},
       {"NTSL", "reduce NTSL collective", (void*)reduce__NTSL},
       {"scatter_gather", "reduce scatter_gather collective", (void*)reduce__scatter_gather},
       {"ompi", "reduce ompi collective", (void*)reduce__ompi},
       {"ompi_chain", "reduce ompi_chain collective", (void*)reduce__ompi_chain},
       {"ompi_pipeline", "reduce ompi_pipeline collective", (void*)reduce__ompi_pipeline},
       {"ompi_basic_linear", "reduce ompi_basic_linear collective", (void*)reduce__ompi_basic_linear},
       {"ompi_in_order_binary", "reduce ompi_in_order_binary collective", (void*)reduce__ompi_in_order_binary},
       {"ompi_binary", "reduce ompi_binary collective", (void*)reduce__ompi_binary},
       {"ompi_binomial", "reduce ompi_binomial collective", (void*)reduce__ompi_binomial},
       {"mpich", "reduce mpich collective", (void*)reduce__mpich},
       {"mvapich2", "reduce mvapich2 collective", (void*)reduce__mvapich2},
       {"mvapich2_knomial", "reduce mvapich2_knomial collective", (void*)reduce__mvapich2_knomial},
       {"mvapich2_two_level", "reduce mvapich2_two_level collective", (void*)reduce__mvapich2_two_level},
       {"impi", "reduce impi collective", (void*)reduce__impi},
       {"rab", "reduce rab collective", (void*)reduce__rab},
       {"automatic", "reduce automatic collective", (void*)reduce__automatic}}}});

// Needed by the automatic selector weird implementation
std::vector<s_mpi_coll_description_t>* colls::get_smpi_coll_descriptions(const std::string& name)
{
  if (smpi_coll_descriptions.find(name) == smpi_coll_descriptions.end())
    xbt_die("No collective named %s. This is a bug.", name.c_str());
  return &smpi_coll_descriptions[name];
}

static s_mpi_coll_description_t* find_coll_description(const std::string& collective, const std::string& algo)
{
  std::vector<s_mpi_coll_description_t>* table = colls::get_smpi_coll_descriptions(collective);
  if (table->empty())
    xbt_die("No registered algorithm for collective '%s'! This is a bug.", collective.c_str());

  for (unsigned long i = 0; i < table->size(); i++) {
    auto desc = &table->at(i);
    if (algo == desc->name) {
      if (desc->name != "default")
        XBT_INFO("Switch to algorithm %s for collective %s", desc->name.c_str(), collective.c_str());
      return desc;
    }
  }

  std::string name_list = table->at(0).name;
  for (unsigned long i = 1; i < table->size(); i++)
    name_list = name_list + ", " + table->at(i).name;
  xbt_die("Collective '%s' has no algorithm '%s'! Valid algorithms: %s.", collective.c_str(), algo.c_str(), name_list.c_str());
}

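// Function pointers holding the implementation currently selected for each collective;
// they are installed by the colls::set_* functions generated below.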
int (*colls::gather)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                     MPI_Datatype recv_type, int root, MPI_Comm comm);
int (*colls::allgather)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                        MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::allgatherv)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff,
                         const int* recv_count, const int* recv_disps, MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::alltoall)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                       MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::alltoallv)(const void* send_buff, const int* send_counts, const int* send_disps, MPI_Datatype send_type,
                        void* recv_buff, const int* recv_counts, const int* recv_disps, MPI_Datatype recv_type,
                        MPI_Comm comm);
int (*colls::bcast)(void* buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
int (*colls::reduce)(const void* buf, void* rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
int (*colls::allreduce)(const void* sbuf, void* rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*colls::reduce_scatter)(const void* sbuf, void* rbuf, const int* rcounts, MPI_Datatype dtype, MPI_Op op,
                             MPI_Comm comm);
int (*colls::scatter)(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
                      MPI_Datatype recvtype, int root, MPI_Comm comm);
int (*colls::barrier)(MPI_Comm comm);

void (*colls::smpi_coll_cleanup_callback)();

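// COLL_SETTER generates one colls::set_<collective>() per collective (e.g. colls::set_gather):
// it looks the requested algorithm up in the table above and installs its function pointer,
// cast back to the proper signature, into the matching colls::<collective> pointer.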
#define COLL_SETTER(cat, ret, args, args2)                                                                             \
  void colls::_XBT_CONCAT(set_, cat)(const std::string& name)                                                          \
  {                                                                                                                    \
    auto desc = find_coll_description(_XBT_STRINGIFY(cat), name);                                                      \
    cat       = reinterpret_cast<ret(*) args>(desc->coll);                                                             \
    if (cat == nullptr)                                                                                                \
      xbt_die("Collective " _XBT_STRINGIFY(cat) " set to nullptr!");                                                   \
  }
COLL_APPLY(COLL_SETTER, COLL_GATHER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLGATHER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLGATHERV_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_REDUCE_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLREDUCE_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_REDUCE_SCATTER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_SCATTER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_BARRIER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_BCAST_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLTOALL_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLTOALLV_SIG, "")

void colls::set_collectives()
{
  std::string selector_name = simgrid::config::get_value<std::string>("smpi/coll-selector");
  if (selector_name.empty())
    selector_name = "default";

  std::pair<std::string, std::function<void(std::string)>> setter_callbacks[] = {
      {"gather", &colls::set_gather},         {"allgather", &colls::set_allgather},
      {"allgatherv", &colls::set_allgatherv}, {"allreduce", &colls::set_allreduce},
      {"alltoall", &colls::set_alltoall},     {"alltoallv", &colls::set_alltoallv},
      {"reduce", &colls::set_reduce},         {"reduce_scatter", &colls::set_reduce_scatter},
      {"scatter", &colls::set_scatter},       {"bcast", &colls::set_bcast},
      {"barrier", &colls::set_barrier}};

  for (auto& elem : setter_callbacks) {
    std::string name = simgrid::config::get_value<std::string>(("smpi/" + elem.first).c_str());
    if (name.empty())
      name = selector_name;

    (elem.second)(name);
  }
}
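// The algorithms are thus chosen from the configuration: "smpi/coll-selector" picks a whole selector
// (e.g. "mpich" or "ompi"), while "smpi/<collective>" (e.g. "smpi/alltoall") overrides a single
// collective. With smpirun this is typically passed on the command line, e.g. --cfg=smpi/alltoall:pair
// (example given for illustration; see the SimGrid documentation for the exact option syntax).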

// Implementations of the single-algorithm collectives

int colls::gatherv(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, const int* recvcounts,
                   const int* displs, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  MPI_Request request;
  colls::igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm, &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

int colls::scatterv(const void* sendbuf, const int* sendcounts, const int* displs, MPI_Datatype sendtype, void* recvbuf,
                    int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  MPI_Request request;
  colls::iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

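// Scan: every rank posts a receive from each lower rank and a send to each higher rank, then folds
// the received contributions into recvbuf with 'op' (as they complete when 'op' is commutative,
// strictly in rank order otherwise).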
int colls::scan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag  = -888;
  MPI_Aint lb      = 0;
  MPI_Aint dataext = 0;

  int rank = comm->rank();
  int size = comm->size();

  datatype->extent(&lb, &dataext);

  // Local copy from self
  Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  MPI_Request* requests   = new MPI_Request[size - 1];
  unsigned char** tmpbufs = new unsigned char*[rank];
  int index = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index]  = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = Request::irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = Request::isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  Request::startall(size - 1, requests);

  if (op != MPI_OP_NULL && op->is_commutative()) {
    for (int other = 0; other < size - 1; other++) {
      index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if (index == MPI_UNDEFINED) {
        break;
      }
      if (index < rank) {
        // Request index is below rank: it's an irecv
        op->apply(tmpbufs[index], recvbuf, &count, datatype);
      }
    }
  } else {
    // Non-commutative case: wait and combine in rank order
    for (int other = 0; other < size - 1; other++) {
      Request::wait(&(requests[other]), MPI_STATUS_IGNORE);
      if (other < rank && op != MPI_OP_NULL) {
        // Requests below rank are the irecvs from lower ranks
        op->apply(tmpbufs[other], recvbuf, &count, datatype);
      }
    }
  }
  for (index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for (index = 0; index < size - 1; index++) {
    Request::unref(&requests[index]);
  }
  delete[] tmpbufs;
  delete[] requests;
  return MPI_SUCCESS;
}

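// Exscan: same communication pattern as scan above, but the local contribution is excluded:
// recvbuf starts empty, the first contribution received from a lower rank is copied into it,
// and the following ones are combined with 'op'. On rank 0, recvbuf is left untouched.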
int colls::exscan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag       = -888;
  MPI_Aint lb          = 0;
  MPI_Aint dataext     = 0;
  int recvbuf_is_empty = 1;
  int rank = comm->rank();
  int size = comm->size();

  datatype->extent(&lb, &dataext);

  // Send/Recv buffers to/from others
  MPI_Request* requests   = new MPI_Request[size - 1];
  unsigned char** tmpbufs = new unsigned char*[rank];
  int index = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index]  = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = Request::irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = Request::isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  Request::startall(size - 1, requests);

  if (op != MPI_OP_NULL && op->is_commutative()) {
    for (int other = 0; other < size - 1; other++) {
      index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if (index == MPI_UNDEFINED) {
        break;
      }
      if (index < rank) {
        if (recvbuf_is_empty) {
          Datatype::copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        } else {
          // Request index is below rank: it's an irecv
          op->apply(tmpbufs[index], recvbuf, &count, datatype);
        }
      }
    }
  } else {
    // Non-commutative case: wait and combine in rank order
    for (int other = 0; other < size - 1; other++) {
      Request::wait(&(requests[other]), MPI_STATUS_IGNORE);
      if (other < rank) {
        if (recvbuf_is_empty) {
          Datatype::copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        } else if (op != MPI_OP_NULL) {
          op->apply(tmpbufs[other], recvbuf, &count, datatype);
        }
      }
    }
  }
  for (index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for (index = 0; index < size - 1; index++) {
    Request::unref(&requests[index]);
  }
  delete[] tmpbufs;
  delete[] requests;
  return MPI_SUCCESS;
}

int colls::alltoallw(const void* sendbuf, const int* sendcounts, const int* senddisps, const MPI_Datatype* sendtypes,
                     void* recvbuf, const int* recvcounts, const int* recvdisps, const MPI_Datatype* recvtypes,
                     MPI_Comm comm)
{
  MPI_Request request;
  colls::ialltoallw(sendbuf, sendcounts, senddisps, sendtypes, recvbuf, recvcounts, recvdisps, recvtypes, comm,
                    &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

} // namespace smpi
} // namespace simgrid