
simgrid.git: src/smpi/colls/smpi_coll.cpp
/* smpi_coll.cpp -- various optimized routines for collectives */

/* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_coll.hpp"
#include "private.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_op.hpp"
#include "smpi_request.hpp"
#include "xbt/config.hpp"

#include <map>

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_coll, smpi, "Logging specific to SMPI collectives.");

namespace simgrid {
namespace smpi {

std::map<std::string, std::vector<s_mpi_coll_description_t>> smpi_coll_descriptions(
    {{std::string("gather"),
      {{"default", "gather default collective", (void*)gather__default},
       {"ompi", "gather ompi collective", (void*)gather__ompi},
       {"ompi_basic_linear", "gather ompi_basic_linear collective", (void*)gather__ompi_basic_linear},
       {"ompi_binomial", "gather ompi_binomial collective", (void*)gather__ompi_binomial},
       {"ompi_linear_sync", "gather ompi_linear_sync collective", (void*)gather__ompi_linear_sync},
       {"mpich", "gather mpich collective", (void*)gather__mpich},
       {"mvapich2", "gather mvapich2 collective", (void*)gather__mvapich2},
       {"mvapich2_two_level", "gather mvapich2_two_level collective", (void*)gather__mvapich2_two_level},
       {"impi", "gather impi collective", (void*)gather__impi},
       {"automatic", "gather automatic collective", (void*)gather__automatic}}},

     {"allgather",
      {{"default", "allgather default collective", (void*)allgather__default},
       {"2dmesh", "allgather 2dmesh collective", (void*)allgather__2dmesh},
       {"3dmesh", "allgather 3dmesh collective", (void*)allgather__3dmesh},
       {"bruck", "allgather bruck collective", (void*)allgather__bruck},
       {"GB", "allgather GB collective", (void*)allgather__GB},
       {"loosely_lr", "allgather loosely_lr collective", (void*)allgather__loosely_lr},
       {"NTSLR", "allgather NTSLR collective", (void*)allgather__NTSLR},
       {"NTSLR_NB", "allgather NTSLR_NB collective", (void*)allgather__NTSLR_NB},
       {"pair", "allgather pair collective", (void*)allgather__pair},
       {"rdb", "allgather rdb collective", (void*)allgather__rdb},
       {"rhv", "allgather rhv collective", (void*)allgather__rhv},
       {"ring", "allgather ring collective", (void*)allgather__ring},
       {"SMP_NTS", "allgather SMP_NTS collective", (void*)allgather__SMP_NTS},
       {"smp_simple", "allgather smp_simple collective", (void*)allgather__smp_simple},
       {"spreading_simple", "allgather spreading_simple collective", (void*)allgather__spreading_simple},
       {"ompi", "allgather ompi collective", (void*)allgather__ompi},
       {"ompi_neighborexchange", "allgather ompi_neighborexchange collective", (void*)allgather__ompi_neighborexchange},
       {"mvapich2", "allgather mvapich2 collective", (void*)allgather__mvapich2},
       {"mvapich2_smp", "allgather mvapich2_smp collective", (void*)allgather__mvapich2_smp},
       {"mpich", "allgather mpich collective", (void*)allgather__mpich},
       {"impi", "allgather impi collective", (void*)allgather__impi},
       {"automatic", "allgather automatic collective", (void*)allgather__automatic}}},

     {"allgatherv",
      {{"default", "allgatherv default collective", (void*)allgatherv__default},
       {"GB", "allgatherv GB collective", (void*)allgatherv__GB},
       {"pair", "allgatherv pair collective", (void*)allgatherv__pair},
       {"ring", "allgatherv ring collective", (void*)allgatherv__ring},
       {"ompi", "allgatherv ompi collective", (void*)allgatherv__ompi},
       {"ompi_neighborexchange", "allgatherv ompi_neighborexchange collective",
        (void*)allgatherv__ompi_neighborexchange},
       {"ompi_bruck", "allgatherv ompi_bruck collective", (void*)allgatherv__ompi_bruck},
       {"mpich", "allgatherv mpich collective", (void*)allgatherv__mpich},
       {"mpich_rdb", "allgatherv mpich_rdb collective", (void*)allgatherv__mpich_rdb},
       {"mpich_ring", "allgatherv mpich_ring collective", (void*)allgatherv__mpich_ring},
       {"mvapich2", "allgatherv mvapich2 collective", (void*)allgatherv__mvapich2},
       {"impi", "allgatherv impi collective", (void*)allgatherv__impi},
       {"automatic", "allgatherv automatic collective", (void*)allgatherv__automatic}}},

     {"allreduce",
      {{"default", "allreduce default collective", (void*)allreduce__default},
       {"lr", "allreduce lr collective", (void*)allreduce__lr},
       {"rab1", "allreduce rab1 collective", (void*)allreduce__rab1},
       {"rab2", "allreduce rab2 collective", (void*)allreduce__rab2},
       {"rab_rdb", "allreduce rab_rdb collective", (void*)allreduce__rab_rdb},
       {"rdb", "allreduce rdb collective", (void*)allreduce__rdb},
       {"smp_binomial", "allreduce smp_binomial collective", (void*)allreduce__smp_binomial},
       {"smp_binomial_pipeline", "allreduce smp_binomial_pipeline collective", (void*)allreduce__smp_binomial_pipeline},
       {"smp_rdb", "allreduce smp_rdb collective", (void*)allreduce__smp_rdb},
       {"smp_rsag", "allreduce smp_rsag collective", (void*)allreduce__smp_rsag},
       {"smp_rsag_lr", "allreduce smp_rsag_lr collective", (void*)allreduce__smp_rsag_lr},
       {"smp_rsag_rab", "allreduce smp_rsag_rab collective", (void*)allreduce__smp_rsag_rab},
       {"redbcast", "allreduce redbcast collective", (void*)allreduce__redbcast},
       {"ompi", "allreduce ompi collective", (void*)allreduce__ompi},
       {"ompi_ring_segmented", "allreduce ompi_ring_segmented collective", (void*)allreduce__ompi_ring_segmented},
       {"mpich", "allreduce mpich collective", (void*)allreduce__mpich},
       {"mvapich2", "allreduce mvapich2 collective", (void*)allreduce__mvapich2},
       {"mvapich2_rs", "allreduce mvapich2_rs collective", (void*)allreduce__mvapich2_rs},
       {"mvapich2_two_level", "allreduce mvapich2_two_level collective", (void*)allreduce__mvapich2_two_level},
       {"impi", "allreduce impi collective", (void*)allreduce__impi},
       {"rab", "allreduce rab collective", (void*)allreduce__rab},
       {"automatic", "allreduce automatic collective", (void*)allreduce__automatic}}},

     {"reduce_scatter",
      {{"default", "reduce_scatter default collective", (void*)reduce_scatter__default},
       {"ompi", "reduce_scatter ompi collective", (void*)reduce_scatter__ompi},
       {"ompi_basic_recursivehalving", "reduce_scatter ompi_basic_recursivehalving collective",
        (void*)reduce_scatter__ompi_basic_recursivehalving},
       {"ompi_ring", "reduce_scatter ompi_ring collective", (void*)reduce_scatter__ompi_ring},
       {"mpich", "reduce_scatter mpich collective", (void*)reduce_scatter__mpich},
       {"mpich_pair", "reduce_scatter mpich_pair collective", (void*)reduce_scatter__mpich_pair},
       {"mpich_rdb", "reduce_scatter mpich_rdb collective", (void*)reduce_scatter__mpich_rdb},
       {"mpich_noncomm", "reduce_scatter mpich_noncomm collective", (void*)reduce_scatter__mpich_noncomm},
       {"mvapich2", "reduce_scatter mvapich2 collective", (void*)reduce_scatter__mvapich2},
       {"impi", "reduce_scatter impi collective", (void*)reduce_scatter__impi},
       {"automatic", "reduce_scatter automatic collective", (void*)reduce_scatter__automatic}}},

     {"scatter",
      {{"default", "scatter default collective", (void*)scatter__default},
       {"ompi", "scatter ompi collective", (void*)scatter__ompi},
       {"ompi_basic_linear", "scatter ompi_basic_linear collective", (void*)scatter__ompi_basic_linear},
       {"ompi_binomial", "scatter ompi_binomial collective", (void*)scatter__ompi_binomial},
       {"mpich", "scatter mpich collective", (void*)scatter__mpich},
       {"mvapich2", "scatter mvapich2 collective", (void*)scatter__mvapich2},
       {"mvapich2_two_level_binomial", "scatter mvapich2_two_level_binomial collective",
        (void*)scatter__mvapich2_two_level_binomial},
       {"mvapich2_two_level_direct", "scatter mvapich2_two_level_direct collective",
        (void*)scatter__mvapich2_two_level_direct},
       {"impi", "scatter impi collective", (void*)scatter__impi},
       {"automatic", "scatter automatic collective", (void*)scatter__automatic}}},

     {"barrier",
      {{"default", "barrier default collective", (void*)barrier__default},
       {"ompi", "barrier ompi collective", (void*)barrier__ompi},
       {"ompi_basic_linear", "barrier ompi_basic_linear collective", (void*)barrier__ompi_basic_linear},
       {"ompi_two_procs", "barrier ompi_two_procs collective", (void*)barrier__ompi_two_procs},
       {"ompi_tree", "barrier ompi_tree collective", (void*)barrier__ompi_tree},
       {"ompi_bruck", "barrier ompi_bruck collective", (void*)barrier__ompi_bruck},
       {"ompi_recursivedoubling", "barrier ompi_recursivedoubling collective", (void*)barrier__ompi_recursivedoubling},
       {"ompi_doublering", "barrier ompi_doublering collective", (void*)barrier__ompi_doublering},
       {"mpich_smp", "barrier mpich_smp collective", (void*)barrier__mpich_smp},
       {"mpich", "barrier mpich collective", (void*)barrier__mpich},
       {"mvapich2_pair", "barrier mvapich2_pair collective", (void*)barrier__mvapich2_pair},
       {"mvapich2", "barrier mvapich2 collective", (void*)barrier__mvapich2},
       {"impi", "barrier impi collective", (void*)barrier__impi},
       {"automatic", "barrier automatic collective", (void*)barrier__automatic}}},

     {"alltoall",
      {{"default", "alltoall default collective", (void*)alltoall__default},
       {"2dmesh", "alltoall 2dmesh collective", (void*)alltoall__2dmesh},
       {"3dmesh", "alltoall 3dmesh collective", (void*)alltoall__3dmesh},
       {"basic_linear", "alltoall basic_linear collective", (void*)alltoall__basic_linear},
       {"bruck", "alltoall bruck collective", (void*)alltoall__bruck},
       {"pair", "alltoall pair collective", (void*)alltoall__pair},
       {"pair_rma", "alltoall pair_rma collective", (void*)alltoall__pair_rma},
       {"pair_light_barrier", "alltoall pair_light_barrier collective", (void*)alltoall__pair_light_barrier},
       {"pair_mpi_barrier", "alltoall pair_mpi_barrier collective", (void*)alltoall__pair_mpi_barrier},
       {"pair_one_barrier", "alltoall pair_one_barrier collective", (void*)alltoall__pair_one_barrier},
       {"rdb", "alltoall rdb collective", (void*)alltoall__rdb},
       {"ring", "alltoall ring collective", (void*)alltoall__ring},
       {"ring_light_barrier", "alltoall ring_light_barrier collective", (void*)alltoall__ring_light_barrier},
       {"ring_mpi_barrier", "alltoall ring_mpi_barrier collective", (void*)alltoall__ring_mpi_barrier},
       {"ring_one_barrier", "alltoall ring_one_barrier collective", (void*)alltoall__ring_one_barrier},
       {"mvapich2", "alltoall mvapich2 collective", (void*)alltoall__mvapich2},
       {"mvapich2_scatter_dest", "alltoall mvapich2_scatter_dest collective", (void*)alltoall__mvapich2_scatter_dest},
       {"ompi", "alltoall ompi collective", (void*)alltoall__ompi},
       {"mpich", "alltoall mpich collective", (void*)alltoall__mpich},
       {"impi", "alltoall impi collective", (void*)alltoall__impi},
       {"automatic", "alltoall automatic collective", (void*)alltoall__automatic}}},

     {"alltoallv",
      {{"default", "alltoallv default collective", (void*)alltoallv__default},
       {"bruck", "alltoallv bruck collective", (void*)alltoallv__bruck},
       {"pair", "alltoallv pair collective", (void*)alltoallv__pair},
       {"pair_light_barrier", "alltoallv pair_light_barrier collective", (void*)alltoallv__pair_light_barrier},
       {"pair_mpi_barrier", "alltoallv pair_mpi_barrier collective", (void*)alltoallv__pair_mpi_barrier},
       {"pair_one_barrier", "alltoallv pair_one_barrier collective", (void*)alltoallv__pair_one_barrier},
       {"ring", "alltoallv ring collective", (void*)alltoallv__ring},
       {"ring_light_barrier", "alltoallv ring_light_barrier collective", (void*)alltoallv__ring_light_barrier},
       {"ring_mpi_barrier", "alltoallv ring_mpi_barrier collective", (void*)alltoallv__ring_mpi_barrier},
       {"ring_one_barrier", "alltoallv ring_one_barrier collective", (void*)alltoallv__ring_one_barrier},
       {"ompi", "alltoallv ompi collective", (void*)alltoallv__ompi},
       {"mpich", "alltoallv mpich collective", (void*)alltoallv__mpich},
       {"ompi_basic_linear", "alltoallv ompi_basic_linear collective", (void*)alltoallv__ompi_basic_linear},
       {"mvapich2", "alltoallv mvapich2 collective", (void*)alltoallv__mvapich2},
       {"impi", "alltoallv impi collective", (void*)alltoallv__impi},
       {"automatic", "alltoallv automatic collective", (void*)alltoallv__automatic}}},

     {"bcast",
      {{"default", "bcast default collective", (void*)bcast__default},
       {"arrival_pattern_aware", "bcast arrival_pattern_aware collective", (void*)bcast__arrival_pattern_aware},
       {"arrival_pattern_aware_wait", "bcast arrival_pattern_aware_wait collective",
        (void*)bcast__arrival_pattern_aware_wait},
       {"arrival_scatter", "bcast arrival_scatter collective", (void*)bcast__arrival_scatter},
       {"binomial_tree", "bcast binomial_tree collective", (void*)bcast__binomial_tree},
       {"flattree", "bcast flattree collective", (void*)bcast__flattree},
       {"flattree_pipeline", "bcast flattree_pipeline collective", (void*)bcast__flattree_pipeline},
       {"NTSB", "bcast NTSB collective", (void*)bcast__NTSB},
       {"NTSL", "bcast NTSL collective", (void*)bcast__NTSL},
       {"NTSL_Isend", "bcast NTSL_Isend collective", (void*)bcast__NTSL_Isend},
       {"scatter_LR_allgather", "bcast scatter_LR_allgather collective", (void*)bcast__scatter_LR_allgather},
       {"scatter_rdb_allgather", "bcast scatter_rdb_allgather collective", (void*)bcast__scatter_rdb_allgather},
       {"SMP_binary", "bcast SMP_binary collective", (void*)bcast__SMP_binary},
       {"SMP_binomial", "bcast SMP_binomial collective", (void*)bcast__SMP_binomial},
       {"SMP_linear", "bcast SMP_linear collective", (void*)bcast__SMP_linear},
       {"ompi", "bcast ompi collective", (void*)bcast__ompi},
       {"ompi_split_bintree", "bcast ompi_split_bintree collective", (void*)bcast__ompi_split_bintree},
       {"ompi_pipeline", "bcast ompi_pipeline collective", (void*)bcast__ompi_pipeline},
       {"mpich", "bcast mpich collective", (void*)bcast__mpich},
       {"mvapich2", "bcast mvapich2 collective", (void*)bcast__mvapich2},
       {"mvapich2_inter_node", "bcast mvapich2_inter_node collective", (void*)bcast__mvapich2_inter_node},
       {"mvapich2_intra_node", "bcast mvapich2_intra_node collective", (void*)bcast__mvapich2_intra_node},
       {"mvapich2_knomial_intra_node", "bcast mvapich2_knomial_intra_node collective",
        (void*)bcast__mvapich2_knomial_intra_node},
       {"impi", "bcast impi collective", (void*)bcast__impi},
       {"automatic", "bcast automatic collective", (void*)bcast__automatic}}},

     {"reduce",
      {{"default", "reduce default collective", (void*)reduce__default},
       {"arrival_pattern_aware", "reduce arrival_pattern_aware collective", (void*)reduce__arrival_pattern_aware},
       {"binomial", "reduce binomial collective", (void*)reduce__binomial},
       {"flat_tree", "reduce flat_tree collective", (void*)reduce__flat_tree},
       {"NTSL", "reduce NTSL collective", (void*)reduce__NTSL},
       {"scatter_gather", "reduce scatter_gather collective", (void*)reduce__scatter_gather},
       {"ompi", "reduce ompi collective", (void*)reduce__ompi},
       {"ompi_chain", "reduce ompi_chain collective", (void*)reduce__ompi_chain},
       {"ompi_pipeline", "reduce ompi_pipeline collective", (void*)reduce__ompi_pipeline},
       {"ompi_basic_linear", "reduce ompi_basic_linear collective", (void*)reduce__ompi_basic_linear},
       {"ompi_in_order_binary", "reduce ompi_in_order_binary collective", (void*)reduce__ompi_in_order_binary},
       {"ompi_binary", "reduce ompi_binary collective", (void*)reduce__ompi_binary},
       {"ompi_binomial", "reduce ompi_binomial collective", (void*)reduce__ompi_binomial},
       {"mpich", "reduce mpich collective", (void*)reduce__mpich},
       {"mvapich2", "reduce mvapich2 collective", (void*)reduce__mvapich2},
       {"mvapich2_knomial", "reduce mvapich2_knomial collective", (void*)reduce__mvapich2_knomial},
       {"mvapich2_two_level", "reduce mvapich2_two_level collective", (void*)reduce__mvapich2_two_level},
       {"impi", "reduce impi collective", (void*)reduce__impi},
       {"rab", "reduce rab collective", (void*)reduce__rab},
       {"automatic", "reduce automatic collective", (void*)reduce__automatic}}}});

// Needed by the (weird) implementation of the automatic selector
std::vector<s_mpi_coll_description_t>* colls::get_smpi_coll_descriptions(const std::string& name)
{
  auto iter = smpi_coll_descriptions.find(name);
  if (iter == smpi_coll_descriptions.end())
    xbt_die("No collective named %s. This is a bug.", name.c_str());
  return &iter->second;
}
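
/* A minimal usage sketch: enumerating the algorithms registered for one collective.
 * It assumes that s_mpi_coll_description_t exposes `name` and `description` fields,
 * as the initializers above suggest.
 *
 *   for (auto const& desc : *colls::get_smpi_coll_descriptions("allreduce"))
 *     XBT_INFO("allreduce algorithm '%s': %s", desc.name.c_str(), desc.description.c_str());
 */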

static s_mpi_coll_description_t* find_coll_description(const std::string& collective, const std::string& algo)
{
  std::vector<s_mpi_coll_description_t>* table = colls::get_smpi_coll_descriptions(collective);
  if (table->empty())
    xbt_die("No registered algorithm for collective '%s'! This is a bug.", collective.c_str());

  for (unsigned long i = 0; i < table->size(); i++) {
    auto desc = &table->at(i);
    if (algo == desc->name) {
      if (desc->name != "default")
        XBT_INFO("Switch to algorithm %s for collective %s", desc->name.c_str(), collective.c_str());
      return desc;
    }
  }

  std::string name_list = table->at(0).name;
  for (unsigned long i = 1; i < table->size(); i++)
    name_list = name_list + ", " + table->at(i).name;
  xbt_die("Collective '%s' has no algorithm '%s'! Valid algorithms: %s.", collective.c_str(), algo.c_str(), name_list.c_str());
}

int (*colls::gather)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                     MPI_Datatype recv_type, int root, MPI_Comm comm);
int (*colls::allgather)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                        MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::allgatherv)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff,
                         const int* recv_count, const int* recv_disps, MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::alltoall)(const void* send_buff, int send_count, MPI_Datatype send_type, void* recv_buff, int recv_count,
                       MPI_Datatype recv_type, MPI_Comm comm);
int (*colls::alltoallv)(const void* send_buff, const int* send_counts, const int* send_disps, MPI_Datatype send_type,
                        void* recv_buff, const int* recv_counts, const int* recv_disps, MPI_Datatype recv_type,
                        MPI_Comm comm);
int (*colls::bcast)(void* buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
int (*colls::reduce)(const void* buf, void* rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
int (*colls::allreduce)(const void* sbuf, void* rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*colls::reduce_scatter)(const void* sbuf, void* rbuf, const int* rcounts, MPI_Datatype dtype, MPI_Op op,
                             MPI_Comm comm);
int (*colls::scatter)(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
                      MPI_Datatype recvtype, int root, MPI_Comm comm);
int (*colls::barrier)(MPI_Comm comm);

void (*colls::smpi_coll_cleanup_callback)();

#define COLL_SETTER(cat, ret, args, args2)                                                                             \
  void colls::_XBT_CONCAT(set_, cat)(const std::string& name)                                                          \
  {                                                                                                                    \
    auto desc = find_coll_description(_XBT_STRINGIFY(cat), name);                                                      \
    cat       = reinterpret_cast<ret(*) args>(desc->coll);                                                             \
    if (cat == nullptr)                                                                                                \
      xbt_die("Collective " _XBT_STRINGIFY(cat) " set to nullptr!");                                                   \
  }
COLL_APPLY(COLL_SETTER, COLL_GATHER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLGATHER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLGATHERV_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_REDUCE_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLREDUCE_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_REDUCE_SCATTER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_SCATTER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_BARRIER_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_BCAST_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLTOALL_SIG, "")
COLL_APPLY(COLL_SETTER, COLL_ALLTOALLV_SIG, "")
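
/* Roughly what one COLL_SETTER expansion yields, assuming COLL_GATHER_SIG carries the
 * gather signature declared above (illustrative sketch only):
 *
 *   void colls::set_gather(const std::string& name)
 *   {
 *     auto desc = find_coll_description("gather", name);
 *     gather    = reinterpret_cast<int (*)(const void*, int, MPI_Datatype, void*, int,
 *                                          MPI_Datatype, int, MPI_Comm)>(desc->coll);
 *     if (gather == nullptr)
 *       xbt_die("Collective gather set to nullptr!");
 *   }
 */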

void colls::set_collectives()
{
  std::string selector_name = simgrid::config::get_value<std::string>("smpi/coll-selector");
  if (selector_name.empty())
    selector_name = "default";

  std::pair<std::string, std::function<void(std::string)>> setter_callbacks[] = {
      {"gather", &colls::set_gather},         {"allgather", &colls::set_allgather},
      {"allgatherv", &colls::set_allgatherv}, {"allreduce", &colls::set_allreduce},
      {"alltoall", &colls::set_alltoall},     {"alltoallv", &colls::set_alltoallv},
      {"reduce", &colls::set_reduce},         {"reduce_scatter", &colls::set_reduce_scatter},
      {"scatter", &colls::set_scatter},       {"bcast", &colls::set_bcast},
      {"barrier", &colls::set_barrier}};

  for (auto& elem : setter_callbacks) {
    std::string name = simgrid::config::get_value<std::string>(("smpi/" + elem.first).c_str());
    if (name.empty())
      name = selector_name;

    (elem.second)(name);
  }
}
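
/* The selector and the per-collective overrides read above are ordinary SimGrid configuration
 * items, so they can be set on the command line. A sketch, assuming the usual --cfg syntax and
 * a hypothetical ./my_app binary (other smpirun arguments omitted):
 *
 *   smpirun --cfg=smpi/coll-selector:mpich --cfg=smpi/alltoall:pair ... ./my_app
 *
 * With this, every collective falls back to the "mpich" selector, except alltoall, which is
 * forced to the "pair" algorithm.
 */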

// Implementations of the single-algorithm collectives

int colls::gatherv(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, const int* recvcounts,
                   const int* displs, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  MPI_Request request;
  colls::igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm, &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

int colls::scatterv(const void* sendbuf, const int* sendcounts, const int* displs, MPI_Datatype sendtype, void* recvbuf,
                    int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  MPI_Request request;
  colls::iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

int colls::scan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = -888;
  MPI_Aint lb      = 0;
  MPI_Aint dataext = 0;

  int rank = comm->rank();
  int size = comm->size();

  datatype->extent(&lb, &dataext);

  // Local copy from self
  Datatype::copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  MPI_Request* requests = new MPI_Request[size - 1];
  unsigned char** tmpbufs = new unsigned char*[rank];
  int index = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = Request::irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = Request::isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  Request::startall(size - 1, requests);

  if(op != MPI_OP_NULL && op->is_commutative()){
    for (int other = 0; other < size - 1; other++) {
      index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if(index == MPI_UNDEFINED) {
        break;
      }
      if(index < rank) {
        // #Request is below rank: it's an irecv
        op->apply( tmpbufs[index], recvbuf, &count, datatype);
      }
    }
  }else{
    //non commutative case, wait in order
    for (int other = 0; other < size - 1; other++) {
      Request::wait(&(requests[other]), MPI_STATUS_IGNORE);
      if(other < rank && op!=MPI_OP_NULL) {
        op->apply( tmpbufs[other], recvbuf, &count, datatype);
      }
    }
  }
  for(index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for(index = 0; index < size-1; index++) {
    Request::unref(&requests[index]);
  }
  delete[] tmpbufs;
  delete[] requests;
  return MPI_SUCCESS;
}
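
/* Semantics sketch for the routine above: with op = MPI_SUM, count = 1 and every rank
 * contributing its own rank number, rank r ends up with the inclusive prefix sum
 * 0 + 1 + ... + r in recvbuf (hypothetical `in`/`out` buffers for illustration):
 *
 *   int in  = comm->rank();
 *   int out = 0;
 *   colls::scan(&in, &out, 1, MPI_INT, MPI_SUM, comm);   // on rank 3, out == 6
 */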

int colls::exscan(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = -888;
  MPI_Aint lb         = 0;
  MPI_Aint dataext    = 0;
  int recvbuf_is_empty=1;
  int rank = comm->rank();
  int size = comm->size();

  datatype->extent(&lb, &dataext);

  // Send/Recv buffers to/from others
  MPI_Request* requests = new MPI_Request[size - 1];
  unsigned char** tmpbufs = new unsigned char*[rank];
  int index = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = Request::irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = Request::isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  Request::startall(size - 1, requests);

  if(op != MPI_OP_NULL && op->is_commutative()){
    for (int other = 0; other < size - 1; other++) {
      index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if(index == MPI_UNDEFINED) {
        break;
      }
      if(index < rank) {
        if(recvbuf_is_empty){
          Datatype::copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty=0;
        } else
          // #Request is below rank: it's an irecv
          op->apply( tmpbufs[index], recvbuf, &count, datatype);
      }
    }
  }else{
    //non commutative case, wait in order
    for (int other = 0; other < size - 1; other++) {
      Request::wait(&(requests[other]), MPI_STATUS_IGNORE);
      if(other < rank) {
        if (recvbuf_is_empty) {
          Datatype::copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        } else
          if(op!=MPI_OP_NULL)
            op->apply( tmpbufs[other], recvbuf, &count, datatype);
      }
    }
  }
  for(index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for(index = 0; index < size-1; index++) {
    Request::unref(&requests[index]);
  }
  delete[] tmpbufs;
  delete[] requests;
  return MPI_SUCCESS;
}
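
/* Same sketch for the exclusive variant: rank r receives the reduction of ranks 0 .. r-1
 * only, and on rank 0 recvbuf is left untouched (recvbuf_is_empty is never cleared there):
 *
 *   int in  = comm->rank();
 *   int out = -1;
 *   colls::exscan(&in, &out, 1, MPI_INT, MPI_SUM, comm);  // on rank 3, out == 3; rank 0 keeps -1
 */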

int colls::alltoallw(const void* sendbuf, const int* sendcounts, const int* senddisps, const MPI_Datatype* sendtypes,
                     void* recvbuf, const int* recvcounts, const int* recvdisps, const MPI_Datatype* recvtypes,
                     MPI_Comm comm)
{
  MPI_Request request;
  colls::ialltoallw(sendbuf, sendcounts, senddisps, sendtypes, recvbuf, recvcounts, recvdisps, recvtypes, comm,
                    &request, 0);
  return Request::wait(&request, MPI_STATUS_IGNORE);
}

} // namespace smpi
} // namespace simgrid