1 /* Copyright (c) 2007-2015. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
7 #include <xbt/config.hpp>
8 #include <boost/tokenizer.hpp>
12 #include "xbt/virtu.h"
14 #include "src/mc/mc_replay.h"
15 #include "xbt/replay.h"
17 #include "src/simix/smx_private.h"
18 #include "surf/surf.h"
19 #include "simgrid/sg_config.h"
20 #include "colls/colls.h"
22 #include "src/simix/SynchroComm.hpp"
24 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
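/* Matching callbacks handed to SIMIX: they decide whether a posted receive (match_recv) or a posted
 * send (match_send) pairs with a given communication, honoring the MPI_ANY_SOURCE and MPI_ANY_TAG
 * wildcards. On a successful match they also propagate the real source/tag back into the request. */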
26 static int match_recv(void* a, void* b, smx_synchro_t ignored) {
27 MPI_Request ref = static_cast<MPI_Request>(a);
28 MPI_Request req = static_cast<MPI_Request>(b);
29 XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
31 xbt_assert(ref, "Cannot match recv against null reference");
32 xbt_assert(req, "Cannot match recv against null request");
33 if((ref->src == MPI_ANY_SOURCE || req->src == ref->src)
34 && ((ref->tag == MPI_ANY_TAG && req->tag >=0) || req->tag == ref->tag)){
35 //we match, we can transfer some values
36 if(ref->src == MPI_ANY_SOURCE)
37 ref->real_src = req->src;
38 if(ref->tag == MPI_ANY_TAG)
39 ref->real_tag = req->tag;
40 if(ref->real_size < req->real_size)
43 ref->detached_sender=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
44 XBT_DEBUG("match succeeded");
49 static int match_send(void* a, void* b,smx_synchro_t ignored) {
50 MPI_Request ref = static_cast<MPI_Request>(a);
51 MPI_Request req = static_cast<MPI_Request>(b);
52 XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
53 xbt_assert(ref, "Cannot match send against null reference");
54 xbt_assert(req, "Cannot match send against null request");
56 if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
57 && ((req->tag == MPI_ANY_TAG && ref->tag >=0)|| req->tag == ref->tag))
59 if(req->src == MPI_ANY_SOURCE)
60 req->real_src = ref->src;
61 if(req->tag == MPI_ANY_TAG)
62 req->real_tag = ref->tag;
63 if(req->real_size < ref->real_size)
66 req->detached_sender=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
67 XBT_DEBUG("match succeeded");
72 // Methods used to parse and store the values for timing injections in smpi
73 // These are taken from surf/network.c and generalized to have more values for each factor
74 typedef struct s_smpi_factor_multival *smpi_os_factor_multival_t;
75 typedef struct s_smpi_factor_multival { // FIXME: this should be merged (deduplicated) with s_smpi_factor defined in network_smpi.c
77 std::vector<double> values;
78 } s_smpi_factor_multival_t;
80 std::vector<s_smpi_factor_multival_t> smpi_os_values;
81 std::vector<s_smpi_factor_multival_t> smpi_or_values;
82 std::vector<s_smpi_factor_multival_t> smpi_ois_values;
84 static simgrid::config::Flag<double> smpi_wtime_sleep(
85 "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
86 static simgrid::config::Flag<double> smpi_init_sleep(
87 "smpi/init", "Time to inject inside a call to MPI_Init", 0.0);
88 static simgrid::config::Flag<double> smpi_iprobe_sleep(
89 "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
90 static simgrid::config::Flag<double> smpi_test_sleep(
91 "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
93 static int factor_cmp(const s_smpi_factor_multival_t& pa, const s_smpi_factor_multival_t& pb)
95 return (pa.factor > pb.factor) ? 1 :
96 (pa.factor < pb.factor) ? -1 : 0;
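/* Parse a factor configuration string (such as the value of smpi/os, smpi/ois or smpi/or) into a vector of
 * (factor, values) entries. Groups are separated by ';' and, inside a group, the factor and its values are
 * separated by ':' -- see the pattern description below. */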
99 static std::vector<s_smpi_factor_multival_t> parse_factor(const char *smpi_coef_string)
101 std::vector<s_smpi_factor_multival_t> smpi_factor;
102 s_smpi_factor_multival_t fact;
104 /** Setup the tokenizer that parses the string **/
105 typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
106 boost::char_separator<char> sep(";");
107 boost::char_separator<char> factor_separator(":");
108 std::string tmp_string(smpi_coef_string);
109 Tokenizer tokens(tmp_string, sep);
112 * Iterate over patterns like A:B:C:D;E:F;G:H
 * These will be broken down into: A --> B, C, D ; E --> F ; G --> H
118 for (Tokenizer::iterator token_iter = tokens.begin();
119 token_iter != tokens.end(); token_iter++) {
120 XBT_DEBUG("token : %s", token_iter->c_str());
121 Tokenizer factor_values(*token_iter, factor_separator);
123 if (factor_values.begin() == factor_values.end()) {
124 xbt_die("Malformed radical for smpi factor: '%s'", smpi_coef_string);
126 unsigned int iteration = 0;
127 for (Tokenizer::iterator factor_iter = factor_values.begin();
128 factor_iter != factor_values.end(); factor_iter++, iteration++) {
131 if (factor_iter == factor_values.begin()) { /* first element */
132 errmsg = bprintf("Invalid factor in chunk #%zu: %%s", smpi_factor.size()+1);
133 fact.factor = xbt_str_parse_int(factor_iter->c_str(), errmsg);
136 errmsg = bprintf("Invalid factor value %d in chunk #%zu: %%s", iteration, smpi_factor.size()+1);
137 fact.values.push_back(xbt_str_parse_double((*factor_iter).c_str(), errmsg));
142 smpi_factor.push_back(fact);
143 XBT_DEBUG("smpi_factor:\t%ld : %zu values, first: %f", fact.factor, smpi_factor.size(), fact.values[0]);
145 std::sort(smpi_factor.begin(), smpi_factor.end(), &factor_cmp);
146 for (auto& fact : smpi_factor) {
147 XBT_DEBUG("smpi_factor:\t%ld : %zu values, first: %f", fact.factor, smpi_factor.size() ,fact.values[0]);
153 static double smpi_os(size_t size)
155 if (smpi_os_values.empty()) {
156 smpi_os_values = parse_factor(xbt_cfg_get_string("smpi/os"));
158 double current=smpi_os_values.empty()?0.0:smpi_os_values[0].values[0]+smpi_os_values[0].values[1]*size;
159 // Iterate over all the sections that were specified and find the right
160 // value. (fact.factor represents the interval sizes; we want to find the
161 // section that has fact.factor <= size and no other such fact.factor <= size)
  // Note: parse_factor() (used before) already sorts the vector we iterate over!
163 for (auto& fact : smpi_os_values) {
164 if (size <= fact.factor) { // Values already too large, use the previously
165 // computed value of current!
166 XBT_DEBUG("os : %zu <= %ld return %.10f", size, fact.factor, current);
169 // If the next section is too large, the current section must be used.
170 // Hence, save the cost, as we might have to use it.
171 current = fact.values[0]+fact.values[1]*size;
174 XBT_DEBUG("Searching for smpi/os: %zu is larger than the largest boundary, return %.10f", size, current);
179 static double smpi_ois(size_t size)
181 if (smpi_ois_values.empty()) {
182 smpi_ois_values = parse_factor(xbt_cfg_get_string("smpi/ois"));
184 double current=smpi_ois_values.empty()?0.0:smpi_ois_values[0].values[0]+smpi_ois_values[0].values[1]*size;
185 // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
186 // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
  // Note: parse_factor() (used before) already sorts the vector we iterate over!
188 for (auto& fact : smpi_ois_values) {
189 if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
190 XBT_DEBUG("ois : %zu <= %ld return %.10f", size, fact.factor, current);
193 // If the next section is too large, the current section must be used.
194 // Hence, save the cost, as we might have to use it.
195 current = fact.values[0]+fact.values[1]*size;
198 XBT_DEBUG("Searching for smpi/ois: %zu is larger than the largest boundary, return %.10f", size, current);
203 static double smpi_or(size_t size)
205 if (smpi_or_values.empty()) {
206 smpi_or_values = parse_factor(xbt_cfg_get_string("smpi/or"));
209 double current=smpi_or_values.empty()?0.0:smpi_or_values[0].values[0]+smpi_or_values[0].values[1]*size;
210 // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
211 // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
  // Note: parse_factor() (used before) already sorts the vector we iterate over!
  for (auto& fact : smpi_or_values) {
214 if (size <= fact.factor) { // Values already too large, use the previously
215 // computed value of current!
216 XBT_DEBUG("or : %zu <= %ld return %.10f", size, fact.factor, current);
219 // If the next section is too large, the current section must be used.
220 // Hence, save the cost, as we might have to use it.
221 current=fact.values[0]+fact.values[1]*size;
224 XBT_DEBUG("smpi_or: %zu is larger than largest boundary, return %.10f", size, current);
229 void smpi_mpi_init() {
230 if(smpi_init_sleep > 0)
231 simcall_process_sleep(smpi_init_sleep);
234 double smpi_mpi_wtime(){
236 if (smpi_process_initialized() != 0 &&
237 smpi_process_finalized() == 0 &&
238 smpi_process_get_sampling() == 0) {
240 time = SIMIX_get_clock();
241 // to avoid deadlocks if used as a break condition, such as
242 // while (MPI_Wtime(...) < time_limit) {
245 // because the time will not normally advance when only calls to MPI_Wtime
246 // are made -> deadlock (MPI_Wtime never reaches the time limit)
247 if(smpi_wtime_sleep > 0)
248 simcall_process_sleep(smpi_wtime_sleep);
251 time = SIMIX_get_clock();
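/* Allocate and initialize an MPI_Request. For non-contiguous datatypes (or accumulate receives) a
 * temporary contiguous buffer is allocated, and sends serialize the user data into it; the original buffer
 * and datatype are remembered so that finish_wait() can unserialize on the receive side. */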
256 static MPI_Request build_request(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
259 MPI_Request request = nullptr;
261 void *old_buf = nullptr;
263 request = xbt_new(s_smpi_mpi_request_t, 1);
265 s_smpi_subtype_t *subtype = static_cast<s_smpi_subtype_t*>(datatype->substruct);
267 if((((flags & RECV) != 0) && ((flags & ACCUMULATE) !=0)) || (datatype->sizeof_substruct != 0)){
268 // This part handles the problem of non-contiguous memory
270 buf = count==0 ? nullptr : xbt_malloc(count*smpi_datatype_size(datatype));
271 if ((datatype->sizeof_substruct != 0) && ((flags & SEND) != 0)) {
272 subtype->serialize(old_buf, buf, count, datatype->substruct);
    // This part handles the problem of non-contiguous memory (for the unserialization at reception)
278 request->old_buf = old_buf;
279 request->old_type = datatype;
281 request->size = smpi_datatype_size(datatype) * count;
282 smpi_datatype_use(datatype);
286 request->comm = comm;
287 smpi_comm_use(request->comm);
288 request->action = nullptr;
289 request->flags = flags;
290 request->detached = 0;
291 request->detached_sender = nullptr;
292 request->real_src = 0;
293 request->truncated = 0;
294 request->real_size = 0;
295 request->real_tag = 0;
296 if (flags & PERSISTENT)
297 request->refcount = 1;
299 request->refcount = 0;
300 request->op = MPI_REPLACE;
307 void smpi_empty_status(MPI_Status * status)
309 if(status != MPI_STATUS_IGNORE) {
310 status->MPI_SOURCE = MPI_ANY_SOURCE;
311 status->MPI_TAG = MPI_ANY_TAG;
312 status->MPI_ERROR = MPI_SUCCESS;
317 static void smpi_mpi_request_free_voidp(void* request)
319 MPI_Request req = static_cast<MPI_Request>(request);
320 smpi_mpi_request_free(&req);
323 /* MPI Low level calls */
324 MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
325 int dst, int tag, MPI_Comm comm)
327 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
328 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
329 smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | SEND | PREPARED);
333 MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
334 int dst, int tag, MPI_Comm comm)
336 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
337 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
338 smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
342 MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
343 int src, int tag, MPI_Comm comm)
345 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
346 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype,
347 src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
348 smpi_process_index(), tag, comm, PERSISTENT | RECV | PREPARED);
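/* Actually start a communication request by posting the corresponding SIMIX irecv/isend.
 * Receives probe the small and large mailboxes to pick the one a matching send may already sit in;
 * small sends are detached (the buffer is copied so the user can reuse it immediately) and the
 * timings returned by smpi_os()/smpi_ois() are injected before the simcall is issued. */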
352 void smpi_mpi_start(MPI_Request request)
354 smx_mailbox_t mailbox;
356 xbt_assert(request->action == nullptr, "Cannot (re-)start unfinished communication");
357 request->flags &= ~PREPARED;
358 request->flags &= ~FINISHED;
361 if ((request->flags & RECV) != 0) {
362 print_request("New recv", request);
364 int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
366 xbt_mutex_t mut = smpi_process_mailboxes_mutex();
367 if (async_small_thresh != 0 || (request->flags & RMA) != 0)
368 xbt_mutex_acquire(mut);
370 if (async_small_thresh == 0 && (request->flags & RMA) == 0 ) {
371 mailbox = smpi_process_mailbox();
373 else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) {
      // We have to check both mailboxes (because SSEND messages are sent to the large mbox);
      // begin with the more appropriate one: the small one.
376 mailbox = smpi_process_mailbox_small();
377 XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
378 smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
380 if (action == nullptr) {
381 mailbox = smpi_process_mailbox();
382 XBT_DEBUG("No, nothing in the small mailbox test the other one : %p", mailbox);
383 action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
384 if (action == nullptr) {
385 XBT_DEBUG("Still nothing, switch back to the small mailbox : %p", mailbox);
386 mailbox = smpi_process_mailbox_small();
390 XBT_DEBUG("yes there was something for us in the large mailbox");
394 mailbox = smpi_process_mailbox_small();
395 XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
396 smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
398 if (action == nullptr) {
399 XBT_DEBUG("No, nothing in the permanent receive mailbox");
400 mailbox = smpi_process_mailbox();
403 XBT_DEBUG("yes there was something for us in the small mailbox");
407 // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
408 request->real_size=request->size;
409 request->action = simcall_comm_irecv(SIMIX_process_self(), mailbox, request->buf, &request->real_size, &match_recv,
410 ! smpi_process_get_replaying()? &smpi_comm_copy_buffer_callback
411 : &smpi_comm_null_copy_buffer_callback, request, -1.0);
412 XBT_DEBUG("recv simcall posted");
414 if (async_small_thresh != 0 || (request->flags & RMA) != 0 )
415 xbt_mutex_release(mut);
417 else { /* the RECV flag was not set, so this is a send */
418 int receiver = request->dst;
420 int rank = request->src;
421 if (TRACE_smpi_view_internals()) {
422 TRACE_smpi_send(rank, rank, receiver,request->size);
424 print_request("New send", request);
426 void* buf = request->buf;
427 if ( (request->flags & SSEND) == 0
428 && ( (request->flags & RMA) != 0 || static_cast<int>(request->size) < xbt_cfg_get_int("smpi/send-is-detached-thresh") ) ) {
429 void *oldbuf = nullptr;
430 request->detached = 1;
431 XBT_DEBUG("Send request %p is detached", request);
433 if(request->old_type->sizeof_substruct == 0){
434 oldbuf = request->buf;
435 if (!smpi_process_get_replaying() && oldbuf != nullptr && request->size!=0){
436 if((smpi_privatize_global_variables != 0)
437 && (static_cast<char*>(request->buf) >= smpi_start_data_exe)
438 && (static_cast<char*>(request->buf) < smpi_start_data_exe + smpi_size_data_exe )){
439 XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
440 smpi_switch_data_segment(request->src);
442 buf = xbt_malloc(request->size);
443 memcpy(buf,oldbuf,request->size);
444 XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
449 //if we are giving back the control to the user without waiting for completion, we have to inject timings
450 double sleeptime = 0.0;
451 if(request->detached != 0 || ((request->flags & (ISEND|SSEND)) != 0)){// issend should be treated as isend
452 //isend and send timings may be different
453 sleeptime = ((request->flags & ISEND) != 0) ? smpi_ois(request->size) : smpi_os(request->size);
457 simcall_process_sleep(sleeptime);
458 XBT_DEBUG("sending size of %zu : sleep %f ", request->size, sleeptime);
461 int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
463 xbt_mutex_t mut=smpi_process_remote_mailboxes_mutex(receiver);
465 if (async_small_thresh != 0 || (request->flags & RMA) != 0)
466 xbt_mutex_acquire(mut);
468 if (!(async_small_thresh != 0 || (request->flags & RMA) !=0)) {
469 mailbox = smpi_process_remote_mailbox(receiver);
471 else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) { // eager mode
472 mailbox = smpi_process_remote_mailbox(receiver);
473 XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
474 smx_synchro_t action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
475 if (action == nullptr) {
476 if ((request->flags & SSEND) == 0){
477 mailbox = smpi_process_remote_mailbox_small(receiver);
478 XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
481 mailbox = smpi_process_remote_mailbox_small(receiver);
482 XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
483 action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
484 if (action == nullptr) {
485 XBT_DEBUG("No, we are first, send to large mailbox");
486 mailbox = smpi_process_remote_mailbox(receiver);
491 XBT_DEBUG("Yes there was something for us in the large mailbox");
495 mailbox = smpi_process_remote_mailbox(receiver);
496 XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)",mailbox, request,request->buf);
    // we make a copy here, as the size is modified by simix, and we may reuse the request in another send later
500 request->real_size=request->size;
501 request->action = simcall_comm_isend(SIMIX_process_from_PID(request->src+1), mailbox, request->size, -1.0,
502 buf, request->real_size, &match_send,
503 &xbt_free_f, // how to free the userdata if a detached send fails
504 !smpi_process_get_replaying() ? &smpi_comm_copy_buffer_callback
505 : &smpi_comm_null_copy_buffer_callback, request,
506 // detach if msg size < eager/rdv switch limit
508 XBT_DEBUG("send simcall posted");
510 /* FIXME: detached sends are not traceable (request->action == nullptr) */
511 if (request->action != nullptr)
512 simcall_set_category(request->action, TRACE_internal_smpi_get_category());
514 if (async_small_thresh != 0 || ((request->flags & RMA)!=0))
515 xbt_mutex_release(mut);
519 void smpi_mpi_startall(int count, MPI_Request * requests)
521 if(requests== nullptr)
524 for(int i = 0; i < count; i++) {
525 smpi_mpi_start(requests[i]);
529 void smpi_mpi_request_free(MPI_Request * request)
531 if((*request) != MPI_REQUEST_NULL){
532 (*request)->refcount--;
533 if((*request)->refcount<0) xbt_die("wrong refcount");
535 if((*request)->refcount==0){
536 smpi_datatype_unuse((*request)->old_type);
537 smpi_comm_unuse((*request)->comm);
538 print_request("Destroying", (*request));
540 *request = MPI_REQUEST_NULL;
542 print_request("Decrementing", (*request));
545 xbt_die("freeing an already free request");
549 MPI_Request smpi_rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
552 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
554 request = build_request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, src, dst, tag,
555 comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
557 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag,
558 comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
564 MPI_Request smpi_rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
567 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
569 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag,
570 comm, RMA | NON_PERSISTENT | RECV | PREPARED);
572 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag,
573 comm, RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
579 MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
581 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
582 request = build_request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, smpi_process_index(),
583 smpi_group_index(smpi_comm_group(comm), dst), tag,comm, PERSISTENT | ISEND | SEND | PREPARED);
587 MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
589 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
590 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
591 smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | ISEND | SEND);
592 smpi_mpi_start(request);
596 MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
598 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
599 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
600 smpi_group_index(smpi_comm_group(comm), dst), tag,comm, NON_PERSISTENT | ISEND | SSEND | SEND);
601 smpi_mpi_start(request);
605 MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
607 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
608 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
609 smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
610 comm, PERSISTENT | RECV | PREPARED);
614 MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
616 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
617 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
618 smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag, comm,
619 NON_PERSISTENT | RECV);
620 smpi_mpi_start(request);
624 void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
626 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
627 request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
628 smpi_mpi_wait(&request, status);
632 void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
634 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
635 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
636 smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | SEND);
638 smpi_mpi_start(request);
639 smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
643 void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
645 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
646 request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
647 smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | SSEND | SEND);
649 smpi_mpi_start(request);
650 smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
654 void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
655 void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
656 MPI_Comm comm, MPI_Status * status)
658 MPI_Request requests[2];
660 int myid=smpi_process_index();
661 if ((smpi_group_index(smpi_comm_group(comm), dst) == myid) && (smpi_group_index(smpi_comm_group(comm), src) == myid)){
662 smpi_datatype_copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
665 requests[0] = smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
666 requests[1] = smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
667 smpi_mpi_startall(2, requests);
668 smpi_mpi_waitall(2, requests, stats);
669 smpi_mpi_request_free(&requests[0]);
670 smpi_mpi_request_free(&requests[1]);
671 if(status != MPI_STATUS_IGNORE) {
672 // Copy receive status
677 int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
679 return status->count / smpi_datatype_size(datatype);
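/* Common completion code for wait/test: fill in the MPI_Status, unserialize or apply the pending
 * operation for non-contiguous / accumulate receives, inject the smpi_or() receive timing, and release
 * the detached sender if any. */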
682 static void finish_wait(MPI_Request * request, MPI_Status * status)
684 MPI_Request req = *request;
685 smpi_empty_status(status);
687 if(!((req->detached != 0) && ((req->flags & SEND) != 0)) && ((req->flags & PREPARED) == 0)){
688 if(status != MPI_STATUS_IGNORE) {
689 int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
690 status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src);
691 status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
692 status->MPI_ERROR = req->truncated != 0 ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size of the receive differs from the size of the send
694 status->count = req->real_size;
697 print_request("Finishing", req);
698 MPI_Datatype datatype = req->old_type;
700 if(((req->flags & ACCUMULATE) != 0) || (datatype->sizeof_substruct != 0)){
701 if (!smpi_process_get_replaying()){
702 if( smpi_privatize_global_variables != 0 && (static_cast<char*>(req->old_buf) >= smpi_start_data_exe)
703 && ((char*)req->old_buf < smpi_start_data_exe + smpi_size_data_exe )){
704 XBT_VERB("Privatization : We are unserializing to a zone in global memory - Switch data segment ");
705 smpi_switch_data_segment(smpi_process_index());
709 if(datatype->sizeof_substruct != 0){
      // This part handles the problem of non-contiguous memory (unserialization at reception)
711 s_smpi_subtype_t *subtype = static_cast<s_smpi_subtype_t*>(datatype->substruct);
712 if(req->flags & RECV)
713 subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype) ,
714 datatype->substruct, req->op);
716 }else if(req->flags & RECV){//apply op on contiguous buffer for accumulate
717 int n =req->real_size/smpi_datatype_size(datatype);
718 smpi_op_apply(req->op, req->buf, req->old_buf, &n, &datatype);
724 if (TRACE_smpi_view_internals() && ((req->flags & RECV) != 0)){
725 int rank = smpi_process_index();
726 int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
727 TRACE_smpi_recv(rank, src_traced, rank);
730 if(req->detached_sender != nullptr){
732 //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
733 double sleeptime = smpi_or(req->real_size);
735 simcall_process_sleep(sleeptime);
736 XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size, sleeptime);
738 smpi_mpi_request_free(&(req->detached_sender));
740 if(req->flags & PERSISTENT)
741 req->action = nullptr;
742 req->flags |= FINISHED;
744 smpi_mpi_request_free(request);
747 int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
748 //assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)
750 // to avoid deadlocks if used as a break condition, such as
751 // while (MPI_Test(request, flag, status) && flag) {
753 // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // Each failed test increases the multiplier applied to the sleep time, to speed up execution.
755 static int nsleeps = 1;
756 if(smpi_test_sleep > 0)
757 simcall_process_sleep(nsleeps*smpi_test_sleep);
759 smpi_empty_status(status);
761 if (((*request)->flags & PREPARED) == 0) {
762 if ((*request)->action != nullptr)
763 flag = simcall_comm_test((*request)->action);
765 finish_wait(request, status);
766 nsleeps=1;//reset the number of sleeps we will do next time
767 if (*request != MPI_REQUEST_NULL && ((*request)->flags & PERSISTENT)==0)
768 *request = MPI_REQUEST_NULL;
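/* Non-blocking test over a set of requests: collect the pending SIMIX actions, let SIMIX test them, and
 * complete the one that matches, if any. */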
776 int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
782 *index = MPI_UNDEFINED;
783 comms = xbt_dynar_new(sizeof(smx_synchro_t), nullptr);
784 std::vector<int> map; /** Maps all matching comms back to their location in requests **/
785 for(i = 0; i < count; i++) {
786 if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action && !(requests[i]->flags & PREPARED)) {
787 xbt_dynar_push(comms, &requests[i]->action);
  // Each failed testany increases the multiplier applied to the sleep time, to speed up execution.
793 static int nsleeps = 1;
794 if(smpi_test_sleep > 0)
795 simcall_process_sleep(nsleeps*smpi_test_sleep);
797 i = simcall_comm_testany(comms); // The i-th element in comms matches!
798 if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
800 finish_wait(&requests[*index], status);
803 if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT)) {
804 requests[*index] = MPI_REQUEST_NULL;
810 //all requests are null or inactive, return true
812 smpi_empty_status(status);
814 xbt_dynar_free(&comms);
819 int smpi_mpi_testall(int count, MPI_Request requests[], MPI_Status status[])
822 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
825 for(i=0; i<count; i++){
826 if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
827 if (smpi_mpi_test(&requests[i], pstat)!=1){
830 requests[i]=MPI_REQUEST_NULL;
833 smpi_empty_status(pstat);
835 if(status != MPI_STATUSES_IGNORE) {
842 void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  // FIXME: find another way to avoid busy waiting?
845 // the issue here is that we have to wait on a nonexistent comm
847 smpi_mpi_iprobe(source, tag, comm, &flag, status);
848 XBT_DEBUG("Busy Waiting on probing : %d", flag);
852 void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
854 MPI_Request request = build_request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
855 smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag, comm, PERSISTENT | RECV);
857 // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
858 // (especially when used as a break condition, such as while(MPI_Iprobe(...)) ... )
  // Each failed iprobe increases the multiplier applied to the sleep time, to speed up execution.
860 static int nsleeps = 1;
861 if(smpi_iprobe_sleep > 0)
862 simcall_process_sleep(nsleeps*smpi_iprobe_sleep);
863 // behave like a receive, but don't do it
864 smx_mailbox_t mailbox;
866 print_request("New iprobe", request);
  // We have to test both mailboxes, as we don't know on which one we will receive
868 if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
869 mailbox = smpi_process_mailbox_small();
870 XBT_DEBUG("Trying to probe the perm recv mailbox");
871 request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));
874 if (request->action == nullptr){
875 mailbox = smpi_process_mailbox();
876 XBT_DEBUG("trying to probe the other mailbox");
877 request->action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
880 if (request->action != nullptr){
881 simgrid::simix::Comm *sync_comm = static_cast<simgrid::simix::Comm*>(request->action);
882 MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
884 if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED) == 0) {
885 status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
886 status->MPI_TAG = req->tag;
887 status->MPI_ERROR = MPI_SUCCESS;
888 status->count = req->real_size;
890 nsleeps = 1;//reset the number of sleeps we will do next time
896 smpi_mpi_request_free(&request);
901 void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
903 print_request("Waiting", *request);
904 if ((*request)->flags & PREPARED) {
905 smpi_empty_status(status);
909 if ((*request)->action != nullptr)
910 // this is not a detached send
911 simcall_comm_wait((*request)->action, -1.0);
913 finish_wait(request, status);
914 if (*request != MPI_REQUEST_NULL && (((*request)->flags & NON_PERSISTENT)!=0))
915 *request = MPI_REQUEST_NULL;
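/* Block until one of the given requests completes; `map` translates the position returned by SIMIX
 * (within the dynar of pending actions) back to an index in `requests`. */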
918 int smpi_mpi_waitany(int count, MPI_Request requests[], MPI_Status * status)
923 int index = MPI_UNDEFINED;
927 // Wait for a request to complete
928 comms = xbt_dynar_new(sizeof(smx_synchro_t), nullptr);
929 map = xbt_new(int, count);
930 XBT_DEBUG("Wait for one of %d", count);
931 for(i = 0; i < count; i++) {
932 if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED) && !(requests[i]->flags & FINISHED)) {
933 if (requests[i]->action != nullptr) {
934 XBT_DEBUG("Waiting any %p ", requests[i]);
935 xbt_dynar_push(comms, &requests[i]->action);
939 //This is a finished detached request, let's return this one
940 size=0;//so we free the dynar but don't do the waitany call
942 finish_wait(&requests[i], status);//cleanup if refcount = 0
943 if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags & NON_PERSISTENT))
944 requests[i]=MPI_REQUEST_NULL;//set to null
950 i = simcall_comm_waitany(comms);
952 // not MPI_UNDEFINED, as this is a simix return code
955 finish_wait(&requests[index], status);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
957 requests[index] = MPI_REQUEST_NULL;
961 xbt_dynar_free(&comms);
964 if (index==MPI_UNDEFINED)
965 smpi_empty_status(status);
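/* Wait for all requests: statuses of null/inactive requests are emptied first, then the remaining ones are
 * completed one by one through smpi_mpi_waitany() (or plain wait when running under the model checker). */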
970 int smpi_mpi_waitall(int count, MPI_Request requests[], MPI_Status status[])
974 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
975 int retvalue = MPI_SUCCESS;
976 //tag invalid requests in the set
977 if (status != MPI_STATUSES_IGNORE) {
978 for (c = 0; c < count; c++) {
979 if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL || (requests[c]->flags & PREPARED)) {
980 smpi_empty_status(&status[c]);
981 } else if (requests[c]->src == MPI_PROC_NULL) {
982 smpi_empty_status(&status[c]);
983 status[c].MPI_SOURCE = MPI_PROC_NULL;
987 for(c = 0; c < count; c++) {
989 if (MC_is_active() || MC_record_replay_is_active()) {
990 smpi_mpi_wait(&requests[c], pstat);
993 index = smpi_mpi_waitany(count, requests, pstat);
994 if (index == MPI_UNDEFINED)
996 if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
997 requests[index]=MPI_REQUEST_NULL;
999 if (status != MPI_STATUSES_IGNORE) {
1000 status[index] = *pstat;
1001 if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
1002 retvalue = MPI_ERR_IN_STATUS;
1009 int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
1015 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
1017 for(i = 0; i < incount; i++)
1019 index=smpi_mpi_waitany(incount, requests, pstat);
1020 if(index!=MPI_UNDEFINED){
1021 indices[count] = index;
1023 if(status != MPI_STATUSES_IGNORE) {
1024 status[index] = *pstat;
1026 if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
1027 requests[index]=MPI_REQUEST_NULL;
1029 return MPI_UNDEFINED;
1035 int smpi_mpi_testsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
1041 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
1043 for(i = 0; i < incount; i++) {
1044 if((requests[i] != MPI_REQUEST_NULL)) {
1045 if(smpi_mpi_test(&requests[i], pstat)) {
1048 if(status != MPI_STATUSES_IGNORE) {
1051 if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->flags & NON_PERSISTENT)
1052 requests[i]=MPI_REQUEST_NULL;
1058 if(count_dead==incount)
1059 return MPI_UNDEFINED;
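/* Collective operations. The implementations below are simple reference versions built on top of the
 * point-to-point layer above (mostly flat gather/scatter trees); bcast and barrier delegate to tuned
 * algorithms from the collectives module. */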
1063 void smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
1065 smpi_coll_tuned_bcast_binomial_tree(buf, count, datatype, root, comm);
1068 void smpi_mpi_barrier(MPI_Comm comm)
1070 smpi_coll_tuned_barrier_ompi_basic_linear(comm);
1073 void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
1074 void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
1076 int system_tag = COLL_TAG_GATHER;
1077 int rank, size, src, index;
1078 MPI_Aint lb = 0, recvext = 0;
1079 MPI_Request *requests;
1081 rank = smpi_comm_rank(comm);
1082 size = smpi_comm_size(comm);
1084 // Send buffer to root
1085 smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
1087 smpi_datatype_extent(recvtype, &lb, &recvext);
1088 // Local copy from root
1089 smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + root * recvcount * recvext, recvcount, recvtype);
1090 // Receive buffers from senders
1091 requests = xbt_new(MPI_Request, size - 1);
1093 for(src = 0; src < size; src++) {
1095 requests[index] = smpi_irecv_init(static_cast<char*>(recvbuf) + src * recvcount * recvext, recvcount, recvtype,
1096 src, system_tag, comm);
1100 // Wait for completion of irecv's.
1101 smpi_mpi_startall(size - 1, requests);
1102 smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
1103 for(src = 0; src < size-1; src++) {
1104 smpi_mpi_request_free(&requests[src]);
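/* Reduce_scatter implemented naively: reduce everything to rank 0, then scatterv the pieces back
 * according to recvcounts. */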
1110 void smpi_mpi_reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op,
1115 int rank = smpi_comm_rank(comm);
1118 /* arbitrarily choose root as rank 0 */
1119 size = smpi_comm_size(comm);
1121 displs = xbt_new(int, size);
1122 for (i = 0; i < size; i++) {
1124 count += recvcounts[i];
1126 tmpbuf=static_cast<void*>(smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype)));
1128 mpi_coll_reduce_fun(sendbuf, tmpbuf, count, datatype, op, 0, comm);
1129 smpi_mpi_scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf, recvcounts[rank], datatype, 0, comm);
1131 smpi_free_tmp_buffer(tmpbuf);
1134 void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs,
1135 MPI_Datatype recvtype, int root, MPI_Comm comm)
1137 int system_tag = COLL_TAG_GATHERV;
1138 int rank, size, src, index;
1139 MPI_Aint lb = 0, recvext = 0;
1140 MPI_Request *requests;
1142 rank = smpi_comm_rank(comm);
1143 size = smpi_comm_size(comm);
1145 // Send buffer to root
1146 smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
1148 smpi_datatype_extent(recvtype, &lb, &recvext);
1149 // Local copy from root
1150 smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + displs[root] * recvext,
1151 recvcounts[root], recvtype);
1152 // Receive buffers from senders
1153 requests = xbt_new(MPI_Request, size - 1);
1155 for(src = 0; src < size; src++) {
1157 requests[index] = smpi_irecv_init(static_cast<char*>(recvbuf) + displs[src] * recvext,
1158 recvcounts[src], recvtype, src, system_tag, comm);
1162 // Wait for completion of irecv's.
1163 smpi_mpi_startall(size - 1, requests);
1164 smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
1165 for(src = 0; src < size-1; src++) {
1166 smpi_mpi_request_free(&requests[src]);
1172 void smpi_mpi_allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
1173 void *recvbuf,int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
1175 int system_tag = COLL_TAG_ALLGATHER;
1176 int rank, size, other, index;
1177 MPI_Aint lb = 0, recvext = 0;
1178 MPI_Request *requests;
1180 rank = smpi_comm_rank(comm);
1181 size = smpi_comm_size(comm);
1182 // FIXME: check for errors
1183 smpi_datatype_extent(recvtype, &lb, &recvext);
1184 // Local copy from self
1185 smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char *>(recvbuf) + rank * recvcount * recvext, recvcount, recvtype);
1186 // Send/Recv buffers to/from others;
1187 requests = xbt_new(MPI_Request, 2 * (size - 1));
1189 for(other = 0; other < size; other++) {
1191 requests[index] = smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,comm);
1193 requests[index] = smpi_irecv_init(static_cast<char *>(recvbuf) + other * recvcount * recvext, recvcount, recvtype, other,
1198 // Wait for completion of all comms.
1199 smpi_mpi_startall(2 * (size - 1), requests);
1200 smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
1201 for(other = 0; other < 2*(size-1); other++) {
1202 smpi_mpi_request_free(&requests[other]);
1207 void smpi_mpi_allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
1208 int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm)
1210 int system_tag = COLL_TAG_ALLGATHERV;
1211 int rank, size, other, index;
1212 MPI_Aint lb = 0, recvext = 0;
1213 MPI_Request *requests;
1215 rank = smpi_comm_rank(comm);
1216 size = smpi_comm_size(comm);
1217 smpi_datatype_extent(recvtype, &lb, &recvext);
1218 // Local copy from self
1219 smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char *>(recvbuf) + displs[rank] * recvext,recvcounts[rank], recvtype);
  // Send/Recv buffers to/from others;
1221 requests = xbt_new(MPI_Request, 2 * (size - 1));
1223 for(other = 0; other < size; other++) {
1226 smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag, comm);
1228 requests[index] = smpi_irecv_init(static_cast<char *>(recvbuf) + displs[other] * recvext, recvcounts[other],
1229 recvtype, other, system_tag, comm);
1233 // Wait for completion of all comms.
1234 smpi_mpi_startall(2 * (size - 1), requests);
1235 smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
1236 for(other = 0; other < 2*(size-1); other++) {
1237 smpi_mpi_request_free(&requests[other]);
1242 void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
1243 void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
1245 int system_tag = COLL_TAG_SCATTER;
1246 int rank, size, dst, index;
1247 MPI_Aint lb = 0, sendext = 0;
1248 MPI_Request *requests;
1250 rank = smpi_comm_rank(comm);
1251 size = smpi_comm_size(comm);
1253 // Recv buffer from root
1254 smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
1256 smpi_datatype_extent(sendtype, &lb, &sendext);
1257 // Local copy from root
1258 if(recvbuf!=MPI_IN_PLACE){
1259 smpi_datatype_copy(static_cast<char *>(sendbuf) + root * sendcount * sendext,
1260 sendcount, sendtype, recvbuf, recvcount, recvtype);
1262 // Send buffers to receivers
1263 requests = xbt_new(MPI_Request, size - 1);
1265 for(dst = 0; dst < size; dst++) {
1267 requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + dst * sendcount * sendext, sendcount, sendtype, dst,
1272 // Wait for completion of isend's.
1273 smpi_mpi_startall(size - 1, requests);
1274 smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
1275 for(dst = 0; dst < size-1; dst++) {
1276 smpi_mpi_request_free(&requests[dst]);
1282 void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype, void *recvbuf, int recvcount,
1283 MPI_Datatype recvtype, int root, MPI_Comm comm)
1285 int system_tag = COLL_TAG_SCATTERV;
1286 int rank, size, dst, index;
1287 MPI_Aint lb = 0, sendext = 0;
1288 MPI_Request *requests;
1290 rank = smpi_comm_rank(comm);
1291 size = smpi_comm_size(comm);
1293 // Recv buffer from root
1294 smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
1296 smpi_datatype_extent(sendtype, &lb, &sendext);
1297 // Local copy from root
1298 if(recvbuf!=MPI_IN_PLACE){
1299 smpi_datatype_copy(static_cast<char *>(sendbuf) + displs[root] * sendext, sendcounts[root],
1300 sendtype, recvbuf, recvcount, recvtype);
1302 // Send buffers to receivers
1303 requests = xbt_new(MPI_Request, size - 1);
1305 for(dst = 0; dst < size; dst++) {
1307 requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + displs[dst] * sendext, sendcounts[dst],
1308 sendtype, dst, system_tag, comm);
1312 // Wait for completion of isend's.
1313 smpi_mpi_startall(size - 1, requests);
1314 smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
1315 for(dst = 0; dst < size-1; dst++) {
1316 smpi_mpi_request_free(&requests[dst]);
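/* Flat-tree reduce: the root receives one temporary buffer per peer and folds them into recvbuf with
 * smpi_op_apply(), in the order the receives complete; non-commutative operations are redirected to the
 * OpenMPI linear algorithm instead. */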
1322 void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root,
1325 int system_tag = COLL_TAG_REDUCE;
1326 int rank, size, src, index;
1327 MPI_Aint lb = 0, dataext = 0;
1328 MPI_Request *requests;
1331 char* sendtmpbuf = static_cast<char *>(sendbuf);
1334 rank = smpi_comm_rank(comm);
1335 size = smpi_comm_size(comm);
  // non-commutative case: use a working algorithm from OpenMPI
1337 if(!smpi_op_is_commute(op)){
1338 smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count, datatype, op, root, comm);
1342 if( sendbuf == MPI_IN_PLACE ) {
1343 sendtmpbuf = static_cast<char *>(smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype)));
1344 smpi_datatype_copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
1348 // Send buffer to root
1349 smpi_mpi_send(sendtmpbuf, count, datatype, root, system_tag, comm);
1351 smpi_datatype_extent(datatype, &lb, &dataext);
1352 // Local copy from root
1353 if (sendtmpbuf != nullptr && recvbuf != nullptr)
1354 smpi_datatype_copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
1355 // Receive buffers from senders
1356 requests = xbt_new(MPI_Request, size - 1);
1357 tmpbufs = xbt_new(void *, size - 1);
1359 for(src = 0; src < size; src++) {
1361 if (!smpi_process_get_replaying())
1362 tmpbufs[index] = xbt_malloc(count * dataext);
1364 tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
1366 smpi_irecv_init(tmpbufs[index], count, datatype, src, system_tag, comm);
1370 // Wait for completion of irecv's.
1371 smpi_mpi_startall(size - 1, requests);
1372 for(src = 0; src < size - 1; src++) {
1373 index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
1374 XBT_DEBUG("finished waiting any request with index %d", index);
1375 if(index == MPI_UNDEFINED) {
1378 smpi_mpi_request_free(&requests[index]);
1380 if(op) /* op can be MPI_OP_NULL that does nothing */
1381 smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
1383 for(index = 0; index < size - 1; index++) {
1384 smpi_free_tmp_buffer(tmpbufs[index]);
1390 if( sendbuf == MPI_IN_PLACE ) {
1391 smpi_free_tmp_buffer(sendtmpbuf);
1395 void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
1397 smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
1398 smpi_mpi_bcast(recvbuf, count, datatype, 0, comm);
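/* Scan and Exscan: every rank posts a receive from each lower-ranked peer and a send to each
 * higher-ranked one, then folds the received contributions into recvbuf with smpi_op_apply() -- in
 * completion order when the operation is commutative, in rank order otherwise. Exscan differs only in
 * that the local contribution is not included in the result. */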
1401 void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
1403 int system_tag = -888;
1404 int rank, size, other, index;
1405 MPI_Aint lb = 0, dataext = 0;
1406 MPI_Request *requests;
1409 rank = smpi_comm_rank(comm);
1410 size = smpi_comm_size(comm);
1412 smpi_datatype_extent(datatype, &lb, &dataext);
1414 // Local copy from self
1415 smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
1417 // Send/Recv buffers to/from others;
1418 requests = xbt_new(MPI_Request, size - 1);
1419 tmpbufs = xbt_new(void *, rank);
1421 for(other = 0; other < rank; other++) {
1422 tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
1423 requests[index] = smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
1426 for(other = rank + 1; other < size; other++) {
1427 requests[index] = smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
1430 // Wait for completion of all comms.
1431 smpi_mpi_startall(size - 1, requests);
1433 if(smpi_op_is_commute(op)){
1434 for(other = 0; other < size - 1; other++) {
1435 index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
1436 if(index == MPI_UNDEFINED) {
        // #Request is below rank: it's an irecv
1441 smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    // non-commutative case: wait in order
1446 for(other = 0; other < size - 1; other++) {
1447 smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
1449 smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
1453 for(index = 0; index < rank; index++) {
1454 smpi_free_tmp_buffer(tmpbufs[index]);
1456 for(index = 0; index < size-1; index++) {
1457 smpi_mpi_request_free(&requests[index]);
1463 void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
1465 int system_tag = -888;
1466 int rank, size, other, index;
1467 MPI_Aint lb = 0, dataext = 0;
1468 MPI_Request *requests;
1470 int recvbuf_is_empty=1;
1471 rank = smpi_comm_rank(comm);
1472 size = smpi_comm_size(comm);
1474 smpi_datatype_extent(datatype, &lb, &dataext);
1476 // Send/Recv buffers to/from others;
1477 requests = xbt_new(MPI_Request, size - 1);
1478 tmpbufs = xbt_new(void *, rank);
1480 for(other = 0; other < rank; other++) {
1481 tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
1483 smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
1486 for(other = rank + 1; other < size; other++) {
1488 smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
1491 // Wait for completion of all comms.
1492 smpi_mpi_startall(size - 1, requests);
1493 if(smpi_op_is_commute(op)){
1494 for(other = 0; other < size - 1; other++) {
1495 index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
1496 if(index == MPI_UNDEFINED) {
1500 if(recvbuf_is_empty){
1501 smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
        // #Request is below rank: it's an irecv
1505 smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    // non-commutative case: wait in order
1510 for(other = 0; other < size - 1; other++) {
1511 smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
1513 if(recvbuf_is_empty){
1514 smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
1516 }else smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
1520 for(index = 0; index < rank; index++) {
1521 smpi_free_tmp_buffer(tmpbufs[index]);
1523 for(index = 0; index < size-1; index++) {
1524 smpi_mpi_request_free(&requests[index]);