/* Copyright (c) 2007-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include <xbt/config.hpp>

#include <algorithm> // std::sort, used by smpi_mpi_waitall
#include <vector>

#include "xbt/virtu.h"

#include "src/mc/mc_replay.h"
#include "xbt/replay.h"

#include "src/simix/smx_private.h"
#include "surf/surf.h"
#include "simgrid/sg_config.h"
#include "smpi/smpi_utils.hpp"
#include "colls/colls.h"
#include <simgrid/s4u/host.hpp>

#include "src/kernel/activity/SynchroComm.hpp"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");

extern void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t);

static int match_recv(void* a, void* b, smx_activity_t ignored) {
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d", ref->src, req->src, ref->tag, req->tag);

  if((ref->src == MPI_ANY_SOURCE || req->src == ref->src)
      && ((ref->tag == MPI_ANY_TAG && req->tag >= 0) || req->tag == ref->tag)){
    //we match, we can transfer some values
    if(ref->src == MPI_ANY_SOURCE)
      ref->real_src = req->src;
    if(ref->tag == MPI_ANY_TAG)
      ref->real_tag = req->tag;
    if(ref->real_size < req->real_size)
      ref->truncated = 1;
    if(req->detached == 1)
      ref->detached_sender = req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
    return 1;
  }else
    return 0;
}

static int match_send(void* a, void* b, smx_activity_t ignored) {
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d", ref->src, req->src, ref->tag, req->tag);

  if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
      && ((req->tag == MPI_ANY_TAG && ref->tag >= 0) || req->tag == ref->tag)){
    if(req->src == MPI_ANY_SOURCE)
      req->real_src = ref->src;
    if(req->tag == MPI_ANY_TAG)
      req->real_tag = ref->tag;
    if(req->real_size < ref->real_size)
      req->truncated = 1;
    if(ref->detached == 1)
      req->detached_sender = ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
    return 1;
  }else
    return 0;
}
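
/* Illustration of the matching rules above (informative comment, not part of the original code):
 *  - a recv posted with src = MPI_ANY_SOURCE and tag = 42 matches a send with src = 3 and
 *    tag = 42; real_src is then set to 3 so that the MPI_Status reports the actual sender;
 *  - a recv posted with tag = MPI_ANY_TAG matches any send carrying a non-negative tag
 *    (negative tags are reserved for internal collectives, see the COLL_TAG_* constants
 *    and the -888 tag used by smpi_mpi_scan/smpi_mpi_exscan below);
 *  - when the posted receive buffer is smaller than the incoming message, the request is
 *    flagged as truncated and finish_wait() later reports MPI_ERR_TRUNCATE. */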

std::vector<s_smpi_factor_t> smpi_os_values;
std::vector<s_smpi_factor_t> smpi_or_values;
std::vector<s_smpi_factor_t> smpi_ois_values;

static simgrid::config::Flag<double> smpi_wtime_sleep(
  "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
static simgrid::config::Flag<double> smpi_init_sleep(
  "smpi/init", "Time to inject inside a call to MPI_Init", 0.0);
static simgrid::config::Flag<double> smpi_iprobe_sleep(
  "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
  "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);

static double smpi_os(size_t size)
{
  if (smpi_os_values.empty()) {
    smpi_os_values = parse_factor(xbt_cfg_get_string("smpi/os"));
  }
  double current = smpi_os_values.empty() ? 0.0 : smpi_os_values[0].values[0] + smpi_os_values[0].values[1] * size;
  // Iterate over all the sections that were specified and find the right value: fact.factor is the boundary of each
  // interval, and we want the values of the last section whose boundary is strictly below size.
  // Note: parse_factor() (used above) already sorts the vector we iterate over!
  for (auto& fact : smpi_os_values) {
    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
      XBT_DEBUG("os : %zu <= %zu return %.10f", size, fact.factor, current);
      return current;
    } else {
      // If the next section is too large, the current section must be used.
      // Hence, save the cost, as we might have to use it.
      current = fact.values[0] + fact.values[1] * size;
    }
  }
  XBT_DEBUG("Searching for smpi/os: %zu is larger than the largest boundary, return %.10f", size, current);
  return current;
}
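
/* Worked example, assuming the usual "boundary:intercept:slope;..." factor syntax and
 * hypothetical values --cfg=smpi/os:"0:1e-6:1e-9;4096:2e-6:5e-10": parse_factor()
 * returns two sections sorted by boundary. A 1024-byte message stays below the 4096
 * boundary, so the injected overhead is 1e-6 + 1e-9 * 1024 ~= 2.02e-6s, while an
 * 8192-byte message uses the last section: 2e-6 + 5e-10 * 8192 ~= 6.1e-6s. */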

static double smpi_ois(size_t size)
{
  if (smpi_ois_values.empty()) {
    smpi_ois_values = parse_factor(xbt_cfg_get_string("smpi/ois"));
  }
  double current = smpi_ois_values.empty() ? 0.0 : smpi_ois_values[0].values[0] + smpi_ois_values[0].values[1] * size;
  // Iterate over all the sections that were specified and find the right value: fact.factor is the boundary of each
  // interval, and we want the values of the last section whose boundary is strictly below size.
  // Note: parse_factor() (used above) already sorts the vector we iterate over!
  for (auto& fact : smpi_ois_values) {
    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
      XBT_DEBUG("ois : %zu <= %zu return %.10f", size, fact.factor, current);
      return current;
    } else {
      // If the next section is too large, the current section must be used.
      // Hence, save the cost, as we might have to use it.
      current = fact.values[0] + fact.values[1] * size;
    }
  }
  XBT_DEBUG("Searching for smpi/ois: %zu is larger than the largest boundary, return %.10f", size, current);
  return current;
}

static double smpi_or(size_t size)
{
  if (smpi_or_values.empty()) {
    smpi_or_values = parse_factor(xbt_cfg_get_string("smpi/or"));
  }
  double current = smpi_or_values.empty() ? 0.0 : smpi_or_values.front().values[0] + smpi_or_values.front().values[1] * size;

  // Iterate over all the sections that were specified and find the right value: fact.factor is the boundary of each
  // interval, and we want the values of the last section whose boundary is strictly below size.
  // Note: parse_factor() (used above) already sorts the vector we iterate over!
  for (auto& fact : smpi_or_values) { // take a reference to avoid copying each section
    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
      XBT_DEBUG("or : %zu <= %zu return %.10f", size, fact.factor, current);
      return current;
    } else {
      // If the next section is too large, the current section must be used.
      // Hence, save the cost, as we might have to use it.
      current = fact.values[0] + fact.values[1] * size;
    }
  }
  XBT_DEBUG("smpi_or: %zu is larger than largest boundary, return %.10f", size, current);
  return current;
}

void smpi_mpi_init() {
  if(smpi_init_sleep > 0)
    simcall_process_sleep(smpi_init_sleep);
}

double smpi_mpi_wtime(){
  double time;
  if (smpi_process_initialized() != 0 && smpi_process_finalized() == 0 && smpi_process_get_sampling() == 0) {
    time = SIMIX_get_clock();
    // to avoid deadlocks if used as a break condition, such as
    //     while (MPI_Wtime(...) < time_limit) {
    //       ...
    //     }
    // because the time will not normally advance when only calls to MPI_Wtime
    // are made -> deadlock (MPI_Wtime never reaches the time limit)
    if(smpi_wtime_sleep > 0)
      simcall_process_sleep(smpi_wtime_sleep);
  } else {
    time = SIMIX_get_clock();
  }
  return time;
}

static MPI_Request build_request(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                 unsigned flags)
{
  MPI_Request request = nullptr;

  void *old_buf = nullptr;

  request = xbt_new(s_smpi_mpi_request_t, 1);

  s_smpi_subtype_t *subtype = static_cast<s_smpi_subtype_t*>(datatype->substruct);

  if((((flags & RECV) != 0) && ((flags & ACCUMULATE) != 0)) || (datatype->sizeof_substruct != 0)){
    // This part handles the problem of non-contiguous memory
    old_buf = buf;
    buf = count == 0 ? nullptr : xbt_malloc(count * smpi_datatype_size(datatype));
    if ((datatype->sizeof_substruct != 0) && ((flags & SEND) != 0)) {
      subtype->serialize(old_buf, buf, count, datatype->substruct);
    }
  }

  request->buf = buf;
  // This part handles the problem of non-contiguous memory (for the deserialization at the reception)
  request->old_buf = old_buf;
  request->old_type = datatype;

  request->size = smpi_datatype_size(datatype) * count;
  smpi_datatype_use(datatype);
  request->src = src;
  request->dst = dst;
  request->tag = tag;
  request->comm = comm;
  smpi_comm_use(request->comm);
  request->action = nullptr;
  request->flags = flags;
  request->detached = 0;
  request->detached_sender = nullptr;
  request->real_src = 0;
  request->truncated = 0;
  request->real_size = 0;
  request->real_tag = 0;
  if (flags & PERSISTENT)
    request->refcount = 1;
  else
    request->refcount = 0;
  request->op = MPI_REPLACE;

  return request;
}
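
/* Note on the request lifecycle (informative comment): build_request() only allocates
 * and initializes the request; nothing is posted to the simulator until smpi_mpi_start()
 * runs. PERSISTENT requests start with refcount 1 because the user keeps a handle on
 * them; transient ones start at 0 and are pinned by smpi_mpi_start() for the duration
 * of the communication. The last smpi_mpi_request_free() actually destroys the request. */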

void smpi_empty_status(MPI_Status * status)
{
  if(status != MPI_STATUS_IGNORE) {
    status->MPI_SOURCE = MPI_ANY_SOURCE;
    status->MPI_TAG = MPI_ANY_TAG;
    status->MPI_ERROR = MPI_SUCCESS;
    status->count = 0;
  }
}

static void smpi_mpi_request_free_voidp(void* request)
{
  MPI_Request req = static_cast<MPI_Request>(request);
  smpi_mpi_request_free(&req);
}

/* MPI Low level calls */
MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | SEND | PREPARED);
  return request;
}

MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
  return request;
}

MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                          src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                          smpi_process_index(), tag, comm, PERSISTENT | RECV | PREPARED);
  return request;
}
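
/* The PREPARED flag marks requests that are initialized but not yet started. The
 * corresponding user-level pattern is (sketch):
 *
 *   MPI_Request req;
 *   MPI_Send_init(buf, count, MPI_INT, dst, tag, MPI_COMM_WORLD, &req); // smpi_mpi_send_init
 *   for (int it = 0; it < niter; it++) {
 *     MPI_Start(&req);          // smpi_mpi_start
 *     MPI_Wait(&req, &status);  // smpi_mpi_wait
 *   }
 *   MPI_Request_free(&req);     // smpi_mpi_request_free
 */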

void smpi_mpi_start(MPI_Request request)
{
  smx_mailbox_t mailbox;

  xbt_assert(request->action == nullptr, "Cannot (re-)start unfinished communication");
  request->flags &= ~PREPARED;
  request->flags &= ~FINISHED;
  request->refcount++;

  if ((request->flags & RECV) != 0) {
    print_request("New recv", request);

    int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");

    xbt_mutex_t mut = smpi_process_mailboxes_mutex();
    if (async_small_thresh != 0 || (request->flags & RMA) != 0)
      xbt_mutex_acquire(mut);

    if (async_small_thresh == 0 && (request->flags & RMA) == 0) {
      mailbox = smpi_process_mailbox();
    }
    else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) {
      //We have to check both mailboxes (because SSEND messages are sent to the large mbox).
      //begin with the more appropriate one: the small one.
      mailbox = smpi_process_mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv,
                                                  static_cast<void*>(request));

      if (action == nullptr) {
        mailbox = smpi_process_mailbox();
        XBT_DEBUG("No, nothing in the small mailbox, test the other one: %p", mailbox);
        action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));
        if (action == nullptr) {
          XBT_DEBUG("Still nothing, switch back to the small mailbox: %p", mailbox);
          mailbox = smpi_process_mailbox_small();
        }else{
          XBT_DEBUG("yes there was something for us in the large mailbox");
        }
      }
    }
    else {
      mailbox = smpi_process_mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv,
                                                  static_cast<void*>(request));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = smpi_process_mailbox();
      }else{
        XBT_DEBUG("yes there was something for us in the small mailbox");
      }
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    request->action = simcall_comm_irecv(SIMIX_process_self(), mailbox, request->buf, &request->real_size, &match_recv,
                                         !smpi_process_get_replaying() ? smpi_comm_copy_data_callback
                                                                       : &smpi_comm_null_copy_buffer_callback,
                                         request, -1.0);
    XBT_DEBUG("recv simcall posted");

    if (async_small_thresh != 0 || (request->flags & RMA) != 0)
      xbt_mutex_release(mut);
  } else { /* the RECV flag was not set, so this is a send */
    int receiver = request->dst;

    int rank = request->src;
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, receiver, request->tag, request->size);
    }
    print_request("New send", request);

    void* buf = request->buf;
    if ((request->flags & SSEND) == 0 && ((request->flags & RMA) != 0
        || static_cast<int>(request->size) < xbt_cfg_get_int("smpi/send-is-detached-thresh"))) {
      void *oldbuf = nullptr;
      request->detached = 1;
      XBT_DEBUG("Send request %p is detached", request);
      request->refcount++;
      if(request->old_type->sizeof_substruct == 0){
        oldbuf = request->buf;
        if (!smpi_process_get_replaying() && oldbuf != nullptr && request->size != 0){
          if((smpi_privatize_global_variables != 0)
              && (static_cast<char*>(request->buf) >= smpi_start_data_exe)
              && (static_cast<char*>(request->buf) < smpi_start_data_exe + smpi_size_data_exe)){
            XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
            smpi_switch_data_segment(request->src);
          }
          buf = xbt_malloc(request->size);
          memcpy(buf, oldbuf, request->size);
          XBT_DEBUG("buf %p copied into %p", oldbuf, buf);
        }
      }
    }

    //if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if(request->detached != 0 || ((request->flags & (ISEND | SSEND)) != 0)){// issend should be treated as isend
      //isend and send timings may be different
      sleeptime = ((request->flags & ISEND) != 0) ? smpi_ois(request->size) : smpi_os(request->size);
    }

    if(sleeptime > 0.0){
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %f ", request->size, sleeptime);
    }

    int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");

    xbt_mutex_t mut = smpi_process_remote_mailboxes_mutex(receiver);

    if (async_small_thresh != 0 || (request->flags & RMA) != 0)
      xbt_mutex_acquire(mut);

    if (!(async_small_thresh != 0 || (request->flags & RMA) != 0)) {
      mailbox = smpi_process_remote_mailbox(receiver);
    } else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) { // eager mode
      mailbox = smpi_process_remote_mailbox(receiver);
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
      smx_activity_t action = simcall_comm_iprobe(mailbox, 1, request->dst, request->tag, &match_send,
                                                  static_cast<void*>(request));
      if (action == nullptr) {
        if ((request->flags & SSEND) == 0){
          mailbox = smpi_process_remote_mailbox_small(receiver);
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
        } else {
          mailbox = smpi_process_remote_mailbox_small(receiver);
          XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
          action = simcall_comm_iprobe(mailbox, 1, request->dst, request->tag, &match_send, static_cast<void*>(request));
          if (action == nullptr) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = smpi_process_remote_mailbox(receiver);
          }
        }
      } else {
        XBT_DEBUG("Yes there was something for us in the large mailbox");
      }
    } else {
      mailbox = smpi_process_remote_mailbox(receiver);
      XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)", request, mailbox, request->buf);
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    request->action = simcall_comm_isend(SIMIX_process_from_PID(request->src+1), mailbox, request->size, -1.0,
                                         buf, request->real_size, &match_send,
                                         &xbt_free_f, // how to free the userdata if a detached send fails
                                         !smpi_process_get_replaying() ? smpi_comm_copy_data_callback
                                                                       : &smpi_comm_null_copy_buffer_callback,
                                         request,
                                         // detach if msg size < eager/rdv switch limit
                                         request->detached);
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (request->action == nullptr) */
    if (request->action != nullptr)
      simcall_set_category(request->action, TRACE_internal_smpi_get_category());

    if (async_small_thresh != 0 || ((request->flags & RMA) != 0))
      xbt_mutex_release(mut);
  }
}
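
/* Mailbox selection above, in rough terms: each process owns a small and a large
 * mailbox. Messages below smpi/async-small-thresh go through the small (asynchronous)
 * mailbox, so a send may complete before the matching receive is posted (eager mode);
 * larger messages and SSENDs go through the large mailbox and effectively wait for the
 * receiver (rendezvous mode). Sends below smpi/send-is-detached-thresh additionally
 * copy the user buffer so the sender can reuse it at once; the detached sender request
 * is then freed on the receiver side (see detached_sender in finish_wait()). */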

void smpi_mpi_startall(int count, MPI_Request * requests)
{
  if(requests == nullptr)
    return;
  for(int i = 0; i < count; i++) {
    smpi_mpi_start(requests[i]);
  }
}

void smpi_mpi_request_free(MPI_Request * request)
{
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount--;
    if((*request)->refcount < 0)
      xbt_die("wrong refcount");

    if((*request)->refcount == 0){
      smpi_datatype_unuse((*request)->old_type);
      smpi_comm_unuse((*request)->comm);
      print_request("Destroying", (*request));
      xbt_free(*request);
      *request = MPI_REQUEST_NULL;
    }else{
      print_request("Decrementing", (*request));
    }
  }else{
    xbt_die("freeing an already free request");
  }
}

MPI_Request smpi_rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                               MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  unsigned flags = RMA | NON_PERSISTENT | ISEND | SEND | PREPARED;
  if(op != MPI_OP_NULL) // accumulate-style puts carry the operation along
    flags |= ACCUMULATE;
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag, comm, flags);
  if(op != MPI_OP_NULL)
    request->op = op;
  return request;
}

MPI_Request smpi_rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                               MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  unsigned flags = RMA | NON_PERSISTENT | RECV | PREPARED;
  if(op != MPI_OP_NULL) // accumulate-style gets carry the operation along
    flags |= ACCUMULATE;
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag, comm, flags);
  if(op != MPI_OP_NULL)
    request->op = op;
  return request;
}

MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | ISEND | SEND | PREPARED);
  return request;
}

MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | ISEND | SEND);
  smpi_mpi_start(request);
  return request;
}

MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
  smpi_mpi_start(request);
  return request;
}

MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                          src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                          smpi_process_index(), tag, comm, PERSISTENT | RECV | PREPARED);
  return request;
}

MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                          src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                          smpi_process_index(), tag, comm, NON_PERSISTENT | RECV);
  smpi_mpi_start(request);
  return request;
}

void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
  smpi_mpi_wait(&request, status);
}

void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | SEND);

  smpi_mpi_start(request);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
}

void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = build_request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | SSEND | SEND);

  smpi_mpi_start(request);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
}

void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  MPI_Request requests[2];
  MPI_Status stats[2];
  int myid = smpi_process_index();
  if ((smpi_group_index(smpi_comm_group(comm), dst) == myid) && (smpi_group_index(smpi_comm_group(comm), src) == myid)) {
    smpi_datatype_copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    return;
  }
  requests[0] = smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  smpi_mpi_startall(2, requests);
  smpi_mpi_waitall(2, requests, stats);
  smpi_mpi_request_free(&requests[0]);
  smpi_mpi_request_free(&requests[1]);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}

int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
{
  return status->count / smpi_datatype_size(datatype);
}
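
/* For instance, after receiving a 16-byte message into a buffer typed MPI_INT (4 bytes
 * per element), status->count holds 16 and smpi_mpi_get_count() reports 4 elements. */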

static void finish_wait(MPI_Request * request, MPI_Status * status)
{
  MPI_Request req = *request;
  smpi_empty_status(status);

  if(!((req->detached != 0) && ((req->flags & SEND) != 0)) && ((req->flags & PREPARED) == 0)){
    if(status != MPI_STATUS_IGNORE) {
      int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
      status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src);
      status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
      status->MPI_ERROR = req->truncated != 0 ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size in the receive differs from the size in the send
      status->count = req->real_size;
    }

    print_request("Finishing", req);
    MPI_Datatype datatype = req->old_type;

    if(((req->flags & ACCUMULATE) != 0) || (datatype->sizeof_substruct != 0)){
      if (!smpi_process_get_replaying()){
        if(smpi_privatize_global_variables != 0 && (static_cast<char*>(req->old_buf) >= smpi_start_data_exe)
            && (static_cast<char*>(req->old_buf) < smpi_start_data_exe + smpi_size_data_exe)){
          XBT_VERB("Privatization : We are unserializing to a zone in global memory - Switch data segment ");
          smpi_switch_data_segment(smpi_process_index());
        }
      }

      if(datatype->sizeof_substruct != 0){
        // This part handles the problem of non-contiguous memory (the deserialization at the reception)
        s_smpi_subtype_t *subtype = static_cast<s_smpi_subtype_t*>(datatype->substruct);
        if(req->flags & RECV)
          subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype),
                               datatype->substruct, req->op);
        xbt_free(req->buf);
      }else if(req->flags & RECV){//apply op on contiguous buffer for accumulate
        int n = req->real_size/smpi_datatype_size(datatype);
        smpi_op_apply(req->op, req->buf, req->old_buf, &n, &datatype);
        xbt_free(req->buf);
      }
    }
  }

  if (TRACE_smpi_view_internals() && ((req->flags & RECV) != 0)){
    int rank = smpi_process_index();
    int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
    TRACE_smpi_recv(rank, src_traced, rank, req->tag);
  }

  if(req->detached_sender != nullptr){
    //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    double sleeptime = smpi_or(req->real_size);
    if(sleeptime > 0.0)
      simcall_process_sleep(sleeptime);
    XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size, sleeptime);
    smpi_mpi_request_free(&(req->detached_sender));
  }
  if(req->flags & PERSISTENT)
    req->action = nullptr;
  req->flags |= FINISHED;

  smpi_mpi_request_free(request);
}

int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
  //assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)

  // to avoid deadlocks if used as a break condition, such as
  //     while (MPI_Test(request, flag, status) && flag) {
  //     }
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
  static int nsleeps = 1;
  if(smpi_test_sleep > 0)
    simcall_process_sleep(nsleeps*smpi_test_sleep);

  smpi_empty_status(status);
  int flag = 1;
  if (((*request)->flags & PREPARED) == 0) {
    if ((*request)->action != nullptr)
      flag = simcall_comm_test((*request)->action);
    if (flag) {
      finish_wait(request, status);
      nsleeps = 1; //reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags & PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    } else if (xbt_cfg_get_boolean("smpi/grow-injected-times")){
      nsleeps++;
    }
  }
  return flag;
}
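
/* With the default smpi/test value of 1e-4s and smpi/grow-injected-times enabled,
 * consecutive failed tests inject 1e-4, 2e-4, 3e-4, ... seconds of simulated time, so a
 * polling loop converges instead of spinning in zero simulated time; nsleeps resets to
 * 1 as soon as a test succeeds. */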

int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::ActivityImpl*> comms;
  comms.reserve(count);
  int i;
  int flag = 0;
  *index = MPI_UNDEFINED;
  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
  for(i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action && !(requests[i]->flags & PREPARED)) {
      comms.push_back(requests[i]->action);
      map.push_back(i);
    }
  }
  if(!map.empty()) {
    //multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if(smpi_test_sleep > 0)
      simcall_process_sleep(nsleeps*smpi_test_sleep);
    i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
      *index = map[i];
      finish_wait(&requests[*index], status);
      flag = 1;
      nsleeps = 1; //reset the number of sleeps we will do next time
      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT)) {
        requests[*index] = MPI_REQUEST_NULL;
      }
    }else{
      nsleeps++;
    }
  }else{
    //all requests are null or inactive, return true
    flag = 1;
    smpi_empty_status(status);
  }
  return flag;
}

int smpi_mpi_testall(int count, MPI_Request requests[], MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag = 1;
  for(int i = 0; i < count; i++){
    if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
      if (smpi_mpi_test(&requests[i], pstat) != 1)
        flag = 0;
      else
        requests[i] = MPI_REQUEST_NULL;
    }else{
      smpi_empty_status(pstat);
    }
    if(status != MPI_STATUSES_IGNORE)
      status[i] = *pstat;
  }
  return flag;
}

void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  //FIXME find another way to avoid busy waiting ?
  // the issue here is that we have to wait on a nonexistent comm
  while (flag == 0) {
    smpi_mpi_iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
  }
}

void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  MPI_Request request = build_request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
                 smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag, comm, PERSISTENT | RECV);

  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // (especially when used as a break condition, such as while(MPI_Iprobe(...)) ... )
  // multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
  static int nsleeps = 1;
  double speed = simgrid::s4u::Actor::self()->host()->speed();
  double maxrate = xbt_cfg_get_double("smpi/iprobe-cpu-usage");
  if (smpi_iprobe_sleep > 0) {
    smx_activity_t iprobe_sleep = simcall_execution_start("iprobe",
                                                          /* flops to execute */ nsleeps*smpi_iprobe_sleep*speed*maxrate,
                                                          /* priority */ 1.0, /* performance bound */ maxrate*speed);
    simcall_execution_wait(iprobe_sleep);
  }
  // behave like a receive, but don't do it
  smx_mailbox_t mailbox;

  print_request("New iprobe", request);
  // We have to test both mailboxes as we don't know on which one the message will arrive
  if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
    mailbox = smpi_process_mailbox_small();
    XBT_DEBUG("Trying to probe the perm recv mailbox");
    request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv,
                                          static_cast<void*>(request));
  }

  if (request->action == nullptr){
    mailbox = smpi_process_mailbox();
    XBT_DEBUG("trying to probe the other mailbox");
    request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv,
                                          static_cast<void*>(request));
  }

  if (request->action != nullptr){
    simgrid::kernel::activity::Comm *sync_comm = static_cast<simgrid::kernel::activity::Comm*>(request->action);
    MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
    *flag = 1;
    if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED) == 0) {
      status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
      status->MPI_TAG = req->tag;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = req->real_size;
    }
    nsleeps = 1; //reset the number of sleeps we will do next time
  }else{
    *flag = 0;
    if (xbt_cfg_get_boolean("smpi/grow-injected-times"))
      nsleeps++;
  }
  smpi_mpi_request_free(&request);
}
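
/* The injected probe cost is expressed in flops so that it scales with the host speed:
 * flops = nsleeps * smpi_iprobe_sleep * speed * maxrate, executed under a performance
 * bound of speed * maxrate. With hypothetical numbers (a 1 Gf/s host, the default
 * smpi/iprobe of 1e-4 and smpi/iprobe-cpu-usage left at 1), the first failed probe
 * accounts for 1e5 flops, i.e. 1e-4s of simulated time. */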

void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
{
  print_request("Waiting", *request);
  if ((*request)->flags & PREPARED) {
    smpi_empty_status(status);
    return;
  }

  if ((*request)->action != nullptr)
    // this is not a detached send
    simcall_comm_wait((*request)->action, -1.0);

  finish_wait(request, status);
  if (*request != MPI_REQUEST_NULL && (((*request)->flags & NON_PERSISTENT) != 0))
    *request = MPI_REQUEST_NULL;
}

static bool sort_accumulates(MPI_Request a, MPI_Request b)
{
  return (a->tag < b->tag);
}

int smpi_mpi_waitany(int count, MPI_Request requests[], MPI_Status * status)
{
  s_xbt_dynar_t comms; // Keep it on stack to save some extra mallocs
  int i;
  int size = 0;
  int index = MPI_UNDEFINED;
  int *map;

  if(count > 0) {
    // Wait for a request to complete
    xbt_dynar_init(&comms, sizeof(smx_activity_t), nullptr);
    map = xbt_new(int, count);
    XBT_DEBUG("Wait for one of %d", count);
    for(i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED) && !(requests[i]->flags & FINISHED)) {
        if (requests[i]->action != nullptr) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          xbt_dynar_push(&comms, &requests[i]->action);
          map[size] = i;
          size++;
        }else{
          // This is a finished detached request, let's return this one
          size = 0; // so we free the dynar but don't do the waitany call
          index = i;
          finish_wait(&requests[i], status); // cleanup if refcount = 0
          if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags & NON_PERSISTENT))
            requests[i] = MPI_REQUEST_NULL; // set to null
          break;
        }
      }
    }
    if(size > 0) {
      i = simcall_comm_waitany(&comms, -1);

      // not MPI_UNDEFINED, as this is a simix return code
      if (i != -1) {
        index = map[i];
        //in case of an accumulate, we have to wait the end of all requests to apply the operation, ordered correctly.
        if ((requests[index] == MPI_REQUEST_NULL)
            || (!((requests[index]->flags & ACCUMULATE) && (requests[index]->flags & RECV)))){
          finish_wait(&requests[index], status);
          if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
            requests[index] = MPI_REQUEST_NULL;
        }
      }
    }

    xbt_dynar_free_data(&comms);
    xbt_free(map);
  }

  if (index == MPI_UNDEFINED)
    smpi_empty_status(status);

  return index;
}

int smpi_mpi_waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  std::vector<MPI_Request> accumulates;
  int index;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int retvalue = MPI_SUCCESS;
  //tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (int c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL || (requests[c]->flags & PREPARED)) {
        smpi_empty_status(&status[c]);
      } else if (requests[c]->src == MPI_PROC_NULL) {
        smpi_empty_status(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for (int c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      smpi_mpi_wait(&requests[c], pstat);
      index = c;
    } else {
      index = smpi_mpi_waitany(count, requests, pstat);
      if (index == MPI_UNDEFINED)
        break;

      if (requests[index] != MPI_REQUEST_NULL
          && (requests[index]->flags & RECV)
          && (requests[index]->flags & ACCUMULATE))
        accumulates.push_back(requests[index]);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  if (!accumulates.empty()) {
    std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
    for (auto req : accumulates) {
      finish_wait(&req, status);
    }
  }

  return retvalue;
}
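
/* Accumulate receives are deliberately kept out of the generic completion path: they
 * are collected, sorted by tag (see sort_accumulates) and only then finished, so the
 * pending MPI_Op applications happen in a deterministic order, whatever the order in
 * which the underlying communications actually completed. */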

int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int index;
  int count = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for(int i = 0; i < incount; i++) {
    index = smpi_mpi_waitany(incount, requests, pstat);
    if(index != MPI_UNDEFINED){
      indices[count] = index;
      count++;
      if(status != MPI_STATUSES_IGNORE)
        status[index] = *pstat;
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }else{
      return MPI_UNDEFINED;
    }
  }
  return count;
}

int smpi_mpi_testsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0;
  int count_dead = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for(int i = 0; i < incount; i++) {
    if((requests[i] != MPI_REQUEST_NULL)) {
      if(smpi_mpi_test(&requests[i], pstat)) {
        indices[count] = i;
        count++;
        if(status != MPI_STATUSES_IGNORE)
          status[i] = *pstat;
        if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags & NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
      }
    }else{
      count_dead++;
    }
  }
  if(count_dead == incount)
    return MPI_UNDEFINED;
  else
    return count;
}

void smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
{
  smpi_coll_tuned_bcast_binomial_tree(buf, count, datatype, root, comm);
}

void smpi_mpi_barrier(MPI_Comm comm)
{
  smpi_coll_tuned_barrier_ompi_basic_linear(comm);
}

void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_GATHER;
  MPI_Aint lb = 0;
  MPI_Aint recvext = 0;

  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
  } else {
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + root * recvcount * recvext,
                       recvcount, recvtype);
    // Receive buffers from senders
    MPI_Request *requests = xbt_new(MPI_Request, size - 1);
    int index = 0;
    for (int src = 0; src < size; src++) {
      if(src != root) {
        requests[index] = smpi_irecv_init(static_cast<char*>(recvbuf) + src * recvcount * recvext, recvcount, recvtype,
                                          src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUSES_IGNORE);
    for (int src = 0; src < size-1; src++) {
      smpi_mpi_request_free(&requests[src]);
    }
    xbt_free(requests);
  }
}

void smpi_mpi_reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op,
                             MPI_Comm comm)
{
  int rank = smpi_comm_rank(comm);

  /* arbitrarily choose root as rank 0 */
  int size = smpi_comm_size(comm);
  int count = 0;
  int *displs = xbt_new(int, size);
  for (int i = 0; i < size; i++) {
    displs[i] = count;
    count += recvcounts[i];
  }
  void *tmpbuf = static_cast<void*>(smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype)));

  mpi_coll_reduce_fun(sendbuf, tmpbuf, count, datatype, op, 0, comm);
  smpi_mpi_scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf, recvcounts[rank], datatype, 0, comm);

  xbt_free(displs);
  smpi_free_tmp_buffer(tmpbuf);
}
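
/* Worked example: with recvcounts = {2, 3, 4}, the loop above yields displs = {0, 2, 5}
 * and count = 9. Rank 0 reduces all 9 elements into tmpbuf, then scatterv hands slice
 * [displs[r], displs[r] + recvcounts[r]) back to each rank r. */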

void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs,
                      MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_GATHERV;
  MPI_Aint lb = 0;
  MPI_Aint recvext = 0;

  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
  } else {
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + displs[root] * recvext,
                       recvcounts[root], recvtype);
    // Receive buffers from senders
    MPI_Request *requests = xbt_new(MPI_Request, size - 1);
    int index = 0;
    for (int src = 0; src < size; src++) {
      if(src != root) {
        requests[index] = smpi_irecv_init(static_cast<char*>(recvbuf) + displs[src] * recvext,
                                          recvcounts[src], recvtype, src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUSES_IGNORE);
    for (int src = 0; src < size-1; src++) {
      smpi_mpi_request_free(&requests[src]);
    }
    xbt_free(requests);
  }
}

void smpi_mpi_allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                        void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
  int system_tag = COLL_TAG_ALLGATHER;
  MPI_Aint lb = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  // FIXME: check for errors
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char *>(recvbuf) + rank * recvcount * recvext,
                     recvcount, recvtype);
  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, 2 * (size - 1));
  int index = 0;
  for (int other = 0; other < size; other++) {
    if(other != rank) {
      requests[index] = smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag, comm);
      index++;
      requests[index] = smpi_irecv_init(static_cast<char *>(recvbuf) + other * recvcount * recvext, recvcount, recvtype,
                                        other, system_tag, comm);
      index++;
    }
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUSES_IGNORE);
  for (int other = 0; other < 2*(size-1); other++) {
    smpi_mpi_request_free(&requests[other]);
  }
  xbt_free(requests);
}

void smpi_mpi_allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
                         int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm)
{
  int system_tag = COLL_TAG_ALLGATHERV;
  MPI_Aint lb = 0;
  MPI_Aint recvext = 0;

  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype,
                     static_cast<char *>(recvbuf) + displs[rank] * recvext, recvcounts[rank], recvtype);
  // Send/Recv buffers to/from others
  MPI_Request *requests = xbt_new(MPI_Request, 2 * (size - 1));
  int index = 0;
  for (int other = 0; other < size; other++) {
    if(other != rank) {
      requests[index] =
        smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag, comm);
      index++;
      requests[index] = smpi_irecv_init(static_cast<char *>(recvbuf) + displs[other] * recvext, recvcounts[other],
                                        recvtype, other, system_tag, comm);
      index++;
    }
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUSES_IGNORE);
  for (int other = 0; other < 2*(size-1); other++) {
    smpi_mpi_request_free(&requests[other]);
  }
  xbt_free(requests);
}

void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_SCATTER;
  MPI_Aint lb = 0;
  MPI_Aint sendext = 0;
  MPI_Request *requests;

  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  if(rank != root) {
    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
  } else {
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    if(recvbuf != MPI_IN_PLACE){
      smpi_datatype_copy(static_cast<char *>(sendbuf) + root * sendcount * sendext,
                         sendcount, sendtype, recvbuf, recvcount, recvtype);
    }
    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);
    int index = 0;
    for(int dst = 0; dst < size; dst++) {
      if(dst != root) {
        requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + dst * sendcount * sendext, sendcount, sendtype,
                                          dst, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUSES_IGNORE);
    for (int dst = 0; dst < size-1; dst++) {
      smpi_mpi_request_free(&requests[dst]);
    }
    xbt_free(requests);
  }
}

void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype, void *recvbuf, int recvcount,
                       MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_SCATTERV;
  MPI_Aint lb = 0;
  MPI_Aint sendext = 0;

  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  if(rank != root) {
    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
  } else {
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    if(recvbuf != MPI_IN_PLACE){
      smpi_datatype_copy(static_cast<char *>(sendbuf) + displs[root] * sendext, sendcounts[root],
                         sendtype, recvbuf, recvcount, recvtype);
    }
    // Send buffers to receivers
    MPI_Request *requests = xbt_new(MPI_Request, size - 1);
    int index = 0;
    for (int dst = 0; dst < size; dst++) {
      if (dst != root) {
        requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + displs[dst] * sendext, sendcounts[dst],
                                          sendtype, dst, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUSES_IGNORE);
    for (int dst = 0; dst < size-1; dst++) {
      smpi_mpi_request_free(&requests[dst]);
    }
    xbt_free(requests);
  }
}

void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root,
                     MPI_Comm comm)
{
  int system_tag = COLL_TAG_REDUCE;
  MPI_Aint lb = 0;
  MPI_Aint dataext = 0;

  char* sendtmpbuf = static_cast<char *>(sendbuf);

  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);
  //non commutative case, use a working algo from openmpi
  if(!smpi_op_is_commute(op)){
    smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count, datatype, op, root, comm);
    return;
  }

  if(sendbuf == MPI_IN_PLACE) {
    sendtmpbuf = static_cast<char *>(smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype)));
    smpi_datatype_copy(recvbuf, count, datatype, sendtmpbuf, count, datatype);
  }

  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendtmpbuf, count, datatype, root, system_tag, comm);
  } else {
    smpi_datatype_extent(datatype, &lb, &dataext);
    // Local copy from root
    if (sendtmpbuf != nullptr && recvbuf != nullptr)
      smpi_datatype_copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
    // Receive buffers from senders
    MPI_Request *requests = xbt_new(MPI_Request, size - 1);
    void **tmpbufs = xbt_new(void *, size - 1);
    int index = 0;
    for (int src = 0; src < size; src++) {
      if (src != root) {
        if (!smpi_process_get_replaying())
          tmpbufs[index] = xbt_malloc(count * dataext);
        else
          tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
        requests[index] =
          smpi_irecv_init(tmpbufs[index], count, datatype, src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    for (int src = 0; src < size - 1; src++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      XBT_DEBUG("finished waiting any request with index %d", index);
      if(index == MPI_UNDEFINED) {
        break;
      }else{
        smpi_mpi_request_free(&requests[index]);
      }
      if(op) /* op can be MPI_OP_NULL that does nothing */
        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    }
    for(index = 0; index < size - 1; index++) {
      smpi_free_tmp_buffer(tmpbufs[index]);
    }
    xbt_free(tmpbufs);
    xbt_free(requests);
  }
  if(sendbuf == MPI_IN_PLACE) {
    smpi_free_tmp_buffer(sendtmpbuf);
  }
}

void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
  smpi_mpi_bcast(recvbuf, count, datatype, 0, comm);
}
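
/* This fallback composes allreduce from a reduce to rank 0 followed by a broadcast.
 * Tuned algorithms borrowed from OpenMPI/MPICH can be substituted for the collectives
 * of this file via SMPI's collective selector (the smpi/coll-selector family of
 * configuration options, if memory serves for the exact names). */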

void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = -888;
  MPI_Aint lb = 0;
  MPI_Aint dataext = 0;

  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);

  smpi_datatype_extent(datatype, &lb, &dataext);

  // Local copy from self
  smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  MPI_Request *requests = xbt_new(MPI_Request, size - 1);
  void **tmpbufs = xbt_new(void *, rank);
  int index = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(size - 1, requests);

  if(smpi_op_is_commute(op)){
    for (int other = 0; other < size - 1; other++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if(index == MPI_UNDEFINED) {
        break;
      }
      if(index < rank) {
        // #Request is below rank: it's a irecv
        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
      }
    }
  }else{
    //non commutative case, wait in order
    for (int other = 0; other < size - 1; other++) {
      smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
      if(other < rank) {
        smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
      }
    }
  }
  for(index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for(index = 0; index < size-1; index++) {
    smpi_mpi_request_free(&requests[index]);
  }
  xbt_free(tmpbufs);
  xbt_free(requests);
}
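
/* Scan semantics (inclusive prefix): with op = MPI_SUM and v_r the value held by rank r,
 * rank r ends up with v_0 + ... + v_r. E.g. ranks 0..3 holding {1, 2, 3, 4} obtain
 * {1, 3, 6, 10}: each rank receives from every lower rank (the irecv loop) and sends its
 * own contribution to every higher rank (the isend loop). */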

void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = -888;
  MPI_Aint lb = 0;
  MPI_Aint dataext = 0;
  int recvbuf_is_empty = 1;
  int rank = smpi_comm_rank(comm);
  int size = smpi_comm_size(comm);

  smpi_datatype_extent(datatype, &lb, &dataext);

  // Send/Recv buffers to/from others
  MPI_Request *requests = xbt_new(MPI_Request, size - 1);
  void **tmpbufs = xbt_new(void *, rank);
  int index = 0;
  for (int other = 0; other < rank; other++) {
    tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
    requests[index] = smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
    index++;
  }
  for (int other = rank + 1; other < size; other++) {
    requests[index] = smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(size - 1, requests);

  if(smpi_op_is_commute(op)){
    for (int other = 0; other < size - 1; other++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if(index == MPI_UNDEFINED) {
        break;
      }
      if(index < rank) {
        if(recvbuf_is_empty){
          smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        }else{
          // #Request is below rank: it's a irecv
          smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
        }
      }
    }
  }else{
    //non commutative case, wait in order
    for (int other = 0; other < size - 1; other++) {
      smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
      if(other < rank) {
        if (recvbuf_is_empty) {
          smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
          recvbuf_is_empty = 0;
        }else{
          smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
        }
      }
    }
  }
  for(index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);
  }
  for(index = 0; index < size-1; index++) {
    smpi_mpi_request_free(&requests[index]);
  }
  xbt_free(tmpbufs);
  xbt_free(requests);
}
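
/* Exscan semantics (exclusive prefix): rank r ends up with v_0 + ... + v_{r-1}, and
 * recvbuf is left untouched on rank 0, which receives no contribution (consistent with
 * the MPI standard, which leaves rank 0's receive buffer undefined). E.g. ranks 0..3
 * holding {1, 2, 3, 4} obtain {untouched, 1, 3, 6}. */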