/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_request.hpp"

#include "simgrid/Exception.hpp"
#include "simgrid/s4u/ConditionVariable.hpp"
#include "simgrid/s4u/Exec.hpp"
#include "simgrid/s4u/Mutex.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_host.hpp"
#include "smpi_op.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/smpi/include/smpi_actor.hpp"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");

static simgrid::config::Flag<double> smpi_iprobe_sleep(
    "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
    "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
std::vector<s_smpi_factor_t> smpi_ois_values;

extern void (*smpi_comm_copy_data_callback)(simgrid::kernel::activity::CommImpl*, void*, size_t);

Request::Request(const void* buf, int count, MPI_Datatype datatype, aid_t src, aid_t dst, int tag, MPI_Comm comm,
                 unsigned flags, MPI_Op op)
    : buf_(const_cast<void*>(buf))
    , size_(datatype->size() * count)

  if (op != MPI_REPLACE && op != MPI_OP_NULL)
    op->ref();
  detached_sender_ = nullptr;
  unmatched_types_ = false;
  if (flags & MPI_REQ_PERSISTENT)

void Request::unref(MPI_Request* request)
{
  xbt_assert(*request != MPI_REQUEST_NULL, "freeing an already free request");

  (*request)->refcount_--;
  if ((*request)->refcount_ < 0) {
    (*request)->print_request("wrong refcount");
    xbt_die("Whoops, wrong refcount");
  }
  if ((*request)->refcount_ == 0) {
    if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
      ((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
    }
    Comm::unref((*request)->comm_);
    Datatype::unref((*request)->type_);

    if ((*request)->op_ != MPI_REPLACE && (*request)->op_ != MPI_OP_NULL)
      Op::unref(&(*request)->op_);

    (*request)->print_request("Destroying");
    F2C::free_f((*request)->f2c_id());

    *request = MPI_REQUEST_NULL;
  } else {
    (*request)->print_request("Decrementing");
bool Request::match_types(MPI_Datatype stype, MPI_Datatype rtype){
  if ((stype == rtype) ||
      // byte and packed always match with anything
      (stype == MPI_PACKED || rtype == MPI_PACKED || stype == MPI_BYTE || rtype == MPI_BYTE) ||
      // complex datatypes - we don't properly match these yet, as it would mean checking each subtype recursively.
      (stype->flags() & DT_FLAG_DERIVED || rtype->flags() & DT_FLAG_DERIVED) ||
      // duplicated datatypes: check whether the underlying one is ok
      (stype->duplicated_datatype() != MPI_DATATYPE_NULL && match_types(stype->duplicated_datatype(), rtype)) ||
      (rtype->duplicated_datatype() != MPI_DATATYPE_NULL && match_types(stype, rtype->duplicated_datatype())))

  XBT_WARN("Mismatched datatypes: sending %s and receiving %s", stype->name().c_str(), rtype->name().c_str());
bool Request::match_common(MPI_Request req, MPI_Request sender, MPI_Request receiver)
{
  xbt_assert(sender, "Cannot match against null sender");
  xbt_assert(receiver, "Cannot match against null receiver");
  XBT_DEBUG("Trying to match %s of sender src %ld against %ld, tag %d against %d, id %d against %d",
            (req == receiver ? "send" : "recv"), sender->src_, receiver->src_, sender->tag_, receiver->tag_,
            sender->comm_->id(), receiver->comm_->id());

  if ((receiver->comm_->id() == MPI_UNDEFINED || sender->comm_->id() == MPI_UNDEFINED ||
       receiver->comm_->id() == sender->comm_->id()) &&
      ((receiver->src_ == MPI_ANY_SOURCE && (receiver->comm_->group()->rank(sender->src_) != MPI_UNDEFINED)) ||
       receiver->src_ == sender->src_) &&
      ((receiver->tag_ == MPI_ANY_TAG && sender->tag_ >= 0) || receiver->tag_ == sender->tag_)) {
    // we match, we can transfer some values
    if (receiver->src_ == MPI_ANY_SOURCE)
      receiver->real_src_ = sender->src_;
    if (receiver->tag_ == MPI_ANY_TAG)
      receiver->real_tag_ = sender->tag_;
    if ((receiver->flags_ & MPI_REQ_PROBE) == 0) {
      if (receiver->real_size_ < sender->real_size_) {
        XBT_DEBUG("Truncating message - should not happen: receiver size: %zu < sender size: %zu",
                  receiver->real_size_, sender->real_size_);
        receiver->truncated_ = true;
      } else if (receiver->real_size_ > sender->real_size_) {
        receiver->real_size_ = sender->real_size_;
      }
    }
    // 0-sized datatypes/counts should not interfere and match
    if (sender->real_size_ != 0 && receiver->real_size_ != 0 &&
        not match_types(sender->type_, receiver->type_))
      receiver->unmatched_types_ = true;
    if (sender->detached_)
      receiver->detached_sender_ = sender; // tie the sender to the receiver, as it is detached and has to be freed on
                                           // the receiver side
    req->flags_ |= MPI_REQ_MATCHED; // mark as impossible to cancel anymore
    XBT_DEBUG("match succeeded");
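    /* Illustrative matching examples (a sketch, not exhaustive):
     *   recv(src = MPI_ANY_SOURCE, tag = MPI_ANY_TAG) matches any sender of a compatible comm with tag >= 0;
     *   recv(src = 3, tag = 42)                       only matches send(src = 3, tag = 42).
     * The `sender->tag_ >= 0` guard keeps MPI_ANY_TAG from matching the
     * negative tags that SMPI reserves for internal messages. */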
void Request::init_buffer(int count){
  // FIXME Handle the case of a partial shared malloc.
  // This part handles the problem of non-contiguous memory (for the unserialization at the reception)
  if ((((flags_ & MPI_REQ_RECV) != 0) && ((flags_ & MPI_REQ_ACCUMULATE) != 0)) || (type_->flags() & DT_FLAG_DERIVED)) {
    // This part handles the problem of non-contiguous memory

    buf_ = xbt_malloc(count * type_->size());
    if ((type_->flags() & DT_FLAG_DERIVED) && ((flags_ & MPI_REQ_SEND) != 0)) {
      type_->serialize(old_buf_, buf_, count);
bool Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  auto ref = static_cast<MPI_Request>(a);
  auto req = static_cast<MPI_Request>(b);
  return match_common(req, req, ref);
}

bool Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  auto ref = static_cast<MPI_Request>(a);
  auto req = static_cast<MPI_Request>(b);
  return match_common(req, ref, req);
}

void Request::print_request(const char* message) const
{
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %ld, dst = %ld, tag = %d, flags = %x]", message, this, buf_,
           size_, src_, dst_, tag_, flags_);
}
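
/* Illustrative: the traces above only show up when the matching log channel is
 * enabled at run time, e.g. with
 *   --log=smpi_request.thres:verbose
 * (the channel name comes from XBT_LOG_NEW_DEFAULT_SUBCATEGORY at the top of
 * this file). */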
/* factories, to hide the internal flags from the caller */
MPI_Request Request::bsend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_BSEND);
}

MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
}

MPI_Request Request::ssend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}

MPI_Request Request::isend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
MPI_Request Request::rma_send_init(const void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                   MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if (op == MPI_OP_NULL)
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src),
                          dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
  else
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src),
                          dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED |
                              MPI_REQ_ACCUMULATE, op);
  return request;
}
MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  aid_t source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src);
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     source, simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}

MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                   MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  aid_t source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src);
  if (op == MPI_OP_NULL)
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
                          dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
  else
    request =
        new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
                    dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                    MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE, op);
  return request;
}

MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  aid_t source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src);
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     source, simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}
MPI_Request Request::ibsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND);
  if (dst != MPI_PROC_NULL)
    request->start();
  return request;
}

MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND);
  if (dst != MPI_PROC_NULL)
    request->start();
  return request;
}

MPI_Request Request::issend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND);
  if (dst != MPI_PROC_NULL)
    request->start();
  return request;
}

MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  aid_t source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src);
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                        source, simgrid::s4u::this_actor::get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
  if (src != MPI_PROC_NULL)
    request->start();
  return request;
}

int Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = irecv(buf, count, datatype, src, tag, comm);
  int retval = wait(&request, status);
  return retval;
}
void Request::bsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);

  if (dst != MPI_PROC_NULL)
    request->start();
  wait(&request, MPI_STATUS_IGNORE);
}

void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);
  if (dst != MPI_PROC_NULL)
    request->start();
  wait(&request, MPI_STATUS_IGNORE);
}

void Request::ssend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND);

  if (dst != MPI_PROC_NULL)
    request->start();
  wait(&request, MPI_STATUS_IGNORE);
}
void Request::sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  aid_t source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src);
  aid_t destination = dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL;

  std::array<MPI_Request, 2> requests;
  std::array<MPI_Status, 2> stats;
  aid_t myid = simgrid::s4u::this_actor::get_pid();
  if ((destination == myid) && (source == myid)) {
    Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    if (status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = source;
      status->MPI_TAG = recvtag;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = sendcount * sendtype->size();
    }
    return;
  }
  requests[0] = isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  startall(2, requests.data());
  waitall(2, requests.data(), stats.data());
  unref(&requests[0]);
  unref(&requests[1]);
  if (status != MPI_STATUS_IGNORE) {
    // Copy receive status
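    // (Per MPI semantics only the receive half is reported back: stats[1],
    // filled by the irecv above; the send status is discarded.)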
void Request::start()
{
  s4u::Mailbox* mailbox;

  xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
  // reinitialize temporary buffer for persistent requests
  if (real_size_ > 0 && (flags_ & MPI_REQ_FINISHED)) {
    init_buffer(real_size_ / type_->size());
  }
  flags_ &= ~MPI_REQ_PREPARED;
  flags_ &= ~MPI_REQ_FINISHED;

  // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later

  if ((flags_ & MPI_REQ_RECV) != 0) {
    this->print_request("New recv");

    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));

    simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
    if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->lock();

    if (smpi_cfg_async_small_thresh() == 0 && (flags_ & MPI_REQ_RMA) == 0) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < smpi_cfg_async_small_thresh()) {
      // We have to check both mailboxes (because SSEND messages are sent to the large mbox).
      // Begin with the more appropriate one: the small one.
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %s (in case of SSEND)?",
                mailbox->get_cname());
      simgrid::kernel::activity::ActivityImplPtr action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        mailbox = process->mailbox();
        XBT_DEBUG("No, nothing in the small mailbox; test the other one: %s", mailbox->get_cname());
        action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));
        if (action == nullptr) {
          XBT_DEBUG("Still nothing, switch back to the small mailbox: %s", mailbox->get_cname());
          mailbox = process->mailbox_small();
        } else {
          XBT_DEBUG("Yes, there was something for us in the large mailbox");
        }
      }
    } else {
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      simgrid::kernel::activity::ActivityImplPtr action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = process->mailbox();
      } else {
        XBT_DEBUG("Yes, there was something for us in the small mailbox");
      }
    }

    action_ = simcall_comm_irecv(
        process->get_actor()->get_impl(), mailbox->get_impl(), buf_, &real_size_, &match_recv,
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
    XBT_DEBUG("recv simcall posted");

    if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->unlock();
  } else { /* the RECV flag was not set, so this is a send */
    const simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
    xbt_assert(process, "Actor pid=%ld is gone??", dst_);
    if (TRACE_smpi_view_internals())
      TRACE_smpi_send(src_, src_, dst_, tag_, size_);
    this->print_request("New send");

    void* buf = buf_;
    if ((flags_ & MPI_REQ_SSEND) == 0 &&
        ((flags_ & MPI_REQ_RMA) != 0 || (flags_ & MPI_REQ_BSEND) != 0 ||
         static_cast<int>(size_) < smpi_cfg_detached_send_thresh())) {
      void* oldbuf = nullptr;
      detached_ = true;
      XBT_DEBUG("Send request %p is detached", this);

      if (not(type_->flags() & DT_FLAG_DERIVED)) {
        oldbuf = buf_;
        if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
          if (smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_), buf_))
            XBT_DEBUG("Privatization: we are sending from a zone inside global memory, switch data segment");

          // We need this temporary buffer even for bsend, as it will be released in the copy callback and we don't
          // have a way to differentiate it, so actually... don't use manually attached buffer space.
          buf = xbt_malloc(size_);
          memcpy(buf, oldbuf, size_);
          XBT_DEBUG("buf %p copied into %p", oldbuf, buf);
        }
      }
    }
    // If we are giving back control to the user without waiting for completion, we have to inject timings.
    double sleeptime = 0.0;
    if (detached_ || ((flags_ & (MPI_REQ_ISEND | MPI_REQ_SSEND)) != 0)) { // issend should be treated as isend
      // isend and send timings may be different
      sleeptime = ((flags_ & MPI_REQ_ISEND) != 0)
                      ? simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->oisend(size_)
                      : simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->osend(size_);
    }

    if (sleeptime > 0.0) {
      simgrid::s4u::this_actor::sleep_for(sleeptime);
      XBT_DEBUG("sending size of %zu: sleep %f", size_, sleeptime);
    }

    simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
    if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->lock();

    if (not(smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < smpi_cfg_async_small_thresh()) { // eager mode
      mailbox = process->mailbox();
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %s?", mailbox->get_cname());
      simgrid::kernel::activity::ActivityImplPtr action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
      if (action == nullptr) {
        if ((flags_ & MPI_REQ_SSEND) == 0) {
          mailbox = process->mailbox_small();
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %s",
                    mailbox->get_cname());
        } else {
          mailbox = process->mailbox_small();
          XBT_DEBUG("SSEND: Is there a corresponding recv already posted in the small mailbox %s?",
                    mailbox->get_cname());
          action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
          if (action == nullptr) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = process->mailbox();
          }
        }
      } else {
        XBT_DEBUG("Yes, there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox();
      XBT_DEBUG("Send request %p is in the large mailbox %s (buf: %p)", this, mailbox->get_cname(), buf_);
    }

    size_t payload_size = size_ + 16; // MPI envelope size (tag + dest + communicator)
    action_ = simcall_comm_isend(
        simgrid::kernel::actor::ActorImpl::by_pid(src_), mailbox->get_impl(), payload_size, -1.0, buf, real_size_,
        &match_send,
        &xbt_free_f, // how to free the userdata if a detached send fails
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this,
        // detach if msg size < eager/rdv switch limit
        detached_);
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (action_ == nullptr) */
    if (action_ != nullptr) {
      boost::static_pointer_cast<kernel::activity::CommImpl>(action_)->set_tracing_category(
          smpi_process()->get_tracing_category());
    }

    if (smpi_cfg_async_small_thresh() != 0 || ((flags_ & MPI_REQ_RMA) != 0))
      mut->unlock();
  }
}
void Request::startall(int count, MPI_Request * requests)
{
  if (requests == nullptr)
    return;

  for (int i = 0; i < count; i++) {
    if (requests[i]->src_ != MPI_PROC_NULL && requests[i]->dst_ != MPI_PROC_NULL)
      requests[i]->start();
  }
}

void Request::cancel()
{
  this->flags_ |= MPI_REQ_CANCELLED;
  if (this->action_ != nullptr)
    (boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(this->action_))->cancel();
}
int Request::test(MPI_Request * request, MPI_Status * status, int* flag) {
  // assume that *request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before)
  // to avoid deadlocks if used as a break condition, such as
  //    while (MPI_Test(request, flag, status) && flag) dostuff...
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
  xbt_assert(*request != MPI_REQUEST_NULL);

  static int nsleeps = 1;
  int ret = MPI_SUCCESS;

  if (smpi_test_sleep > 0)
    simgrid::s4u::this_actor::sleep_for(nsleeps * smpi_test_sleep);

  Status::empty(status);

  if ((*request)->flags_ & MPI_REQ_NBC) {
    *flag = finish_nbc_requests(request, 1);
  }

  if (((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) == 0) {
    if ((*request)->action_ != nullptr && ((*request)->flags_ & MPI_REQ_CANCELLED) == 0) {
      try {
        *flag = simcall_comm_test((*request)->action_.get());
      } catch (const Exception&) {

      if (((*request)->flags_ & MPI_REQ_GENERALIZED) && not((*request)->flags_ & MPI_REQ_COMPLETE))
        *flag = 0;

      finish_wait(request, status); // may invalidate *request
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)) {
        MPI_Status tmp_status;
        MPI_Status* mystatus;
        if (status == MPI_STATUS_IGNORE) {
          mystatus = &tmp_status;
          Status::empty(mystatus);
        } else {
          mystatus = status;
        }
        ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
      }

      nsleeps = 1; // reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    } else if (smpi_cfg_grow_injected_times()) {
      nsleeps++;
    }
int Request::testsome(int incount, MPI_Request requests[], int *count, int *indices, MPI_Status status[])
{
  int error = 0;
  int count_dead = 0;
  int flag = 0;
  MPI_Status stat;
  MPI_Status* pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  *count = 0;
  for (int i = 0; i < incount; i++) {
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
      int ret = test(&requests[i], pstat, &flag);
      if (ret != MPI_SUCCESS)
        error = 1;
      if (flag) {
        indices[*count] = i;
        if (status != MPI_STATUSES_IGNORE)
          status[*count] = *pstat;
        if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
        (*count)++;
      }
    } else {
      count_dead++;
    }
  }
  if (count_dead == incount)
    *count = MPI_UNDEFINED;
  if (error != 0)
    return MPI_ERR_IN_STATUS;
int Request::testany(int count, MPI_Request requests[], int *index, int* flag, MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::CommImpl*> comms;
  comms.reserve(count);

  int i;
  int ret = MPI_SUCCESS;
  *index = MPI_UNDEFINED;

  std::vector<int> map; // Maps all matching comms back to their location in requests
  for (i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
      map.push_back(i);
    }
  }
  if (not map.empty()) {
    // multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if (smpi_test_sleep > 0)
      simgrid::s4u::this_actor::sleep_for(nsleeps * smpi_test_sleep);
    try {
      i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
    } catch (const Exception&) {
      XBT_DEBUG("Exception in testany");

    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
      *index = map[i];
      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_GENERALIZED) &&
          not(requests[*index]->flags_ & MPI_REQ_COMPLETE)) {

      finish_wait(&requests[*index], status);
      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_GENERALIZED)) {
        MPI_Status tmp_status;
        MPI_Status* mystatus;
        if (status == MPI_STATUS_IGNORE) {
          mystatus = &tmp_status;
          Status::empty(mystatus);
        } else {
          mystatus = status;
        }
        ret = (requests[*index]->generalized_funcs)->query_fn((requests[*index]->generalized_funcs)->extra_state,
                                                              mystatus);
      }

      if (requests[*index] != MPI_REQUEST_NULL && requests[*index]->flags_ & MPI_REQ_NBC) {
        *flag = finish_nbc_requests(&requests[*index], 1);
      }

      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT))
        requests[*index] = MPI_REQUEST_NULL;
      XBT_DEBUG("Testany - returning with index %d", *index);

    XBT_DEBUG("Testany on inactive handles, returning flag=1 but empty status");
    // all requests are null or inactive, return true
    *flag = 1;
    *index = MPI_UNDEFINED;
    Status::empty(status);
  }
  return ret;
}
int Request::testall(int count, MPI_Request requests[], int* outflag, MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status* pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag;
  int error = 0;

  for (int i = 0; i < count; i++) {
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      int ret = test(&requests[i], pstat, &flag);

      if (ret != MPI_SUCCESS)
        error = 1;
    } else {
      Status::empty(pstat);
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[i] = *pstat;
    }
  }
  if (error == 1)
    return MPI_ERR_IN_STATUS;
void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  // FIXME: find another way to avoid busy waiting?
  // The issue here is that we have to wait on a nonexistent comm.
  while (flag == 0) {
    iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy waiting on probing: %d", flag);
  }
}
void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // especially when used as a break condition, such as while (MPI_Iprobe(...)) dostuff...
  // nsleeps is a multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
  // This can speed up the execution of certain applications by an order of magnitude, such as HPL
  static int nsleeps = 1;
  double speed = s4u::this_actor::get_host()->get_speed();
  double maxrate = smpi_cfg_iprobe_cpu_usage();
  MPI_Request request =
      new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source),
                  simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PROBE);
  if (smpi_iprobe_sleep > 0) {
    /** Compute the number of flops we will sleep for **/
    s4u::this_actor::exec_init(/*nsleeps: See comment above */ nsleeps *
                               /*(seconds * flop/s -> total flops)*/ smpi_iprobe_sleep * speed * maxrate)
        /* Not the entire CPU can be used when iprobing: This is important for
         * the energy consumption caused by polling with iprobes.
         * Note also that the number of flops that was
         * computed above contains a maxrate factor and is hence reduced (maxrate < 1). */
        ->set_bound(maxrate * speed)
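        /* Worked example (illustrative, assuming a 1 Gf host, the default
         * smpi/iprobe value of 1e-4s, and a CPU-usage cap maxrate = 0.5):
         * the first failed iprobe injects 1 * 1e-4 * 1e9 * 0.5 = 5e4 flops,
         * bounded at 0.5 * 1e9 = 5e8 flop/s, i.e. 1e-4s of simulated time;
         * every further failure scales this by the growing nsleeps factor. */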
  // behave like a receive, but don't do it
  s4u::Mailbox* mailbox;

  request->print_request("New iprobe");
  // We have to test both mailboxes as we don't know if we will receive one or another
  if (smpi_cfg_async_small_thresh() > 0) {
    mailbox = smpi_process()->mailbox_small();
    XBT_DEBUG("Trying to probe the permanent receive mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ == nullptr) {
    mailbox = smpi_process()->mailbox();
    XBT_DEBUG("Trying to probe the other mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ != nullptr) {
    kernel::activity::CommImplPtr sync_comm = boost::static_pointer_cast<kernel::activity::CommImpl>(request->action_);
    const Request* req = static_cast<MPI_Request>(sync_comm->src_data_);
    *flag = 1;
    if (status != MPI_STATUS_IGNORE && (req->flags_ & MPI_REQ_PREPARED) == 0) {
      status->MPI_SOURCE = comm->group()->rank(req->src_);
      status->MPI_TAG = req->tag_;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = req->real_size_;
    }
    nsleeps = 1; // reset the number of sleeps we will do next time
  } else {
    *flag = 0;
    if (smpi_cfg_grow_injected_times())
      nsleeps++;
  }
  unref(&request);
  xbt_assert(request == MPI_REQUEST_NULL);
}
int Request::finish_nbc_requests(MPI_Request* request, int test){
  int flag = 0;
  int ret = 0;
  if (test == 0)
    ret = waitall((*request)->nbc_requests_.size(), (*request)->nbc_requests_.data(), MPI_STATUSES_IGNORE);
  else
    ret = testall((*request)->nbc_requests_.size(), (*request)->nbc_requests_.data(), &flag, MPI_STATUSES_IGNORE);
  if (ret != MPI_SUCCESS)
    xbt_die("Failure when waiting on non-blocking collective sub-requests");

  XBT_DEBUG("Finishing non-blocking collective request with %zu sub-requests", (*request)->nbc_requests_.size());
  for (auto& req : (*request)->nbc_requests_) {
    if ((*request)->buf_ != nullptr && req != MPI_REQUEST_NULL) { // reduce case
      void* buf = req->buf_;
      if ((*request)->type_->flags() & DT_FLAG_DERIVED)
        buf = req->old_buf_;
      if (req->flags_ & MPI_REQ_RECV) {
        if ((*request)->op_ != MPI_OP_NULL) {
          int count = (*request)->size_ / (*request)->type_->size();
          (*request)->op_->apply(buf, (*request)->buf_, &count, (*request)->type_);
        }
        smpi_free_tmp_buffer(static_cast<unsigned char*>(buf));
      }
    }
    if (req != MPI_REQUEST_NULL)
      Request::unref(&req);
  }
  (*request)->nbc_requests_.clear();
  return ret;
}
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
{
  MPI_Request req = *request;
  Status::empty(status);
  if ((req->flags_ & MPI_REQ_CANCELLED) != 0 && (req->flags_ & MPI_REQ_MATCHED) == 0) {
    if (status != MPI_STATUS_IGNORE)
      status->cancelled = 1;
    if (req->detached_sender_ != nullptr)
      unref(&(req->detached_sender_));
    unref(request);
    return;
  }

  if ((req->flags_ & (MPI_REQ_PREPARED | MPI_REQ_GENERALIZED | MPI_REQ_FINISHED)) == 0) {
    if (status != MPI_STATUS_IGNORE) {
      if (req->src_ == MPI_PROC_NULL || req->dst_ == MPI_PROC_NULL) {
        Status::empty(status);
        status->MPI_SOURCE = MPI_PROC_NULL;
      } else {
        aid_t src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
        status->MPI_SOURCE = req->comm_->group()->rank(src);
        status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
        status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      }
      // this handles the case where the size in the receive differs from the size in the send
      status->count = req->real_size_;
    }
    // detached send will be finished at the other end
    if (not(req->detached_ && ((req->flags_ & MPI_REQ_SEND) != 0))) {
      req->print_request("Finishing");
      MPI_Datatype datatype = req->type_;

      // FIXME Handle the case of a partial shared malloc.
      if (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) ||
          (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
        if (not smpi_process()->replaying() && smpi_switch_data_segment(simgrid::s4u::Actor::self(), req->old_buf_))
          XBT_VERB("Privatization: we are unserializing to a zone in global memory, switch data segment");

        if (datatype->flags() & DT_FLAG_DERIVED) {
          // This part handles the problem of non-contiguous memory: the unserialization at the reception
          if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
            datatype->unserialize(req->buf_, req->old_buf_, req->real_size_ / datatype->size(), req->op_);

        } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
          if (datatype->size() != 0) {
            int n = req->real_size_ / datatype->size();
            req->op_->apply(req->buf_, req->old_buf_, &n, datatype);

  if (TRACE_smpi_view_internals() && ((req->flags_ & MPI_REQ_RECV) != 0)) {
    aid_t rank = simgrid::s4u::this_actor::get_pid();
    aid_t src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
    TRACE_smpi_recv(src_traced, rank, req->tag_);
  }
  if (req->detached_sender_ != nullptr) {
    // integrate pseudo-timing for buffering of small messages; do not bother to execute the simcall if 0
    double sleeptime =
        simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->orecv(req->real_size());
    if (sleeptime > 0.0) {
      simgrid::s4u::this_actor::sleep_for(sleeptime);
      XBT_DEBUG("receiving size of %zu: sleep %f", req->real_size_, sleeptime);
    }
    unref(&(req->detached_sender_));
  }
  if (req->flags_ & MPI_REQ_PERSISTENT)
    req->action_ = nullptr;
  req->flags_ |= MPI_REQ_FINISHED;

  if (req->truncated_ || req->unmatched_types_) {
    char error_string[MPI_MAX_ERROR_STRING];
    int error_size;
    int errkind;
    if (req->truncated_)
      errkind = MPI_ERR_TRUNCATE;
    else
      errkind = MPI_ERR_TYPE;
    PMPI_Error_string(errkind, error_string, &error_size);
    MPI_Errhandler err = (req->comm_) ? (req->comm_)->errhandler() : MPI_ERRHANDLER_NULL;
    if (err == MPI_ERRHANDLER_NULL || err == MPI_ERRORS_RETURN)
      XBT_WARN("recv - returned %.*s instead of MPI_SUCCESS", error_size, error_string);
    else if (err == MPI_ERRORS_ARE_FATAL)
      xbt_die("recv - returned %.*s instead of MPI_SUCCESS", error_size, error_string);
    else
      err->call((req->comm_), errkind);
    if (err != MPI_ERRHANDLER_NULL)
      simgrid::smpi::Errhandler::unref(err);
    MC_assert(not MC_is_active()); /* Only fail in MC mode */
  }
  if (req->src_ != MPI_PROC_NULL && req->dst_ != MPI_PROC_NULL)
    unref(request);
}
int Request::wait(MPI_Request * request, MPI_Status * status)
{
  // assume that *request is not MPI_REQUEST_NULL (filtered in PMPI_Wait before)
  xbt_assert(*request != MPI_REQUEST_NULL);

  int ret = MPI_SUCCESS;

  if ((*request)->src_ == MPI_PROC_NULL || (*request)->dst_ == MPI_PROC_NULL) {
    if (status != MPI_STATUS_IGNORE) {
      Status::empty(status);
      status->MPI_SOURCE = MPI_PROC_NULL;
    }
    (*request) = MPI_REQUEST_NULL;
    return ret;
  }

  (*request)->print_request("Waiting");
  if ((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) {
    Status::empty(status);
    return ret;
  }

  if ((*request)->action_ != nullptr) {
    try {
      // this is not a detached send
      simcall_comm_wait((*request)->action_.get(), -1.0);
    } catch (const CancelException&) {
      XBT_VERB("Request cancelled");
    }
  }

  if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
    if (not((*request)->flags_ & MPI_REQ_COMPLETE)) {
      ((*request)->generalized_funcs)->mutex->lock();
      ((*request)->generalized_funcs)->cond->wait(((*request)->generalized_funcs)->mutex);
      ((*request)->generalized_funcs)->mutex->unlock();
    }
    MPI_Status tmp_status;
    MPI_Status* mystatus;
    if (status == MPI_STATUS_IGNORE) {
      mystatus = &tmp_status;
      Status::empty(mystatus);
    } else {
      mystatus = status;
    }
    ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
  }

  if ((*request)->truncated_)
    ret = MPI_ERR_TRUNCATE;

  if ((*request)->flags_ & MPI_REQ_NBC)
    finish_nbc_requests(request, 0);

  finish_wait(request, status); // may invalidate *request
  if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & MPI_REQ_NON_PERSISTENT) != 0))
    *request = MPI_REQUEST_NULL;
  return ret;
}
int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
{
  int index = MPI_UNDEFINED;

  if (count > 0) {
    // Wait for a request to complete
    std::vector<simgrid::kernel::activity::CommImpl*> comms;
    std::vector<int> map;
    XBT_DEBUG("Wait for one of %d", count);
    for (int i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED) &&
          not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
        if (requests[i]->action_ != nullptr) {
          XBT_DEBUG("Waiting any %p", requests[i]);
          comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
          map.push_back(i);
        } else {
          // This is a finished detached request, let's return this one
          comms.clear(); // don't do the waitany call afterwards
          index = i;
          if (requests[index] != MPI_REQUEST_NULL && (requests[index])->flags_ & MPI_REQ_NBC)
            finish_nbc_requests(&requests[index], 0);
          finish_wait(&requests[i], status); // cleanup if refcount = 0
          if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[i] = MPI_REQUEST_NULL; // set to null
          break;
        }
      }
    }
    if (not comms.empty()) {
      XBT_DEBUG("Enter waitany for %zu comms", comms.size());
      int i;
      try {
        i = simcall_comm_waitany(comms.data(), comms.size(), -1);
      } catch (const CancelException&) {
        XBT_INFO("request cancelled");
        i = -1;
      }

      if (i != -1) {
        // not MPI_UNDEFINED, as this is a simix return code
        index = map[i];
        // in case of an accumulate, we have to wait the end of all requests to apply the operation, ordered correctly.
        if ((requests[index] == MPI_REQUEST_NULL) ||
            (not((requests[index]->flags_ & MPI_REQ_ACCUMULATE) && (requests[index]->flags_ & MPI_REQ_RECV)))) {
          finish_wait(&requests[index], status);
          if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[index] = MPI_REQUEST_NULL;
        }
      }
    }
  }

  if (index == MPI_UNDEFINED)
    Status::empty(status);

  return index;
}

static bool sort_accumulates(const Request* a, const Request* b)
{
  return a->tag() > b->tag(); // process accumulate requests in decreasing tag order
}
int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  std::vector<MPI_Request> accumulates;
  int index;
  MPI_Status stat;
  MPI_Status* pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
  int retvalue = MPI_SUCCESS;
  // tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (int c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst_ == MPI_PROC_NULL ||
          (requests[c]->flags_ & MPI_REQ_PREPARED)) {
        Status::empty(&status[c]);
      } else if (requests[c]->src_ == MPI_PROC_NULL) {
        Status::empty(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for (int c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      wait(&requests[c], pstat);
      index = c;
    } else {
      index = waitany(count, requests, pstat);

      if (index == MPI_UNDEFINED)
        break;

      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_RECV) &&
          (requests[index]->flags_ & MPI_REQ_ACCUMULATE))
        accumulates.push_back(requests[index]);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
  for (auto& req : accumulates)
    finish_wait(&req, status);

  return retvalue;
}
int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0;
  int flag;
  int index;
  MPI_Status stat;
  MPI_Status* pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  index = waitany(incount, requests, pstat);
  if (index == MPI_UNDEFINED)
    return MPI_UNDEFINED;
  if (status != MPI_STATUSES_IGNORE) {
    status[count] = *pstat;
  }
  indices[count] = index;
  count++;
  for (int i = 0; i < incount; i++) {
    if (i != index && requests[i] != MPI_REQUEST_NULL
        && not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
      test(&requests[i], pstat, &flag);

      if (status != MPI_STATUSES_IGNORE) {
        status[count] = *pstat;
      }
      if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
        requests[i] = MPI_REQUEST_NULL;

  return count;
}
MPI_Request Request::f2c(int id)
{
  if (id == MPI_FORTRAN_REQUEST_NULL)
    return MPI_REQUEST_NULL;
  return static_cast<MPI_Request>(F2C::lookup()->at(id));
}

void Request::free_f(int id)
{
  if (id != MPI_FORTRAN_REQUEST_NULL) {
    F2C::lookup()->erase(id);
  }
}
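
/* Illustrative round trip between C and Fortran handles (a sketch; c2f() is
 * assumed to come from the F2C helper base class):
 *   int f_id = req->c2f();                  // hand an integer handle to Fortran
 *   MPI_Request back = Request::f2c(f_id);  // recover the C++ object later
 */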
int Request::get_status(const Request* req, int* flag, MPI_Status* status)
{

  if (req != MPI_REQUEST_NULL && req->action_ != nullptr) {
    req->iprobe(req->comm_->group()->rank(req->src_), req->tag_, req->comm_, flag, status);

  if (req != MPI_REQUEST_NULL && (req->flags_ & MPI_REQ_GENERALIZED) && not(req->flags_ & MPI_REQ_COMPLETE)) {

  if (req != MPI_REQUEST_NULL && status != MPI_STATUS_IGNORE) {
    aid_t src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
    status->MPI_SOURCE = req->comm_->group()->rank(src);
    status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
    status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
    status->count = req->real_size_;
  }
  return MPI_SUCCESS;
}

int Request::grequest_start(MPI_Grequest_query_function* query_fn, MPI_Grequest_free_function* free_fn,
                            MPI_Grequest_cancel_function* cancel_fn, void* extra_state, MPI_Request* request)
{
  *request = new Request();
  (*request)->flags_ |= MPI_REQ_GENERALIZED;
  (*request)->flags_ |= MPI_REQ_PERSISTENT;
  (*request)->refcount_ = 1;
  ((*request)->generalized_funcs) = std::make_unique<smpi_mpi_generalized_request_funcs_t>();
  ((*request)->generalized_funcs)->query_fn = query_fn;
  ((*request)->generalized_funcs)->free_fn = free_fn;
  ((*request)->generalized_funcs)->cancel_fn = cancel_fn;
  ((*request)->generalized_funcs)->extra_state = extra_state;
  ((*request)->generalized_funcs)->cond = simgrid::s4u::ConditionVariable::create();
  ((*request)->generalized_funcs)->mutex = simgrid::s4u::Mutex::create();
  return MPI_SUCCESS;
}
int Request::grequest_complete(MPI_Request request)
{
  if ((not(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
    return MPI_ERR_REQUEST;
  request->generalized_funcs->mutex->lock();
  request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
  request->generalized_funcs->cond->notify_one();
  request->generalized_funcs->mutex->unlock();
  return MPI_SUCCESS;
}
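
/* Illustrative lifecycle of a generalized request, seen from user code (a
 * sketch; the callback names are hypothetical):
 *   MPI_Grequest_start(my_query_fn, my_free_fn, my_cancel_fn, &state, &req);
 *   ... perform the asynchronous work ...
 *   MPI_Grequest_complete(req);  // wakes any thread blocked in MPI_Wait(&req, ...)
 *   MPI_Wait(&req, &status);     // calls my_query_fn to fill status, then my_free_fn
 */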
void Request::start_nbc_requests(std::vector<MPI_Request> reqs){
  if (not reqs.empty()) {
    nbc_requests_ = reqs;
    Request::startall(reqs.size(), reqs.data());
  }
}

std::vector<MPI_Request> Request::get_nbc_requests() const
{
  return nbc_requests_;
}