/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_request.hpp"

#include "simgrid/Exception.hpp"
#include "simgrid/s4u/Exec.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_host.hpp"
#include "smpi_op.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/smpi/include/smpi_actor.hpp"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");

static simgrid::config::Flag<double> smpi_iprobe_sleep(
    "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
    "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
std::vector<s_smpi_factor_t> smpi_ois_values;

extern void (*smpi_comm_copy_data_callback)(simgrid::kernel::activity::CommImpl*, void*, size_t);

namespace simgrid{
namespace smpi{

Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                 unsigned flags, MPI_Op op)
    : buf_(const_cast<void*>(buf))
    , old_type_(datatype)
    , size_(datatype->size() * count)
    , src_(src)
    , dst_(dst)
    , tag_(tag)
    , comm_(comm)
    , flags_(flags)
    , op_(op)
{
  datatype->ref();
  comm_->ref();
  if(op != MPI_REPLACE && op != MPI_OP_NULL)
    op_->ref();
  action_          = nullptr;
  detached_        = false;
  detached_sender_ = nullptr;
  real_src_        = 0;
  truncated_       = false;
  real_size_       = 0;
  real_tag_        = 0;
  if (flags & MPI_REQ_PERSISTENT)
    refcount_ = 1;
  else
    refcount_ = 0;
  nbc_requests_      = nullptr;
  nbc_requests_size_ = 0;
  init_buffer(count);
}

void Request::unref(MPI_Request* request)
{
  xbt_assert(*request != MPI_REQUEST_NULL, "freeing an already free request");

  (*request)->refcount_--;
  if ((*request)->refcount_ < 0) {
    (*request)->print_request("wrong refcount");
    xbt_die("Whoops, wrong refcount");
  }
  if ((*request)->refcount_ == 0) {
    if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
      ((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
    } else {
      Comm::unref((*request)->comm_);
      Datatype::unref((*request)->old_type_);
    }
    if ((*request)->op_ != MPI_REPLACE && (*request)->op_ != MPI_OP_NULL)
      Op::unref(&(*request)->op_);

    (*request)->print_request("Destroying");
    F2C::free_f((*request)->c2f());
    delete *request;
    *request = MPI_REQUEST_NULL;
  } else {
    (*request)->print_request("Decrementing");
  }
}

bool Request::match_common(MPI_Request req, MPI_Request sender, MPI_Request receiver)
{
  xbt_assert(sender, "Cannot match against null sender");
  xbt_assert(receiver, "Cannot match against null receiver");
  XBT_DEBUG("Trying to match %s of sender src %d against %d, tag %d against %d, id %d against %d",
            (req == receiver ? "send" : "recv"), sender->src_, receiver->src_, sender->tag_, receiver->tag_,
            sender->comm_->id(), receiver->comm_->id());

  if ((receiver->comm_->id() == MPI_UNDEFINED || sender->comm_->id() == MPI_UNDEFINED ||
       receiver->comm_->id() == sender->comm_->id()) &&
      ((receiver->src_ == MPI_ANY_SOURCE && (receiver->comm_->group()->rank(sender->src_) != MPI_UNDEFINED)) ||
       receiver->src_ == sender->src_) &&
      ((receiver->tag_ == MPI_ANY_TAG && sender->tag_ >= 0) || receiver->tag_ == sender->tag_)) {
    // we match, we can transfer some values
    if (receiver->src_ == MPI_ANY_SOURCE)
      receiver->real_src_ = sender->src_;
    if (receiver->tag_ == MPI_ANY_TAG)
      receiver->real_tag_ = sender->tag_;
    if ((receiver->flags_ & MPI_REQ_PROBE) == 0 ){
      if (receiver->real_size_ < sender->real_size_){
        XBT_DEBUG("Truncating message - should not happen: receiver size : %zu < sender size : %zu",
                  receiver->real_size_, sender->real_size_);
        receiver->truncated_ = true;
      } else if (receiver->real_size_ > sender->real_size_){
        receiver->real_size_ = sender->real_size_;
      }
    }
    if (sender->detached_)
      receiver->detached_sender_ = sender; // tie the sender to the receiver, as it is detached and has to be freed
                                           // in the receiver
    req->flags_ |= MPI_REQ_MATCHED; // mark as impossible to cancel anymore
    XBT_DEBUG("match succeeded");
    return true;
  }
  return false;
}
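// Matching rules recap (mirroring the test above): a receive matches a send when
// (1) the communicator ids agree, or one of them is still MPI_UNDEFINED,
// (2) the receiver source is MPI_ANY_SOURCE (and the sender belongs to the receiver's
//     group) or exactly the sender's source, and
// (3) the receiver tag is MPI_ANY_TAG, which only matches non-negative user-level tags
//     (so internal negative tags are never caught by wildcards), or exactly the sender tag.
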
void Request::init_buffer(int count){
  void *old_buf = nullptr;
  // FIXME Handle the case of a partial shared malloc.
  // This part handles the problem of non-contiguous memory (for the unserialization at the reception)
  if ((((flags_ & MPI_REQ_RECV) != 0) && ((flags_ & MPI_REQ_ACCUMULATE) != 0)) || (old_type_->flags() & DT_FLAG_DERIVED)) {
    // This part handles the problem of non-contiguous memory
    old_buf = buf_;
    if (count != 0) {
      buf_ = xbt_malloc(count*old_type_->size());
      if ((old_type_->flags() & DT_FLAG_DERIVED) && ((flags_ & MPI_REQ_SEND) != 0)) {
        old_type_->serialize(old_buf, buf_, count);
      }
    }
  }
  old_buf_ = old_buf;
}

bool Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  auto ref = static_cast<MPI_Request>(a);
  auto req = static_cast<MPI_Request>(b);
  return match_common(req, req, ref);
}

bool Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  auto ref = static_cast<MPI_Request>(a);
  auto req = static_cast<MPI_Request>(b);
  return match_common(req, ref, req);
}

void Request::print_request(const char* message) const
{
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
           message, this, buf_, size_, src_, dst_, tag_, flags_);
}

/* factories, to hide the internal flags from the caller */
MPI_Request Request::bsend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_BSEND);
}

MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
}

MPI_Request Request::ssend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}

MPI_Request Request::isend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
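// User-level view of these factories (a sketch of the standard MPI persistent-request
// pattern; nothing is transmitted until MPI_Start):
//   MPI_Request req;
//   MPI_Send_init(buf, count, MPI_INT, dst, tag, MPI_COMM_WORLD, &req); // maps to send_init()
//   for (int iter = 0; iter < niters; iter++) {
//     MPI_Start(&req);                   // maps to Request::start()
//     MPI_Wait(&req, MPI_STATUS_IGNORE); // persistent: the handle survives the wait
//   }
//   MPI_Request_free(&req);
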
MPI_Request Request::rma_send_init(const void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                   MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if (op == MPI_OP_NULL) {
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
  } else {
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED |
                              MPI_REQ_ACCUMULATE, op);
  }
  return request;
}

MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  int source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src)->get_pid();
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     source,
                     simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}

MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                   MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  int source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src)->get_pid();
  if (op == MPI_OP_NULL) {
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
                          dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
  } else {
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
                          dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE, op);
  }
  return request;
}

MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  int source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src)->get_pid();
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     source, simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}

MPI_Request Request::ibsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND);
  if(dst != MPI_PROC_NULL)
    request->start();
  return request;
}

MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND);
  if(dst != MPI_PROC_NULL)
    request->start();
  return request;
}

MPI_Request Request::issend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND);
  if(dst != MPI_PROC_NULL)
    request->start();
  return request;
}

MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  int source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src)->get_pid();
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                        source, simgrid::s4u::this_actor::get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
  if(src != MPI_PROC_NULL)
    request->start();
  return request;
}

int Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = irecv(buf, count, datatype, src, tag, comm);
  int retval = wait(&request,status);
  request = nullptr;
  return retval;
}

void Request::bsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL,
                        tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);

  if(dst != MPI_PROC_NULL)
    request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}

void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL,
                        tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);
  if(dst != MPI_PROC_NULL)
    request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}

void Request::ssend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND);

  if(dst != MPI_PROC_NULL)
    request->start();
  wait(&request,MPI_STATUS_IGNORE);
  request = nullptr;
}

void Request::sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  int source = MPI_PROC_NULL;
  if (src == MPI_ANY_SOURCE)
    source = MPI_ANY_SOURCE;
  else if (src != MPI_PROC_NULL)
    source = comm->group()->actor(src)->get_pid();
  int destination = dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL;

  std::array<MPI_Request, 2> requests;
  std::array<MPI_Status, 2> stats;
  int myid = simgrid::s4u::this_actor::get_pid();
  if ((destination == myid) && (source == myid)) {
    Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    if (status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = source;
      status->MPI_TAG    = recvtag;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = sendcount * sendtype->size();
    }
    return;
  }
  requests[0] = isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  startall(2, requests.data());
  waitall(2, requests.data(), stats.data());
  unref(&requests[0]);
  unref(&requests[1]);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}
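// Posting both halves with startall() before waiting on either lets two processes
// perform a head-to-head sendrecv without deadlocking, which sequential blocking
// send-then-recv calls could not guarantee.
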
void Request::start()
{
  s4u::Mailbox* mailbox;

  xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
  //reinitialize temporary buffer for persistent requests
  if(real_size_ > 0 && flags_ & MPI_REQ_FINISHED){
    buf_ = old_buf_;
    init_buffer(real_size_/old_type_->size());
  }
  flags_ &= ~MPI_REQ_PREPARED;
  flags_ &= ~MPI_REQ_FINISHED;
  this->refcount_++;

  // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
  real_size_ = size_;
  if ((flags_ & MPI_REQ_RECV) != 0) {
    this->print_request("New recv");

    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));

    simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
    if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->lock();

    if (smpi_cfg_async_small_thresh() == 0 && (flags_ & MPI_REQ_RMA) == 0) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < smpi_cfg_async_small_thresh()) {
      //We have to check both mailboxes (because SSEND messages are sent to the large mbox).
      //Begin with the more appropriate one: the small one.
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %s (in case of SSEND)?",
                mailbox->get_cname());
      simgrid::kernel::activity::ActivityImplPtr action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        mailbox = process->mailbox();
        XBT_DEBUG("No, nothing in the small mailbox, test the other one: %s", mailbox->get_cname());
        action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));
        if (action == nullptr) {
          XBT_DEBUG("Still nothing, switch back to the small mailbox: %s", mailbox->get_cname());
          mailbox = process->mailbox_small();
        }
      } else {
        XBT_DEBUG("Yes, there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      simgrid::kernel::activity::ActivityImplPtr action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = process->mailbox();
      } else {
        XBT_DEBUG("Yes, there was something for us in the small mailbox");
      }
    }

    action_ = simcall_comm_irecv(
        process->get_actor()->get_impl(), mailbox->get_impl(), buf_, &real_size_, &match_recv,
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
    XBT_DEBUG("recv simcall posted");

    if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->unlock();
  } else { /* the RECV flag was not set, so this is a send */
    const simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
    xbt_assert(process, "Actor pid=%d is gone??", dst_);
    int rank = src_;
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, dst_, tag_, size_);
    }
    this->print_request("New send");

    void* buf = buf_;
    if ((flags_ & MPI_REQ_SSEND) == 0 &&
        ((flags_ & MPI_REQ_RMA) != 0 || (flags_ & MPI_REQ_BSEND) != 0 ||
         static_cast<int>(size_) < smpi_cfg_detached_send_thresh())) {
      void *oldbuf = nullptr;
      detached_    = true;
      XBT_DEBUG("Send request %p is detached", this);
      this->refcount_++;
      if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
        oldbuf = buf_;
        if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
          if ((smpi_cfg_privatization() != SmpiPrivStrategies::NONE) &&
              (static_cast<char*>(buf_) >= smpi_data_exe_start) &&
              (static_cast<char*>(buf_) < smpi_data_exe_start + smpi_data_exe_size)) {
            XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
            smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_));
          }
          // we need this temporary buffer even for bsend, as it will be released in the copy callback and we don't
          // have a way to differentiate it, so actually ... don't use manually attached buffer space.
          buf = xbt_malloc(size_);
          memcpy(buf,oldbuf,size_);
          XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
        }
      }
    }

    //if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if (detached_ || ((flags_ & (MPI_REQ_ISEND | MPI_REQ_SSEND)) != 0)) { // issend should be treated as isend
      // isend and send timings may be different
      sleeptime = ((flags_ & MPI_REQ_ISEND) != 0)
                      ? simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->oisend(size_)
                      : simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->osend(size_);
    }

    if (sleeptime > 0.0) {
      simgrid::s4u::this_actor::sleep_for(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %f ", size_, sleeptime);
    }
    simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();

    if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->lock();

    if (not(smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < smpi_cfg_async_small_thresh()) { // eager mode
      mailbox = process->mailbox();
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %s?", mailbox->get_cname());
      simgrid::kernel::activity::ActivityImplPtr action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
      if (action == nullptr) {
        if ((flags_ & MPI_REQ_SSEND) == 0) {
          mailbox = process->mailbox_small();
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %s",
                    mailbox->get_cname());
        } else {
          mailbox = process->mailbox_small();
          XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %s?",
                    mailbox->get_cname());
          action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
          if (action == nullptr) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = process->mailbox();
          }
        }
      } else {
        XBT_DEBUG("Yes, there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox();
      XBT_DEBUG("Send request %p is in the large mailbox %s (buf: %p)", this, mailbox->get_cname(), buf_);
    }

    size_t payload_size_ = size_ + 16; // MPI envelope size (tag+dest+communicator)
    action_              = simcall_comm_isend(
        simgrid::kernel::actor::ActorImpl::by_pid(src_), mailbox->get_impl(), payload_size_, -1.0, buf, real_size_,
        &match_send,
        &xbt_free_f, // how to free the userdata if a detached send fails
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this,
        // detach if msg size < eager/rdv switch limit
        detached_);
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (action_ == nullptr) */
    if (action_ != nullptr) {
      boost::static_pointer_cast<kernel::activity::CommImpl>(action_)->set_tracing_category(
          smpi_process()->get_tracing_category());
    }

    if (smpi_cfg_async_small_thresh() != 0 || ((flags_ & MPI_REQ_RMA) != 0))
      mut->unlock();
  }
}
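// Protocol recap for start(), as implemented above (an SMPI design choice, not mandated
// by the MPI standard):
// - messages smaller than the threshold read by smpi_cfg_async_small_thresh() go through
//   the small "eager" mailbox, larger ones and SSENDs through the per-process large
//   mailbox, so receivers probe both before posting;
// - sends smaller than the threshold read by smpi_cfg_detached_send_thresh() are
//   detached: the payload is copied into a temporary buffer so the sender can return
//   immediately;
// - pre-send timing factors (osend/oisend) are injected as sleeps before the simcall.
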
void Request::startall(int count, MPI_Request * requests)
{
  if(requests== nullptr)
    return;

  for(int i = 0; i < count; i++) {
    if(requests[i]->src_ != MPI_PROC_NULL && requests[i]->dst_ != MPI_PROC_NULL)
      requests[i]->start();
  }
}

void Request::cancel()
{
  this->flags_ |= MPI_REQ_CANCELLED;
  if (this->action_ != nullptr)
    (boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(this->action_))->cancel();
}

int Request::test(MPI_Request * request, MPI_Status * status, int* flag) {
  // assume that *request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before)
  // to avoid deadlocks if used as a break condition, such as
  //     while (MPI_Test(request, flag, status) && flag) dostuff...
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
  xbt_assert(*request != MPI_REQUEST_NULL);

  static int nsleeps = 1;
  int ret = MPI_SUCCESS;

  // Are we testing a request meant for non blocking collectives?
  // If so, test all the subrequests.
  if ((*request)->nbc_requests_size_ > 0){
    ret = testall((*request)->nbc_requests_size_, (*request)->nbc_requests_, flag, MPI_STATUSES_IGNORE);
    if (*flag) {
      delete[] (*request)->nbc_requests_;
      (*request)->nbc_requests_size_ = 0;
      unref(request);
    }
    return ret;
  }

  if(smpi_test_sleep > 0)
    simgrid::s4u::this_actor::sleep_for(nsleeps * smpi_test_sleep);

  Status::empty(status);
  *flag = 1;
  if (((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) == 0) {
    if ((*request)->action_ != nullptr && ((*request)->flags_ & MPI_REQ_CANCELLED) == 0){
      try{
        *flag = simcall_comm_test((*request)->action_.get());
      } catch (const Exception&) {
        *flag = 0;
        return ret;
      }
    }
    if (((*request)->flags_ & MPI_REQ_GENERALIZED) && !((*request)->flags_ & MPI_REQ_COMPLETE))
      *flag = 0;
    if (*flag) {
      finish_wait(request, status); // may invalidate *request
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)){
        MPI_Status tmp_status;
        MPI_Status* mystatus;
        if (status == MPI_STATUS_IGNORE) {
          mystatus = &tmp_status;
          Status::empty(mystatus);
        } else {
          mystatus = status;
        }
        ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
      }
      nsleeps = 1; // reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    } else if (smpi_cfg_grow_injected_times()) {
      nsleeps++;
    }
  }
  return ret;
}
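// The pattern that the injected sleep protects against (user-code sketch):
//   int flag = 0;
//   while (!flag)
//     MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
// Without the sleep, simulated time would never advance inside this loop, so the
// matching communication could never be scheduled and the simulation would livelock.
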
int Request::testsome(int incount, MPI_Request requests[], int *count, int *indices, MPI_Status status[])
{
  int error = 0;
  int count_dead = 0;
  int flag = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  *count = 0;
  for (int i = 0; i < incount; i++) {
    if (requests[i] != MPI_REQUEST_NULL && not (requests[i]->flags_ & MPI_REQ_FINISHED)) {
      int ret = test(&requests[i], pstat, &flag);
      if(ret!=MPI_SUCCESS)
        error = 1;
      if(flag) {
        indices[*count] = i;
        if (status != MPI_STATUSES_IGNORE)
          status[*count] = *pstat;
        (*count)++;
        if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
      }
    } else {
      count_dead++;
    }
  }
  if(count_dead==incount)
    *count=MPI_UNDEFINED;
  if(error!=0)
    return MPI_ERR_IN_STATUS;
  else
    return MPI_SUCCESS;
}

int Request::testany(int count, MPI_Request requests[], int *index, int* flag, MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::CommImpl*> comms;
  comms.reserve(count);

  int i;
  *flag = 0;
  int ret = MPI_SUCCESS;
  *index = MPI_UNDEFINED;

  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
  for(i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
      map.push_back(i);
    }
  }
  if (not map.empty()) {
    //multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if(smpi_test_sleep > 0)
      simgrid::s4u::this_actor::sleep_for(nsleeps * smpi_test_sleep);
    try{
      i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
    } catch (const Exception&) {
      XBT_DEBUG("Exception in testany");
      return 0;
    }

    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
      *index = map[i];
      if (requests[*index] != MPI_REQUEST_NULL &&
          (requests[*index]->flags_ & MPI_REQ_GENERALIZED)
          && !(requests[*index]->flags_ & MPI_REQ_COMPLETE)) {
        *flag = 0;
      } else {
        finish_wait(&requests[*index],status);
        if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_GENERALIZED)){
          MPI_Status tmp_status;
          MPI_Status* mystatus;
          if (status == MPI_STATUS_IGNORE) {
            mystatus = &tmp_status;
            Status::empty(mystatus);
          } else {
            mystatus = status;
          }
          ret=(requests[*index]->generalized_funcs)->query_fn((requests[*index]->generalized_funcs)->extra_state, mystatus);
        }

        if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[*index] = MPI_REQUEST_NULL;
        XBT_DEBUG("Testany - returning with index %d", *index);
        *flag = 1;
      }
      nsleeps = 1;
    } else {
      nsleeps++;
    }
  } else {
    XBT_DEBUG("Testany on inactive handles, returning flag=1 but empty status");
    //all requests are null or inactive, return true
    *flag = 1;
    *index = MPI_UNDEFINED;
    Status::empty(status);
  }

  return ret;
}

int Request::testall(int count, MPI_Request requests[], int* outflag, MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag;
  int error = 0;
  *outflag = 1;
  for(int i=0; i<count; i++){
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      int ret = test(&requests[i], pstat, &flag);
      if (flag){
        flag=0;
        requests[i]=MPI_REQUEST_NULL;
      }else{
        *outflag=0;
      }
      if (ret != MPI_SUCCESS)
        error = 1;
    }else{
      Status::empty(pstat);
    }
    if(status != MPI_STATUSES_IGNORE) {
      status[i] = *pstat;
    }
  }
  if (error == 1)
    return MPI_ERR_IN_STATUS;
  else
    return MPI_SUCCESS;
}

void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag=0;
  //FIXME find another way to avoid busy waiting?
  // the issue here is that we have to wait on a nonexistent comm
  while(flag==0){
    iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
  }
}

void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // especially when used as a break condition, such as while (MPI_Iprobe(...)) dostuff...
  // nsleeps is a multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
  // This can speed up the execution of certain applications by an order of magnitude, such as HPL
  static int nsleeps = 1;
  double speed       = s4u::this_actor::get_host()->get_speed();
  double maxrate     = smpi_cfg_iprobe_cpu_usage();
  auto request       = new Request(nullptr, 0, MPI_CHAR,
                             source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
                             simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PROBE);
  if (smpi_iprobe_sleep > 0) {
    /** Compute the number of flops we will sleep **/
    s4u::this_actor::exec_init(/*nsleeps: See comment above */ nsleeps *
                               /*(seconds * flop/s -> total flops)*/ smpi_iprobe_sleep * speed * maxrate)
        ->set_name("iprobe")
        /* Not the entire CPU can be used when iprobing: This is important for
         * the energy consumption caused by polling with iprobes.
         * Note also that the number of flops that was
         * computed above contains a maxrate factor and is hence reduced (maxrate < 1)
         */
        ->set_bound(maxrate*speed)
        ->start()
        ->wait();
  }
  // behave like a receive, but don't do it
  s4u::Mailbox* mailbox;

  request->print_request("New iprobe");
  // We have to test both mailboxes as we don't know if we will receive one or another
  if (smpi_cfg_async_small_thresh() > 0) {
    mailbox = smpi_process()->mailbox_small();
    XBT_DEBUG("Trying to probe the perm recv mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ == nullptr){
    mailbox = smpi_process()->mailbox();
    XBT_DEBUG("Trying to probe the other mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ != nullptr){
    kernel::activity::CommImplPtr sync_comm = boost::static_pointer_cast<kernel::activity::CommImpl>(request->action_);
    const Request* req                      = static_cast<MPI_Request>(sync_comm->src_data_);
    *flag = 1;
    if (status != MPI_STATUS_IGNORE && (req->flags_ & MPI_REQ_PREPARED) == 0) {
      status->MPI_SOURCE = comm->group()->rank(req->src_);
      status->MPI_TAG    = req->tag_;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = req->real_size_;
    }
    nsleeps = 1; // reset the number of sleeps we will do next time
  } else {
    *flag = 0;
    if (smpi_cfg_grow_injected_times())
      nsleeps++;
  }
  unref(&request);
  xbt_assert(request == MPI_REQUEST_NULL);
}
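// Worked example for the injected cost (illustrative numbers only): with
// smpi/iprobe = 1e-4 s, a host speed of 1e9 flop/s and a CPU usage factor of 0.1
// (the value returned by smpi_cfg_iprobe_cpu_usage()), one failed iprobe injects
// 1e-4 * 1e9 * 0.1 = 1e4 flops, bounded at 0.1 * 1e9 = 1e8 flop/s: the same 1e-4 s
// of simulated time, but accounted at 10% CPU load.
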
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
{
  MPI_Request req = *request;
  Status::empty(status);
  if((req->flags_ & MPI_REQ_CANCELLED) != 0 && (req->flags_ & MPI_REQ_MATCHED) == 0) {
    if (status!=MPI_STATUS_IGNORE)
      status->cancelled=1;
    if(req->detached_sender_ != nullptr)
      unref(&(req->detached_sender_));
    unref(request);
    return;
  }

  if ((req->flags_ & (MPI_REQ_PREPARED | MPI_REQ_GENERALIZED | MPI_REQ_FINISHED)) == 0) {
    if (status != MPI_STATUS_IGNORE) {
      if (req->src_== MPI_PROC_NULL || req->dst_== MPI_PROC_NULL){
        Status::empty(status);
        status->MPI_SOURCE = MPI_PROC_NULL;
      } else {
        int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
        status->MPI_SOURCE = req->comm_->group()->rank(src);
        status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
        status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      }
      // this handles the case where the size in the receive differs from the size in the send
      status->count = req->real_size_;
    }
    //detached send will be finished at the other end
    if (not(req->detached_ && ((req->flags_ & MPI_REQ_SEND) != 0))) {
      req->print_request("Finishing");
      MPI_Datatype datatype = req->old_type_;

      // FIXME Handle the case of a partial shared malloc.
      if (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) ||
          (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
        if (not smpi_process()->replaying() && smpi_cfg_privatization() != SmpiPrivStrategies::NONE &&
            static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
            static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
          XBT_VERB("Privatization : We are unserializing to a zone in global memory. Switch data segment ");
          smpi_switch_data_segment(simgrid::s4u::Actor::self());
        }

        if(datatype->flags() & DT_FLAG_DERIVED){
          // This part handles the problem of non-contiguous memory (the unserialization at the reception)
          if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
            datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
          xbt_free(req->buf_);
          req->buf_ = nullptr;
        } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
          if (datatype->size() != 0) {
            int n = req->real_size_ / datatype->size();
            req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
          }
          xbt_free(req->buf_);
          req->buf_ = nullptr;
        }
      }
    }
  }

  if (TRACE_smpi_view_internals() && ((req->flags_ & MPI_REQ_RECV) != 0)) {
    int rank = simgrid::s4u::this_actor::get_pid();
    int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
    TRACE_smpi_recv(src_traced, rank,req->tag_);
  }
  if(req->detached_sender_ != nullptr){
    //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    double sleeptime =
        simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->orecv(req->real_size());
    if (sleeptime > 0.0) {
      simgrid::s4u::this_actor::sleep_for(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size_, sleeptime);
    }
    unref(&(req->detached_sender_));
  }
  if (req->flags_ & MPI_REQ_PERSISTENT)
    req->action_ = nullptr;
  req->flags_ |= MPI_REQ_FINISHED;

  if (req->truncated_) {
    char error_string[MPI_MAX_ERROR_STRING];
    int error_size;
    PMPI_Error_string(MPI_ERR_TRUNCATE, error_string, &error_size);
    MPI_Errhandler err = (req->comm_) ? (req->comm_)->errhandler() : MPI_ERRHANDLER_NULL;
    if (err == MPI_ERRHANDLER_NULL || err == MPI_ERRORS_RETURN)
      XBT_WARN("recv - returned %.*s instead of MPI_SUCCESS", error_size, error_string);
    else if (err == MPI_ERRORS_ARE_FATAL)
      xbt_die("recv - returned %.*s instead of MPI_SUCCESS", error_size, error_string);
    else
      err->call((req->comm_), MPI_ERR_TRUNCATE);
    if (err != MPI_ERRHANDLER_NULL)
      simgrid::smpi::Errhandler::unref(err);
    MC_assert(not MC_is_active()); /* Only fail in MC mode */
  }
  if(req->src_ != MPI_PROC_NULL && req->dst_ != MPI_PROC_NULL)
    unref(request);
}

int Request::wait(MPI_Request * request, MPI_Status * status)
{
  // assume that *request is not MPI_REQUEST_NULL (filtered in PMPI_Wait before)
  xbt_assert(*request != MPI_REQUEST_NULL);

  int ret = MPI_SUCCESS;

  if((*request)->src_ == MPI_PROC_NULL || (*request)->dst_ == MPI_PROC_NULL){
    if (status != MPI_STATUS_IGNORE) {
      Status::empty(status);
      status->MPI_SOURCE = MPI_PROC_NULL;
    }
    (*request)=MPI_REQUEST_NULL;
    return ret;
  }

  // Are we waiting on a request meant for non blocking collectives?
  // If so, wait for all the subrequests.
  if ((*request)->nbc_requests_size_>0){
    ret = waitall((*request)->nbc_requests_size_, (*request)->nbc_requests_, MPI_STATUSES_IGNORE);
    for (int i = 0; i < (*request)->nbc_requests_size_; i++) {
      if((*request)->buf_!=nullptr && (*request)->nbc_requests_[i]!=MPI_REQUEST_NULL){//reduce case
        void * buf=(*request)->nbc_requests_[i]->buf_;
        if((*request)->old_type_->flags() & DT_FLAG_DERIVED)
          buf=(*request)->nbc_requests_[i]->old_buf_;
        if((*request)->nbc_requests_[i]->flags_ & MPI_REQ_RECV ){
          if((*request)->op_!=MPI_OP_NULL){
            int count=(*request)->size_/ (*request)->old_type_->size();
            (*request)->op_->apply(buf, (*request)->buf_, &count, (*request)->old_type_);
          }
          smpi_free_tmp_buffer(static_cast<unsigned char*>(buf));
        }
      }
      if((*request)->nbc_requests_[i]!=MPI_REQUEST_NULL)
        Request::unref(&((*request)->nbc_requests_[i]));
    }
    delete[] (*request)->nbc_requests_;
    (*request)->nbc_requests_size_=0;
    unref(request);
    (*request)=MPI_REQUEST_NULL;
    return ret;
  }

  (*request)->print_request("Waiting");
  if ((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) {
    Status::empty(status);
    return ret;
  }

  if ((*request)->action_ != nullptr){
    try{
      // this is not a detached send
      simcall_comm_wait((*request)->action_.get(), -1.0);
    } catch (const Exception&) {
      XBT_VERB("Request cancelled");
    }
  }

  if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
    if(!((*request)->flags_ & MPI_REQ_COMPLETE)){
      ((*request)->generalized_funcs)->mutex->lock();
      ((*request)->generalized_funcs)->cond->wait(((*request)->generalized_funcs)->mutex);
      ((*request)->generalized_funcs)->mutex->unlock();
    }
    MPI_Status tmp_status;
    MPI_Status* mystatus;
    if (status == MPI_STATUS_IGNORE) {
      mystatus = &tmp_status;
      Status::empty(mystatus);
    } else {
      mystatus = status;
    }
    ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
  }

  if ((*request)->truncated_)
    ret = MPI_ERR_TRUNCATE;

  finish_wait(request, status); // may invalidate *request
  if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & MPI_REQ_NON_PERSISTENT) != 0))
    *request = MPI_REQUEST_NULL;
  return ret;
}

int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
{
  int index = MPI_UNDEFINED;

  if(count > 0) {
    // Wait for a request to complete
    std::vector<simgrid::kernel::activity::CommImpl*> comms;
    std::vector<int> map;
    XBT_DEBUG("Wait for one of %d", count);
    for(int i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED) &&
          not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
        if (requests[i]->action_ != nullptr) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
          map.push_back(i);
        } else {
          // This is a finished detached request, let's return this one
          comms.clear(); // don't do the waitany call afterwards
          index = i;
          finish_wait(&requests[i], status); // cleanup if refcount = 0
          if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[i] = MPI_REQUEST_NULL; // set to null
          break;
        }
      }
    }
    if (not comms.empty()) {
      XBT_DEBUG("Enter waitany for %zu comms", comms.size());
      int i;
      try{
        i = simcall_comm_waitany(comms.data(), comms.size(), -1);
      } catch (const Exception&) {
        XBT_INFO("request cancelled");
        i = -1;
      }

      // not MPI_UNDEFINED, as this is a simix return code
      if (i != -1) {
        index = map[i];
        //in case of an accumulate, we have to wait for the end of all requests to apply the operation, ordered correctly.
        if ((requests[index] == MPI_REQUEST_NULL) ||
            (not((requests[index]->flags_ & MPI_REQ_ACCUMULATE) && (requests[index]->flags_ & MPI_REQ_RECV)))) {
          finish_wait(&requests[index],status);
          if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[index] = MPI_REQUEST_NULL;
        }
      }
    }
  }

  if (index==MPI_UNDEFINED)
    Status::empty(status);

  return index;
}

static int sort_accumulates(const Request* a, const Request* b)
{
  return (a->tag() > b->tag());
}
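// Note: sort_accumulates() is used as a std::sort comparator below, so its int result
// is converted to bool. Sorting the pending accumulate requests by tag before applying
// them makes the order of the reduction operations deterministic across runs.
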
int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  std::vector<MPI_Request> accumulates;
  int index;
  MPI_Status stat;
  MPI_Status *pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
  int retvalue = MPI_SUCCESS;
  //tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (int c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst_ == MPI_PROC_NULL ||
          (requests[c]->flags_ & MPI_REQ_PREPARED)) {
        Status::empty(&status[c]);
      } else if (requests[c]->src_ == MPI_PROC_NULL) {
        Status::empty(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for (int c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      wait(&requests[c],pstat);
      index = c;
    } else {
      index = waitany(count, requests, pstat);

      if (index == MPI_UNDEFINED)
        break;

      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_RECV) &&
          (requests[index]->flags_ & MPI_REQ_ACCUMULATE))
        accumulates.push_back(requests[index]);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  if (not accumulates.empty()) {
    std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
    for (auto& req : accumulates) {
      finish_wait(&req, status);
    }
  }

  return retvalue;
}

int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0;
  int flag = 0;
  int index = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  index = waitany(incount, requests, pstat);
  if(index==MPI_UNDEFINED) return MPI_UNDEFINED;
  if(status != MPI_STATUSES_IGNORE) {
    status[count] = *pstat;
  }
  indices[count] = index;
  count++;
  for (int i = 0; i < incount; i++) {
    if (i!=index && requests[i] != MPI_REQUEST_NULL
        && not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
      test(&requests[i], pstat,&flag);
      if (flag==1){
        indices[count] = i;
        if(status != MPI_STATUSES_IGNORE) {
          status[count] = *pstat;
        }
        if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i]=MPI_REQUEST_NULL;
        count++;
      }
    }
  }
  return count;
}

MPI_Request Request::f2c(int id)
{
  if(id==MPI_FORTRAN_REQUEST_NULL)
    return MPI_REQUEST_NULL;
  return static_cast<MPI_Request>(F2C::lookup()->at(id));
}

void Request::free_f(int id)
{
  if (id != MPI_FORTRAN_REQUEST_NULL) {
    F2C::lookup()->erase(id);
  }
}

int Request::get_status(const Request* req, int* flag, MPI_Status* status)
{
  *flag=0;

  if(req != MPI_REQUEST_NULL && req->action_ != nullptr) {
    req->iprobe(req->comm_->group()->rank(req->src_), req->tag_, req->comm_, flag, status);
    if(*flag)
      return MPI_SUCCESS;
  }
  if (req != MPI_REQUEST_NULL &&
      (req->flags_ & MPI_REQ_GENERALIZED)
      && !(req->flags_ & MPI_REQ_COMPLETE)) {
    *flag=0;
    return MPI_SUCCESS;
  }

  *flag=1;
  if(req != MPI_REQUEST_NULL &&
     status != MPI_STATUS_IGNORE) {
    int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
    status->MPI_SOURCE = req->comm_->group()->rank(src);
    status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
    status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
    status->count = req->real_size_;
  }
  return MPI_SUCCESS;
}

int Request::grequest_start(MPI_Grequest_query_function* query_fn, MPI_Grequest_free_function* free_fn,
                            MPI_Grequest_cancel_function* cancel_fn, void* extra_state, MPI_Request* request)
{
  *request = new Request();
  (*request)->flags_ |= MPI_REQ_GENERALIZED;
  (*request)->flags_ |= MPI_REQ_PERSISTENT;
  (*request)->refcount_ = 1;
  ((*request)->generalized_funcs)              = std::make_unique<smpi_mpi_generalized_request_funcs_t>();
  ((*request)->generalized_funcs)->query_fn    = query_fn;
  ((*request)->generalized_funcs)->free_fn     = free_fn;
  ((*request)->generalized_funcs)->cancel_fn   = cancel_fn;
  ((*request)->generalized_funcs)->extra_state = extra_state;
  ((*request)->generalized_funcs)->cond        = simgrid::s4u::ConditionVariable::create();
  ((*request)->generalized_funcs)->mutex       = simgrid::s4u::Mutex::create();
  return MPI_SUCCESS;
}
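// User-level sketch of the generalized-request machinery above (callback names are
// placeholders):
//   MPI_Request req;
//   MPI_Grequest_start(my_query_fn, my_free_fn, my_cancel_fn, &my_state, &req);
//   ... perform the operation, e.g. in another thread ...
//   MPI_Grequest_complete(req);  // sets MPI_REQ_COMPLETE and signals the condition
//   MPI_Wait(&req, &status);     // wait() blocks on generalized_funcs->cond until then
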
int Request::grequest_complete(MPI_Request request)
{
  if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
    return MPI_ERR_REQUEST;
  request->generalized_funcs->mutex->lock();
  request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
  request->generalized_funcs->cond->notify_one();
  request->generalized_funcs->mutex->unlock();
  return MPI_SUCCESS;
}

void Request::set_nbc_requests(MPI_Request* reqs, int size){
  nbc_requests_size_ = size;
  if (size > 0) {
    nbc_requests_ = reqs;
  } else {
    delete[] reqs;
    nbc_requests_ = nullptr;
  }
}

int Request::get_nbc_requests_size() const
{
  return nbc_requests_size_;
}

MPI_Request* Request::get_nbc_requests() const
{
  return nbc_requests_;
}

} // namespace smpi
} // namespace simgrid