/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "simgrid/s4u/Mutex.hpp"
#include "simgrid/s4u/ConditionVariable.hpp"
#include "smpi_request.hpp"

#include "private.hpp"
#include "simgrid/Exception.hpp"
#include "simgrid/s4u/Exec.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_host.hpp"
#include "smpi_op.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/smpi/include/smpi_actor.hpp"
#include "xbt/config.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");

static simgrid::config::Flag<double> smpi_iprobe_sleep(
    "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
    "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
std::vector<s_smpi_factor_t> smpi_ois_values;

extern void (*smpi_comm_copy_data_callback)(simgrid::kernel::activity::CommImpl*, void*, size_t);

namespace simgrid{
namespace smpi{

Request::Request(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags, MPI_Op op)
    : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags), op_(op)
{
  void *old_buf = nullptr;
  // FIXME Handle the case of a partial shared malloc.
  if ((((flags & MPI_REQ_RECV) != 0) && ((flags & MPI_REQ_ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
    // This part handles the problem of non-contiguous memory
    old_buf = buf;
    buf_    = xbt_malloc(count*datatype->size());
    if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & MPI_REQ_SEND) != 0)) {
      datatype->serialize(old_buf, buf_, count);
    }
  }
  // This part handles the problem of non-contiguous memory (for the unserialization at reception)
  old_buf_ = old_buf;
  size_    = datatype->size() * count;
  datatype->ref();
  comm_->ref();
  if(op != MPI_REPLACE && op != MPI_OP_NULL)
    op_->ref();
  action_          = nullptr;
  detached_        = 0;
  detached_sender_ = nullptr;
  if (flags & MPI_REQ_PERSISTENT)
    refcount_ = 1;
  else
    refcount_ = 0;
  cancelled_         = 0;
  generalized_funcs  = nullptr;
  nbc_requests_      = nullptr;
  nbc_requests_size_ = 0;
}

void Request::unref(MPI_Request* request)
{
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount_--;
    if((*request)->refcount_ < 0) {
      (*request)->print_request("wrong refcount");
      xbt_die("Whoops, wrong refcount");
    }
    if((*request)->refcount_==0){
      if ((*request)->flags_ & MPI_REQ_GENERALIZED){
        ((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
      }else{
        Comm::unref((*request)->comm_);
        Datatype::unref((*request)->old_type_);
      }
      if ((*request)->op_!=MPI_REPLACE && (*request)->op_!=MPI_OP_NULL)
        Op::unref(&(*request)->op_);

      (*request)->print_request("Destroying");
      delete *request;
      *request = MPI_REQUEST_NULL;
    }else{
      (*request)->print_request("Decrementing");
    }
  }else{
    xbt_die("freeing an already free request");
  }
}

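/* The two callbacks below are handed to the simulated network to decide whether a posted receive
 * and an incoming send can be paired. match_recv() is evaluated on the receiver side: 'a' is the
 * posted receive, 'b' a candidate send. MPI_ANY_SOURCE and MPI_ANY_TAG act as wildcards, except
 * that a wildcard tag only matches tags >= 0, negative tags being used internally (e.g. for
 * collectives). On a match, wildcards are resolved into real_src_/real_tag_, and a size mismatch
 * is recorded so that MPI_ERR_TRUNCATE can be reported at completion. */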
int Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src_,req->src_, ref->tag_, req->tag_);

  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  if((ref->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
      && ((ref->tag_ == MPI_ANY_TAG && req->tag_ >=0) || req->tag_ == ref->tag_)){
    //we match, we can transfer some values
    if(ref->src_ == MPI_ANY_SOURCE)
      ref->real_src_ = req->src_;
    if(ref->tag_ == MPI_ANY_TAG)
      ref->real_tag_ = req->tag_;
    if(ref->real_size_ < req->real_size_)
      ref->truncated_ = true;
    if (req->detached_)
      ref->detached_sender_=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    if(req->cancelled_==0)
      req->cancelled_=-1;//mark as uncancellable
    XBT_DEBUG("match succeeded");
    return 1;
  }else
    return 0;
}

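/* match_send() is the sender-side mirror of match_recv(): here 'a' is the send being posted and
 * 'b' a candidate receive, with the same wildcard and truncation rules applied the other way
 * around. */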
int Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src_,req->src_, ref->tag_, req->tag_);
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");

  if((req->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
      && ((req->tag_ == MPI_ANY_TAG && ref->tag_ >=0)|| req->tag_ == ref->tag_)){
    if(req->src_ == MPI_ANY_SOURCE)
      req->real_src_ = ref->src_;
    if(req->tag_ == MPI_ANY_TAG)
      req->real_tag_ = ref->tag_;
    if(req->real_size_ < ref->real_size_)
      req->truncated_ = true;
    if (ref->detached_)
      req->detached_sender_=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    if(req->cancelled_==0)
      req->cancelled_=-1;//mark as uncancellable
    XBT_DEBUG("match succeeded");
    return 1;
  }else
    return 0;
}

void Request::print_request(const char *message)
{
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
       message, this, buf_, size_, src_, dst_, tag_, flags_);
}

/* factories, to hide the internal flags from the caller */
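/* A quick legend for the flag combinations used below:
 *  - MPI_REQ_PERSISTENT: the request survives completion and can be restarted (MPI_Start);
 *  - MPI_REQ_PREPARED: the request was built by an *_init() call but not started yet;
 *  - MPI_REQ_NON_PERSISTENT: the request is destroyed once it completes;
 *  - MPI_REQ_RMA / MPI_REQ_ACCUMULATE: the request stems from one-sided operations. */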
MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
}

MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}

MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}

MPI_Request Request::rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                   MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if(op==MPI_OP_NULL){
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
  }else{
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED |
                              MPI_REQ_ACCUMULATE, op);
  }
  return request;
}

MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                     simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}

MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                   MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if(op==MPI_OP_NULL){
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
  }else{
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE, op);
  }
  return request;
}

MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                     simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}

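/* Unlike the *_init() factories above, which return inactive persistent handles, the functions
 * below build a non-persistent request and fire it immediately with start(). */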
MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND);
  request->start();
  return request;
}

MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND);
  request->start();
  return request;
}

MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                        src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                        simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
  request->start();
  return request;
}

void Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = irecv(buf, count, datatype, src, tag, comm);
  wait(&request,status);
  request = MPI_REQUEST_NULL;
}

void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);
  request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = MPI_REQUEST_NULL;
}

void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND);
  request->start();
  wait(&request,MPI_STATUS_IGNORE);
  request = MPI_REQUEST_NULL;
}

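/* sendrecv() combines a send and a receive. When both peers resolve to the calling actor itself,
 * the data is copied locally and a matching status is synthesized, without going through the
 * mailboxes at all. */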
void Request::sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  MPI_Request requests[2];
  MPI_Status stats[2];
  int myid = simgrid::s4u::this_actor::get_pid();
  if ((comm->group()->actor(dst)->get_pid() == myid) && (comm->group()->actor(src)->get_pid() == myid)) {
    Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    if (status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = src;
      status->MPI_TAG    = recvtag;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = sendcount * sendtype->size();
    }
    return;
  }
  requests[0] = isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  startall(2, requests);
  waitall(2, requests, stats);
  unref(&requests[0]);
  unref(&requests[1]);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}

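/* start() posts the communication into the simulated network. Each process owns two mailboxes:
 * a "small" one for eager messages (shorter than smpi/async-small-thresh) and a regular one for
 * rendezvous traffic. Each side probes both mailboxes for an already-posted peer before deciding
 * where to post itself, mimicking the eager/rendezvous protocol switch of real MPI
 * implementations. */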
void Request::start()
{
  s4u::Mailbox* mailbox;

  xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
  flags_ &= ~MPI_REQ_PREPARED;
  flags_ &= ~MPI_REQ_FINISHED;
  this->refcount_++;

  if ((flags_ & MPI_REQ_RECV) != 0) {
    this->print_request("New recv");

    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));

    int async_small_thresh = simgrid::config::get_value<int>("smpi/async-small-thresh");

    simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
    if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->lock();

    if (async_small_thresh == 0 && (flags_ & MPI_REQ_RMA) == 0) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < async_small_thresh) {
      //We have to check both mailboxes (because SSEND messages are sent to the large mbox).
      //begin with the more appropriate one: the small one.
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %s (in case of SSEND)?",
                mailbox->get_cname());
      smx_activity_t action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        mailbox = process->mailbox();
        XBT_DEBUG("No, nothing in the small mailbox; test the other one: %s", mailbox->get_cname());
        action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));
        if (action == nullptr) {
          XBT_DEBUG("Still nothing, switch back to the small mailbox: %s", mailbox->get_cname());
          mailbox = process->mailbox_small();
        }
      } else {
        XBT_DEBUG("yes there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      smx_activity_t action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = process->mailbox();
      } else {
        XBT_DEBUG("yes there was something for us in the small mailbox");
      }
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    real_size_ = size_;
    action_    = simcall_comm_irecv(
        process->get_actor()->get_impl(), mailbox->get_impl(), buf_, &real_size_, &match_recv,
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
    XBT_DEBUG("recv simcall posted");

    if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->unlock();
  } else { /* the RECV flag was not set, so this is a send */
    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
    int rank = src_;
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, dst_, tag_, size_);
    }
    this->print_request("New send");

    void* buf = buf_;
    if ((flags_ & MPI_REQ_SSEND) == 0 &&
        ((flags_ & MPI_REQ_RMA) != 0 ||
         static_cast<int>(size_) < simgrid::config::get_value<int>("smpi/send-is-detached-thresh"))) {
      void *oldbuf = nullptr;
      detached_    = 1;
      XBT_DEBUG("Send request %p is detached", this);
      refcount_++;
      if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
        oldbuf = buf_;
        if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
          if ((smpi_privatize_global_variables != SmpiPrivStrategies::NONE) &&
              (static_cast<char*>(buf_) >= smpi_data_exe_start) &&
              (static_cast<char*>(buf_) < smpi_data_exe_start + smpi_data_exe_size)) {
            XBT_DEBUG("Privatization: We are sending from a zone inside global memory. Switch data segment ");
            smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_));
          }
          buf = xbt_malloc(size_);
          memcpy(buf,oldbuf,size_);
          XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
        }
      }
    }

    //if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if (detached_ || ((flags_ & (MPI_REQ_ISEND | MPI_REQ_SSEND)) != 0)) { // issend should be treated as isend
      // isend and send timings may be different
      sleeptime = ((flags_ & MPI_REQ_ISEND) != 0)
                      ? simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->oisend(size_)
                      : simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->osend(size_);
    }

    if(sleeptime > 0.0){
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %f ", size_, sleeptime);
    }

    int async_small_thresh = simgrid::config::get_value<int>("smpi/async-small-thresh");

    simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();

    if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->lock();

    if (not(async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
      mailbox = process->mailbox();
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %s?", mailbox->get_cname());
      smx_activity_t action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
      if (action == nullptr) {
        if ((flags_ & MPI_REQ_SSEND) == 0) {
          mailbox = process->mailbox_small();
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %s",
                    mailbox->get_cname());
        } else {
          mailbox = process->mailbox_small();
          XBT_DEBUG("SSEND: Is there a corresponding recv already posted in the small mailbox %s?",
                    mailbox->get_cname());
          action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
          if (action == nullptr) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = process->mailbox();
          }
        }
      } else {
        XBT_DEBUG("Yes there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox();
      XBT_DEBUG("Send request %p is in the large mailbox %s (buf: %p)", this, mailbox->get_cname(), buf_);
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    real_size_ = size_;
    action_    = simcall_comm_isend(
        simgrid::s4u::Actor::by_pid(src_)->get_impl(), mailbox->get_impl(), size_, -1.0, buf, real_size_, &match_send,
        &xbt_free_f, // how to free the userdata if a detached send fails
        not process->replaying() ? smpi_comm_copy_data_callback : &smpi_comm_null_copy_buffer_callback, this,
        // detach if msg size < eager/rdv switch limit
        detached_);
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (action_ == nullptr) */
    if (action_ != nullptr) {
      boost::static_pointer_cast<kernel::activity::CommImpl>(action_)->set_tracing_category(
          smpi_process()->get_tracing_category());
    }

    if (async_small_thresh != 0 || ((flags_ & MPI_REQ_RMA) != 0))
      mut->unlock();
  }
}

void Request::startall(int count, MPI_Request * requests)
{
  if(requests== nullptr)
    return;

  for(int i = 0; i < count; i++) {
    requests[i]->start();
  }
}

void Request::cancel()
{
  if (this->action_ != nullptr)
    (boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(this->action_))->cancel();
}

int Request::test(MPI_Request * request, MPI_Status * status, int* flag) {
  // assume that *request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before)
  // to avoid deadlocks if used as a break condition, such as
  //     while (MPI_Test(request, flag, status) && flag) dostuff...
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // nsleeps is a multiplier to the sleeptime, to increase speed of execution: each failed test will increase it
  static int nsleeps = 1;
  int ret = MPI_SUCCESS;

  // Are we testing a request meant for non blocking collectives?
  // If so, test all the subrequests.
  if ((*request)->nbc_requests_size_>0){
    ret = testall((*request)->nbc_requests_size_, (*request)->nbc_requests_, flag, MPI_STATUSES_IGNORE);
    if(*flag){
      delete[] (*request)->nbc_requests_;
      (*request)->nbc_requests_size_=0;
      unref(request);
    }
    return ret;
  }

  if(smpi_test_sleep > 0)
    simcall_process_sleep(nsleeps*smpi_test_sleep);

  MPI_Status* mystatus;
  Status::empty(status);
  *flag = 1;
  if (((*request)->flags_ & MPI_REQ_PREPARED) == 0) {
    if ((*request)->action_ != nullptr){
      *flag = simcall_comm_test((*request)->action_);
    }
    if (*request != MPI_REQUEST_NULL &&
        ((*request)->flags_ & MPI_REQ_GENERALIZED)
        && !((*request)->flags_ & MPI_REQ_COMPLETE))
      *flag = 0;
    if (*flag) {
      finish_wait(request,status);
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)){
        if(status==MPI_STATUS_IGNORE){
          mystatus=new MPI_Status();
          Status::empty(mystatus);
        }else{
          mystatus=status;
        }
        ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
        if(status==MPI_STATUS_IGNORE)
          delete mystatus;
      }
      nsleeps=1;//reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    } else if (simgrid::config::get_value<bool>("smpi/grow-injected-times")) {
      nsleeps++;
    }
  }
  return ret;
}

int Request::testsome(int incount, MPI_Request requests[], int *count, int *indices, MPI_Status status[])
{
  int ret = MPI_SUCCESS;
  int error = 0, count_dead = 0, flag = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  *count = 0;
  for (int i = 0; i < incount; i++) {
    if (requests[i] != MPI_REQUEST_NULL) {
      ret = test(&requests[i], pstat, &flag);
      if (ret != MPI_SUCCESS)
        error = 1;
      if (flag) {
        indices[*count] = i;
        if (status != MPI_STATUSES_IGNORE)
          status[*count] = *pstat;
        (*count)++;
        if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
      }
    } else
      count_dead++;
  }
  if(count_dead==incount)
    *count=MPI_UNDEFINED;
  if (error != 0)
    return MPI_ERR_IN_STATUS;
  else
    return MPI_SUCCESS;
}

int Request::testany(int count, MPI_Request requests[], int *index, int* flag, MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::CommImpl*> comms;
  comms.reserve(count);

  int i;
  *flag = 0;
  int ret = MPI_SUCCESS;
  MPI_Status* mystatus;
  *index = MPI_UNDEFINED;

  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
  for(i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
      map.push_back(i);
    }
  }
  if (not map.empty()) {
    //multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if(smpi_test_sleep > 0)
      simcall_process_sleep(nsleeps*smpi_test_sleep);

    i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!

    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
      *index = map[i];
      if (requests[*index] != MPI_REQUEST_NULL &&
          (requests[*index]->flags_ & MPI_REQ_GENERALIZED)
          && !(requests[*index]->flags_ & MPI_REQ_COMPLETE)) {
        *flag = 0;
      } else {
        finish_wait(&requests[*index],status);
        if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_GENERALIZED)){
          if(status==MPI_STATUS_IGNORE){
            mystatus=new MPI_Status();
            Status::empty(mystatus);
          }else{
            mystatus=status;
          }
          ret=(requests[*index]->generalized_funcs)->query_fn((requests[*index]->generalized_funcs)->extra_state, mystatus);
          if(status==MPI_STATUS_IGNORE)
            delete mystatus;
        }
        if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[*index] = MPI_REQUEST_NULL;
        *flag = 1;
      }
      nsleeps = 1; //reset the number of sleeps we will do next time
    } else {
      nsleeps++;
    }
  } else {
    //all requests are null or inactive, return true
    *flag = 1;
    Status::empty(status);
  }

  return ret;
}

int Request::testall(int count, MPI_Request requests[], int* outflag, MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag;
  int error = 0;
  int ret = MPI_SUCCESS;
  *outflag = 1;
  for(int i=0; i<count; i++){
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      ret = test(&requests[i], pstat, &flag);
      if (flag){
        flag=0;
        requests[i]=MPI_REQUEST_NULL;
      }else{
        *outflag=0;
      }
      if (ret != MPI_SUCCESS)
        error = 1;
    }else{
      Status::empty(pstat);
    }
    if(status != MPI_STATUSES_IGNORE) {
      status[i] = *pstat;
    }
  }
  if (error != 0)
    return MPI_ERR_IN_STATUS;
  else
    return MPI_SUCCESS;
}

void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag=0;
  //FIXME find another way to avoid busy waiting?
  // the issue here is that we have to wait on a nonexistent comm
  while(flag==0){
    iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy waiting on probing: %d", flag);
  }
}

void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls,
  // especially when used as a break condition, such as while (MPI_Iprobe(...)) dostuff...
  // nsleeps is a multiplier to the sleeptime, to increase speed of execution: each failed iprobe will increase it.
  // This can speed up the execution of certain applications by an order of magnitude, such as HPL.
  static int nsleeps = 1;
  double speed       = s4u::this_actor::get_host()->get_speed();
  double maxrate     = simgrid::config::get_value<double>("smpi/iprobe-cpu-usage");
  MPI_Request request = new Request(nullptr, 0, MPI_CHAR,
                                    source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
                                    simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV);
  if (smpi_iprobe_sleep > 0) {
    /** Compute the number of flops we will sleep **/
    s4u::this_actor::exec_init(/*nsleeps: See comment above */ nsleeps *
                               /*(seconds * flop/s -> total flops)*/ smpi_iprobe_sleep * speed * maxrate)
        ->set_name("iprobe")
        /* Not the entire CPU can be used when iprobing: This is important for
         * the energy consumption caused by polling with iprobes.
         * Note also that the number of flops that was
         * computed above contains a maxrate factor and is hence reduced (maxrate < 1)
         */
        ->set_bound(maxrate*speed)
        ->start()
        ->wait();
  }
  // behave like a receive, but don't do it
  s4u::Mailbox* mailbox;

  request->print_request("New iprobe");
  // We have to test both mailboxes, as we don't know on which one we will receive
  if (simgrid::config::get_value<int>("smpi/async-small-thresh") > 0) {
    mailbox = smpi_process()->mailbox_small();
    XBT_DEBUG("Trying to probe the perm recv mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ == nullptr){
    mailbox = smpi_process()->mailbox();
    XBT_DEBUG("trying to probe the other mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ != nullptr){
    kernel::activity::CommImplPtr sync_comm = boost::static_pointer_cast<kernel::activity::CommImpl>(request->action_);
    MPI_Request req                         = static_cast<MPI_Request>(sync_comm->src_data_);
    *flag = 1;
    if (status != MPI_STATUS_IGNORE && (req->flags_ & MPI_REQ_PREPARED) == 0) {
      status->MPI_SOURCE = comm->group()->rank(req->src_);
      status->MPI_TAG    = req->tag_;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = req->real_size_;
    }
    nsleeps = 1;//reset the number of sleeps we will do next time
  } else {
    *flag = 0;
    if (simgrid::config::get_value<bool>("smpi/grow-injected-times"))
      nsleeps++;
  }
  unref(&request);
}

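/* finish_wait() is the common completion path of the test/wait family: it fills the user-visible
 * MPI_Status, undoes what the constructor set up for derived datatypes (unserialization) or
 * applies the pending accumulate operation on contiguous buffers, injects the receiver-side
 * timing of detached messages, and drops the reference held on behalf of the detached sender. */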
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
{
  MPI_Request req = *request;
  Status::empty(status);

  if (req->cancelled_==1){
    if (status!=MPI_STATUS_IGNORE)
      status->cancelled=1;
    if(req->detached_sender_ != nullptr)
      unref(&(req->detached_sender_));
    unref(request);
    *request = MPI_REQUEST_NULL;
    return;
  }

  if (not(req->detached_ && ((req->flags_ & MPI_REQ_SEND) != 0)) && ((req->flags_ & MPI_REQ_PREPARED) == 0) &&
      ((req->flags_ & MPI_REQ_GENERALIZED) == 0)) {
    if(status != MPI_STATUS_IGNORE) {
      int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
      status->MPI_SOURCE = req->comm_->group()->rank(src);
      status->MPI_TAG    = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
      status->MPI_ERROR  = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size in the receive differs from the size in the send
      status->count = req->real_size_;
    }

    req->print_request("Finishing");
    MPI_Datatype datatype = req->old_type_;

    // FIXME Handle the case of a partial shared malloc.
    if (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) ||
        (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){

      if (not smpi_process()->replaying() && smpi_privatize_global_variables != SmpiPrivStrategies::NONE &&
          static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
          static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
        XBT_VERB("Privatization: We are unserializing to a zone in global memory. Switch data segment");
        smpi_switch_data_segment(simgrid::s4u::Actor::self());
      }

      if(datatype->flags() & DT_FLAG_DERIVED){
        // This part handles the problem of non-contiguous memory (the unserialization at the reception)
        if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
          datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
        xbt_free(req->buf_);
      } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
        if (datatype->size() != 0) {
          int n = req->real_size_ / datatype->size();
          req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
        }
        xbt_free(req->buf_);
      }
    }
  }

  if (TRACE_smpi_view_internals() && ((req->flags_ & MPI_REQ_RECV) != 0)) {
    int rank       = simgrid::s4u::this_actor::get_pid();
    int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
    TRACE_smpi_recv(src_traced, rank,req->tag_);
  }
  if(req->detached_sender_ != nullptr){
    //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    double sleeptime =
        simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->orecv(req->real_size());
    if (sleeptime > 0.0) {
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size_, sleeptime);
    }
    unref(&(req->detached_sender_));
  }
  if (req->flags_ & MPI_REQ_PERSISTENT)
    req->action_ = nullptr;
  req->flags_ |= MPI_REQ_FINISHED;
  unref(request);
}

int Request::wait(MPI_Request * request, MPI_Status * status)
{
  int ret = MPI_SUCCESS;
  // Are we waiting on a request meant for non blocking collectives?
  // If so, wait for all the subrequests.
  if ((*request)->nbc_requests_size_>0){
    ret = waitall((*request)->nbc_requests_size_, (*request)->nbc_requests_, MPI_STATUSES_IGNORE);
    for (int i = 0; i < (*request)->nbc_requests_size_; i++) {
      if((*request)->buf_!=nullptr && (*request)->nbc_requests_[i]!=MPI_REQUEST_NULL){//reduce case
        void * buf=(*request)->nbc_requests_[i]->buf_;
        if((*request)->old_type_->flags() & DT_FLAG_DERIVED)
          buf=(*request)->nbc_requests_[i]->old_buf_;
        if((*request)->nbc_requests_[i]->flags_ & MPI_REQ_RECV ){
          if((*request)->op_!=MPI_OP_NULL){
            int count=(*request)->size_/ (*request)->old_type_->size();
            (*request)->op_->apply(buf, (*request)->buf_, &count, (*request)->old_type_);
          }
          smpi_free_tmp_buffer(buf);
        }
      }
      if((*request)->nbc_requests_[i]!=MPI_REQUEST_NULL)
        Request::unref(&((*request)->nbc_requests_[i]));
    }
    delete[] (*request)->nbc_requests_;
    (*request)->nbc_requests_size_=0;
    unref(request);
    (*request)=MPI_REQUEST_NULL;
    return ret;
  }

  (*request)->print_request("Waiting");
  if ((*request)->flags_ & MPI_REQ_PREPARED) {
    Status::empty(status);
    return ret;
  }

  if ((*request)->action_ != nullptr){
    try{
      // this is not a detached send
      simcall_comm_wait((*request)->action_, -1.0);
    } catch (const Exception&) {
      XBT_VERB("Request cancelled");
    }
  }

  if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)){
    MPI_Status* mystatus;
    if(!((*request)->flags_ & MPI_REQ_COMPLETE)){
      ((*request)->generalized_funcs)->mutex->lock();
      ((*request)->generalized_funcs)->cond->wait(((*request)->generalized_funcs)->mutex);
      ((*request)->generalized_funcs)->mutex->unlock();
    }
    if(status==MPI_STATUS_IGNORE){
      mystatus=new MPI_Status();
      Status::empty(mystatus);
    }else{
      mystatus=status;
    }
    ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
    if(status==MPI_STATUS_IGNORE)
      delete mystatus;
  }

  finish_wait(request,status);
  if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & MPI_REQ_NON_PERSISTENT) != 0))
    *request = MPI_REQUEST_NULL;
  return ret;
}

int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::CommImpl*> comms;
  comms.reserve(count);
  int index = MPI_UNDEFINED;

  if(count > 0) {
    // Wait for a request to complete
    std::vector<int> map;
    XBT_DEBUG("Wait for one of %d", count);
    for(int i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED) &&
          not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
        if (requests[i]->action_ != nullptr) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
          map.push_back(i);
        } else {
          // This is a finished detached request, let's return this one
          comms.clear(); // so we don't do the waitany call below
          index = i;
          finish_wait(&requests[i], status); // cleanup if refcount = 0
          if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[i] = MPI_REQUEST_NULL; // set to null
          break;
        }
      }
    }
    if (not comms.empty()) {
      XBT_DEBUG("Enter waitany for %zu comms", comms.size());
      int i = -1;
      try{
        // this is not a detached send
        i = simcall_comm_waitany(comms.data(), comms.size(), -1);
      } catch (const Exception&) {
        XBT_INFO("request %d cancelled ", i);
        return i;
      }

      // not MPI_UNDEFINED, as this is a simix return code
      if (i != -1) {
        index = map[i];
        //in case of an accumulate, we have to wait the end of all requests to apply the operation, ordered correctly.
        if ((requests[index] == MPI_REQUEST_NULL) ||
            (not((requests[index]->flags_ & MPI_REQ_ACCUMULATE) && (requests[index]->flags_ & MPI_REQ_RECV)))) {
          finish_wait(&requests[index],status);
          if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[index] = MPI_REQUEST_NULL;
        }
      }
    }
  }

  if (index==MPI_UNDEFINED)
    Status::empty(status);

  return index;
}

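/* Accumulate receives cannot be finished in arbitrary completion order: they all target the same
 * destination buffer, so the pending operations must be applied deterministically. Sorting the
 * completed requests by tag before finishing them gives every rank the same application order. */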
static int sort_accumulates(MPI_Request a, MPI_Request b)
{
  return (a->tag() > b->tag());
}

int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  std::vector<MPI_Request> accumulates;
  int index;
  MPI_Status stat;
  MPI_Status *pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
  int retvalue = MPI_SUCCESS;
  //tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (int c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst_ == MPI_PROC_NULL ||
          (requests[c]->flags_ & MPI_REQ_PREPARED)) {
        Status::empty(&status[c]);
      } else if (requests[c]->src_ == MPI_PROC_NULL) {
        Status::empty(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for (int c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      wait(&requests[c],pstat);
      index = c;
    } else {
      index = waitany(count, (MPI_Request*)requests, pstat);

      if (index == MPI_UNDEFINED)
        break;

      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_RECV) &&
          (requests[index]->flags_ & MPI_REQ_ACCUMULATE))
        accumulates.push_back(requests[index]);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  if (not accumulates.empty()) {
    std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
    for (auto& req : accumulates) {
      finish_wait(&req, status);
    }
  }

  return retvalue;
}

int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0;
  int flag = 0;
  int index = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  index = waitany(incount, (MPI_Request*)requests, pstat);
  if(index==MPI_UNDEFINED) return MPI_UNDEFINED;
  if(status != MPI_STATUSES_IGNORE) {
    status[count] = *pstat;
  }
  indices[count] = index;
  count++;
  for (int i = 0; i < incount; i++) {
    if((requests[i] != MPI_REQUEST_NULL)) {
      test(&requests[i], pstat,&flag);
      if (flag==1){
        indices[count] = i;
        if(status != MPI_STATUSES_IGNORE) {
          status[count] = *pstat;
        }
        if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i]=MPI_REQUEST_NULL;
        count++;
      }
    }
  }
  return count;
}

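/* Fortran interoperability: Fortran sees requests as plain integers, so every C++ request is
 * registered in a process-wide lookup table. add_f() hands out the next integer id, f2c()
 * translates an id back into a pointer, and free_f() retires it. */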
MPI_Request Request::f2c(int id) {
  char key[KEY_SIZE];
  if(id==MPI_FORTRAN_REQUEST_NULL)
    return static_cast<MPI_Request>(MPI_REQUEST_NULL);
  return static_cast<MPI_Request>(F2C::f2c_lookup()->at(get_key_id(key, id)));
}

int Request::add_f()
{
  if (F2C::f2c_lookup() == nullptr) {
    F2C::set_f2c_lookup(new std::unordered_map<std::string, F2C*>);
  }
  char key[KEY_SIZE];
  (*(F2C::f2c_lookup()))[get_key_id(key, F2C::f2c_id())] = this;
  F2C::f2c_id_increment();
  return F2C::f2c_id()-1;
}

void Request::free_f(int id)
{
  if (id != MPI_FORTRAN_REQUEST_NULL) {
    char key[KEY_SIZE];
    F2C::f2c_lookup()->erase(get_key_id(key, id));
  }
}

int Request::get_status(MPI_Request req, int* flag, MPI_Status * status){
  *flag=0;

  if(req != MPI_REQUEST_NULL && req->action_ != nullptr) {
    req->iprobe(req->src_, req->tag_, req->comm_, flag, status);
    if(*flag)
      return MPI_SUCCESS;
  }
  if (req != MPI_REQUEST_NULL &&
      (req->flags_ & MPI_REQ_GENERALIZED)
      && !(req->flags_ & MPI_REQ_COMPLETE)) {
    *flag=0;
    return MPI_SUCCESS;
  }

  *flag=1;
  if(req != MPI_REQUEST_NULL &&
     status != MPI_STATUS_IGNORE) {
    int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
    status->MPI_SOURCE = req->comm_->group()->rank(src);
    status->MPI_TAG    = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
    status->MPI_ERROR  = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
    status->count      = req->real_size_;
  }
  return MPI_SUCCESS;
}

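/* Generalized requests (MPI_Grequest_start) let user code create a request whose completion is
 * signalled explicitly by the application. A minimal usage sketch, with hypothetical user
 * callbacks my_query/my_free/my_cancel and state my_state:
 *
 *   MPI_Request req;
 *   MPI_Grequest_start(my_query, my_free, my_cancel, my_state, &req); // ends up in grequest_start()
 *   // ... later, from another actor:
 *   MPI_Grequest_complete(req);                                       // ends up in grequest_complete()
 *   MPI_Wait(&req, &status); // wakes up on the condition variable notified by grequest_complete()
 *
 * Completion is synchronized through the mutex/condition pair stored in generalized_funcs. */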
int Request::grequest_start(MPI_Grequest_query_function *query_fn, MPI_Grequest_free_function *free_fn,
                            MPI_Grequest_cancel_function *cancel_fn, void *extra_state, MPI_Request *request){
  *request = new Request();
  (*request)->flags_ |= MPI_REQ_GENERALIZED;
  (*request)->flags_ |= MPI_REQ_PERSISTENT;
  (*request)->refcount_ = 1;
  ((*request)->generalized_funcs)              = xbt_new0(s_smpi_mpi_generalized_request_funcs_t, 1);
  ((*request)->generalized_funcs)->query_fn    = query_fn;
  ((*request)->generalized_funcs)->free_fn     = free_fn;
  ((*request)->generalized_funcs)->cancel_fn   = cancel_fn;
  ((*request)->generalized_funcs)->extra_state = extra_state;
  ((*request)->generalized_funcs)->cond        = simgrid::s4u::ConditionVariable::create();
  ((*request)->generalized_funcs)->mutex       = simgrid::s4u::Mutex::create();
  return MPI_SUCCESS;
}

int Request::grequest_complete(MPI_Request request){
  if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
    return MPI_ERR_REQUEST;
  request->generalized_funcs->mutex->lock();
  request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
  request->generalized_funcs->cond->notify_one();
  request->generalized_funcs->mutex->unlock();
  return MPI_SUCCESS;
}

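/* Non-blocking collectives are implemented as a set of point-to-point sub-requests attached to a
 * parent request; test() and wait() detect such parents through nbc_requests_size_ and recurse
 * over the whole set. */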
void Request::set_nbc_requests(MPI_Request* reqs, int size){
  nbc_requests_      = reqs;
  nbc_requests_size_ = size;
}

int Request::get_nbc_requests_size(){
  return nbc_requests_size_;
}

MPI_Request* Request::get_nbc_requests(){
  return nbc_requests_;
}

} // namespace smpi
} // namespace simgrid