/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "simgrid/s4u/Mutex.hpp"
#include "simgrid/s4u/ConditionVariable.hpp"
#include "smpi_request.hpp"

#include "mc/mc.h"
#include "private.hpp"
#include "simgrid/Exception.hpp"
#include "simgrid/s4u/Exec.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_host.hpp"
#include "smpi_op.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/smpi/include/smpi_actor.hpp"
#include "xbt/config.hpp"

#include <algorithm> // std::sort, used by waitall()
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");

static simgrid::config::Flag<double> smpi_iprobe_sleep(
    "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
    "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
std::vector<s_smpi_factor_t> smpi_ois_values;

extern void (*smpi_comm_copy_data_callback)(simgrid::kernel::activity::CommImpl*, void*, size_t);

namespace simgrid {
namespace smpi {
Request::Request(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags)
    : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags)
{
  void *old_buf = nullptr;
  // FIXME Handle the case of a partial shared malloc.
  if ((((flags & MPI_REQ_RECV) != 0) && ((flags & MPI_REQ_ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
    // This part handles the problem of non-contiguous memory
    old_buf = buf;
    buf_    = xbt_malloc(count * datatype->size());
    if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & MPI_REQ_SEND) != 0)) {
      datatype->serialize(old_buf, buf_, count);
    }
  }
  // This part handles the problem of non-contiguous memory (for the unserialization at reception)
  old_buf_ = old_buf;
  size_    = datatype->size() * count;
  datatype->ref();
  comm_->ref();
  op_              = MPI_REPLACE;
  action_          = nullptr;
  detached_        = 0;
  detached_sender_ = nullptr;
  real_src_        = 0;
  truncated_       = 0;
  real_size_       = 0;
  real_tag_        = 0;
  if (flags & MPI_REQ_PERSISTENT)
    refcount_ = 1;
  else
    refcount_ = 0;
  cancelled_         = 0;
  generalized_funcs  = nullptr;
  nbc_requests_      = nullptr;
  nbc_requests_size_ = 0;
}
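
/* Reference counting: a request created MPI_REQ_PERSISTENT starts with one
 * reference owned by the application, and detached sends take an extra
 * reference that the matching receiver releases in finish_wait(). unref()
 * below destroys the request (and drops its communicator and datatype
 * references) once the count reaches zero. */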
void Request::unref(MPI_Request* request)
{
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount_--;
    if((*request)->refcount_ < 0) {
      (*request)->print_request("wrong refcount");
      xbt_die("Whoops, wrong refcount");
    }
    if((*request)->refcount_ == 0){
      if ((*request)->flags_ & MPI_REQ_GENERALIZED){
        ((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
      } else {
        Comm::unref((*request)->comm_);
        Datatype::unref((*request)->old_type_);
      }
      (*request)->print_request("Destroying");
      delete *request;
      *request = MPI_REQUEST_NULL;
    } else {
      (*request)->print_request("Decrementing");
    }
  } else {
    xbt_die("freeing an already free request");
  }
}
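
/* Matching callbacks, handed to the simulated network layer: a posted receive
 * matches a send when the sources agree (or the receive uses MPI_ANY_SOURCE)
 * and the tags agree (or the receive uses MPI_ANY_TAG against a non-negative
 * tag). On success the wildcard fields are resolved into real_src_/real_tag_,
 * a too-short receive buffer is flagged as truncated, and the matched request
 * is marked uncancellable. */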
int Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d", ref->src_, req->src_, ref->tag_, req->tag_);

  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  if((ref->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
     && ((ref->tag_ == MPI_ANY_TAG && req->tag_ >= 0) || req->tag_ == ref->tag_)){
    // we match, we can transfer some values
    if(ref->src_ == MPI_ANY_SOURCE)
      ref->real_src_ = req->src_;
    if(ref->tag_ == MPI_ANY_TAG)
      ref->real_tag_ = req->tag_;
    if(ref->real_size_ < req->real_size_)
      ref->truncated_ = 1;
    if(req->detached_ == 1)
      ref->detached_sender_ = req; // tie the sender to the receiver, as it is detached and has to be freed in the receiver
    if(req->cancelled_ == 0)
      req->cancelled_ = -1; // mark as uncancellable
    XBT_DEBUG("match succeeded");
    return 1;
  }
  return 0;
}
int Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d", ref->src_, req->src_, ref->tag_, req->tag_);
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");

  if((req->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
     && ((req->tag_ == MPI_ANY_TAG && ref->tag_ >= 0) || req->tag_ == ref->tag_)){
    if(req->src_ == MPI_ANY_SOURCE)
      req->real_src_ = ref->src_;
    if(req->tag_ == MPI_ANY_TAG)
      req->real_tag_ = ref->tag_;
    if(req->real_size_ < ref->real_size_)
      req->truncated_ = 1;
    if(ref->detached_ == 1)
      req->detached_sender_ = ref; // tie the sender to the receiver, as it is detached and has to be freed in the receiver
    if(req->cancelled_ == 0)
      req->cancelled_ = -1; // mark as uncancellable
    XBT_DEBUG("match succeeded");
    return 1;
  }
  return 0;
}
void Request::print_request(const char* message)
{
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
           message, this, buf_, size_, src_, dst_, tag_, flags_);
}
/* factories, to hide the internal flags from the caller */
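/* A sketch of the intended lifecycle of a persistent request built by these
 * factories (mirroring MPI_Send_init semantics; names as used in this file):
 *
 *   MPI_Request req = Request::send_init(buf, count, type, dst, tag, comm);
 *   req->start();                            // or startall() for several
 *   Request::wait(&req, MPI_STATUS_IGNORE);  // req survives: it is persistent
 *   req->start();                            // it can be restarted ...
 *   Request::wait(&req, MPI_STATUS_IGNORE);
 *   Request::unref(&req);                    // ... until the final release
 */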
MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
MPI_Request Request::rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                   MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if(op == MPI_OP_NULL){
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
  } else {
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED |
                              MPI_REQ_ACCUMULATE);
    request->op_ = op;
  }
  return request;
}
MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                     simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}
MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                   MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if(op == MPI_OP_NULL){
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
  } else {
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE);
    request->op_ = op;
  }
  return request;
}
MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                     simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}
MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND);
  request->start();
  return request;
}
MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND);
  request->start();
  return request;
}
MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                        src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                        simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
  request->start();
  return request;
}
void Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = irecv(buf, count, datatype, src, tag, comm);
  wait(&request, status);
  request = MPI_REQUEST_NULL;
}
void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);

  request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = MPI_REQUEST_NULL;
}
void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND);

  request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = MPI_REQUEST_NULL;
}
void Request::sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  MPI_Request requests[2];
  MPI_Status stats[2];
  int myid = simgrid::s4u::this_actor::get_pid();
  if ((comm->group()->actor(dst)->get_pid() == myid) && (comm->group()->actor(src)->get_pid() == myid)) {
    Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    if (status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = src;
      status->MPI_TAG    = recvtag;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = sendcount * sendtype->size();
    }
    return;
  }
  requests[0] = isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  startall(2, requests);
  waitall(2, requests, stats);
  unref(&requests[0]);
  unref(&requests[1]);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}
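
/* start() posts the communication to the simulated network. Two mailboxes per
 * process are used: a "small" one for eager messages (size below
 * smpi/async-small-thresh) and a "large" one for rendezvous messages. Since a
 * matching peer may have posted to either box (SSEND goes to the large one),
 * both sides iprobe the two boxes before choosing where to post, under the
 * mailboxes mutex whenever the small-message path is enabled. */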
void Request::start()
{
  s4u::Mailbox* mailbox;

  xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
  flags_ &= ~MPI_REQ_PREPARED;
  flags_ &= ~MPI_REQ_FINISHED;

  if ((flags_ & MPI_REQ_RECV) != 0) {
    this->print_request("New recv");

    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));

    int async_small_thresh = simgrid::config::get_value<int>("smpi/async-small-thresh");

    simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
    if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->lock();

    if (async_small_thresh == 0 && (flags_ & MPI_REQ_RMA) == 0) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < async_small_thresh) {
      // We have to check both mailboxes (because SSEND messages are sent to the large mailbox).
      // Begin with the more appropriate one: the small one.
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %s (in case of SSEND)?",
                mailbox->get_cname());
      smx_activity_t action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        mailbox = process->mailbox();
        XBT_DEBUG("No, nothing in the small mailbox; test the other one: %s", mailbox->get_cname());
        action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));
        if (action == nullptr) {
          XBT_DEBUG("Still nothing, switch back to the small mailbox: %s", mailbox->get_cname());
          mailbox = process->mailbox_small();
        }
      } else {
        XBT_DEBUG("Yes, there was something for us in the small mailbox");
      }
    } else {
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      smx_activity_t action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = process->mailbox();
      } else {
        XBT_DEBUG("Yes, there was something for us in the small mailbox");
      }
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    real_size_ = size_;
    action_    = simcall_comm_irecv(
        process->get_actor()->get_impl(), mailbox->get_impl(), buf_, &real_size_, &match_recv,
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
    XBT_DEBUG("recv simcall posted");

    if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->unlock();
  } else { /* the RECV flag was not set, so this is a send */
    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
    int rank = src_;
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, dst_, tag_, size_);
    }
    this->print_request("New send");

    void* buf = buf_;
    if ((flags_ & MPI_REQ_SSEND) == 0 &&
        ((flags_ & MPI_REQ_RMA) != 0 ||
         static_cast<int>(size_) < simgrid::config::get_value<int>("smpi/send-is-detached-thresh"))) {
      void *oldbuf = nullptr;
      detached_ = 1;
      XBT_DEBUG("Send request %p is detached", this);
      refcount_++; // the receiver will drop this reference when freeing the detached sender
      if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
        oldbuf = buf_;
        if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
          if ((smpi_privatize_global_variables != SmpiPrivStrategies::NONE) &&
              (static_cast<char*>(buf_) >= smpi_data_exe_start) &&
              (static_cast<char*>(buf_) < smpi_data_exe_start + smpi_data_exe_size)) {
            XBT_DEBUG("Privatization: we are sending from a zone inside global memory. Switch data segment ");
            smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_));
          }
          buf = xbt_malloc(size_);
          memcpy(buf, oldbuf, size_);
          XBT_DEBUG("buf %p copied into %p", oldbuf, buf);
        }
      }
    }

    // if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if (detached_ != 0 || ((flags_ & (MPI_REQ_ISEND | MPI_REQ_SSEND)) != 0)) { // issend should be treated as isend
      // isend and send timings may be different
      sleeptime = ((flags_ & MPI_REQ_ISEND) != 0)
                      ? simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->oisend(size_)
                      : simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->osend(size_);
    }

    if (sleeptime > 0.0) {
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %f ", size_, sleeptime);
    }
    int async_small_thresh = simgrid::config::get_value<int>("smpi/async-small-thresh");

    simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();

    if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->lock();

    if (not(async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
      mailbox = process->mailbox();
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %s?", mailbox->get_cname());
      smx_activity_t action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
      if (action == nullptr) {
        if ((flags_ & MPI_REQ_SSEND) == 0) {
          mailbox = process->mailbox_small();
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %s",
                    mailbox->get_cname());
        } else {
          mailbox = process->mailbox_small();
          XBT_DEBUG("SSEND: is there a corresponding recv already posted in the small mailbox %s?",
                    mailbox->get_cname());
          action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
          if (action == nullptr) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = process->mailbox();
          }
        }
      } else {
        XBT_DEBUG("Yes, there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox();
      XBT_DEBUG("Send request %p is in the large mailbox %s (buf: %p)", this, mailbox->get_cname(), buf_);
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    real_size_ = size_;
    action_    = simcall_comm_isend(
        simgrid::s4u::Actor::by_pid(src_)->get_impl(), mailbox->get_impl(), size_, -1.0, buf, real_size_, &match_send,
        &xbt_free_f, // how to free the userdata if a detached send fails
        not process->replaying() ? smpi_comm_copy_data_callback : &smpi_comm_null_copy_buffer_callback, this,
        // detach if msg size < eager/rdv switch limit
        detached_);
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (action_ == nullptr) */
    if (action_ != nullptr) {
      std::string category = smpi_process()->get_tracing_category();
      simgrid::simix::simcall([this, category] { this->action_->set_category(category); });
    }

    if (async_small_thresh != 0 || ((flags_ & MPI_REQ_RMA) != 0))
      mut->unlock();
  }
}
void Request::startall(int count, MPI_Request * requests)
{
  if(requests == nullptr)
    return;

  for(int i = 0; i < count; i++) {
    requests[i]->start();
  }
}
void Request::cancel()
{
  if(cancelled_ != -1) // -1 means the request was matched and is uncancellable
    cancelled_ = 1;
  if (this->action_ != nullptr)
    (boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(this->action_))->cancel();
}
int Request::test(MPI_Request * request, MPI_Status * status, int* flag) {
  // assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before)
  // to avoid deadlocks if used as a break condition, such as
  //     while (MPI_Test(request, flag, status) && flag) dostuff...
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
  static int nsleeps = 1;
  int ret = MPI_SUCCESS;

  // Are we testing a request meant for non-blocking collectives?
  // If so, test all the subrequests.
  if ((*request)->nbc_requests_size_ > 0){
    ret = testall((*request)->nbc_requests_size_, (*request)->nbc_requests_, flag, MPI_STATUSES_IGNORE);
    if (*flag) {
      delete[] (*request)->nbc_requests_;
      (*request)->nbc_requests_size_ = 0;
      unref(request);
    }
    return ret;
  }

  if(smpi_test_sleep > 0)
    simcall_process_sleep(nsleeps*smpi_test_sleep);

  MPI_Status* mystatus;
  Status::empty(status);
  *flag = 1;
  if (((*request)->flags_ & MPI_REQ_PREPARED) == 0) {
    if ((*request)->action_ != nullptr){
      try {
        *flag = simcall_comm_test((*request)->action_);
      } catch (const Exception&) { // the communication was cancelled
        *flag = 0;
        return ret;
      }
    }
    if (*request != MPI_REQUEST_NULL &&
        ((*request)->flags_ & MPI_REQ_GENERALIZED)
        && !((*request)->flags_ & MPI_REQ_COMPLETE))
      *flag = 0;
    if (*flag) {
      finish_wait(request, status);
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)){
        if(status == MPI_STATUS_IGNORE){
          mystatus = new MPI_Status();
          Status::empty(mystatus);
        } else {
          mystatus = status;
        }
        ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
        if(status == MPI_STATUS_IGNORE)
          delete mystatus;
      }
      nsleeps = 1; // reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    } else if (simgrid::config::get_value<bool>("smpi/grow-injected-times")) {
      nsleeps++;
    }
  }
  return ret;
}
int Request::testsome(int incount, MPI_Request requests[], int *count, int *indices, MPI_Status status[])
{
  int ret = MPI_SUCCESS;
  int error = 0;
  int count_dead = 0;
  int flag = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  *count = 0;
  for (int i = 0; i < incount; i++) {
    if (requests[i] != MPI_REQUEST_NULL) {
      ret = test(&requests[i], pstat, &flag);
      if(ret != MPI_SUCCESS)
        error = 1;
      if(flag) {
        indices[*count] = i;
        if (status != MPI_STATUSES_IGNORE)
          status[*count] = *pstat;
        (*count)++;
        if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
      }
    } else {
      count_dead++;
    }
  }
  if(count_dead == incount)
    *count = MPI_UNDEFINED;
  if(error != 0)
    return MPI_ERR_IN_STATUS;
  else
    return MPI_SUCCESS;
}
int Request::testany(int count, MPI_Request requests[], int *index, int* flag, MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::CommImpl*> comms;
  comms.reserve(count);

  int i;
  *flag = 0;
  int ret = MPI_SUCCESS;
  MPI_Status* mystatus;
  *index = MPI_UNDEFINED;

  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
  for(i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
      map.push_back(i);
    }
  }
  if (not map.empty()) {
    // multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if(smpi_test_sleep > 0)
      simcall_process_sleep(nsleeps*smpi_test_sleep);

    i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!

    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
      *index = map[i];
      if (requests[*index] != MPI_REQUEST_NULL &&
          (requests[*index]->flags_ & MPI_REQ_GENERALIZED)
          && !(requests[*index]->flags_ & MPI_REQ_COMPLETE)) {
        *flag = 0;
      } else {
        finish_wait(&requests[*index], status);
        if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_GENERALIZED)){
          if(status == MPI_STATUS_IGNORE){
            mystatus = new MPI_Status();
            Status::empty(mystatus);
          } else {
            mystatus = status;
          }
          ret = (requests[*index]->generalized_funcs)->query_fn((requests[*index]->generalized_funcs)->extra_state, mystatus);
          if(status == MPI_STATUS_IGNORE)
            delete mystatus;
        }

        if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[*index] = MPI_REQUEST_NULL;
        *flag = 1;
      }
      nsleeps = 1; // reset the number of sleeps we will do next time
    } else {
      nsleeps++;
    }
  } else {
    // all requests are null or inactive, return true
    *flag = 1;
    Status::empty(status);
  }

  return ret;
}
int Request::testall(int count, MPI_Request requests[], int* outflag, MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag;
  int error = 0;
  int ret = MPI_SUCCESS;
  *outflag = 1;
  for(int i = 0; i < count; i++){
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      ret = test(&requests[i], pstat, &flag);
      if (flag){
        flag = 0;
        requests[i] = MPI_REQUEST_NULL;
      } else {
        *outflag = 0;
      }
      if (ret != MPI_SUCCESS)
        error = 1;
    } else {
      Status::empty(pstat);
    }
    if(status != MPI_STATUSES_IGNORE) {
      status[i] = *pstat;
    }
  }
  if(error == 1)
    return MPI_ERR_IN_STATUS;
  else
    return MPI_SUCCESS;
}
void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  // FIXME: find another way to avoid busy waiting?
  // the issue here is that we have to wait on a nonexistent comm
  while (flag == 0) {
    iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
  }
}
void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // especially when used as a break condition, such as while (MPI_Iprobe(...)) dostuff...
  // nsleeps is a multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
  // This can speed up the execution of certain applications by an order of magnitude, such as HPL
  static int nsleeps = 1;
  double speed   = s4u::this_actor::get_host()->get_speed();
  double maxrate = simgrid::config::get_value<double>("smpi/iprobe-cpu-usage");
  MPI_Request request = new Request(nullptr, 0, MPI_CHAR,
                                    source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
                                    simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV);
  if (smpi_iprobe_sleep > 0) {
    /** Compute the number of flops we will sleep for **/
    s4u::this_actor::exec_init(/*nsleeps: See comment above */ nsleeps *
                               /*(seconds * flop/s -> total flops)*/ smpi_iprobe_sleep * speed * maxrate)
        ->set_name("iprobe")
        /* Not the entire CPU can be used when iprobing: This is important for
         * the energy consumption caused by polling with iprobes.
         * Note also that the number of flops that was
         * computed above contains a maxrate factor and is hence reduced (maxrate < 1)
         */
        ->set_bound(maxrate * speed)
        ->start()
        ->wait();
  }
  // behave like a receive, but don't do it
  s4u::Mailbox* mailbox;

  request->print_request("New iprobe");
  // We have to test both mailboxes as we don't know if we will receive in one or the other
  if (simgrid::config::get_value<int>("smpi/async-small-thresh") > 0) {
    mailbox = smpi_process()->mailbox_small();
    XBT_DEBUG("Trying to probe the perm recv mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ == nullptr){
    mailbox = smpi_process()->mailbox();
    XBT_DEBUG("Trying to probe the other mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ != nullptr){
    kernel::activity::CommImplPtr sync_comm = boost::static_pointer_cast<kernel::activity::CommImpl>(request->action_);
    MPI_Request req                         = static_cast<MPI_Request>(sync_comm->src_data_);
    *flag = 1;
    if (status != MPI_STATUS_IGNORE && (req->flags_ & MPI_REQ_PREPARED) == 0) {
      status->MPI_SOURCE = comm->group()->rank(req->src_);
      status->MPI_TAG    = req->tag_;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = req->real_size_;
    }
    nsleeps = 1; // reset the number of sleeps we will do next time
  } else {
    *flag = 0;
    if (simgrid::config::get_value<bool>("smpi/grow-injected-times"))
      nsleeps++;
  }
  unref(&request);
}
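
/* finish_wait() is the common completion path of wait/test/waitany/...: it
 * fills the MPI_Status (resolving wildcards and truncation), unserializes
 * derived datatypes or applies the accumulate operation on the temporary
 * receive buffer, injects the receive-side timing for detached sends, and
 * drops the reference taken when the communication was started. */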
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
{
  MPI_Request req = *request;
  Status::empty(status);

  if (req->cancelled_ == 1){
    if (status != MPI_STATUS_IGNORE)
      status->cancelled = 1;
    if(req->detached_sender_ != nullptr)
      unref(&(req->detached_sender_));
    unref(request);
    *request = MPI_REQUEST_NULL;
    return;
  }

  if (not((req->detached_ != 0) && ((req->flags_ & MPI_REQ_SEND) != 0))
      && ((req->flags_ & MPI_REQ_PREPARED) == 0)
      && ((req->flags_ & MPI_REQ_GENERALIZED) == 0)) {
    if(status != MPI_STATUS_IGNORE) {
      int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
      status->MPI_SOURCE = req->comm_->group()->rank(src);
      status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
      status->MPI_ERROR = req->truncated_ != 0 ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size in the receive differs from the size in the send
      status->count = req->real_size_;
    }
    // Request::get_status(req, &flag, status);

    req->print_request("Finishing");
    MPI_Datatype datatype = req->old_type_;

    // FIXME Handle the case of a partial shared malloc.
    if (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) ||
        (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){

      if (not smpi_process()->replaying() && smpi_privatize_global_variables != SmpiPrivStrategies::NONE &&
          static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
          static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
        XBT_VERB("Privatization: we are unserializing to a zone in global memory. Switch data segment ");
        smpi_switch_data_segment(simgrid::s4u::Actor::self());
      }

      if(datatype->flags() & DT_FLAG_DERIVED){
        // This part handles the problem of non-contiguous memory (the unserialization at reception)
        if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
          datatype->unserialize(req->buf_, req->old_buf_, req->real_size_ / datatype->size(), req->op_);
        xbt_free(req->buf_);
      } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
        if (datatype->size() != 0) {
          int n = req->real_size_ / datatype->size();
          req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
        }
        xbt_free(req->buf_);
      }
    }
  }

  if (TRACE_smpi_view_internals() && ((req->flags_ & MPI_REQ_RECV) != 0)) {
    int rank       = simgrid::s4u::this_actor::get_pid();
    int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
    TRACE_smpi_recv(src_traced, rank, req->tag_);
  }
  if(req->detached_sender_ != nullptr){
    // integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    double sleeptime =
        simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->orecv(req->real_size());
    if (sleeptime > 0.0) {
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size_, sleeptime);
    }
    unref(&(req->detached_sender_));
  }
  if (req->flags_ & MPI_REQ_PERSISTENT)
    req->action_ = nullptr;
  req->flags_ |= MPI_REQ_FINISHED;
  unref(request);
}
int Request::wait(MPI_Request * request, MPI_Status * status)
{
  int ret = MPI_SUCCESS;
  // Are we waiting on a request meant for non-blocking collectives?
  // If so, wait for all the subrequests.
  if ((*request)->nbc_requests_size_ > 0){
    ret = waitall((*request)->nbc_requests_size_, (*request)->nbc_requests_, MPI_STATUSES_IGNORE);
    for (int i = 0; i < (*request)->nbc_requests_size_; i++) {
      if((*request)->nbc_requests_[i] != MPI_REQUEST_NULL)
        Request::unref(&((*request)->nbc_requests_[i]));
    }
    delete[] (*request)->nbc_requests_;
    (*request)->nbc_requests_size_ = 0;
    unref(request);
    (*request) = MPI_REQUEST_NULL;
    return ret;
  }

  (*request)->print_request("Waiting");
  if ((*request)->flags_ & MPI_REQ_PREPARED) {
    Status::empty(status);
    return ret;
  }

  if ((*request)->action_ != nullptr){
    try {
      // this is not a detached send
      simcall_comm_wait((*request)->action_, -1.0);
    } catch (const Exception&) {
      XBT_VERB("Request cancelled");
    }
  }

  if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)){
    MPI_Status* mystatus;
    if(!((*request)->flags_ & MPI_REQ_COMPLETE)){
      ((*request)->generalized_funcs)->mutex->lock();
      ((*request)->generalized_funcs)->cond->wait(((*request)->generalized_funcs)->mutex);
      ((*request)->generalized_funcs)->mutex->unlock();
    }
    if(status == MPI_STATUS_IGNORE){
      mystatus = new MPI_Status();
      Status::empty(mystatus);
    } else {
      mystatus = status;
    }
    ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
    if(status == MPI_STATUS_IGNORE)
      delete mystatus;
  }

  finish_wait(request, status);
  if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & MPI_REQ_NON_PERSISTENT) != 0))
    *request = MPI_REQUEST_NULL;
  return ret;
}
int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::CommImpl*> comms;
  comms.reserve(count);
  int index = MPI_UNDEFINED;

  if(count > 0) {
    // Wait for a request to complete
    std::vector<int> map;
    XBT_DEBUG("Wait for one of %d", count);
    for(int i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED) &&
          not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
        if (requests[i]->action_ != nullptr) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
          map.push_back(i);
        } else {
          // This is a finished detached request, let's return this one
          comms.clear(); // so we don't do the waitany call afterwards
          index = i;
          finish_wait(&requests[i], status); // cleanup if refcount = 0
          if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[i] = MPI_REQUEST_NULL; // set to null
          break;
        }
      }
    }
    if (not comms.empty()) {
      XBT_DEBUG("Enter waitany for %zu comms", comms.size());
      int i = -1;
      try {
        // this is not a detached send
        i = simcall_comm_waitany(comms.data(), comms.size(), -1);
      } catch (const Exception&) {
        XBT_INFO("request %d cancelled ", i);
        return i;
      }

      // not MPI_UNDEFINED, as this is a simix return code
      if (i != -1) {
        index = map[i];
        // in case of an accumulate, we have to wait the end of all requests to apply the operation, ordered correctly.
        if ((requests[index] == MPI_REQUEST_NULL) ||
            (not((requests[index]->flags_ & MPI_REQ_ACCUMULATE) && (requests[index]->flags_ & MPI_REQ_RECV)))) {
          finish_wait(&requests[index], status);
          if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[index] = MPI_REQUEST_NULL;
        }
      }
    }
  }

  if (index == MPI_UNDEFINED)
    Status::empty(status);

  return index;
}
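
/* Accumulate receives cannot be finished in mere completion order: the
 * pending operations must be applied in a deterministic order, so waitall()
 * collects them and finishes them sorted by tag, using sort_accumulates()
 * below as the comparator. */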
static int sort_accumulates(MPI_Request a, MPI_Request b)
{
  return (a->tag() > b->tag());
}
int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  std::vector<MPI_Request> accumulates;
  int index;
  MPI_Status stat;
  MPI_Status *pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
  int retvalue = MPI_SUCCESS;
  // tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (int c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst_ == MPI_PROC_NULL ||
          (requests[c]->flags_ & MPI_REQ_PREPARED)) {
        Status::empty(&status[c]);
      } else if (requests[c]->src_ == MPI_PROC_NULL) {
        Status::empty(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for (int c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      wait(&requests[c], pstat);
      index = c;
    } else {
      index = waitany(count, (MPI_Request*)requests, pstat);

      if (index == MPI_UNDEFINED)
        break;

      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_RECV) &&
          (requests[index]->flags_ & MPI_REQ_ACCUMULATE))
        accumulates.push_back(requests[index]);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  if (not accumulates.empty()) {
    std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
    for (auto& req : accumulates) {
      finish_wait(&req, status);
    }
  }

  return retvalue;
}
int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0;
  int flag = 0;
  int index = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  index = waitany(incount, (MPI_Request*)requests, pstat);
  if(index == MPI_UNDEFINED) return MPI_UNDEFINED;
  if(status != MPI_STATUSES_IGNORE) {
    status[count] = *pstat;
  }
  indices[count] = index;
  count++;
  for (int i = 0; i < incount; i++) {
    if (requests[i] != MPI_REQUEST_NULL) {
      test(&requests[i], pstat, &flag);
      if (flag == 1){
        indices[count] = i;
        if(status != MPI_STATUSES_IGNORE) {
          status[count] = *pstat;
        }
        if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
        count++;
      }
    }
  }
  return count;
}
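
/* Fortran interface: requests are exposed to Fortran as integer handles,
 * translated back and forth through the F2C lookup table below. */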
MPI_Request Request::f2c(int id) {
  char key[KEY_SIZE];
  if(id == MPI_FORTRAN_REQUEST_NULL)
    return static_cast<MPI_Request>(MPI_REQUEST_NULL);
  return static_cast<MPI_Request>(F2C::f2c_lookup()->at(get_key_id(key, id)));
}
int Request::add_f()
{
  if (F2C::f2c_lookup() == nullptr) {
    F2C::set_f2c_lookup(new std::unordered_map<std::string, F2C*>);
  }
  char key[KEY_SIZE];
  (*(F2C::f2c_lookup()))[get_key_id(key, F2C::f2c_id())] = this;
  F2C::f2c_id_increment();
  return F2C::f2c_id() - 1;
}
void Request::free_f(int id)
{
  if (id != MPI_FORTRAN_REQUEST_NULL) {
    char key[KEY_SIZE];
    F2C::f2c_lookup()->erase(get_key_id(key, id));
  }
}
int Request::get_status(MPI_Request req, int* flag, MPI_Status * status){
  *flag = 0;

  if(req != MPI_REQUEST_NULL && req->action_ != nullptr) {
    req->iprobe(req->src_, req->tag_, req->comm_, flag, status);
    if(*flag)
      return MPI_SUCCESS;
  }
  if (req != MPI_REQUEST_NULL &&
      (req->flags_ & MPI_REQ_GENERALIZED)
      && !(req->flags_ & MPI_REQ_COMPLETE)) {
    *flag = 0;
    return MPI_SUCCESS;
  }

  *flag = 1;
  if(req != MPI_REQUEST_NULL && status != MPI_STATUS_IGNORE) {
    int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
    status->MPI_SOURCE = req->comm_->group()->rank(src);
    status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
    status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
    status->count = req->real_size_;
  }
  return MPI_SUCCESS;
}
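
/* Generalized requests (MPI_Grequest_start): completion is driven by the
 * application. grequest_complete() sets MPI_REQ_COMPLETE and signals the
 * condition variable on which wait() blocks; query_fn/free_fn/cancel_fn are
 * the user callbacks stored below. */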
int Request::grequest_start(MPI_Grequest_query_function* query_fn, MPI_Grequest_free_function* free_fn,
                            MPI_Grequest_cancel_function* cancel_fn, void* extra_state, MPI_Request* request)
{
  *request = new Request();
  (*request)->flags_ |= MPI_REQ_GENERALIZED;
  (*request)->flags_ |= MPI_REQ_PERSISTENT;
  (*request)->refcount_ = 1;
  ((*request)->generalized_funcs)              = xbt_new0(s_smpi_mpi_generalized_request_funcs_t, 1);
  ((*request)->generalized_funcs)->query_fn    = query_fn;
  ((*request)->generalized_funcs)->free_fn     = free_fn;
  ((*request)->generalized_funcs)->cancel_fn   = cancel_fn;
  ((*request)->generalized_funcs)->extra_state = extra_state;
  ((*request)->generalized_funcs)->cond        = simgrid::s4u::ConditionVariable::create();
  ((*request)->generalized_funcs)->mutex       = simgrid::s4u::Mutex::create();
  return MPI_SUCCESS;
}
int Request::grequest_complete(MPI_Request request)
{
  if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
    return MPI_ERR_REQUEST;
  request->generalized_funcs->mutex->lock();
  request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
  request->generalized_funcs->cond->notify_one();
  request->generalized_funcs->mutex->unlock();
  return MPI_SUCCESS;
}
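
/* Non-blocking collectives are implemented as a set of point-to-point
 * subrequests attached to a parent request; test() and wait() detect
 * nbc_requests_size_ > 0 and operate on the whole set. */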
void Request::set_nbc_requests(MPI_Request* reqs, int size){
  nbc_requests_ = reqs;
  nbc_requests_size_ = size;
}
int Request::get_nbc_requests_size(){
  return nbc_requests_size_;
}
MPI_Request* Request::get_nbc_requests(){
  return nbc_requests_;
}

} // namespace smpi
} // namespace simgrid