/* Copyright (c) 2007-2018. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_request.hpp"

#include "SmpiHost.hpp"
#include "private.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_op.hpp"
#include "smpi_process.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/simix/ActorImpl.hpp"
#include "xbt/config.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");
static simgrid::config::Flag<double> smpi_iprobe_sleep(
    "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
    "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
std::vector<s_smpi_factor_t> smpi_ois_values;

extern void (*smpi_comm_copy_data_callback)(smx_activity_t, void*, size_t);
Request::Request(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags)
    : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags)
{
  void* old_buf = nullptr;
  // FIXME Handle the case of a partial shared malloc.
  if ((((flags & RECV) != 0) && ((flags & ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
    // This part handles the problem of non-contiguous memory
    old_buf = buf;
    buf_    = xbt_malloc(count * datatype->size());
    if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & SEND) != 0)) {
      datatype->serialize(old_buf, buf_, count);
    }
  }
  // This part handles the problem of non-contiguous memory (for the unserialization at reception)
  old_buf_ = old_buf;
  size_    = datatype->size() * count;
  detached_sender_ = nullptr;
  if (flags & PERSISTENT)
    refcount_ = 1;
  else
    refcount_ = 0;
}
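
/* For derived (non-contiguous) datatypes, and for receives that feed an MPI_Op
 * (ACCUMULATE), the request works on a contiguous scratch buffer: data is serialized
 * into buf_ on the send side (above) and unserialized back into old_buf_ by
 * finish_wait() on the receive side. */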
MPI_Comm Request::comm(){ return comm_; }
int Request::detached(){ return detached_; }
size_t Request::size(){ return size_; }
size_t Request::real_size(){ return real_size_; }
void Request::unref(MPI_Request* request)
{
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount_--;
    if((*request)->refcount_<0) xbt_die("wrong refcount");
    if((*request)->refcount_==0){
      Datatype::unref((*request)->old_type_);
      Comm::unref((*request)->comm_);
      (*request)->print_request("Destroying");
      delete *request;
      *request = MPI_REQUEST_NULL;
    }else{
      (*request)->print_request("Decrementing");
    }
  }else{
    xbt_die("freeing an already free request");
  }
}
int Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl* ignored)
{
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d", ref->src_, req->src_, ref->tag_, req->tag_);

  if((ref->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
     && ((ref->tag_ == MPI_ANY_TAG && req->tag_ >= 0) || req->tag_ == ref->tag_)){
    //we match, we can transfer some values
    if(ref->src_ == MPI_ANY_SOURCE)
      ref->real_src_ = req->src_;
    if(ref->tag_ == MPI_ANY_TAG)
      ref->real_tag_ = req->tag_;
    if(ref->real_size_ < req->real_size_)
      ref->truncated_ = 1;
    if(req->detached_==1)
      ref->detached_sender_=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
    return 1;
  }else
    return 0;
}
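
/* Matching rules, applied symmetrically in match_send() below: MPI_ANY_SOURCE and
 * MPI_ANY_TAG act as wildcards, except that MPI_ANY_TAG only matches non-negative
 * tags; negative tags are reserved for SMPI-internal messages (e.g. collectives) and
 * must never be caught by a user wildcard. */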
int Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl* ignored)
{
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d", ref->src_, req->src_, ref->tag_, req->tag_);

  if((req->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
     && ((req->tag_ == MPI_ANY_TAG && ref->tag_ >= 0) || req->tag_ == ref->tag_)){
    if(req->src_ == MPI_ANY_SOURCE)
      req->real_src_ = ref->src_;
    if(req->tag_ == MPI_ANY_TAG)
      req->real_tag_ = ref->tag_;
    if(req->real_size_ < ref->real_size_)
      req->truncated_ = 1;
    if(ref->detached_==1)
      req->detached_sender_=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
    return 1;
  }else
    return 0;
}
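
/* These matchers are not called directly: start() hands them to the kernel
 * (simcall_comm_irecv/isend/iprobe below, together with `this` as matching data),
 * and the kernel invokes them against each candidate request until one returns 1. */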
void Request::print_request(const char *message)
{
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
           message, this, buf_, size_, src_, dst_, tag_, flags_);
}
/* factories, to hide the internal flags from the caller */
MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm, PERSISTENT | SEND | PREPARED);
}

MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
}

MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm, PERSISTENT | ISEND | SEND | PREPARED);
}
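
/* Typical (sketch) lifecycle of a persistent request built by these factories, as
 * driven from the PMPI layer:
 *   MPI_Request req = Request::send_init(buf, n, MPI_INT, dst, tag, comm); // PREPARED, inactive
 *   Request::startall(1, &req);                                            // posts the communication
 *   Request::wait(&req, MPI_STATUS_IGNORE);                                // completes it; req stays reusable
 *   Request::unref(&req);                                                  // drops the persistent reference
 * (Illustrative only; the exact call sequence lives in the PMPI_* wrappers.) */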
MPI_Request Request::rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                   MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if(op==MPI_OP_NULL){
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
  }else{
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
    request->op_ = op;
  }
  return request;
}
MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                     simgrid::s4u::this_actor::get_pid(), tag, comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                   MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if(op==MPI_OP_NULL){
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm, RMA | NON_PERSISTENT | RECV | PREPARED);
  }else{
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
    request->op_ = op;
  }
  return request;
}
MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                     simgrid::s4u::this_actor::get_pid(), tag, comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm, NON_PERSISTENT | ISEND | SEND);
  request->start();
  return request;
}

MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
  request->start();
  return request;
}
MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                        src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                        simgrid::s4u::this_actor::get_pid(), tag, comm, NON_PERSISTENT | RECV);
  request->start();
  return request;
}
void Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = irecv(buf, count, datatype, src, tag, comm);
  wait(&request, status);
  request = nullptr;
}
void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm, NON_PERSISTENT | SEND);
  request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}
void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm, NON_PERSISTENT | SSEND | SEND);
  request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}
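
/* Note on send flavors: send()/ssend() are simply "create + start + wait". An SSEND
 * differs from a plain SEND in that it can never complete in detached (eager) mode:
 * start() below always routes SSENDs through the rendezvous path, so the call only
 * returns once the matching receive has been posted. */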
void Request::sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  MPI_Request requests[2];
  MPI_Status stats[2];
  int myid = simgrid::s4u::this_actor::get_pid();
  if ((comm->group()->actor(dst)->get_pid() == myid) && (comm->group()->actor(src)->get_pid() == myid)) {
    Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    if (status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = src;
      status->MPI_TAG    = recvtag;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = sendcount * sendtype->size();
    }
    return;
  }
  requests[0] = isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  startall(2, requests);
  waitall(2, requests, stats);
  unref(&requests[0]);
  unref(&requests[1]);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}
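
/* sendrecv is built from two nonblocking requests started together, which avoids the
 * classic deadlock of two peers calling a blocking sendrecv towards each other. The
 * self-message case above is short-circuited to a plain Datatype::copy(). */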
void Request::start()
{
  smx_mailbox_t mailbox;

  xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
  flags_ &= ~PREPARED;
  flags_ &= ~FINISHED;
  refcount_++;
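
  /* What follows implements SMPI's eager/rendezvous switch with two mailboxes per
   * process: messages smaller than "smpi/async-small-thresh" may go through the small
   * (asynchronous) mailbox, larger ones and SSENDs through the regular one. Senders
   * and receivers therefore probe both mailboxes to find where their peer posted
   * first. */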
  if ((flags_ & RECV) != 0) {
    this->print_request("New recv");

    simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));

    int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");

    xbt_mutex_t mut = process->mailboxes_mutex();
    if (async_small_thresh != 0 || (flags_ & RMA) != 0)
      xbt_mutex_acquire(mut);

    if (async_small_thresh == 0 && (flags_ & RMA) == 0) {
      mailbox = process->mailbox();
    } else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) {
      //We have to check both mailboxes (because SSEND messages are sent to the large mbox).
      //Begin with the more appropriate one: the small one.
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        mailbox = process->mailbox();
        XBT_DEBUG("No, nothing in the small mailbox, test the other one: %p", mailbox);
        action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));
        if (action == nullptr) {
          mailbox = process->mailbox_small();
          XBT_DEBUG("Still nothing, switch back to the small mailbox: %p", mailbox);
        }
      } else {
        XBT_DEBUG("Yes, there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = process->mailbox();
      } else {
        XBT_DEBUG("Yes, there was something for us in the small mailbox");
      }
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    real_size_ = size_;
    action_ = simcall_comm_irecv(
        process->process()->get_impl(), mailbox, buf_, &real_size_, &match_recv,
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
    XBT_DEBUG("recv simcall posted");

    if (async_small_thresh != 0 || (flags_ & RMA) != 0)
      xbt_mutex_release(mut);
  } else { /* the RECV flag was not set, so this is a send */
    simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
    int rank = src_;
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, dst_, tag_, size_);
    }
    this->print_request("New send");

    void* buf = buf_;
    if ((flags_ & SSEND) == 0 &&
        ((flags_ & RMA) != 0 || static_cast<int>(size_) < xbt_cfg_get_int("smpi/send-is-detached-thresh"))) {
      void *oldbuf = nullptr;
      detached_ = 1;
      XBT_DEBUG("Send request %p is detached", this);
      refcount_++;
      if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
        oldbuf = buf_;
        if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
          if ((smpi_privatize_global_variables != SmpiPrivStrategies::None) &&
              (static_cast<char*>(buf_) >= smpi_data_exe_start) &&
              (static_cast<char*>(buf_) < smpi_data_exe_start + smpi_data_exe_size)) {
            XBT_DEBUG("Privatization: we are sending from a zone inside global memory. Switch data segment.");
            smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_));
          }
          buf = xbt_malloc(size_);
          memcpy(buf, oldbuf, size_);
          XBT_DEBUG("buf %p copied into %p", oldbuf, buf);
        }
      }
    }

    //if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if (detached_ != 0 || ((flags_ & (ISEND | SSEND)) != 0)) { // issend should be treated as isend
      // isend and send timings may be different
      sleeptime = ((flags_ & ISEND) != 0)
                      ? simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::SmpiHost>()->oisend(size_)
                      : simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::SmpiHost>()->osend(size_);
    }

    if (sleeptime > 0.0) {
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %f ", size_, sleeptime);
    }
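
    /* osend()/oisend() are the per-host send-overhead models from SmpiHost; the
     * injected sleep accounts for the time a real MPI stack spends inside
     * MPI_Send/MPI_Isend before returning control to the user (presumably tuned via
     * the smpi/os and smpi/ois factors). */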
    int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");

    xbt_mutex_t mut = process->mailboxes_mutex();

    if (async_small_thresh != 0 || (flags_ & RMA) != 0)
      xbt_mutex_acquire(mut);

    if (not(async_small_thresh != 0 || (flags_ & RMA) != 0)) {
      mailbox = process->mailbox();
    } else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
      mailbox = process->mailbox();
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
      smx_activity_t action = simcall_comm_iprobe(mailbox, 1, &match_send, static_cast<void*>(this));
      if (action == nullptr) {
        if ((flags_ & SSEND) == 0) {
          mailbox = process->mailbox_small();
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
        } else {
          mailbox = process->mailbox_small();
          XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
          action = simcall_comm_iprobe(mailbox, 1, &match_send, static_cast<void*>(this));
          if (action == nullptr) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = process->mailbox();
          }
        }
      } else {
        XBT_DEBUG("Yes, there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox();
      XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)", this, mailbox, buf_);
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    real_size_ = size_;
    action_ = simcall_comm_isend(
        simgrid::s4u::Actor::by_pid(src_)->get_impl(), mailbox, size_, -1.0, buf, real_size_, &match_send,
        &xbt_free_f, // how to free the userdata if a detached send fails
        not process->replaying() ? smpi_comm_copy_data_callback : &smpi_comm_null_copy_buffer_callback, this,
        // detach if msg size < eager/rendezvous switch limit
        detached_);
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (action_ == nullptr) */
    if (action_ != nullptr)
      simcall_set_category(action_, TRACE_internal_smpi_get_category());

    if (async_small_thresh != 0 || ((flags_ & RMA) != 0))
      xbt_mutex_release(mut);
  }
}
void Request::startall(int count, MPI_Request * requests)
{
  if(requests == nullptr)
    return;

  for(int i = 0; i < count; i++) {
    requests[i]->start();
  }
}
int Request::test(MPI_Request * request, MPI_Status * status) {
  // assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before)
  // to avoid deadlocks if used as a break condition, such as
  //    while (MPI_Test(request, flag, status) && flag) dostuff...
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
  static int nsleeps = 1;
  if(smpi_test_sleep > 0)
    simcall_process_sleep(nsleeps*smpi_test_sleep);

  Status::empty(status);
  int flag = 1;
  if (((*request)->flags_ & PREPARED) == 0) {
    if ((*request)->action_ != nullptr)
      flag = simcall_comm_test((*request)->action_);
    if (flag) {
      finish_wait(request, status);
      nsleeps = 1; //reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    } else if (xbt_cfg_get_boolean("smpi/grow-injected-times")) {
      nsleeps++;
    }
  }
  return flag;
}
int Request::testsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0, count_dead = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for (int i = 0; i < incount; i++) {
    if (requests[i] != MPI_REQUEST_NULL) {
      if (test(&requests[i], pstat)) {
        indices[count++] = i;
        if (status != MPI_STATUSES_IGNORE)
          status[i] = *pstat;
        if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
      }
    } else
      count_dead++;
  }
  return (count_dead == incount) ? MPI_UNDEFINED : count;
}
int Request::testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::ActivityImplPtr> comms;
  comms.reserve(count);
  int i, flag = 0;
  *index = MPI_UNDEFINED;

  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
  for(i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & PREPARED)) {
      comms.push_back(requests[i]->action_);
      map.push_back(i);
    }
  }
  if (not map.empty()) {
    //multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if(smpi_test_sleep > 0)
      simcall_process_sleep(nsleeps*smpi_test_sleep);

    i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
      *index = map[i];
      finish_wait(&requests[*index], status);
      flag = 1;
      nsleeps = 1; //reset the number of sleeps we will do next time
      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & NON_PERSISTENT)) {
        requests[*index] = MPI_REQUEST_NULL;
      }
    } else {
      nsleeps++;
    }
  } else {
    //all requests are null or inactive, return true
    flag = 1;
    Status::empty(status);
  }

  return flag;
}
int Request::testall(int count, MPI_Request requests[], MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag = 1;
  for(int i = 0; i < count; i++){
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED)) {
      if (test(&requests[i], pstat) != 1)
        flag = 0;
      else
        requests[i] = MPI_REQUEST_NULL;
    } else
      Status::empty(pstat);
    if(status != MPI_STATUSES_IGNORE)
      status[i] = *pstat;
  }
  return flag;
}
void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  //FIXME find another way to avoid busy waiting ?
  // the issue here is that we have to wait on a nonexistent comm
  while (flag == 0) {
    iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
  }
}
void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // especially when used as a break condition, such as while (MPI_Iprobe(...)) dostuff...
  // nsleeps is a multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
  // This can speed up the execution of certain applications by an order of magnitude, such as HPL
  static int nsleeps = 1;
  double speed = simgrid::s4u::Actor::self()->get_host()->getSpeed();
  double maxrate = xbt_cfg_get_double("smpi/iprobe-cpu-usage");
  MPI_Request request = new Request(nullptr, 0, MPI_CHAR,
                                    source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
                                    simgrid::s4u::this_actor::get_pid(), tag, comm, PERSISTENT | RECV);
  if (smpi_iprobe_sleep > 0) {
    smx_activity_t iprobe_sleep = simcall_execution_start(
        "iprobe", /* flops to execute */ nsleeps * smpi_iprobe_sleep * speed * maxrate, /* priority */ 1.0,
        /* performance bound */ maxrate * speed, smpi_process()->process()->get_impl()->host);
    simcall_execution_wait(iprobe_sleep);
  }
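
  /* The iprobe "sleep" above is modeled as a bounded computation rather than a plain
   * sleep: it burns simulated CPU at the fraction of the host speed given by
   * "smpi/iprobe-cpu-usage", so polling with MPI_Iprobe is not free. */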
  // behave like a receive, but don't do it
  smx_mailbox_t mailbox;

  request->print_request("New iprobe");
  // We have to test both mailboxes as we don't know if we will receive one or the other
  if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
    mailbox = smpi_process()->mailbox_small();
    XBT_DEBUG("Trying to probe the perm recv mailbox");
    request->action_ = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ == nullptr){
    mailbox = smpi_process()->mailbox();
    XBT_DEBUG("Trying to probe the other mailbox");
    request->action_ = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ != nullptr){
    simgrid::kernel::activity::CommImplPtr sync_comm =
        boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(request->action_);
    MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
    *flag = 1;
    if(status != MPI_STATUS_IGNORE && (req->flags_ & PREPARED) == 0) {
      status->MPI_SOURCE = comm->group()->rank(req->src_);
      status->MPI_TAG    = req->tag_;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = req->real_size_;
    }
    nsleeps = 1; //reset the number of sleeps we will do next time
  } else {
    *flag = 0;
    if (xbt_cfg_get_boolean("smpi/grow-injected-times"))
      nsleeps++;
  }
  unref(&request);
}
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
{
  MPI_Request req = *request;
  Status::empty(status);

  if (not((req->detached_ != 0) && ((req->flags_ & SEND) != 0)) && ((req->flags_ & PREPARED) == 0)) {
    if(status != MPI_STATUS_IGNORE) {
      int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
      status->MPI_SOURCE = req->comm_->group()->rank(src);
      status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
      status->MPI_ERROR = req->truncated_ != 0 ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size of the receive differs from the size of the send
      status->count = req->real_size_;
    }

    req->print_request("Finishing");
    MPI_Datatype datatype = req->old_type_;

    // FIXME Handle the case of a partial shared malloc.
    if (((req->flags_ & ACCUMULATE) != 0) ||
        (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){

      if (not smpi_process()->replaying() && smpi_privatize_global_variables != SmpiPrivStrategies::None &&
          static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
          static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
        XBT_VERB("Privatization: we are unserializing to a zone in global memory. Switch data segment.");
        smpi_switch_data_segment(simgrid::s4u::Actor::self());
      }

      if(datatype->flags() & DT_FLAG_DERIVED){
        // This part handles the problem of non-contiguous memory (the unserialization at reception)
        if((req->flags_ & RECV) && datatype->size() != 0)
          datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size(), req->op_);
        xbt_free(req->buf_);
      }else if(req->flags_ & RECV){ //apply op on contiguous buffer for accumulate
        if(datatype->size() != 0){
          int n = req->real_size_/datatype->size();
          req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
        }
        xbt_free(req->buf_);
      }
    }
  }

  if (TRACE_smpi_view_internals() && ((req->flags_ & RECV) != 0)){
    int rank = simgrid::s4u::this_actor::get_pid();
    int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
    TRACE_smpi_recv(src_traced, rank, req->tag_);
  }
  if(req->detached_sender_ != nullptr){
    //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    double sleeptime =
        simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::SmpiHost>()->orecv(req->real_size());
    if (sleeptime > 0.0) {
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size_, sleeptime);
    }
    unref(&(req->detached_sender_));
  }
  if(req->flags_ & PERSISTENT)
    req->action_ = nullptr;
  req->flags_ |= FINISHED;
  unref(request);
}
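
/* Detached-send cleanup: match_recv() stored the detached sender in detached_sender_
 * precisely so that the receiver can release it here, after charging the orecv()
 * overhead that models the receiver-side buffering of small (eager) messages. */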
void Request::wait(MPI_Request * request, MPI_Status * status)
{
  (*request)->print_request("Waiting");
  if ((*request)->flags_ & PREPARED) {
    Status::empty(status);
    return;
  }

  if ((*request)->action_ != nullptr)
    // this is not a detached send
    simcall_comm_wait((*request)->action_, -1.0);

  finish_wait(request, status);
  if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & NON_PERSISTENT) != 0))
    *request = MPI_REQUEST_NULL;
}
int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
{
  s_xbt_dynar_t comms; // Keep it on stack to save some extra mallocs
  int index = MPI_UNDEFINED;

  if(count > 0) {
    int size = 0;
    // Wait for a request to complete
    xbt_dynar_init(&comms, sizeof(smx_activity_t), [](void*ptr){
      intrusive_ptr_release(*(simgrid::kernel::activity::ActivityImpl**)ptr);
    });
    int *map = xbt_new(int, count);
    XBT_DEBUG("Wait for one of %d", count);
    for(int i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED) &&
          not(requests[i]->flags_ & FINISHED)) {
        if (requests[i]->action_ != nullptr) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          intrusive_ptr_add_ref(requests[i]->action_.get());
          xbt_dynar_push_as(&comms, simgrid::kernel::activity::ActivityImpl*, requests[i]->action_.get());
          map[size] = i;
          size++;
        } else {
          // This is a finished detached request, let's return this one
          size = 0; // so we free the dynar but don't do the waitany call
          index = i;
          finish_wait(&requests[i], status); // cleanup if refcount = 0
          if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & NON_PERSISTENT))
            requests[i] = MPI_REQUEST_NULL; // set to null
          break;
        }
      }
    }
    if (size > 0) {
      XBT_DEBUG("Enter waitany for %lu comms", xbt_dynar_length(&comms));
      int i = simcall_comm_waitany(&comms, -1);

      // not MPI_UNDEFINED, as this is a simix return code
      if (i != -1) {
        index = map[i];
        //in case of an accumulate, we have to wait the end of all requests to apply the operation, ordered correctly.
        if ((requests[index] == MPI_REQUEST_NULL) ||
            (not((requests[index]->flags_ & ACCUMULATE) && (requests[index]->flags_ & RECV)))) {
          finish_wait(&requests[index], status);
          if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
            requests[index] = MPI_REQUEST_NULL;
        }
      }
    }

    xbt_dynar_free_data(&comms);
    xbt_free(map);
  }

  if (index == MPI_UNDEFINED)
    Status::empty(status);

  return index;
}
static int sort_accumulates(MPI_Request a, MPI_Request b)
{
  return (a->tag() > b->tag());
}
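
/* Gives waitall() a deterministic (tag-based) order in which to apply pending
 * accumulate operations, so the result does not depend on message arrival order. */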
int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  std::vector<MPI_Request> accumulates;
  int index;
  MPI_Status stat;
  MPI_Status *pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
  int retvalue = MPI_SUCCESS;
  //tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (int c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst_ == MPI_PROC_NULL || (requests[c]->flags_ & PREPARED)) {
        Status::empty(&status[c]);
      } else if (requests[c]->src_ == MPI_PROC_NULL) {
        Status::empty(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for (int c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      wait(&requests[c], pstat);
      index = c;
    } else {
      index = waitany(count, (MPI_Request*)requests, pstat);
      if (index == MPI_UNDEFINED)
        break;

      if (requests[index] != MPI_REQUEST_NULL
          && (requests[index]->flags_ & RECV)
          && (requests[index]->flags_ & ACCUMULATE))
        accumulates.push_back(requests[index]);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  if (not accumulates.empty()) {
    std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
    for (auto& req : accumulates) {
      finish_wait(&req, status);
    }
  }

  return retvalue;
}
int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for (int i = 0; i < incount; i++) {
    int index = waitany(incount, requests, pstat);
    if(index != MPI_UNDEFINED){
      indices[count] = index;
      count++;
      if(status != MPI_STATUSES_IGNORE) {
        status[index] = *pstat;
      }
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }else{
      return MPI_UNDEFINED;
    }
  }
  return count;
}
MPI_Request Request::f2c(int id) {
  char key[KEY_SIZE];
  if(id == MPI_FORTRAN_REQUEST_NULL)
    return static_cast<MPI_Request>(MPI_REQUEST_NULL);
  return static_cast<MPI_Request>(F2C::f2c_lookup()->at(get_key_id(key, id)));
}

int Request::add_f() {
  if (F2C::f2c_lookup() == nullptr) {
    F2C::set_f2c_lookup(new std::unordered_map<std::string, F2C*>);
  }
  char key[KEY_SIZE];
  (*(F2C::f2c_lookup()))[get_key_id(key, F2C::f2c_id())] = this;
  F2C::f2c_id_increment();
  return F2C::f2c_id() - 1;
}

void Request::free_f(int id)
{
  if (id != MPI_FORTRAN_REQUEST_NULL) {
    char key[KEY_SIZE];
    F2C::f2c_lookup()->erase(get_key_id(key, id));
  }
}
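
/* Fortran interoperability: on the Fortran side an MPI_Request is a plain integer, so
 * each request registers itself in the F2C lookup table (add_f) and is translated back
 * with f2c(); MPI_FORTRAN_REQUEST_NULL maps directly to MPI_REQUEST_NULL. */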