/* Copyright (c) 2007-2018. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_request.hpp"

#include "SmpiHost.hpp"
#include "private.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_op.hpp"
#include "smpi_process.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/simix/ActorImpl.hpp"
#include "xbt/config.hpp"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");

static simgrid::config::Flag<double> smpi_iprobe_sleep(
    "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
    "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);

std::vector<s_smpi_factor_t> smpi_ois_values;

extern void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t);
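
/* A Request represents one point-to-point communication. When the payload is
 * non-contiguous (derived datatype) or has to be accumulated on the receive
 * side, the constructor allocates a temporary contiguous buffer and, on the
 * sending side, serializes the user data into it. */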
Request::Request(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags)
    : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags)
  void *old_buf = nullptr;
  // FIXME Handle the case of a partial shared malloc.
  if ((((flags & RECV) != 0) && ((flags & ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
    // This part handles the problem of non-contiguous memory
    buf_ = xbt_malloc(count*datatype->size());
    if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & SEND) != 0)) {
      datatype->serialize(old_buf, buf_, count);
    // This part handles the problem of non-contiguous memory (for deserialization on the receiving side)
  size_ = datatype->size() * count;
  detached_sender_ = nullptr;
  if (flags & PERSISTENT)

MPI_Comm Request::comm(){
int Request::detached(){
size_t Request::size(){
size_t Request::real_size(){

void Request::unref(MPI_Request* request)
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount_--;
    if((*request)->refcount_<0) xbt_die("wrong refcount");
    if((*request)->refcount_==0){
      Datatype::unref((*request)->old_type_);
      Comm::unref((*request)->comm_);
      (*request)->print_request("Destroying");
      *request = MPI_REQUEST_NULL;
      (*request)->print_request("Decrementing");
    xbt_die("freeing an already free request");
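
/* Matching callbacks handed to the simulated network: a posted receive (ref)
 * matches an incoming send (req) when the sources and tags agree, honoring
 * the MPI_ANY_SOURCE and MPI_ANY_TAG wildcards. On success, the actual
 * source/tag are recorded, and a detached sender is tied to the receiver so
 * it can be freed there. */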
int Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl* ignored)
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d", ref->src_, req->src_, ref->tag_, req->tag_);

  if((ref->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
     && ((ref->tag_ == MPI_ANY_TAG && req->tag_ >=0) || req->tag_ == ref->tag_)){
    // we match, so we can transfer some values
    if(ref->src_ == MPI_ANY_SOURCE)
      ref->real_src_ = req->src_;
    if(ref->tag_ == MPI_ANY_TAG)
      ref->real_tag_ = req->tag_;
    if(ref->real_size_ < req->real_size_)
    if(req->detached_==1)
      ref->detached_sender_=req; // tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
int Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl* ignored)
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d", ref->src_, req->src_, ref->tag_, req->tag_);

  if((req->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
     && ((req->tag_ == MPI_ANY_TAG && ref->tag_ >=0) || req->tag_ == ref->tag_)){
    if(req->src_ == MPI_ANY_SOURCE)
      req->real_src_ = ref->src_;
    if(req->tag_ == MPI_ANY_TAG)
      req->real_tag_ = ref->tag_;
    if(req->real_size_ < ref->real_size_)
    if(ref->detached_==1)
      req->detached_sender_=ref; // tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");

void Request::print_request(const char *message)
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
           message, this, buf_, size_, src_, dst_, tag_, flags_);

/* factories, to hide the internal flags from the caller */
MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                     comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SEND | PREPARED);

MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                     comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);

MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                     comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | ISEND | SEND | PREPARED);

MPI_Request Request::rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(),
                        comm->group()->actor(dst)->getPid(), tag,
                        comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
  request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(),
                        comm->group()->actor(dst)->getPid(), tag,
                        comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);

MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
                     simgrid::s4u::this_actor::getPid(), tag, comm, PERSISTENT | RECV | PREPARED);

MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(), comm->group()->actor(dst)->getPid(), tag, comm,
                        RMA | NON_PERSISTENT | RECV | PREPARED);
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(), comm->group()->actor(dst)->getPid(), tag, comm,
                        RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);

MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
                     simgrid::s4u::this_actor::getPid(), tag, comm, PERSISTENT | RECV | PREPARED);

MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                        comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SEND);

MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                        comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);

MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                        src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
                        simgrid::s4u::this_actor::getPid(), tag, comm, NON_PERSISTENT | RECV);

void Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = irecv(buf, count, datatype, src, tag, comm);
  wait(&request, status);

void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                        comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SEND);
  wait(&request, MPI_STATUS_IGNORE);

void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::getPid(),
                        comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SSEND | SEND);
  wait(&request, MPI_STATUS_IGNORE);

void Request::sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
  MPI_Request requests[2];
  MPI_Status stats[2];
  int myid = simgrid::s4u::this_actor::getPid();
  if ((comm->group()->actor(dst)->getPid() == myid) && (comm->group()->actor(src)->getPid() == myid)){
    Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    if(status != MPI_STATUS_IGNORE){
      status->MPI_SOURCE = src;
      status->MPI_TAG = recvtag;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = sendcount*sendtype->size();
  requests[0] = isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  startall(2, requests);
  waitall(2, requests, stats);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
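
/* Post this request to the simulated network. SMPI gives each process two
 * mailboxes: a small one for eager messages (below smpi/async-small-thresh)
 * and a large one for rendezvous-mode messages, so both sides probe both
 * mailboxes to find a matching peer before deciding where to post. */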
void Request::start()
  smx_mailbox_t mailbox;

  xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
  if ((flags_ & RECV) != 0) {
    this->print_request("New recv");

    simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));

    int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");

    xbt_mutex_t mut = process->mailboxes_mutex();
    if (async_small_thresh != 0 || (flags_ & RMA) != 0)
      xbt_mutex_acquire(mut);

    if (async_small_thresh == 0 && (flags_ & RMA) == 0 ) {
      mailbox = process->mailbox();
    else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) {
      // We have to check both mailboxes (because SSEND messages are sent to the large mailbox).
      // Begin with the more appropriate one: the small one.
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        mailbox = process->mailbox();
        XBT_DEBUG("No, nothing in the small mailbox; test the other one: %p", mailbox);
        action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));
        if (action == nullptr) {
          mailbox = process->mailbox_small();
          XBT_DEBUG("Still nothing, switch back to the small mailbox: %p", mailbox);
        XBT_DEBUG("Yes, there was something for us in the large mailbox");
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = process->mailbox();
      XBT_DEBUG("Yes, there was something for us in the small mailbox");

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    action_ = simcall_comm_irecv(
        process->process()->getImpl(), mailbox, buf_, &real_size_, &match_recv,
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
    XBT_DEBUG("recv simcall posted");

    if (async_small_thresh != 0 || (flags_ & RMA) != 0 )
      xbt_mutex_release(mut);
  } else { /* the RECV flag was not set, so this is a send */
    simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, dst_, tag_, size_);
    this->print_request("New send");

    if ((flags_ & SSEND) == 0 && ( (flags_ & RMA) != 0
        || static_cast<int>(size_) < xbt_cfg_get_int("smpi/send-is-detached-thresh") ) ) {
      void *oldbuf = nullptr;

      XBT_DEBUG("Send request %p is detached", this);

      if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
        if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
          if ((smpi_privatize_global_variables != SmpiPrivStrategies::None) &&
              (static_cast<char*>(buf_) >= smpi_data_exe_start) &&
              (static_cast<char*>(buf_) < smpi_data_exe_start + smpi_data_exe_size)) {
            XBT_DEBUG("Privatization: We are sending from a zone inside global memory. Switch data segment");
            smpi_switch_data_segment(simgrid::s4u::Actor::byPid(src_));
          buf = xbt_malloc(size_);
          memcpy(buf, oldbuf, size_);
          XBT_DEBUG("buf %p copied into %p", oldbuf, buf);

    // If we give control back to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if (detached_ != 0 || ((flags_ & (ISEND | SSEND)) != 0)) { // issend should be treated as isend
      // isend and send timings may be different
      sleeptime = ((flags_ & ISEND) != 0)
                      ? simgrid::s4u::Actor::self()->getHost()->extension<simgrid::smpi::SmpiHost>()->oisend(size_)
                      : simgrid::s4u::Actor::self()->getHost()->extension<simgrid::smpi::SmpiHost>()->osend(size_);
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu: sleep %f", size_, sleeptime);

    int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");

    xbt_mutex_t mut = process->mailboxes_mutex();

    if (async_small_thresh != 0 || (flags_ & RMA) != 0)
      xbt_mutex_acquire(mut);

    if (not(async_small_thresh != 0 || (flags_ & RMA) != 0)) {
      mailbox = process->mailbox();
    } else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
      mailbox = process->mailbox();
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
      smx_activity_t action = simcall_comm_iprobe(mailbox, 1, &match_send, static_cast<void*>(this));
      if (action == nullptr) {
        if ((flags_ & SSEND) == 0){
          mailbox = process->mailbox_small();
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
          mailbox = process->mailbox_small();
          XBT_DEBUG("SSEND: Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
          action = simcall_comm_iprobe(mailbox, 1, &match_send, static_cast<void*>(this));
          if (action == nullptr) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = process->mailbox();
        XBT_DEBUG("Yes, there was something for us in the large mailbox");
      mailbox = process->mailbox();
      XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)", this, mailbox, buf_);

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another send later
    action_ = simcall_comm_isend(
        simgrid::s4u::Actor::byPid(src_)->getImpl(), mailbox, size_, -1.0, buf, real_size_, &match_send,
        &xbt_free_f, // how to free the userdata if a detached send fails
        not process->replaying() ? smpi_comm_copy_data_callback : &smpi_comm_null_copy_buffer_callback, this,
        // detach if msg size < eager/rdv switch limit
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (action_ == nullptr) */
    if (action_ != nullptr)
      simcall_set_category(action_, TRACE_internal_smpi_get_category());
    if (async_small_thresh != 0 || ((flags_ & RMA) != 0))
      xbt_mutex_release(mut);

void Request::startall(int count, MPI_Request * requests)
  if(requests == nullptr)
    return;

  for(int i = 0; i < count; i++) {
    requests[i]->start();

int Request::test(MPI_Request * request, MPI_Status * status) {
  // assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before)
  // to avoid deadlocks if used as a break condition, such as
  //    while (MPI_Test(request, flag, status) && flag) dostuff...
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // nsleeps is a multiplier to the sleep time; each failed test increases it, to speed up execution
  static int nsleeps = 1;
  if(smpi_test_sleep > 0)
    simcall_process_sleep(nsleeps*smpi_test_sleep);

  Status::empty(status);
  if (((*request)->flags_ & PREPARED) == 0) {
    if ((*request)->action_ != nullptr)
      flag = simcall_comm_test((*request)->action_);
      finish_wait(request, status);
      nsleeps = 1; // reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    } else if (xbt_cfg_get_boolean("smpi/grow-injected-times")){
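
/* Nonblocking test of a set of requests: completed ones are reported through
 * indices (and status, when provided) and reset to MPI_REQUEST_NULL unless
 * persistent; MPI_UNDEFINED is returned when every request in the set is
 * already inactive. */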
int Request::testsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for (int i = 0; i < incount; i++) {
    if (requests[i] != MPI_REQUEST_NULL) {
      if (test(&requests[i], pstat)) {
        if (status != MPI_STATUSES_IGNORE)
        if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
  if(count_dead == incount)
    return MPI_UNDEFINED;
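
/* Nonblocking test for the completion of any one request. Only started,
 * non-PREPARED requests are handed to the simulator; 'map' translates a
 * position in that compacted vector back to an index in the caller's
 * requests array. */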
int Request::testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
  std::vector<simgrid::kernel::activity::ActivityImplPtr> comms;
  comms.reserve(count);

  int i;
  *index = MPI_UNDEFINED;

  std::vector<int> map; // Maps all matching comms back to their location in requests
  for(i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & PREPARED)) {
      comms.push_back(requests[i]->action_);
  if (not map.empty()) {
    // nsleeps is a multiplier to the sleep time; each failed testany increases it, to speed up execution
    static int nsleeps = 1;
    if(smpi_test_sleep > 0)
      simcall_process_sleep(nsleeps*smpi_test_sleep);

    i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
    if (i != -1) { // -1 means nothing matched; it is a SIMIX return code, not MPI_UNDEFINED
      finish_wait(&requests[*index], status);
      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & NON_PERSISTENT)) {
        requests[*index] = MPI_REQUEST_NULL;

  // all requests are null or inactive, return true
  Status::empty(status);
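
/* Test all requests at once: true only if every active request in the set
 * has completed, copying out the statuses when the caller provided them. */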
int Request::testall(int count, MPI_Request requests[], MPI_Status status[])
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for(int i = 0; i < count; i++){
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED)) {
      if (test(&requests[i], pstat) != 1){
        requests[i] = MPI_REQUEST_NULL;
      Status::empty(pstat);
  if(status != MPI_STATUSES_IGNORE) {

void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  // FIXME: find another way to avoid busy waiting?
  // The issue here is that we have to wait on a nonexistent comm
  iprobe(source, tag, comm, &flag, status);
  XBT_DEBUG("Busy waiting on probing: %d", flag);

void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // especially when used as a break condition, such as while (MPI_Iprobe(...)) dostuff...
  // nsleeps is a multiplier to the sleep time; each failed iprobe increases it, to speed up execution
  // This can speed up the execution of certain applications by an order of magnitude, such as HPL
  static int nsleeps = 1;
  double speed = simgrid::s4u::Actor::self()->getHost()->getSpeed();
  double maxrate = xbt_cfg_get_double("smpi/iprobe-cpu-usage");
  MPI_Request request = new Request(nullptr, 0, MPI_CHAR,
                                    source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->getPid(),
                                    simgrid::s4u::this_actor::getPid(), tag, comm, PERSISTENT | RECV);
  if (smpi_iprobe_sleep > 0) {
    smx_activity_t iprobe_sleep = simcall_execution_start(
        "iprobe", /* flops to execute */ nsleeps * smpi_iprobe_sleep * speed * maxrate, /* priority */ 1.0,
        /* performance bound */ maxrate * speed, smpi_process()->process()->getImpl()->host);
    simcall_execution_wait(iprobe_sleep);
  // behave like a receive, but don't actually post the simcall
  smx_mailbox_t mailbox;

  request->print_request("New iprobe");
  // We have to test both mailboxes, as we don't know on which one the message will arrive
  if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
    mailbox = smpi_process()->mailbox_small();
    XBT_DEBUG("Trying to probe the permanent receive mailbox");
    request->action_ = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(request));

  if (request->action_ == nullptr){
    mailbox = smpi_process()->mailbox();
    XBT_DEBUG("Trying to probe the other mailbox");
    request->action_ = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(request));

  if (request->action_ != nullptr){
    simgrid::kernel::activity::CommImplPtr sync_comm =
        boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(request->action_);
    MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);

    if(status != MPI_STATUS_IGNORE && (req->flags_ & PREPARED) == 0) {
      status->MPI_SOURCE = comm->group()->rank(req->src_);
      status->MPI_TAG = req->tag_;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = req->real_size_;
    nsleeps = 1; // reset the number of sleeps we will do next time
  if (xbt_cfg_get_boolean("smpi/grow-injected-times"))
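
/* Common completion path for wait/test: fill in the MPI_Status (resolving
 * MPI_ANY_SOURCE/MPI_ANY_TAG to the values actually matched), deserialize or
 * apply the accumulate operation on non-contiguous/ACCUMULATE receives,
 * inject the receive-side timing, and release the detached sender if one was
 * attached. */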
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
  MPI_Request req = *request;
  Status::empty(status);

  if (not((req->detached_ != 0) && ((req->flags_ & SEND) != 0)) && ((req->flags_ & PREPARED) == 0)) {
    if(status != MPI_STATUS_IGNORE) {
      int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
      status->MPI_SOURCE = req->comm_->group()->rank(src);
      status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
      status->MPI_ERROR = req->truncated_ != 0 ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size in the receive differs from the size in the send
      status->count = req->real_size_;

    req->print_request("Finishing");
    MPI_Datatype datatype = req->old_type_;

    // FIXME Handle the case of a partial shared malloc.
    if (((req->flags_ & ACCUMULATE) != 0) ||
        (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
      if (not smpi_process()->replaying() && smpi_privatize_global_variables != SmpiPrivStrategies::None &&
          static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
          static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
        XBT_VERB("Privatization: We are deserializing to a zone in global memory. Switch data segment");
        smpi_switch_data_segment(simgrid::s4u::Actor::self());

      if(datatype->flags() & DT_FLAG_DERIVED){
        // This part handles the problem of non-contiguous memory (deserialization on the receiving side)
        if((req->flags_ & RECV) && datatype->size() != 0)
          datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size(), req->op_);
      } else if(req->flags_ & RECV){ // apply the operation on a contiguous buffer for accumulate
        if(datatype->size() != 0){
          int n = req->real_size_/datatype->size();
          req->op_->apply(req->buf_, req->old_buf_, &n, datatype);

  if (TRACE_smpi_view_internals() && ((req->flags_ & RECV) != 0)){
    int rank = simgrid::s4u::this_actor::getPid();
    int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
    TRACE_smpi_recv(src_traced, rank, req->tag_);
  if(req->detached_sender_ != nullptr){
    // Integrate pseudo-timing for the buffering of small messages; do not bother executing the simcall if it is 0
    double sleeptime =
        simgrid::s4u::Actor::self()->getHost()->extension<simgrid::smpi::SmpiHost>()->orecv(req->real_size());
    simcall_process_sleep(sleeptime);
    XBT_DEBUG("receiving size of %zu: sleep %f", req->real_size_, sleeptime);
    unref(&(req->detached_sender_));
  if(req->flags_ & PERSISTENT)
    req->action_ = nullptr;
  req->flags_ |= FINISHED;
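
/* Block until the communication completes, then fill the status and reset
 * non-persistent handles to MPI_REQUEST_NULL. */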
void Request::wait(MPI_Request * request, MPI_Status * status)
  (*request)->print_request("Waiting");
  if ((*request)->flags_ & PREPARED) {
    Status::empty(status);

  if ((*request)->action_ != nullptr)
    // this is not a detached send
    simcall_comm_wait((*request)->action_, -1.0);

  finish_wait(request, status);
  if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & NON_PERSISTENT) != 0))
    *request = MPI_REQUEST_NULL;
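
/* Wait until any one of the given requests completes. Active requests are
 * pushed into a dynar of simix activities; 'map' remembers which request
 * each activity came from, so the simulator's answer can be translated back
 * into an index in the requests array. */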
int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
  s_xbt_dynar_t comms; // Keep it on stack to save some extra mallocs
  int index = MPI_UNDEFINED;

  // Wait for a request to complete
  xbt_dynar_init(&comms, sizeof(smx_activity_t), [](void*ptr){
    intrusive_ptr_release(*(simgrid::kernel::activity::ActivityImpl**)ptr);
  int *map = xbt_new(int, count);
  XBT_DEBUG("Wait for one of %d", count);
  for(int i = 0; i < count; i++) {
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED) &&
        not(requests[i]->flags_ & FINISHED)) {
      if (requests[i]->action_ != nullptr) {
        XBT_DEBUG("Waiting any %p ", requests[i]);
        intrusive_ptr_add_ref(requests[i]->action_.get());
        xbt_dynar_push_as(&comms, simgrid::kernel::activity::ActivityImpl*, requests[i]->action_.get());
        // This is a finished detached request, let's return this one
        size = 0; // so we free the dynar but don't do the waitany call
        finish_wait(&requests[i], status); // cleanup if refcount = 0
        if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL; // set to null
    XBT_DEBUG("Enter waitany for %lu comms", xbt_dynar_length(&comms));
    int i = simcall_comm_waitany(&comms, -1);
    // not MPI_UNDEFINED, as this is a simix return code
    // In case of an accumulate, we have to wait for the end of all requests to apply the operation, ordered correctly.
    if ((requests[index] == MPI_REQUEST_NULL) ||
        (not((requests[index]->flags_ & ACCUMULATE) && (requests[index]->flags_ & RECV)))) {
      finish_wait(&requests[index], status);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;

  xbt_dynar_free_data(&comms);

  if (index == MPI_UNDEFINED)
    Status::empty(status);
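
/* Accumulate operations must be applied in a deterministic order; sorting
 * the completed requests by tag provides one. */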
static bool sort_accumulates(MPI_Request a, MPI_Request b)
  return (a->tag() > b->tag());
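
/* Wait for all requests. RECV requests carrying an ACCUMULATE operation are
 * deliberately not finalized by waitany(); they are collected here, sorted
 * by tag, and finished last so the operations apply in a well-defined
 * order. */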
int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
  std::vector<MPI_Request> accumulates;
  int index;
  MPI_Status stat;
  MPI_Status *pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
  int retvalue = MPI_SUCCESS;
  // tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (int c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst_ == MPI_PROC_NULL || (requests[c]->flags_ & PREPARED)) {
        Status::empty(&status[c]);
      } else if (requests[c]->src_ == MPI_PROC_NULL) {
        Status::empty(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
  for (int c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      wait(&requests[c], pstat);
      index = waitany(count, (MPI_Request*)requests, pstat);
      if (index == MPI_UNDEFINED)

      if (requests[index] != MPI_REQUEST_NULL
          && (requests[index]->flags_ & RECV)
          && (requests[index]->flags_ & ACCUMULATE))
        accumulates.push_back(requests[index]);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
      if (status != MPI_STATUSES_IGNORE) {
        status[index] = *pstat;
        if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
          retvalue = MPI_ERR_IN_STATUS;

  if (not accumulates.empty()) {
    std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
    for (auto& req : accumulates) {
      finish_wait(&req, status);
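
/* Wait until at least one request completes: loop on waitany(), recording
 * every completed index, until it reports MPI_UNDEFINED. */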
int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
  int count = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for (int i = 0; i < incount; i++) {
    int index = waitany(incount, requests, pstat);
    if(index != MPI_UNDEFINED){
      indices[count] = index;
      if(status != MPI_STATUSES_IGNORE) {
        status[index] = *pstat;
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
      return MPI_UNDEFINED;
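
/* Fortran interoperability: requests are exposed to Fortran as integer
 * handles, resolved through the shared f2c lookup table. */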
MPI_Request Request::f2c(int id) {
  if(id == MPI_FORTRAN_REQUEST_NULL)
    return static_cast<MPI_Request>(MPI_REQUEST_NULL);
  return static_cast<MPI_Request>(F2C::f2c_lookup()->at(get_key_id(key, id)));

  if (F2C::f2c_lookup() == nullptr) {
    F2C::set_f2c_lookup(new std::unordered_map<std::string, F2C*>);
  (*(F2C::f2c_lookup()))[get_key_id(key, F2C::f2c_id())] = this;
  F2C::f2c_id_increment();
  return F2C::f2c_id() - 1;

void Request::free_f(int id)
  if (id != MPI_FORTRAN_REQUEST_NULL) {
    F2C::f2c_lookup()->erase(get_key_id(key, id));