1 /* Copyright (c) 2007-2018. The SimGrid Team. All rights reserved. */
3 /* This program is free software; you can redistribute it and/or modify it
4 * under the terms of the license (GNU LGPL) which comes with this package. */
6 #include "smpi_request.hpp"
8 #include "SmpiHost.hpp"
10 #include "private.hpp"
11 #include "smpi_comm.hpp"
12 #include "smpi_datatype.hpp"
13 #include "smpi_op.hpp"
14 #include "smpi_process.hpp"
15 #include "src/kernel/activity/CommImpl.hpp"
16 #include "src/mc/mc_replay.hpp"
17 #include "src/simix/ActorImpl.hpp"
21 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (requests)");
23 static simgrid::config::Flag<double> smpi_iprobe_sleep(
24 "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
25 static simgrid::config::Flag<double> smpi_test_sleep(
26 "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
28 std::vector<s_smpi_factor_t> smpi_ois_values;
30 extern void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t);
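/* Request constructor: when the receive will feed an accumulate operation, or when the datatype is
 * derived (non-contiguous), the user buffer cannot be used directly. A contiguous scratch buffer of
 * count * size bytes is allocated instead and, on the send side, the data is serialized into it. */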
35 Request::Request(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags)
36 : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags)
38 void *old_buf = nullptr;
39 // FIXME Handle the case of a partial shared malloc.
40 if ((((flags & RECV) != 0) && ((flags & ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
41 // This part handles the problem of non-contiguous memory
46 buf_ = xbt_malloc(count*datatype->size());
47 if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & SEND) != 0)) {
48 datatype->serialize(old_buf, buf_, count);
52 // This part handles the problem of non-contiguous memory (for deserialization on the receiving side)
54 size_ = datatype->size() * count;
59 detached_sender_ = nullptr;
64 if (flags & PERSISTENT)
71 MPI_Comm Request::comm(){
91 int Request::detached(){
95 size_t Request::size(){
99 size_t Request::real_size(){
103 void Request::unref(MPI_Request* request)
105 if((*request) != MPI_REQUEST_NULL){
106 (*request)->refcount_--;
107 if((*request)->refcount_<0) xbt_die("wrong refcount");
108 if((*request)->refcount_==0){
109 Datatype::unref((*request)->old_type_);
110 Comm::unref((*request)->comm_);
111 (*request)->print_request("Destroying");
113 *request = MPI_REQUEST_NULL;
115 (*request)->print_request("Decrementing");
118 xbt_die("freeing an already free request");
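/* Matching callbacks used by the simulated network. A receive request matches an incoming send when
 * the sources are equal (or the receive uses MPI_ANY_SOURCE) and the tags are equal (or the receive
 * uses MPI_ANY_TAG and the send carries a non-negative, i.e. user-level, tag). On a match, wildcard
 * fields are resolved into real_src_/real_tag_, and a detached sender is tied to the receiver so
 * that it can be freed on the receiving side. */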
122 int Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl* ignored)
124 MPI_Request ref = static_cast<MPI_Request>(a);
125 MPI_Request req = static_cast<MPI_Request>(b);
126 XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src_,req->src_, ref->tag_, req->tag_);
128 xbt_assert(ref, "Cannot match recv against null reference");
129 xbt_assert(req, "Cannot match recv against null request");
130 if((ref->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
131 && ((ref->tag_ == MPI_ANY_TAG && req->tag_ >=0) || req->tag_ == ref->tag_)){
132 //we match, we can transfer some values
133 if(ref->src_ == MPI_ANY_SOURCE)
134 ref->real_src_ = req->src_;
135 if(ref->tag_ == MPI_ANY_TAG)
136 ref->real_tag_ = req->tag_;
137 if(ref->real_size_ < req->real_size_)
139 if(req->detached_==1)
140 ref->detached_sender_=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
141 XBT_DEBUG("match succeeded");
146 int Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl* ignored)
148 MPI_Request ref = static_cast<MPI_Request>(a);
149 MPI_Request req = static_cast<MPI_Request>(b);
150 XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src_,req->src_, ref->tag_, req->tag_);
151 xbt_assert(ref, "Cannot match send against null reference");
152 xbt_assert(req, "Cannot match send against null request");
154 if((req->src_ == MPI_ANY_SOURCE || req->src_ == ref->src_)
155 && ((req->tag_ == MPI_ANY_TAG && ref->tag_ >=0)|| req->tag_ == ref->tag_)){
156 if(req->src_ == MPI_ANY_SOURCE)
157 req->real_src_ = ref->src_;
158 if(req->tag_ == MPI_ANY_TAG)
159 req->real_tag_ = ref->tag_;
160 if(req->real_size_ < ref->real_size_)
162 if(ref->detached_==1)
163 req->detached_sender_=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
164 XBT_DEBUG("match succeeded");
170 void Request::print_request(const char *message)
172 XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
173 message, this, buf_, size_, src_, dst_, tag_, flags_);
177 /* factories, to hide the internal flags from the caller */
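/* For instance, a persistent send would go roughly like this (sketch, not an actual caller in this file):
 *   MPI_Request req = Request::send_init(buf, count, type, dst, tag, comm); // PERSISTENT | SEND | PREPARED
 *   Request::startall(1, &req);                                             // actually posts the communication
 *   Request::wait(&req, MPI_STATUS_IGNORE);
 * The PREPARED flag marks a request that has been created but not started yet. */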
178 MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
181 return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
182 comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SEND | PREPARED);
185 MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
187 return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
188 comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
191 MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
193 return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
194 comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | ISEND | SEND | PREPARED);
198 MPI_Request Request::rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
201 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
203 request = new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, comm->group()->actor(src)->getPid(),
204 comm->group()->actor(dst)->getPid(), tag,
205 comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
207 request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(),
208 comm->group()->actor(dst)->getPid(), tag,
209 comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
215 MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
217 return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
218 src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
219 simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV | PREPARED);
222 MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
225 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
227 request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(), comm->group()->actor(dst)->getPid(), tag, comm,
228 RMA | NON_PERSISTENT | RECV | PREPARED);
230 request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(), comm->group()->actor(dst)->getPid(), tag, comm,
231 RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
237 MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
239 return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
240 src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
241 simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV | PREPARED);
244 MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
246 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
247 request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
248 comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SEND);
253 MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
255 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
256 request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
257 comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
263 MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
265 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
266 request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
267 src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
268 simgrid::s4u::Actor::self()->getPid(), tag, comm, NON_PERSISTENT | RECV);
273 void Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
275 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
276 request = irecv(buf, count, datatype, src, tag, comm);
277 wait(&request,status);
281 void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
283 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
284 request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
285 comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SEND);
288 wait(&request, MPI_STATUS_IGNORE);
292 void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
294 MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
295 request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
296 comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SSEND | SEND);
299 wait(&request,MPI_STATUS_IGNORE);
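/* sendrecv: when both peers resolve to the calling process itself, the payload is simply copied
 * locally and the status filled in; otherwise an isend and an irecv are posted and both are waited for. */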
303 void Request::sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
304 void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
305 MPI_Comm comm, MPI_Status * status)
307 MPI_Request requests[2];
309 int myid = simgrid::s4u::Actor::self()->getPid();
310 if ((comm->group()->actor(dst)->getPid() == myid) && (comm->group()->actor(src)->getPid() == myid)){
311 Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
312 if(status !=MPI_STATUS_IGNORE){
313 status->MPI_SOURCE = src;
314 status->MPI_TAG = recvtag;
315 status->MPI_ERROR = MPI_SUCCESS;
316 status->count = sendcount*sendtype->size();
320 requests[0] = isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
321 requests[1] = irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
322 startall(2, requests);
323 waitall(2, requests, stats);
326 if(status != MPI_STATUS_IGNORE) {
327 // Copy receive status
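/* start() actually posts the communication in SIMIX. Mailbox selection implements the eager vs.
 * rendezvous trade-off: for RMA, or when the message is smaller than smpi/async-small-thresh, the
 * per-process small mailbox may be used, otherwise the regular one; both are probed since the peer
 * may have posted in either. Sends below smpi/send-is-detached-thresh (and not SSEND) are detached:
 * the payload is copied so the sender can return immediately, and osend/oisend timings are injected
 * to model the send overhead. */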
332 void Request::start()
334 smx_mailbox_t mailbox;
336 xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
341 if ((flags_ & RECV) != 0) {
342 this->print_request("New recv");
344 simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));
346 int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
348 xbt_mutex_t mut = process->mailboxes_mutex();
349 if (async_small_thresh != 0 || (flags_ & RMA) != 0)
350 xbt_mutex_acquire(mut);
352 if (async_small_thresh == 0 && (flags_ & RMA) == 0 ) {
353 mailbox = process->mailbox();
355 else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) {
356 // We have to check both mailboxes (because SSEND messages are sent to the large mailbox).
357 // Begin with the more appropriate one: the small one.
358 mailbox = process->mailbox_small();
359 XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
360 smx_activity_t action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));
362 if (action == nullptr) {
363 mailbox = process->mailbox();
364 XBT_DEBUG("No, nothing in the small mailbox test the other one : %p", mailbox);
365 action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));
366 if (action == nullptr) {
367 XBT_DEBUG("Still nothing, switch back to the small mailbox : %p", mailbox);
368 mailbox = process->mailbox_small();
371 XBT_DEBUG("yes there was something for us in the large mailbox");
374 mailbox = process->mailbox_small();
375 XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
376 smx_activity_t action = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(this));
378 if (action == nullptr) {
379 XBT_DEBUG("No, nothing in the permanent receive mailbox");
380 mailbox = process->mailbox();
382 XBT_DEBUG("yes there was something for us in the small mailbox");
386 // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
388 action_ = simcall_comm_irecv(
389 process->process()->getImpl(), mailbox, buf_, &real_size_, &match_recv,
390 process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
391 XBT_DEBUG("recv simcall posted");
393 if (async_small_thresh != 0 || (flags_ & RMA) != 0 )
394 xbt_mutex_release(mut);
395 } else { /* the RECV flag was not set, so this is a send */
396 simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));
398 if (TRACE_smpi_view_internals()) {
399 TRACE_smpi_send(rank, rank, dst_, tag_, size_);
401 this->print_request("New send");
404 if ((flags_ & SSEND) == 0 && ( (flags_ & RMA) != 0
405 || static_cast<int>(size_) < xbt_cfg_get_int("smpi/send-is-detached-thresh") ) ) {
406 void *oldbuf = nullptr;
408 XBT_DEBUG("Send request %p is detached", this);
410 if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
412 if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
413 if ((smpi_privatize_global_variables != 0) && (static_cast<char*>(buf_) >= smpi_data_exe_start) &&
414 (static_cast<char*>(buf_) < smpi_data_exe_start + smpi_data_exe_size)) {
415 XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
416 smpi_switch_data_segment(simgrid::s4u::Actor::byPid(src_));
418 buf = xbt_malloc(size_);
419 memcpy(buf,oldbuf,size_);
420 XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
425 //if we are giving back the control to the user without waiting for completion, we have to inject timings
426 double sleeptime = 0.0;
427 if (detached_ != 0 || ((flags_ & (ISEND | SSEND)) != 0)) { // issend should be treated as isend
428 // isend and send timings may be different
429 sleeptime = ((flags_ & ISEND) != 0)
430 ? simgrid::s4u::Actor::self()->getHost()->extension<simgrid::smpi::SmpiHost>()->oisend(size_)
431 : simgrid::s4u::Actor::self()->getHost()->extension<simgrid::smpi::SmpiHost>()->osend(size_);
435 simcall_process_sleep(sleeptime);
436 XBT_DEBUG("sending size of %zu : sleep %f ", size_, sleeptime);
439 int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
441 xbt_mutex_t mut=process->mailboxes_mutex();
443 if (async_small_thresh != 0 || (flags_ & RMA) != 0)
444 xbt_mutex_acquire(mut);
446 if (not(async_small_thresh != 0 || (flags_ & RMA) != 0)) {
447 mailbox = process->mailbox();
448 } else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
449 mailbox = process->mailbox();
450 XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
451 smx_activity_t action = simcall_comm_iprobe(mailbox, 1, &match_send, static_cast<void*>(this));
452 if (action == nullptr) {
453 if ((flags_ & SSEND) == 0){
454 mailbox = process->mailbox_small();
455 XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
457 mailbox = process->mailbox_small();
458 XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
459 action = simcall_comm_iprobe(mailbox, 1, &match_send, static_cast<void*>(this));
460 if (action == nullptr) {
461 XBT_DEBUG("No, we are first, send to large mailbox");
462 mailbox = process->mailbox();
466 XBT_DEBUG("Yes there was something for us in the large mailbox");
469 mailbox = process->mailbox();
470 XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)",mailbox, this,buf_);
473 // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
475 action_ = simcall_comm_isend(
476 simgrid::s4u::Actor::byPid(src_)->getImpl(), mailbox, size_, -1.0, buf, real_size_, &match_send,
477 &xbt_free_f, // how to free the userdata if a detached send fails
478 not process->replaying() ? smpi_comm_copy_data_callback : &smpi_comm_null_copy_buffer_callback, this,
479 // detach if msg size < eager/rdv switch limit
481 XBT_DEBUG("send simcall posted");
483 /* FIXME: detached sends are not traceable (action_ == nullptr) */
484 if (action_ != nullptr)
485 simcall_set_category(action_, TRACE_internal_smpi_get_category());
486 if (async_small_thresh != 0 || ((flags_ & RMA)!=0))
487 xbt_mutex_release(mut);
491 void Request::startall(int count, MPI_Request * requests)
493 if(requests== nullptr)
496 for(int i = 0; i < count; i++) {
497 requests[i]->start();
501 int Request::test(MPI_Request * request, MPI_Status * status) {
502 //assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before)
503 // Inject a small simulated sleep to avoid deadlocks when MPI_Test is used as a break condition, such as
504 // while (MPI_Test(request, flag, status) && flag) dostuff...;
505 // because simulated time would not normally advance when only calls to MPI_Test are made -> deadlock.
506 // nsleeps is a multiplier to the sleep time: each failed test increases it, to speed up execution.
507 static int nsleeps = 1;
508 if(smpi_test_sleep > 0)
509 simcall_process_sleep(nsleeps*smpi_test_sleep);
511 Status::empty(status);
513 if (((*request)->flags_ & PREPARED) == 0) {
514 if ((*request)->action_ != nullptr)
515 flag = simcall_comm_test((*request)->action_);
517 finish_wait(request,status);
518 nsleeps=1;//reset the number of sleeps we will do next time
519 if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & PERSISTENT) == 0)
520 *request = MPI_REQUEST_NULL;
521 } else if (xbt_cfg_get_boolean("smpi/grow-injected-times")){
528 int Request::testsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
533 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
535 for (int i = 0; i < incount; i++) {
536 if (requests[i] != MPI_REQUEST_NULL) {
537 if (test(&requests[i], pstat)) {
540 if (status != MPI_STATUSES_IGNORE)
542 if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & NON_PERSISTENT))
543 requests[i] = MPI_REQUEST_NULL;
549 if(count_dead==incount)
550 return MPI_UNDEFINED;
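/* testany() gathers the started, non-PREPARED requests into a vector of SIMIX activities plus a map
 * back to their positions in 'requests', and asks simcall_comm_testany() whether any of them completed. */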
554 int Request::testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
556 std::vector<simgrid::kernel::activity::ActivityImplPtr> comms;
557 comms.reserve(count);
562 *index = MPI_UNDEFINED;
564 std::vector<int> map; /** Maps all matching comms back to their location in requests **/
565 for(i = 0; i < count; i++) {
566 if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & PREPARED)) {
567 comms.push_back(requests[i]->action_);
571 if (not map.empty()) {
572 // nsleeps is a multiplier to the sleep time: each failed testany increases it, to speed up execution.
573 static int nsleeps = 1;
574 if(smpi_test_sleep > 0)
575 simcall_process_sleep(nsleeps*smpi_test_sleep);
577 i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
578 if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
580 finish_wait(&requests[*index],status);
583 if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & NON_PERSISTENT)) {
584 requests[*index] = MPI_REQUEST_NULL;
590 //all requests are null or inactive, return true
592 Status::empty(status);
598 int Request::testall(int count, MPI_Request requests[], MPI_Status status[])
601 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
603 for(int i=0; i<count; i++){
604 if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED)) {
605 if (test(&requests[i], pstat)!=1){
608 requests[i]=MPI_REQUEST_NULL;
611 Status::empty(pstat);
613 if(status != MPI_STATUSES_IGNORE) {
620 void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
622 //FIXME find another way to avoid busy waiting ?
623 // the issue here is that we have to wait on a nonexistent comm
625 iprobe(source, tag, comm, &flag, status);
626 XBT_DEBUG("Busy Waiting on probing : %d", flag);
630 void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
631 // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
632 // especially when used as a break condition, such as while (MPI_Iprobe(...)) dostuff...
633 // nsleeps is a multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
634 // This can speed up the execution of certain applications by an order of magnitude, such as HPL
635 static int nsleeps = 1;
636 double speed = simgrid::s4u::Actor::self()->getHost()->getSpeed();
637 double maxrate = xbt_cfg_get_double("smpi/iprobe-cpu-usage");
638 MPI_Request request = new Request(
639 nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->getPid(),
640 simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV);
641 if (smpi_iprobe_sleep > 0) {
642 smx_activity_t iprobe_sleep = simcall_execution_start(
643 "iprobe", /* flops to executek*/ nsleeps * smpi_iprobe_sleep * speed * maxrate, /* priority */ 1.0,
644 /* performance bound */ maxrate * speed, smpi_process()->process()->getImpl()->host);
645 simcall_execution_wait(iprobe_sleep);
647 // behave like a receive, but don't do it
648 smx_mailbox_t mailbox;
650 request->print_request("New iprobe");
651 // We have to test both mailboxes, as we don't know on which one the message will arrive
652 if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
653 mailbox = smpi_process()->mailbox_small();
654 XBT_DEBUG("Trying to probe the perm recv mailbox");
655 request->action_ = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(request));
658 if (request->action_ == nullptr){
659 mailbox = smpi_process()->mailbox();
660 XBT_DEBUG("trying to probe the other mailbox");
661 request->action_ = simcall_comm_iprobe(mailbox, 0, &match_recv, static_cast<void*>(request));
664 if (request->action_ != nullptr){
665 simgrid::kernel::activity::CommImplPtr sync_comm =
666 boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(request->action_);
667 MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
669 if(status != MPI_STATUS_IGNORE && (req->flags_ & PREPARED) == 0) {
670 status->MPI_SOURCE = comm->group()->rank(req->src_);
671 status->MPI_TAG = req->tag_;
672 status->MPI_ERROR = MPI_SUCCESS;
673 status->count = req->real_size_;
675 nsleeps = 1;//reset the number of sleeps we will do next time
679 if (xbt_cfg_get_boolean("smpi/grow-injected-times"))
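/* finish_wait() is the common epilogue of the wait/test family: it fills the MPI_Status (resolving
 * MPI_ANY_SOURCE / MPI_ANY_TAG and reporting truncation), deserializes derived datatypes or applies
 * the accumulate operation from the scratch buffer, emits the tracing events, injects the orecv()
 * timing that models the buffering of small detached messages, and releases the detached sender. */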
685 void Request::finish_wait(MPI_Request* request, MPI_Status * status)
687 MPI_Request req = *request;
688 Status::empty(status);
690 if (not((req->detached_ != 0) && ((req->flags_ & SEND) != 0)) && ((req->flags_ & PREPARED) == 0)) {
691 if(status != MPI_STATUS_IGNORE) {
692 int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
693 status->MPI_SOURCE = req->comm_->group()->rank(src);
694 status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
695 status->MPI_ERROR = req->truncated_ != 0 ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
696 // this handles the case where the size in the receive differs from the size in the send
697 status->count = req->real_size_;
700 req->print_request("Finishing");
701 MPI_Datatype datatype = req->old_type_;
703 // FIXME Handle the case of a partial shared malloc.
704 if (((req->flags_ & ACCUMULATE) != 0) ||
705 (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
707 if (not smpi_process()->replaying() && smpi_privatize_global_variables != 0 &&
708 static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
709 static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
710 XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment ");
711 smpi_switch_data_segment(simgrid::s4u::Actor::self());
714 if(datatype->flags() & DT_FLAG_DERIVED){
715 // This part handles the problem of non-contiguous memory: deserialization on the receiving side
716 if((req->flags_ & RECV) && datatype->size()!=0)
717 datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
719 }else if(req->flags_ & RECV){//apply op on contiguous buffer for accumulate
720 if(datatype->size()!=0){
721 int n =req->real_size_/datatype->size();
722 req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
729 if (TRACE_smpi_view_internals() && ((req->flags_ & RECV) != 0)){
730 int rank = simgrid::s4u::Actor::self()->getPid();
731 int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
732 TRACE_smpi_recv(src_traced, rank,req->tag_);
734 if(req->detached_sender_ != nullptr){
735 //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
737 simgrid::s4u::Actor::self()->getHost()->extension<simgrid::smpi::SmpiHost>()->orecv(req->real_size());
739 simcall_process_sleep(sleeptime);
740 XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size_, sleeptime);
742 unref(&(req->detached_sender_));
744 if(req->flags_ & PERSISTENT)
745 req->action_ = nullptr;
746 req->flags_ |= FINISHED;
750 void Request::wait(MPI_Request * request, MPI_Status * status)
752 (*request)->print_request("Waiting");
753 if ((*request)->flags_ & PREPARED) {
754 Status::empty(status);
758 if ((*request)->action_ != nullptr)
759 // this is not a detached send
760 simcall_comm_wait((*request)->action_, -1.0);
762 finish_wait(request,status);
763 if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & NON_PERSISTENT)!=0))
764 *request = MPI_REQUEST_NULL;
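/* waitany() collects the pending SIMIX activities into a dynar, remembers in 'map' which request each
 * one came from, and blocks in simcall_comm_waitany() until one of them completes. Accumulate receives
 * are not finished here but deferred (see waitall), so that the operations are applied in order. */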
767 int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
769 s_xbt_dynar_t comms; // Keep it on stack to save some extra mallocs
770 int index = MPI_UNDEFINED;
774 // Wait for a request to complete
775 xbt_dynar_init(&comms, sizeof(smx_activity_t), [](void*ptr){
776 intrusive_ptr_release(*(simgrid::kernel::activity::ActivityImpl**)ptr);
778 int *map = xbt_new(int, count);
779 XBT_DEBUG("Wait for one of %d", count);
780 for(int i = 0; i < count; i++) {
781 if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED) &&
782 not(requests[i]->flags_ & FINISHED)) {
783 if (requests[i]->action_ != nullptr) {
784 XBT_DEBUG("Waiting any %p ", requests[i]);
785 intrusive_ptr_add_ref(requests[i]->action_.get());
786 xbt_dynar_push_as(&comms, simgrid::kernel::activity::ActivityImpl*, requests[i]->action_.get());
790 // This is a finished detached request, let's return this one
791 size = 0; // so we free the dynar but don't do the waitany call
793 finish_wait(&requests[i], status); // cleanup if refcount = 0
794 if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & NON_PERSISTENT))
795 requests[i] = MPI_REQUEST_NULL; // set to null
801 XBT_DEBUG("Enter waitany for %lu comms", xbt_dynar_length(&comms));
802 int i = simcall_comm_waitany(&comms, -1);
804 // not MPI_UNDEFINED, as this is a simix return code
807 // in case of an accumulate, we have to wait for the end of all requests so the operation is applied in the right order.
808 if ((requests[index] == MPI_REQUEST_NULL) ||
809 (not((requests[index]->flags_ & ACCUMULATE) && (requests[index]->flags_ & RECV)))) {
810 finish_wait(&requests[index],status);
811 if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
812 requests[index] = MPI_REQUEST_NULL;
817 xbt_dynar_free_data(&comms);
821 if (index==MPI_UNDEFINED)
822 Status::empty(status);
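/* The accumulate receives deferred by waitall() are finished sorted by tag, so that the reduction
 * operations are applied in a deterministic order. */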
827 static int sort_accumulates(MPI_Request a, MPI_Request b)
829 return (a->tag() > b->tag());
832 int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
834 std::vector<MPI_Request> accumulates;
837 MPI_Status *pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
838 int retvalue = MPI_SUCCESS;
839 //tag invalid requests in the set
840 if (status != MPI_STATUSES_IGNORE) {
841 for (int c = 0; c < count; c++) {
842 if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst_ == MPI_PROC_NULL || (requests[c]->flags_ & PREPARED)) {
843 Status::empty(&status[c]);
844 } else if (requests[c]->src_ == MPI_PROC_NULL) {
845 Status::empty(&status[c]);
846 status[c].MPI_SOURCE = MPI_PROC_NULL;
850 for (int c = 0; c < count; c++) {
851 if (MC_is_active() || MC_record_replay_is_active()) {
852 wait(&requests[c],pstat);
855 index = waitany(count, (MPI_Request*)requests, pstat);
856 if (index == MPI_UNDEFINED)
859 if (requests[index] != MPI_REQUEST_NULL
860 && (requests[index]->flags_ & RECV)
861 && (requests[index]->flags_ & ACCUMULATE))
862 accumulates.push_back(requests[index]);
863 if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
864 requests[index] = MPI_REQUEST_NULL;
866 if (status != MPI_STATUSES_IGNORE) {
867 status[index] = *pstat;
868 if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
869 retvalue = MPI_ERR_IN_STATUS;
873 if (not accumulates.empty()) {
874 std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
875 for (auto& req : accumulates) {
876 finish_wait(&req, status);
883 int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
887 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
889 for (int i = 0; i < incount; i++) {
890 int index = waitany(incount, requests, pstat);
891 if(index!=MPI_UNDEFINED){
892 indices[count] = index;
894 if(status != MPI_STATUSES_IGNORE) {
895 status[index] = *pstat;
897 if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
898 requests[index] = MPI_REQUEST_NULL;
900 return MPI_UNDEFINED;
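/* Fortran interoperability: requests are exposed to Fortran as integer indices into the F2C lookup
 * table. f2c() translates such an index back into an MPI_Request, and free_f() drops the entry when
 * the Fortran handle is freed. */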
906 MPI_Request Request::f2c(int id) {
908 if(id==MPI_FORTRAN_REQUEST_NULL)
909 return static_cast<MPI_Request>(MPI_REQUEST_NULL);
910 return static_cast<MPI_Request>(F2C::f2c_lookup()->at(get_key_id(key, id)));
915 if (F2C::f2c_lookup() == nullptr) {
916 F2C::set_f2c_lookup(new std::unordered_map<std::string, F2C*>);
919 (*(F2C::f2c_lookup()))[get_key_id(key, F2C::f2c_id())] = this;
920 F2C::f2c_id_increment();
921 return F2C::f2c_id()-1;
924 void Request::free_f(int id)
926 if (id != MPI_FORTRAN_REQUEST_NULL) {
928 F2C::f2c_lookup()->erase(get_key_id(key, id));