/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_request.hpp"

#include "simgrid/Exception.hpp"
#include "simgrid/s4u/Exec.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_host.hpp"
#include "smpi_op.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/smpi/include/smpi_actor.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");

static simgrid::config::Flag<double> smpi_iprobe_sleep(
    "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
static simgrid::config::Flag<double> smpi_test_sleep(
    "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);

std::vector<s_smpi_factor_t> smpi_ois_values;

extern void (*smpi_comm_copy_data_callback)(simgrid::kernel::activity::CommImpl*, void*, size_t);

namespace simgrid{
namespace smpi{

Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                 unsigned flags, MPI_Op op)
    : buf_(const_cast<void*>(buf)), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags), op_(op)
{
  void* old_buf = nullptr;
  // FIXME Handle the case of a partial shared malloc.
  if ((((flags & MPI_REQ_RECV) != 0) && ((flags & MPI_REQ_ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
    // This part handles the problem of non-contiguous memory
    old_buf = const_cast<void*>(buf);
    buf_    = xbt_malloc(count * datatype->size());
    if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & MPI_REQ_SEND) != 0))
      datatype->serialize(old_buf, buf_, count);
  }
  // This part handles the problem of non-contiguous memory (for the deserialization at reception)
  old_buf_ = old_buf;
  size_    = datatype->size() * count;
  if (op != MPI_REPLACE && op != MPI_OP_NULL)
    op_->ref();
  detached_sender_ = nullptr;
  if (flags & MPI_REQ_PERSISTENT)
    refcount_ = 1;
  generalized_funcs = nullptr;
  nbc_requests_     = nullptr;
}
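
/* Drop one reference on a request. When the count reaches zero, the attached communicator, datatype and (for
 * accumulates) operation are released and the handle is reset to MPI_REQUEST_NULL. */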
void Request::unref(MPI_Request* request)
{
  if ((*request) != MPI_REQUEST_NULL) {
    (*request)->refcount_--;
    if ((*request)->refcount_ < 0) {
      (*request)->print_request("wrong refcount");
      xbt_die("Whoops, wrong refcount");
    }
    if ((*request)->refcount_ == 0) {
      if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
        ((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
        delete (*request)->generalized_funcs;
      } else {
        Comm::unref((*request)->comm_);
        Datatype::unref((*request)->old_type_);
      }
      if ((*request)->op_ != MPI_REPLACE && (*request)->op_ != MPI_OP_NULL)
        Op::unref(&(*request)->op_);

      (*request)->print_request("Destroying");
      delete *request;
      *request = MPI_REQUEST_NULL;
    } else {
      (*request)->print_request("Decrementing");
    }
  } else {
    xbt_die("freeing an already free request");
  }
}
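
/* Matching callbacks handed to the simulated network. A posted receive matches an incoming send when the
 * communicator ids agree, the source matches (or the receive used MPI_ANY_SOURCE) and the tag matches (or the
 * receive used MPI_ANY_TAG, which only matches non-negative tags). On a match, wildcard fields are resolved
 * and detached senders are tied to their receiver. */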
int Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d, id %d against %d",
            ref->src_, req->src_, ref->tag_, req->tag_, ref->comm_->id(), req->comm_->id());

  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  if((ref->comm_->id()==MPI_UNDEFINED || req->comm_->id() == MPI_UNDEFINED || (ref->comm_->id()==req->comm_->id()))
      && ((ref->src_ == MPI_ANY_SOURCE && (ref->comm_->group()->rank(req->src_) != MPI_UNDEFINED)) || req->src_ == ref->src_)
      && ((ref->tag_ == MPI_ANY_TAG && req->tag_ >= 0) || req->tag_ == ref->tag_)){
    //we match, we can transfer some values
    if(ref->src_ == MPI_ANY_SOURCE)
      ref->real_src_ = req->src_;
    if(ref->tag_ == MPI_ANY_TAG)
      ref->real_tag_ = req->tag_;
    if(ref->real_size_ < req->real_size_)
      ref->truncated_ = true;
    if (req->detached_)
      ref->detached_sender_ = req; // tie the sender to the receiver, as it is detached and has to be freed in the receiver
    if(req->cancelled_ == 0)
      req->cancelled_ = -1; // mark as uncancellable
    XBT_DEBUG("match succeeded");
    return 1;
  }
  return 0;
}

int Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
  MPI_Request ref = static_cast<MPI_Request>(a);
  MPI_Request req = static_cast<MPI_Request>(b);
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d, id %d against %d",
            ref->src_, req->src_, ref->tag_, req->tag_, ref->comm_->id(), req->comm_->id());
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");

  if((ref->comm_->id()==MPI_UNDEFINED || req->comm_->id() == MPI_UNDEFINED || (ref->comm_->id()==req->comm_->id()))
      && ((req->src_ == MPI_ANY_SOURCE && (req->comm_->group()->rank(ref->src_) != MPI_UNDEFINED)) || req->src_ == ref->src_)
      && ((req->tag_ == MPI_ANY_TAG && ref->tag_ >= 0) || req->tag_ == ref->tag_)){
    if(req->src_ == MPI_ANY_SOURCE)
      req->real_src_ = ref->src_;
    if(req->tag_ == MPI_ANY_TAG)
      req->real_tag_ = ref->tag_;
    if(req->real_size_ < ref->real_size_)
      req->truncated_ = true;
    if (ref->detached_)
      req->detached_sender_ = ref; // tie the sender to the receiver, as it is detached and has to be freed in the receiver
    if(req->cancelled_ == 0)
      req->cancelled_ = -1; // mark as uncancellable
    XBT_DEBUG("match succeeded");
    return 1;
  }
  return 0;
}

void Request::print_request(const char* message)
{
  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
           message, this, buf_, size_, src_, dst_, tag_, flags_);
}
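
/* Usage sketch (illustrative only, with made-up variable names): the *_init factories below build inactive
 * persistent requests that can be activated over and over:
 *   MPI_Request req = Request::send_init(buf, count, type, dst, tag, comm);
 *   Request::startall(1, &req);
 *   Request::wait(&req, MPI_STATUS_IGNORE); // persistent: the handle survives the wait
 *   Request::unref(&req);                   // release it once done
 */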

/* factories, to hide the internal flags from the caller */
MPI_Request Request::bsend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_BSEND);
}

MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
}

MPI_Request Request::ssend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}

MPI_Request Request::isend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     comm->group()->actor(dst)->get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}

MPI_Request Request::rma_send_init(const void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag,
                                   MPI_Comm comm, MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if (op == MPI_OP_NULL)
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
  else
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED |
                          MPI_REQ_ACCUMULATE, op);
  return request;
}

MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                     simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}

MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag,
                                   MPI_Comm comm, MPI_Op op)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  if (op == MPI_OP_NULL)
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
  else
    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
                          comm->group()->actor(dst)->get_pid(), tag, comm,
                          MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE, op);
  return request;
}

MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                     simgrid::s4u::this_actor::get_pid(), tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}

MPI_Request Request::ibsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND);
  request->start();
  return request;
}

MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND);
  request->start();
  return request;
}

MPI_Request Request::issend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND);
  request->start();
  return request;
}

MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
                        src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
                        simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
  request->start();
  return request;
}
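
/* The blocking point-to-point calls below are built from their non-blocking counterparts followed by a wait(). */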

void Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = irecv(buf, count, datatype, src, tag, comm);
  wait(&request, status);
  request = nullptr;
}

void Request::bsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);
  request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}

void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);
  request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}

void Request::ssend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                        comm->group()->actor(dst)->get_pid(), tag, comm,
                        MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND);
  request->start();
  wait(&request, MPI_STATUS_IGNORE);
  request = nullptr;
}

void Request::sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, int sendtag,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  MPI_Request requests[2];
  MPI_Status stats[2];
  int myid = simgrid::s4u::this_actor::get_pid();
  if ((comm->group()->actor(dst)->get_pid() == myid) && (comm->group()->actor(src)->get_pid() == myid)) {
    Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
    if (status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = src;
      status->MPI_TAG    = recvtag;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = sendcount * sendtype->size();
    }
    return;
  }
  requests[0] = isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  startall(2, requests);
  waitall(2, requests, stats);
  unref(&requests[0]);
  unref(&requests[1]);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}
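
/* Activate the request, i.e. post the simulated communication. SMPI gives each process two mailboxes: messages
 * smaller than "smpi/async-small-thresh" go through the small (asynchronous) mailbox, larger ones through the
 * regular one, so each side may have to probe both before posting its own operation. */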
void Request::start()
{
  s4u::Mailbox* mailbox;

  xbt_assert(action_ == nullptr, "Cannot (re-)start unfinished communication");
  flags_ &= ~MPI_REQ_PREPARED;
  flags_ &= ~MPI_REQ_FINISHED;

  if ((flags_ & MPI_REQ_RECV) != 0) {
    this->print_request("New recv");

    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));

    int async_small_thresh = simgrid::config::get_value<int>("smpi/async-small-thresh");

    simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
    if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->lock();

    if (async_small_thresh == 0 && (flags_ & MPI_REQ_RMA) == 0) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < async_small_thresh) {
      // We have to check both mailboxes (because SSEND messages are sent to the large mbox).
      // Begin with the more appropriate one: the small one.
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %s (in case of SSEND)?",
                mailbox->get_cname());
      smx_activity_t action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        mailbox = process->mailbox();
        XBT_DEBUG("No, nothing in the small mailbox, test the other one: %s", mailbox->get_cname());
        action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));
        if (action == nullptr) {
          XBT_DEBUG("Still nothing, switch back to the small mailbox: %s", mailbox->get_cname());
          mailbox = process->mailbox_small();
        }
      } else {
        XBT_DEBUG("yes there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      smx_activity_t action = mailbox->iprobe(0, &match_recv, static_cast<void*>(this));

      if (action == nullptr) {
        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = process->mailbox();
      } else {
        XBT_DEBUG("yes there was something for us in the small mailbox");
      }
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    real_size_ = size_;
    action_    = simcall_comm_irecv(
        process->get_actor()->get_impl(), mailbox->get_impl(), buf_, &real_size_, &match_recv,
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
    XBT_DEBUG("recv simcall posted");

    if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->unlock();
  } else { /* the RECV flag was not set, so this is a send */
    simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
    xbt_assert(process, "Actor pid=%d is gone??", dst_);
    int rank = src_;
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, dst_, tag_, size_);
    }
    this->print_request("New send");

    void* buf = buf_;
    if ((flags_ & MPI_REQ_SSEND) == 0 &&
        ((flags_ & MPI_REQ_RMA) != 0 || (flags_ & MPI_REQ_BSEND) != 0 ||
         static_cast<int>(size_) < simgrid::config::get_value<int>("smpi/send-is-detached-thresh"))) {
      void *oldbuf = nullptr;
      detached_    = 1;
      XBT_DEBUG("Send request %p is detached", this);
      refcount_++;
      if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
        oldbuf = buf_;
        if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
          if ((smpi_privatize_global_variables != SmpiPrivStrategies::NONE) &&
              (static_cast<char*>(buf_) >= smpi_data_exe_start) &&
              (static_cast<char*>(buf_) < smpi_data_exe_start + smpi_data_exe_size)) {
            XBT_DEBUG("Privatization: we are sending from a zone inside global memory. Switch data segment.");
            smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_));
          }
          // we need this temporary buffer even for bsend, as it will be released in the copy callback and we
          // don't have a way to differentiate it; so actually... don't use manually attached buffer space.
          buf = xbt_malloc(size_);
          memcpy(buf, oldbuf, size_);
          XBT_DEBUG("buf %p copied into %p", oldbuf, buf);
        }
      }
    }

    // if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if (detached_ || ((flags_ & (MPI_REQ_ISEND | MPI_REQ_SSEND)) != 0)) { // issend should be treated as isend
      // isend and send timings may be different
      sleeptime = ((flags_ & MPI_REQ_ISEND) != 0)
                      ? simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->oisend(size_)
                      : simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->osend(size_);
    }

    if (sleeptime > 0.0) {
      simgrid::s4u::this_actor::sleep_for(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %f ", size_, sleeptime);
    }
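
    /* The durations injected above come from the per-host output timing factors of the smpi::Host extension
     * (fed, e.g., by the smpi/os and smpi/ois configuration items); osend() and oisend() return whatever those
     * factors yield for this message size. */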

    int async_small_thresh = simgrid::config::get_value<int>("smpi/async-small-thresh");

    simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
    if (async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)
      mut->lock();

    if (not(async_small_thresh != 0 || (flags_ & MPI_REQ_RMA) != 0)) {
      mailbox = process->mailbox();
    } else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
      mailbox = process->mailbox();
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %s?", mailbox->get_cname());
      smx_activity_t action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
      if (action == nullptr) {
        if ((flags_ & MPI_REQ_SSEND) == 0) {
          mailbox = process->mailbox_small();
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %s",
                    mailbox->get_cname());
        } else {
          mailbox = process->mailbox_small();
          XBT_DEBUG("SSEND: Is there a corresponding recv already posted in the small mailbox %s?",
                    mailbox->get_cname());
          action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
          if (action == nullptr) {
            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = process->mailbox();
          }
        }
      } else {
        XBT_DEBUG("Yes there was something for us in the large mailbox");
      }
    } else {
      mailbox = process->mailbox();
      XBT_DEBUG("Send request %p is in the large mailbox %s (buf: %p)", this, mailbox->get_cname(), buf_);
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    real_size_ = size_;
    action_    = simcall_comm_isend(
        simgrid::s4u::Actor::by_pid(src_)->get_impl(), mailbox->get_impl(), size_, -1.0, buf, real_size_, &match_send,
        &xbt_free_f, // how to free the userdata if a detached send fails
        process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this,
        // detach if msg size < eager/rdv switch limit
        detached_);
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (action_ == nullptr) */
    if (action_ != nullptr) {
      boost::static_pointer_cast<kernel::activity::CommImpl>(action_)->set_tracing_category(
          smpi_process()->get_tracing_category());
    }

    if (async_small_thresh != 0 || ((flags_ & MPI_REQ_RMA) != 0))
      mut->unlock();
  }
}

void Request::startall(int count, MPI_Request * requests)
{
  if (requests == nullptr)
    return;

  for (int i = 0; i < count; i++) {
    requests[i]->start();
  }
}

void Request::cancel()
{
  if (cancelled_ != -1) // -1 means the request was matched and is no longer cancellable
    cancelled_ = 1;
  if (this->action_ != nullptr)
    (boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(this->action_))->cancel();
}

int Request::test(MPI_Request * request, MPI_Status * status, int* flag) {
  // assume that *request is not MPI_REQUEST_NULL (filtered in PMPI_Test or testall before).
  // We have to sleep a bit to avoid deadlocks if test is used as a break condition, such as
  //    while (MPI_Test(request, flag, status) && flag) dostuff...
  // because the time will not normally advance when only calls to MPI_Test are made -> deadlock
  // nsleeps is a multiplier to the sleeptime, to increase speed of execution: each failed test will increase it
  static int nsleeps = 1;
  int ret = MPI_SUCCESS;

  // Are we testing a request meant for non blocking collectives?
  // If so, test all the subrequests.
  if ((*request)->nbc_requests_size_ > 0) {
    ret = testall((*request)->nbc_requests_size_, (*request)->nbc_requests_, flag, MPI_STATUSES_IGNORE);
    if (*flag) {
      delete[] (*request)->nbc_requests_;
      (*request)->nbc_requests_size_ = 0;
      unref(request);
    }
    return ret;
  }

  if (smpi_test_sleep > 0)
    simgrid::s4u::this_actor::sleep_for(nsleeps * smpi_test_sleep);

  Status::empty(status);
  *flag = 1;
  if (((*request)->flags_ & MPI_REQ_PREPARED) == 0) {
    if ((*request)->action_ != nullptr && (*request)->cancelled_ != 1){
      try {
        *flag = simcall_comm_test((*request)->action_);
      } catch (const Exception&) {
        *flag = 0;
        return ret;
      }
    }
    if (*request != MPI_REQUEST_NULL &&
        ((*request)->flags_ & MPI_REQ_GENERALIZED)
        && !((*request)->flags_ & MPI_REQ_COMPLETE))
      *flag = 0;
    if (*flag) {
      finish_wait(request, status);
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)){
        MPI_Status* mystatus;
        if (status == MPI_STATUS_IGNORE) {
          mystatus = new MPI_Status();
          Status::empty(mystatus);
        } else {
          mystatus = status;
        }
        ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
        if (status == MPI_STATUS_IGNORE)
          delete mystatus;
      }
      nsleeps = 1; // reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_PERSISTENT) == 0)
        *request = MPI_REQUEST_NULL;
    } else if (simgrid::config::get_value<bool>("smpi/grow-injected-times")) {
      nsleeps++;
    }
  }
  return ret;
}

int Request::testsome(int incount, MPI_Request requests[], int *count, int *indices, MPI_Status status[])
{
  int ret = MPI_SUCCESS;
  int error = 0, count_dead = 0, flag = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  *count = 0;
  for (int i = 0; i < incount; i++) {
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
      ret = test(&requests[i], pstat, &flag);
      if (ret != MPI_SUCCESS)
        error = 1;
      if (flag) {
        indices[*count] = i;
        if (status != MPI_STATUSES_IGNORE)
          status[*count] = *pstat;
        (*count)++;
        if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
      }
    } else {
      count_dead++;
    }
  }
  if (count_dead == incount)
    *count = MPI_UNDEFINED;
  return error != 0 ? MPI_ERR_IN_STATUS : MPI_SUCCESS;
}

int Request::testany(int count, MPI_Request requests[], int *index, int* flag, MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::CommImpl*> comms;
  comms.reserve(count);

  int i;
  *flag = 0;
  int ret = MPI_SUCCESS;
  *index = MPI_UNDEFINED;

  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
  for (i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
      map.push_back(i);
    }
  }
  if (not map.empty()) {
    //multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if (smpi_test_sleep > 0)
      simgrid::s4u::this_actor::sleep_for(nsleeps * smpi_test_sleep);
    try {
      i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
    } catch (const Exception&) {
      XBT_DEBUG("Exception in testany");
      return 0;
    }

    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
      *index = map[i];
      if (requests[*index] != MPI_REQUEST_NULL &&
          (requests[*index]->flags_ & MPI_REQ_GENERALIZED)
          && !(requests[*index]->flags_ & MPI_REQ_COMPLETE)) {
        *flag = 0;
      } else {
        finish_wait(&requests[*index], status);
        if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_GENERALIZED)){
          MPI_Status* mystatus;
          if (status == MPI_STATUS_IGNORE) {
            mystatus = new MPI_Status();
            Status::empty(mystatus);
          } else {
            mystatus = status;
          }
          ret = (requests[*index]->generalized_funcs)->query_fn((requests[*index]->generalized_funcs)->extra_state, mystatus);
          if (status == MPI_STATUS_IGNORE)
            delete mystatus;
        }
        if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[*index] = MPI_REQUEST_NULL;
        XBT_DEBUG("Testany - returning with index %d", *index);
        *flag = 1;
      }
      nsleeps = 1;
    } else {
      nsleeps++;
    }
  } else {
    XBT_DEBUG("Testany on inactive handles, returning flag=1 but empty status");
    //all requests are null or inactive, return true
    *flag  = 1;
    *index = MPI_UNDEFINED;
    Status::empty(status);
  }

  return ret;
}

int Request::testall(int count, MPI_Request requests[], int* outflag, MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag;
  int error = 0;
  int ret = MPI_SUCCESS;
  *outflag = 1;
  for (int i = 0; i < count; i++) {
    if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
      ret = test(&requests[i], pstat, &flag);
      if (flag) {
        flag = 0;
        requests[i] = MPI_REQUEST_NULL;
      } else {
        *outflag = 0;
      }
      if (ret != MPI_SUCCESS)
        error = 1;
    } else {
      Status::empty(pstat);
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[i] = *pstat;
    }
  }
  return error == 1 ? MPI_ERR_IN_STATUS : MPI_SUCCESS;
}

void Request::probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  //FIXME find another way to avoid busy waiting?
  // the issue here is that we have to wait on a nonexistent comm
  while (flag == 0) {
    iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
  }
}

void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // especially when used as a break condition, such as while (MPI_Iprobe(...)) dostuff...
  // nsleeps is a multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
  // This can speed up the execution of certain applications by an order of magnitude, such as HPL
  static int nsleeps = 1;
  double speed   = s4u::this_actor::get_host()->get_speed();
  double maxrate = simgrid::config::get_value<double>("smpi/iprobe-cpu-usage");
  MPI_Request request = new Request(nullptr, 0, MPI_CHAR,
                                    source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
                                    simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV);
  if (smpi_iprobe_sleep > 0) {
    /** Compute the number of flops we will sleep for **/
    s4u::this_actor::exec_init(/*nsleeps: See comment above */ nsleeps *
                               /*(seconds * flop/s -> total flops)*/ smpi_iprobe_sleep * speed * maxrate)
        /* Not the entire CPU can be used when iprobing: This is important for
         * the energy consumption caused by polling with iprobes.
         * Note also that the number of flops computed above contains a maxrate
         * factor and is hence reduced (maxrate < 1). */
        ->set_bound(maxrate * speed)
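        /* Worked example (illustrative figures): on a 1 Gf host with the default smpi/iprobe value of 1e-4 s
         * and maxrate = 1, this injects nsleeps * 1e-4 * 1e9 = nsleeps * 1e5 flops, i.e. about 1e-4 s of
         * simulated time per failed probe. */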
        ->start()
        ->wait();
  }
  // behave like a receive, but don't do it
  s4u::Mailbox* mailbox;

  request->print_request("New iprobe");
  // We have to test both mailboxes as we don't know if we will receive one or another
  if (simgrid::config::get_value<int>("smpi/async-small-thresh") > 0) {
    mailbox = smpi_process()->mailbox_small();
    XBT_DEBUG("Trying to probe the perm recv mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ == nullptr){
    mailbox = smpi_process()->mailbox();
    XBT_DEBUG("trying to probe the other mailbox");
    request->action_ = mailbox->iprobe(0, &match_recv, static_cast<void*>(request));
  }

  if (request->action_ != nullptr){
    kernel::activity::CommImplPtr sync_comm = boost::static_pointer_cast<kernel::activity::CommImpl>(request->action_);
    MPI_Request req                         = static_cast<MPI_Request>(sync_comm->src_data_);
    *flag = 1;
    if (status != MPI_STATUS_IGNORE && (req->flags_ & MPI_REQ_PREPARED) == 0) {
      status->MPI_SOURCE = comm->group()->rank(req->src_);
      status->MPI_TAG    = req->tag_;
      status->MPI_ERROR  = MPI_SUCCESS;
      status->count      = req->real_size_;
    }
    nsleeps = 1; // reset the number of sleeps we will do next time
  } else {
    *flag = 0;
    if (simgrid::config::get_value<bool>("smpi/grow-injected-times"))
      nsleeps++;
  }
  unref(&request);
  xbt_assert(request == MPI_REQUEST_NULL);
}
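
/* Post-completion bookkeeping shared by the wait/test family: fill in the status, deserialize derived
 * datatypes or apply the pending accumulate operation on the receive side, inject the receiver-side buffering
 * time, and release detached senders. */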
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
{
  MPI_Request req = *request;
  Status::empty(status);

  if (req->cancelled_ == 1) {
    if (status != MPI_STATUS_IGNORE)
      status->cancelled = 1;
    if (req->detached_sender_ != nullptr)
      unref(&(req->detached_sender_));
    unref(request);
    *request = MPI_REQUEST_NULL;
    return;
  }

  if ((req->flags_ & (MPI_REQ_PREPARED | MPI_REQ_GENERALIZED | MPI_REQ_FINISHED)) == 0) {
    if(status != MPI_STATUS_IGNORE) {
      int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
      status->MPI_SOURCE = req->comm_->group()->rank(src);
      status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
      status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size of the receive differs from the size of the send
      status->count = req->real_size_;
    }
    //detached send will be finished at the other end
    if (not(req->detached_ && ((req->flags_ & MPI_REQ_SEND) != 0))) {
      req->print_request("Finishing");
      MPI_Datatype datatype = req->old_type_;

      // FIXME Handle the case of a partial shared malloc.
      if (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) ||
          (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
        if (not smpi_process()->replaying() && smpi_privatize_global_variables != SmpiPrivStrategies::NONE &&
            static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
            static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
          XBT_VERB("Privatization: we are deserializing to a zone in global memory. Switch data segment.");
          smpi_switch_data_segment(simgrid::s4u::Actor::self());
        }

        if (datatype->flags() & DT_FLAG_DERIVED) {
          // This part handles the problem of non-contiguous memory (the deserialization at reception)
          if ((req->flags_ & MPI_REQ_RECV) && datatype->size() != 0)
            datatype->unserialize(req->buf_, req->old_buf_, req->real_size_ / datatype->size(), req->op_);
          xbt_free(req->buf_);
        } else if (req->flags_ & MPI_REQ_RECV) { // apply op on contiguous buffer for accumulate
          if (datatype->size() != 0) {
            int n = req->real_size_ / datatype->size();
            req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
          }
          xbt_free(req->buf_);
        }
      }
    }
  }

  if (TRACE_smpi_view_internals() && ((req->flags_ & MPI_REQ_RECV) != 0)) {
    int rank = simgrid::s4u::this_actor::get_pid();
    int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
    TRACE_smpi_recv(src_traced, rank, req->tag_);
  }
  if (req->detached_sender_ != nullptr) {
    //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    double sleeptime =
        simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->orecv(req->real_size());
    if (sleeptime > 0.0) {
      simgrid::s4u::this_actor::sleep_for(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size_, sleeptime);
    }
    unref(&(req->detached_sender_));
  }
  if (req->flags_ & MPI_REQ_PERSISTENT)
    req->action_ = nullptr;
  req->flags_ |= MPI_REQ_FINISHED;
  unref(request);
}
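
/* Block until the request completes. A request standing for a non-blocking collective carries sub-requests
 * that are waited on first (applying the reduction operation if any) before the parent request is released. */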
int Request::wait(MPI_Request * request, MPI_Status * status)
{
  int ret = MPI_SUCCESS;
  // Are we waiting on a request meant for non blocking collectives?
  // If so, wait for all the subrequests.
  if ((*request)->nbc_requests_size_ > 0) {
    ret = waitall((*request)->nbc_requests_size_, (*request)->nbc_requests_, MPI_STATUSES_IGNORE);
    for (int i = 0; i < (*request)->nbc_requests_size_; i++) {
      if ((*request)->buf_ != nullptr && (*request)->nbc_requests_[i] != MPI_REQUEST_NULL) { // reduce case
        void * buf = (*request)->nbc_requests_[i]->buf_;
        if ((*request)->old_type_->flags() & DT_FLAG_DERIVED)
          buf = (*request)->nbc_requests_[i]->old_buf_;
        if ((*request)->nbc_requests_[i]->flags_ & MPI_REQ_RECV) {
          if ((*request)->op_ != MPI_OP_NULL) {
            int count = (*request)->size_ / (*request)->old_type_->size();
            (*request)->op_->apply(buf, (*request)->buf_, &count, (*request)->old_type_);
          }
          smpi_free_tmp_buffer(static_cast<unsigned char*>(buf));
        }
      }
      if ((*request)->nbc_requests_[i] != MPI_REQUEST_NULL)
        Request::unref(&((*request)->nbc_requests_[i]));
    }
    delete[] (*request)->nbc_requests_;
    (*request)->nbc_requests_size_ = 0;
    unref(request);
    (*request) = MPI_REQUEST_NULL;
    return ret;
  }

  (*request)->print_request("Waiting");
  if ((*request)->flags_ & MPI_REQ_PREPARED) {
    Status::empty(status);
    return ret;
  }

  if ((*request)->action_ != nullptr) {
    try {
      // this is not a detached send
      simcall_comm_wait((*request)->action_, -1.0);
    } catch (const Exception&) {
      XBT_VERB("Request cancelled");
    }
  }

  if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & MPI_REQ_GENERALIZED)){
    MPI_Status* mystatus;
    if (!((*request)->flags_ & MPI_REQ_COMPLETE)) {
      ((*request)->generalized_funcs)->mutex->lock();
      ((*request)->generalized_funcs)->cond->wait(((*request)->generalized_funcs)->mutex);
      ((*request)->generalized_funcs)->mutex->unlock();
    }
    if (status == MPI_STATUS_IGNORE) {
      mystatus = new MPI_Status();
      Status::empty(mystatus);
    } else {
      mystatus = status;
    }
    ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
    if (status == MPI_STATUS_IGNORE)
      delete mystatus;
  }

  finish_wait(request, status);
  if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & MPI_REQ_NON_PERSISTENT) != 0))
    *request = MPI_REQUEST_NULL;
  return ret;
}

int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
{
  std::vector<simgrid::kernel::activity::CommImpl*> comms;
  comms.reserve(count);
  int index = MPI_UNDEFINED;

  if (count > 0) {
    // Wait for a request to complete
    std::vector<int> map;
    XBT_DEBUG("Wait for one of %d", count);
    for (int i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_PREPARED) &&
          not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
        if (requests[i]->action_ != nullptr) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
          map.push_back(i);
        } else {
          // This is a finished detached request, let's return this one
          comms.clear(); // so that we don't do the waitany call below
          index = i;
          finish_wait(&requests[i], status); // cleanup if refcount = 0
          if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[i] = MPI_REQUEST_NULL; // set to null
          break;
        }
      }
    }
    if (not comms.empty()) {
      XBT_DEBUG("Enter waitany for %zu comms", comms.size());
      int i = MPI_UNDEFINED;
      try {
        // this is not a detached send
        i = simcall_comm_waitany(comms.data(), comms.size(), -1);
      } catch (const Exception&) {
        XBT_INFO("request %d cancelled ", i);
        return i;
      }

      // not MPI_UNDEFINED, as this is a simix return code
      if (i != -1) {
        index = map[i];
        //in case of an accumulate, we have to wait the end of all requests to apply the operation, ordered correctly.
        if ((requests[index] == MPI_REQUEST_NULL) ||
            (not((requests[index]->flags_ & MPI_REQ_ACCUMULATE) && (requests[index]->flags_ & MPI_REQ_RECV)))) {
          finish_wait(&requests[index], status);
          if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
            requests[index] = MPI_REQUEST_NULL;
        }
      }
    }
  }

  if (index == MPI_UNDEFINED)
    Status::empty(status);

  return index;
}

static int sort_accumulates(MPI_Request a, MPI_Request b)
{
  return (a->tag() > b->tag());
}
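
/* Comparator for the std::sort call in waitall() below: pending accumulate requests are finished in
 * decreasing tag order, so that concurrent accumulates are always applied in the same deterministic order. */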

int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  std::vector<MPI_Request> accumulates;
  int index;
  MPI_Status stat;
  MPI_Status *pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
  int retvalue = MPI_SUCCESS;
  //tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (int c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst_ == MPI_PROC_NULL ||
          (requests[c]->flags_ & MPI_REQ_PREPARED)) {
        Status::empty(&status[c]);
      } else if (requests[c]->src_ == MPI_PROC_NULL) {
        Status::empty(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for (int c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      wait(&requests[c], pstat);
      index = c;
    } else {
      index = waitany(count, (MPI_Request*)requests, pstat);

      if (index == MPI_UNDEFINED)
        break;

      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_RECV) &&
          (requests[index]->flags_ & MPI_REQ_ACCUMULATE))
        accumulates.push_back(requests[index]);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & MPI_REQ_NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  if (not accumulates.empty()) {
    std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
    for (auto& req : accumulates) {
      finish_wait(&req, status);
    }
  }

  return retvalue;
}

int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
  int count = 0, flag = 0, index = 0;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  index = waitany(incount, (MPI_Request*)requests, pstat);
  if (index == MPI_UNDEFINED)
    return MPI_UNDEFINED;
  if (status != MPI_STATUSES_IGNORE) {
    status[count] = *pstat;
  }
  indices[count] = index;
  count++;
  for (int i = 0; i < incount; i++) {
    if (i != index && requests[i] != MPI_REQUEST_NULL
        && not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
      test(&requests[i], pstat, &flag);
      if (flag == 1) {
        indices[count] = i;
        if (status != MPI_STATUSES_IGNORE) {
          status[count] = *pstat;
        }
        if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL;
        count++;
      }
    }
  }
  return count;
}

MPI_Request Request::f2c(int id) {
  char key[KEY_SIZE];
  if (id == MPI_FORTRAN_REQUEST_NULL)
    return static_cast<MPI_Request>(MPI_REQUEST_NULL);
  return static_cast<MPI_Request>(F2C::f2c_lookup()->at(get_key(key, id)));
}

void Request::free_f(int id)
{
  if (id != MPI_FORTRAN_REQUEST_NULL) {
    char key[KEY_SIZE];
    F2C::f2c_lookup()->erase(get_key(key, id));
  }
}

int Request::get_status(MPI_Request req, int* flag, MPI_Status * status){
  *flag = 0;

  if (req != MPI_REQUEST_NULL && req->action_ != nullptr) {
    req->iprobe(req->src_, req->tag_, req->comm_, flag, status);
    if (*flag)
      return MPI_SUCCESS;
  }
  if (req != MPI_REQUEST_NULL &&
      (req->flags_ & MPI_REQ_GENERALIZED)
      && !(req->flags_ & MPI_REQ_COMPLETE)) {
    *flag = 0;
    return MPI_SUCCESS;
  }

  *flag = 1;
  if (req != MPI_REQUEST_NULL &&
      status != MPI_STATUS_IGNORE) {
    int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
    status->MPI_SOURCE = req->comm_->group()->rank(src);
    status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
    status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
    status->count = req->real_size_;
  }
  return MPI_SUCCESS;
}

int Request::grequest_start(MPI_Grequest_query_function* query_fn, MPI_Grequest_free_function* free_fn,
                            MPI_Grequest_cancel_function* cancel_fn, void* extra_state, MPI_Request* request)
{
  *request = new Request();
  (*request)->flags_ |= MPI_REQ_GENERALIZED;
  (*request)->flags_ |= MPI_REQ_PERSISTENT;
  (*request)->refcount_ = 1;
  ((*request)->generalized_funcs)              = new s_smpi_mpi_generalized_request_funcs_t;
  ((*request)->generalized_funcs)->query_fn    = query_fn;
  ((*request)->generalized_funcs)->free_fn     = free_fn;
  ((*request)->generalized_funcs)->cancel_fn   = cancel_fn;
  ((*request)->generalized_funcs)->extra_state = extra_state;
  ((*request)->generalized_funcs)->cond        = simgrid::s4u::ConditionVariable::create();
  ((*request)->generalized_funcs)->mutex       = simgrid::s4u::Mutex::create();
  return MPI_SUCCESS;
}

int Request::grequest_complete(MPI_Request request)
{
  if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
    return MPI_ERR_REQUEST;
  request->generalized_funcs->mutex->lock();
  request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
  request->generalized_funcs->cond->notify_one();
  request->generalized_funcs->mutex->unlock();
  return MPI_SUCCESS;
}
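
/* Usage sketch (illustrative only, callback names made up): a generalized request wraps a user-defined
 * operation; whoever performs that operation eventually marks it complete:
 *   MPI_Request req;
 *   Request::grequest_start(my_query_fn, my_free_fn, my_cancel_fn, my_state, &req);
 *   // ... perform the actual work, possibly in another actor ...
 *   Request::grequest_complete(req); // wakes up a Request::wait(&req, ...) blocked on the condition variable
 */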

void Request::set_nbc_requests(MPI_Request* reqs, int size){
  nbc_requests_size_ = size;
  if (size > 0) {
    nbc_requests_ = reqs;
  } else {
    nbc_requests_ = nullptr;
  }
}

int Request::get_nbc_requests_size(){
  return nbc_requests_size_;
}

MPI_Request* Request::get_nbc_requests(){
  return nbc_requests_;
}

} // namespace smpi
} // namespace simgrid