X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/1958b5999996b69f427e9190207300e48381ac3f..ea02d47c2ee462e84798356e4dda87b1467e22b8:/src/smpi/mpi/smpi_request.cpp

diff --git a/src/smpi/mpi/smpi_request.cpp b/src/smpi/mpi/smpi_request.cpp
index 3bd2db0d08..7fc8421b9b 100644
--- a/src/smpi/mpi/smpi_request.cpp
+++ b/src/smpi/mpi/smpi_request.cpp
@@ -61,10 +61,10 @@ Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int
     refcount_ = 1;
   else
     refcount_ = 0;
-  cancelled_ = 0;
   nbc_requests_=nullptr;
   nbc_requests_size_=0;
   init_buffer(count);
+  this->add_f();
 }
 
 void Request::ref(){
@@ -73,30 +73,29 @@ void Request::ref(){
 
 void Request::unref(MPI_Request* request)
 {
-  if((*request) != MPI_REQUEST_NULL){
-    (*request)->refcount_--;
-    if((*request)->refcount_ < 0) {
-      (*request)->print_request("wrong refcount");
-      xbt_die("Whoops, wrong refcount");
-    }
-    if((*request)->refcount_==0){
-      if ((*request)->flags_ & MPI_REQ_GENERALIZED){
-        ((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
-      }else{
-        Comm::unref((*request)->comm_);
-        Datatype::unref((*request)->old_type_);
-      }
-      if ((*request)->op_!=MPI_REPLACE && (*request)->op_!=MPI_OP_NULL)
-        Op::unref(&(*request)->op_);
+  xbt_assert(*request != MPI_REQUEST_NULL, "freeing an already free request");
 
-      (*request)->print_request("Destroying");
-      delete *request;
-      *request = MPI_REQUEST_NULL;
-    }else{
-      (*request)->print_request("Decrementing");
+  (*request)->refcount_--;
+  if ((*request)->refcount_ < 0) {
+    (*request)->print_request("wrong refcount");
+    xbt_die("Whoops, wrong refcount");
+  }
+  if ((*request)->refcount_ == 0) {
+    if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
+      ((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
+    } else {
+      Comm::unref((*request)->comm_);
+      Datatype::unref((*request)->old_type_);
     }
-  }else{
-    xbt_die("freeing an already free request");
+    if ((*request)->op_ != MPI_REPLACE && (*request)->op_ != MPI_OP_NULL)
+      Op::unref(&(*request)->op_);
+
+    (*request)->print_request("Destroying");
+    F2C::free_f((*request)->f2c_id());
+    delete *request;
+    *request = MPI_REQUEST_NULL;
+  } else {
+    (*request)->print_request("Decrementing");
   }
 }
 
@@ -118,13 +117,18 @@ bool Request::match_common(MPI_Request req, MPI_Request sender, MPI_Request rece
       receiver->real_src_ = sender->src_;
     if (receiver->tag_ == MPI_ANY_TAG)
       receiver->real_tag_ = sender->tag_;
-    if (receiver->real_size_ < sender->real_size_)
-      receiver->truncated_ = true;
+    if ((receiver->flags_ & MPI_REQ_PROBE) == 0) {
+      if (receiver->real_size_ < sender->real_size_) {
+        XBT_DEBUG("Truncating message - should not happen: receiver size : %zu < sender size : %zu",
+                  receiver->real_size_, sender->real_size_);
+        receiver->truncated_ = true;
+      } else if (receiver->real_size_ > sender->real_size_) {
+        receiver->real_size_ = sender->real_size_;
+      }
+    }
     if (sender->detached_)
       receiver->detached_sender_ = sender; // tie the sender to the receiver, as it is detached and has to be freed in
                                            // the receiver
-    if (req->cancelled_ == 0)
-      req->cancelled_ = -1; // mark as uncancelable
+    req->flags_ |= MPI_REQ_MATCHED; // mark as impossible to cancel anymore
     XBT_DEBUG("match succeeded");
     return true;
   }
@@ -174,28 +178,28 @@ void Request::print_request(const char* message) const
 MPI_Request Request::bsend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
   return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
-                     comm->group()->actor(dst)->get_pid(), tag, comm,
+                     dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                      MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_BSEND);
 }
 
 MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
   return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
-                     comm->group()->actor(dst)->get_pid(), tag, comm,
+                     dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                      MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
 }
 
 MPI_Request Request::ssend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
   return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
-                     comm->group()->actor(dst)->get_pid(), tag, comm,
+                     dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                      MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
 }
 
 MPI_Request Request::isend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
   return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
-                     comm->group()->actor(dst)->get_pid(), tag, comm,
+                     dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                      MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
 }
 
@@ -206,11 +210,11 @@ MPI_Request Request::rma_send_init(const void *buf, int count, MPI_Datatype data
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   if(op==MPI_OP_NULL){
     request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
-                          comm->group()->actor(dst)->get_pid(), tag, comm,
+                          dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                           MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
   }else{
     request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
-                          comm->group()->actor(dst)->get_pid(), tag, comm,
+                          dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                           MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED |
                           MPI_REQ_ACCUMULATE, op);
   }
@@ -219,8 +223,13 @@ MPI_Request Request::rma_send_init(const void *buf, int count, MPI_Datatype data
 
 MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
 {
+  int source = MPI_PROC_NULL;
+  if (src == MPI_ANY_SOURCE)
+    source = MPI_ANY_SOURCE;
+  else if (src != MPI_PROC_NULL)
+    source = comm->group()->actor(src)->get_pid();
   return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
-                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
+                     source,
                      simgrid::s4u::this_actor::get_pid(), tag, comm,
                      MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
 }
 
@@ -229,13 +238,18 @@ MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype,
                                    MPI_Op op)
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  int source = MPI_PROC_NULL;
+  if (src == MPI_ANY_SOURCE)
+    source = MPI_ANY_SOURCE;
+  else if (src != MPI_PROC_NULL)
+    source = comm->group()->actor(src)->get_pid();
   if(op==MPI_OP_NULL){
-    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
-                          comm->group()->actor(dst)->get_pid(), tag, comm,
+    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
+                          dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                           MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
   }else{
-    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->get_pid(),
-                          comm->group()->actor(dst)->get_pid(), tag, comm,
+    request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
+                          dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                           MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED |
                           MPI_REQ_ACCUMULATE, op);
   }
   return request;
@@ -243,9 +257,13 @@ MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype,
 
 MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
 {
+  int source = MPI_PROC_NULL;
+  if (src == MPI_ANY_SOURCE)
+    source = MPI_ANY_SOURCE;
+  else if (src != MPI_PROC_NULL)
+    source = comm->group()->actor(src)->get_pid();
   return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
-                     src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
-                     simgrid::s4u::this_actor::get_pid(), tag, comm,
+                     source, simgrid::s4u::this_actor::get_pid(), tag, comm,
                      MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
 }
 
@@ -253,9 +271,10 @@ MPI_Request Request::ibsend(const void *buf, int count, MPI_Datatype datatype, i
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
-                        comm->group()->actor(dst)->get_pid(), tag, comm,
+                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                         MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND);
-  request->start();
+  if(dst != MPI_PROC_NULL)
+    request->start();
   return request;
 }
 
@@ -263,9 +282,10 @@ MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, in
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
-                        comm->group()->actor(dst)->get_pid(), tag, comm,
+                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                         MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND);
-  request->start();
+  if(dst != MPI_PROC_NULL)
+    request->start();
   return request;
 }
 
@@ -273,9 +293,10 @@ MPI_Request Request::issend(const void *buf, int count, MPI_Datatype datatype, i
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
-                        comm->group()->actor(dst)->get_pid(), tag, comm,
+                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                         MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND);
-  request->start();
+  if(dst != MPI_PROC_NULL)
+    request->start();
   return request;
 }
 
@@ -283,28 +304,37 @@ MPI_Request Request::issend(const void *buf, int count, MPI_Datatype datatype, i
 MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  int source = MPI_PROC_NULL;
+  if (src == MPI_ANY_SOURCE)
+    source = MPI_ANY_SOURCE;
+  else if (src != MPI_PROC_NULL)
+    source = comm->group()->actor(src)->get_pid();
   request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
-                        src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->get_pid(),
-                        simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
-  request->start();
+                        source, simgrid::s4u::this_actor::get_pid(), tag, comm,
+                        MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
+  if(src != MPI_PROC_NULL)
+    request->start();
   return request;
 }
 
-void Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
+int Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   request = irecv(buf, count, datatype, src, tag, comm);
-  wait(&request,status);
+  int retval = wait(&request,status);
   request = nullptr;
+  return retval;
 }
 
 void Request::bsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
-                        comm->group()->actor(dst)->get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);
+                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL,
+                        tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);
 
-  request->start();
+  if(dst != MPI_PROC_NULL)
+    request->start();
   wait(&request, MPI_STATUS_IGNORE);
   request = nullptr;
 }
 
@@ -313,9 +343,10 @@ void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, i
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
-                        comm->group()->actor(dst)->get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);
-
-  request->start();
+                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL,
+                        tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);
+  if(dst != MPI_PROC_NULL)
+    request->start();
   wait(&request, MPI_STATUS_IGNORE);
   request = nullptr;
 }
 
@@ -324,10 +355,11 @@ void Request::ssend(const void *buf, int count, MPI_Datatype datatype, int dst,
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
-                        comm->group()->actor(dst)->get_pid(), tag, comm,
+                        dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, tag, comm,
                         MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND);
 
-  request->start();
+  if(dst != MPI_PROC_NULL)
+    request->start();
   wait(&request,MPI_STATUS_IGNORE);
   request = nullptr;
 }
 
@@ -336,13 +368,20 @@ void Request::sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype
                        void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag, MPI_Comm comm,
                        MPI_Status * status)
 {
+  int source = MPI_PROC_NULL;
+  if (src == MPI_ANY_SOURCE)
+    source = MPI_ANY_SOURCE;
+  else if (src != MPI_PROC_NULL)
+    source = comm->group()->actor(src)->get_pid();
+  int destination = dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL;
+
   std::array<MPI_Request, 2> requests;
   std::array<MPI_Status, 2> stats;
   int myid = simgrid::s4u::this_actor::get_pid();
-  if ((comm->group()->actor(dst)->get_pid() == myid) && (comm->group()->actor(src)->get_pid() == myid)) {
+  if ((destination == myid) && (source == myid)) {
     Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
     if (status != MPI_STATUS_IGNORE) {
-      status->MPI_SOURCE = src;
+      status->MPI_SOURCE = source;
       status->MPI_TAG = recvtag;
       status->MPI_ERROR = MPI_SUCCESS;
       status->count = sendcount * sendtype->size();
@@ -375,6 +414,8 @@ void Request::start()
   flags_ &= ~MPI_REQ_FINISHED;
   this->ref();
 
+  // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
+  real_size_=size_;
   if ((flags_ & MPI_REQ_RECV) != 0) {
     this->print_request("New recv");
 
@@ -418,8 +459,6 @@ void Request::start()
       }
     }
 
-    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
-    real_size_=size_;
     action_   = simcall_comm_irecv(
         process->get_actor()->get_impl(), mailbox->get_impl(), buf_, &real_size_, &match_recv,
         process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
@@ -510,11 +549,10 @@ void Request::start()
       XBT_DEBUG("Send request %p is in the large mailbox %s (buf: %p)", this, mailbox->get_cname(), buf_);
     }
 
-    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
-    real_size_=size_;
     size_t payload_size_ = size_ + 16; // MPI envelope size (tag+dest+communicator)
-    action_ = simcall_comm_isend(
-        simgrid::s4u::Actor::by_pid(src_)->get_impl(), mailbox->get_impl(), payload_size_, -1.0, buf, real_size_, &match_send,
+    action_   = simcall_comm_isend(
+        simgrid::kernel::actor::ActorImpl::by_pid(src_), mailbox->get_impl(), payload_size_, -1.0, buf, real_size_,
+        &match_send,
         &xbt_free_f, // how to free the userdata if a detached send fails
         process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback,
         this, // detach if msg size < eager/rdv switch limit
@@ -538,14 +576,14 @@ void Request::startall(int count, MPI_Request * requests)
     return;
 
   for(int i = 0; i < count; i++) {
-    requests[i]->start();
+    if(requests[i]->src_ != MPI_PROC_NULL && requests[i]->dst_ != MPI_PROC_NULL)
+      requests[i]->start();
   }
 }
 
 void Request::cancel()
 {
-  if(cancelled_!=-1)
-    cancelled_=1;
+  this->flags_ |= MPI_REQ_CANCELLED;
   if (this->action_ != nullptr)
     (boost::static_pointer_cast<kernel::activity::CommImpl>(this->action_))->cancel();
 }
@@ -579,7 +617,7 @@ int Request::test(MPI_Request * request, MPI_Status * status, int* flag) {
   Status::empty(status);
   *flag = 1;
   if (((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) == 0) {
-    if ((*request)->action_ != nullptr && (*request)->cancelled_ != 1){
+    if ((*request)->action_ != nullptr && ((*request)->flags_ & MPI_REQ_CANCELLED) == 0){
       try{
        *flag = simcall_comm_test((*request)->action_.get());
      } catch (const Exception&) {
@@ -765,7 +803,7 @@ void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status*
   double maxrate = smpi_cfg_iprobe_cpu_usage();
   auto request = new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->get_pid(),
-                             simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV);
+                             simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PROBE);
   if (smpi_iprobe_sleep > 0) {
     /** Compute the number of flops we will sleep **/
     s4u::this_actor::exec_init(/*nsleeps: See comment above */ nsleeps *
@@ -822,8 +860,7 @@ void Request::finish_wait(MPI_Request* request, MPI_Status * status)
 {
   MPI_Request req = *request;
   Status::empty(status);
-
-  if (req->cancelled_==1){
+  if((req->flags_ & MPI_REQ_CANCELLED) != 0 && (req->flags_ & MPI_REQ_MATCHED) == 0) {
     if (status!=MPI_STATUS_IGNORE)
       status->cancelled=1;
     if(req->detached_sender_ != nullptr)
@@ -833,11 +870,16 @@ void Request::finish_wait(MPI_Request* request, MPI_Status * status)
   }
 
   if ((req->flags_ & (MPI_REQ_PREPARED | MPI_REQ_GENERALIZED | MPI_REQ_FINISHED)) == 0) {
-    if(status != MPI_STATUS_IGNORE) {
-      int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
-      status->MPI_SOURCE = req->comm_->group()->rank(src);
-      status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
-      status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
+    if (status != MPI_STATUS_IGNORE) {
+      if (req->src_ == MPI_PROC_NULL || req->dst_ == MPI_PROC_NULL) {
+        Status::empty(status);
+        status->MPI_SOURCE = MPI_PROC_NULL;
+      } else {
+        int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
+        status->MPI_SOURCE = req->comm_->group()->rank(src);
+        status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
+        status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
+      }
       // this handles the case where size in receive differs from size in send
       status->count = req->real_size_;
     }
@@ -892,7 +934,24 @@ void Request::finish_wait(MPI_Request* request, MPI_Status * status)
   if (req->flags_ & MPI_REQ_PERSISTENT)
     req->action_ = nullptr;
   req->flags_ |= MPI_REQ_FINISHED;
-  unref(request);
+
+  if (req->truncated_) {
+    char error_string[MPI_MAX_ERROR_STRING];
+    int error_size;
+    PMPI_Error_string(MPI_ERR_TRUNCATE, error_string, &error_size);
+    MPI_Errhandler err = (req->comm_) ? (req->comm_)->errhandler() : MPI_ERRHANDLER_NULL;
+    if (err == MPI_ERRHANDLER_NULL || err == MPI_ERRORS_RETURN)
+      XBT_WARN("recv - returned %.*s instead of MPI_SUCCESS", error_size, error_string);
+    else if (err == MPI_ERRORS_ARE_FATAL)
+      xbt_die("recv - returned %.*s instead of MPI_SUCCESS", error_size, error_string);
+    else
+      err->call((req->comm_), MPI_ERR_TRUNCATE);
+    if (err != MPI_ERRHANDLER_NULL)
+      simgrid::smpi::Errhandler::unref(err);
+    MC_assert(not MC_is_active()); /* Only fail in MC mode */
+  }
+  if(req->src_ != MPI_PROC_NULL && req->dst_ != MPI_PROC_NULL)
+    unref(request);
 }
 
 int Request::wait(MPI_Request * request, MPI_Status * status)
@@ -901,6 +960,15 @@ int Request::wait(MPI_Request * request, MPI_Status * status)
   xbt_assert(*request != MPI_REQUEST_NULL);
 
   int ret=MPI_SUCCESS;
+
+  if((*request)->src_ == MPI_PROC_NULL || (*request)->dst_ == MPI_PROC_NULL){
+    if (status != MPI_STATUS_IGNORE) {
+      Status::empty(status);
+      status->MPI_SOURCE = MPI_PROC_NULL;
+    }
+    (*request)=MPI_REQUEST_NULL;
+    return ret;
+  }
   // Are we waiting on a request meant for non blocking collectives ?
   // If so, wait for all the subrequests.
   if ((*request)->nbc_requests_size_>0){
@@ -960,6 +1028,9 @@ int Request::wait(MPI_Request * request, MPI_Status * status)
       ret = ((*request)->generalized_funcs)->query_fn(((*request)->generalized_funcs)->extra_state, mystatus);
   }
 
+  if ((*request)->truncated_)
+    ret = MPI_ERR_TRUNCATE;
+
   finish_wait(request, status); // may invalidate *request
   if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & MPI_REQ_NON_PERSISTENT) != 0))
     *request = MPI_REQUEST_NULL;
@@ -968,12 +1039,11 @@ int Request::wait(MPI_Request * request, MPI_Status * status)
 
 int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
 {
-  std::vector<simgrid::kernel::activity::CommImpl*> comms;
-  comms.reserve(count);
   int index = MPI_UNDEFINED;
 
   if(count > 0) {
     // Wait for a request to complete
+    std::vector<simgrid::kernel::activity::CommImpl*> comms;
     std::vector<int> map;
     XBT_DEBUG("Wait for one of %d", count);
     for(int i = 0; i < count; i++) {
@@ -985,7 +1055,7 @@ int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
         map.push_back(i);
       } else {
         // This is a finished detached request, let's return this one
-        comms.clear(); // so we don't do the waitany call
+        comms.clear(); // don't do the waitany call afterwards
         index = i;
         finish_wait(&requests[i], status); // cleanup if refcount = 0
         if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
@@ -1000,7 +1070,7 @@ int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
       try{
         i = simcall_comm_waitany(comms.data(), comms.size(), -1);
       } catch (const Exception&) {
-        XBT_INFO("request cancelled ");
+        XBT_INFO("request cancelled");
         i = -1;
       }
@@ -1132,7 +1202,7 @@ int Request::get_status(const Request* req, int* flag, MPI_Status* status)
   *flag=0;
 
   if(req != MPI_REQUEST_NULL && req->action_ != nullptr) {
-    req->iprobe(req->src_, req->tag_, req->comm_, flag, status);
+    req->iprobe(req->comm_->group()->rank(req->src_), req->tag_, req->comm_, flag, status);
    if(*flag)
        return MPI_SUCCESS;
   }
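
The behaviors this patch introduces are easiest to check from plain MPI user code. The recurring guard dst != MPI_PROC_NULL ? comm->group()->actor(dst)->get_pid() : MPI_PROC_NULL, the skipped request->start() calls, and the early return added to Request::wait() all implement the MPI rule that point-to-point communication on MPI_PROC_NULL completes immediately with an empty status (MPI_SOURCE = MPI_PROC_NULL, MPI_TAG = MPI_ANY_TAG, zero count). A minimal sketch of that rule, as a hypothetical test program against any MPI implementation rather than part of this patch:

  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char* argv[])
  {
    MPI_Init(&argc, &argv);

    int buf = 42;
    MPI_Status status;
    // Both calls must return immediately without matching any peer.
    MPI_Send(&buf, 1, MPI_INT, MPI_PROC_NULL, 0, MPI_COMM_WORLD);
    MPI_Recv(&buf, 1, MPI_INT, MPI_PROC_NULL, 0, MPI_COMM_WORLD, &status);

    int count;
    MPI_Get_count(&status, MPI_INT, &count);
    // Expected: source == MPI_PROC_NULL, tag == MPI_ANY_TAG, count == 0.
    printf("source=%d tag=%d count=%d\n", status.MPI_SOURCE, status.MPI_TAG, count);

    MPI_Finalize();
    return 0;
  }

This is also why startall() now skips requests whose src_ or dst_ is MPI_PROC_NULL: such requests must never reach the mailbox machinery.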
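
Likewise, the new MPI_REQ_PROBE flag keeps probe requests out of the truncation check in match_common(), while finish_wait() and wait() now surface a genuine truncation as MPI_ERR_TRUNCATE through the communicator's error handler instead of completing silently. A hypothetical two-rank sketch of what user code should observe after this change (run with at least two ranks; MPI_ERRORS_RETURN makes the error visible as a return code):

  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char* argv[])
  {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    // Without this, the default MPI_ERRORS_ARE_FATAL handler aborts the run.
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    if (rank == 0) {
      int big[4] = {1, 2, 3, 4};
      MPI_Send(big, 4, MPI_INT, 1, 0, MPI_COMM_WORLD);
    } else if (rank == 1) {
      int small[2];
      // The posted buffer is smaller than the incoming message: truncation.
      int rc = MPI_Recv(small, 2, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      int err_class;
      MPI_Error_class(rc, &err_class);
      printf("error class %d (MPI_ERR_TRUNCATE is %d)\n", err_class, MPI_ERR_TRUNCATE);
    }

    MPI_Finalize();
    return 0;
  }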
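
Finally, replacing the tri-state cancelled_ integer with the MPI_REQ_CANCELLED and MPI_REQ_MATCHED flags makes the cancellation race explicit: match_common() sets MPI_REQ_MATCHED once the request is paired, and finish_wait() reports a cancellation only when MPI_REQ_CANCELLED is set and MPI_REQ_MATCHED is not. Seen from the MPI API, that is the standard race between MPI_Cancel and message matching (hypothetical sketch, not part of this patch):

  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char* argv[])
  {
    MPI_Init(&argc, &argv);

    int buf;
    MPI_Request req;
    MPI_Status status;
    // Post a receive that no sender will match...
    MPI_Irecv(&buf, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
    // ...then try to cancel it. The cancel only wins if no match happened yet.
    MPI_Cancel(&req);
    MPI_Wait(&req, &status);

    int cancelled;
    MPI_Test_cancelled(&status, &cancelled);
    printf("receive %s cancelled\n", cancelled ? "was" : "was not");

    MPI_Finalize();
    return 0;
  }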