-/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2022. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "mc/mc.h"
#include "private.hpp"
#include "simgrid/Exception.hpp"
+#include "simgrid/s4u/ConditionVariable.hpp"
#include "simgrid/s4u/Exec.hpp"
#include "simgrid/s4u/Mutex.hpp"
-#include "simgrid/s4u/ConditionVariable.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_host.hpp"
#include "smpi_op.hpp"
+#include "src/kernel/EngineImpl.hpp"
#include "src/kernel/activity/CommImpl.hpp"
+#include "src/kernel/actor/ActorImpl.hpp"
+#include "src/kernel/actor/SimcallObserver.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/smpi/include/smpi_actor.hpp"
std::vector<s_smpi_factor_t> smpi_ois_values;
-extern void (*smpi_comm_copy_data_callback)(simgrid::kernel::activity::CommImpl*, void*, size_t);
+extern std::function<void(simgrid::kernel::activity::CommImpl*, void*, size_t)> smpi_comm_copy_data_callback;
namespace simgrid{
namespace smpi{
-Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
+Request::Request(const void* buf, int count, MPI_Datatype datatype, aid_t src, aid_t dst, int tag, MPI_Comm comm,
unsigned flags, MPI_Op op)
: buf_(const_cast<void*>(buf))
- , old_type_(datatype)
+ , old_buf_(buf_)
+ , type_(datatype)
, size_(datatype->size() * count)
, src_(src)
, dst_(dst)
detached_ = false;
detached_sender_ = nullptr;
real_src_ = 0;
+ // get src_host if it's available (src is valid)
+ auto src_process = simgrid::s4u::Actor::by_pid(src);
+ if (src_process)
+ src_host_ = src_process->get_host();
truncated_ = false;
+ unmatched_types_ = false;
real_size_ = 0;
real_tag_ = 0;
if (flags & MPI_REQ_PERSISTENT)
refcount_ = 1;
else
refcount_ = 0;
- nbc_requests_=nullptr;
- nbc_requests_size_=0;
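+ // message_id_ tracks the sequence number used to match sends and receives in order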
+ message_id_ = 0;
init_buffer(count);
this->add_f();
}
((*request)->generalized_funcs)->free_fn(((*request)->generalized_funcs)->extra_state);
} else {
Comm::unref((*request)->comm_);
- Datatype::unref((*request)->old_type_);
+ Datatype::unref((*request)->type_);
}
if ((*request)->op_ != MPI_REPLACE && (*request)->op_ != MPI_OP_NULL)
Op::unref(&(*request)->op_);
}
}
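+// Check whether a send datatype and a receive datatype are compatible: identical types always
+// match, MPI_BYTE and MPI_PACKED match anything, derived types are accepted without a recursive
+// check (not implemented yet), and duplicated types are unwrapped and compared again.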
+bool Request::match_types(MPI_Datatype stype, MPI_Datatype rtype)
+{
+ bool match = false;
+ if ((stype == rtype) ||
+ //byte and packed always match with anything
+ (stype == MPI_PACKED || rtype == MPI_PACKED || stype == MPI_BYTE || rtype == MPI_BYTE) ||
+ //complex datatypes - we don't properly match these yet, as it would mean checking each subtype recursively.
+ (stype->flags() & DT_FLAG_DERIVED || rtype->flags() & DT_FLAG_DERIVED) ||
+ //duplicated datatypes, check if underlying is ok
+ (stype->duplicated_datatype() != MPI_DATATYPE_NULL && match_types(stype->duplicated_datatype(), rtype)) ||
+ (rtype->duplicated_datatype() != MPI_DATATYPE_NULL && match_types(stype, rtype->duplicated_datatype())))
+ match = true;
+ if (!match)
+ XBT_WARN("Mismatched datatypes : sending %s and receiving %s", stype->name().c_str(), rtype->name().c_str());
+ return match;
+}
+
bool Request::match_common(MPI_Request req, MPI_Request sender, MPI_Request receiver)
{
xbt_assert(sender, "Cannot match against null sender");
xbt_assert(receiver, "Cannot match against null receiver");
- XBT_DEBUG("Trying to match %s of sender src %d against %d, tag %d against %d, id %d against %d",
+ XBT_DEBUG("Trying to match %s of sender src %ld against %ld, tag %d against %d, id %d against %d",
(req == receiver ? "send" : "recv"), sender->src_, receiver->src_, sender->tag_, receiver->tag_,
sender->comm_->id(), receiver->comm_->id());
receiver->src_ == sender->src_) &&
((receiver->tag_ == MPI_ANY_TAG && sender->tag_ >= 0) || receiver->tag_ == sender->tag_)) {
// we match, we can transfer some values
- if (receiver->src_ == MPI_ANY_SOURCE)
+ if (receiver->src_ == MPI_ANY_SOURCE) {
receiver->real_src_ = sender->src_;
+ receiver->src_host_ = sender->src_host_;
+ }
if (receiver->tag_ == MPI_ANY_TAG)
receiver->real_tag_ = sender->tag_;
- if ((receiver->flags_ & MPI_REQ_PROBE) == 0 ){
- if (receiver->real_size_ < sender->real_size_){
- XBT_DEBUG("Truncating message - should not happen: receiver size : %zu < sender size : %zu", receiver->real_size_, sender->real_size_);
- receiver->truncated_ = true;
- } else if (receiver->real_size_ > sender->real_size_){
- receiver->real_size_=sender->real_size_;
- }
+ if ((receiver->flags_ & MPI_REQ_PROBE) == 0 && receiver->real_size_ < sender->real_size_) {
+ XBT_DEBUG("Truncating message - should not happen: receiver size : %zu < sender size : %zu", receiver->real_size_,
+ sender->real_size_);
+ receiver->truncated_ = true;
}
+ // 0-sized datatypes/counts always match: skip the type check for empty messages
+ if ( sender->real_size_ != 0 && receiver->real_size_ != 0 &&
+ !match_types(sender->type_, receiver->type_))
+ receiver->unmatched_types_ = true;
if (sender->detached_)
receiver->detached_sender_ = sender; // tie the sender to the receiver, as it is detached and has to be freed in
// the receiver
}
void Request::init_buffer(int count){
- void *old_buf = nullptr;
// FIXME Handle the case of a partial shared malloc.
// This part handles the problem of non-contiguous memory (for the unserialization at the reception)
- if ((((flags_ & MPI_REQ_RECV) != 0) && ((flags_ & MPI_REQ_ACCUMULATE) != 0)) || (old_type_->flags() & DT_FLAG_DERIVED)) {
+ if (not smpi_process()->replaying() &&
+ ((((flags_ & MPI_REQ_RECV) != 0) && ((flags_ & MPI_REQ_ACCUMULATE) != 0)) || (type_->flags() & DT_FLAG_DERIVED))) {
// This part handles the problem of non-contiguous memory
- old_buf = buf_;
+ old_buf_ = buf_;
if (count==0){
buf_ = nullptr;
}else {
- buf_ = xbt_malloc(count*old_type_->size());
- if ((old_type_->flags() & DT_FLAG_DERIVED) && ((flags_ & MPI_REQ_SEND) != 0)) {
- old_type_->serialize(old_buf, buf_, count);
+ buf_ = xbt_malloc(count*type_->size());
+ if ((type_->flags() & DT_FLAG_DERIVED) && ((flags_ & MPI_REQ_SEND) != 0)) {
+ type_->serialize(old_buf_, buf_, count);
}
}
}
- old_buf_ = old_buf;
}
bool Request::match_recv(void* a, void* b, simgrid::kernel::activity::CommImpl*)
{
auto ref = static_cast<MPI_Request>(a);
auto req = static_cast<MPI_Request>(b);
- return match_common(req, req, ref);
+ bool match = match_common(req, req, ref);
+ if (not match || ref->comm_ == MPI_COMM_UNINITIALIZED || ref->comm_->is_smp_comm())
+ return match;
+
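+ // Enforce in-order matching: only accept this message if its ID equals the number of messages
+ // already received for this (src, dst, tag) triple; probes do not update the counters.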
+ if (ref->comm_->get_received_messages_count(ref->comm_->group()->rank(req->src_),
+ ref->comm_->group()->rank(req->dst_), req->tag_) == req->message_id_) {
+ if (((ref->flags_ & MPI_REQ_PROBE) == 0) && ((req->flags_ & MPI_REQ_PROBE) == 0)) {
+ XBT_DEBUG("increasing count in comm %p, which was %u from pid %ld, to pid %ld with tag %d", ref->comm_,
+ ref->comm_->get_received_messages_count(ref->comm_->group()->rank(req->src_),
+ ref->comm_->group()->rank(req->dst_), req->tag_),
+ req->src_, req->dst_, req->tag_);
+ ref->comm_->increment_received_messages_count(ref->comm_->group()->rank(req->src_),
+ ref->comm_->group()->rank(req->dst_), req->tag_);
+ if (ref->real_size_ > req->real_size_) {
+ ref->real_size_ = req->real_size_;
+ }
+ }
+ } else {
+ match = false;
+ req->flags_ &= ~MPI_REQ_MATCHED;
+ ref->detached_sender_ = nullptr;
+ XBT_DEBUG("Refusing to match message, as its ID is not the one I expect. in comm %p, %u != %u, "
+ "from pid %ld to pid %ld, with tag %d",
+ ref->comm_,
+ ref->comm_->get_received_messages_count(ref->comm_->group()->rank(req->src_),
+ ref->comm_->group()->rank(req->dst_), req->tag_),
+ req->message_id_, req->src_, req->dst_, req->tag_);
+ }
+ return match;
}
bool Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*)
void Request::print_request(const char* message) const
{
- XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
- message, this, buf_, size_, src_, dst_, tag_, flags_);
+ XBT_VERB("%s request %p [buf = %p, size = %zu, src = %ld, dst = %ld, tag = %d, flags = %x]", message, this, buf_,
+ size_, src_, dst_, tag_, flags_);
}
/* factories, to hide the internal flags from the caller */
MPI_Request Request::bsend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_BSEND);
}
MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
MPI_Request Request::ssend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
MPI_Request Request::isend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
-
MPI_Request Request::rma_send_init(const void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
MPI_Op op)
{
- MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+ MPI_Request request;
if(op==MPI_OP_NULL){
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor_pid(src),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src),
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}else{
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor_pid(src),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src),
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED |
MPI_REQ_ACCUMULATE,
op);
MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
- int source = MPI_PROC_NULL;
+ aid_t source = MPI_PROC_NULL;
if (src == MPI_ANY_SOURCE)
source = MPI_ANY_SOURCE;
else if (src != MPI_PROC_NULL)
- source = comm->group()->actor_pid(src);
+ source = comm->group()->actor(src);
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
source,
simgrid::s4u::this_actor::get_pid(), tag, comm,
MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
MPI_Op op)
{
- MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- int source = MPI_PROC_NULL;
+ aid_t source = MPI_PROC_NULL;
if (src == MPI_ANY_SOURCE)
source = MPI_ANY_SOURCE;
else if (src != MPI_PROC_NULL)
- source = comm->group()->actor_pid(src);
+ source = comm->group()->actor(src);
+ MPI_Request request;
if(op==MPI_OP_NULL){
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}else{
request =
new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE, op);
}
return request;
MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
- int source = MPI_PROC_NULL;
+ aid_t source = MPI_PROC_NULL;
if (src == MPI_ANY_SOURCE)
source = MPI_ANY_SOURCE;
else if (src != MPI_PROC_NULL)
- source = comm->group()->actor_pid(src);
+ source = comm->group()->actor(src);
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
source, simgrid::s4u::this_actor::get_pid(), tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
MPI_Request Request::ibsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
- MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND);
+ auto request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
+ MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND);
if(dst != MPI_PROC_NULL)
request->start();
return request;
MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
- MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND);
+ auto request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
+ MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND);
if(dst != MPI_PROC_NULL)
request->start();
return request;
MPI_Request Request::issend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
- MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND);
+ auto request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
+ MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND);
if(dst != MPI_PROC_NULL)
request->start();
return request;
}
-
MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
- MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- int source = MPI_PROC_NULL;
+ aid_t source = MPI_PROC_NULL;
if (src == MPI_ANY_SOURCE)
source = MPI_ANY_SOURCE;
else if (src != MPI_PROC_NULL)
- source = comm->group()->actor_pid(src);
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
- source, simgrid::s4u::this_actor::get_pid(), tag, comm,
- MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
+ source = comm->group()->actor(src);
+ auto request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
+ simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
if(src != MPI_PROC_NULL)
request->start();
return request;
int Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
{
- MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = irecv(buf, count, datatype, src, tag, comm);
+ MPI_Request request = irecv(buf, count, datatype, src, tag, comm);
int retval = wait(&request,status);
request = nullptr;
return retval;
void Request::bsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
- MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);
+ auto request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
+ MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);
if(dst != MPI_PROC_NULL)
request->start();
void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
- MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);
+ auto request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
+ MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);
if(dst != MPI_PROC_NULL)
request->start();
wait(&request, MPI_STATUS_IGNORE);
void Request::ssend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
- MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND);
+ auto request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
+ MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND);
if(dst != MPI_PROC_NULL)
request->start();
void *recvbuf, int recvcount, MPI_Datatype recvtype, int src, int recvtag,
MPI_Comm comm, MPI_Status * status)
{
- int source = MPI_PROC_NULL;
+ aid_t source = MPI_PROC_NULL;
if (src == MPI_ANY_SOURCE)
source = MPI_ANY_SOURCE;
else if (src != MPI_PROC_NULL)
- source = comm->group()->actor_pid(src);
- int destination = dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL;
+ source = comm->group()->actor(src);
+ aid_t destination = dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL;
std::array<MPI_Request, 2> requests;
std::array<MPI_Status, 2> stats;
- int myid = simgrid::s4u::this_actor::get_pid();
+ aid_t myid = simgrid::s4u::this_actor::get_pid();
if ((destination == myid) && (source == myid)) {
Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
if (status != MPI_STATUS_IGNORE) {
//reinitialize temporary buffer for persistent requests
if(real_size_ > 0 && flags_ & MPI_REQ_FINISHED){
buf_ = old_buf_;
- init_buffer(real_size_/old_type_->size());
+ init_buffer(real_size_/type_->size());
}
flags_ &= ~MPI_REQ_PREPARED;
flags_ &= ~MPI_REQ_FINISHED;
if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
mut->lock();
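+ // Temporarily flag the request as a probe so that the iprobe calls used below to pick a
+ // mailbox do not update the message counters; the flag is restored once a mailbox is chosen.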
+ bool is_probe = ((flags_ & MPI_REQ_PROBE) != 0);
+ flags_ |= MPI_REQ_PROBE;
+
if (smpi_cfg_async_small_thresh() == 0 && (flags_ & MPI_REQ_RMA) == 0) {
mailbox = process->mailbox();
} else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < smpi_cfg_async_small_thresh()) {
mailbox = process->mailbox_small();
}
} else {
- XBT_DEBUG("yes there was something for us in the large mailbox");
+ XBT_DEBUG("yes there was something for us in the small mailbox");
}
} else {
mailbox = process->mailbox_small();
XBT_DEBUG("yes there was something for us in the small mailbox");
}
}
+ if (not is_probe)
+ flags_ &= ~MPI_REQ_PROBE;
+ kernel::actor::CommIrecvSimcall observer{process->get_actor()->get_impl(),
+ mailbox->get_impl(),
+ static_cast<unsigned char*>(buf_),
+ &real_size_,
+ &match_recv,
+ process->replaying() ? &smpi_comm_null_copy_buffer_callback
+ : smpi_comm_copy_data_callback,
+ this,
+ -1.0};
+ observer.set_tag(tag_);
+
+ action_ = kernel::actor::simcall_answered([&observer] { return kernel::activity::CommImpl::irecv(&observer); },
+ &observer);
- action_ = simcall_comm_irecv(
- process->get_actor()->get_impl(), mailbox->get_impl(), buf_, &real_size_, &match_recv,
- process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
XBT_DEBUG("recv simcall posted");
if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
mut->unlock();
} else { /* the RECV flag was not set, so this is a send */
const simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
- xbt_assert(process, "Actor pid=%d is gone??", dst_);
+ xbt_assert(process, "Actor pid=%ld is gone??", dst_);
if (TRACE_smpi_view_internals())
TRACE_smpi_send(src_, src_, dst_, tag_, size_);
this->print_request("New send");
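+ // Stamp this send with the next sequence number for (src, dst, tag) and bump the
+ // per-communicator counter so the receiver can match messages in order.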
+ message_id_ = comm_->get_sent_messages_count(comm_->group()->rank(src_), comm_->group()->rank(dst_), tag_);
+ comm_->increment_sent_messages_count(comm_->group()->rank(src_), comm_->group()->rank(dst_), tag_);
+
void* buf = buf_;
if ((flags_ & MPI_REQ_SSEND) == 0 &&
((flags_ & MPI_REQ_RMA) != 0 || (flags_ & MPI_REQ_BSEND) != 0 ||
detached_ = true;
XBT_DEBUG("Send request %p is detached", this);
this->ref();
- if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
+ if (not(type_->flags() & DT_FLAG_DERIVED)) {
oldbuf = buf_;
if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
- if ((smpi_cfg_privatization() != SmpiPrivStrategies::NONE) &&
- (static_cast<char*>(buf_) >= smpi_data_exe_start) &&
- (static_cast<char*>(buf_) < smpi_data_exe_start + smpi_data_exe_size)) {
+ if (smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_), buf_))
XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
- smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_));
- }
+
//we need this temporary buffer even for bsend, as it will be released in the copy callback and we don't have a way to differentiate it
//so actually ... don't use manually attached buffer space.
buf = xbt_malloc(size_);
double sleeptime = 0.0;
if (detached_ || ((flags_ & (MPI_REQ_ISEND | MPI_REQ_SSEND)) != 0)) { // issend should be treated as isend
// isend and send timings may be different
- sleeptime = ((flags_ & MPI_REQ_ISEND) != 0)
- ? simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->oisend(size_)
- : simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->osend(size_);
+ sleeptime =
+ ((flags_ & MPI_REQ_ISEND) != 0)
+ ? simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->oisend(
+ size_, simgrid::s4u::Actor::by_pid(src_)->get_host(), simgrid::s4u::Actor::by_pid(dst_)->get_host())
+ : simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->osend(
+ size_, simgrid::s4u::Actor::by_pid(src_)->get_host(),
+ simgrid::s4u::Actor::by_pid(dst_)->get_host());
}
if(sleeptime > 0.0){
if (not(smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)) {
mailbox = process->mailbox();
} else if (((flags_ & MPI_REQ_RMA) != 0) || static_cast<int>(size_) < smpi_cfg_async_small_thresh()) { // eager mode
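+ // As on the receive side, mark the request as a probe while selecting a mailbox so the
+ // iprobe below leaves the message-ID bookkeeping untouched.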
+ bool is_probe = ((flags_ & MPI_REQ_PROBE) != 0);
+ flags_ |= MPI_REQ_PROBE;
+
mailbox = process->mailbox();
XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %s?", mailbox->get_cname());
simgrid::kernel::activity::ActivityImplPtr action = mailbox->iprobe(1, &match_send, static_cast<void*>(this));
} else {
XBT_DEBUG("Yes there was something for us in the large mailbox");
}
+ if (not is_probe)
+ flags_ &= ~MPI_REQ_PROBE;
} else {
mailbox = process->mailbox();
XBT_DEBUG("Send request %p is in the large mailbox %s (buf: %p)", this, mailbox->get_cname(), buf_);
}
size_t payload_size_ = size_ + 16; // MPI envelope size (tag+dest+communicator)
- action_ = simcall_comm_isend(
- simgrid::kernel::actor::ActorImpl::by_pid(src_), mailbox->get_impl(), payload_size_, -1.0, buf, real_size_,
- &match_send,
+ kernel::actor::CommIsendSimcall observer{
+ simgrid::kernel::EngineImpl::get_instance()->get_actor_by_pid(src_), mailbox->get_impl(),
+ static_cast<double>(payload_size_), -1, static_cast<unsigned char*>(buf), real_size_, &match_send,
&xbt_free_f, // how to free the userdata if a detached send fails
process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this,
// detach if msg size < eager/rdv switch limit
- detached_);
+ detached_};
+ observer.set_tag(tag_);
+ action_ = kernel::actor::simcall_answered([&observer] { return kernel::activity::CommImpl::isend(&observer); },
+ &observer);
XBT_DEBUG("send simcall posted");
/* FIXME: detached sends are not traceable (action_ == nullptr) */
static int nsleeps = 1;
int ret = MPI_SUCCESS;
-
- // Are we testing a request meant for non blocking collectives ?
- // If so, test all the subrequests.
- if ((*request)->nbc_requests_size_>0){
- ret = testall((*request)->nbc_requests_size_, (*request)->nbc_requests_, flag, MPI_STATUSES_IGNORE);
- if(*flag){
- delete[] (*request)->nbc_requests_;
- (*request)->nbc_requests_size_=0;
- unref(request);
- }
- return ret;
- }
-
+
if(smpi_test_sleep > 0)
simgrid::s4u::this_actor::sleep_for(nsleeps * smpi_test_sleep);
Status::empty(status);
*flag = 1;
+
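+ // A non-blocking collective request only completes once all of its sub-requests do.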
+ if ((*request)->flags_ & MPI_REQ_NBC){
+ *flag = finish_nbc_requests(request, 1);
+ }
+
if (((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) == 0) {
if ((*request)->action_ != nullptr && ((*request)->flags_ & MPI_REQ_CANCELLED) == 0){
try{
- *flag = simcall_comm_test((*request)->action_.get());
+ kernel::actor::ActorImpl* issuer = kernel::actor::ActorImpl::self();
+ kernel::actor::ActivityTestSimcall observer{issuer, (*request)->action_.get()};
+ *flag = kernel::actor::simcall_answered(
+ [&observer] { return observer.get_activity()->test(observer.get_issuer()); }, &observer);
} catch (const Exception&) {
*flag = 0;
return ret;
}
}
- if (((*request)->flags_ & MPI_REQ_GENERALIZED) && !((*request)->flags_ & MPI_REQ_COMPLETE))
+ if (((*request)->flags_ & MPI_REQ_GENERALIZED) && not((*request)->flags_ & MPI_REQ_COMPLETE))
*flag=0;
if (*flag) {
finish_wait(request, status); // may invalidate *request
int Request::testany(int count, MPI_Request requests[], int *index, int* flag, MPI_Status * status)
{
- std::vector<simgrid::kernel::activity::CommImpl*> comms;
+ std::vector<simgrid::kernel::activity::ActivityImpl*> comms;
comms.reserve(count);
- int i;
*flag = 0;
int ret = MPI_SUCCESS;
*index = MPI_UNDEFINED;
std::vector<int> map; /** Maps all matching comms back to their location in requests **/
- for(i = 0; i < count; i++) {
+ for (int i = 0; i < count; i++) {
if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & MPI_REQ_PREPARED)) {
- comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
+ comms.push_back(requests[i]->action_.get());
map.push_back(i);
}
}
static int nsleeps = 1;
if(smpi_test_sleep > 0)
simgrid::s4u::this_actor::sleep_for(nsleeps * smpi_test_sleep);
+ ssize_t i;
try{
- i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
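+ // Observer-based testany: returns the index of a completed activity in comms, or -1 if none matches.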
+ kernel::actor::ActorImpl* issuer = kernel::actor::ActorImpl::self();
+ kernel::actor::ActivityTestanySimcall observer{issuer, comms};
+ i = kernel::actor::simcall_answered(
+ [&observer] {
+ return kernel::activity::ActivityImpl::test_any(observer.get_issuer(), observer.get_activities());
+ },
+ &observer);
} catch (const Exception&) {
XBT_DEBUG("Exception in testany");
return 0;
}
-
+
if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
*index = map[i];
- if (requests[*index] != MPI_REQUEST_NULL &&
- (requests[*index]->flags_ & MPI_REQ_GENERALIZED)
- && !(requests[*index]->flags_ & MPI_REQ_COMPLETE)) {
+ if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_GENERALIZED) &&
+ not(requests[*index]->flags_ & MPI_REQ_COMPLETE)) {
*flag=0;
} else {
finish_wait(&requests[*index],status);
ret=(requests[*index]->generalized_funcs)->query_fn((requests[*index]->generalized_funcs)->extra_state, mystatus);
}
- if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT))
+ if (requests[*index] != MPI_REQUEST_NULL && requests[*index]->flags_ & MPI_REQ_NBC){
+ *flag = finish_nbc_requests(&requests[*index], 1);
+ }
+
+ if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags_ & MPI_REQ_NON_PERSISTENT))
requests[*index] = MPI_REQUEST_NULL;
XBT_DEBUG("Testany - returning with index %d", *index);
*flag=1;
int ret = test(&requests[i], pstat, &flag);
if (flag){
flag=0;
- requests[i]=MPI_REQUEST_NULL;
}else{
*outflag=0;
}
- if (ret != MPI_SUCCESS)
+ if (ret != MPI_SUCCESS)
error = 1;
}else{
Status::empty(pstat);
status[i] = *pstat;
}
}
- if(error==1)
+ if (error == 1)
return MPI_ERR_IN_STATUS;
- else
+ else
return MPI_SUCCESS;
}
double speed = s4u::this_actor::get_host()->get_speed();
double maxrate = smpi_cfg_iprobe_cpu_usage();
auto request =
- new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor_pid(source),
+ new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source),
simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PROBE);
if (smpi_iprobe_sleep > 0) {
/** Compute the number of flops we will sleep **/
/*(seconds * flop/s -> total flops)*/ smpi_iprobe_sleep * speed * maxrate)
->set_name("iprobe")
/* Not the entire CPU can be used when iprobing: This is important for
- * the energy consumption caused by polling with iprobes.
+ * the energy consumption caused by polling with iprobes.
* Note also that the number of flops that was
* computed above contains a maxrate factor and is hence reduced (maxrate < 1)
*/
- ->set_bound(maxrate*speed)
+ ->set_bound(maxrate * speed)
->start()
->wait();
}
xbt_assert(request == MPI_REQUEST_NULL);
}
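+// Complete the sub-requests of a non-blocking collective: wait on them (test == 0) or merely
+// test them (test != 0), apply the reduction operator on received buffers when one is set,
+// release the temporary buffers, and return whether every sub-request has finished.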
+int Request::finish_nbc_requests(MPI_Request* request, int test)
+{
+ int flag = 1;
+ int ret = 0;
+ if (test == 0)
+ ret = waitall((*request)->nbc_requests_.size(), (*request)->nbc_requests_.data(), MPI_STATUSES_IGNORE);
+ else
+ ret = testall((*request)->nbc_requests_.size(), (*request)->nbc_requests_.data(), &flag, MPI_STATUSES_IGNORE);
+ if (ret != MPI_SUCCESS)
+ xbt_die("Failure when waiting on non-blocking collective sub-requests");
+ if (flag == 1) {
+ XBT_DEBUG("Finishing non-blocking collective request with %zu sub-requests", (*request)->nbc_requests_.size());
+ for (auto& req : (*request)->nbc_requests_) {
+ if ((*request)->buf_ != nullptr && req != MPI_REQUEST_NULL) { // reduce case
+ void* buf = req->buf_;
+ if ((*request)->type_->flags() & DT_FLAG_DERIVED)
+ buf = req->old_buf_;
+ if (req->flags_ & MPI_REQ_RECV) {
+ if ((*request)->op_ != MPI_OP_NULL) {
+ int count = (*request)->size_ / (*request)->type_->size();
+ (*request)->op_->apply(buf, (*request)->buf_, &count, (*request)->type_);
+ }
+ smpi_free_tmp_buffer(static_cast<unsigned char*>(buf));
+ }
+ }
+ if (req != MPI_REQUEST_NULL)
+ Request::unref(&req);
+ }
+ (*request)->nbc_requests_.clear();
+ }
+ return flag;
+}
+
void Request::finish_wait(MPI_Request* request, MPI_Status * status)
{
MPI_Request req = *request;
Status::empty(status);
status->MPI_SOURCE = MPI_PROC_NULL;
} else {
- int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
+ aid_t src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
status->MPI_SOURCE = req->comm_->group()->rank(src);
status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
//detached send will be finished at the other end
if (not(req->detached_ && ((req->flags_ & MPI_REQ_SEND) != 0))) {
req->print_request("Finishing");
- MPI_Datatype datatype = req->old_type_;
+ MPI_Datatype datatype = req->type_;
// FIXME Handle the case of a partial shared malloc.
- if (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) ||
- (datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
- if (not smpi_process()->replaying() && smpi_cfg_privatization() != SmpiPrivStrategies::NONE &&
- static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
- static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
+ if (not smpi_process()->replaying() &&
+ (((req->flags_ & MPI_REQ_ACCUMULATE) != 0) || (datatype->flags() & DT_FLAG_DERIVED))) {
+ if (smpi_switch_data_segment(simgrid::s4u::Actor::self(), req->old_buf_))
XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment ");
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
- }
if(datatype->flags() & DT_FLAG_DERIVED){
// This part handles the problem of non-contiguous memory for the unserialization at the reception
}
if (TRACE_smpi_view_internals() && ((req->flags_ & MPI_REQ_RECV) != 0)) {
- int rank = simgrid::s4u::this_actor::get_pid();
- int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
+ aid_t rank = simgrid::s4u::this_actor::get_pid();
+ aid_t src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
TRACE_smpi_recv(src_traced, rank,req->tag_);
}
if(req->detached_sender_ != nullptr){
//integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
- double sleeptime =
- simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->orecv(req->real_size());
+ simgrid::s4u::Host* dst_host = simgrid::s4u::Actor::by_pid(req->dst_)->get_host();
+ double sleeptime = simgrid::s4u::Actor::self()->get_host()->extension<simgrid::smpi::Host>()->orecv(
+ req->real_size(), req->src_host_, dst_host);
if (sleeptime > 0.0) {
simgrid::s4u::this_actor::sleep_for(sleeptime);
XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size_, sleeptime);
req->action_ = nullptr;
req->flags_ |= MPI_REQ_FINISHED;
- if (req->truncated_) {
+ if (req->truncated_ || req->unmatched_types_) {
char error_string[MPI_MAX_ERROR_STRING];
int error_size;
- PMPI_Error_string(MPI_ERR_TRUNCATE, error_string, &error_size);
+ int errkind;
+ if (req->truncated_)
+ errkind = MPI_ERR_TRUNCATE;
+ else
+ errkind = MPI_ERR_TYPE;
+ PMPI_Error_string(errkind, error_string, &error_size);
MPI_Errhandler err = (req->comm_) ? (req->comm_)->errhandler() : MPI_ERRHANDLER_NULL;
if (err == MPI_ERRHANDLER_NULL || err == MPI_ERRORS_RETURN)
XBT_WARN("recv - returned %.*s instead of MPI_SUCCESS", error_size, error_string);
else if (err == MPI_ERRORS_ARE_FATAL)
xbt_die("recv - returned %.*s instead of MPI_SUCCESS", error_size, error_string);
else
- err->call((req->comm_), MPI_ERR_TRUNCATE);
+ err->call((req->comm_), errkind);
if (err != MPI_ERRHANDLER_NULL)
simgrid::smpi::Errhandler::unref(err);
MC_assert(not MC_is_active()); /* Only fail in MC mode */
(*request)=MPI_REQUEST_NULL;
return ret;
}
- // Are we waiting on a request meant for non blocking collectives ?
- // If so, wait for all the subrequests.
- if ((*request)->nbc_requests_size_>0){
- ret = waitall((*request)->nbc_requests_size_, (*request)->nbc_requests_, MPI_STATUSES_IGNORE);
- for (int i = 0; i < (*request)->nbc_requests_size_; i++) {
- if((*request)->buf_!=nullptr && (*request)->nbc_requests_[i]!=MPI_REQUEST_NULL){//reduce case
- void * buf=(*request)->nbc_requests_[i]->buf_;
- if((*request)->old_type_->flags() & DT_FLAG_DERIVED)
- buf=(*request)->nbc_requests_[i]->old_buf_;
- if((*request)->nbc_requests_[i]->flags_ & MPI_REQ_RECV ){
- if((*request)->op_!=MPI_OP_NULL){
- int count=(*request)->size_/ (*request)->old_type_->size();
- (*request)->op_->apply(buf, (*request)->buf_, &count, (*request)->old_type_);
- }
- smpi_free_tmp_buffer(static_cast<unsigned char*>(buf));
- }
- }
- if((*request)->nbc_requests_[i]!=MPI_REQUEST_NULL)
- Request::unref(&((*request)->nbc_requests_[i]));
- }
- delete[] (*request)->nbc_requests_;
- (*request)->nbc_requests_size_=0;
- unref(request);
- (*request)=MPI_REQUEST_NULL;
- return ret;
- }
(*request)->print_request("Waiting");
if ((*request)->flags_ & (MPI_REQ_PREPARED | MPI_REQ_FINISHED)) {
if ((*request)->action_ != nullptr){
try{
// this is not a detached send
- simcall_comm_wait((*request)->action_.get(), -1.0);
- } catch (const Exception&) {
+ kernel::actor::ActorImpl* issuer = kernel::actor::ActorImpl::self();
+ kernel::actor::ActivityWaitSimcall observer{issuer, (*request)->action_.get(), -1};
+ kernel::actor::simcall_blocking([issuer, &observer] { observer.get_activity()->wait_for(issuer, -1); },
+ &observer);
+ } catch (const CancelException&) {
XBT_VERB("Request cancelled");
}
}
if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
- if(!((*request)->flags_ & MPI_REQ_COMPLETE)){
+ if (not((*request)->flags_ & MPI_REQ_COMPLETE)) {
((*request)->generalized_funcs)->mutex->lock();
((*request)->generalized_funcs)->cond->wait(((*request)->generalized_funcs)->mutex);
((*request)->generalized_funcs)->mutex->unlock();
if ((*request)->truncated_)
ret = MPI_ERR_TRUNCATE;
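+ // For a non-blocking collective, block until every sub-request has completed.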
+ if ((*request)->flags_ & MPI_REQ_NBC)
+ finish_nbc_requests(request, 0);
+
finish_wait(request, status); // may invalidate *request
if (*request != MPI_REQUEST_NULL && (((*request)->flags_ & MPI_REQ_NON_PERSISTENT) != 0))
*request = MPI_REQUEST_NULL;
if(count > 0) {
// Wait for a request to complete
- std::vector<simgrid::kernel::activity::CommImpl*> comms;
+ std::vector<simgrid::kernel::activity::ActivityImpl*> comms;
std::vector<int> map;
XBT_DEBUG("Wait for one of %d", count);
for(int i = 0; i < count; i++) {
not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
if (requests[i]->action_ != nullptr) {
XBT_DEBUG("Waiting any %p ", requests[i]);
- comms.push_back(static_cast<simgrid::kernel::activity::CommImpl*>(requests[i]->action_.get()));
+ comms.push_back(requests[i]->action_.get());
map.push_back(i);
} else {
// This is a finished detached request, let's return this one
comms.clear(); // don't do the waitany call afterwards
index = i;
+ if (requests[index]->flags_ & MPI_REQ_NBC)
+ finish_nbc_requests(&requests[index], 0);
finish_wait(&requests[i], status); // cleanup if refcount = 0
if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & MPI_REQ_NON_PERSISTENT))
requests[i] = MPI_REQUEST_NULL; // set to null
}
if (not comms.empty()) {
XBT_DEBUG("Enter waitany for %zu comms", comms.size());
- int i;
+ ssize_t i;
try{
- i = simcall_comm_waitany(comms.data(), comms.size(), -1);
- } catch (const Exception&) {
+ kernel::actor::ActorImpl* issuer = kernel::actor::ActorImpl::self();
+ kernel::actor::ActivityWaitanySimcall observer{issuer, comms, -1};
+ i = kernel::actor::simcall_blocking(
+ [&observer] {
+ kernel::activity::ActivityImpl::wait_any_for(observer.get_issuer(), observer.get_activities(),
+ observer.get_timeout());
+ },
+ &observer);
+ } catch (const CancelException&) {
XBT_INFO("request cancelled");
i = -1;
}
}
}
+
if (index==MPI_UNDEFINED)
Status::empty(status);
}
}
- if (not accumulates.empty()) {
- std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
- for (auto& req : accumulates) {
- finish_wait(&req, status);
- }
- }
+ std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
+ for (auto& req : accumulates)
+ finish_wait(&req, status);
return retvalue;
}
indices[count] = index;
count++;
for (int i = 0; i < incount; i++) {
- if (i!=index && requests[i] != MPI_REQUEST_NULL
- && not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
+ if (i != index && requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & MPI_REQ_FINISHED)) {
test(&requests[i], pstat,&flag);
if (flag==1){
indices[count] = i;
if(*flag)
return MPI_SUCCESS;
}
- if (req != MPI_REQUEST_NULL &&
- (req->flags_ & MPI_REQ_GENERALIZED)
- && !(req->flags_ & MPI_REQ_COMPLETE)) {
- *flag=0;
+ if (req != MPI_REQUEST_NULL && (req->flags_ & MPI_REQ_GENERALIZED) && not(req->flags_ & MPI_REQ_COMPLETE)) {
+ *flag = 0;
return MPI_SUCCESS;
}
*flag=1;
if(req != MPI_REQUEST_NULL &&
status != MPI_STATUS_IGNORE) {
- int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
+ aid_t src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
status->MPI_SOURCE = req->comm_->group()->rank(src);
status->MPI_TAG = req->tag_ == MPI_ANY_TAG ? req->real_tag_ : req->tag_;
status->MPI_ERROR = req->truncated_ ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
int Request::grequest_complete(MPI_Request request)
{
- if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
+ if ((not(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
return MPI_ERR_REQUEST;
request->generalized_funcs->mutex->lock();
request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
return MPI_SUCCESS;
}
-void Request::set_nbc_requests(MPI_Request* reqs, int size){
- nbc_requests_size_ = size;
- if (size > 0) {
+void Request::start_nbc_requests(std::vector<MPI_Request> reqs)
+{
+ if (not reqs.empty()) {
nbc_requests_ = reqs;
- } else {
- delete[] reqs;
- nbc_requests_ = nullptr;
+ Request::startall(reqs.size(), reqs.data());
}
}
-int Request::get_nbc_requests_size() const
-{
- return nbc_requests_size_;
-}
-
-MPI_Request* Request::get_nbc_requests() const
+std::vector<MPI_Request> Request::get_nbc_requests() const
{
return nbc_requests_;
}