-/* Copyright (c) 2007-2022. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2023. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_request.hpp"
-#include "mc/mc.h"
#include "private.hpp"
#include "simgrid/Exception.hpp"
#include "simgrid/s4u/ConditionVariable.hpp"
#include "src/kernel/activity/CommImpl.hpp"
#include "src/kernel/actor/ActorImpl.hpp"
#include "src/kernel/actor/SimcallObserver.hpp"
+#include "src/mc/mc.h"
#include "src/mc/mc_replay.hpp"
#include "src/smpi/include/smpi_actor.hpp"
#include <algorithm>
#include <array>
+#include <mutex> // std::scoped_lock and std::unique_lock
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");
static simgrid::config::Flag<double> smpi_test_sleep(
"smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
-std::vector<s_smpi_factor_t> smpi_ois_values;
-
extern std::function<void(simgrid::kernel::activity::CommImpl*, void*, size_t)> smpi_comm_copy_data_callback;
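The "smpi/test" Flag declared above registers a runtime configuration key, so the minimum duration injected into each MPI_Test call can be tuned without recompiling. An illustrative invocation (process count, platform file, hostfile and binary name are placeholders):

    smpirun --cfg=smpi/test:0.001 -np 2 -platform platform.xml -hostfile hostfile ./my_mpi_app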
namespace simgrid::smpi {
(stype->duplicated_datatype()!=MPI_DATATYPE_NULL && match_types(stype->duplicated_datatype(), rtype)) ||
(rtype->duplicated_datatype()!=MPI_DATATYPE_NULL && match_types(stype, rtype->duplicated_datatype())))
match = true;
- if (!match)
+ if (not match)
XBT_WARN("Mismatched datatypes : sending %s and receiving %s", stype->name().c_str(), rtype->name().c_str());
return match;
}
receiver->truncated_ = true;
}
//0-sized datatypes/counts should not interfere and match
- if ( sender->real_size_ != 0 && receiver->real_size_ != 0 &&
- !match_types(sender->type_, receiver->type_))
+ if (sender->real_size_ != 0 && receiver->real_size_ != 0 && not match_types(sender->type_, receiver->type_))
receiver->unmatched_types_ = true;
if (sender->detached_)
receiver->detached_sender_ = sender; // tie the sender to the receiver, as it is detached and has to be freed in the receiver
{
MPI_Request request = irecv(buf, count, datatype, src, tag, comm);
int retval = wait(&request,status);
- request = nullptr;
return retval;
}
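Request::recv() above is the textbook decomposition of a blocking receive into its nonblocking half plus a wait. The same shape at the public MPI level, as a minimal sketch (the helper name is hypothetical):

    #include <mpi.h>

    // Hypothetical helper mirroring Request::recv(): post the nonblocking
    // receive, then block until it matches and completes.
    static int blocking_recv(void* buf, int count, MPI_Datatype type, int src, int tag,
                             MPI_Comm comm, MPI_Status* status)
    {
      MPI_Request req;
      MPI_Irecv(buf, count, type, src, tag, comm, &req);
      return MPI_Wait(&req, status);
    }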
if(dst != MPI_PROC_NULL)
request->start();
wait(&request, MPI_STATUS_IGNORE);
- request = nullptr;
}
void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
if(dst != MPI_PROC_NULL)
request->start();
wait(&request, MPI_STATUS_IGNORE);
- request = nullptr;
}
void Request::ssend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
if(dst != MPI_PROC_NULL)
request->start();
wait(&request,MPI_STATUS_IGNORE);
- request = nullptr;
}
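The deleted "request = nullptr;" statements in these three send variants were dead stores: wait() already invalidates the handle passed to it, just as MPI_Wait resets a completed handle to MPI_REQUEST_NULL. A minimal sketch of that contract (the helper name is hypothetical):

    #include <cassert>
    #include <mpi.h>

    static void send_and_forget(const void* buf, int count, MPI_Datatype type, int dst, int tag,
                                MPI_Comm comm)
    {
      MPI_Request req;
      MPI_Isend(buf, count, type, dst, tag, comm, &req);
      MPI_Wait(&req, MPI_STATUS_IGNORE);
      assert(req == MPI_REQUEST_NULL); // MPI_Wait freed the request and reset the handle itself
    }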
void Request::sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
- simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
+ std::unique_lock<s4u::Mutex> mut_lock;
if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
- mut->lock();
+ mut_lock = std::unique_lock(*process->mailboxes_mutex());
bool is_probe = ((flags_ & MPI_REQ_PROBE) != 0);
flags_ |= MPI_REQ_PROBE;
XBT_DEBUG("yes there was something for us in the small mailbox");
}
}
- if(!is_probe)
+ if (not is_probe)
flags_ &= ~MPI_REQ_PROBE;
kernel::actor::CommIrecvSimcall observer{process->get_actor()->get_impl(),
mailbox->get_impl(),
&observer);
XBT_DEBUG("recv simcall posted");
-
- if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
- mut->unlock();
} else { /* the RECV flag was not set, so this is a send */
const simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
xbt_assert(process, "Actor pid=%ld is gone??", dst_);
if ((flags_ & MPI_REQ_SSEND) == 0 &&
((flags_ & MPI_REQ_RMA) != 0 || (flags_ & MPI_REQ_BSEND) != 0 ||
static_cast<int>(size_) < smpi_cfg_detached_send_thresh())) {
- void *oldbuf = nullptr;
detached_ = true;
XBT_DEBUG("Send request %p is detached", this);
this->ref();
if (not(type_->flags() & DT_FLAG_DERIVED)) {
- oldbuf = buf_;
+ void* oldbuf = buf_;
if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
if (smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_), buf_))
XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
XBT_DEBUG("sending size of %zu : sleep %f ", size_, sleeptime);
}
- simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
-
+ std::unique_lock<s4u::Mutex> mut_lock;
if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
- mut->lock();
+ mut_lock = std::unique_lock(*process->mailboxes_mutex());
if (not(smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)) {
mailbox = process->mailbox();
} else {
XBT_DEBUG("Yes there was something for us in the large mailbox");
}
- if(!is_probe)
+ if (not is_probe)
flags_ &= ~MPI_REQ_PROBE;
} else {
mailbox = process->mailbox();
boost::static_pointer_cast<kernel::activity::CommImpl>(action_)->set_tracing_category(
smpi_process()->get_tracing_category());
}
-
- if (smpi_cfg_async_small_thresh() != 0 || ((flags_ & MPI_REQ_RMA) != 0))
- mut->unlock();
}
}
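The recurring transformation in this hunk replaces paired lock()/unlock() calls with a default-constructed std::unique_lock that is armed only when the small-payload/RMA condition holds; whichever path then leaves the scope, including exceptional ones, releases the mutex automatically. A minimal standard-library sketch of the pattern (all names are illustrative):

    #include <mutex>

    std::mutex mailbox_mutex; // stands in for process->mailboxes_mutex()

    void post_message(bool needs_locking)
    {
      std::unique_lock<std::mutex> lock; // owns no mutex yet
      if (needs_locking)
        lock = std::unique_lock(mailbox_mutex); // acquire only when required
      // ... manipulate the mailbox ...
    } // released here if and only if it was taken, even on exceptions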
if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
if (not((*request)->flags_ & MPI_REQ_COMPLETE)) {
- ((*request)->generalized_funcs)->mutex->lock();
- ((*request)->generalized_funcs)->cond->wait(((*request)->generalized_funcs)->mutex);
- ((*request)->generalized_funcs)->mutex->unlock();
+ const std::scoped_lock lock(*(*request)->generalized_funcs->mutex);
+ (*request)->generalized_funcs->cond->wait((*request)->generalized_funcs->mutex);
}
MPI_Status tmp_status;
MPI_Status* mystatus;
int Request::get_status(const Request* req, int* flag, MPI_Status* status)
{
- *flag=0;
-
if(req != MPI_REQUEST_NULL && req->action_ != nullptr) {
req->iprobe(req->comm_->group()->rank(req->src_), req->tag_, req->comm_, flag, status);
if(*flag)
{
if ((not(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
return MPI_ERR_REQUEST;
- request->generalized_funcs->mutex->lock();
+ const std::scoped_lock lock(*request->generalized_funcs->mutex);
request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
request->generalized_funcs->cond->notify_one();
- request->generalized_funcs->mutex->unlock();
return MPI_SUCCESS;
}
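Together, the last two hunks form a classic monitor around MPI_REQ_COMPLETE: the waiter blocks on the condition variable while the completer flips the flag under the same mutex and then signals. SimGrid's s4u::ConditionVariable::wait() takes the mutex directly, but the standard-library shape of the handshake looks roughly like this (names are illustrative):

    #include <condition_variable>
    #include <mutex>

    std::mutex m;
    std::condition_variable cv;
    bool complete = false; // plays the role of MPI_REQ_COMPLETE

    void wait_for_completion()
    {
      std::unique_lock<std::mutex> lock(m);
      cv.wait(lock, [] { return complete; }); // releases m while blocked, re-checks on wakeup
    }

    void mark_complete()
    {
      {
        const std::scoped_lock lock(m); // set the flag under the lock to avoid lost wakeups
        complete = true;
      }
      cv.notify_one();
    }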