Stick to C++14's std::lock_guard in public headers; switch implementation files to C++17's std::scoped_lock (and std::unique_lock where a conditionally-held or condvar-compatible lock is needed).
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <mutex> /* std::mutex and std::lock_guard */
+#include <mutex> /* std::mutex and std::scoped_lock */
#include <simgrid/s4u.hpp> /* All of S4U */
XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "a sample log category");
static void competitor(int id, sg4::ConditionVariablePtr cv, sg4::MutexPtr mtx, std::shared_ptr<bool> ready)
{
XBT_INFO("Entering the race...");
- std::unique_lock lck(*mtx);
+ std::unique_lock lock(*mtx);
while (not *ready) {
auto now = sg4::Engine::get_clock();
- if (cv->wait_until(lck, now + (id+1)*0.25) == std::cv_status::timeout) {
+ if (cv->wait_until(lock, now + (id + 1) * 0.25) == std::cv_status::timeout) {
XBT_INFO("Out of wait_until (timeout)");
- }
- else {
+ } else {
XBT_INFO("Out of wait_until (YAY!)");
}
}
{
XBT_INFO("Are you ready? ...");
sg4::this_actor::sleep_for(3);
- std::unique_lock lck(*mtx);
+ const std::scoped_lock lock(*mtx);
XBT_INFO("Go go go!");
*ready = true;
cv->notify_all();
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <mutex> /* std::mutex and std::lock_guard */
+#include <mutex> /* std::mutex and std::scoped_lock */
#include <simgrid/s4u.hpp> /* All of S4U */
XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "a sample log category");
static void worker_fun(sg4::ConditionVariablePtr cv, sg4::MutexPtr mutex, std::string& data, bool& done)
{
- std::unique_lock lock(*mutex);
+ const std::scoped_lock lock(*mutex);
XBT_INFO("Start processing data which is '%s'.", data.c_str());
data += " after processing";
std::ref(done));
// wait for the worker
- cv->wait(std::unique_lock<sg4::Mutex>(*mutex), [&done]() { return done; });
+ cv->wait(std::unique_lock(*mutex), [&done]() { return done; });
XBT_INFO("data is now '%s'.", data.c_str());
worker->join();
#include "simgrid/s4u.hpp" /* All of S4U */
#include "xbt/config.hpp"
-#include <mutex> /* std::mutex and std::lock_guard */
+#include <mutex> /* std::mutex and std::scoped_lock */
namespace sg4 = simgrid::s4u;
mutex->unlock();
}
-static void workerLockGuard(sg4::MutexPtr mutex, int& result)
+static void workerScopedLock(sg4::MutexPtr mutex, int& result)
{
- // Simply use the std::lock_guard like this
+ // Simply use the std::scoped_lock like this
// It's like a lock() that would do the unlock() automatically when getting out of scope
- std::lock_guard lock(*mutex);
+ const std::scoped_lock lock(*mutex);
// then you are in a safe zone
- XBT_INFO("Hello s4u, I'm ready to compute after a lock_guard");
+ XBT_INFO("Hello s4u, I'm ready to compute after a scoped_lock");
// update the results
result += 1;
XBT_INFO("I'm done, good bye");
for (int i = 0; i < cfg_actor_count; i++) {
sg4::MutexPtr mutex = sg4::Mutex::create();
- sg4::Actor::create("worker", sg4::Host::by_name("Jupiter"), workerLockGuard, mutex, std::ref(result[i]));
+ sg4::Actor::create("worker", sg4::Host::by_name("Jupiter"), workerScopedLock, mutex, std::ref(result[i]));
sg4::Actor::create("worker", sg4::Host::by_name("Tremblay"), worker, mutex, std::ref(result[i]));
}
#!/usr/bin/env tesh
$ ${bindir:=.}/s4u-synchro-mutex
-> [Jupiter:worker:(1) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a lock_guard
+> [Jupiter:worker:(1) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a scoped_lock
> [Jupiter:worker:(1) 0.000000] [s4u_test/INFO] I'm done, good bye
-> [Jupiter:worker:(3) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a lock_guard
+> [Jupiter:worker:(3) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a scoped_lock
> [Jupiter:worker:(3) 0.000000] [s4u_test/INFO] I'm done, good bye
-> [Jupiter:worker:(5) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a lock_guard
+> [Jupiter:worker:(5) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a scoped_lock
> [Jupiter:worker:(5) 0.000000] [s4u_test/INFO] I'm done, good bye
-> [Jupiter:worker:(7) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a lock_guard
+> [Jupiter:worker:(7) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a scoped_lock
> [Jupiter:worker:(7) 0.000000] [s4u_test/INFO] I'm done, good bye
-> [Jupiter:worker:(9) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a lock_guard
+> [Jupiter:worker:(9) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a scoped_lock
> [Jupiter:worker:(9) 0.000000] [s4u_test/INFO] I'm done, good bye
-> [Jupiter:worker:(11) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a lock_guard
+> [Jupiter:worker:(11) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a scoped_lock
> [Jupiter:worker:(11) 0.000000] [s4u_test/INFO] I'm done, good bye
> [Tremblay:worker:(2) 0.000000] [s4u_test/INFO] Hello s4u, I'm ready to compute after a regular lock
> [Tremblay:worker:(2) 0.000000] [s4u_test/INFO] I'm done, good bye
*/
/** Set the maximum number of items the queue may hold.
 *
 * Thread-safe: the whole update runs under mutex_.
 * NOTE: std::lock_guard (C++14) is used deliberately instead of std::scoped_lock —
 * this lives in a public header, which must stay C++14-compatible.
 * @return this, to allow call chaining.
 */
ProducerConsumer* set_max_queue_size(unsigned int max_queue_size)
{
  const std::lock_guard<s4u::Mutex> lock(*mutex_);
  max_queue_size_ = max_queue_size;
  return this;
}
#include "simgrid/s4u/Host.hpp"
#include "simgrid/plugins/file_system.h"
+#include <mutex> // std::scoped_lock
+
#define FP_SIZE sizeof(MPI_Offset)
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_io, smpi, "Logging specific to SMPI (RMA operations)");
/** Read the shared file pointer, expressed in units of the elementary datatype.
 *
 * The shared pointer is guarded by shared_mutex_ because it is
 * (presumably) shared by every rank that opened this file — RAII lock
 * replaces the manual lock()/unlock() pair so the mutex is released on
 * every exit path.
 * @param offset [out] current shared position, in etype units.
 * @return MPI_SUCCESS always.
 */
int File::get_position_shared(MPI_Offset* offset) const
{
  const std::scoped_lock lock(*shared_mutex_);
  *offset = *shared_file_pointer_ / etype_->get_extent();
  return MPI_SUCCESS;
}
/** Move the shared file pointer (MPI_File_seek_shared semantics).
 *
 * Performs the local seek and then publishes the resulting position to
 * the shared pointer, all under shared_mutex_ so concurrent shared-pointer
 * users observe a consistent value. The RAII lock replaces the former
 * manual lock()/unlock() pair.
 * @param offset displacement, interpreted according to @p whence.
 * @param whence MPI_SEEK_SET / MPI_SEEK_CUR / MPI_SEEK_END.
 * @return MPI_SUCCESS always.
 */
int File::seek_shared(MPI_Offset offset, int whence)
{
  const std::scoped_lock lock(*shared_mutex_);
  seek(offset, whence);
  *shared_file_pointer_ = file_->tell();
  return MPI_SUCCESS;
}
/* }*/
/** Read from the file at the shared file pointer (MPI_File_read_shared semantics).
 *
 * The seek-read-update sequence is done atomically with respect to other
 * shared-pointer operations by holding shared_mutex_ for exactly that
 * sequence; a plain block scope bounds the RAII lock (clearer than an
 * `if (lock; true)` trick, same behavior). The final seek intentionally
 * happens AFTER the lock is released, matching the original
 * lock()/unlock() placement.
 */
int File::read_shared(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status)
{
  {
    const std::scoped_lock lock(*fh->shared_mutex_);
    fh->seek(*(fh->shared_file_pointer_), MPI_SEEK_SET);
    read(fh, buf, count, datatype, status);
    *(fh->shared_file_pointer_) = fh->file_->tell();
  }
  fh->seek(*(fh->shared_file_pointer_), MPI_SEEK_SET);
  return MPI_SUCCESS;
}
fh->seek(result, MPI_SEEK_SET);
int ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);
if (fh->comm_->rank() == fh->comm_->size() - 1) {
- fh->shared_mutex_->lock();
+ const std::scoped_lock lock(*fh->shared_mutex_);
*(fh->shared_file_pointer_)=fh->file_->tell();
- fh->shared_mutex_->unlock();
}
char c;
simgrid::smpi::colls::bcast(&c, 1, MPI_BYTE, fh->comm_->size() - 1, fh->comm_);
/** Write to the file at the shared file pointer (MPI_File_write_shared semantics).
 *
 * shared_mutex_ is held for the whole seek-write-update-seek sequence via a
 * RAII scoped_lock, which is released automatically at function exit — this
 * is equivalent to the former manual unlock() placed just before `return`.
 */
int File::write_shared(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status)
{
  const std::scoped_lock lock(*fh->shared_mutex_);
  XBT_DEBUG("Write shared on %s - Shared ptr before : %lld", fh->file_->get_path(), *(fh->shared_file_pointer_));
  fh->seek(*(fh->shared_file_pointer_), MPI_SEEK_SET);
  write(fh, const_cast<void*>(buf), count, datatype, status);
  *(fh->shared_file_pointer_) = fh->file_->tell();
  XBT_DEBUG("Write shared on %s - Shared ptr after : %lld", fh->file_->get_path(), *(fh->shared_file_pointer_));
  fh->seek(*(fh->shared_file_pointer_), MPI_SEEK_SET);
  return MPI_SUCCESS;
}
fh->seek(result, MPI_SEEK_SET);
int ret = fh->op_all<simgrid::smpi::File::write>(const_cast<void*>(buf), count, datatype, status);
if (fh->comm_->rank() == fh->comm_->size() - 1) {
- fh->shared_mutex_->lock();
+ const std::scoped_lock lock(*fh->shared_mutex_);
*(fh->shared_file_pointer_)=fh->file_->tell();
- fh->shared_mutex_->unlock();
}
char c;
simgrid::smpi::colls::bcast(&c, 1, MPI_BYTE, fh->comm_->size() - 1, fh->comm_);
#include <algorithm>
#include <array>
+#include <mutex> // std::scoped_lock and std::unique_lock
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (request)");
simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
- simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
+ std::unique_lock<s4u::Mutex> mut_lock;
if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
- mut->lock();
+ mut_lock = std::unique_lock(*process->mailboxes_mutex());
bool is_probe = ((flags_ & MPI_REQ_PROBE) != 0);
flags_ |= MPI_REQ_PROBE;
&observer);
XBT_DEBUG("recv simcall posted");
-
- if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
- mut->unlock();
} else { /* the RECV flag was not set, so this is a send */
const simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
xbt_assert(process, "Actor pid=%ld is gone??", dst_);
XBT_DEBUG("sending size of %zu : sleep %f ", size_, sleeptime);
}
- simgrid::s4u::MutexPtr mut = process->mailboxes_mutex();
-
+ std::unique_lock<s4u::Mutex> mut_lock;
if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
- mut->lock();
+ mut_lock = std::unique_lock(*process->mailboxes_mutex());
if (not(smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)) {
mailbox = process->mailbox();
boost::static_pointer_cast<kernel::activity::CommImpl>(action_)->set_tracing_category(
smpi_process()->get_tracing_category());
}
-
- if (smpi_cfg_async_small_thresh() != 0 || ((flags_ & MPI_REQ_RMA) != 0))
- mut->unlock();
}
}
if ((*request)->flags_ & MPI_REQ_GENERALIZED) {
if (not((*request)->flags_ & MPI_REQ_COMPLETE)) {
- ((*request)->generalized_funcs)->mutex->lock();
- ((*request)->generalized_funcs)->cond->wait(((*request)->generalized_funcs)->mutex);
- ((*request)->generalized_funcs)->mutex->unlock();
+ const std::scoped_lock lock(*(*request)->generalized_funcs->mutex);
+ (*request)->generalized_funcs->cond->wait((*request)->generalized_funcs->mutex);
}
MPI_Status tmp_status;
MPI_Status* mystatus;
{
if ((not(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
return MPI_ERR_REQUEST;
- request->generalized_funcs->mutex->lock();
+ const std::scoped_lock lock(*request->generalized_funcs->mutex);
request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
request->generalized_funcs->cond->notify_one();
- request->generalized_funcs->mutex->unlock();
return MPI_SUCCESS;
}
#include "src/mc/mc_replay.hpp"
#include <algorithm>
+#include <mutex> // std::scoped_lock
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_rma, smpi, "Logging specific to SMPI (RMA operations)");
if(request!=nullptr){
*request=sreq;
}else{
- mut_->lock();
+ const std::scoped_lock lock(*mut_);
requests_.push_back(sreq);
- mut_->unlock();
}
//push request to receiver's win
- recv_win->mut_->lock();
+ const std::scoped_lock recv_lock(*recv_win->mut_);
recv_win->requests_.push_back(rreq);
rreq->start();
- recv_win->mut_->unlock();
} else {
XBT_DEBUG("Entering MPI_Put from myself to myself, rank %d", target_rank);
Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
//start the send, with another process than us as sender.
sreq->start();
// push request to sender's win
- send_win->mut_->lock();
- send_win->requests_.push_back(sreq);
- send_win->mut_->unlock();
+ if (const std::scoped_lock send_lock(*send_win->mut_); true) {
+ send_win->requests_.push_back(sreq);
+ }
//start recv
rreq->start();
if(request!=nullptr){
*request=rreq;
}else{
- mut_->lock();
+ const std::scoped_lock lock(*mut_);
requests_.push_back(rreq);
- mut_->unlock();
}
} else {
Datatype::copy(send_addr, target_count, target_datatype, origin_addr, origin_count, origin_datatype);
// start send
sreq->start();
// push request to receiver's win
- recv_win->mut_->lock();
- recv_win->requests_.push_back(rreq);
- rreq->start();
- recv_win->mut_->unlock();
+ if (const std::scoped_lock recv_lock(*recv_win->mut_); true) {
+ recv_win->requests_.push_back(rreq);
+ rreq->start();
+ }
if (request != nullptr) {
*request = sreq;
} else {
- mut_->lock();
+ const std::scoped_lock lock(*mut_);
requests_.push_back(sreq);
- mut_->unlock();
}
// FIXME: The current implementation fails to ensure the correct ordering of the accumulate requests. The following
XBT_DEBUG("Entering MPI_Get_accumulate from %d", target_rank);
//need to be sure ops are correctly ordered, so finish request here ? slow.
MPI_Request req = MPI_REQUEST_NULL;
- send_win->atomic_mut_->lock();
+ const std::scoped_lock lock(*send_win->atomic_mut_);
get(result_addr, result_count, result_datatype, target_rank,
target_disp, target_count, target_datatype, &req);
if (req != MPI_REQUEST_NULL)
target_disp, target_count, target_datatype, op, &req);
if (req != MPI_REQUEST_NULL)
Request::wait(&req, MPI_STATUS_IGNORE);
- send_win->atomic_mut_->unlock();
return MPI_SUCCESS;
}
XBT_DEBUG("Entering MPI_Compare_and_swap with %d", target_rank);
MPI_Request req = MPI_REQUEST_NULL;
- send_win->atomic_mut_->lock();
+ const std::scoped_lock lock(*send_win->atomic_mut_);
get(result_addr, 1, datatype, target_rank,
target_disp, 1, datatype, &req);
if (req != MPI_REQUEST_NULL)
put(origin_addr, 1, datatype, target_rank,
target_disp, 1, datatype);
}
- send_win->atomic_mut_->unlock();
return MPI_SUCCESS;
}
// Without this, the vector could get redimensioned when another process pushes.
// This would result in the array used by Request::waitall() to be invalidated.
// Another solution would be to copy the data and cleanup the vector *before* Request::waitall
- mut_->lock();
+ const std::scoped_lock lock(*mut_);
//Finish own requests
int size = static_cast<int>(requests_.size());
if (size > 0) {
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
requests_.clear();
}
- mut_->unlock();
return size;
}
int Win::finish_comms(int rank){
// See comment about the mutex in finish_comms() above
- mut_->lock();
+ const std::scoped_lock lock(*mut_);
// Finish own requests
// Let's see if we're either the destination or the sender of this request
// because we only wait for requests that we are responsible for.
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
myreqqs.clear();
}
- mut_->unlock();
return size;
}
inline void release()
{
- std::unique_lock lock(mutex_);
+ const std::scoped_lock lock(mutex_);
++capa_;
condition_.notify_one();
}
return priority >= category->threshold;
static std::recursive_mutex log_cat_init_mutex;
- log_cat_init_mutex.lock();
+ const std::scoped_lock lock(log_cat_init_mutex);
XBT_DEBUG("Initializing category '%s' (firstChild=%s, nextSibling=%s)", category->name,
(category->firstChild ? category->firstChild->name : "none"),
}
category->initialized = 1;
- log_cat_init_mutex.unlock();
return priority >= category->threshold;
}
template <typename T> void Parmap<T>::PosixSynchro::master_signal()
{
- std::unique_lock lk(ready_mutex);
+ const std::scoped_lock lock(ready_mutex);
this->parmap.thread_counter = 1;
this->parmap.work_round++;
/* wake all workers */
/** Block the controller until every worker has checked in.
 *
 * Must use std::unique_lock (not scoped_lock): the predicate-taking
 * condition-variable wait needs a lock it can release and re-acquire.
 */
template <typename T> void Parmap<T>::PosixSynchro::master_wait()
{
  std::unique_lock lock(done_mutex);
  /* wait for all workers to be ready */
  done_cond.wait(lock, [this]() { return this->parmap.thread_counter >= this->parmap.num_workers; });
}
template <typename T> void Parmap<T>::PosixSynchro::worker_signal()
{
- std::unique_lock lk(done_mutex);
+ const std::scoped_lock lock(done_mutex);
this->parmap.thread_counter++;
if (this->parmap.thread_counter == this->parmap.num_workers) {
/* all workers have finished, wake the controller */
/** Block a worker until the controller starts round @p expected_round.
 *
 * Must use std::unique_lock (not scoped_lock): the predicate-taking
 * condition-variable wait needs a lock it can release and re-acquire.
 * @param expected_round the work round this worker is waiting for.
 */
template <typename T> void Parmap<T>::PosixSynchro::worker_wait(unsigned expected_round)
{
  std::unique_lock lock(ready_mutex);
  /* wait for more work */
  ready_cond.wait(lock, [this, expected_round]() { return this->parmap.work_round == expected_round; });
}
#if HAVE_FUTEX_H
#include "simgrid/s4u/Mailbox.hpp"
#include "simgrid/s4u/Mutex.hpp"
+#include <mutex> // std::unique_lock
+
XBT_LOG_NEW_DEFAULT_CATEGORY(mutex_handling, "Messages specific for this test");
static int receiver(const char* box_name)
auto* payload = new int(value);
auto mb = simgrid::s4u::Mailbox::by_name(box_name);
+ std::unique_lock<simgrid::s4u::Mutex> lock;
if (mutex)
- mutex->lock();
+ lock = std::unique_lock(*mutex);
mb->put(payload, 8);
-
- if (mutex)
- mutex->unlock();
-
return 0;
}