Small cleanups in parmap.hpp.
diff --git a/src/include/xbt/parmap.hpp b/src/include/xbt/parmap.hpp
index fc8f6da..51f4818 100644
--- a/src/include/xbt/parmap.hpp
+++ b/src/include/xbt/parmap.hpp
@@ -10,9 +10,11 @@
 #include "src/internal_config.h" // HAVE_FUTEX_H
 #include "src/kernel/context/Context.hpp"
+#include "src/simix/smx_private.hpp" /* simix_global */

 #include <boost/optional.hpp>
 #include <condition_variable>
+#include <functional>
 #include <mutex>
 #include <thread>
@@ -41,12 +43,10 @@ public:
   Parmap(const Parmap&) = delete;
   Parmap& operator=(const Parmap&) = delete;
   ~Parmap();
-  void apply(void (*fun)(T), const std::vector<T>& data);
+  void apply(std::function<void(T)>&& fun, const std::vector<T>& data);
   boost::optional<T> next();

 private:
-  enum Flag { PARMAP_WORK, PARMAP_DESTROY };
-
   /**
    * @brief Thread data transmission structure
    */
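
Note on the hunk above: taking a `std::function<void(T)>&&` instead of a raw `void (*)(T)` pointer lets callers hand apply() stateful callables such as capturing lambdas. A minimal usage sketch, assuming the `simgrid::xbt` namespace, the `XBT_PARMAP_DEFAULT` mode constant, and an include path reaching src/include (none of which is shown in this diff):

    // Hypothetical caller, not part of this commit.
    #include "xbt/parmap.hpp" // assumed include path
    #include <cstdio>
    #include <vector>

    void example()
    {
      simgrid::xbt::Parmap<int> parmap(4, XBT_PARMAP_DEFAULT);
      std::vector<int> data{1, 2, 3, 4, 5};
      int factor = 10; // captured state was impossible with void (*)(T)
      parmap.apply([factor](int x) { std::printf("%d\n", x * factor); }, data);
    }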
@@ -98,10 +98,10 @@ private:
 public:
   explicit PosixSynchro(Parmap<T>& parmap);
   ~PosixSynchro();
-  void master_signal();
-  void master_wait();
-  void worker_signal();
-  void worker_wait(unsigned round);
+  void master_signal() override;
+  void master_wait() override;
+  void worker_signal() override;
+  void worker_wait(unsigned round) override;

 private:
   std::condition_variable ready_cond;
@@ -114,40 +114,40 @@ private:
   class FutexSynchro : public Synchro {
   public:
     explicit FutexSynchro(Parmap<T>& parmap) : Synchro(parmap) {}
-    void master_signal();
-    void master_wait();
-    void worker_signal();
-    void worker_wait(unsigned);
+    void master_signal() override;
+    void master_wait() override;
+    void worker_signal() override;
+    void worker_wait(unsigned) override;

   private:
-    static void futex_wait(unsigned* uaddr, unsigned val);
-    static void futex_wake(unsigned* uaddr, unsigned val);
+    static void futex_wait(std::atomic_uint* uaddr, unsigned val);
+    static void futex_wake(std::atomic_uint* uaddr, unsigned val);
   };
 #endif

   class BusyWaitSynchro : public Synchro {
   public:
     explicit BusyWaitSynchro(Parmap<T>& parmap) : Synchro(parmap) {}
-    void master_signal();
-    void master_wait();
-    void worker_signal();
-    void worker_wait(unsigned);
+    void master_signal() override;
+    void master_wait() override;
+    void worker_signal() override;
+    void worker_wait(unsigned) override;
   };

-  static void* worker_main(void* arg);
+  static void worker_main(ThreadData* data);
   Synchro* new_synchro(e_xbt_parmap_mode_t mode);
   void work();

-  Flag status;         /**< is the parmap active or being destroyed? */
-  unsigned work_round; /**< index of the current round */
+  bool destroying = false;        /**< is the parmap being destroyed? */
+  std::atomic_uint work_round{0}; /**< index of the current round */
   std::vector<std::thread*> workers; /**< worker thread handlers */
   unsigned num_workers; /**< total number of worker threads including the controller */
   Synchro* synchro;     /**< synchronization object */
-  unsigned thread_counter = 0;    /**< number of workers that have done the work */
-  void (*fun)(const T) = nullptr; /**< function to run in parallel on each element of data */
+  std::atomic_uint thread_counter{0}; /**< number of workers that have done the work */
+  std::function<void(T)> fun;         /**< function to run in parallel on each element of data */
   const std::vector<T>* data = nullptr; /**< parameters to pass to fun in parallel */
-  std::atomic<unsigned> index; /**< index of the next element of data to pick */
+  std::atomic_uint index{0};   /**< index of the next element of data to pick */
 };

 /**
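
The futex helpers above now receive a `std::atomic_uint*` and pass it straight to the kernel (see the syscall wrappers further down), which is only sound if the atomic is lock-free and has the layout of a plain 32-bit word. A sketch of compile-time guards that would make this assumption explicit; they are not part of the commit, and `is_always_lock_free` requires C++17:

    #include <atomic>

    static_assert(sizeof(std::atomic_uint) == sizeof(unsigned),
                  "futex(2) operates on a plain 32-bit integer");
    #if __cplusplus >= 201703L
    static_assert(std::atomic_uint::is_always_lock_free,
                  "a locking atomic could not share the futex word");
    #endif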
@@ -160,18 +160,16 @@ template <typename T> Parmap<T>::Parmap(unsigned num_workers, e_xbt_parmap_mode_t mode)
   XBT_CDEBUG(xbt_parmap, "Create new parmap (%u workers)", num_workers);

   /* Initialize the thread pool data structure */
-  this->status     = PARMAP_WORK;
-  this->work_round = 0;
-  this->workers.reserve(num_workers);
+  this->workers.resize(num_workers);
   this->num_workers = num_workers;
   this->synchro     = new_synchro(mode);

   /* Create the pool of worker threads (the caller of apply() will be worker[0]) */
   this->workers[0] = nullptr;
-  XBT_ATTRIB_UNUSED unsigned int core_bind = 0;

   for (unsigned i = 1; i < num_workers; i++) {
-    this->workers[i] = new std::thread(worker_main, new ThreadData(*this, i));
+    ThreadData* data = new ThreadData(*this, i);
+    this->workers[i] = new std::thread(worker_main, data);

     /* Bind the worker to a core if possible */
 #if HAVE_PTHREAD_SETAFFINITY
@@ -183,13 +181,10 @@ template <typename T> Parmap<T>::Parmap(unsigned num_workers, e_xbt_parmap_mode_t mode)
     size_t size = sizeof(cpu_set_t);
 #endif
     pthread_t pthread = this->workers[i]->native_handle();
+    int core_bind = (i - 1) % std::thread::hardware_concurrency();
     CPU_ZERO(&cpuset);
     CPU_SET(core_bind, &cpuset);
     pthread_setaffinity_np(pthread, size, &cpuset);
-    if (core_bind != std::thread::hardware_concurrency() - 1)
-      core_bind++;
-    else
-      core_bind = 0;
 #endif
   }
 }
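
The binding logic above replaces the running `core_bind` counter with a direct computation: worker i (workers are numbered from 1) is pinned to core `(i - 1) % std::thread::hardware_concurrency()`, the same round-robin placement as before. A standalone sketch of the mapping, assuming the machine reports 4 cores:

    #include <cstdio>
    #include <thread>

    int main()
    {
      unsigned cores = std::thread::hardware_concurrency(); // assume 4
      for (unsigned i = 1; i < 6; i++)
        std::printf("worker %u -> core %u\n", i, (i - 1) % cores);
      // with 4 cores, workers 1..5 land on cores 0, 1, 2, 3, 0
    }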
@@ -199,13 +194,13 @@ template <typename T> Parmap<T>::Parmap(unsigned num_workers, e_xbt_parmap_mode_t mode)
  */
 template <typename T> Parmap<T>::~Parmap()
 {
-  status = PARMAP_DESTROY;
+  destroying = true;
   synchro->master_signal();
-  for (unsigned i = 1; i < num_workers; i++)
+  for (unsigned i = 1; i < num_workers; i++) {
     workers[i]->join();
-
-  workers.clear();
+    delete workers[i];
+  }
   delete synchro;
 }
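
The destructor above now pairs each join() with a delete of the `std::thread*` allocated in the constructor; the old code merely cleared the vector and leaked the thread objects. The order matters, since destroying a still-joinable std::thread calls std::terminate(). A minimal illustration:

    #include <thread>

    int main()
    {
      std::thread* t = new std::thread([] { /* work */ });
      t->join(); // the thread is no longer joinable...
      delete t;  // ...so deleting it is now safe
    }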
@@ -214,10 +209,10 @@ template <typename T> Parmap<T>::~Parmap()
  * @param fun the function to call in parallel
  * @param data each element of this vector will be passed as an argument to fun
  */
-template <typename T> void Parmap<T>::apply(void (*fun)(T), const std::vector<T>& data)
+template <typename T> void Parmap<T>::apply(std::function<void(T)>&& fun, const std::vector<T>& data)
 {
   /* Assign resources to worker threads (we are maestro here)*/
-  this->fun   = fun;
+  this->fun   = std::move(fun);
   this->data  = &data;
   this->index = 0;
   this->synchro->master_signal(); // maestro runs futex_wake to wake all the minions (the working threads)
@@ -290,12 +285,11 @@ template <typename T> typename Parmap<T>::Synchro* Parmap<T>::new_synchro(e_xbt_parmap_mode_t mode)
 }

 /** @brief Main function of a worker thread */
-template <typename T> void* Parmap<T>::worker_main(void* arg)
+template <typename T> void Parmap<T>::worker_main(ThreadData* data)
 {
-  ThreadData* data  = static_cast<ThreadData*>(arg);
   Parmap<T>& parmap = data->parmap;
   unsigned round    = 0;
-  smx_context_t context = SIMIX_context_new(std::function<void()>(), nullptr, nullptr);
+  kernel::context::Context* context = simix_global->context_factory->create_context(std::function<void()>(), nullptr);
   kernel::context::Context::set_current(context);
   XBT_CDEBUG(xbt_parmap, "New worker thread created");
@@ -304,7 +298,7 @@ template <typename T> void* Parmap<T>::worker_main(void* arg)
   while (1) {
     round++; // New scheduling round
     parmap.synchro->worker_wait(round);
-    if (parmap.status == PARMAP_DESTROY)
+    if (parmap.destroying)
       break;

     XBT_CDEBUG(xbt_parmap, "Worker %d got a job", data->worker_id);
@@ -315,7 +309,6 @@ template <typename T> void* Parmap<T>::worker_main(void* arg)
   /* We are destroying the parmap */
   delete context;
   delete data;
-  return nullptr;
 }

 template <typename T> Parmap<T>::PosixSynchro::PosixSynchro(Parmap<T>& parmap) : Synchro(parmap)
@@ -364,13 +357,13 @@ template <typename T> void Parmap<T>::PosixSynchro::worker_wait(unsigned round)
 }

 #if HAVE_FUTEX_H
-template <typename T> inline void Parmap<T>::FutexSynchro::futex_wait(unsigned* uaddr, unsigned val)
+template <typename T> inline void Parmap<T>::FutexSynchro::futex_wait(std::atomic_uint* uaddr, unsigned val)
 {
   XBT_CVERB(xbt_parmap, "Waiting on futex %p", uaddr);
   syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val, nullptr, nullptr, 0);
 }

-template <typename T> inline void Parmap<T>::FutexSynchro::futex_wake(unsigned* uaddr, unsigned val)
+template <typename T> inline void Parmap<T>::FutexSynchro::futex_wake(std::atomic_uint* uaddr, unsigned val)
 {
   XBT_CVERB(xbt_parmap, "Waking futex %p", uaddr);
   syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, val, nullptr, nullptr, 0);
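
These wrappers keep the raw futex(2) contract: FUTEX_WAIT puts the caller to sleep only if `*uaddr` still equals `val` and returns immediately otherwise, which is what makes the load/wait/reload loops below free of lost wake-ups. A self-contained sketch of that pattern (Linux-only; `wait_until` and the wake count of 1 are illustrative, not from the file):

    #include <atomic>
    #include <thread>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static std::atomic_uint counter{0};

    static void wait_until(unsigned target)
    {
      unsigned count = counter.load();
      while (count < target) {
        // Sleeps only if counter still equals count; a wake-up arriving
        // between load() and syscall() just makes the call return at once.
        syscall(SYS_futex, &counter, FUTEX_WAIT_PRIVATE, count, nullptr, nullptr, 0);
        count = counter.load();
      }
    }

    int main()
    {
      std::thread t([] {
        counter.store(2);
        syscall(SYS_futex, &counter, FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
      });
      wait_until(2);
      t.join();
    }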
@@ -378,25 +371,25 @@ template <typename T> inline void Parmap<T>::FutexSynchro::futex_wake(unsigned* uaddr, unsigned val)
 template <typename T> void Parmap<T>::FutexSynchro::master_signal()
 {
-  __atomic_store_n(&this->parmap.thread_counter, 1, __ATOMIC_SEQ_CST);
-  __atomic_add_fetch(&this->parmap.work_round, 1, __ATOMIC_SEQ_CST);
+  this->parmap.thread_counter.store(1);
+  this->parmap.work_round.fetch_add(1);
   /* wake all workers */
   futex_wake(&this->parmap.work_round, std::numeric_limits<int>::max());
 }

 template <typename T> void Parmap<T>::FutexSynchro::master_wait()
 {
-  unsigned count = __atomic_load_n(&this->parmap.thread_counter, __ATOMIC_SEQ_CST);
+  unsigned count = this->parmap.thread_counter.load();
   while (count < this->parmap.num_workers) {
     /* wait for all workers to be ready */
     futex_wait(&this->parmap.thread_counter, count);
-    count = __atomic_load_n(&this->parmap.thread_counter, __ATOMIC_SEQ_CST);
+    count = this->parmap.thread_counter.load();
   }
 }

 template <typename T> void Parmap<T>::FutexSynchro::worker_signal()
 {
-  unsigned count = __atomic_add_fetch(&this->parmap.thread_counter, 1, __ATOMIC_SEQ_CST);
+  unsigned count = this->parmap.thread_counter.fetch_add(1) + 1;
   if (count == this->parmap.num_workers) {
     /* all workers have finished, wake the controller */
     futex_wake(&this->parmap.thread_counter, std::numeric_limits<int>::max());
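
Two details make the rewrite above behavior-preserving: the std::atomic member functions default to memory_order_seq_cst, matching the __ATOMIC_SEQ_CST that was spelled out before, and fetch_add() returns the value before the increment (GCC's __atomic_fetch_add), while the old __atomic_add_fetch returned the value after it, hence the explicit `+ 1` in worker_signal(). A tiny check:

    #include <atomic>
    #include <cassert>

    int main()
    {
      std::atomic_uint counter{0};
      unsigned after = counter.fetch_add(1) + 1; // seq_cst by default
      assert(after == 1); // same as __atomic_add_fetch(&counter, 1, __ATOMIC_SEQ_CST)
    }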
@@ -405,37 +398,37 @@ template <typename T> void Parmap<T>::FutexSynchro::worker_signal()
 template <typename T> void Parmap<T>::FutexSynchro::worker_wait(unsigned round)
 {
-  unsigned work_round = __atomic_load_n(&this->parmap.work_round, __ATOMIC_SEQ_CST);
+  unsigned work_round = this->parmap.work_round.load();
   /* wait for more work */
   while (work_round != round) {
     futex_wait(&this->parmap.work_round, work_round);
-    work_round = __atomic_load_n(&this->parmap.work_round, __ATOMIC_SEQ_CST);
+    work_round = this->parmap.work_round.load();
   }
 }
 #endif

 template <typename T> void Parmap<T>::BusyWaitSynchro::master_signal()
 {
-  __atomic_store_n(&this->parmap.thread_counter, 1, __ATOMIC_SEQ_CST);
-  __atomic_add_fetch(&this->parmap.work_round, 1, __ATOMIC_SEQ_CST);
+  this->parmap.thread_counter.store(1);
+  this->parmap.work_round.fetch_add(1);
 }

 template <typename T> void Parmap<T>::BusyWaitSynchro::master_wait()
 {
-  while (__atomic_load_n(&this->parmap.thread_counter, __ATOMIC_SEQ_CST) < this->parmap.num_workers) {
+  while (this->parmap.thread_counter.load() < this->parmap.num_workers) {
     std::this_thread::yield();
   }
 }

 template <typename T> void Parmap<T>::BusyWaitSynchro::worker_signal()
 {
-  __atomic_add_fetch(&this->parmap.thread_counter, 1, __ATOMIC_SEQ_CST);
+  this->parmap.thread_counter.fetch_add(1);
 }

 template <typename T> void Parmap<T>::BusyWaitSynchro::worker_wait(unsigned round)
 {
   /* wait for more work */
-  while (__atomic_load_n(&this->parmap.work_round, __ATOMIC_SEQ_CST) != round) {
+  while (this->parmap.work_round.load() != round) {
     std::this_thread::yield();
   }
 }
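
For completeness, a miniature of the round-based handshake that BusyWaitSynchro implements, with one controller and one worker; num_workers counts the controller, so it is 2 here, and all names are illustrative:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    std::atomic_uint work_round{0};
    std::atomic_uint thread_counter{0};

    int main()
    {
      std::thread worker([] {
        unsigned round = 1;
        while (work_round.load() != round) // worker_wait(round)
          std::this_thread::yield();
        std::printf("worker executes round %u\n", round);
        thread_counter.fetch_add(1);       // worker_signal()
      });
      thread_counter.store(1);             // master_signal(): the controller counts itself
      work_round.fetch_add(1);
      while (thread_counter.load() < 2)    // master_wait() with num_workers == 2
        std::this_thread::yield();
      worker.join();
    }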