#include "src/internal_config.h" // HAVE_FUTEX_H
#include "src/kernel/context/Context.hpp"
+#include "src/simix/smx_private.hpp" /* simix_global */
#include <boost/optional.hpp>
#include <condition_variable>
+#include <functional>
#include <mutex>
#include <thread>
Parmap(const Parmap&) = delete;
Parmap& operator=(const Parmap&) = delete;
~Parmap();
- void apply(void (*fun)(T), const std::vector<T>& data);
+ void apply(std::function<void(T)>&& fun, const std::vector<T>& data);
boost::optional<T> next();
private:
- enum Flag { PARMAP_WORK, PARMAP_DESTROY };
-
/**
* @brief Thread data transmission structure
*/
public:
explicit PosixSynchro(Parmap<T>& parmap);
~PosixSynchro();
- void master_signal();
- void master_wait();
- void worker_signal();
- void worker_wait(unsigned round);
+ void master_signal() override;
+ void master_wait() override;
+ void worker_signal() override;
+ void worker_wait(unsigned round) override;
private:
std::condition_variable ready_cond;
class FutexSynchro : public Synchro {
public:
explicit FutexSynchro(Parmap<T>& parmap) : Synchro(parmap) {}
- void master_signal();
- void master_wait();
- void worker_signal();
- void worker_wait(unsigned);
+ void master_signal() override;
+ void master_wait() override;
+ void worker_signal() override;
+ void worker_wait(unsigned) override;
private:
static void futex_wait(std::atomic_uint* uaddr, unsigned val);
class BusyWaitSynchro : public Synchro {
public:
explicit BusyWaitSynchro(Parmap<T>& parmap) : Synchro(parmap) {}
- void master_signal();
- void master_wait();
- void worker_signal();
- void worker_wait(unsigned);
+ void master_signal() override;
+ void master_wait() override;
+ void worker_signal() override;
+ void worker_wait(unsigned) override;
};
- static void* worker_main(void* arg);
+ static void worker_main(ThreadData* data);
Synchro* new_synchro(e_xbt_parmap_mode_t mode);
void work();
- Flag status; /**< is the parmap active or being destroyed? */
- std::atomic_uint work_round; /**< index of the current round */
+ bool destroying = false; /**< is the parmap being destroyed? */
+ std::atomic_uint work_round{0}; /**< index of the current round */
std::vector<std::thread*> workers; /**< worker thread handlers */
unsigned num_workers; /**< total number of worker threads including the controller */
Synchro* synchro; /**< synchronization object */
std::atomic_uint thread_counter{0}; /**< number of workers that have done the work */
- void (*fun)(const T) = nullptr; /**< function to run in parallel on each element of data */
+ std::function<void(T)> fun; /**< function to run in parallel on each element of data */
const std::vector<T>* data = nullptr; /**< parameters to pass to fun in parallel */
- std::atomic_uint index; /**< index of the next element of data to pick */
+ std::atomic_uint index{0}; /**< index of the next element of data to pick */
};
/**
XBT_CDEBUG(xbt_parmap, "Create new parmap (%u workers)", num_workers);
/* Initialize the thread pool data structure */
- this->status = PARMAP_WORK;
- this->work_round = 0;
this->workers.resize(num_workers);
this->num_workers = num_workers;
this->synchro = new_synchro(mode);
/* Create the pool of worker threads (the caller of apply() will be worker[0]) */
this->workers[0] = nullptr;
- XBT_ATTRIB_UNUSED unsigned int core_bind = 0;
for (unsigned i = 1; i < num_workers; i++) {
- this->workers[i] = new std::thread(worker_main, new ThreadData(*this, i));
+ ThreadData* data = new ThreadData(*this, i);
+ this->workers[i] = new std::thread(worker_main, data);
/* Bind the worker to a core if possible */
#if HAVE_PTHREAD_SETAFFINITY
size_t size = sizeof(cpu_set_t);
#endif
pthread_t pthread = this->workers[i]->native_handle();
+    unsigned int hw_threads = std::thread::hardware_concurrency(); /* may be 0 when unknown */
+    unsigned int core_bind  = hw_threads > 0 ? (i - 1) % hw_threads : 0;
CPU_ZERO(&cpuset);
CPU_SET(core_bind, &cpuset);
pthread_setaffinity_np(pthread, size, &cpuset);
- if (core_bind != std::thread::hardware_concurrency() - 1)
- core_bind++;
- else
- core_bind = 0;
#endif
}
}
*/
template <typename T> Parmap<T>::~Parmap()
{
- status = PARMAP_DESTROY;
+ destroying = true;
synchro->master_signal();
for (unsigned i = 1; i < num_workers; i++) {
* @param fun the function to call in parallel
* @param data each element of this vector will be passed as an argument to fun
*/
-template <typename T> void Parmap<T>::apply(void (*fun)(T), const std::vector<T>& data)
+template <typename T> void Parmap<T>::apply(std::function<void(T)>&& fun, const std::vector<T>& data)
{
/* Assign resources to worker threads (we are maestro here)*/
- this->fun = fun;
+ this->fun = std::move(fun);
this->data = &data;
this->index = 0;
this->synchro->master_signal(); // maestro runs futex_wake to wake all the minions (the working threads)
}
/** @brief Main function of a worker thread */
-template <typename T> void* Parmap<T>::worker_main(void* arg)
+template <typename T> void Parmap<T>::worker_main(ThreadData* data)
{
- ThreadData* data = static_cast<ThreadData*>(arg);
Parmap<T>& parmap = data->parmap;
unsigned round = 0;
- smx_context_t context = SIMIX_context_new(std::function<void()>(), nullptr, nullptr);
+ kernel::context::Context* context = simix_global->context_factory->create_context(std::function<void()>(), nullptr);
kernel::context::Context::set_current(context);
XBT_CDEBUG(xbt_parmap, "New worker thread created");
while (1) {
round++; // New scheduling round
parmap.synchro->worker_wait(round);
- if (parmap.status == PARMAP_DESTROY)
+ if (parmap.destroying)
break;
XBT_CDEBUG(xbt_parmap, "Worker %d got a job", data->worker_id);
/* We are destroying the parmap */
delete context;
delete data;
- return nullptr;
}
template <typename T> Parmap<T>::PosixSynchro::PosixSynchro(Parmap<T>& parmap) : Synchro(parmap)