/* A thread pool (C++ version). */
-/* Copyright (c) 2004-2019 The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2004-2020 The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
class PosixSynchro : public Synchro {
public:
- explicit PosixSynchro(Parmap<T>& parmap);
- ~PosixSynchro();
+ explicit PosixSynchro(Parmap<T>& parmap) : Synchro(parmap) {}
void master_signal() override;
void master_wait() override;
void worker_signal() override;
Synchro* new_synchro(e_xbt_parmap_mode_t mode);
void work();
- bool destroying; /**< is the parmap being destroyed? */
- std::atomic_uint work_round; /**< index of the current round */
+ bool destroying = false; /**< is the parmap being destroyed? */
+ std::atomic_uint work_round{0}; /**< index of the current round */
std::vector<std::thread*> workers; /**< worker thread handlers */
unsigned num_workers; /**< total number of worker threads including the controller */
Synchro* synchro; /**< synchronization object */
std::atomic_uint thread_counter{0}; /**< number of workers that have done the work */
std::function<void(T)> fun; /**< function to run in parallel on each element of data */
const std::vector<T>* data = nullptr; /**< parameters to pass to fun in parallel */
- std::atomic_uint index; /**< index of the next element of data to pick */
+ std::atomic_uint index{0}; /**< index of the next element of data to pick */
};
/**
XBT_CDEBUG(xbt_parmap, "Create new parmap (%u workers)", num_workers);
/* Initialize the thread pool data structure */
- this->destroying = false;
- this->work_round = 0;
this->workers.resize(num_workers);
this->num_workers = num_workers;
this->synchro = new_synchro(mode);
/* Create the pool of worker threads (the caller of apply() will be worker[0]) */
this->workers[0] = nullptr;
- XBT_ATTRIB_UNUSED unsigned int core_bind = 0;
for (unsigned i = 1; i < num_workers; i++) {
ThreadData* data = new ThreadData(*this, i);
size_t size = sizeof(cpu_set_t);
#endif
pthread_t pthread = this->workers[i]->native_handle();
+ int core_bind = std::thread::hardware_concurrency() > 0 ? (i - 1) % std::thread::hardware_concurrency() : 0;
CPU_ZERO(&cpuset);
CPU_SET(core_bind, &cpuset);
pthread_setaffinity_np(pthread, size, &cpuset);
- if (core_bind != std::thread::hardware_concurrency() - 1)
- core_bind++;
- else
- core_bind = 0;
#endif
}
}
delete data;
}
-template <typename T> Parmap<T>::PosixSynchro::PosixSynchro(Parmap<T>& parmap) : Synchro(parmap)
-{
-}
-
-template <typename T> Parmap<T>::PosixSynchro::~PosixSynchro()
-{
-}
-
template <typename T> void Parmap<T>::PosixSynchro::master_signal()
{
std::unique_lock<std::mutex> lk(ready_mutex);