X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/014c65d0044e87e37b1ab403923401234e0605da..51c23076e2b42ff07dc167dea1cb0e3a4ab3cf68:/src/include/xbt/parmap.hpp

diff --git a/src/include/xbt/parmap.hpp b/src/include/xbt/parmap.hpp
index bc65ff9274..1bde553e5c 100644
--- a/src/include/xbt/parmap.hpp
+++ b/src/include/xbt/parmap.hpp
@@ -1,7 +1,6 @@
 /* A thread pool (C++ version). */
 
-/* Copyright (c) 2004-2017 The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2004-2018 The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -11,16 +10,11 @@
 #include "src/internal_config.h" // HAVE_FUTEX_H
 #include "src/kernel/context/Context.hpp"
-#include 
+#include "xbt/xbt_os_thread.h"
+
 #include 
-#include 
-#include 
-#include 
-#include 
-#include 
 
 #if HAVE_FUTEX_H
-#include 
 #include 
 #include 
 #endif
 
@@ -39,6 +33,7 @@ template <typename T> class Parmap {
 public:
   Parmap(unsigned num_workers, e_xbt_parmap_mode_t mode);
   Parmap(const Parmap&) = delete;
+  Parmap& operator=(const Parmap&) = delete;
   ~Parmap();
   void apply(void (*fun)(T), const std::vector<T>& data);
   boost::optional<T> next();
@@ -209,7 +204,7 @@ template <typename T> void Parmap<T>::apply(void (*fun)(T), const std::vector
   this->fun = fun;
   this->data = &data;
   this->index = 0;
-  this->synchro->master_signal(); // maestro runs futex_wait to wake all the minions (the working threads)
+  this->synchro->master_signal(); // maestro runs futex_wake to wake all the minions (the working threads)
   this->work(); // maestro works with its minions
   this->synchro->master_wait(); // When there is no more work to do, then maestro waits for the last minion to stop
   XBT_CDEBUG(xbt_parmap, "Job done"); // ... and proceeds
@@ -224,7 +219,7 @@ template <typename T> void Parmap<T>::apply(void (*fun)(T), const std::vector
  */
 template <typename T> boost::optional<T> Parmap<T>::next()
 {
-  unsigned index = this->index++;
+  unsigned index = this->index.fetch_add(1, std::memory_order_relaxed);
   if (index < this->data->size())
     return (*this->data)[index];
   else
@@ -236,11 +231,11 @@ template <typename T> boost::optional<T> Parmap<T>::next()
  */
 template <typename T> void Parmap<T>::work()
 {
-  unsigned index = this->index++;
   unsigned length = this->data->size();
+  unsigned index = this->index.fetch_add(1, std::memory_order_relaxed);
   while (index < length) {
     this->fun((*this->data)[index]);
-    index = this->index++;
+    index = this->index.fetch_add(1, std::memory_order_relaxed);
   }
 }
 
@@ -266,7 +261,7 @@ template <typename T> typename Parmap<T>::Synchro* Parmap<T>::new_synchro(e_xbt_
 #if HAVE_FUTEX_H
       res = new FutexSynchro(*this);
 #else
-      xbt_die("Fute is not available on this OS.");
+      xbt_die("Futex is not available on this OS.");
 #endif
       break;
     case XBT_PARMAP_BUSY_WAIT:
@@ -338,7 +333,7 @@ template <typename T> void Parmap<T>::PosixSynchro::master_signal()
 template <typename T> void Parmap<T>::PosixSynchro::master_wait()
 {
   xbt_os_mutex_acquire(done_mutex);
-  if (this->parmap.thread_counter < this->parmap.num_workers) {
+  while (this->parmap.thread_counter < this->parmap.num_workers) {
     /* wait for all workers to be ready */
     xbt_os_cond_wait(done_cond, done_mutex);
   }
@@ -360,7 +355,7 @@ template <typename T> void Parmap<T>::PosixSynchro::worker_wait(unsigned round)
 {
   xbt_os_mutex_acquire(ready_mutex);
   /* wait for more work */
-  if (this->parmap.work_round != round) {
+  while (this->parmap.work_round != round) {
     xbt_os_cond_wait(ready_cond, ready_mutex);
   }
   xbt_os_mutex_release(ready_mutex);
@@ -381,25 +376,25 @@ template <typename T> inline void Parmap<T>::FutexSynchro::futex_wake(unsigned*
 
 template <typename T> void Parmap<T>::FutexSynchro::master_signal()
 {
-  this->parmap.thread_counter = 1;
-  __sync_add_and_fetch(&this->parmap.work_round, 1);
+  __atomic_store_n(&this->parmap.thread_counter, 1, __ATOMIC_SEQ_CST);
+  __atomic_add_fetch(&this->parmap.work_round, 1, __ATOMIC_SEQ_CST);
   /* wake all workers */
   futex_wake(&this->parmap.work_round, std::numeric_limits::max());
 }
 
 template <typename T> void Parmap<T>::FutexSynchro::master_wait()
 {
-  unsigned count = this->parmap.thread_counter;
+  unsigned count = __atomic_load_n(&this->parmap.thread_counter, __ATOMIC_SEQ_CST);
   while (count < this->parmap.num_workers) {
     /* wait for all workers to be ready */
     futex_wait(&this->parmap.thread_counter, count);
-    count = this->parmap.thread_counter;
+    count = __atomic_load_n(&this->parmap.thread_counter, __ATOMIC_SEQ_CST);
   }
 }
 
 template <typename T> void Parmap<T>::FutexSynchro::worker_signal()
 {
-  unsigned count = __sync_add_and_fetch(&this->parmap.thread_counter, 1);
+  unsigned count = __atomic_add_fetch(&this->parmap.thread_counter, 1, __ATOMIC_SEQ_CST);
   if (count == this->parmap.num_workers) {
     /* all workers have finished, wake the controller */
     futex_wake(&this->parmap.thread_counter, std::numeric_limits::max());
@@ -408,37 +403,37 @@ template <typename T> void Parmap<T>::FutexSynchro::worker_signal()
 
 template <typename T> void Parmap<T>::FutexSynchro::worker_wait(unsigned round)
 {
-  unsigned work_round = this->parmap.work_round;
+  unsigned work_round = __atomic_load_n(&this->parmap.work_round, __ATOMIC_SEQ_CST);
   /* wait for more work */
   while (work_round != round) {
     futex_wait(&this->parmap.work_round, work_round);
-    work_round = this->parmap.work_round;
+    work_round = __atomic_load_n(&this->parmap.work_round, __ATOMIC_SEQ_CST);
   }
 }
 #endif
 
 template <typename T> void Parmap<T>::BusyWaitSynchro::master_signal()
 {
-  this->parmap.thread_counter = 1;
-  __sync_add_and_fetch(&this->parmap.work_round, 1);
+  __atomic_store_n(&this->parmap.thread_counter, 1, __ATOMIC_SEQ_CST);
+  __atomic_add_fetch(&this->parmap.work_round, 1, __ATOMIC_SEQ_CST);
 }
 
 template <typename T> void Parmap<T>::BusyWaitSynchro::master_wait()
 {
-  while (this->parmap.thread_counter < this->parmap.num_workers) {
+  while (__atomic_load_n(&this->parmap.thread_counter, __ATOMIC_SEQ_CST) < this->parmap.num_workers) {
     xbt_os_thread_yield();
   }
 }
 
 template <typename T> void Parmap<T>::BusyWaitSynchro::worker_signal()
 {
-  __sync_add_and_fetch(&this->parmap.thread_counter, 1);
+  __atomic_add_fetch(&this->parmap.thread_counter, 1, __ATOMIC_SEQ_CST);
 }
 
 template <typename T> void Parmap<T>::BusyWaitSynchro::worker_wait(unsigned round)
 {
   /* wait for more work */
-  while (this->parmap.work_round != round) {
+  while (__atomic_load_n(&this->parmap.work_round, __ATOMIC_SEQ_CST) != round) {
     xbt_os_thread_yield();
   }
 }
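The hunks above make three kinds of changes: the shared work index is now advanced with an explicit fetch_add(1, std::memory_order_relaxed), the legacy __sync_* builtins are replaced by __atomic_* builtins with an explicit memory order, and the if around each condition-variable wait becomes a while so that a spurious wakeup re-checks the predicate before proceeding. The sketch below is a minimal standalone illustration of the same relaxed fetch_add dispatch pattern; the helper name apply_parallel and its signature are assumptions made for this example, not code from the patch.

    // Minimal sketch (not SimGrid code): several workers pull indices from a
    // shared std::atomic<unsigned> with a relaxed fetch_add, the same dispatch
    // scheme as Parmap<T>::work()/next(). apply_parallel is an illustrative
    // name introduced for this example.
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    static void apply_parallel(void (*fun)(int), const std::vector<int>& data, unsigned num_workers)
    {
      std::atomic<unsigned> index(0); // shared cursor over `data`

      auto work = [&]() {
        // fetch_add returns the previous value, so every claimed index is unique;
        // relaxed ordering is enough because the counter carries no other state.
        unsigned i = index.fetch_add(1, std::memory_order_relaxed);
        while (i < data.size()) {
          fun(data[i]);
          i = index.fetch_add(1, std::memory_order_relaxed);
        }
      };

      std::vector<std::thread> workers;
      for (unsigned w = 0; w < num_workers; w++)
        workers.emplace_back(work);
      for (std::thread& t : workers)
        t.join();
    }

    int main()
    {
      std::vector<int> data{1, 2, 3, 4, 5, 6, 7, 8};
      apply_parallel([](int x) { std::printf("%d\n", x * x); }, data, 4);
      return 0;
    }

Relaxed ordering suffices in this sketch because the counter only hands out unique indices: the vector is fully constructed before the threads start, and thread creation and join provide the ordering needed to publish the data and collect the results.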