-/* Copyright (c) 2004, 2005, 2007, 2009, 2010. The SimGrid Team.
+/* Copyright (c) 2004-2005, 2007, 2009-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "threadpool_private.h"
+#include "internal_config.h"
+#include <unistd.h>
+
+#ifndef _XBT_WIN32
+#include <sys/syscall.h>
+#endif
+
+#ifdef HAVE_FUTEX_H
+#include <linux/futex.h>
+#include <limits.h>
+#endif
+
+#include "xbt/parmap.h"
+#include "xbt/log.h"
+#include "xbt/function_types.h"
+#include "xbt/dynar.h"
+#include "xbt/xbt_os_thread.h"
+#include "xbt/sysdep.h"
+#include "simix/smx_private.h"
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_parmap, xbt, "parmap: parallel map");
+
+typedef enum {
+ XBT_PARMAP_WORK, /**< the parmap is active: workers process the current round */
+ XBT_PARMAP_DESTROY /**< the parmap is being destroyed: workers must exit */
+} e_xbt_parmap_flag_t;
+
+static void xbt_parmap_set_mode(xbt_parmap_t parmap, e_xbt_parmap_mode_t mode);
+static void *xbt_parmap_worker_main(void *parmap);
+static void xbt_parmap_work(xbt_parmap_t parmap);
+
+static void xbt_parmap_posix_master_wait(xbt_parmap_t parmap);
+static void xbt_parmap_posix_worker_signal(xbt_parmap_t parmap);
+static void xbt_parmap_posix_master_signal(xbt_parmap_t parmap);
+static void xbt_parmap_posix_worker_wait(xbt_parmap_t parmap, unsigned round);
+
+#ifdef HAVE_FUTEX_H
+static void xbt_parmap_futex_master_wait(xbt_parmap_t parmap);
+static void xbt_parmap_futex_worker_signal(xbt_parmap_t parmap);
+static void xbt_parmap_futex_master_signal(xbt_parmap_t parmap);
+static void xbt_parmap_futex_worker_wait(xbt_parmap_t parmap, unsigned round);
+static void futex_wait(unsigned *uaddr, unsigned val);
+static void futex_wake(unsigned *uaddr, unsigned val);
+#endif
+
+static void xbt_parmap_busy_master_wait(xbt_parmap_t parmap);
+static void xbt_parmap_busy_worker_signal(xbt_parmap_t parmap);
+static void xbt_parmap_busy_master_signal(xbt_parmap_t parmap);
+static void xbt_parmap_busy_worker_wait(xbt_parmap_t parmap, unsigned round);
+
+#ifdef HAVE_MC
+static void xbt_parmap_mc_work(xbt_parmap_t parmap, int worker_id);
+static void *xbt_parmap_mc_worker_main(void *arg);
+#endif
+
+/**
+ * \brief Parallel map structure
+ */
+typedef struct s_xbt_parmap {
+ e_xbt_parmap_flag_t status; /**< is the parmap active or being destroyed? */
+ unsigned work; /**< index of the current round */
+ unsigned thread_counter; /**< number of workers that have done the work */
+
+ unsigned int num_workers; /**< total number of worker threads including the controller */
+ xbt_os_thread_t *workers; /**< worker thread handlers */
+ void_f_pvoid_t fun; /**< function to run in parallel on each element of data */
+ xbt_dynar_t data; /**< parameters to pass to fun in parallel */
+ unsigned int index; /**< index of the next element of data to pick */
+
+#ifdef HAVE_MC
+ int finish; /**< position of a matching snapshot, or -1 while none was found */
+ void* ref_snapshot; /**< reference snapshot every element is compared against */
+ int_f_pvoid_pvoid_t snapshot_compare; /**< comparison function; 0 means "match" */
+ unsigned int length; /**< number of elements in mc_data */
+ void* mc_data; /**< array of snapshot pointers to compare */
+#endif
+
+ /* posix only */
+ xbt_os_cond_t ready_cond;
+ xbt_os_mutex_t ready_mutex;
+ xbt_os_cond_t done_cond;
+ xbt_os_mutex_t done_mutex;
+
+ /* fields that depend on the synchronization mode */
+ e_xbt_parmap_mode_t mode; /**< synchronization mode */
+ void (*master_wait_f)(xbt_parmap_t); /**< wait for the workers to have done the work */
+ void (*worker_signal_f)(xbt_parmap_t); /**< signal the master that a worker has done the work */
+ void (*master_signal_f)(xbt_parmap_t); /**< wakes the workers threads to process tasks */
+ void (*worker_wait_f)(xbt_parmap_t, unsigned); /**< waits for more work */
+} s_xbt_parmap_t;
+
+/**
+ * \brief Thread data transmission structure
+ */
+typedef struct s_xbt_parmap_thread_data{
+ xbt_parmap_t parmap; /**< the parmap this worker belongs to */
+ int worker_id; /**< worker rank in [1, num_workers); rank 0 is the controller */
+} s_xbt_parmap_thread_data_t;
+
+typedef s_xbt_parmap_thread_data_t *xbt_parmap_thread_data_t;
+
+/**
+ * \brief Creates a parallel map object
+ * \param num_workers number of worker threads to create
+ * \param mode how to synchronize the worker threads
+ * \return the parmap created
+ */
+xbt_parmap_t xbt_parmap_new(unsigned int num_workers, e_xbt_parmap_mode_t mode)
+{
+ unsigned int i;
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_threadpool, xbt,
- "threadpool: pool of worker threads");
+ XBT_DEBUG("Create new parmap (%u workers)", num_workers);
-static void *_xbt_tpool_worker_main(void *tpool);
+ /* Initialize the thread pool data structure */
+ xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1);
+ parmap->workers = xbt_new(xbt_os_thread_t, num_workers);
-unsigned long tpoolcounter = 0; /* Debug purposes */
+ parmap->num_workers = num_workers;
+ parmap->status = XBT_PARMAP_WORK;
+ xbt_parmap_set_mode(parmap, mode);
-xbt_tpool_t xbt_tpool_new(unsigned int num_workers, unsigned int max_jobs)
+ /* Create the pool of worker threads */
+ xbt_parmap_thread_data_t data;
+ parmap->workers[0] = NULL; /* slot 0 is the calling (controller) thread, which also processes tasks */
+#ifdef CORE_BINDING
+ unsigned int core_bind = 0;
+#endif
+ for (i = 1; i < num_workers; i++) {
+ data = xbt_new0(s_xbt_parmap_thread_data_t, 1);
+ data->parmap = parmap;
+ data->worker_id = i;
+ parmap->workers[i] = xbt_os_thread_create(NULL, xbt_parmap_worker_main,
+ data, NULL);
+#ifdef CORE_BINDING
+ xbt_os_thread_bind(parmap->workers[i], core_bind);
+ /* NOTE(review): the wrap test looks off by one -- core_bind can reach
+ * xbt_os_get_numcores() and be used for one bind before being reset;
+ * confirm the intended range is [0, numcores-1] */
+ if(core_bind!=xbt_os_get_numcores()){
+ core_bind++;
+ } else {
+ core_bind = 0;
+ }
+#endif
+ }
+ return parmap;
+}
+
+#ifdef HAVE_MC
+/**
+ * \brief Creates a parallel map object
+ * \param num_workers number of worker threads to create
+ * \param mode how to synchronize the worker threads
+ * \return the parmap created
+ */
+xbt_parmap_t xbt_parmap_mc_new(unsigned int num_workers, e_xbt_parmap_mode_t mode)
{
unsigned int i;
- xbt_os_thread_t worker = NULL;
-
- DEBUG2("Create new thread pool (%u, %u)", num_workers, max_jobs);
-
- /* Initialize thread pool data structure */
- xbt_tpool_t tpool = xbt_new0(s_xbt_tpool_t, 1);
- tpool->mutex = xbt_os_mutex_init();
- tpool->job_posted = xbt_os_cond_init();
- tpool->job_taken = xbt_os_cond_init();
- tpool->idle_worker = xbt_os_cond_init();
- tpool->jobs_queue = xbt_dynar_new(sizeof(s_xbt_tpool_job_t), NULL);
- tpool->num_workers = num_workers;
- tpool->num_idle_workers = 0;
- tpool->max_jobs = max_jobs;
-
+
+ XBT_DEBUG("Create new parmap (%u workers)", num_workers);
+
+ /* Initialize the thread pool data structure */
+ xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1);
+ parmap->workers = xbt_new(xbt_os_thread_t, num_workers);
+
+ parmap->num_workers = num_workers;
+ parmap->status = XBT_PARMAP_WORK;
+ xbt_parmap_set_mode(parmap, mode);
+
/* Create the pool of worker threads */
- for(i=0; i < num_workers; i++){
- worker = xbt_os_thread_create(NULL, _xbt_tpool_worker_main, tpool, NULL);
- xbt_os_thread_detach(worker);
+ xbt_parmap_thread_data_t data;
+ parmap->workers[0] = NULL; /* slot 0 is the calling (controller) thread */
+ for (i = 1; i < num_workers; i++) {
+ data = xbt_new0(s_xbt_parmap_thread_data_t, 1);
+ data->parmap = parmap;
+ data->worker_id = i;
+ parmap->workers[i] = xbt_os_thread_create(NULL, xbt_parmap_mc_worker_main,
+ data, NULL);
}
-
- return tpool;
+ return parmap;
}
+#endif
-void xbt_tpool_destroy(xbt_tpool_t tpool)
-{
- DEBUG1("Destroy thread pool %p", tpool);
+/**
+ * \brief Destroys a parmap
+ * \param parmap the parmap to destroy
+ */
+void xbt_parmap_destroy(xbt_parmap_t parmap)
+{
+ if (!parmap) {
+ return;
+ }
- /* Lock the pool, then signal every worker an wait for each to finish */
- xbt_os_mutex_acquire(tpool->mutex);
- tpool->flags = TPOOL_DESTROY;
+ parmap->status = XBT_PARMAP_DESTROY;
+ /* wake every worker so it can observe the DESTROY status and exit */
+ parmap->master_signal_f(parmap);
- while(tpool->num_workers){
- DEBUG1("Still %u workers, waiting...", tpool->num_workers);
- xbt_os_cond_signal(tpool->job_posted);
- xbt_os_cond_wait(tpool->job_taken, tpool->mutex);
- }
+ unsigned int i;
+ for (i = 1; i < parmap->num_workers; i++)
+ xbt_os_thread_join(parmap->workers[i], NULL);
+
+ /* NOTE(review): in futex/busy-wait mode these handles are NULL (parmap is
+ * zero-initialized and xbt_parmap_set_mode already destroyed them) --
+ * assumes the destroy functions accept NULL; confirm in xbt_os_thread */
+ xbt_os_cond_destroy(parmap->ready_cond);
+ xbt_os_mutex_destroy(parmap->ready_mutex);
+ xbt_os_cond_destroy(parmap->done_cond);
+ xbt_os_mutex_destroy(parmap->done_mutex);
- /* Destroy pool's data structures */
- xbt_os_cond_destroy(tpool->job_posted);
- xbt_os_cond_destroy(tpool->job_taken);
- xbt_os_cond_destroy(tpool->idle_worker);
- xbt_os_mutex_release(tpool->mutex);
- xbt_os_mutex_destroy(tpool->mutex);
- xbt_free(tpool);
+ xbt_free(parmap->workers);
+ xbt_free(parmap);
}
-void xbt_tpool_queue_job(xbt_tpool_t tpool, void_f_pvoid_t fun, void* fun_arg)
+/**
+ * \brief Sets the synchronization mode of a parmap.
+ * \param parmap a parallel map object
+ * \param mode the synchronization mode
+ */
+static void xbt_parmap_set_mode(xbt_parmap_t parmap, e_xbt_parmap_mode_t mode)
{
- s_xbt_tpool_job_t job;
- job.fun = fun;
- job.fun_arg = fun_arg;
+ if (mode == XBT_PARMAP_DEFAULT) {
+#ifdef HAVE_FUTEX_H
+ mode = XBT_PARMAP_FUTEX;
+#else
+ mode = XBT_PARMAP_POSIX;
+#endif
+ }
+ parmap->mode = mode;
+
+ switch (mode) {
+
+ case XBT_PARMAP_POSIX:
+ parmap->master_wait_f = xbt_parmap_posix_master_wait;
+ parmap->worker_signal_f = xbt_parmap_posix_worker_signal;
+ parmap->master_signal_f = xbt_parmap_posix_master_signal;
+ parmap->worker_wait_f = xbt_parmap_posix_worker_wait;
+
+ parmap->ready_cond = xbt_os_cond_init();
+ parmap->ready_mutex = xbt_os_mutex_init();
+ parmap->done_cond = xbt_os_cond_init();
+ parmap->done_mutex = xbt_os_mutex_init();
+ break;
+
+
+ case XBT_PARMAP_FUTEX:
+#ifdef HAVE_FUTEX_H
+ parmap->master_wait_f = xbt_parmap_futex_master_wait;
+ parmap->worker_signal_f = xbt_parmap_futex_worker_signal;
+ parmap->master_signal_f = xbt_parmap_futex_master_signal;
+ parmap->worker_wait_f = xbt_parmap_futex_worker_wait;
+
+ /* this mode needs no posix primitives; release any previously created
+ * ones (right after creation these are NULL -- assumed to be accepted) */
+ xbt_os_cond_destroy(parmap->ready_cond);
+ xbt_os_mutex_destroy(parmap->ready_mutex);
+ xbt_os_cond_destroy(parmap->done_cond);
+ xbt_os_mutex_destroy(parmap->done_mutex);
+ break;
+#else
+ xbt_die("Futex is not available on this OS.");
+ /* no break: xbt_die() is expected not to return */
+#endif
+
+ case XBT_PARMAP_BUSY_WAIT:
+ parmap->master_wait_f = xbt_parmap_busy_master_wait;
+ parmap->worker_signal_f = xbt_parmap_busy_worker_signal;
+ parmap->master_signal_f = xbt_parmap_busy_master_signal;
+ parmap->worker_wait_f = xbt_parmap_busy_worker_wait;
+
+ xbt_os_cond_destroy(parmap->ready_cond);
+ xbt_os_mutex_destroy(parmap->ready_mutex);
+ xbt_os_cond_destroy(parmap->done_cond);
+ xbt_os_mutex_destroy(parmap->done_mutex);
+ break;
+
+ case XBT_PARMAP_DEFAULT:
+ /* already normalized to a concrete mode above */
+ THROW_IMPOSSIBLE;
+ break;
+ }
+}
- /* Wait until we can lock on the pool with some space on it for the job */
- xbt_os_mutex_acquire(tpool->mutex);
- while(xbt_dynar_length(tpool->jobs_queue) == tpool->max_jobs)
- xbt_os_cond_wait(tpool->job_taken, tpool->mutex);
+/**
+ * \brief Applies a list of tasks in parallel.
+ * \param parmap a parallel map object
+ * \param fun the function to call in parallel
+ * \param data each element of this dynar will be passed as an argument to fun
+ */
+void xbt_parmap_apply(xbt_parmap_t parmap, void_f_pvoid_t fun, xbt_dynar_t data)
+{
+ /* Assign resources to worker threads (we are maestro here)*/
+ parmap->fun = fun;
+ parmap->data = data;
+ parmap->index = 0;
+ parmap->master_signal_f(parmap); // maestro wakes all the minions (the working threads)
+ xbt_parmap_work(parmap); // maestro works with its minions
+ parmap->master_wait_f(parmap); // When there is no more work to do, then maestro waits for the last minion to stop
+ XBT_DEBUG("Job done"); // ... and proceeds
+}
- DEBUG3("Queue job %p (%p) to thread pool %p", fun, fun_arg, tpool);
+/**
+ * \brief Returns a next task to process.
+ *
+ * Worker threads call this function to get more work.
+ *
+ * \return the next task to process, or NULL if there is no more work
+ */
+void* xbt_parmap_next(xbt_parmap_t parmap)
+{
+ /* atomic post-increment: each index is handed out to exactly one thread */
+ unsigned int index = __sync_fetch_and_add(&parmap->index, 1);
+ if (index < xbt_dynar_length(parmap->data)) {
+ return xbt_dynar_get_as(parmap->data, index, void*);
+ }
+ return NULL;
+}
- /* Push the job in the queue, signal the workers and unlock the pool */
- xbt_dynar_push_as(tpool->jobs_queue, s_xbt_tpool_job_t, job);
- xbt_os_cond_signal(tpool->job_posted);
- xbt_os_mutex_release(tpool->mutex);
- return;
+/**
+ * \brief Lets the calling thread process tasks until the dynar is exhausted.
+ *
+ * Each index is claimed atomically, so the elements of parmap->data are
+ * shared between the controller and the workers without double processing.
+ */
+static void xbt_parmap_work(xbt_parmap_t parmap)
+{
+ unsigned index;
+ while ((index = __sync_fetch_and_add(&parmap->index, 1))
+ < xbt_dynar_length(parmap->data))
+ parmap->fun(xbt_dynar_get_as(parmap->data, index, void*));
+}
-void xbt_tpool_wait_all(xbt_tpool_t tpool)
+/**
+ * \brief Main function of a worker thread.
+ * \param arg the thread data (parmap + worker id); freed here on exit
+ */
+static void *xbt_parmap_worker_main(void *arg)
{
- DEBUG1("Wait all workers in thread pool %p", tpool);
- xbt_os_mutex_acquire(tpool->mutex);
+ xbt_parmap_thread_data_t data = (xbt_parmap_thread_data_t) arg;
+ xbt_parmap_t parmap = data->parmap;
+ unsigned round = 0;
+ /* each worker gets its own SIMIX context -- presumably so that fun may
+ * switch simulated-process contexts; TODO confirm */
+ smx_context_t context = SIMIX_context_new(NULL, 0, NULL, NULL, NULL);
+ SIMIX_context_set_current(context);
+
+ XBT_DEBUG("New worker thread created");
+
+ /* Worker's main loop */
+ while (1) {
+ parmap->worker_wait_f(parmap, ++round);
+ if (parmap->status == XBT_PARMAP_WORK) {
+
+ XBT_DEBUG("Worker %d got a job", data->worker_id);
- while(tpool->num_idle_workers < tpool->num_workers
- || xbt_dynar_length(tpool->jobs_queue) > 0)
- xbt_os_cond_wait(tpool->idle_worker, tpool->mutex);
+ xbt_parmap_work(parmap);
+ parmap->worker_signal_f(parmap);
- xbt_os_mutex_release(tpool->mutex);
- DEBUG1("Wait all workers done in thread pool %p", tpool);
- return;
+ XBT_DEBUG("Worker %d has finished", data->worker_id);
+
+ /* We are destroying the parmap */
+ } else {
+ SIMIX_context_free(context);
+ xbt_free(data);
+ return NULL;
+ }
+ }
}
-static void *_xbt_tpool_worker_main(void *arg)
+#ifdef HAVE_MC
+
+/**
+ * \brief Compares a list of snapshots to a reference snapshot, in parallel.
+ * \param parmap a parallel map object
+ * \param fun comparison function; returns 0 when two snapshots match
+ * \param data array of snapshot pointers to compare against ref_snapshot
+ * \param length number of elements in data
+ * \param ref_snapshot the reference snapshot
+ * \return the position of a matching snapshot (see xbt_parmap_mc_work),
+ * or -1 if none matches
+ */
+int xbt_parmap_mc_apply(xbt_parmap_t parmap, int_f_pvoid_pvoid_t fun,
+ void* data, unsigned int length, void* ref_snapshot)
{
- s_xbt_tpool_job_t job;
- xbt_tpool_t tpool = (xbt_tpool_t)arg;
+ /* Assign resources to worker threads */
+ parmap->snapshot_compare = fun;
+ parmap->mc_data = data;
+ parmap->index = 0;
+ parmap->finish = -1; /* -1 = no matching snapshot found (yet) */
+ parmap->length = length;
+ parmap->ref_snapshot = ref_snapshot;
+ parmap->master_signal_f(parmap);
+ xbt_parmap_mc_work(parmap, 0); /* the controller takes part as worker 0 */
+ parmap->master_wait_f(parmap);
+ XBT_DEBUG("Job done");
+ return parmap->finish;
+}
- unsigned long i = tpoolcounter++; /* Debug purposes */
- DEBUG1("New worker thread created (%lu)", i);
+/**
+ * \brief Scans this worker's slice of the snapshot array for a match.
+ *
+ * Each worker handles a contiguous chunk of ceil(length / num_workers)
+ * pointers, and stops early as soon as any worker has recorded a match
+ * in parmap->finish.
+ *
+ * \param parmap the parmap
+ * \param worker_id rank of the calling worker (0 is the controller)
+ */
+static void xbt_parmap_mc_work(xbt_parmap_t parmap, int worker_id)
+{
+ unsigned int data_size = (parmap->length / parmap->num_workers) +
+ ((parmap->length % parmap->num_workers) ? 1 :0);
+ void* start = (char*)parmap->mc_data + (data_size*worker_id*sizeof(void*));
+ void* end = MIN((char *)start + data_size* sizeof(void*), (char*)parmap->mc_data + parmap->length*sizeof(void*));
+ //XBT_CRITICAL("Worker %d : %p -> %p (%d)", worker_id, start, end, data_size);
+
+ while ( start < end && parmap->finish == -1) {
+ //XBT_CRITICAL("Starting with %p", start);
+ int res = parmap->snapshot_compare(*(void**)start, parmap->ref_snapshot);
+ start = (char *)start + sizeof(start);
+ if (!res){
+
+ /* NOTE(review): start was already advanced, so this records the
+ * 1-based position of the match -- confirm callers expect that */
+ parmap->finish = ((char*)start - (char*)parmap->mc_data) / sizeof(void*);
+ //XBT_CRITICAL("Find good one %p (%p)", start, parmap->mc_data);
+ break;
+ }
+ }
+}
+
+/**
+ * \brief Main function of a model-checker worker thread.
+ * \param arg the thread data (parmap + worker id); freed here on exit
+ */
+static void *xbt_parmap_mc_worker_main(void *arg)
+{
+ xbt_parmap_thread_data_t data = (xbt_parmap_thread_data_t) arg;
+ xbt_parmap_t parmap = data->parmap;
+ unsigned round = 0;
+ /* unlike xbt_parmap_worker_main, no SIMIX context is created here */
+ /* smx_context_t context = SIMIX_context_new(NULL, 0, NULL, NULL, NULL); */
+ /* SIMIX_context_set_current(context); */
+
+ XBT_DEBUG("New worker thread created");
/* Worker's main loop */
- while(1){
- xbt_os_mutex_acquire(tpool->mutex);
+ while (1) {
+ parmap->worker_wait_f(parmap, ++round);
+ if (parmap->status == XBT_PARMAP_WORK) {
- tpool->num_idle_workers++;
- xbt_os_cond_signal(tpool->idle_worker);
+ XBT_DEBUG("Worker %d got a job", data->worker_id);
- /* If there are no jobs in the queue wait for one */
- while(xbt_dynar_length(tpool->jobs_queue) == 0
- && tpool->flags != TPOOL_DESTROY){
- DEBUG1("Worker %lu waiting for a job", i);
- xbt_os_cond_wait(tpool->job_posted, tpool->mutex);
- }
+ xbt_parmap_mc_work(parmap, data->worker_id);
+ parmap->worker_signal_f(parmap);
- DEBUG1("Worker %lu got a job", i);
+ XBT_DEBUG("Worker %d has finished", data->worker_id);
- /* If we are shutting down, signal the destroyer so it can kill the other */
- /* workers, unlock the pool and return */
- if(tpool->flags == TPOOL_DESTROY){
- DEBUG1("Shutting down worker %lu", i);
- tpool->num_idle_workers--;
- tpool->num_workers--;
- xbt_os_cond_signal(tpool->job_taken);
- xbt_os_mutex_release(tpool->mutex);
+ /* We are destroying the parmap */
+ } else {
+ xbt_free(data);
return NULL;
}
-
- /* Get a job, signal the pool to inform jobs submitters and unlock it */
- job = xbt_dynar_pop_as(tpool->jobs_queue, s_xbt_tpool_job_t);
- xbt_os_cond_signal(tpool->job_taken);
- tpool->num_idle_workers--;
- xbt_os_mutex_release(tpool->mutex);
-
- /* Run the job and loop again ... */
- job.fun(job.fun_arg);
}
}
+#endif
-#ifdef SIMGRID_TEST
-#include "xbt.h"
-#include "xbt/ex.h"
+#ifdef HAVE_FUTEX_H
+/* Blocks until *uaddr no longer equals val, or a wakeup occurs. The return
+ * value is deliberately ignored: all callers re-check their predicate in a
+ * loop, which also absorbs spurious returns. The *_PRIVATE flavor restricts
+ * the futex to threads of this process. */
+static void futex_wait(unsigned *uaddr, unsigned val)
+{
+ XBT_VERB("Waiting on futex %p", uaddr);
+ syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0);
+}
+
+/* Wakes up to val threads currently waiting on uaddr. */
+static void futex_wake(unsigned *uaddr, unsigned val)
+{
+ XBT_VERB("Waking futex %p", uaddr);
+ syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, val, NULL, NULL, 0);
+}
+#endif
+
+/**
+ * \brief Starts the parmap: waits for all workers to be ready and returns.
+ *
+ * This function is called by the controller thread.
+ *
+ * \param parmap a parmap
+ */
+static void xbt_parmap_posix_master_wait(xbt_parmap_t parmap)
+{
+ xbt_os_mutex_acquire(parmap->done_mutex);
+ /* loop (not 'if'): condition variables are subject to spurious wakeups,
+ * so the predicate must be re-checked after every return from wait --
+ * same structure as xbt_parmap_futex_master_wait */
+ while (parmap->thread_counter < parmap->num_workers) {
+ /* wait for all workers to be ready */
+ xbt_os_cond_wait(parmap->done_cond, parmap->done_mutex);
+ }
+ xbt_os_mutex_release(parmap->done_mutex);
+}
-XBT_TEST_SUITE("tpool", "Thread pool");
+/**
+ * \brief Ends the parmap: wakes the controller thread when all workers terminate.
+ *
+ * This function is called by all worker threads when they end (not including
+ * the controller).
+ *
+ * \param parmap a parmap
+ */
+static void xbt_parmap_posix_worker_signal(xbt_parmap_t parmap)
+{
+ xbt_os_mutex_acquire(parmap->done_mutex);
+ /* done_mutex also protects thread_counter in this mode */
+ if (++parmap->thread_counter == parmap->num_workers) {
+ /* all workers have finished, wake the controller */
+ xbt_os_cond_signal(parmap->done_cond);
+ }
+ xbt_os_mutex_release(parmap->done_mutex);
+}
-xbt_tpool_t tpool;
+/**
+ * \brief Wakes all workers and waits for them to finish the tasks.
+ *
+ * This function is called by the controller thread.
+ *
+ * \param parmap a parmap
+ */
+static void xbt_parmap_posix_master_signal(xbt_parmap_t parmap)
+{
+ xbt_os_mutex_acquire(parmap->ready_mutex);
+ parmap->thread_counter = 1; /* the controller counts as one worker: it processes tasks too */
+ parmap->work++; /* open the next round */
+ /* wake all workers */
+ xbt_os_cond_broadcast(parmap->ready_cond);
+ xbt_os_mutex_release(parmap->ready_mutex);
+}
-void job(void *arg);
+/**
+ * \brief Waits for some work to process.
+ *
+ * This function is called by each worker thread (not including the controller)
+ * when it has no more work to do.
+ *
+ * \param parmap a parmap
+ * \param round the expected round number
+ */
+static void xbt_parmap_posix_worker_wait(xbt_parmap_t parmap, unsigned round)
+{
+ xbt_os_mutex_acquire(parmap->ready_mutex);
+ /* wait for more work; loop (not 'if') because condition variables are
+ * subject to spurious wakeups -- same predicate loop as the futex and
+ * busy-wait variants of worker_wait */
+ while (parmap->work != round) {
+ xbt_os_cond_wait(parmap->ready_cond, parmap->ready_mutex);
+ }
+ xbt_os_mutex_release(parmap->ready_mutex);
+}
-void job (void *arg)
+#ifdef HAVE_FUTEX_H
+/**
+ * \brief Starts the parmap: waits for all workers to be ready and returns.
+ *
+ * This function is called by the controller thread.
+ *
+ * \param parmap a parmap
+ */
+static void xbt_parmap_futex_master_wait(xbt_parmap_t parmap)
{
- xbt_test_log1("I'm job %lu", (unsigned long)arg);
+ unsigned count = parmap->thread_counter;
+ while (count < parmap->num_workers) {
+ /* wait for all workers to be ready; futex_wait returns immediately if
+ * thread_counter changed since count was read, so no wakeup is lost */
+ futex_wait(&parmap->thread_counter, count);
+ count = parmap->thread_counter;
+ }
}
-XBT_TEST_UNIT("basic", test_tpool_basic, "Basic usage")
+/**
+ * \brief Ends the parmap: wakes the controller thread when all workers terminate.
+ *
+ * This function is called by all worker threads when they end (not including
+ * the controller).
+ *
+ * \param parmap a parmap
+ */
+static void xbt_parmap_futex_worker_signal(xbt_parmap_t parmap)
{
- xbt_test_add0("Create thread pool");
+ /* atomic increment: the last worker to finish wakes the controller */
+ unsigned count = __sync_add_and_fetch(&parmap->thread_counter, 1);
+ if (count == parmap->num_workers) {
+ /* all workers have finished, wake the controller */
+ futex_wake(&parmap->thread_counter, INT_MAX);
+ }
+}
- unsigned long i,j;
- /* Create thread pool */
- tpool = xbt_tpool_new(5, 10);
+/**
+ * \brief Wakes all workers and waits for them to finish the tasks.
+ *
+ * This function is called by the controller thread.
+ *
+ * \param parmap a parmap
+ */
+static void xbt_parmap_futex_master_signal(xbt_parmap_t parmap)
+{
+ parmap->thread_counter = 1; /* the controller counts as one worker */
+ __sync_add_and_fetch(&parmap->work, 1); /* atomically open the next round */
+ /* wake all workers */
+ futex_wake(&parmap->work, INT_MAX);
+}
- for(j=0; j < 10; j++){
- /* Queue some work */
- for(i=0; i < 20; i++){
- xbt_tpool_queue_job(tpool, job, (void*)i);
- }
- /* Wait for everyone */
- xbt_tpool_wait_all(tpool);
+/**
+ * \brief Waits for some work to process.
+ *
+ * This function is called by each worker thread (not including the controller)
+ * when it has no more work to do.
+ *
+ * \param parmap a parmap
+ * \param round the expected round number
+ */
+static void xbt_parmap_futex_worker_wait(xbt_parmap_t parmap, unsigned round)
+{
+ unsigned work = parmap->work;
+ /* wait for more work; the loop also absorbs spurious futex returns */
+ while (work != round) {
+ futex_wait(&parmap->work, work);
+ work = parmap->work;
+ }
+}
+#endif
+
+/**
+ * \brief Starts the parmap: waits for all workers to be ready and returns.
+ *
+ * This function is called by the controller thread.
+ *
+ * \param parmap a parmap
+ */
+static void xbt_parmap_busy_master_wait(xbt_parmap_t parmap)
+{
+ /* spin until every worker has signaled, yielding the CPU between probes */
+ while (parmap->thread_counter < parmap->num_workers) {
+ xbt_os_thread_yield();
}
+}
+
+/**
+ * \brief Ends the parmap: wakes the controller thread when all workers terminate.
+ *
+ * This function is called by all worker threads when they end.
+ *
+ * \param parmap a parmap
+ */
+static void xbt_parmap_busy_worker_signal(xbt_parmap_t parmap)
+{
+ /* atomic increment; the controller spins on thread_counter */
+ __sync_add_and_fetch(&parmap->thread_counter, 1);
+}
- /* Destroy thread pool */
- xbt_tpool_destroy(tpool);
+/**
+ * \brief Wakes all workers and waits for them to finish the tasks.
+ *
+ * This function is called by the controller thread.
+ *
+ * \param parmap a parmap
+ */
+static void xbt_parmap_busy_master_signal(xbt_parmap_t parmap)
+{
+ parmap->thread_counter = 1; /* the controller counts as one worker */
+ __sync_add_and_fetch(&parmap->work, 1); /* opening the next round releases the spinning workers */
}
-#endif /* SIMGRID_TEST */
+/**
+ * \brief Waits for some work to process.
+ *
+ * This function is called by each worker thread (not including the controller)
+ * when it has no more work to do.
+ *
+ * \param parmap a parmap
+ * \param round the expected round number
+ */
+static void xbt_parmap_busy_worker_wait(xbt_parmap_t parmap, unsigned round)
+{
+ /* wait for more work, yielding the CPU between probes of the round counter */
+ while (parmap->work != round) {
+ xbt_os_thread_yield();
+ }
+}