X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/839877aee607c198ffda1d836d72976831819825..9dbc2a5652d2692d1383f13782ab6099d24cfad2:/src/xbt/parmap.c diff --git a/src/xbt/parmap.c b/src/xbt/parmap.c index 3012cabce4..ad335a0796 100644 --- a/src/xbt/parmap.c +++ b/src/xbt/parmap.c @@ -1,227 +1,629 @@ -/* Copyright (c) 2004, 2005, 2007, 2009, 2010. The SimGrid Team. +/* Copyright (c) 2004-2005, 2007, 2009-2013. The SimGrid Team. * All rights reserved. */ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. */ -#include "gras_config.h" +#include "internal_config.h" #include + +#ifndef _XBT_WIN32 #include +#endif + #ifdef HAVE_FUTEX_H - #include -#else - #include "xbt/xbt_os_thread.h" +#include +#include #endif -#include -#include "parmap_private.h" + +#include "xbt/parmap.h" +#include "xbt/log.h" +#include "xbt/function_types.h" +#include "xbt/dynar.h" +#include "xbt/xbt_os_thread.h" +#include "xbt/sysdep.h" +#include "simix/smx_private.h" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_parmap, xbt, "parmap: parallel map"); -XBT_LOG_NEW_SUBCATEGORY(xbt_parmap_unit, xbt_parmap, "parmap unit testing"); -static void *_xbt_parmap_worker_main(void *parmap); +typedef enum { + XBT_PARMAP_WORK, + XBT_PARMAP_DESTROY +} e_xbt_parmap_flag_t; + +static void xbt_parmap_set_mode(xbt_parmap_t parmap, e_xbt_parmap_mode_t mode); +static void *xbt_parmap_worker_main(void *parmap); +static void xbt_parmap_work(xbt_parmap_t parmap); + +static void xbt_parmap_posix_master_wait(xbt_parmap_t parmap); +static void xbt_parmap_posix_worker_signal(xbt_parmap_t parmap); +static void xbt_parmap_posix_master_signal(xbt_parmap_t parmap); +static void xbt_parmap_posix_worker_wait(xbt_parmap_t parmap, unsigned round); + #ifdef HAVE_FUTEX_H - static void futex_wait(int *uaddr, int val); - static void futex_wake(int *uaddr, int val); +static void xbt_parmap_futex_master_wait(xbt_parmap_t parmap); +static void xbt_parmap_futex_worker_signal(xbt_parmap_t parmap); +static void xbt_parmap_futex_master_signal(xbt_parmap_t parmap); +static void xbt_parmap_futex_worker_wait(xbt_parmap_t parmap, unsigned round); +static void futex_wait(unsigned *uaddr, unsigned val); +static void futex_wake(unsigned *uaddr, unsigned val); #endif -xbt_parmap_t xbt_parmap_new(unsigned int num_workers) + +static void xbt_parmap_busy_master_wait(xbt_parmap_t parmap); +static void xbt_parmap_busy_worker_signal(xbt_parmap_t parmap); +static void xbt_parmap_busy_master_signal(xbt_parmap_t parmap); +static void xbt_parmap_busy_worker_wait(xbt_parmap_t parmap, unsigned round); + +#ifdef HAVE_MC +static void xbt_parmap_mc_work(xbt_parmap_t parmap, int worker_id); +static void *xbt_parmap_mc_worker_main(void *arg); +#endif + +/** + * \brief Parallel map structure + */ +typedef struct s_xbt_parmap { + e_xbt_parmap_flag_t status; /**< is the parmap active or being destroyed? 
*/ + unsigned work; /**< index of the current round */ + unsigned thread_counter; /**< number of workers that have done the work */ + + unsigned int num_workers; /**< total number of worker threads including the controller */ + void_f_pvoid_t fun; /**< function to run in parallel on each element of data */ + xbt_dynar_t data; /**< parameters to pass to fun in parallel */ + unsigned int index; /**< index of the next element of data to pick */ + +#ifdef HAVE_MC + int finish; + void* ref_snapshot; + int_f_pvoid_pvoid_t snapshot_compare; + unsigned int length; + void* mc_data; +#endif + + /* posix only */ + xbt_os_cond_t ready_cond; + xbt_os_mutex_t ready_mutex; + xbt_os_cond_t done_cond; + xbt_os_mutex_t done_mutex; + + /* fields that depend on the synchronization mode */ + e_xbt_parmap_mode_t mode; /**< synchronization mode */ + void (*master_wait_f)(xbt_parmap_t); /**< wait for the workers to have done the work */ + void (*worker_signal_f)(xbt_parmap_t); /**< signal the master that a worker has done the work */ + void (*master_signal_f)(xbt_parmap_t); /**< wakes the workers threads to process tasks */ + void (*worker_wait_f)(xbt_parmap_t, unsigned); /**< waits for more work */ +} s_xbt_parmap_t; + +/** + * \brief Thread data transmission structure + */ +typedef struct s_xbt_parmap_thread_data{ + xbt_parmap_t parmap; + int worker_id; +} s_xbt_parmap_thread_data_t; + +typedef s_xbt_parmap_thread_data_t *xbt_parmap_thread_data_t; + +/** + * \brief Creates a parallel map object + * \param num_workers number of worker threads to create + * \param mode how to synchronize the worker threads + * \return the parmap created + */ +xbt_parmap_t xbt_parmap_new(unsigned int num_workers, e_xbt_parmap_mode_t mode) { unsigned int i; xbt_os_thread_t worker = NULL; - DEBUG1("Create new parmap (%u workers)", num_workers); + XBT_DEBUG("Create new parmap (%u workers)", num_workers); /* Initialize the thread pool data structure */ xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1); + parmap->num_workers = num_workers; - parmap->status = PARMAP_WORK; - - parmap->workers_ready = xbt_new0(s_xbt_barrier_t, 1); - xbt_barrier_init(parmap->workers_ready, num_workers + 1); - parmap->workers_done = xbt_new0(s_xbt_barrier_t, 1); - xbt_barrier_init(parmap->workers_done, num_workers + 1); -#ifndef HAVE_FUTEX_H - parmap->workers_ready->mutex = xbt_os_mutex_init(); - parmap->workers_ready->cond = xbt_os_cond_init(); -#endif + parmap->status = XBT_PARMAP_WORK; + xbt_parmap_set_mode(parmap, mode); + /* Create the pool of worker threads */ - for(i=0; i < num_workers; i++){ - worker = xbt_os_thread_create(NULL, _xbt_parmap_worker_main, parmap, NULL); + xbt_parmap_thread_data_t data; + for (i = 1; i < num_workers; i++) { + data = xbt_new0(s_xbt_parmap_thread_data_t, 1); + data->parmap = parmap; + data->worker_id = i; + worker = xbt_os_thread_create(NULL, xbt_parmap_worker_main, data, NULL); xbt_os_thread_detach(worker); } - return parmap; } -void xbt_parmap_destroy(xbt_parmap_t parmap) -{ - DEBUG1("Destroy parmap %p", parmap); +#ifdef HAVE_MC +/** + * \brief Creates a parallel map object + * \param num_workers number of worker threads to create + * \param mode how to synchronize the worker threads + * \return the parmap created + */ +xbt_parmap_t xbt_parmap_mc_new(unsigned int num_workers, e_xbt_parmap_mode_t mode) +{ + unsigned int i; + xbt_os_thread_t worker = NULL; - parmap->status = PARMAP_DESTROY; + XBT_DEBUG("Create new parmap (%u workers)", num_workers); - xbt_barrier_wait(parmap->workers_ready); - DEBUG0("Kill job sent"); - 
xbt_barrier_wait(parmap->workers_done); -#ifndef HAVE_FUTEX_H - xbt_os_mutex_destroy(parmap->workers_ready->mutex); - xbt_os_cond_destroy(parmap->workers_ready->cond); + /* Initialize the thread pool data structure */ + xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1); + + parmap->num_workers = num_workers; + parmap->status = XBT_PARMAP_WORK; + xbt_parmap_set_mode(parmap, mode); + + /* Create the pool of worker threads */ + xbt_parmap_thread_data_t data; + for (i = 1; i < num_workers; i++) { + data = xbt_new0(s_xbt_parmap_thread_data_t, 1); + data->parmap = parmap; + data->worker_id = i; + worker = xbt_os_thread_create(NULL, xbt_parmap_mc_worker_main, data, NULL); + xbt_os_thread_detach(worker); + } + return parmap; +} #endif - xbt_free(parmap->workers_ready); - xbt_free(parmap->workers_done); + +/** + * \brief Destroys a parmap + * \param parmap the parmap to destroy + */ +void xbt_parmap_destroy(xbt_parmap_t parmap) +{ + if (!parmap) { + return; + } + + parmap->status = XBT_PARMAP_DESTROY; + parmap->master_signal_f(parmap); + parmap->master_wait_f(parmap); + + xbt_os_cond_destroy(parmap->ready_cond); + xbt_os_mutex_destroy(parmap->ready_mutex); + xbt_os_cond_destroy(parmap->done_cond); + xbt_os_mutex_destroy(parmap->done_mutex); + xbt_free(parmap); } - void xbt_parmap_apply(xbt_parmap_t parmap, void_f_pvoid_t fun, xbt_dynar_t data) +/** + * \brief Sets the synchronization mode of a parmap. + * \param parmap a parallel map object + * \param mode the synchronization mode + */ +static void xbt_parmap_set_mode(xbt_parmap_t parmap, e_xbt_parmap_mode_t mode) +{ + if (mode == XBT_PARMAP_DEFAULT) { +#ifdef HAVE_FUTEX_H + mode = XBT_PARMAP_FUTEX; +#else + mode = XBT_PARMAP_POSIX; +#endif + } + parmap->mode = mode; + + switch (mode) { + + case XBT_PARMAP_POSIX: + parmap->master_wait_f = xbt_parmap_posix_master_wait; + parmap->worker_signal_f = xbt_parmap_posix_worker_signal; + parmap->master_signal_f = xbt_parmap_posix_master_signal; + parmap->worker_wait_f = xbt_parmap_posix_worker_wait; + + parmap->ready_cond = xbt_os_cond_init(); + parmap->ready_mutex = xbt_os_mutex_init(); + parmap->done_cond = xbt_os_cond_init(); + parmap->done_mutex = xbt_os_mutex_init(); + break; + + + case XBT_PARMAP_FUTEX: +#ifdef HAVE_FUTEX_H + parmap->master_wait_f = xbt_parmap_futex_master_wait; + parmap->worker_signal_f = xbt_parmap_futex_worker_signal; + parmap->master_signal_f = xbt_parmap_futex_master_signal; + parmap->worker_wait_f = xbt_parmap_futex_worker_wait; + + xbt_os_cond_destroy(parmap->ready_cond); + xbt_os_mutex_destroy(parmap->ready_mutex); + xbt_os_cond_destroy(parmap->done_cond); + xbt_os_mutex_destroy(parmap->done_mutex); + break; +#else + xbt_die("Futex is not available on this OS."); +#endif + + case XBT_PARMAP_BUSY_WAIT: + parmap->master_wait_f = xbt_parmap_busy_master_wait; + parmap->worker_signal_f = xbt_parmap_busy_worker_signal; + parmap->master_signal_f = xbt_parmap_busy_master_signal; + parmap->worker_wait_f = xbt_parmap_busy_worker_wait; + + xbt_os_cond_destroy(parmap->ready_cond); + xbt_os_mutex_destroy(parmap->ready_mutex); + xbt_os_cond_destroy(parmap->done_cond); + xbt_os_mutex_destroy(parmap->done_mutex); + break; + + case XBT_PARMAP_DEFAULT: + THROW_IMPOSSIBLE; + break; + } +} + +/** + * \brief Applies a list of tasks in parallel. 
+ * \param parmap a parallel map object + * \param fun the function to call in parallel + * \param data each element of this dynar will be passed as an argument to fun + */ +void xbt_parmap_apply(xbt_parmap_t parmap, void_f_pvoid_t fun, xbt_dynar_t data) { - /* Assign resources to worker threads*/ + /* Assign resources to worker threads */ parmap->fun = fun; parmap->data = data; + parmap->index = 0; + parmap->master_signal_f(parmap); + xbt_parmap_work(parmap); + parmap->master_wait_f(parmap); + XBT_DEBUG("Job done"); +} - /* Notify workers that there is a job */ - xbt_barrier_wait(parmap->workers_ready); - DEBUG0("Job dispatched, lets wait..."); - xbt_barrier_wait(parmap->workers_done); +/** + * \brief Returns a next task to process. + * + * Worker threads call this function to get more work. + * + * \return the next task to process, or NULL if there is no more work + */ +void* xbt_parmap_next(xbt_parmap_t parmap) +{ + unsigned int index = __sync_fetch_and_add(&parmap->index, 1); + if (index < xbt_dynar_length(parmap->data)) { + return xbt_dynar_get_as(parmap->data, index, void*); + } + return NULL; +} - DEBUG0("Job done"); - parmap->fun = NULL; - parmap->data = NULL; +static void xbt_parmap_work(xbt_parmap_t parmap) +{ + unsigned index; + while ((index = __sync_fetch_and_add(&parmap->index, 1)) + < xbt_dynar_length(parmap->data)) + parmap->fun(xbt_dynar_get_as(parmap->data, index, void*)); } -static void *_xbt_parmap_worker_main(void *arg) +/** + * \brief Main function of a worker thread. + * \param arg the parmap + */ +static void *xbt_parmap_worker_main(void *arg) { - unsigned int data_start, data_end, data_size, worker_id; - xbt_parmap_t parmap = (xbt_parmap_t)arg; + xbt_parmap_thread_data_t data = (xbt_parmap_thread_data_t) arg; + xbt_parmap_t parmap = data->parmap; + unsigned round = 0; + smx_context_t context = SIMIX_context_new(NULL, 0, NULL, NULL, NULL); + SIMIX_context_set_current(context); - /* Fetch a worker id */ - worker_id = __sync_fetch_and_add(&parmap->workers_max_id, 1); + XBT_DEBUG("New worker thread created"); - DEBUG1("New worker thread created (%u)", worker_id); - /* Worker's main loop */ - while(1){ - xbt_barrier_wait(parmap->workers_ready); + while (1) { + parmap->worker_wait_f(parmap, ++round); + if (parmap->status == XBT_PARMAP_WORK) { + + XBT_DEBUG("Worker %d got a job", data->worker_id); + + xbt_parmap_work(parmap); + parmap->worker_signal_f(parmap); + + XBT_DEBUG("Worker %d has finished", data->worker_id); + + /* We are destroying the parmap */ + } else { + SIMIX_context_free(context); + xbt_free(data); + parmap->worker_signal_f(parmap); + return NULL; + } + } +} + +#ifdef HAVE_MC + +/** + * \brief Applies a list of tasks in parallel. 
+ * \param parmap a parallel map object + * \param fun the function to call in parallel + * \param data each element of this dynar will be passed as an argument to fun + */ +int xbt_parmap_mc_apply(xbt_parmap_t parmap, int_f_pvoid_pvoid_t fun, + void* data, unsigned int length, void* ref_snapshot) +{ + /* Assign resources to worker threads */ + parmap->snapshot_compare = fun; + parmap->mc_data = data; + parmap->index = 0; + parmap->finish = -1; + parmap->length = length; + parmap->ref_snapshot = ref_snapshot; + parmap->master_signal_f(parmap); + xbt_parmap_mc_work(parmap, 0); + parmap->master_wait_f(parmap); + XBT_DEBUG("Job done"); + return parmap->finish; +} - if(parmap->status == PARMAP_WORK){ - DEBUG1("Worker %u got a job", worker_id); +static void xbt_parmap_mc_work(xbt_parmap_t parmap, int worker_id) +{ + unsigned int data_size = (parmap->length / parmap->num_workers) + + ((parmap->length % parmap->num_workers) ? 1 :0); + void* start = (char*)parmap->mc_data + (data_size*worker_id*sizeof(void*)); + void* end = MIN((char *)start + data_size* sizeof(void*), (char*)parmap->mc_data + parmap->length*sizeof(void*)); + + //XBT_CRITICAL("Worker %d : %p -> %p (%d)", worker_id, start, end, data_size); + + while ( start < end && parmap->finish == -1) { + //XBT_CRITICAL("Starting with %p", start); + int res = parmap->snapshot_compare(*(void**)start, parmap->ref_snapshot); + start = (char *)start + sizeof(start); + if (!res){ + + parmap->finish = ((char*)start - (char*)parmap->mc_data) / sizeof(void*); + //XBT_CRITICAL("Find good one %p (%p)", start, parmap->mc_data); + break; + } + } +} - /* Compute how much data does every worker gets */ - data_size = (xbt_dynar_length(parmap->data) / parmap->num_workers) - + ((xbt_dynar_length(parmap->data) % parmap->num_workers) ? 1 : 0); +/** + * \brief Main function of a worker thread. 
+ * \param arg the parmap + */ +static void *xbt_parmap_mc_worker_main(void *arg) +{ + xbt_parmap_thread_data_t data = (xbt_parmap_thread_data_t) arg; + xbt_parmap_t parmap = data->parmap; + unsigned round = 0; + /* smx_context_t context = SIMIX_context_new(NULL, 0, NULL, NULL, NULL); */ + /* SIMIX_context_set_current(context); */ - /* Each worker data segment starts in a position associated with its id*/ - data_start = data_size * worker_id; + XBT_DEBUG("New worker thread created"); - /* The end of the worker data segment must be bounded by the end of the data vector */ - data_end = MIN(data_start + data_size, xbt_dynar_length(parmap->data)); + /* Worker's main loop */ + while (1) { + parmap->worker_wait_f(parmap, ++round); + if (parmap->status == XBT_PARMAP_WORK) { - DEBUG4("Worker %u: data_start=%u data_end=%u (data_size=%u)", - worker_id, data_start, data_end, data_size); + XBT_DEBUG("Worker %d got a job", data->worker_id); - /* While the worker don't pass the end of it data segment apply the function */ - while(data_start < data_end){ - parmap->fun(*(void **)xbt_dynar_get_ptr(parmap->data, data_start)); - data_start++; - } + xbt_parmap_mc_work(parmap, data->worker_id); + parmap->worker_signal_f(parmap); - xbt_barrier_wait(parmap->workers_done); + XBT_DEBUG("Worker %d has finished", data->worker_id); /* We are destroying the parmap */ - }else{ - xbt_barrier_wait(parmap->workers_done); - DEBUG1("Shutting down worker %u", worker_id); + } else { + xbt_free(data); + parmap->worker_signal_f(parmap); return NULL; } } } - -#ifdef HAVE_FUTEX_H - static void futex_wait(int *uaddr, int val) - { - DEBUG1("Waiting on futex %d", *uaddr); - syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0); - } - - static void futex_wake(int *uaddr, int val) - { - DEBUG1("Waking futex %d", *uaddr); - syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, val, NULL, NULL, 0); - } #endif -/* Futex based implementation of the barrier */ -void xbt_barrier_init(xbt_barrier_t barrier, unsigned int threads_to_wait) +#ifdef HAVE_FUTEX_H +static void futex_wait(unsigned *uaddr, unsigned val) { - barrier->threads_to_wait = threads_to_wait; - barrier->thread_count = 0; + XBT_VERB("Waiting on futex %p", uaddr); + syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0); } -#ifdef HAVE_FUTEX_H - void xbt_barrier_wait(xbt_barrier_t barrier) - { - int myflag = 0; - unsigned int mycount = 0; - - myflag = barrier->futex; - mycount = __sync_add_and_fetch(&barrier->thread_count, 1); - if(mycount < barrier->threads_to_wait){ - futex_wait(&barrier->futex, myflag); - }else{ - barrier->futex = __sync_add_and_fetch(&barrier->futex, 1); - barrier->thread_count = 0; - futex_wake(&barrier->futex, barrier->threads_to_wait); - } - } -#else - void xbt_barrier_wait(xbt_barrier_t barrier) - { - xbt_os_mutex_acquire(barrier->mutex); - - barrier->thread_count++; - if(barrier->thread_count < barrier->threads_to_wait){ - xbt_os_cond_wait(barrier->cond,barrier->mutex); - }else{ - barrier->thread_count = 0; - xbt_os_cond_broadcast(barrier->cond); - } - xbt_os_mutex_release(barrier->mutex); - } +static void futex_wake(unsigned *uaddr, unsigned val) +{ + XBT_VERB("Waking futex %p", uaddr); + syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, val, NULL, NULL, 0); +} #endif -#ifdef SIMGRID_TEST -#include "xbt.h" -#include "xbt/ex.h" - -XBT_TEST_SUITE("parmap", "Parallel Map"); -XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(xbt_parmap_unit); - +/** + * \brief Starts the parmap: waits for all workers to be ready and returns. 
+ * + * This function is called by the controller thread. + * + * \param parmap a parmap + */ +static void xbt_parmap_posix_master_wait(xbt_parmap_t parmap) +{ + xbt_os_mutex_acquire(parmap->done_mutex); + if (parmap->thread_counter < parmap->num_workers) { + /* wait for all workers to be ready */ + xbt_os_cond_wait(parmap->done_cond, parmap->done_mutex); + } + xbt_os_mutex_release(parmap->done_mutex); +} +/** + * \brief Ends the parmap: wakes the controller thread when all workers terminate. + * + * This function is called by all worker threads when they end (not including + * the controller). + * + * \param parmap a parmap + */ +static void xbt_parmap_posix_worker_signal(xbt_parmap_t parmap) +{ + xbt_os_mutex_acquire(parmap->done_mutex); + if (++parmap->thread_counter == parmap->num_workers) { + /* all workers have finished, wake the controller */ + xbt_os_cond_signal(parmap->done_cond); + } + xbt_os_mutex_release(parmap->done_mutex); +} -xbt_parmap_t parmap; +/** + * \brief Wakes all workers and waits for them to finish the tasks. + * + * This function is called by the controller thread. + * + * \param parmap a parmap + */ +static void xbt_parmap_posix_master_signal(xbt_parmap_t parmap) +{ + xbt_os_mutex_acquire(parmap->ready_mutex); + parmap->thread_counter = 1; + parmap->work++; + /* wake all workers */ + xbt_os_cond_broadcast(parmap->ready_cond); + xbt_os_mutex_release(parmap->ready_mutex); +} -void fun(void *arg); +/** + * \brief Waits for some work to process. + * + * This function is called by each worker thread (not including the controller) + * when it has no more work to do. + * + * \param parmap a parmap + * \param round the expected round number + */ +static void xbt_parmap_posix_worker_wait(xbt_parmap_t parmap, unsigned round) +{ + xbt_os_mutex_acquire(parmap->ready_mutex); + /* wait for more work */ + if (parmap->work != round) { + xbt_os_cond_wait(parmap->ready_cond, parmap->ready_mutex); + } + xbt_os_mutex_release(parmap->ready_mutex); +} -void fun(void *arg) +#ifdef HAVE_FUTEX_H +/** + * \brief Starts the parmap: waits for all workers to be ready and returns. + * + * This function is called by the controller thread. + * + * \param parmap a parmap + */ +static void xbt_parmap_futex_master_wait(xbt_parmap_t parmap) { - //INFO1("I'm job %lu", (unsigned long)arg); + unsigned count = parmap->thread_counter; + while (count < parmap->num_workers) { + /* wait for all workers to be ready */ + futex_wait(&parmap->thread_counter, count); + count = parmap->thread_counter; + } } -XBT_TEST_UNIT("basic", test_parmap_basic, "Basic usage") +/** + * \brief Ends the parmap: wakes the controller thread when all workers terminate. + * + * This function is called by all worker threads when they end (not including + * the controller). + * + * \param parmap a parmap + */ +static void xbt_parmap_futex_worker_signal(xbt_parmap_t parmap) { - xbt_test_add0("Create the parmap"); + unsigned count = __sync_add_and_fetch(&parmap->thread_counter, 1); + if (count == parmap->num_workers) { + /* all workers have finished, wake the controller */ + futex_wake(&parmap->thread_counter, INT_MAX); + } +} - unsigned long i,j; - xbt_dynar_t data = xbt_dynar_new(sizeof(void *), NULL); +/** + * \brief Wakes all workers and waits for them to finish the tasks. + * + * This function is called by the controller thread. 
+ * + * \param parmap a parmap + */ +static void xbt_parmap_futex_master_signal(xbt_parmap_t parmap) +{ + parmap->thread_counter = 1; + __sync_add_and_fetch(&parmap->work, 1); + /* wake all workers */ + futex_wake(&parmap->work, INT_MAX); +} - /* Create the parallel map */ - parmap = xbt_parmap_new(10); +/** + * \brief Waits for some work to process. + * + * This function is called by each worker thread (not including the controller) + * when it has no more work to do. + * + * \param parmap a parmap + * \param round the expected round number + */ +static void xbt_parmap_futex_worker_wait(xbt_parmap_t parmap, unsigned round) +{ + unsigned work = parmap->work; + /* wait for more work */ + while (work != round) { + futex_wait(&parmap->work, work); + work = parmap->work; + } +} +#endif - for(j=0; j < 100; j++){ - xbt_dynar_push_as(data, void *, (void *)j); +/** + * \brief Starts the parmap: waits for all workers to be ready and returns. + * + * This function is called by the controller thread. + * + * \param parmap a parmap + */ +static void xbt_parmap_busy_master_wait(xbt_parmap_t parmap) +{ + while (parmap->thread_counter < parmap->num_workers) { + xbt_os_thread_yield(); } +} - for(i=0; i < 5; i++) - xbt_parmap_apply(parmap, fun, data); +/** + * \brief Ends the parmap: wakes the controller thread when all workers terminate. + * + * This function is called by all worker threads when they end. + * + * \param parmap a parmap + */ +static void xbt_parmap_busy_worker_signal(xbt_parmap_t parmap) +{ + __sync_add_and_fetch(&parmap->thread_counter, 1); +} - /* Destroy the parmap */ - xbt_parmap_destroy(parmap); +/** + * \brief Wakes all workers and waits for them to finish the tasks. + * + * This function is called by the controller thread. + * + * \param parmap a parmap + */ +static void xbt_parmap_busy_master_signal(xbt_parmap_t parmap) +{ + parmap->thread_counter = 1; + __sync_add_and_fetch(&parmap->work, 1); } -#endif /* SIMGRID_TEST */ +/** + * \brief Waits for some work to process. + * + * This function is called by each worker thread (not including the controller) + * when it has no more work to do. + * + * \param parmap a parmap + * \param round the expected round number + */ +static void xbt_parmap_busy_worker_wait(xbt_parmap_t parmap, unsigned round) +{ + /* wait for more work */ + while (parmap->work != round) { + xbt_os_thread_yield(); + } +}
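
Appended note (not part of the patch): a minimal usage sketch of the reworked parmap API introduced above, assuming only the declarations visible in this diff (xbt/parmap.h and xbt/dynar.h). It mirrors the SIMGRID_TEST unit that the patch removes; the function name work_on() and the worker count are illustrative, not taken from the patch.

/* Illustrative sketch, not part of the commit. */
#include "xbt/parmap.h"
#include "xbt/dynar.h"

/* Callback with the void_f_pvoid_t signature expected by xbt_parmap_apply():
 * it receives one element of the dynar per call. */
static void work_on(void *arg)
{
  /* process one element; here each element is a small integer stored as a pointer */
  (void) arg;
}

static void parmap_usage_example(void)
{
  unsigned long j;

  /* Build the work list: a dynar of void* elements, as in the removed test. */
  xbt_dynar_t data = xbt_dynar_new(sizeof(void *), NULL);
  for (j = 0; j < 100; j++)
    xbt_dynar_push_as(data, void *, (void *) j);

  /* XBT_PARMAP_DEFAULT selects futex-based synchronization when HAVE_FUTEX_H
   * is defined and falls back to the POSIX condition-variable mode otherwise;
   * XBT_PARMAP_BUSY_WAIT is also available. */
  xbt_parmap_t parmap = xbt_parmap_new(10, XBT_PARMAP_DEFAULT);

  /* Blocks until work_on() has been applied to every element; the calling
   * thread acts as the controller and processes elements too. */
  xbt_parmap_apply(parmap, work_on, data);

  xbt_parmap_destroy(parmap);
  xbt_dynar_free(&data);
}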