X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/a889c5815b75beb11f858bab68b2314c8dc724c2..6e5cfd7ff86900354c20502af95ee5f751492753:/src/xbt/parmap.c

diff --git a/src/xbt/parmap.c b/src/xbt/parmap.c
index f4f92cd9f0..19dcb58027 100644
--- a/src/xbt/parmap.c
+++ b/src/xbt/parmap.c
@@ -3,181 +3,223 @@
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "threadpool_private.h"
-
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_threadpool, xbt,
-                                "threadpool: pool of worker threads");
-
-static void *_xbt_tpool_worker_main(void *tpool);
-
-unsigned long tpoolcounter = 0;  /* Debug purposes */
-
-xbt_tpool_t xbt_tpool_new(unsigned int num_workers, unsigned int max_jobs)
+#include "gras_config.h"
+#include <unistd.h>
+#ifndef _XBT_WIN32
+#include <sys/syscall.h>
+#endif
+
+#ifdef HAVE_FUTEX_H
+  #include <linux/futex.h>
+#else
+  #include "xbt/xbt_os_thread.h"
+#endif
+#include <errno.h>
+#include "parmap_private.h"
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_parmap, xbt, "parmap: parallel map");
+XBT_LOG_NEW_SUBCATEGORY(xbt_parmap_unit, xbt_parmap, "parmap unit testing");
+
+static void *_xbt_parmap_worker_main(void *parmap);
+#ifdef HAVE_FUTEX_H
+  static void futex_wait(int *uaddr, int val);
+  static void futex_wake(int *uaddr, int val);
+#endif
+xbt_parmap_t xbt_parmap_new(unsigned int num_workers)
 {
   unsigned int i;
   xbt_os_thread_t worker = NULL;
-  DEBUG2("Create new thread pool (%u, %u)", num_workers, max_jobs);
-
-  /* Initialize thread pool data structure */
-  xbt_tpool_t tpool = xbt_new0(s_xbt_tpool_t, 1);
-  tpool->mutex = xbt_os_mutex_init();
-  tpool->job_posted = xbt_os_cond_init();
-  tpool->job_taken = xbt_os_cond_init();
-  tpool->idle_worker = xbt_os_cond_init();
-  tpool->jobs_queue = xbt_dynar_new(sizeof(s_xbt_tpool_job_t), NULL);
-  tpool->num_workers = num_workers;
-  tpool->num_idle_workers = 0;
-  tpool->max_jobs = max_jobs;
-
+  XBT_DEBUG("Create new parmap (%u workers)", num_workers);
+
+  /* Initialize the thread pool data structure */
+  xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1);
+#ifdef HAVE_FUTEX_H
+  parmap->sync_event = xbt_new0(s_xbt_event_t, 1);
+#endif
+  parmap->num_workers = num_workers;
+  parmap->status = PARMAP_WORK;
+#ifdef HAVE_FUTEX_H
+  parmap->sync_event->threads_to_wait = num_workers;
+#endif
   /* Create the pool of worker threads */
   for(i=0; i < num_workers; i++){
-    worker = xbt_os_thread_create(NULL, _xbt_tpool_worker_main, tpool, NULL);
+    worker = xbt_os_thread_create(NULL, _xbt_parmap_worker_main, parmap, NULL);
     xbt_os_thread_detach(worker);
   }
-
-  return tpool;
+#ifdef HAVE_FUTEX_H
+  xbt_event_init(parmap->sync_event);
+#endif
+  return parmap;
 }

-void xbt_tpool_destroy(xbt_tpool_t tpool)
+void xbt_parmap_destroy(xbt_parmap_t parmap)
 {
-  DEBUG1("Destroy thread pool %p", tpool);
+  parmap->status = PARMAP_DESTROY;
+#ifdef HAVE_FUTEX_H
+  xbt_event_signal(parmap->sync_event);
+  xbt_free(parmap->sync_event);
+#endif
+  xbt_free(parmap);
+}
+
+void xbt_parmap_apply(xbt_parmap_t parmap, void_f_pvoid_t fun, xbt_dynar_t data)
+{
+  /* Assign resources to worker threads */
+  parmap->fun = fun;
+  parmap->data = data;
+  parmap->index = 0;
+#ifdef HAVE_FUTEX_H
+  xbt_event_signal(parmap->sync_event);
+#endif
+  XBT_DEBUG("Job done");
+}

-  /* Lock the pool, then signal every worker an wait for each to finish */
-  xbt_os_mutex_acquire(tpool->mutex);
-  tpool->flags = TPOOL_DESTROY;
+void* xbt_parmap_next(xbt_parmap_t parmap) {
-  while(tpool->num_workers){
-    DEBUG1("Still %u workers, waiting...",
-           tpool->num_workers);
-    xbt_os_cond_signal(tpool->job_posted);
-    xbt_os_cond_wait(tpool->job_taken, tpool->mutex);
+  unsigned int index = __sync_fetch_and_add(&parmap->index, 1);
+  if (index < xbt_dynar_length(parmap->data)) {
+    return xbt_dynar_get_as(parmap->data, index, void*);
   }
-
-  /* Destroy pool's data structures */
-  xbt_os_cond_destroy(tpool->job_posted);
-  xbt_os_cond_destroy(tpool->job_taken);
-  xbt_os_cond_destroy(tpool->idle_worker);
-  xbt_os_mutex_release(tpool->mutex);
-  xbt_os_mutex_destroy(tpool->mutex);
-  xbt_free(tpool);
+  return NULL;
 }

-void xbt_tpool_queue_job(xbt_tpool_t tpool, void_f_pvoid_t fun, void* fun_arg)
-{
-  s_xbt_tpool_job_t job;
-  job.fun = fun;
-  job.fun_arg = fun_arg;
-
-  /* Wait until we can lock on the pool with some space on it for the job */
-  xbt_os_mutex_acquire(tpool->mutex);
-  while(xbt_dynar_length(tpool->jobs_queue) == tpool->max_jobs)
-    xbt_os_cond_wait(tpool->job_taken, tpool->mutex);
-
-  DEBUG3("Queue job %p (%p) to thread pool %p", fun, fun_arg, tpool);
-
-  /* Push the job in the queue, signal the workers and unlock the pool */
-  xbt_dynar_push_as(tpool->jobs_queue, s_xbt_tpool_job_t, job);
-  xbt_os_cond_signal(tpool->job_posted);
-  xbt_os_mutex_release(tpool->mutex);
-  return;
+unsigned long xbt_parmap_get_worker_id(xbt_parmap_t parmap) {
+  return (unsigned long) xbt_os_thread_get_extra_data();
 }

-void xbt_tpool_wait_all(xbt_tpool_t tpool)
+static void *_xbt_parmap_worker_main(void *arg)
 {
-  DEBUG1("Wait all workers in thread pool %p", tpool);
-  xbt_os_mutex_acquire(tpool->mutex);
+  unsigned int worker_id;
+  xbt_parmap_t parmap = (xbt_parmap_t) arg;

-  while(tpool->num_idle_workers < tpool->num_workers
-        || xbt_dynar_length(tpool->jobs_queue) > 0)
-    xbt_os_cond_wait(tpool->idle_worker, tpool->mutex);
+  /* Fetch a worker id */
+  worker_id = __sync_fetch_and_add(&parmap->workers_max_id, 1);
+  xbt_os_thread_set_extra_data((void*) (unsigned long) worker_id);

-  xbt_os_mutex_release(tpool->mutex);
-  DEBUG1("Wait all workers done in thread pool %p", tpool);
-  return;
+  XBT_DEBUG("New worker thread created (%u)", worker_id);
+
+  /* Worker's main loop */
+  while (1) {
+#ifdef HAVE_FUTEX_H
+    xbt_event_wait(parmap->sync_event);
+#endif
+    if (parmap->status == PARMAP_WORK) {
+
+      XBT_DEBUG("Worker %u got a job", worker_id);
+
+      void* work = xbt_parmap_next(parmap);
+      if (work != NULL) {
+        parmap->fun(work);
+      }
+
+      XBT_DEBUG("Worker %u has finished", worker_id);
+
+    /* We are destroying the parmap */
+    } else {
+#ifdef HAVE_FUTEX_H
+      xbt_event_end(parmap->sync_event);
+#endif
+      XBT_DEBUG("Shutting down worker %u", worker_id);
+      return NULL;
+    }
+  }
 }

-static void *_xbt_tpool_worker_main(void *arg)
+#ifdef HAVE_FUTEX_H
+static void futex_wait(int *uaddr, int val)
 {
-  s_xbt_tpool_job_t job;
-  xbt_tpool_t tpool = (xbt_tpool_t)arg;
+  XBT_VERB("Waiting on futex %p", uaddr);
+  syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0);
+}

-  unsigned long i = tpoolcounter++; /* Debug purposes */
-  DEBUG1("New worker thread created (%lu)", i);
-
+static void futex_wake(int *uaddr, int val)
+{
+  XBT_VERB("Waking futex %p", uaddr);
+  syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, val, NULL, NULL, 0);
+}

-  /* Worker's main loop */
-  while(1){
-    xbt_os_mutex_acquire(tpool->mutex);
+void xbt_event_init(xbt_event_t event)
+{
+  int myflag = event->done;
+  if(event->thread_counter < event->threads_to_wait)
+    futex_wait(&event->done, myflag);
+}

-    tpool->num_idle_workers++;
-    xbt_os_cond_signal(tpool->idle_worker);
+void xbt_event_signal(xbt_event_t event)
+{
+  int myflag = event->done;
+  event->thread_counter = 0;
+  event->work++;
+  futex_wake(&event->work, event->threads_to_wait);
+  futex_wait(&event->done, myflag);
+}

-    /* If there are no jobs in the queue wait for one */
-    while(xbt_dynar_length(tpool->jobs_queue) == 0
-          && tpool->flags != TPOOL_DESTROY){
-      DEBUG1("Worker %lu waiting for a job", i);
-      xbt_os_cond_wait(tpool->job_posted, tpool->mutex);
-    }
+void xbt_event_wait(xbt_event_t event)
+{
+  int myflag;
+  unsigned int mycount;
+
+  myflag = event->work;
+  mycount = __sync_add_and_fetch(&event->thread_counter, 1);
+  if(mycount == event->threads_to_wait){
+    event->done++;
+    futex_wake(&event->done, 1);
+  }

-    DEBUG1("Worker %lu got a job", i);
+  futex_wait(&event->work, myflag);
+}

-    /* If we are shutting down, signal the destroyer so it can kill the other */
-    /* workers, unlock the pool and return */
-    if(tpool->flags == TPOOL_DESTROY){
-      DEBUG1("Shutting down worker %lu", i);
-      tpool->num_idle_workers--;
-      tpool->num_workers--;
-      xbt_os_cond_signal(tpool->job_taken);
-      xbt_os_mutex_release(tpool->mutex);
-      return NULL;
-    }
+void xbt_event_end(xbt_event_t event)
+{
+  unsigned int mycount;

-    /* Get a job, signal the pool to inform jobs submitters and unlock it */
-    job = xbt_dynar_pop_as(tpool->jobs_queue, s_xbt_tpool_job_t);
-    xbt_os_cond_signal(tpool->job_taken);
-    tpool->num_idle_workers--;
-    xbt_os_mutex_release(tpool->mutex);
-
-    /* Run the job and loop again ... */
-    job.fun(job.fun_arg);
+  mycount = __sync_add_and_fetch(&event->thread_counter, 1);
+  if(mycount == event->threads_to_wait){
+    event->done++;
+    futex_wake(&event->done, 1);
   }
 }
+#endif

 #ifdef SIMGRID_TEST
 #include "xbt.h"
 #include "xbt/ex.h"

-XBT_TEST_SUITE("tpool", "Thread pool");
+XBT_TEST_SUITE("parmap", "Parallel Map");
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(xbt_parmap_unit);

-xbt_tpool_t tpool;
-void job(void *arg);
-void job (void *arg)
+xbt_parmap_t parmap;
+
+void fun(void *arg);
+
+void fun(void *arg)
 {
-  xbt_test_log1("I'm job %lu", (unsigned long)arg);
+  //XBT_INFO("I'm job %lu", (unsigned long)arg);
 }

-XBT_TEST_UNIT("basic", test_tpool_basic, "Basic usage")
+XBT_TEST_UNIT("basic", test_parmap_basic, "Basic usage")
 {
-  xbt_test_add0("Create thread pool");
+  xbt_test_add("Create the parmap");

   unsigned long i,j;
-  /* Create thread pool */
-  tpool = xbt_tpool_new(5, 10);
+  xbt_dynar_t data = xbt_dynar_new(sizeof(void *), NULL);

-  for(j=0; j < 10; j++){
-    /* Queue some work */
-    for(i=0; i < 20; i++){
-      xbt_tpool_queue_job(tpool, job, (void*)i);
-    }
-    /* Wait for everyone */
-    xbt_tpool_wait_all(tpool);
+  /* Create the parallel map */
+  parmap = xbt_parmap_new(10);
+
+  for(j=0; j < 100; j++){
+    xbt_dynar_push_as(data, void *, (void *)j);
   }

-  /* Destroy thread pool */
-  xbt_tpool_destroy(tpool);
+  for(i=0; i < 5; i++)
+    xbt_parmap_apply(parmap, fun, data);
+
+  /* Destroy the parmap */
+  xbt_parmap_destroy(parmap);
+  xbt_dynar_free(&data);
 }
 #endif /* SIMGRID_TEST */
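
A note on using the new API: a parmap round is driven exactly as in the unit test above. Fill a dynar, call xbt_parmap_apply(), and the pool of detached workers maps the function over the elements. The sketch below is mine, not part of the commit; it assumes the parmap declarations are visible to the caller (the unit test compiles inside parmap.c itself, where parmap_private.h is already included), and it relies on the HAVE_FUTEX_H build for xbt_parmap_apply() to block until the round completes. Also note that in this revision the worker loop hands each thread a single element per round, so a callback that wants the whole dynar covered keeps pulling elements through the public xbt_parmap_next():

#include <stdio.h>
#include "xbt.h"            /* assumed to expose dynar and parmap declarations */

static xbt_parmap_t parmap; /* global so the callback can pull more work */

/* Applied to one dynar element, then drains the shared index until empty. */
static void process(void *first)
{
  void *item = first;
  do {
    printf("worker %lu handles item %lu\n",
           xbt_parmap_get_worker_id(parmap), (unsigned long) item);
    item = xbt_parmap_next(parmap);
  } while (item != NULL);
}

int main(int argc, char **argv)
{
  unsigned long i;
  xbt_init(&argc, argv);

  xbt_dynar_t data = xbt_dynar_new(sizeof(void *), NULL);
  for (i = 0; i < 100; i++)
    xbt_dynar_push_as(data, void *, (void *) i);

  parmap = xbt_parmap_new(4);               /* 4 detached worker threads */
  xbt_parmap_apply(parmap, process, data);  /* blocks (futex build) until done */

  xbt_parmap_destroy(parmap);
  xbt_dynar_free(&data);
  return 0;
}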
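
The interesting change in work distribution is xbt_parmap_next(): the mutex-protected job queue of the old tpool is replaced by a shared cursor that every worker advances with the GCC builtin __sync_fetch_and_add(), so claiming an item costs one atomic instruction and can never block. Here is a standalone sketch of the same pattern, using plain pthreads in place of xbt_os_thread; the names are illustrative, not from the commit:

/* Sketch (not part of the commit) of the lock-free distribution used by
 * xbt_parmap_next(): workers claim items by atomically bumping a cursor. */
#include <stdio.h>
#include <pthread.h>

#define NTASKS   100
#define NWORKERS 4

static int tasks[NTASKS];
static unsigned int next_index = 0;   /* shared cursor, like parmap->index */

static void *worker(void *arg)
{
  (void) arg;
  while (1) {
    /* One atomic instruction claims a slot; no lock, no queue. */
    unsigned int i = __sync_fetch_and_add(&next_index, 1);
    if (i >= NTASKS)
      return NULL;   /* out of work: the counterpart of returning NULL above */
    tasks[i] *= 2;   /* stands in for parmap->fun(work) */
  }
}

int main(void)
{
  pthread_t t[NWORKERS];
  int i;

  for (i = 0; i < NTASKS; i++)
    tasks[i] = i;
  for (i = 0; i < NWORKERS; i++)
    pthread_create(&t[i], NULL, worker, NULL);
  for (i = 0; i < NWORKERS; i++)
    pthread_join(t[i], NULL);

  printf("tasks[99] = %d (expected 198)\n", tasks[99]);
  return 0;
}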
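
The xbt_event_* functions build a reusable rendezvous out of two futex words: workers sleep on a generation counter (event->work) while the signaling thread sleeps on a completion counter (event->done). Each side snapshots the word before it could possibly sleep; since FUTEX_WAIT returns immediately when the word no longer equals the snapshot, a wake-up that races ahead of the corresponding wait cannot be lost. Below is a minimal single-round sketch of that protocol. It is Linux-only, my own illustration rather than the commit's code, and it omits the retry loop that production code needs to survive spurious wake-ups:

/* Sketch (mine) of the two-futex rendezvous behind the xbt_event_* functions. */
#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

#define NWORKERS 4

static int work = 0;              /* generation counter, like event->work */
static int done = 0;              /* completion counter, like event->done */
static unsigned int arrived = 0;  /* like event->thread_counter */

static void futex_wait(int *uaddr, int val)
{
  /* Sleeps only while *uaddr == val: if the wake already happened,
   * the kernel returns immediately instead of blocking forever. */
  syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0);
}

static void futex_wake(int *uaddr, int nwaiters)
{
  syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, nwaiters, NULL, NULL, 0);
}

static void *worker(void *arg)
{
  int my_generation = work;           /* snapshot before checking in */
  printf("worker %ld: done working\n", (long) arg);
  if (__sync_add_and_fetch(&arrived, 1) == NWORKERS) {
    done++;                           /* last one in releases the signaler */
    futex_wake(&done, 1);
  }
  futex_wait(&work, my_generation);   /* sleep until the next generation */
  return NULL;
}

int main(void)
{
  pthread_t t[NWORKERS];
  long i;
  int done_snapshot = done;           /* snapshot before starting anyone */

  for (i = 0; i < NWORKERS; i++)
    pthread_create(&t[i], NULL, worker, (void *) i);

  futex_wait(&done, done_snapshot);   /* block until all have checked in */
  printf("all %d workers checked in\n", NWORKERS);

  work++;                             /* open the next generation ... */
  futex_wake(&work, NWORKERS);        /* ... and wake every worker */
  for (i = 0; i < NWORKERS; i++)
    pthread_join(t[i], NULL);
  return 0;
}

This also shows why xbt_parmap_apply() can double as a completion barrier in the futex build: xbt_event_signal() does not return until the workers themselves bump the done counter, exactly as the signaler above sleeps until the last worker arrives.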