X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/fff8fc755939b0f3fbb3fd62cf3cc5f83aa05c5b..5880a1bbbf150def1664cfefd6ec7f36f56edfaa:/src/xbt/parmap.c

diff --git a/src/xbt/parmap.c b/src/xbt/parmap.c
index c7592a044a..cdb6e916b6 100644
--- a/src/xbt/parmap.c
+++ b/src/xbt/parmap.c
@@ -1,8 +1,9 @@
-/* Copyright (c) 2004, 2005, 2007, 2009, 2010. The SimGrid Team.
+/* Copyright (c) 2004-2005, 2007, 2009-2014. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
+
 #include "internal_config.h"
 #include 
@@ -53,6 +54,10 @@ static void xbt_parmap_busy_worker_signal(xbt_parmap_t parmap);
 static void xbt_parmap_busy_master_signal(xbt_parmap_t parmap);
 static void xbt_parmap_busy_worker_wait(xbt_parmap_t parmap, unsigned round);
 
+#ifdef HAVE_MC
+static void xbt_parmap_mc_work(xbt_parmap_t parmap, int worker_id);
+static void *xbt_parmap_mc_worker_main(void *arg);
+#endif
 
 /**
  * \brief Parallel map structure
@@ -63,10 +68,19 @@ typedef struct s_xbt_parmap {
   unsigned thread_counter;      /**< number of workers that have done the work */
   unsigned int num_workers;     /**< total number of worker threads including the controller */
+  xbt_os_thread_t *workers;     /**< worker thread handlers */
   void_f_pvoid_t fun;           /**< function to run in parallel on each element of data */
   xbt_dynar_t data;             /**< parameters to pass to fun in parallel */
   unsigned int index;           /**< index of the next element of data to pick */
 
+#ifdef HAVE_MC
+  int finish;
+  void* ref_snapshot;
+  int_f_pvoid_pvoid_t snapshot_compare;
+  unsigned int length;
+  void* mc_data;
+#endif
+
   /* posix only */
   xbt_os_cond_t ready_cond;
   xbt_os_mutex_t ready_mutex;
@@ -100,12 +114,46 @@ typedef s_xbt_parmap_thread_data_t *xbt_parmap_thread_data_t;
 xbt_parmap_t xbt_parmap_new(unsigned int num_workers, e_xbt_parmap_mode_t mode)
 {
   unsigned int i;
-  xbt_os_thread_t worker = NULL;
 
   XBT_DEBUG("Create new parmap (%u workers)", num_workers);
 
   /* Initialize the thread pool data structure */
   xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1);
+  parmap->workers = xbt_new(xbt_os_thread_t, num_workers);
+
+  parmap->num_workers = num_workers;
+  parmap->status = XBT_PARMAP_WORK;
+  xbt_parmap_set_mode(parmap, mode);
+
+  /* Create the pool of worker threads */
+  xbt_parmap_thread_data_t data;
+  parmap->workers[0] = NULL;
+  for (i = 1; i < num_workers; i++) {
+    data = xbt_new0(s_xbt_parmap_thread_data_t, 1);
+    data->parmap = parmap;
+    data->worker_id = i;
+    parmap->workers[i] = xbt_os_thread_create(NULL, xbt_parmap_worker_main,
+                                              data, NULL);
+  }
+  return parmap;
+}
+
+#ifdef HAVE_MC
+/**
+ * \brief Creates a parallel map object
+ * \param num_workers number of worker threads to create
+ * \param mode how to synchronize the worker threads
+ * \return the parmap created
+ */
+xbt_parmap_t xbt_parmap_mc_new(unsigned int num_workers, e_xbt_parmap_mode_t mode)
+{
+  unsigned int i;
+
+  XBT_DEBUG("Create new parmap (%u workers)", num_workers);
+
+  /* Initialize the thread pool data structure */
+  xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1);
+  parmap->workers = xbt_new(xbt_os_thread_t, num_workers);
 
   parmap->num_workers = num_workers;
   parmap->status = XBT_PARMAP_WORK;
@@ -113,15 +161,17 @@ xbt_parmap_t xbt_parmap_new(unsigned int num_workers, e_xbt_parmap_mode_t mode)
 
   /* Create the pool of worker threads */
   xbt_parmap_thread_data_t data;
+  parmap->workers[0] = NULL;
   for (i = 1; i < num_workers; i++) {
     data = xbt_new0(s_xbt_parmap_thread_data_t, 1);
     data->parmap = parmap;
     data->worker_id = i;
-    worker = xbt_os_thread_create(NULL, xbt_parmap_worker_main, data, NULL);
-    xbt_os_thread_detach(worker);
+    parmap->workers[i] = xbt_os_thread_create(NULL, xbt_parmap_mc_worker_main,
+                                              data, NULL);
   }
   return parmap;
 }
+#endif
 
 /**
  * \brief Destroys a parmap
@@ -135,13 +185,17 @@ void xbt_parmap_destroy(xbt_parmap_t parmap)
 
   parmap->status = XBT_PARMAP_DESTROY;
   parmap->master_signal_f(parmap);
-  parmap->master_wait_f(parmap);
+
+  unsigned int i;
+  for (i = 1; i < parmap->num_workers; i++)
+    xbt_os_thread_join(parmap->workers[i], NULL);
 
   xbt_os_cond_destroy(parmap->ready_cond);
   xbt_os_mutex_destroy(parmap->ready_mutex);
   xbt_os_cond_destroy(parmap->done_cond);
   xbt_os_mutex_destroy(parmap->done_mutex);
 
+  xbt_free(parmap->workers);
   xbt_free(parmap);
 }
 
@@ -280,12 +334,94 @@ static void *xbt_parmap_worker_main(void *arg)
 
     /* We are destroying the parmap */
     } else {
+      SIMIX_context_free(context);
       xbt_free(data);
+      return NULL;
+    }
+  }
+}
+
+#ifdef HAVE_MC
+
+/**
+ * \brief Applies a list of tasks in parallel.
+ * \param parmap a parallel map object
+ * \param fun the function to call in parallel
+ * \param data each element of this dynar will be passed as an argument to fun
+ */
+int xbt_parmap_mc_apply(xbt_parmap_t parmap, int_f_pvoid_pvoid_t fun,
+                        void* data, unsigned int length, void* ref_snapshot)
+{
+  /* Assign resources to worker threads */
+  parmap->snapshot_compare = fun;
+  parmap->mc_data = data;
+  parmap->index = 0;
+  parmap->finish = -1;
+  parmap->length = length;
+  parmap->ref_snapshot = ref_snapshot;
+  parmap->master_signal_f(parmap);
+  xbt_parmap_mc_work(parmap, 0);
+  parmap->master_wait_f(parmap);
+  XBT_DEBUG("Job done");
+  return parmap->finish;
+}
+
+static void xbt_parmap_mc_work(xbt_parmap_t parmap, int worker_id)
+{
+  unsigned int data_size = (parmap->length / parmap->num_workers) +
+    ((parmap->length % parmap->num_workers) ? 1 : 0);
+  void* start = (char*)parmap->mc_data + (data_size * worker_id * sizeof(void*));
+  void* end = MIN((char*)start + data_size * sizeof(void*), (char*)parmap->mc_data + parmap->length * sizeof(void*));
+
+  //XBT_CRITICAL("Worker %d : %p -> %p (%d)", worker_id, start, end, data_size);
+
+  while (start < end && parmap->finish == -1) {
+    //XBT_CRITICAL("Starting with %p", start);
+    int res = parmap->snapshot_compare(*(void**)start, parmap->ref_snapshot);
+    start = (char*)start + sizeof(start);
+    if (!res) {
+      parmap->finish = ((char*)start - (char*)parmap->mc_data) / sizeof(void*);
+      //XBT_CRITICAL("Find good one %p (%p)", start, parmap->mc_data);
+      break;
+    }
+  }
+}
+
+/**
+ * \brief Main function of a worker thread.
+ * \param arg the parmap
+ */
+static void *xbt_parmap_mc_worker_main(void *arg)
+{
+  xbt_parmap_thread_data_t data = (xbt_parmap_thread_data_t) arg;
+  xbt_parmap_t parmap = data->parmap;
+  unsigned round = 0;
+  /* smx_context_t context = SIMIX_context_new(NULL, 0, NULL, NULL, NULL); */
+  /* SIMIX_context_set_current(context); */
+
+  XBT_DEBUG("New worker thread created");
+
+  /* Worker's main loop */
+  while (1) {
+    parmap->worker_wait_f(parmap, ++round);
+    if (parmap->status == XBT_PARMAP_WORK) {
+
+      XBT_DEBUG("Worker %d got a job", data->worker_id);
+
+      xbt_parmap_mc_work(parmap, data->worker_id);
       parmap->worker_signal_f(parmap);
+
+      XBT_DEBUG("Worker %d has finished", data->worker_id);
+
+    /* We are destroying the parmap */
+    } else {
+      xbt_free(data);
       return NULL;
     }
   }
 }
+#endif
 
 #ifdef HAVE_FUTEX_H
 static void futex_wait(unsigned *uaddr, unsigned val)
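
For readers who want to see the new entry points in action, here is a minimal usage sketch (not part of the patch). Only xbt_parmap_mc_new(), xbt_parmap_mc_apply() and xbt_parmap_destroy() come from the code above; the header path, the comparison callback and the visited_states array are illustrative assumptions. Note that despite the doc comment inherited from xbt_parmap_apply(), the data argument of xbt_parmap_mc_apply() is a plain array of snapshot pointers rather than a dynar, and fun is a comparison callback that returns 0 on a match.

/* Hypothetical driver for the MC parmap API above -- a sketch, not SimGrid code.
 * It assumes the new prototypes are visible (e.g. through xbt/parmap.h). */
#include <stdio.h>
#include "xbt/parmap.h"

/* Placeholder comparison callback: must return 0 on a match, which is what
 * the "if (!res)" test in xbt_parmap_mc_work() expects. */
static int snapshot_compare_stub(void *state, void *ref_snapshot)
{
  return state != ref_snapshot;  /* stand-in for a real snapshot comparison */
}

static void scan_visited_states(void **visited_states, unsigned int count,
                                void *ref_snapshot)
{
  /* Four threads in total; the calling thread acts as worker 0 inside
   * xbt_parmap_mc_apply(), the others run xbt_parmap_mc_worker_main(). */
  xbt_parmap_t parmap = xbt_parmap_mc_new(4, XBT_PARMAP_BUSY_WAIT);

  int found = xbt_parmap_mc_apply(parmap, snapshot_compare_stub,
                                  visited_states, count, ref_snapshot);
  if (found == -1)
    printf("no matching snapshot\n");
  else
    printf("match found at position %d\n", found);

  xbt_parmap_destroy(parmap);
}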
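
The work splitting done by xbt_parmap_mc_work() is easier to see without the pointer arithmetic: every worker (including the caller, which runs as worker 0) scans a contiguous slice of ceil(length / num_workers) elements, and the first thread to find a match stores into parmap->finish the position just past the matching element, so the value returned by xbt_parmap_mc_apply() is a 1-based position, or -1 when nothing matched. Below is a simplified, single-threaded model of that logic; all names are hypothetical and only the arithmetic mirrors the patch.

/* Simplified model of the slice computation and return convention used by
 * xbt_parmap_mc_work() / xbt_parmap_mc_apply(); illustrative names only. */
static int scan_slice(void **data, unsigned int length,
                      unsigned int num_workers, unsigned int worker_id,
                      void *ref_snapshot, int (*compare)(void *, void *))
{
  /* ceil(length / num_workers) elements per worker */
  unsigned int slice = length / num_workers +
                       (length % num_workers ? 1 : 0);
  unsigned int i   = slice * worker_id;
  unsigned int end = (i + slice > length) ? length : i + slice;

  for (; i < end; i++)
    if (compare(data[i], ref_snapshot) == 0)
      return (int)i + 1;       /* same 1-based convention as parmap->finish */

  return -1;                   /* no match in this worker's slice */
}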