-/* Copyright (c) 2004, 2005, 2007, 2009, 2010. The SimGrid Team.
+/* Copyright (c) 2004-2005, 2007, 2009-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+
#include "internal_config.h"
#include <unistd.h>
static void xbt_parmap_busy_master_signal(xbt_parmap_t parmap);
static void xbt_parmap_busy_worker_wait(xbt_parmap_t parmap, unsigned round);
+#ifdef HAVE_MC
+static void xbt_parmap_mc_work(xbt_parmap_t parmap, int worker_id);
+static void *xbt_parmap_mc_worker_main(void *arg);
+#endif
/**
* \brief Parallel map structure
unsigned thread_counter; /**< number of workers that have done the work */
unsigned int num_workers; /**< total number of worker threads including the controller */
+ xbt_os_thread_t *workers; /**< worker thread handlers */
void_f_pvoid_t fun; /**< function to run in parallel on each element of data */
xbt_dynar_t data; /**< parameters to pass to fun in parallel */
unsigned int index; /**< index of the next element of data to pick */
+#ifdef HAVE_MC
+ int finish;
+ void* ref_snapshot;
+ int_f_pvoid_pvoid_t snapshot_compare;
+ unsigned int length;
+ void* mc_data;
+#endif
+
/* posix only */
xbt_os_cond_t ready_cond;
xbt_os_mutex_t ready_mutex;
xbt_parmap_t xbt_parmap_new(unsigned int num_workers, e_xbt_parmap_mode_t mode)
{
unsigned int i;
- xbt_os_thread_t worker = NULL;
XBT_DEBUG("Create new parmap (%u workers)", num_workers);
/* Initialize the thread pool data structure */
xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1);
+ /* keep the thread handles so that xbt_parmap_destroy() can join them */
+ parmap->workers = xbt_new(xbt_os_thread_t, num_workers);
parmap->num_workers = num_workers;
parmap->status = XBT_PARMAP_WORK;
/* Create the pool of worker threads */
xbt_parmap_thread_data_t data;
+ /* slot 0 belongs to the calling (maestro) thread, which works too: no handle */
+ parmap->workers[0] = NULL;
+#ifdef CORE_BINDING
+ unsigned int core_bind = 0;
+#endif
for (i = 1; i < num_workers; i++) {
data = xbt_new0(s_xbt_parmap_thread_data_t, 1);
data->parmap = parmap;
data->worker_id = i;
- worker = xbt_os_thread_create(NULL, xbt_parmap_worker_main, data, NULL);
- xbt_os_thread_detach(worker);
+ /* workers are no longer detached: they must stay joinable for destroy */
+ parmap->workers[i] = xbt_os_thread_create(NULL, xbt_parmap_worker_main,
+ data, NULL);
+#ifdef CORE_BINDING
+ /* bind workers to physical cores in round-robin order.
+ NOTE(review): xbt_os_get_numcores() returns the core COUNT, so valid
+ indices are 0..count-1; this test only wraps AFTER core_bind has
+ reached the count itself, binding one worker to a nonexistent core.
+ Looks like it should be "!= xbt_os_get_numcores() - 1" — confirm. */
+ xbt_os_thread_bind(parmap->workers[i], core_bind);
+ if(core_bind!=xbt_os_get_numcores()){
+ core_bind++;
+ } else {
+ core_bind = 0;
+ }
+#endif
}
return parmap;
}
+#ifdef HAVE_MC
+/**
+ * \brief Creates a parallel map object to be used by the model checker.
+ *
+ * Same pool layout as xbt_parmap_new(), but the workers run
+ * xbt_parmap_mc_worker_main() (parallel snapshot comparison) instead of the
+ * generic worker loop.
+ *
+ * \param num_workers number of worker threads to create (including the caller)
+ * \param mode how to synchronize the worker threads
+ * \return the parmap created
+ */
+xbt_parmap_t xbt_parmap_mc_new(unsigned int num_workers, e_xbt_parmap_mode_t mode)
+{
+ unsigned int i;
+
+ XBT_DEBUG("Create new parmap (%u workers)", num_workers);
+
+ /* Initialize the thread pool data structure */
+ xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1);
+ /* joinable handles, freed/joined in xbt_parmap_destroy() */
+ parmap->workers = xbt_new(xbt_os_thread_t, num_workers);
+
+ parmap->num_workers = num_workers;
+ parmap->status = XBT_PARMAP_WORK;
+ xbt_parmap_set_mode(parmap, mode);
+
+ /* Create the pool of worker threads */
+ xbt_parmap_thread_data_t data;
+ /* slot 0 is the calling (maestro) thread, which works too: no handle */
+ parmap->workers[0] = NULL;
+ for (i = 1; i < num_workers; i++) {
+ data = xbt_new0(s_xbt_parmap_thread_data_t, 1);
+ data->parmap = parmap;
+ data->worker_id = i;
+ parmap->workers[i] = xbt_os_thread_create(NULL, xbt_parmap_mc_worker_main,
+ data, NULL);
+ }
+ return parmap;
+}
+#endif
+
/**
* \brief Destroys a parmap
* \param parmap the parmap to destroy
parmap->status = XBT_PARMAP_DESTROY;
parmap->master_signal_f(parmap);
- parmap->master_wait_f(parmap);
+
+ unsigned int i;
+ for (i = 1; i < parmap->num_workers; i++)
+ xbt_os_thread_join(parmap->workers[i], NULL);
xbt_os_cond_destroy(parmap->ready_cond);
xbt_os_mutex_destroy(parmap->ready_mutex);
xbt_os_cond_destroy(parmap->done_cond);
xbt_os_mutex_destroy(parmap->done_mutex);
+ xbt_free(parmap->workers);
xbt_free(parmap);
}
*/
void xbt_parmap_apply(xbt_parmap_t parmap, void_f_pvoid_t fun, xbt_dynar_t data)
{
- /* Assign resources to worker threads */
+ /* Assign resources to worker threads (we are maestro here)*/
parmap->fun = fun;
parmap->data = data;
parmap->index = 0;
- parmap->master_signal_f(parmap);
- xbt_parmap_work(parmap);
- parmap->master_wait_f(parmap);
- XBT_DEBUG("Job done");
+ parmap->master_signal_f(parmap); // maestro wakes up the sleeping workers (futex_wake / cond broadcast / busy flag, depending on mode)
+ xbt_parmap_work(parmap); // maestro takes part in the work itself
+ parmap->master_wait_f(parmap); // then blocks until the last worker reports completion
+ XBT_DEBUG("Job done"); // ... and proceeds
}
/**
/* We are destroying the parmap */
} else {
+ SIMIX_context_free(context);
xbt_free(data);
+ return NULL;
+ }
+ }
+}
+
+#ifdef HAVE_MC
+
+/**
+ * \brief Compares, in parallel, each snapshot of an array against a
+ * reference snapshot.
+ *
+ * The caller (maestro) participates in the work, like xbt_parmap_apply().
+ *
+ * \param parmap a parallel map object
+ * \param fun comparison function; returns 0 when the two snapshots match
+ * \param data array of 'length' snapshot pointers (void*) to scan
+ * \param length number of elements in data
+ * \param ref_snapshot the snapshot each element is compared against
+ * \return -1 when no element matched, otherwise the value the winning worker
+ * stored in parmap->finish (NOTE(review): that is the matching
+ * element's index PLUS ONE — see xbt_parmap_mc_work(); confirm
+ * callers expect this)
+ */
+int xbt_parmap_mc_apply(xbt_parmap_t parmap, int_f_pvoid_pvoid_t fun,
+ void* data, unsigned int length, void* ref_snapshot)
+{
+ /* Assign resources to worker threads */
+ parmap->snapshot_compare = fun;
+ parmap->mc_data = data;
+ parmap->index = 0;
+ parmap->finish = -1;
+ parmap->length = length;
+ parmap->ref_snapshot = ref_snapshot;
+ parmap->master_signal_f(parmap);
+ xbt_parmap_mc_work(parmap, 0);
+ parmap->master_wait_f(parmap);
+ XBT_DEBUG("Job done");
+ return parmap->finish;
+}
+
+/**
+ * \brief Scans one worker's contiguous chunk of the snapshot array for an
+ * element equal to the reference snapshot.
+ *
+ * The array of parmap->length void* elements is split into num_workers
+ * chunks of ceil(length / num_workers) elements; worker 'worker_id' scans
+ * the worker_id-th chunk (clamped to the end of the array).
+ *
+ * \param parmap the parallel map object (carries data, length, ref_snapshot)
+ * \param worker_id this worker's index (0 = maestro)
+ */
+static void xbt_parmap_mc_work(xbt_parmap_t parmap, int worker_id)
+{
+ /* chunk size, rounded up so all elements are covered */
+ unsigned int data_size = (parmap->length / parmap->num_workers) +
+ ((parmap->length % parmap->num_workers) ? 1 :0);
+ void* start = (char*)parmap->mc_data + (data_size*worker_id*sizeof(void*));
+ void* end = MIN((char *)start + data_size* sizeof(void*), (char*)parmap->mc_data + parmap->length*sizeof(void*));
+
+ //XBT_CRITICAL("Worker %d : %p -> %p (%d)", worker_id, start, end, data_size);
+
+ /* stop early if another worker already found a match (finish != -1).
+ NOTE(review): finish is read and written by several threads with no
+ synchronization — a benign race for early exit, but also means two
+ workers matching simultaneously may race on the stored value; confirm
+ this is acceptable to callers. */
+ while ( start < end && parmap->finish == -1) {
+ //XBT_CRITICAL("Starting with %p", start);
+ int res = parmap->snapshot_compare(*(void**)start, parmap->ref_snapshot);
+ start = (char *)start + sizeof(start);
+ if (!res){
+
+ /* NOTE(review): start was already advanced past the matching element,
+ so this stores (matching index + 1), never 0 — confirm intended. */
+ parmap->finish = ((char*)start - (char*)parmap->mc_data) / sizeof(void*);
+ //XBT_CRITICAL("Find good one %p (%p)", start, parmap->mc_data);
+ break;
+ }
+ }
+}
+
+/**
+ * \brief Main function of a model-checker worker thread.
+ *
+ * Waits for work signaled by the maestro, runs xbt_parmap_mc_work() on its
+ * chunk, signals completion, and loops — until the parmap status switches
+ * away from XBT_PARMAP_WORK (destroy), at which point it frees its thread
+ * data and returns so that xbt_parmap_destroy() can join it.
+ *
+ * \param arg the xbt_parmap_thread_data_t of this worker
+ */
+static void *xbt_parmap_mc_worker_main(void *arg)
+{
+ xbt_parmap_thread_data_t data = (xbt_parmap_thread_data_t) arg;
+ xbt_parmap_t parmap = data->parmap;
+ unsigned round = 0;
+ /* unlike the generic worker, no SIMIX context is set up here */
+ /* smx_context_t context = SIMIX_context_new(NULL, 0, NULL, NULL, NULL); */
+ /* SIMIX_context_set_current(context); */
+
+ XBT_DEBUG("New worker thread created");
+
+ /* Worker's main loop */
+ while (1) {
+ parmap->worker_wait_f(parmap, ++round);
+ if (parmap->status == XBT_PARMAP_WORK) {
+
+ XBT_DEBUG("Worker %d got a job", data->worker_id);
+
+ xbt_parmap_mc_work(parmap, data->worker_id);
parmap->worker_signal_f(parmap);
+
+ XBT_DEBUG("Worker %d has finished", data->worker_id);
+
+ /* We are destroying the parmap */
+ } else {
+ xbt_free(data);
return NULL;
}
}
}
+#endif
#ifdef HAVE_FUTEX_H
static void futex_wait(unsigned *uaddr, unsigned val)