[trace] adding a new boolean parameter (tracing/platform) to register platform in...
[simgrid.git] / src / xbt / parmap.c
index f4f92cd..a472c2f 100644 (file)
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "threadpool_private.h"
-
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_threadpool, xbt,
-                                "threadpool: pool of worker threads");
-
-static void *_xbt_tpool_worker_main(void *tpool);
-
-unsigned long tpoolcounter = 0;  /* Debug purposes */
-
-xbt_tpool_t xbt_tpool_new(unsigned int num_workers, unsigned int max_jobs)
+#include "gras_config.h"
+#include <unistd.h>
+#ifndef _XBT_WIN32
+#include <sys/syscall.h>
+#endif
+
+#ifdef HAVE_FUTEX_H
+#include <linux/futex.h>
+#else
+#include "xbt/xbt_os_thread.h"
+#endif
+#include <errno.h>
+#include "parmap_private.h"
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_parmap, xbt, "parmap: parallel map");
+XBT_LOG_NEW_SUBCATEGORY(xbt_parmap_unit, xbt_parmap, "parmap unit testing");
+
+static void *_xbt_parmap_worker_main(void *parmap);
+#ifdef HAVE_FUTEX_H
+static void futex_wait(int *uaddr, int val);
+static void futex_wake(int *uaddr, int val);
+#endif
+
+xbt_parmap_t xbt_parmap_new(unsigned int num_workers)
 {
   unsigned int i;
   xbt_os_thread_t worker = NULL;
 
-  DEBUG2("Create new thread pool (%u, %u)", num_workers, max_jobs);
-
-  /* Initialize thread pool data structure */
-  xbt_tpool_t tpool = xbt_new0(s_xbt_tpool_t, 1);
-  tpool->mutex = xbt_os_mutex_init();
-  tpool->job_posted = xbt_os_cond_init();
-  tpool->job_taken = xbt_os_cond_init();
-  tpool->idle_worker = xbt_os_cond_init();
-  tpool->jobs_queue = xbt_dynar_new(sizeof(s_xbt_tpool_job_t), NULL);
-  tpool->num_workers = num_workers;
-  tpool->num_idle_workers = 0;
-  tpool->max_jobs = max_jobs;
-  
+  XBT_DEBUG("Create new parmap (%u workers)", num_workers);
+
+  /* Initialize the parmap data structure */
+  xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1);
+#ifdef HAVE_FUTEX_H
+  parmap->sync_event = xbt_new0(s_xbt_event_t, 1);
+#endif
+  parmap->num_workers = num_workers;
+  parmap->status = PARMAP_WORK;
+#ifdef HAVE_FUTEX_H
+  parmap->sync_event->threads_to_wait = num_workers;
+#endif
   /* Create the pool of worker threads */
   for(i=0; i < num_workers; i++){
-    worker = xbt_os_thread_create(NULL, _xbt_tpool_worker_main, tpool, NULL);
+    worker = xbt_os_thread_create(NULL, _xbt_parmap_worker_main, parmap, NULL);
     xbt_os_thread_detach(worker);
   }
-  
-  return tpool;
+#ifdef HAVE_FUTEX_H
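+  /* Block until every worker has reached its first rendezvous point */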
+  xbt_event_init(parmap->sync_event);
+#endif
+  return parmap;
 }
 
-void xbt_tpool_destroy(xbt_tpool_t tpool)
+void xbt_parmap_destroy(xbt_parmap_t parmap)
 { 
-  DEBUG1("Destroy thread pool %p", tpool);
-
-  /* Lock the pool, then signal every worker an wait for each to finish */
-  xbt_os_mutex_acquire(tpool->mutex);
-  tpool->flags = TPOOL_DESTROY; 
-
-  while(tpool->num_workers){
-    DEBUG1("Still %u workers, waiting...", tpool->num_workers);
-    xbt_os_cond_signal(tpool->job_posted);
-    xbt_os_cond_wait(tpool->job_taken, tpool->mutex);
-  }
-
-  /* Destroy pool's data structures */
-  xbt_os_cond_destroy(tpool->job_posted);
-  xbt_os_cond_destroy(tpool->job_taken);
-  xbt_os_cond_destroy(tpool->idle_worker);
-  xbt_os_mutex_release(tpool->mutex);
-  xbt_os_mutex_destroy(tpool->mutex);  
-  xbt_free(tpool);
+  XBT_DEBUG("Destroy parmap %p", parmap);
+  parmap->status = PARMAP_DESTROY;
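+  /* Wake the workers; xbt_event_signal only returns once every worker
+   * has acknowledged the termination request */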
+#ifdef HAVE_FUTEX_H
+  xbt_event_signal(parmap->sync_event);
+  xbt_free(parmap->sync_event);
+#endif
+  xbt_free(parmap);
 }
 
-void xbt_tpool_queue_job(xbt_tpool_t tpool, void_f_pvoid_t fun, void* fun_arg)
+void xbt_parmap_apply(xbt_parmap_t parmap, void_f_pvoid_t fun, xbt_dynar_t data)
 {
-  s_xbt_tpool_job_t job;
-  job.fun = fun;
-  job.fun_arg = fun_arg;
-
-  /* Wait until we can lock on the pool with some space on it for the job */
-  xbt_os_mutex_acquire(tpool->mutex);
-  while(xbt_dynar_length(tpool->jobs_queue) == tpool->max_jobs)
-    xbt_os_cond_wait(tpool->job_taken, tpool->mutex); 
-
-  DEBUG3("Queue job %p (%p) to thread pool %p", fun, fun_arg, tpool);
-
-  /* Push the job in the queue, signal the workers and unlock the pool */
-  xbt_dynar_push_as(tpool->jobs_queue, s_xbt_tpool_job_t, job);
-  xbt_os_cond_signal(tpool->job_posted);
-  xbt_os_mutex_release(tpool->mutex);    
-  return;
+  /* Hand the function and the data over to the worker threads */
+  parmap->fun = fun;
+  parmap->data = data;
+  parmap->index = 0;
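+  /* Release the workers and block until they have all processed the data */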
+#ifdef HAVE_FUTEX_H
+  xbt_event_signal(parmap->sync_event);
+#endif
+  XBT_DEBUG("Job done");
 }
 
-void xbt_tpool_wait_all(xbt_tpool_t tpool)
+static void *_xbt_parmap_worker_main(void *arg)
 {
-  DEBUG1("Wait all workers in thread pool %p", tpool);
-  xbt_os_mutex_acquire(tpool->mutex);
+  unsigned int worker_id;
+  xbt_parmap_t parmap = (xbt_parmap_t)arg;
 
-  while(tpool->num_idle_workers < tpool->num_workers
-        || xbt_dynar_length(tpool->jobs_queue) > 0)
-    xbt_os_cond_wait(tpool->idle_worker, tpool->mutex);
+  /* Atomically fetch a unique worker id and attach it to this thread */
+  worker_id = __sync_fetch_and_add(&parmap->workers_max_id, 1);
+  xbt_os_thread_set_extra_data((void *)(unsigned long)worker_id);
 
-  xbt_os_mutex_release(tpool->mutex);
-  DEBUG1("Wait all workers done in thread pool %p", tpool);
-  return;
+  XBT_DEBUG("New worker thread created (%u)", worker_id);
+  
+  /* Worker's main loop */
+  while(1){
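+    /* Sleep until the master posts a new round of work (or termination) */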
+#ifdef HAVE_FUTEX_H
+    xbt_event_wait(parmap->sync_event);
+#endif
+    if(parmap->status == PARMAP_WORK){
+      unsigned int i;
+      unsigned int n = 0;
+
+      XBT_DEBUG("Worker %u got a job", worker_id);
+
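+      /* Grab slots with an atomic fetch-and-add, so the dynar elements
+       * are distributed dynamically among the workers */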
+      while ((i = __sync_fetch_and_add(&parmap->index, 1))
+             < xbt_dynar_length(parmap->data)) {
+        parmap->fun(xbt_dynar_get_as(parmap->data, i, void*));
+        n++;
+      }
+
+      XBT_DEBUG("Worker %u processed %u tasks", worker_id, n);
+
+    /* We are destroying the parmap */
+    }else{
+#ifdef HAVE_FUTEX_H
+      xbt_event_end(parmap->sync_event);
+#endif
+      XBT_DEBUG("Shutting down worker %u", worker_id);
+      return NULL;
+    }
+  }
 }
 
-static void *_xbt_tpool_worker_main(void *arg)
+#ifdef HAVE_FUTEX_H
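+/* Thin wrappers around the Linux futex(2) system call */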
+static void futex_wait(int *uaddr, int val)
 {
-  s_xbt_tpool_job_t job;
-  xbt_tpool_t tpool = (xbt_tpool_t)arg;
-
-  unsigned long i = tpoolcounter++; /* Debug purposes */
-  DEBUG1("New worker thread created (%lu)", i);
-  
+  XBT_VERB("Waiting on futex %p", uaddr);
+  syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0);
+}
 
-  /* Worker's main loop */
-  while(1){
-    xbt_os_mutex_acquire(tpool->mutex);
+static void futex_wake(int *uaddr, int val)
+{
+  XBT_VERB("Waking futex %p", uaddr);
+  syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, val, NULL, NULL, 0);
+}
 
-    tpool->num_idle_workers++;
-    xbt_os_cond_signal(tpool->idle_worker);
+void xbt_event_init(xbt_event_t event)
+{
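+  /* Block the master until every worker has checked in once */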
+  int myflag = event->done;
+  if(event->thread_counter < event->threads_to_wait)
+    futex_wait(&event->done, myflag);
+}
 
-    /* If there are no jobs in the queue wait for one */
-    while(xbt_dynar_length(tpool->jobs_queue) == 0
-          && tpool->flags != TPOOL_DESTROY){
-      DEBUG1("Worker %lu waiting for a job", i);
-      xbt_os_cond_wait(tpool->job_posted, tpool->mutex);
-    }
+void xbt_event_signal(xbt_event_t event)
+{
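+  /* Publish a new round of work: reset the arrival counter, bump the
+   * work generation to release the workers, then sleep until the last
+   * worker bumps 'done' */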
+  int myflag = event->done;
+  event->thread_counter = 0;
+  event->work++;
+  futex_wake(&event->work, event->threads_to_wait);
+  futex_wait(&event->done, myflag);
+}
 
-    DEBUG1("Worker %lu got a job", i);
+void xbt_event_wait(xbt_event_t event)
+{
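+  /* Announce our arrival; the last worker to arrive releases the
+   * master, then everyone parks on the current work generation */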
+  int myflag;
+  unsigned int mycount;
+
+  myflag = event->work;
+  mycount = __sync_add_and_fetch(&event->thread_counter, 1);
+  if(mycount == event->threads_to_wait){
+    event->done++;
+    futex_wake(&event->done, 1);
+  }
 
-    /* If we are shutting down, signal the destroyer so it can kill the other */
-    /* workers, unlock the pool and return  */
-    if(tpool->flags == TPOOL_DESTROY){
-      DEBUG1("Shutting down worker %lu", i);
-      tpool->num_idle_workers--;
-      tpool->num_workers--;
-      xbt_os_cond_signal(tpool->job_taken);
-      xbt_os_mutex_release(tpool->mutex);
-      return NULL;
-    }
+  futex_wait(&event->work, myflag);
+}
 
-    /* Get a job, signal the pool to inform jobs submitters and unlock it */
-    job = xbt_dynar_pop_as(tpool->jobs_queue, s_xbt_tpool_job_t);
-    xbt_os_cond_signal(tpool->job_taken);
-    tpool->num_idle_workers--;
-    xbt_os_mutex_release(tpool->mutex);
-  
-    /* Run the job and loop again ... */
-    job.fun(job.fun_arg);
+void xbt_event_end(xbt_event_t event)
+{
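+  /* Same arrival protocol as xbt_event_wait, but without parking
+   * again afterwards: called by terminating workers */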
+  int myflag;
+  unsigned int mycount;
+
+  myflag = event->work;
+  mycount = __sync_add_and_fetch(&event->thread_counter, 1);
+  if(mycount == event->threads_to_wait){
+    event->done++;
+    futex_wake(&event->done, 1);
   }
 }
+#endif
 
 #ifdef SIMGRID_TEST
 #include "xbt.h"
 #include "xbt/ex.h"
 
-XBT_TEST_SUITE("tpool", "Thread pool");
+XBT_TEST_SUITE("parmap", "Parallel Map");
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(xbt_parmap_unit);
 
-xbt_tpool_t tpool;
 
-void job(void *arg);
 
-void job (void *arg)
+xbt_parmap_t parmap;
+
+void fun(void *arg);
+
+void fun(void *arg)
 {
-  xbt_test_log1("I'm job %lu", (unsigned long)arg);
+  //XBT_INFO("I'm job %lu", (unsigned long)arg);
 }
 
-XBT_TEST_UNIT("basic", test_tpool_basic, "Basic usage")
+XBT_TEST_UNIT("basic", test_parmap_basic, "Basic usage")
 {
-  xbt_test_add0("Create thread pool");
+  xbt_test_add("Create the parmap");
 
   unsigned long i,j;
-  /* Create thread pool */
-  tpool = xbt_tpool_new(5, 10);
+  xbt_dynar_t data = xbt_dynar_new(sizeof(void *), NULL);
 
-  for(j=0; j < 10; j++){
-    /* Queue some work */
-    for(i=0; i < 20; i++){
-      xbt_tpool_queue_job(tpool, job, (void*)i);
-    }
-    /* Wait for everyone */
-    xbt_tpool_wait_all(tpool);
+  /* Create the parallel map */
+  parmap = xbt_parmap_new(10);
+
+  for(j=0; j < 100; j++){
+    xbt_dynar_push_as(data, void *, (void *)j);
   }
 
-  /* Destroy thread pool */
-  xbt_tpool_destroy(tpool);
+  for(i=0; i < 5; i++)
+    xbt_parmap_apply(parmap, fun, data);
+
+  /* Destroy the parmap */
+  xbt_parmap_destroy(parmap);
+  xbt_dynar_free(&data);
 }
 
 #endif /* SIMGRID_TEST */