1 /* Copyright (c) 2004, 2005, 2007, 2009, 2010. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
6 #include "gras_config.h"
9 #include <sys/syscall.h>
13 #include <linux/futex.h>
15 #include "xbt/xbt_os_thread.h"
18 #include "parmap_private.h"
/* Logging channels for the parallel-map module and its unit tests */
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_parmap, xbt, "parmap: parallel map");
XBT_LOG_NEW_SUBCATEGORY(xbt_parmap_unit, xbt_parmap, "parmap unit testing");

/* Entry point executed by every worker thread of the pool */
static void *_xbt_parmap_worker_main(void *parmap);

/* Thin wrappers around the Linux futex syscall, used by the
 * futex-based barrier implementation below */
static void futex_wait(int *uaddr, int val);
static void futex_wake(int *uaddr, int val);
28 xbt_parmap_t xbt_parmap_new(unsigned int num_workers)
31 xbt_os_thread_t worker = NULL;
33 DEBUG1("Create new parmap (%u workers)", num_workers);
35 /* Initialize the thread pool data structure */
36 xbt_parmap_t parmap = xbt_new0(s_xbt_parmap_t, 1);
37 parmap->num_workers = num_workers;
38 parmap->status = PARMAP_WORK;
40 parmap->workers_ready = xbt_new0(s_xbt_barrier_t, 1);
41 xbt_barrier_init(parmap->workers_ready, num_workers + 1);
42 parmap->workers_done = xbt_new0(s_xbt_barrier_t, 1);
43 xbt_barrier_init(parmap->workers_done, num_workers + 1);
45 parmap->workers_ready->mutex = xbt_os_mutex_init();
46 parmap->workers_ready->cond = xbt_os_cond_init();
48 /* Create the pool of worker threads */
49 for(i=0; i < num_workers; i++){
50 worker = xbt_os_thread_create(NULL, _xbt_parmap_worker_main, parmap, NULL);
51 xbt_os_thread_detach(worker);
57 void xbt_parmap_destroy(xbt_parmap_t parmap)
59 DEBUG1("Destroy parmap %p", parmap);
61 parmap->status = PARMAP_DESTROY;
63 xbt_barrier_wait(parmap->workers_ready);
64 DEBUG0("Kill job sent");
65 xbt_barrier_wait(parmap->workers_done);
67 xbt_os_mutex_destroy(parmap->workers_ready->mutex);
68 xbt_os_cond_destroy(parmap->workers_ready->cond);
70 xbt_free(parmap->workers_ready);
71 xbt_free(parmap->workers_done);
75 void xbt_parmap_apply(xbt_parmap_t parmap, void_f_pvoid_t fun, xbt_dynar_t data)
77 /* Assign resources to worker threads*/
81 /* Notify workers that there is a job */
82 xbt_barrier_wait(parmap->workers_ready);
83 DEBUG0("Job dispatched, lets wait...");
84 xbt_barrier_wait(parmap->workers_done);
91 static void *_xbt_parmap_worker_main(void *arg)
93 unsigned int data_start, data_end, data_size, worker_id;
94 xbt_parmap_t parmap = (xbt_parmap_t)arg;
96 /* Fetch a worker id */
97 worker_id = __sync_fetch_and_add(&parmap->workers_max_id, 1);
98 xbt_os_thread_set_extra_data((void *)(unsigned long)worker_id);
100 DEBUG1("New worker thread created (%u)", worker_id);
102 /* Worker's main loop */
104 xbt_barrier_wait(parmap->workers_ready);
106 if(parmap->status == PARMAP_WORK){
107 DEBUG1("Worker %u got a job", worker_id);
109 /* Compute how much data does every worker gets */
110 data_size = (xbt_dynar_length(parmap->data) / parmap->num_workers)
111 + ((xbt_dynar_length(parmap->data) % parmap->num_workers) ? 1 : 0);
113 /* Each worker data segment starts in a position associated with its id*/
114 data_start = data_size * worker_id;
116 /* The end of the worker data segment must be bounded by the end of the data vector */
117 data_end = MIN(data_start + data_size, xbt_dynar_length(parmap->data));
119 DEBUG4("Worker %u: data_start=%u data_end=%u (data_size=%u)",
120 worker_id, data_start, data_end, data_size);
122 /* While the worker don't pass the end of it data segment apply the function */
123 while(data_start < data_end){
124 parmap->fun(*(void **)xbt_dynar_get_ptr(parmap->data, data_start));
128 xbt_barrier_wait(parmap->workers_done);
130 /* We are destroying the parmap */
132 xbt_barrier_wait(parmap->workers_done);
133 DEBUG1("Shutting down worker %u", worker_id);
/* Put the calling thread to sleep on *uaddr, provided it still holds val
 * (the kernel re-checks atomically, so a concurrent wake is not lost). */
static void futex_wait(int *uaddr, int val)
{
  DEBUG1("Waiting on futex %d", *uaddr);
  syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val, NULL, NULL, 0);
}
/* Wake up to val threads currently sleeping on *uaddr. */
static void futex_wake(int *uaddr, int val)
{
  DEBUG1("Waking futex %d", *uaddr);
  syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, val, NULL, NULL, 0);
}
153 /* Futex based implementation of the barrier */
154 void xbt_barrier_init(xbt_barrier_t barrier, unsigned int threads_to_wait)
156 barrier->threads_to_wait = threads_to_wait;
157 barrier->thread_count = 0;
161 void xbt_barrier_wait(xbt_barrier_t barrier)
164 unsigned int mycount = 0;
166 myflag = barrier->futex;
167 mycount = __sync_add_and_fetch(&barrier->thread_count, 1);
168 if(mycount < barrier->threads_to_wait){
169 futex_wait(&barrier->futex, myflag);
171 barrier->futex = __sync_add_and_fetch(&barrier->futex, 1);
172 barrier->thread_count = 0;
173 futex_wake(&barrier->futex, barrier->threads_to_wait);
177 void xbt_barrier_wait(xbt_barrier_t barrier)
179 xbt_os_mutex_acquire(barrier->mutex);
181 barrier->thread_count++;
182 if(barrier->thread_count < barrier->threads_to_wait){
183 xbt_os_cond_wait(barrier->cond,barrier->mutex);
185 barrier->thread_count = 0;
186 xbt_os_cond_broadcast(barrier->cond);
188 xbt_os_mutex_release(barrier->mutex);
196 XBT_TEST_SUITE("parmap", "Parallel Map");
197 XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(xbt_parmap_unit);
207 //INFO1("I'm job %lu", (unsigned long)arg);
210 XBT_TEST_UNIT("basic", test_parmap_basic, "Basic usage")
212 xbt_test_add0("Create the parmap");
215 xbt_dynar_t data = xbt_dynar_new(sizeof(void *), NULL);
217 /* Create the parallel map */
218 parmap = xbt_parmap_new(10);
220 for(j=0; j < 100; j++){
221 xbt_dynar_push_as(data, void *, (void *)j);
225 xbt_parmap_apply(parmap, fun, data);
227 /* Destroy the parmap */
228 xbt_parmap_destroy(parmap);
231 #endif /* SIMGRID_TEST */