XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi,
                                "Logging specific to SMPI (base)");
XBT_LOG_EXTERNAL_CATEGORY(smpi_base);
XBT_LOG_EXTERNAL_CATEGORY(smpi_bench);
XBT_LOG_EXTERNAL_CATEGORY(smpi_kernel);
XBT_LOG_EXTERNAL_CATEGORY(smpi_mpi);
XBT_LOG_EXTERNAL_CATEGORY(smpi_receiver);
XBT_LOG_EXTERNAL_CATEGORY(smpi_sender);
XBT_LOG_EXTERNAL_CATEGORY(smpi_util);

smpi_mpi_global_t smpi_mpi_global = NULL;
/**
 * Operations of MPI_Op: implemented = land, sum, min, max
 **/
void smpi_mpi_land_func(void *a, void *b, int *length, MPI_Datatype * datatype);

void smpi_mpi_land_func(void *a, void *b, int *length, MPI_Datatype * datatype)
{
  int i;
  if (*datatype == smpi_mpi_global->mpi_int) {
    int *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] && y[i];      /* logical AND; the result accumulates into b */
    }
  }
}
/**
 * Sum two vectors element-wise.
 *
 * @param a the first vector
 * @param b the second vector; modified in place to contain the element-wise sums
 */
void smpi_mpi_sum_func(void *a, void *b, int *length, MPI_Datatype * datatype);

void smpi_mpi_sum_func(void *a, void *b, int *length, MPI_Datatype * datatype)
{
  int i;
  if (*datatype == smpi_mpi_global->mpi_byte) {
    char *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] + y[i];
    }
  } else if (*datatype == smpi_mpi_global->mpi_int) {
    int *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] + y[i];
    }
  } else if (*datatype == smpi_mpi_global->mpi_float) {
    float *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] + y[i];
    }
  } else if (*datatype == smpi_mpi_global->mpi_double) {
    double *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] + y[i];
    }
  }
}
/**
 * Compute the min of two vectors element-wise; the result is stored in b.
 */
void smpi_mpi_min_func(void *a, void *b, int *length, MPI_Datatype * datatype);

void smpi_mpi_min_func(void *a, void *b, int *length, MPI_Datatype * datatype)
{
  int i;
  if (*datatype == smpi_mpi_global->mpi_byte) {
    char *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] < y[i] ? x[i] : y[i];
    }
  } else if (*datatype == smpi_mpi_global->mpi_int) {
    int *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] < y[i] ? x[i] : y[i];
    }
  } else if (*datatype == smpi_mpi_global->mpi_float) {
    float *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] < y[i] ? x[i] : y[i];
    }
  } else if (*datatype == smpi_mpi_global->mpi_double) {
    double *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] < y[i] ? x[i] : y[i];
    }
  }
}
/**
 * Compute the max of two vectors element-wise; the result is stored in b.
 */
void smpi_mpi_max_func(void *a, void *b, int *length, MPI_Datatype * datatype);

void smpi_mpi_max_func(void *a, void *b, int *length, MPI_Datatype * datatype)
{
  int i;
  if (*datatype == smpi_mpi_global->mpi_byte) {
    char *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] > y[i] ? x[i] : y[i];
    }
  } else if (*datatype == smpi_mpi_global->mpi_int) {
    int *x = a, *y = b;
    for (i = 0; i < *length; i++) {    /* fixed: loop condition was 'i > *length', which never iterates */
      y[i] = x[i] > y[i] ? x[i] : y[i];
    }
  } else if (*datatype == smpi_mpi_global->mpi_float) {
    float *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] > y[i] ? x[i] : y[i];
    }
  } else if (*datatype == smpi_mpi_global->mpi_double) {
    double *x = a, *y = b;
    for (i = 0; i < *length; i++) {
      y[i] = x[i] > y[i] ? x[i] : y[i];
    }
  }
}
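/*
 * Usage sketch (illustrative, not part of the original source): every MPI_Op
 * function above combines vector a into vector b in place, so b holds the
 * result. The buffers and length below are made up for the example; the
 * datatype handle is assumed to be addressable as a field of smpi_mpi_global.
 */
#if 0
int x[3] = { 1, 5, 2 };
int y[3] = { 4, 3, 9 };
int len = 3;
smpi_mpi_sum_func(x, y, &len, &smpi_mpi_global->mpi_int);
/* y is now { 5, 8, 11 }; x is unchanged */
#endif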
/**
 * Tell the MPI rank of the calling process (from its SIMIX process index).
 */
int smpi_mpi_comm_rank(smpi_mpi_communicator_t comm)
{
  return comm->index_to_rank_map[smpi_process_index()];
}
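/*
 * Usage sketch (illustrative, not part of the original source): the map is
 * indexed by the global SIMIX process index and yields the rank within this
 * communicator; for a world-spanning communicator the mapping is presumably
 * the identity. A hypothetical MPI_Comm_rank built on top of it:
 */
#if 0
int MPI_Comm_rank(MPI_Comm comm, int *rank)
{
  *rank = smpi_mpi_comm_rank(comm);
  return MPI_SUCCESS;
}
#endif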
void smpi_process_init(int *argc, char ***argv)
{
  smpi_process_data_t pdata;

  // initialize some local variables

  pdata = xbt_new(s_smpi_process_data_t, 1);
  SIMIX_process_set_data(SIMIX_process_self(), pdata);

  /* get rank from command line, and remove it from argv */
  pdata->index = atoi((*argv)[1]);
  DEBUG1("I'm rank %d", pdata->index);

  /* shift the remaining argument pointers left by one slot
     (fixed: was memmove on the string contents, not the pointer array) */
  memmove(&(*argv)[1], &(*argv)[2], sizeof(char *) * (*argc - 2));
  (*argv)[(*argc) - 1] = NULL;
  (*argc)--;                    /* one argument fewer now that the rank is consumed */

  pdata->mutex = SIMIX_mutex_init();
  pdata->cond = SIMIX_cond_init();
  pdata->finalize = 0;

  pdata->pending_recv_request_queue = xbt_fifo_new();
  pdata->pending_send_request_queue = xbt_fifo_new();
  pdata->received_message_queue = xbt_fifo_new();

  pdata->main = SIMIX_process_self();
  pdata->sender = SIMIX_process_create("smpi_sender",
                                       smpi_sender, pdata,
                                       SIMIX_host_get_name(SIMIX_host_self()), 0, NULL,
                                       /*props*/ NULL);
  pdata->receiver = SIMIX_process_create("smpi_receiver",
                                         smpi_receiver, pdata,
                                         SIMIX_host_get_name(SIMIX_host_self()), 0, NULL,
                                         /*props*/ NULL);

  smpi_global->main_processes[pdata->index] = SIMIX_process_self();
}
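/*
 * Illustrative note (not from the original source): smpi_process_init()
 * strips the rank argument injected through the deployment file. With a
 * made-up command line the transformation looks like this:
 *
 *   before: argv = { "smpi_main", "3", "--foo" }, argc = 3
 *   after:  argv = { "smpi_main", "--foo", NULL }, argc = 2, rank = 3
 */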
void smpi_process_finalize()
{
  smpi_process_data_t pdata = SIMIX_process_get_data(SIMIX_process_self());

  pdata->finalize = 2;          /* Tell sender and receiver to quit */
  SIMIX_process_resume(pdata->sender);
  SIMIX_process_resume(pdata->receiver);
  SIMIX_mutex_lock(pdata->mutex);      /* cond_wait requires the mutex to be held */
  while (pdata->finalize > 0) { /* wait until it's done */
    SIMIX_cond_wait(pdata->cond, pdata->mutex);
  }
  SIMIX_mutex_unlock(pdata->mutex);

  SIMIX_mutex_destroy(pdata->mutex);
  SIMIX_cond_destroy(pdata->cond);
  xbt_fifo_free(pdata->pending_recv_request_queue);
  xbt_fifo_free(pdata->pending_send_request_queue);
  xbt_fifo_free(pdata->received_message_queue);
  xbt_free(pdata);
}
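/*
 * Illustrative note (not from the original source): finalize acts as a
 * countdown. The main process sets it to 2 and wakes both helpers; sender and
 * receiver are each expected to decrement it as they exit and signal
 * pdata->cond, so the loop above falls through once it reaches 0.
 */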
int smpi_mpi_barrier(smpi_mpi_communicator_t comm)
{
  SIMIX_mutex_lock(comm->barrier_mutex);
  ++comm->barrier_count;
  if (comm->barrier_count > comm->size) { // only happens on second barrier...
    comm->barrier_count = 1;    /* fixed: restart at 1, the caller is the first arrival of the new round
                                   (restarting at 0 would drop its arrival and deadlock) */
  } else if (comm->barrier_count == comm->size) {
    SIMIX_cond_broadcast(comm->barrier_cond);
  }
  while (comm->barrier_count < comm->size) {
    SIMIX_cond_wait(comm->barrier_cond, comm->barrier_mutex);
  }
  SIMIX_mutex_unlock(comm->barrier_mutex);

  return MPI_SUCCESS;
}
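/*
 * Illustrative trace (not from the original source), for a communicator of
 * size 3 across two consecutive barriers:
 *
 *   barrier 1: arrivals drive barrier_count 1 -> 2 -> 3; the third process
 *              broadcasts and everyone passes, leaving barrier_count == 3.
 *   barrier 2: the first arrival sees 4 > 3 and restarts the count at 1
 *              (itself); the remaining arrivals drive it 2 -> 3 as before.
 */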
int smpi_mpi_isend(smpi_mpi_request_t request)
{
  smpi_process_data_t pdata = SIMIX_process_get_data(SIMIX_process_self());
  int retval = MPI_SUCCESS;

  if (NULL == request) {
    retval = MPI_ERR_INTERN;
  } else {
    xbt_fifo_push(pdata->pending_send_request_queue, request);
    SIMIX_process_resume(pdata->sender);
  }

  return retval;
}
int smpi_mpi_irecv(smpi_mpi_request_t request)
{
  int retval = MPI_SUCCESS;
  smpi_process_data_t pdata = SIMIX_process_get_data(SIMIX_process_self());

  if (NULL == request) {
    retval = MPI_ERR_INTERN;
  } else {
    xbt_fifo_push(pdata->pending_recv_request_queue, request);
    if (SIMIX_process_is_suspended(pdata->receiver)) {
      SIMIX_process_resume(pdata->receiver);
    }
  }

  return retval;
}
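/*
 * Illustrative note (not from the original source): isend/irecv do not move
 * any data themselves. They only queue the request and wake the per-process
 * sender/receiver helper created in smpi_process_init(), which performs the
 * actual matching and transfer, then marks the request completed and signals
 * request->cond for anyone blocked in smpi_mpi_wait().
 */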
int smpi_mpi_wait(smpi_mpi_request_t request, smpi_mpi_status_t * status)
{
  int retval = MPI_SUCCESS;

  if (NULL == request) {
    retval = MPI_ERR_INTERN;
  } else {
    SIMIX_mutex_lock(request->mutex);
    while (!request->completed) {
      SIMIX_cond_wait(request->cond, request->mutex);
    }
    if (NULL != status) {
      status->MPI_SOURCE = request->src;
      status->MPI_TAG = request->tag;
      status->MPI_ERROR = MPI_SUCCESS;
    }
    SIMIX_mutex_unlock(request->mutex);
  }

  return retval;
}
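/*
 * Usage sketch (illustrative, not part of the original source): a blocking
 * receive is an irecv followed by a wait on the same request. The request
 * construction below is hypothetical; the real code builds requests through
 * its own request constructor.
 */
#if 0
smpi_mpi_request_t req = /* build a receive request: buf, count, type, src, tag, comm */;
smpi_mpi_status_t stat;
smpi_mpi_irecv(req);        /* enqueue; the receiver helper matches it */
smpi_mpi_wait(req, &stat);  /* sleep on req->cond until completed */
/* stat.MPI_SOURCE and stat.MPI_TAG now describe the matched message */
#endif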
int smpi_mpi_waitall(int count, smpi_mpi_request_t requests[], smpi_mpi_status_t status[]) {
  int cpt, index, retval;
  smpi_mpi_status_t stat;

  for (cpt = 0; cpt < count; cpt++) {
    retval = smpi_mpi_waitany(count, requests, &index, &stat);
    if (retval != MPI_SUCCESS)
      return retval;
    memcpy(&(status[index]), &stat, sizeof(stat));
  }
  return MPI_SUCCESS;
}
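/*
 * Illustrative note (not from the original source): implementing waitall as
 * `count` calls to waitany makes each pass rescan the whole request array, so
 * completing n requests costs O(n^2) scans; the `consumed` flag is what keeps
 * a request from being reported twice. A hypothetical call site:
 */
#if 0
smpi_mpi_request_t reqs[2] = { req_a, req_b };   /* hypothetical requests */
smpi_mpi_status_t stats[2];
smpi_mpi_waitall(2, reqs, stats);  /* stats[i] matches reqs[i], whatever the completion order */
#endif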
int smpi_mpi_waitany(int count, smpi_mpi_request_t * requests, int *index, smpi_mpi_status_t * status) {
  int cpt;

  *index = MPI_UNDEFINED;
  if (NULL == requests) {
    return MPI_ERR_INTERN;
  }

  /* First check if one of them is already done */
  for (cpt = 0; cpt < count; cpt++) {
    if (requests[cpt]->completed && !requests[cpt]->consumed) { /* got ya */
      *index = cpt;
      goto found_request;
    }
  }

  /* If none found, block */
  /* FIXME: should use a SIMIX_cond_waitany, when implemented. For now, block on the first one */
  for (cpt = 0; cpt < count; cpt++) {
    if (!requests[cpt]->completed) { /* this one is not done, wait on it */
      SIMIX_mutex_lock(requests[cpt]->mutex);
      while (!requests[cpt]->completed)
        SIMIX_cond_wait(requests[cpt]->cond, requests[cpt]->mutex);
      SIMIX_mutex_unlock(requests[cpt]->mutex);
      *index = cpt;
      goto found_request;
    }
  }

  if (cpt == count) /* they are all done. Damn user */
    return MPI_ERR_REQUEST;

found_request:
  requests[*index]->consumed = 1;

  if (NULL != status) {
    status->MPI_SOURCE = requests[*index]->src;
    status->MPI_TAG = requests[*index]->tag;
    status->MPI_ERROR = MPI_SUCCESS;
  }
  return MPI_SUCCESS;
}