XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi,
                                "Logging specific to SMPI (mpi)");
int SMPI_MPI_Init(int *argc, char ***argv)
{
  smpi_process_init(argc, argv);
  return MPI_SUCCESS;
}
int SMPI_MPI_Finalize()
{
  smpi_process_finalize();
  return MPI_SUCCESS;
}
// right now this just exits the current node; it should send an abort
// signal to all hosts in the communicator (TODO)
int SMPI_MPI_Abort(MPI_Comm comm, int errorcode)
{
  smpi_exit(errorcode);
  return 0;
}
int SMPI_MPI_Comm_size(MPI_Comm comm, int *size)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else if (NULL == size) {
    retval = MPI_ERR_ARG;
  } else {
    *size = comm->size;
  }

  return retval;
}
int SMPI_MPI_Comm_rank(MPI_Comm comm, int *rank)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else if (NULL == rank) {
    retval = MPI_ERR_ARG;
  } else {
    *rank = smpi_mpi_comm_rank(comm);
  }

  return retval;
}
int SMPI_MPI_Type_size(MPI_Datatype datatype, size_t * size)
{
  int retval = MPI_SUCCESS;

  if (NULL == datatype) {
    retval = MPI_ERR_TYPE;
  } else if (NULL == size) {
    retval = MPI_ERR_ARG;
  } else {
    *size = datatype->size;
  }

  return retval;
}
int SMPI_MPI_Barrier(MPI_Comm comm)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else {
    retval = smpi_mpi_barrier(comm);
  }

  return retval;
}
int SMPI_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Request * request)
{
  int retval = MPI_SUCCESS;

  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(*request);
  }

  return retval;
}
int SMPI_MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src,
                  int tag, MPI_Comm comm, MPI_Status * status)
{
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  // the blocking receive is built from the nonblocking one: post the
  // request, wait on it, then release the request object
  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(request);
    if (MPI_SUCCESS == retval) {
      retval = smpi_mpi_wait(request, status);
    }
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }

  return retval;
}
int SMPI_MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm, MPI_Request * request)
{
  int retval = MPI_SUCCESS;

  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(*request);
  }

  return retval;
}
int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
                  int tag, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  // blocking send: isend followed by a wait, mirroring SMPI_MPI_Recv above
  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(request);
    if (MPI_SUCCESS == retval) {
      smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    }
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }

  return retval;
}
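/*
 * Usage sketch (illustrative, not part of the original file): how a caller
 * might pair the blocking wrappers above. MPI_COMM_WORLD, MPI_INT and the
 * rank values are assumed to be provided by smpi.h and the launcher.
 *
 *   int token = 0;
 *   int rank;
 *   MPI_Status status;
 *   SMPI_MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 *   if (0 == rank)
 *     SMPI_MPI_Send(&token, 1, MPI_INT, 1, 42, MPI_COMM_WORLD);
 *   else if (1 == rank)
 *     SMPI_MPI_Recv(&token, 1, MPI_INT, 0, 42, MPI_COMM_WORLD, &status);
 */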
/**
 * MPI_Wait and friends
 **/
int SMPI_MPI_Wait(MPI_Request * request, MPI_Status * status)
{
  return smpi_mpi_wait(*request, status);
}
int SMPI_MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  return smpi_mpi_waitall(count, requests, status);
}
int SMPI_MPI_Waitany(int count, MPI_Request requests[], int *index,
                     MPI_Status status[])
{
  return smpi_mpi_waitany(count, requests, index, status);
}
/**
 * MPI_Bcast
 **/
int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);

  if (rank == root) {
    // root sends once to its neighbor; the request's forward count makes
    // the payload travel on to the remaining size - 1 ranks
    retval = smpi_create_request(buf, count, datatype, root,
                                 (root + 1) % comm->size, 0, comm, &request);
    request->forward = comm->size - 1;
    smpi_mpi_isend(request);
  } else {
    retval = smpi_create_request(buf, count, datatype, MPI_ANY_SOURCE, rank,
                                 0, comm, &request);
    smpi_mpi_irecv(request);
  }

  smpi_mpi_wait(request, MPI_STATUS_IGNORE);
  xbt_mallocator_release(smpi_global->request_mallocator, request);

  return retval;
}
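/*
 * How the broadcast above travels (a reading of the code, relying on the
 * request->forward machinery): root posts a single send to rank
 * (root + 1) % size with forward = size - 1, so the payload appears to be
 * re-sent down the line while every other rank posts one wildcard receive.
 * With size = 4 and root = 0 the data moves 0 -> 1 -> 2 -> 3: size - 1
 * sequential messages, i.e. latency linear in the communicator size rather
 * than logarithmic as with a tree broadcast.
 */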
#ifdef DEBUG_REDUCE
/**
 * debugging helper function
 **/
static void print_buffer_int(void *buf, int len, const char *msg, int rank)
{
  int tmp;
  int *v = buf;
  printf("**[%d] %s: ", rank, msg);
  for (tmp = 0; tmp < len; tmp++) {
    printf("[%d]", v[tmp]);
  }
  printf("\n");
  free((char *) msg);           // callers hand over heap strings (xbt_strdup/bprintf)
}
#endif
/**
 * MPI_Reduce
 **/
int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
                    MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank;
  int size;
  int i;
  int tag = 0;
  smpi_mpi_request_t *tabrequest;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);
  size = comm->size;

  if (rank != root) {           // if I am not root, simply send my buffer to root
#ifdef DEBUG_REDUCE
    print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
#endif
    retval = smpi_create_request(sendbuf, count, datatype, rank, root, tag,
                                 comm, &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  } else {
    // I am root: wait for all buffers by creating one request per sender
    int src;
    tabrequest = xbt_malloc((size - 1) * sizeof(smpi_mpi_request_t));
    void **tmpbufs = xbt_malloc((size - 1) * sizeof(void *));
    for (i = 0; i < size - 1; i++) {
      // we need one buffer per request to store intermediate receptions
      tmpbufs[i] = xbt_malloc(count * datatype->size);
    }
    memcpy(recvbuf, sendbuf, count * datatype->size);   // initialize recvbuf with my own sendbuf

    // I cannot use 'request->forward = size - 1;' (which would propagate
    // size - 1 receive requests), because op must be applied to the values
    // as soon as any one receive request completes.
    for (i = 0; i < size - 1; i++) {
      // reminder: for smpi_create_request() the src is always the sending process
      src = i < root ? i : i + 1;
      retval = smpi_create_request(tmpbufs[i], count, datatype,
                                   src, root, tag, comm, &(tabrequest[i]));
      if (NULL != tabrequest[i] && MPI_SUCCESS == retval) {
        smpi_mpi_irecv(tabrequest[i]);
      }
    }
    // now wait for completion of all the irecv's
    for (i = 0; i < size - 1; i++) {
      int index = MPI_UNDEFINED;
      smpi_mpi_waitany(size - 1, tabrequest, &index, MPI_STATUS_IGNORE);
#ifdef DEBUG_REDUCE
      printf
        ("MPI_Waitany() unblocked: root received (completes req[index=%d])\n",
         index);
      print_buffer_int(tmpbufs[index], count,
                       bprintf("tmpbufs[index=%d] (value received)", index),
                       rank);
#endif
      // fold the freshly received buffer into recvbuf; op->func accumulates
      // into its second argument in place
      op->func(tmpbufs[index], recvbuf, &count, &datatype);
#ifdef DEBUG_REDUCE
      print_buffer_int(recvbuf, count, xbt_strdup("rcvbuf"), rank);
#endif
      //xbt_mallocator_release(smpi_global->request_mallocator, tabrequest[i]);
      xbt_free(tmpbufs[index]);
    }
    xbt_free(tabrequest);
    xbt_free(tmpbufs);
  }

  return retval;
}
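/*
 * The reduction kernel called through op->func above has the standard MPI
 * user-function shape. A minimal integer-sum operator compatible with that
 * call could look like this (hypothetical helper, shown for illustration):
 *
 *   static void int_sum_func(void *in, void *inout, int *len,
 *                            MPI_Datatype *datatype)
 *   {
 *     int i;
 *     int *a = in;
 *     int *b = inout;
 *     for (i = 0; i < *len; i++)
 *       b[i] += a[i];          // accumulate into the second argument in place
 *   }
 */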
/**
 * MPI_Allreduce
 *
 * Same as MPI_Reduce, except that the result appears in the receive buffer
 * of all group members.
 **/
int SMPI_MPI_Allreduce(void *sendbuf, void *recvbuf, int count,
                       MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int root = 0;                 // arbitrary choice

  retval = SMPI_MPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
  if (MPI_SUCCESS != retval)
    return retval;

  retval = SMPI_MPI_Bcast(recvbuf, count, datatype, root, comm);
  return retval;
}
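/*
 * Illustrative call (assumes MPI_SUM, MPI_DOUBLE and MPI_COMM_WORLD come
 * from smpi.h): every rank contributes `local` and every rank receives the
 * global sum, at the cost of the two rounds above (reduce to rank 0, then
 * broadcast).
 *
 *   double local = 1.0, global = 0.0;
 *   SMPI_MPI_Allreduce(&local, &global, 1, MPI_DOUBLE, MPI_SUM,
 *                      MPI_COMM_WORLD);
 */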
// used by comm_split to sort ranks based on key values
static int smpi_compare_rankkeys(const void *a, const void *b)
{
  const int *x = a;
  const int *y = b;

  // compare (rank, key) pairs: order by key, breaking ties by rank
  if (x[1] < y[1])
    return -1;
  if (x[1] > y[1])
    return 1;
  if (x[0] < y[0])
    return -1;
  if (x[0] > y[0])
    return 1;
  return 0;
}
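/*
 * Worked example: with (rank, key) pairs stored flat as
 * { 3,20, 1,10, 2,10 }, a qsort over 3 elements of size 2 * sizeof(int)
 * using the comparator above yields { 1,10, 2,10, 3,20 }: sorted by key,
 * with the tie between the two keys 10 broken by the original ranks 1 and 2.
 */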
int SMPI_MPI_Comm_split(MPI_Comm comm, int color, int key,
                        MPI_Comm * comm_out)
{
  int retval = MPI_SUCCESS;

  int index, rank;
  smpi_mpi_request_t request;
  int colorkey[2];
  smpi_mpi_status_t status;

  // FIXME: need to test parameters

  index = smpi_process_index();
  rank = comm->index_to_rank_map[index];

  // default output for ranks that end up in no new communicator
  *comm_out = NULL;

  // root node does most of the real work
  if (0 == rank) {
    int colormap[comm->size];
    int keymap[comm->size];
    int rankkeymap[comm->size * 2];
    int i, j;
    smpi_mpi_communicator_t tempcomm = NULL;
    int count;
    int indextmp;

    colormap[0] = color;
    keymap[0] = key;

    // FIXME: use scatter/gather or similar instead of individual comms
    // gather every other rank's (color, key) pair via wildcard receives
    for (i = 1; i < comm->size; i++) {
      retval = smpi_create_request(colorkey, 2, MPI_INT, MPI_ANY_SOURCE,
                                   rank, MPI_ANY_TAG, comm, &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      colormap[status.MPI_SOURCE] = colorkey[0];
      keymap[status.MPI_SOURCE] = colorkey[1];
      xbt_mallocator_release(smpi_global->request_mallocator, request);
    }
    for (i = 0; i < comm->size; i++) {
      if (MPI_UNDEFINED == colormap[i]) {
        continue;               // no new communicator requested, or already handled
      }
      // make a list of nodes with the current color and sort it by keys;
      // save the color first, since the inner loop clobbers colormap[i]
      int currentcolor = colormap[i];
      count = 0;
      for (j = i; j < comm->size; j++) {
        if (currentcolor == colormap[j]) {
          colormap[j] = MPI_UNDEFINED;
          rankkeymap[count * 2] = j;
          rankkeymap[count * 2 + 1] = keymap[j];
          count++;
        }
      }
      qsort(rankkeymap, count, sizeof(int) * 2, &smpi_compare_rankkeys);

      // build the new communicator for this color group
      tempcomm = xbt_new(s_smpi_mpi_communicator_t, 1);
      tempcomm->barrier_count = 0;
      tempcomm->size = count;
      tempcomm->barrier_mutex = SIMIX_mutex_init();
      tempcomm->barrier_cond = SIMIX_cond_init();
      tempcomm->rank_to_index_map = xbt_new(int, count);
      tempcomm->index_to_rank_map = xbt_new(int, smpi_global->process_count);
      for (j = 0; j < smpi_global->process_count; j++) {
        tempcomm->index_to_rank_map[j] = -1;
      }
      for (j = 0; j < count; j++) {
        indextmp = comm->rank_to_index_map[rankkeymap[j * 2]];
        tempcomm->rank_to_index_map[j] = indextmp;
        tempcomm->index_to_rank_map[indextmp] = j;
      }
      // hand each member its new rank; old rank 0 (this node) keeps it locally
      for (j = 0; j < count; j++) {
        if (rankkeymap[j * 2]) {
          retval = smpi_create_request(&j, 1, MPI_INT, 0,
                                       rankkeymap[j * 2], 0, comm, &request);
          request->data = tempcomm;
          smpi_mpi_isend(request);
          smpi_mpi_wait(request, &status);
          xbt_mallocator_release(smpi_global->request_mallocator, request);
        } else {
          *comm_out = tempcomm;
        }
      }
    }
  } else {
    // non-root nodes send their (color, key) pair to the root...
    colorkey[0] = color;
    colorkey[1] = key;
    retval = smpi_create_request(colorkey, 2, MPI_INT, rank, 0, 0, comm,
                                 &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, &status);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
    if (MPI_UNDEFINED != color) {
      // ...then receive their new rank; the new communicator itself arrives
      // attached to the request as request->data
      retval = smpi_create_request(colorkey, 1, MPI_INT, 0, rank, 0, comm,
                                   &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      *comm_out = request->data;
      xbt_mallocator_release(smpi_global->request_mallocator, request);
    }
  }

  return retval;
}
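/*
 * Illustrative use of the split implemented above (hypothetical values):
 * four ranks separate into even/odd groups, each ordered by old rank since
 * the old rank is passed as the key.
 *
 *   MPI_Comm newcomm;
 *   SMPI_MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &newcomm);
 */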
double SMPI_MPI_Wtime(void)
{
  // simulated time: under SMPI this is the simulator's clock,
  // not the host's wall clock
  return SIMIX_get_clock();
}