#include "smpi_coll_private.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi,
                                "Logging specific to SMPI (mpi)");
int SMPI_MPI_Init(int *argc, char ***argv)
{
  smpi_process_init(argc, argv);
  return MPI_SUCCESS;
}

int SMPI_MPI_Finalize()
{
  smpi_process_finalize();
  return MPI_SUCCESS;
}
// right now this just exits the current node; it should send an abort signal to all
// hosts in the communicator (TODO)
int SMPI_MPI_Abort(MPI_Comm comm, int errorcode)
int SMPI_MPI_Comm_size(MPI_Comm comm, int *size)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else if (NULL == size) {
    retval = MPI_ERR_ARG;
  } else {
    *size = comm->size;
  }

  return retval;
}

int SMPI_MPI_Comm_rank(MPI_Comm comm, int *rank)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else if (NULL == rank) {
    retval = MPI_ERR_ARG;
  } else {
    *rank = smpi_mpi_comm_rank(comm);
  }

  return retval;
}
int SMPI_MPI_Type_size(MPI_Datatype datatype, size_t * size)
{
  int retval = MPI_SUCCESS;

  if (NULL == datatype) {
    retval = MPI_ERR_TYPE;
  } else if (NULL == size) {
    retval = MPI_ERR_ARG;
  } else {
    *size = datatype->size;
  }

  return retval;
}
int SMPI_MPI_Barrier(MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int arity = 4;                // assumed tree degree; the original value is not shown in this excerpt

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else {
    /*
     * original implementation:
     * retval = smpi_mpi_barrier(comm);
     * That one is unrealistic: it just cond_waits, so the barrier costs no simulated time.
     */
    retval = nary_tree_barrier(comm, arity);
  }

  return retval;
}
int SMPI_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Request * request)
{
  int retval = MPI_SUCCESS;

  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(*request);
  }
  return retval;
}
int SMPI_MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src,
                  int tag, MPI_Comm comm, MPI_Status * status)
{
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(request);
    if (MPI_SUCCESS == retval) {
      retval = smpi_mpi_wait(request, status);
    }
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }
  return retval;
}
int SMPI_MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm, MPI_Request * request)
{
  int retval = MPI_SUCCESS;

  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(*request);
  }
  return retval;
}
int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
                  int tag, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(request);
    if (MPI_SUCCESS == retval) {
      smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    }
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }
  return retval;
}
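/* Note: the blocking Send/Recv entry points above are just Isend/Irecv followed
 * by a wait on the same request; request objects are recycled through
 * smpi_global->request_mallocator instead of being allocated per call. */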
int SMPI_MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
                      MPI_Comm comm, MPI_Status *status)
{
  int retval = MPI_SUCCESS;
  int rank;
  smpi_mpi_request_t srequest;
  smpi_mpi_request_t rrequest;

  rank = smpi_mpi_comm_rank(comm);

  /* send */
  retval = smpi_create_request(sendbuf, sendcount, sendtype,
                               rank, dest, sendtag, comm, &srequest);
  smpi_mpi_isend(srequest);

  /* recv */
  retval = smpi_create_request(recvbuf, recvcount, recvtype,
                               source, rank, recvtag, comm, &rrequest);
  smpi_mpi_irecv(rrequest);

  smpi_mpi_wait(srequest, MPI_STATUS_IGNORE);
  smpi_mpi_wait(rrequest, status);      // the receive is the operation that must fill *status

  return retval;
}
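/* Sendrecv note: both transfers are posted as nonblocking requests before
 * either wait, so matching Sendrecv calls between two ranks cannot deadlock
 * the way two blocking Send/Recv pairs issued in the same order could. */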
/**
 * MPI_Wait and friends
 **/
int SMPI_MPI_Wait(MPI_Request * request, MPI_Status * status)
{
  return smpi_mpi_wait(*request, status);
}

int SMPI_MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  return smpi_mpi_waitall(count, requests, status);
}

int SMPI_MPI_Waitany(int count, MPI_Request requests[], int *index,
                     MPI_Status status[])
{
  return smpi_mpi_waitany(count, requests, index, status);
}
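/**
 * flat_tree_bcast: the root posts one send to rank (root + 1) % comm->size and
 * sets request->forward to comm->size - 1, which (judging from this call site)
 * asks the runtime to propagate the message to the remaining ranks; every
 * non-root rank posts a matching MPI_ANY_SOURCE receive.
 **/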
int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                    MPI_Comm comm)
{
  int rank;
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);
  if (rank == root) {
    retval = smpi_create_request(buf, count, datatype, root,
                                 (root + 1) % comm->size, 0, comm, &request);
    request->forward = comm->size - 1;
    smpi_mpi_isend(request);
  } else {
    retval = smpi_create_request(buf, count, datatype, MPI_ANY_SOURCE, rank,
                                 0, comm, &request);
    smpi_mpi_irecv(request);
  }

  smpi_mpi_wait(request, MPI_STATUS_IGNORE);
  xbt_mallocator_release(smpi_global->request_mallocator, request);

  return retval;
}
/**
 * Bcast user entry point
 **/
int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
{
  int retval = MPI_SUCCESS;

  //retval = flat_tree_bcast(buf, count, datatype, root, comm);
  retval = nary_tree_bcast(buf, count, datatype, root, comm, 2);

  return retval;
}
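/* The trailing argument of nary_tree_bcast() is the tree arity, so the call
 * above walks a binary broadcast tree. Example usage (hypothetical values):
 *
 *   int data[100];
 *   SMPI_MPI_Bcast(data, 100, MPI_INT, 0, MPI_COMM_WORLD);
 */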
//#define DEBUG_REDUCE 1
#ifdef DEBUG_REDUCE
/**
 * debugging helper functions
 **/
static void print_buffer_int(void *buf, int len, char *msg, int rank)
{
  int tmp, *v = buf;
  printf("**[%d] %s: ", rank, msg);
  for (tmp = 0; tmp < len; tmp++) {
    printf("[%d]", v[tmp]);
  }
  printf("\n");
  free(msg);                    // callers hand over a freshly allocated string
}

static void print_buffer_double(void *buf, int len, char *msg, int rank)
{
  int tmp;
  double *v = buf;
  printf("**[%d] %s: ", rank, msg);
  for (tmp = 0; tmp < len; tmp++) {
    printf("[%lf]", v[tmp]);
  }
  printf("\n");
  free(msg);
}
#endif /* DEBUG_REDUCE */
int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
                    MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank;
  int size;
  int i;
  int tag = 0;
  smpi_mpi_request_t *requests;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);
  size = comm->size;

  if (rank != root) {           // if I am not the root, simply send my buffer to the root
#ifdef DEBUG_REDUCE
    print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
#endif
    retval = smpi_create_request(sendbuf, count, datatype, rank, root, tag, comm,
                                 &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  } else {
    // I am the root: wait for all buffers by creating one request per sender
    int src;
    requests = xbt_malloc((size - 1) * sizeof(smpi_mpi_request_t));
    // we need one buffer per request to store intermediate receptions
    void **tmpbufs = xbt_malloc((size - 1) * sizeof(void *));
    for (i = 0; i < size - 1; i++) {
      tmpbufs[i] = xbt_malloc(count * datatype->size);
    }
    // root: initialize the recv buf with my own snd buf
    memcpy(recvbuf, sendbuf, count * datatype->size * sizeof(char));

    // I cannot use 'request->forward = size - 1;' (which would propagate size - 1
    // receive requests), since the op must be applied as soon as any receive matches.
    for (i = 0; i < size - 1; i++) {
      // reminder: for smpi_create_request() the src is always the process sending.
      src = i < root ? i : i + 1;
      retval = smpi_create_request(tmpbufs[i], count, datatype,
                                   src, root, tag, comm, &(requests[i]));
      if (NULL != requests[i] && MPI_SUCCESS == retval) {
        smpi_mpi_irecv(requests[i]);
      }
    }
    // now, wait for completion of all irecv's.
    for (i = 0; i < size - 1; i++) {
      int index = MPI_UNDEFINED;
      smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
#ifdef DEBUG_REDUCE
      printf("MPI_Waitany() unblocked: root received (completes req[index=%d])\n", index);
      print_buffer_int(tmpbufs[index], count, bprintf("tmpbufs[index=%d] (value received)", index),
                       rank);
#endif
      // fold the freshly received buffer into recvbuf (MPI_User_function signature)
      op->func(tmpbufs[index], recvbuf, &count, &datatype);
#ifdef DEBUG_REDUCE
      print_buffer_int(recvbuf, count, xbt_strdup("rcvbuf"), rank);
#endif
      xbt_free(tmpbufs[index]);
      /* FIXME: with the following line, it generates an
       * [xbt_ex/CRITICAL] Conditional list not empty 162518800.
       */
      // xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
    }
    xbt_free(requests);
    xbt_free(tmpbufs);
  }
  return retval;
}
/**
 * MPI_Allreduce
 *
 * Same as MPI_Reduce except that the result appears in the receive buffer of all the group members.
 **/
int SMPI_MPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                       MPI_Op op, MPI_Comm comm);
int SMPI_MPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                       MPI_Op op, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int root = 0;                 // arbitrary choice; 0 is valid for any communicator size

  retval = SMPI_MPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
  if (MPI_SUCCESS != retval)
    return retval;

  // broadcast the *reduced* buffer: broadcasting sendbuf (as the original code did)
  // would hand every rank the root's input instead of the result
  retval = SMPI_MPI_Bcast(recvbuf, count, datatype, root, comm);
  return retval;
}
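/* Composing Reduce + Bcast costs two tree traversals; a dedicated allreduce
 * algorithm (e.g., recursive doubling) could do better, but this version
 * simply reuses the two collectives above. */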
/**
 * MPI_Scatter user entry point
 **/
int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype,
                     int root, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int i;
  int cnt = 0;
  int rank;
  int tag = 0;
  char *cptr;                   // to manipulate the void * buffers
  smpi_mpi_request_t *requests;
  smpi_mpi_request_t request;
  smpi_mpi_status_t status;

  rank = smpi_mpi_comm_rank(comm);

  requests = xbt_malloc((comm->size - 1) * sizeof(smpi_mpi_request_t));
  if (rank == root) {
    // i am the root: distribute my sendbuf
    //print_buffer_int(sendbuf, comm->size, xbt_strdup("sndbuf"), rank);
    cptr = sendbuf;
    for (i = 0; i < comm->size; i++) {
      if (i != root) {          // send to processes ...
        retval = smpi_create_request((void *) cptr, sendcount,
                                     datatype, root, i, tag, comm,
                                     &(requests[cnt]));
        if (NULL != requests[cnt] && MPI_SUCCESS == retval) {
          smpi_mpi_isend(requests[cnt]);
        }
        cnt++;
      } else {                  // ... except if it's me.
        memcpy(recvbuf, (void *) cptr, recvcount * recvtype->size * sizeof(char));
      }
      cptr += sendcount * datatype->size;
    }
    for (i = 0; i < cnt; i++) { // wait for the sends to complete
      /* FIXME: waitall() would be slightly better */
      smpi_mpi_wait(requests[i], &status);
      xbt_mallocator_release(smpi_global->request_mallocator, requests[i]);
    }
  } else {                      // i am a non-root process: wait for data from the root
    retval = smpi_create_request(recvbuf, recvcount,
                                 recvtype, root, rank, tag, comm, &request);
    if (NULL != request && MPI_SUCCESS == retval) {
      smpi_mpi_irecv(request);
    }
    smpi_mpi_wait(request, &status);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }
  xbt_free(requests);

  return retval;
}
/**
 * MPI_Alltoall user entry point
 *
 * Follows the algorithm-selection logic of Open MPI (up to 1.2.7 or later);
 * see ompi/mca/coll/tuned/coll_tuned_module.c
 **/
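/* Selection summary (the thresholds appear in the code below):
 *   per-rank block < 200 bytes and comm size > 12  -> Bruck
 *   per-rank block < 3000 bytes                    -> basic linear intended, pairwise for now
 *   otherwise                                      -> pairwise exchange
 */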
int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int block_dsize;
  int rank;

  rank = smpi_mpi_comm_rank(comm);
  block_dsize = datatype->size * sendcount;

  if ((block_dsize < 200) && (comm->size > 12)) {
    retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, datatype,
                                            recvbuf, recvcount, recvtype, comm);
  } else if (block_dsize < 3000) {
    /* TODO: switch to the basic linear algorithm here, as Open MPI does
     * ("use this one !!" in the original source):
     * retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, datatype,
     *                                                recvbuf, recvcount, recvtype, comm);
     */
    retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, datatype,
                                               recvbuf, recvcount, recvtype, comm);
  } else {
    retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, datatype,
                                               recvbuf, recvcount, recvtype, comm);
  }

  return retval;
}
// used by comm_split to sort ranks based on key values
int smpi_compare_rankkeys(const void *a, const void *b);
int smpi_compare_rankkeys(const void *a, const void *b)
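{
  /* Minimal sketch of the comparator (the original body is elided from this
   * excerpt): entries are {rank, key} int pairs, ordered by key with ties
   * broken by original rank, as MPI_Comm_split requires. */
  const int *x = a;
  const int *y = b;

  if (x[1] != y[1])
    return (x[1] < y[1]) ? -1 : 1;
  if (x[0] != y[0])
    return (x[0] < y[0]) ? -1 : 1;
  return 0;
}

/**
 * MPI_Comm_split: rank 0 of the parent communicator gathers every (color, key)
 * pair, groups ranks by color, sorts each group by key, builds one new
 * communicator per color, and sends each member its new rank; the communicator
 * pointer itself travels in request->data, a shortcut that only works inside
 * the simulator.
 **/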
int SMPI_MPI_Comm_split(MPI_Comm comm, int color, int key,
                        MPI_Comm * comm_out)
{
  int retval = MPI_SUCCESS;
  int index, rank;
  smpi_mpi_request_t request;
  int colorkey[2];
  smpi_mpi_status_t status;

  // FIXME: need to test parameters

  index = smpi_process_index();
  rank = comm->index_to_rank_map[index];

  // the root node does most of the real work
  if (0 == rank) {
    int colormap[comm->size];
    int keymap[comm->size];
    int rankkeymap[comm->size * 2];
    int i, j, count, indextmp, currentcolor;
    smpi_mpi_communicator_t tempcomm = NULL;

    colormap[0] = color;
    keymap[0] = key;

    // FIXME: use scatter/gather or similar instead of individual comms
    for (i = 1; i < comm->size; i++) {
      retval = smpi_create_request(colorkey, 2, MPI_INT, MPI_ANY_SOURCE,
                                   rank, MPI_ANY_TAG, comm, &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      colormap[status.MPI_SOURCE] = colorkey[0];
      keymap[status.MPI_SOURCE] = colorkey[1];
      xbt_mallocator_release(smpi_global->request_mallocator, request);
    }

    for (i = 0; i < comm->size; i++) {
      if (MPI_UNDEFINED == colormap[i]) {
        continue;
      }
      // make a list of nodes with the current color and sort it by keys;
      // save the color first, since the loop below overwrites colormap[i] itself
      currentcolor = colormap[i];
      count = 0;
      for (j = i; j < comm->size; j++) {
        if (currentcolor == colormap[j]) {
          colormap[j] = MPI_UNDEFINED;
          rankkeymap[count * 2] = j;
          rankkeymap[count * 2 + 1] = keymap[j];
          count++;
        }
      }
      qsort(rankkeymap, count, sizeof(int) * 2, &smpi_compare_rankkeys);

      // create the new communicator for this color group
      tempcomm = xbt_new(s_smpi_mpi_communicator_t, 1);
      tempcomm->barrier_count = 0;
      tempcomm->size = count;
      tempcomm->barrier_mutex = SIMIX_mutex_init();
      tempcomm->barrier_cond = SIMIX_cond_init();
      tempcomm->rank_to_index_map = xbt_new(int, count);
      tempcomm->index_to_rank_map = xbt_new(int, smpi_global->process_count);
      for (j = 0; j < smpi_global->process_count; j++) {
        tempcomm->index_to_rank_map[j] = -1;
      }
      for (j = 0; j < count; j++) {
        indextmp = comm->rank_to_index_map[rankkeymap[j * 2]];
        tempcomm->rank_to_index_map[j] = indextmp;
        tempcomm->index_to_rank_map[indextmp] = j;
      }
      // tell each group member its new rank (the root keeps its copy directly)
      for (j = 0; j < count; j++) {
        if (rankkeymap[j * 2]) {        // old rank != 0, i.e. not the root itself
          retval = smpi_create_request(&j, 1, MPI_INT, 0,
                                       rankkeymap[j * 2], 0, comm, &request);
          request->data = tempcomm;
          smpi_mpi_isend(request);
          smpi_mpi_wait(request, &status);
          xbt_mallocator_release(smpi_global->request_mallocator, request);
        } else {
          *comm_out = tempcomm;
        }
      }
    }
  } else {
    // everyone else sends (color, key) to the root ...
    colorkey[0] = color;
    colorkey[1] = key;
    retval = smpi_create_request(colorkey, 2, MPI_INT, rank, 0, 0, comm,
                                 &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, &status);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
    // ... and, unless it opted out with MPI_UNDEFINED, waits for its new rank
    if (MPI_UNDEFINED != color) {
      retval = smpi_create_request(colorkey, 1, MPI_INT, 0, rank, 0, comm,
                                   &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      *comm_out = request->data;
      xbt_mallocator_release(smpi_global->request_mallocator, request);
    }
  }
  return retval;
}
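/* Example usage (hypothetical values): split the world into odd and even ranks,
 * keeping the original relative order inside each half:
 *
 *   MPI_Comm halfcomm;
 *   SMPI_MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &halfcomm);
 */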
double SMPI_MPI_Wtime(void)
{
  return SIMIX_get_clock();
}