#include "smpi_coll_private.h"
#include "smpi_mpi_dt_private.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi,
                                "Logging specific to SMPI (mpi)");
int SMPI_MPI_Init(int *argc, char ***argv)
{
  smpi_process_init(argc, argv);
  return MPI_SUCCESS;
}
int SMPI_MPI_Finalize()
{
  smpi_process_finalize();
  return MPI_SUCCESS;
}
// right now this just exits the current node, should send abort signal to all
// hosts in the communicator (TODO)
int SMPI_MPI_Abort(MPI_Comm comm, int errorcode)
{
  smpi_exit(errorcode);
  return 0;
}
int SMPI_MPI_Comm_size(MPI_Comm comm, int *size)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else if (NULL == size) {
    retval = MPI_ERR_ARG;
  } else {
    *size = comm->size;
  }

  return retval;
}
int SMPI_MPI_Comm_rank(MPI_Comm comm, int *rank)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else if (NULL == rank) {
    retval = MPI_ERR_ARG;
  } else {
    *rank = smpi_mpi_comm_rank(comm);
  }

  return retval;
}
int SMPI_MPI_Barrier(MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int arity = 4;                // fan-out of the n-ary synchronization tree

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else {
    /*
     * original implementation:
     * retval = smpi_mpi_barrier(comm);
     * it is unrealistic: it just cond_waits, so it consumes no simulated time.
     */
    retval = nary_tree_barrier(comm, arity);
  }

  return retval;
}
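// Note: nary_tree_barrier() synchronizes along a tree of fan-out 'arity', so
// the simulated barrier consumes time that grows with the number of
// processes, unlike the pure condition-variable version mentioned above.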
int SMPI_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Request * request)
{
  int retval = MPI_SUCCESS;

  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(*request);
  }

  return retval;
}
int SMPI_MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src,
                  int tag, MPI_Comm comm, MPI_Status * status)
{
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(request);
    if (MPI_SUCCESS == retval) {
      retval = smpi_mpi_wait(request, status);
    }
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }

  return retval;
}
int SMPI_MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm, MPI_Request * request)
{
  int retval = MPI_SUCCESS;

  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(*request);
  }

  return retval;
}
/**
 * MPI_Send user level
 **/
int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
                  int tag, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(request);
    if (MPI_SUCCESS == retval) {
      smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    }
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }

  return retval;
}
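// Note: the blocking Send/Recv wrappers above are layered on the nonblocking
// primitives: create a request, fire smpi_mpi_isend()/smpi_mpi_irecv(), wait
// for completion, then hand the request back to the global request
// mallocator so it can be recycled.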
/**
 * MPI_Sendrecv internal level
 **/
int smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
                      MPI_Comm comm, MPI_Status *status)
{
  int rank;
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t srequest;
  smpi_mpi_request_t rrequest;

  rank = smpi_mpi_comm_rank(comm);

  /* send */
  retval = smpi_create_request(sendbuf, sendcount, sendtype,
                               rank, dest, sendtag,
                               comm, &srequest);
  smpi_mpi_isend(srequest);

  /* recv */
  retval = smpi_create_request(recvbuf, recvcount, recvtype,
                               source, rank, recvtag,
                               comm, &rrequest);
  smpi_mpi_irecv(rrequest);

  smpi_mpi_wait(srequest, MPI_STATUS_IGNORE);
  //printf("[%d] isend request src=%d dst=%d tag=%d COMPLETED (retval=%d) \n",rank,rank,dest,sendtag,retval);
  smpi_mpi_wait(rrequest, status);      // report the receive's status to the caller
  //printf("[%d] irecv request src=%d -> dst=%d tag=%d COMPLETED (retval=%d)\n",rank,source,rank,recvtag,retval);

  // hand both requests back to the mallocator, as the other wrappers do
  xbt_mallocator_release(smpi_global->request_mallocator, srequest);
  xbt_mallocator_release(smpi_global->request_mallocator, rrequest);

  return retval;
}
/**
 * MPI_Sendrecv user entry point
 **/
int SMPI_MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
                      MPI_Comm comm, MPI_Status *status)
{
  int retval = MPI_SUCCESS;

  retval = smpi_mpi_sendrecv(sendbuf, sendcount, sendtype, dest, sendtag,
                             recvbuf, recvcount, recvtype, source, recvtag,
                             comm, status);

  return retval;
}
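/*
 * Minimal usage sketch (hypothetical variables 'rank', 'size', buffers and
 * tag): each process ships a block to its right neighbor while receiving
 * from its left one. The exchange cannot deadlock because the internal
 * routine posts both the isend and the irecv before waiting on either:
 *
 *   int right = (rank + 1) % size;
 *   int left  = (rank + size - 1) % size;
 *   SMPI_MPI_Sendrecv(sbuf, n, MPI_INT, right, 42,
 *                     rbuf, n, MPI_INT, left,  42, comm, &status);
 */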
/**
 * MPI_Wait and friends
 **/
int SMPI_MPI_Wait(MPI_Request * request, MPI_Status * status)
{
  return smpi_mpi_wait(*request, status);
}

int SMPI_MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  return smpi_mpi_waitall(count, requests, status);
}

int SMPI_MPI_Waitany(int count, MPI_Request requests[], int *index,
                     MPI_Status status[])
{
  return smpi_mpi_waitany(count, requests, index, status);
}
/**
 * flat tree broadcast: the root sends to everyone else; non-roots receive
 **/
int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                    MPI_Comm comm)
{
  int rank;
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);
  if (rank == root) {
    retval = smpi_create_request(buf, count, datatype, root,
                                 (root + 1) % comm->size, 0, comm, &request);
    request->forward = comm->size - 1;
    smpi_mpi_isend(request);
  } else {
    retval = smpi_create_request(buf, count, datatype, MPI_ANY_SOURCE, rank,
                                 0, comm, &request);
    smpi_mpi_irecv(request);
  }

  smpi_mpi_wait(request, MPI_STATUS_IGNORE);
  xbt_mallocator_release(smpi_global->request_mallocator, request);

  return retval;
}
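// Note on the root-side send above: instead of posting size-1 separate sends,
// the root posts a single request whose 'forward' counter asks the runtime to
// propagate the reception to the remaining ranks. This is the same mechanism
// that smpi_mpi_reduce() below deliberately avoids, since a reduction must
// combine values as each individual reception completes.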
/**
 * Bcast internal level
 **/
int smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  //retval = flat_tree_bcast(buf, count, datatype, root, comm);
  retval = nary_tree_bcast(buf, count, datatype, root, comm, 2);

  return retval;
}
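// The commented-out flat tree is kept for reference; the n-ary tree variant
// with arity 2 broadcasts along a binary tree, so the payload reaches all
// ranks in O(log n) forwarding rounds instead of a single-level fan-out.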
/**
 * Bcast user entry point
 **/
int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
{
  int retval = MPI_SUCCESS;

  retval = smpi_mpi_bcast(buf, count, datatype, root, comm);

  return retval;
}
/**
 * debugging helper functions: dump a buffer's content. They take ownership
 * of 'msg' (callers pass xbt_strdup()/bprintf() copies) and free it.
 **/
static void print_buffer_int(void *buf, int len, char *msg, int rank)
{
  int tmp, *v;
  printf("**[%d] %s: ", rank, msg);
  for (tmp = 0; tmp < len; tmp++) {
    v = buf;
    printf("[%d]", v[tmp]);
  }
  printf("\n");
  free(msg);
}

static void print_buffer_double(void *buf, int len, char *msg, int rank)
{
  int tmp;
  double *v;
  printf("**[%d] %s: ", rank, msg);
  for (tmp = 0; tmp < len; tmp++) {
    v = buf;
    printf("[%lf]", v[tmp]);
  }
  printf("\n");
  free(msg);
}
/**
 * MPI_Reduce internal level
 **/
int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
                    MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank;
  int size;
  int i;
  int tag = 0;
  smpi_mpi_request_t *requests;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);
  size = comm->size;

  if (rank != root) {           // if i am not ROOT, simply send my buffer to root
#ifdef DEBUG_REDUCE
    print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
#endif
    retval = smpi_create_request(sendbuf, count, datatype, rank, root, tag, comm,
                                 &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  } else {
    // i am the ROOT: wait for all buffers by creating one request per sender
    int src;
    requests = xbt_malloc((size - 1) * sizeof(smpi_mpi_request_t));

    // we need one buffer per request to store intermediate receptions
    void **tmpbufs = xbt_malloc((size - 1) * sizeof(void *));
    for (i = 0; i < size - 1; i++) {
      tmpbufs[i] = xbt_malloc(count * datatype->size);
    }

    // root: initialize recv buf with my own snd buf
    memcpy(recvbuf, sendbuf, count * datatype->size * sizeof(char));

    // I cannot use 'request->forward = size-1;' (which would propagate size-1
    // receive requests) since we must apply the op as soon as one receiving
    // request matches.
    for (i = 0; i < size - 1; i++) {
      // reminder: for smpi_create_request() the src is always the process sending.
      src = i < root ? i : i + 1;
      retval = smpi_create_request(tmpbufs[i], count, datatype,
                                   src, root, tag, comm, &(requests[i]));
      if (NULL != requests[i] && MPI_SUCCESS == retval) {
        smpi_mpi_irecv(requests[i]);
      }
    }

    // now, wait for completion of all irecv's, combining values in any order.
    for (i = 0; i < size - 1; i++) {
      int index = MPI_UNDEFINED;
      smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
#ifdef DEBUG_REDUCE
      printf("MPI_Waitany() unblocked: root received (completes req[index=%d])\n", index);
      print_buffer_int(tmpbufs[index], count,
                       bprintf("tmpbufs[index=%d] (value received)", index), rank);
#endif
      // combine the newly received values into recvbuf (arg 2 is modified)
      op->func(tmpbufs[index], recvbuf, &count, &datatype);
#ifdef DEBUG_REDUCE
      print_buffer_int(recvbuf, count, xbt_strdup("rcvbuf"), rank);
#endif
      xbt_free(tmpbufs[index]);
      /* FIXME: with the following line, it generates an
       * [xbt_ex/CRITICAL] Conditional list not empty 162518800.
       */
      // xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
    }
    xbt_free(requests);
    xbt_free(tmpbufs);
  }

  return retval;
}
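// Note: the root folds incoming buffers in completion order (via waitany), so
// the operand order is nondeterministic across runs. That is harmless for
// commutative operations such as sum or max, but a non-commutative MPI_Op may
// observe an operand order different from rank order.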
/**
 * MPI_Reduce user entry point
 **/
int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
                    MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;

  retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);

  return retval;
}
/**
 * MPI_Allreduce user entry point
 *
 * Same as MPI_Reduce except that the result appears in the receive buffer of
 * all the group members.
 **/
int SMPI_MPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                       MPI_Op op, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int root = 0;                 // arbitrary choice

  retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
  if (MPI_SUCCESS != retval)
    return retval;

  // broadcast the reduced result, which the reduce left in recvbuf at root
  retval = smpi_mpi_bcast(recvbuf, count, datatype, root, comm);

  return retval;
}
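/*
 * Minimal usage sketch (hypothetical variables; assumes the usual MPI_SUM
 * handle is provided by this MPI implementation): a global sum after which
 * every rank holds the total in 'total'.
 *
 *   int mine = rank, total = 0;
 *   SMPI_MPI_Allreduce(&mine, &total, 1, MPI_INT, MPI_SUM, comm);
 */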
/**
 * MPI_Scatter user entry point
 **/
int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype,
                     int root, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int i;
  int cnt = 0;
  int rank;
  int tag = 0;
  char *cptr;                   // to manipulate the void * buffers
  smpi_mpi_request_t *requests;
  smpi_mpi_request_t request;
  smpi_mpi_status_t status;

  rank = smpi_mpi_comm_rank(comm);

  requests = xbt_malloc((comm->size - 1) * sizeof(smpi_mpi_request_t));
  if (rank == root) {
    // i am the root: distribute my sendbuf
    //print_buffer_int(sendbuf, comm->size, xbt_strdup("sndbuf"), rank);
    cptr = sendbuf;
    for (i = 0; i < comm->size; i++) {
      if (i != root) {          // send to processes ...
        retval = smpi_create_request((void *)cptr, sendcount,
                                     datatype, root, i, tag, comm, &(requests[cnt]));
        if (NULL != requests[cnt] && MPI_SUCCESS == retval) {
          smpi_mpi_isend(requests[cnt]);
        }
        cnt++;
      } else {                  // ... except if it's me: plain local copy
        memcpy(recvbuf, (void *)cptr, recvcount * recvtype->size * sizeof(char));
      }
      cptr += sendcount * datatype->size;
    }
    for (i = 0; i < cnt; i++) { // wait for sends to complete
      /* FIXME: waitall() should be slightly better */
      smpi_mpi_wait(requests[i], &status);
      xbt_mallocator_release(smpi_global->request_mallocator, requests[i]);
    }
  } else {                      // i am a non-root process: wait for data from the root
    retval = smpi_create_request(recvbuf, recvcount,
                                 recvtype, root, rank, tag, comm, &request);
    if (NULL != request && MPI_SUCCESS == retval) {
      smpi_mpi_irecv(request);
    }
    smpi_mpi_wait(request, &status);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }
  xbt_free(requests);

  return retval;
}
/**
 * MPI_Alltoall user entry point
 *
 * Uses the logic of OpenMPI (up to 1.2.7 or greater) for the optimizations:
 * ompi/mca/coll/tuned/coll_tuned_module.c
 **/
int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank;
  int block_dsize;

  rank = smpi_mpi_comm_rank(comm);
  block_dsize = datatype->size * sendcount;
  INFO2("[%d] optimized alltoall() called. Block size sent to each rank=%d.\n",
        rank, block_dsize);

  if ((block_dsize < 200) && (comm->size > 12)) {
    retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, datatype,
                                            recvbuf, recvcount, recvtype, comm);
  } else if (block_dsize < 3000) {
    retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, datatype,
                                                   recvbuf, recvcount, recvtype, comm);
  } else {
    retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, datatype,
                                               recvbuf, recvcount, recvtype, comm);
  }

  return retval;
}
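// The selection mirrors OpenMPI's tuned defaults: Bruck's algorithm for small
// blocks (< 200 bytes) on communicators of more than 12 ranks, the basic
// linear exchange for other blocks under 3000 bytes, and the pairwise
// exchange for large blocks, where bandwidth dominates over latency.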
// used by comm_split to sort ranks based on key values
int smpi_compare_rankkeys(const void *a, const void *b);
int smpi_compare_rankkeys(const void *a, const void *b)
{
  // each element is an (original rank, key) pair of ints: order by key,
  // breaking ties by original rank, as MPI_Comm_split semantics require
  const int *x = a;
  const int *y = b;

  if (x[1] != y[1])
    return (x[1] < y[1]) ? -1 : 1;
  if (x[0] != y[0])
    return (x[0] < y[0]) ? -1 : 1;
  return 0;
}
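// qsort() in SMPI_MPI_Comm_split() below treats rankkeymap as an array of
// two-int records, hence the 'sizeof(int) * 2' element size that accompanies
// this comparator.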
int SMPI_MPI_Comm_split(MPI_Comm comm, int color, int key,
                        MPI_Comm * comm_out)
{
  int retval = MPI_SUCCESS;

  int index, rank;
  smpi_mpi_request_t request;
  int colorkey[2];
  smpi_mpi_status_t status;

  // FIXME: need to test parameters

  index = smpi_process_index();
  rank = comm->index_to_rank_map[index];

  // default output: no communicator
  *comm_out = NULL;

  // root node does most of the real work
  if (0 == rank) {
    int colormap[comm->size];
    int keymap[comm->size];
    int rankkeymap[comm->size * 2];
    int i, j;
    smpi_mpi_communicator_t tempcomm = NULL;
    int count;
    int indextmp;

    colormap[0] = color;
    keymap[0] = key;

    // gather the (color, key) pair of every other process
    // FIXME: use scatter/gather or similar instead of individual comms
    for (i = 1; i < comm->size; i++) {
      retval = smpi_create_request(colorkey, 2, MPI_INT, MPI_ANY_SOURCE,
                                   rank, MPI_ANY_TAG, comm, &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      colormap[status.MPI_SOURCE] = colorkey[0];
      keymap[status.MPI_SOURCE] = colorkey[1];
      xbt_mallocator_release(smpi_global->request_mallocator, request);
    }

    for (i = 0; i < comm->size; i++) {
      if (MPI_UNDEFINED == colormap[i]) {
        continue;
      }
      // make a list of nodes with the current color and sort it by keys
      count = 0;
      for (j = i; j < comm->size; j++) {
        if (colormap[i] == colormap[j]) {
          colormap[j] = MPI_UNDEFINED;  // mark as handled
          rankkeymap[count * 2] = j;
          rankkeymap[count * 2 + 1] = keymap[j];
          count++;
        }
      }
      qsort(rankkeymap, count, sizeof(int) * 2, &smpi_compare_rankkeys);

      // create the communicator for this color group
      tempcomm = xbt_new(s_smpi_mpi_communicator_t, 1);
      tempcomm->barrier_count = 0;
      tempcomm->size = count;
      tempcomm->barrier_mutex = SIMIX_mutex_init();
      tempcomm->barrier_cond = SIMIX_cond_init();
      tempcomm->rank_to_index_map = xbt_new(int, count);
      tempcomm->index_to_rank_map = xbt_new(int, smpi_global->process_count);
      for (j = 0; j < smpi_global->process_count; j++) {
        tempcomm->index_to_rank_map[j] = -1;
      }
      for (j = 0; j < count; j++) {
        indextmp = comm->rank_to_index_map[rankkeymap[j * 2]];
        tempcomm->rank_to_index_map[j] = indextmp;
        tempcomm->index_to_rank_map[indextmp] = j;
      }
      // notify every group member of its new rank, shipping the new
      // communicator along in the request's data field
      for (j = 0; j < count; j++) {
        if (rankkeymap[j * 2]) {        // a remote process
          retval = smpi_create_request(&j, 1, MPI_INT, 0,
                                       rankkeymap[j * 2], 0, comm, &request);
          request->data = tempcomm;
          smpi_mpi_isend(request);
          smpi_mpi_wait(request, &status);
          xbt_mallocator_release(smpi_global->request_mallocator, request);
        } else {                        // the root itself
          *comm_out = tempcomm;
        }
      }
    }
  } else {
    // non-root: send my (color, key) pair to the root ...
    colorkey[0] = color;
    colorkey[1] = key;
    retval = smpi_create_request(colorkey, 2, MPI_INT, rank, 0, 0, comm,
                                 &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, &status);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
    // ... and, unless I opted out with MPI_UNDEFINED, receive my new rank
    // and communicator back
    if (MPI_UNDEFINED != color) {
      retval = smpi_create_request(colorkey, 1, MPI_INT, 0, rank, 0, comm,
                                   &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      *comm_out = request->data;
      xbt_mallocator_release(smpi_global->request_mallocator, request);
    }
  }

  return retval;
}
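/*
 * Minimal usage sketch (hypothetical 'rank' variable): split the even and odd
 * ranks of 'comm' into two disjoint communicators, ordering each new group by
 * the caller's old rank (passed as the key):
 *
 *   MPI_Comm newcomm;
 *   SMPI_MPI_Comm_split(comm, rank % 2, rank, &newcomm);
 */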
double SMPI_MPI_Wtime(void)
{
  return (SIMIX_get_clock());
}