/* This file will eventually contain the user-level MPI primitives and their
 * corresponding internal wrappers. The implementations of these primitives
 * should go to specific files. For example, SMPI_MPI_Bcast() in this file
 * should call the wrapper smpi_mpi_bcast(), which decides which implementation
 * to call. Currently, it calls nary_tree_bcast() in smpi_coll.c. (Stéphane Genaud).
 */
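/* Illustrative sketch (not part of the original file): how the layering
 * described above is used from application code. The SMPI_MPI_* entry points
 * are the real ones from this file; the surrounding main() is hypothetical,
 * and MPI_COMM_WORLD is assumed to be provided by the SMPI headers. */
#if 0
int main(int argc, char *argv[])
{
  int buf = 42;
  SMPI_MPI_Init(&argc, &argv);
  /* user-level primitive -> smpi_mpi_bcast() wrapper -> nary_tree_bcast() */
  SMPI_MPI_Bcast(&buf, 1, MPI_INT, 0, MPI_COMM_WORLD);
  SMPI_MPI_Finalize();
  return 0;
}
#endif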
#include "smpi_coll_private.h"
#include "smpi_mpi_dt_private.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi,
                                "Logging specific to SMPI (mpi)");
int SMPI_MPI_Init(int *argc, char ***argv)
{
  smpi_process_init(argc, argv);
  return MPI_SUCCESS;
}

int SMPI_MPI_Finalize()
{
  smpi_process_finalize();
  return MPI_SUCCESS;
}
// right now this just exits the current node; it should instead send an abort
// signal to all hosts in the communicator (TODO)
int SMPI_MPI_Abort(MPI_Comm comm, int errorcode)
int SMPI_MPI_Comm_size(MPI_Comm comm, int *size)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else if (NULL == size) {
    retval = MPI_ERR_ARG;
  } else {
    *size = comm->size;
  }

  return retval;
}
int SMPI_MPI_Comm_rank(MPI_Comm comm, int *rank)
{
  int retval = MPI_SUCCESS;

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else if (NULL == rank) {
    retval = MPI_ERR_ARG;
  } else {
    *rank = smpi_mpi_comm_rank(comm);
  }

  return retval;
}
int SMPI_MPI_Barrier(MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int arity = 4;                // n-ary tree fan-out (assumed value)

  if (NULL == comm) {
    retval = MPI_ERR_COMM;
  } else {
    /*
     * original implementation:
     *   retval = smpi_mpi_barrier(comm);
     * That one is unrealistic: it just cond_waits, so it consumes no simulated time.
     */
    retval = nary_tree_barrier(comm, arity);
  }

  return retval;
}
int SMPI_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Request *request)
{
  int retval = MPI_SUCCESS;

  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(*request);
  }

  return retval;
}
int SMPI_MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src,
                  int tag, MPI_Comm comm, MPI_Status *status)
{
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  retval = smpi_create_request(buf, count, datatype, src, 0, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(request);
    if (MPI_SUCCESS == retval) {
      retval = smpi_mpi_wait(request, status);
    }
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }

  return retval;
}
int SMPI_MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm, MPI_Request *request)
{
  int retval = MPI_SUCCESS;

  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(*request);
  }

  return retval;
}
/**
 * MPI_Send user level
 **/
int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
                  int tag, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  retval = smpi_create_request(buf, count, datatype, 0, dst, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(request);
    if (MPI_SUCCESS == retval) {
      smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    }
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }

  return retval;
}
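/* Note: the blocking Send/Recv above are thin wrappers, an isend/irecv
 * followed by a wait. Requests are taken from and returned to
 * smpi_global->request_mallocator, an xbt pool that recycles request
 * objects instead of allocating one per message. */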
/**
 * MPI_Sendrecv internal level
 **/
int smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
                      MPI_Comm comm, MPI_Status *status)
{
  int rank;
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t srequest;
  smpi_mpi_request_t rrequest;

  rank = smpi_mpi_comm_rank(comm);
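  /* Post both transfers as nonblocking requests before waiting on either:
   * if two processes sendrecv() with each other, a blocking send followed
   * by a blocking receive could deadlock, while isend+irecv cannot. */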
  /* send */
  retval = smpi_create_request(sendbuf, sendcount, sendtype,
                               rank, dest, sendtag, comm, &srequest);
  smpi_mpi_isend(srequest);

  /* receive */
  retval = smpi_create_request(recvbuf, recvcount, recvtype,
                               source, rank, recvtag,
                               comm, &rrequest);
  smpi_mpi_irecv(rrequest);

  smpi_mpi_wait(srequest, MPI_STATUS_IGNORE);
  //printf("[%d] isend request src=%d dst=%d tag=%d COMPLETED (retval=%d) \n", rank, rank, dest, sendtag, retval);
  smpi_mpi_wait(rrequest, status);      // report the receive's status to the caller
  //printf("[%d] irecv request src=%d -> dst=%d tag=%d COMPLETED (retval=%d)\n", rank, source, rank, recvtag, retval);

  xbt_mallocator_release(smpi_global->request_mallocator, srequest);
  xbt_mallocator_release(smpi_global->request_mallocator, rrequest);
  return retval;
}
/**
 * MPI_Sendrecv user entry point
 **/
int SMPI_MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
                      MPI_Comm comm, MPI_Status *status)
{
  int retval = MPI_SUCCESS;

  retval = smpi_mpi_sendrecv(sendbuf, sendcount, sendtype, dest, sendtag,
                             recvbuf, recvcount, recvtype, source, recvtag,
                             comm, status);

  return retval;
}
/**
 * MPI_Wait and friends
 **/
int SMPI_MPI_Wait(MPI_Request *request, MPI_Status *status)
{
  return smpi_mpi_wait(*request, status);
}

int SMPI_MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
{
  return smpi_mpi_waitall(count, requests, status);
}

int SMPI_MPI_Waitany(int count, MPI_Request requests[], int *index,
                     MPI_Status status[])
{
  return smpi_mpi_waitany(count, requests, index, status);
}
int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                    MPI_Comm comm)
{
  int rank;
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);
  if (rank == root) {
    retval = smpi_create_request(buf, count, datatype, root,
                                 (root + 1) % comm->size, 0, comm, &request);
    request->forward = comm->size - 1;
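    /* Judging from the comment in smpi_mpi_reduce(), the 'forward' field set
     * above asks the runtime to propagate this request to the remaining
     * comm->size - 1 receivers, so the root posts a single isend (assumption
     * based on that comment, not on the request implementation itself). */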
    smpi_mpi_isend(request);
  } else {
    retval = smpi_create_request(buf, count, datatype, MPI_ANY_SOURCE, rank,
                                 0, comm, &request);
    smpi_mpi_irecv(request);
  }

  smpi_mpi_wait(request, MPI_STATUS_IGNORE);
  xbt_mallocator_release(smpi_global->request_mallocator, request);

  return retval;
}
/**
 * Bcast internal level
 **/
int smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank = smpi_mpi_comm_rank(comm);

  DEBUG1("<%d> entered smpi_mpi_bcast(). Calls nary_tree_bcast()", rank);
  //retval = flat_tree_bcast(buf, count, datatype, root, comm);
  retval = nary_tree_bcast(buf, count, datatype, root, comm, 2 /* arity 2: binary tree */);
  return retval;
}
/**
 * Bcast user entry point
 **/
int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
{
  int retval = MPI_SUCCESS;

  retval = smpi_mpi_bcast(buf, count, datatype, root, comm);

  return retval;
}
/**
 * debugging helper functions
 **/
static void print_buffer_int(void *buf, int len, char *msg, int rank)
{
  int tmp;
  int *v = (int *) buf;

  printf("**[%d] %s: ", rank, msg);
  for (tmp = 0; tmp < len; tmp++) {
    printf("[%d]", v[tmp]);
  }
  printf("\n");
  free(msg);                    // callers pass freshly allocated strings (xbt_strdup/bprintf)
}

static void print_buffer_double(void *buf, int len, char *msg, int rank)
{
  int tmp;
  double *v = (double *) buf;

  printf("**[%d] %s: ", rank, msg);
  for (tmp = 0; tmp < len; tmp++) {
    printf("[%lf]", v[tmp]);
  }
  printf("\n");
  free(msg);
}
/**
 * MPI_Reduce internal level
 **/
int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
                    MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank;
  int size;
  int i;
  int system_tag = 666;
  smpi_mpi_request_t *requests;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);
  size = comm->size;

  DEBUG1("<%d> entered smpi_mpi_reduce()", rank);
  if (rank != root) {           // if i am not ROOT, simply send my buffer to root
#ifdef DEBUG_REDUCE
    print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);
#endif
    retval = smpi_create_request(sendbuf, count, datatype, rank, root, system_tag, comm,
                                 &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  } else {
    // i am the ROOT: wait for all buffers by creating one request per sender
    int src;
    requests = xbt_malloc((size - 1) * sizeof(smpi_mpi_request_t));
    void **tmpbufs = xbt_malloc((size - 1) * sizeof(void *));
    for (i = 0; i < size - 1; i++) {
      // we need 1 buffer per request to store intermediate receptions
      tmpbufs[i] = xbt_malloc(count * datatype->size);
    }
    // root: initialize recv buf with my own snd buf
    memcpy(recvbuf, sendbuf, count * datatype->size);

    // I cannot use 'request->forward = size-1;' (which would propagate size-1 receive reqs)
    // since we should op values as soon as one receiving request matches.
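    // Note: the waitany() loop below applies 'op' in completion order. That
    // is only safe when the operation is commutative, which holds for MPI's
    // predefined operations; a user-defined non-commutative op would require
    // combining the contributions in rank order instead.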
    for (i = 0; i < size - 1; i++) {
      // reminder: for smpi_create_request() the src is always the process sending.
      src = i < root ? i : i + 1;
      retval = smpi_create_request(tmpbufs[i], count, datatype,
                                   src, root, system_tag, comm, &(requests[i]));
      if (NULL != requests[i] && MPI_SUCCESS == retval) {
        smpi_mpi_irecv(requests[i]);
      }
    }
    // now, wait for completion of all irecv's.
    for (i = 0; i < size - 1; i++) {
      int index = MPI_UNDEFINED;
      smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
      DEBUG3("<%d> waitany() unblocked by reception (completes request[%d]) (%d reqs remaining)",
             rank, index, size - i - 2);
#ifdef DEBUG_REDUCE
      print_buffer_int(tmpbufs[index], count, bprintf("tmpbufs[index=%d] (value received)", index),
                       rank);
#endif
      // apply the reduction operation to the freshly received buffer
      op->func(tmpbufs[index], recvbuf, &count, &datatype);
#ifdef DEBUG_REDUCE
      print_buffer_int(recvbuf, count, xbt_strdup("rcvbuf"), rank);
#endif
      xbt_free(tmpbufs[index]);
      /* FIXME: with the following line, it generates an
       * [xbt_ex/CRITICAL] Conditional list not empty 162518800.
       */
      // xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
    }
    xbt_free(requests);
    xbt_free(tmpbufs);
  }
  return retval;
}
/**
 * MPI_Reduce user entry point
 **/
int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
                    MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;

  retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);

  return retval;
}
/**
 * MPI_Allreduce
 *
 * Same as MPI_Reduce except that the result appears in the receive buffer of all the group members.
 **/
int SMPI_MPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                       MPI_Op op, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int root = 0;                 // arbitrary choice

  retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
  if (MPI_SUCCESS != retval)
    return retval;

  // broadcast the reduced result: recvbuf (not sendbuf) holds it at the root
  retval = smpi_mpi_bcast(recvbuf, count, datatype, root, comm);

  return retval;
}
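/* Implementation note: composing reduce + bcast costs roughly two tree
 * traversals. Dedicated allreduce algorithms (e.g., recursive doubling, as
 * used in other MPI implementations) can do better; the two-phase version
 * here favors simplicity. */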
/**
 * MPI_Scatter user entry point
 **/
int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype,
                     int root, MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int i;
  int cnt = 0;
  int rank;
  int tag = 0;                  // tag value assumed (not visible in this excerpt)
  char *cptr;                   // to manipulate the void * buffers
  smpi_mpi_request_t *requests;
  smpi_mpi_request_t request;
  smpi_mpi_status_t status;

  rank = smpi_mpi_comm_rank(comm);

  requests = xbt_malloc((comm->size - 1) * sizeof(smpi_mpi_request_t));
  if (rank == root) {
    // i am the root: distribute my sendbuf
    //print_buffer_int(sendbuf, comm->size, xbt_strdup("rcvbuf"), rank);
    cptr = sendbuf;
    for (i = 0; i < comm->size; i++) {
      if (i != root) {          // send to processes ...
        retval = smpi_create_request((void *) cptr, sendcount,
                                     datatype, root, i, tag, comm, &(requests[cnt]));
        if (NULL != requests[cnt] && MPI_SUCCESS == retval) {
          smpi_mpi_isend(requests[cnt]);
        }
        cnt++;
      } else {                  // ... except if it's me.
        memcpy(recvbuf, (void *) cptr, recvcount * recvtype->size);
      }
      cptr += sendcount * datatype->size;
    }
    for (i = 0; i < cnt; i++) { // wait for sends to complete
      /* FIXME: waitall() would be slightly better */
      smpi_mpi_wait(requests[i], &status);
      xbt_mallocator_release(smpi_global->request_mallocator, requests[i]);
    }
  } else {                      // i am a non-root process: wait for data from the root
    retval = smpi_create_request(recvbuf, recvcount,
                                 recvtype, root, rank, tag, comm, &request);
    if (NULL != request && MPI_SUCCESS == retval) {
      smpi_mpi_irecv(request);
    }
    smpi_mpi_wait(request, &status);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }
  xbt_free(requests);

  return retval;
}
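/* Implementation note: this scatter is linear, i.e. the root posts
 * comm->size - 1 individual sends, so the root is a serial bottleneck on
 * large communicators. A tree-based scatter (in the spirit of the n-ary
 * bcast above) would spread that load; the linear form is kept for
 * simplicity. */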
/**
 * MPI_Alltoall user entry point
 *
 * Follows the optimization logic of OpenMPI (as of 1.2.7); see
 * ompi/mca/coll/tuned/coll_tuned_module.c
 **/
int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank;
  int block_dsize;

  rank = smpi_mpi_comm_rank(comm);
  block_dsize = datatype->size * sendcount;
  DEBUG2("<%d> optimized alltoall() called. Block size sent to each rank: %d bytes.", rank, block_dsize);
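  /* Decision logic borrowed from OpenMPI's tuned module (see header comment):
   *  - tiny blocks on larger communicators: Bruck's algorithm, O(log P)
   *    communication steps at the price of extra copying/packing;
   *  - small-to-medium blocks: basic linear, post all sends/receives at once;
   *  - large blocks: pairwise exchange, P-1 ordered rounds that limit
   *    congestion and memory pressure. */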
  if ((block_dsize < 200) && (comm->size > 12)) {
    retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, datatype,
                                            recvbuf, recvcount, recvtype, comm);
  } else if (block_dsize < 3000) {
    retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, datatype,
                                                   recvbuf, recvcount, recvtype, comm);
  } else {
    retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, datatype,
                                               recvbuf, recvcount, recvtype, comm);
  }

  return retval;
}
/**
 * MPI_Alltoallv user entry point
 *
 * As in OpenMPI, alltoallv is not optimized; see
 * ompi/mca/coll/basic/coll_basic_alltoallv.c
 **/
int SMPI_MPI_Alltoallv(void *sendbuf, int *scounts, int *sdisps, MPI_Datatype datatype,
                       void *recvbuf, int *rcounts, int *rdisps, MPI_Datatype recvtype,
                       MPI_Comm comm)
{
  int retval = MPI_SUCCESS;
  int rank;

  rank = smpi_mpi_comm_rank(comm);
  DEBUG1("<%d> basic alltoallv() called.", rank);

  retval = smpi_coll_basic_alltoallv(sendbuf, scounts, sdisps, datatype,
                                     recvbuf, rcounts, rdisps, recvtype,
                                     comm);

  return retval;
}
// used by comm_split to sort ranks based on key values
int smpi_compare_rankkeys(const void *a, const void *b);
int smpi_compare_rankkeys(const void *a, const void *b)
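/* The body below is a reconstruction (this part is missing from the excerpt):
 * per MPI_Comm_split semantics, processes are ordered by key, with ties broken
 * by their rank in the old communicator. Each element compared by qsort() is
 * an (old rank, key) pair of two ints, matching the rankkeymap[] layout and
 * the sizeof(int) * 2 element size used in SMPI_MPI_Comm_split(). */
{
  const int *x = (const int *) a;
  const int *y = (const int *) b;

  if (x[1] != y[1])             /* compare keys first */
    return (x[1] < y[1]) ? -1 : 1;
  if (x[0] != y[0])             /* equal keys: fall back to the old rank */
    return (x[0] < y[0]) ? -1 : 1;
  return 0;
}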
int SMPI_MPI_Comm_split(MPI_Comm comm, int color, int key,
                        MPI_Comm *comm_out)
{
  int retval = MPI_SUCCESS;

  int index, rank;
  int i, j, count, indextmp;
  int colorkey[2];
  smpi_mpi_request_t request;
  smpi_mpi_status_t status;

  // FIXME: need to test parameters

  index = smpi_process_index();
  rank = comm->index_to_rank_map[index];

  // root node does most of the real work
  if (0 == rank) {
    int colormap[comm->size];
    int keymap[comm->size];
    int rankkeymap[comm->size * 2];
    smpi_mpi_communicator_t tempcomm = NULL;

    // record the root's own color/key
    colormap[0] = color;
    keymap[0] = key;

    // FIXME: use scatter/gather or similar instead of individual comms
    for (i = 1; i < comm->size; i++) {
      retval = smpi_create_request(colorkey, 2, MPI_INT, MPI_ANY_SOURCE,
                                   rank, MPI_ANY_TAG, comm, &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      colormap[status.MPI_SOURCE] = colorkey[0];
      keymap[status.MPI_SOURCE] = colorkey[1];
      xbt_mallocator_release(smpi_global->request_mallocator, request);
    }

    for (i = 0; i < comm->size; i++) {
      if (MPI_UNDEFINED == colormap[i]) {
        continue;
      }
      // make a list of nodes with the current color and sort it by keys
      // (save the color first: the loop below clears colormap[] as it scans,
      // starting with colormap[i] itself on its first iteration)
      int currentcolor = colormap[i];
      count = 0;
      for (j = i; j < comm->size; j++) {
        if (currentcolor == colormap[j]) {
          colormap[j] = MPI_UNDEFINED;
          rankkeymap[count * 2] = j;
          rankkeymap[count * 2 + 1] = keymap[j];
          count++;
        }
      }
      qsort(rankkeymap, count, sizeof(int) * 2, &smpi_compare_rankkeys);

      // build the new communicator for this color
      tempcomm = xbt_new(s_smpi_mpi_communicator_t, 1);
      tempcomm->barrier_count = 0;
      tempcomm->size = count;
      tempcomm->barrier_mutex = SIMIX_mutex_init();
      tempcomm->barrier_cond = SIMIX_cond_init();
      tempcomm->rank_to_index_map = xbt_new(int, count);
      tempcomm->index_to_rank_map = xbt_new(int, smpi_global->process_count);
      for (j = 0; j < smpi_global->process_count; j++) {
        tempcomm->index_to_rank_map[j] = -1;
      }
      for (j = 0; j < count; j++) {
        indextmp = comm->rank_to_index_map[rankkeymap[j * 2]];
        tempcomm->rank_to_index_map[j] = indextmp;
        tempcomm->index_to_rank_map[indextmp] = j;
      }
      for (j = 0; j < count; j++) {
        if (rankkeymap[j * 2]) {        // send the new communicator to its members ...
          retval = smpi_create_request(&j, 1, MPI_INT, 0,
                                       rankkeymap[j * 2], 0, comm, &request);
          request->data = tempcomm;
          smpi_mpi_isend(request);
          smpi_mpi_wait(request, &status);
          xbt_mallocator_release(smpi_global->request_mallocator, request);
        } else {                        // ... except to myself, the root
          *comm_out = tempcomm;
        }
      }
    }
  } else {
    // non-root nodes send their color/key to the root and wait for the result
    colorkey[0] = color;
    colorkey[1] = key;
    retval = smpi_create_request(colorkey, 2, MPI_INT, rank, 0, 0, comm,
                                 &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, &status);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
    if (MPI_UNDEFINED != color) {
      retval = smpi_create_request(colorkey, 1, MPI_INT, 0, rank, 0, comm,
                                   &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      *comm_out = request->data;
      xbt_mallocator_release(smpi_global->request_mallocator, request);
    }
  }

  return retval;
}
double SMPI_MPI_Wtime(void)
{
  return SIMIX_get_clock();
}
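/* Hypothetical usage sketch (not part of the original file): SMPI_MPI_Wtime()
 * returns the SimGrid simulated clock, so the difference below measures
 * simulated elapsed time, not wall-clock time.
 *
 *   double start = SMPI_MPI_Wtime();
 *   // ... some communication ...
 *   double elapsed = SMPI_MPI_Wtime() - start;
 */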