 * Eventually this file will contain the user-level MPI primitives and their
 * corresponding internal wrappers. The implementation of each primitive should
 * go in a specific file. For example, SMPI_MPI_Bcast() in this file should
 * call the wrapper smpi_mpi_bcast(), which decides which implementation to
 * call. Currently, it calls nary_tree_bcast() in smpi_coll.c. (Stéphane Genaud)
#include "smpi_coll_private.h"
#include "smpi_mpi_dt_private.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi,
                                "Logging specific to SMPI (mpi)");
int SMPI_MPI_Init(int *argc, char ***argv)
  smpi_process_init(argc, argv);

int SMPI_MPI_Finalize()
  smpi_process_finalize();
// Right now this just exits the current node; it should send an abort
// signal to all hosts in the communicator (TODO).
int SMPI_MPI_Abort(MPI_Comm comm, int errorcode)
int SMPI_MPI_Comm_size(MPI_Comm comm, int *size)
  int retval = MPI_SUCCESS;

    retval = MPI_ERR_COMM;
  } else if (NULL == size) {
int SMPI_MPI_Comm_rank(MPI_Comm comm, int *rank)
  int retval = MPI_SUCCESS;

    retval = MPI_ERR_COMM;
  } else if (NULL == rank) {

  *rank = smpi_mpi_comm_rank(comm);
int SMPI_MPI_Barrier(MPI_Comm comm)
  int retval = MPI_SUCCESS;

    retval = MPI_ERR_COMM;

  /*
   * The original implementation was:
   *   retval = smpi_mpi_barrier(comm);
   * but it is unrealistic: it merely cond_waits, so it consumes no simulated time.
   */
  retval = nary_tree_barrier(comm, arity);
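  /* Rationale: an n-ary tree barrier exchanges a logarithmic number of message
   * rounds, so the barrier consumes simulated time roughly proportional to
   * log(comm->size), unlike the plain cond_wait version above. */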
int SMPI_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Request * request)
  int retval = MPI_SUCCESS;

  rank = smpi_mpi_comm_rank(comm);
  retval = smpi_create_request(buf, count, datatype, src, rank, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(*request);
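/**
 * MPI_Recv user level: a blocking receive, implemented as an irecv on a
 * temporary request followed by a wait on that request.
 */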
int SMPI_MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src,
                  int tag, MPI_Comm comm, MPI_Status * status)
  int retval = MPI_SUCCESS;

  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);
  retval = smpi_create_request(buf, count, datatype, src, rank, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_irecv(request);
    if (MPI_SUCCESS == retval) {
      retval = smpi_mpi_wait(request, status);

    xbt_mallocator_release(smpi_global->request_mallocator, request);
int SMPI_MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm, MPI_Request * request)
  int retval = MPI_SUCCESS;

  rank = smpi_mpi_comm_rank(comm);
  retval = smpi_create_request(buf, count, datatype, rank, dst, tag, comm,
                               request);
  if (NULL != *request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(*request);
 * MPI_Send user level
 */
int SMPI_MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst,
                  int tag, MPI_Comm comm)
  int retval = MPI_SUCCESS;

  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);
  retval = smpi_create_request(buf, count, datatype, rank, dst, tag, comm,
                               &request);
  if (NULL != request && MPI_SUCCESS == retval) {
    retval = smpi_mpi_isend(request);
    if (MPI_SUCCESS == retval) {
      smpi_mpi_wait(request, MPI_STATUS_IGNORE);

    xbt_mallocator_release(smpi_global->request_mallocator, request);
 * MPI_Sendrecv internal level
 */
int smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
                      MPI_Comm comm, MPI_Status *status)
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t srequest;
  smpi_mpi_request_t rrequest;

  rank = smpi_mpi_comm_rank(comm);
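  /* Post both the send and the receive as nonblocking requests before waiting
   * on either: even if both peers enter sendrecv "send first", neither blocks,
   * so the exchange cannot deadlock. */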
  retval = smpi_create_request(sendbuf, sendcount, sendtype,
                               rank, dest, sendtag, comm, &srequest);
  smpi_mpi_isend(srequest);

  retval = smpi_create_request(recvbuf, recvcount, recvtype,
                               source, rank, recvtag, comm, &rrequest);
  smpi_mpi_irecv(rrequest);

  smpi_mpi_wait(srequest, MPI_STATUS_IGNORE);
  //printf("[%d] isend request src=%d dst=%d tag=%d COMPLETED (retval=%d)\n", rank, rank, dest, sendtag, retval);
  smpi_mpi_wait(rrequest, MPI_STATUS_IGNORE);
  //printf("[%d] irecv request src=%d -> dst=%d tag=%d COMPLETED (retval=%d)\n", rank, source, rank, recvtag, retval);
 * MPI_Sendrecv user entry point
 */
int SMPI_MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag,
                      MPI_Comm comm, MPI_Status *status)
  int retval = MPI_SUCCESS;

  retval = smpi_mpi_sendrecv(sendbuf, sendcount, sendtype, dest, sendtag,
                             recvbuf, recvcount, recvtype, source, recvtag,
                             comm, status);
 * MPI_Wait and friends
 */
int SMPI_MPI_Wait(MPI_Request * request, MPI_Status * status)
  retval = smpi_mpi_wait(*request, status);

int SMPI_MPI_Waitall(int count, MPI_Request requests[], MPI_Status status[])
  retval = smpi_mpi_waitall(count, requests, status);

int SMPI_MPI_Waitany(int count, MPI_Request requests[], int *index,
                     MPI_Status status[])
  retval = smpi_mpi_waitany(count, requests, index, status);
int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
int flat_tree_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                    MPI_Comm comm)
  int retval = MPI_SUCCESS;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);
  if (rank == root) {
    retval = smpi_create_request(buf, count, datatype, root,
                                 (root + 1) % comm->size, 0, comm, &request);
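    /* Presumably the 'forward' field below tells the request layer to keep
     * propagating this message until all comm->size - 1 non-root ranks have
     * received it; the actual forwarding is done by the request machinery,
     * not by this function. */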
    request->forward = comm->size - 1;
    smpi_mpi_isend(request);
  } else {
    retval = smpi_create_request(buf, count, datatype, MPI_ANY_SOURCE, rank,
                                 0, comm, &request);
    smpi_mpi_irecv(request);
  }
  smpi_mpi_wait(request, MPI_STATUS_IGNORE);
  xbt_mallocator_release(smpi_global->request_mallocator, request);
 * Bcast internal level
 */
int smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
  int retval = MPI_SUCCESS;
  int rank = smpi_mpi_comm_rank(comm);

  DEBUG1("<%d> entered smpi_mpi_bcast(). Calls nary_tree_bcast()", rank);
  //retval = flat_tree_bcast(buf, count, datatype, root, comm);
  retval = nary_tree_bcast(buf, count, datatype, root, comm, 2);
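  /* arity 2 means a binary broadcast tree; see nary_tree_bcast() in
   * smpi_coll.c. The flat tree above is retained as an alternative. */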
 * Bcast user entry point
 */
int SMPI_MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root,
                   MPI_Comm comm)
  int retval = MPI_SUCCESS;

  retval = smpi_mpi_bcast(buf, count, datatype, root, comm);
 * debugging helper function
 */
static void print_buffer_int(void *buf, int len, char *msg, int rank)
  printf("**[%d] %s: ", rank, msg);
  for (tmp = 0; tmp < len; tmp++) {
    printf("[%d]", v[tmp]);

static void print_buffer_double(void *buf, int len, char *msg, int rank)
  printf("**[%d] %s: ", rank, msg);
  for (tmp = 0; tmp < len; tmp++) {
    printf("[%lf]", v[tmp]);
 * MPI_Reduce internal level
 */
int smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
                    MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
  int retval = MPI_SUCCESS;

  int system_tag = 666;
  smpi_mpi_request_t *requests;
  smpi_mpi_request_t request;

  rank = smpi_mpi_comm_rank(comm);

  DEBUG1("<%d> entered smpi_mpi_reduce()", rank);

  if (rank != root) {           // if I am not root, simply send my buffer to root

    print_buffer_int(sendbuf, count, xbt_strdup("sndbuf"), rank);

    retval = smpi_create_request(sendbuf, count, datatype, rank, root, system_tag, comm,
                                 &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  } else {
    // I am the root: wait for all buffers by creating one request per sender
    requests = xbt_malloc((size - 1) * sizeof(smpi_mpi_request_t));
    void **tmpbufs = xbt_malloc((size - 1) * sizeof(void *));
    for (i = 0; i < size - 1; i++) {
      // we need one buffer per request to store the intermediate receptions
      tmpbufs[i] = xbt_malloc(count * datatype->size);
    }
    // root: initialize the recv buffer with my own send buffer
    memcpy(recvbuf, sendbuf, count * datatype->size * sizeof(char));

    // We cannot use 'request->forward = size - 1;' here (which would propagate
    // size - 1 receive requests), because we must apply the reduction
    // operation as soon as any one receive request completes.
    for (i = 0; i < size - 1; i++) {
      // reminder: for smpi_create_request() the src is always the sending process
      src = i < root ? i : i + 1;
      retval = smpi_create_request(tmpbufs[i], count, datatype,
                                   src, root, system_tag, comm, &(requests[i]));
      if (NULL != requests[i] && MPI_SUCCESS == retval) {
        smpi_mpi_irecv(requests[i]);
    // now wait for the completion of all irecv's
    for (i = 0; i < size - 1; i++) {
      int index = MPI_UNDEFINED;
      smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
      DEBUG3("<%d> waitany() unblocked by reception (completes request[%d]) (%d reqs remaining)",
             rank, index, size - i - 2);

      print_buffer_int(tmpbufs[index], count, bprintf("tmpbufs[index=%d] (value received)", index),
                       rank);

      op->func(tmpbufs[index], recvbuf, &count, &datatype);
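      /* Note: contributions are combined in arrival order rather than rank
       * order, which is only correct for commutative reduction operations;
       * all predefined MPI_Ops are commutative, so this is safe for them. */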
      print_buffer_int(recvbuf, count, xbt_strdup("rcvbuf"), rank);

      xbt_free(tmpbufs[index]);
      /* FIXME: with the following line, it generates an
       * [xbt_ex/CRITICAL] Conditional list not empty 162518800.
       */
      xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
 * MPI_Reduce user entry point
 */
int SMPI_MPI_Reduce(void *sendbuf, void *recvbuf, int count,
                    MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
  int retval = MPI_SUCCESS;

  retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
 * Same as MPI_Reduce except that the result appears in the receive buffer of
 * all the group members.
 */
int SMPI_MPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
                       MPI_Op op, MPI_Comm comm)
  int retval = MPI_SUCCESS;
  int root = 0;                 // arbitrary choice

  retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
  if (MPI_SUCCESS != retval)
    return retval;

  // broadcast the reduced result held in root's recvbuf, not the send buffer
  retval = smpi_mpi_bcast(recvbuf, count, datatype, root, comm);
 * MPI_Scatter user entry point
 */
int SMPI_MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype datatype,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype,
                     int root, MPI_Comm comm)
  int retval = MPI_SUCCESS;

  char *cptr;                   // to manipulate the void* buffers
  smpi_mpi_request_t *requests;
  smpi_mpi_request_t request;
  smpi_mpi_status_t status;

  rank = smpi_mpi_comm_rank(comm);

  requests = xbt_malloc((comm->size - 1) * sizeof(smpi_mpi_request_t));
  if (rank == root) {
    // I am the root: distribute my sendbuf
    //print_buffer_int(sendbuf, comm->size, xbt_strdup("rcvbuf"), rank);
    for (i = 0; i < comm->size; i++) {
      if (i != root) {          // send a block to each process ...
        retval = smpi_create_request((void *) cptr, sendcount,
                                     datatype, root, i, tag, comm, &(requests[cnt]));
        if (NULL != requests[cnt] && MPI_SUCCESS == retval) {
          smpi_mpi_isend(requests[cnt]);
          cnt++;
        }
      }
      else {                    // ... except if it's me: local copy instead
        memcpy(recvbuf, (void *) cptr, recvcount * recvtype->size * sizeof(char));
      }
      cptr += sendcount * datatype->size;
    }
    for (i = 0; i < cnt; i++) {         // wait for the sends to complete
      /* FIXME: waitall() would be slightly better */
      smpi_mpi_wait(requests[i], &status);
      xbt_mallocator_release(smpi_global->request_mallocator, requests[i]);
    }
  }
  else {                        // I am a non-root process: wait for data from the root
    retval = smpi_create_request(recvbuf, recvcount,
                                 recvtype, root, rank, tag, comm, &request);
    if (NULL != request && MPI_SUCCESS == retval) {
      smpi_mpi_irecv(request);
    }
    smpi_mpi_wait(request, &status);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  }
 * MPI_Alltoall user entry point
 *
 * Uses the algorithm-selection logic of Open MPI (up to version 1.2.7 or so);
 * see ompi/mca/coll/tuned/coll_tuned_module.c
 */
int SMPI_MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype datatype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      MPI_Comm comm)
  int retval = MPI_SUCCESS;

  rank = smpi_mpi_comm_rank(comm);
  block_dsize = datatype->size * sendcount;
  DEBUG2("<%d> optimized alltoall() called. Block size sent to each rank: %d bytes.", rank, block_dsize);
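  /* Algorithm selection, following the Open MPI tuned heuristics: Bruck for
   * many small blocks (latency-bound), basic linear for medium blocks, and
   * pairwise exchange for large blocks (bandwidth-bound). The thresholds
   * below are the ones hard-coded here, not a universal rule. */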
  if ((block_dsize < 200) && (comm->size > 12)) {
    retval = smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, datatype,
                                            recvbuf, recvcount, recvtype, comm);
  } else if (block_dsize < 3000) {
    retval = smpi_coll_tuned_alltoall_basic_linear(sendbuf, sendcount, datatype,
                                                   recvbuf, recvcount, recvtype, comm);
  } else {
    retval = smpi_coll_tuned_alltoall_pairwise(sendbuf, sendcount, datatype,
                                               recvbuf, recvcount, recvtype, comm);
  }
 * MPI_Alltoallv user entry point
 *
 * As in Open MPI, alltoallv is not optimized; see
 * ompi/mca/coll/basic/coll_basic_alltoallv.c
 */
int SMPI_MPI_Alltoallv(void *sendbuf, int *scounts, int *sdisps, MPI_Datatype datatype,
                       void *recvbuf, int *rcounts, int *rdisps, MPI_Datatype recvtype,
                       MPI_Comm comm)
  int retval = MPI_SUCCESS;

  rank = smpi_mpi_comm_rank(comm);
  DEBUG1("<%d> basic alltoallv() called.", rank);

  retval = smpi_coll_basic_alltoallv(sendbuf, scounts, sdisps, datatype,
                                     recvbuf, rcounts, rdisps, recvtype,
                                     comm);
// used by comm_split to sort ranks based on key values
int smpi_compare_rankkeys(const void *a, const void *b);
int smpi_compare_rankkeys(const void *a, const void *b)
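/* qsort comparator over (rank, key) int pairs: it orders by key so that
 * SMPI_MPI_Comm_split can assign new ranks in key order; per the MPI
 * standard, ties should be broken by rank in the parent communicator. */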
int SMPI_MPI_Comm_split(MPI_Comm comm, int color, int key,
                        MPI_Comm * comm_out)
  int retval = MPI_SUCCESS;

  smpi_mpi_request_t request;

  smpi_mpi_status_t status;

  // FIXME: need to test parameters

  index = smpi_process_index();
  rank = comm->index_to_rank_map[index];

  // root node does most of the real work
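  /* Protocol sketch: every non-root rank sends its (color, key) pair to the
   * root; the root groups ranks by color, sorts each group by key, builds one
   * new communicator per group, and ships it back to the members. Everything
   * is serialized through the root (see the FIXME below about using a
   * gather instead). */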
    int colormap[comm->size];
    int keymap[comm->size];
    int rankkeymap[comm->size * 2];

    smpi_mpi_communicator_t tempcomm = NULL;

    // FIXME: use scatter/gather or similar instead of individual comms
    for (i = 1; i < comm->size; i++) {
      retval = smpi_create_request(colorkey, 2, MPI_INT, MPI_ANY_SOURCE,
                                   rank, MPI_ANY_TAG, comm, &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      colormap[status.MPI_SOURCE] = colorkey[0];
      keymap[status.MPI_SOURCE] = colorkey[1];
      xbt_mallocator_release(smpi_global->request_mallocator, request);
    }
    for (i = 0; i < comm->size; i++) {
      if (MPI_UNDEFINED == colormap[i]) {

      // make a list of nodes with the current color and sort it by key
      for (j = i; j < comm->size; j++) {
        if (colormap[i] == colormap[j]) {
          colormap[j] = MPI_UNDEFINED;
          rankkeymap[count * 2] = j;
          rankkeymap[count * 2 + 1] = keymap[j];

      qsort(rankkeymap, count, sizeof(int) * 2, &smpi_compare_rankkeys);
      tempcomm = xbt_new(s_smpi_mpi_communicator_t, 1);
      tempcomm->barrier_count = 0;
      tempcomm->size = count;
      tempcomm->barrier_mutex = SIMIX_mutex_init();
      tempcomm->barrier_cond = SIMIX_cond_init();
      tempcomm->rank_to_index_map = xbt_new(int, count);
      tempcomm->index_to_rank_map = xbt_new(int, smpi_global->process_count);
      for (j = 0; j < smpi_global->process_count; j++) {
        tempcomm->index_to_rank_map[j] = -1;
      }
      for (j = 0; j < count; j++) {
        indextmp = comm->rank_to_index_map[rankkeymap[j * 2]];
        tempcomm->rank_to_index_map[j] = indextmp;
        tempcomm->index_to_rank_map[indextmp] = j;
      }
      for (j = 0; j < count; j++) {
        if (rankkeymap[j * 2]) {
          retval = smpi_create_request(&j, 1, MPI_INT, 0,
                                       rankkeymap[j * 2], 0, comm, &request);
          request->data = tempcomm;
          smpi_mpi_isend(request);
          smpi_mpi_wait(request, &status);
          xbt_mallocator_release(smpi_global->request_mallocator, request);
        }
      }
      *comm_out = tempcomm;
    retval = smpi_create_request(colorkey, 2, MPI_INT, rank, 0, 0, comm,
                                 &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, &status);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
    if (MPI_UNDEFINED != color) {
      retval = smpi_create_request(colorkey, 1, MPI_INT, 0, rank, 0, comm,
                                   &request);
      smpi_mpi_irecv(request);
      smpi_mpi_wait(request, &status);
      *comm_out = request->data;
    }
double SMPI_MPI_Wtime(void)
  time = SIMIX_get_clock();
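  /* returns the simulated clock maintained by SIMIX, not host wall-clock time */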
int SMPI_MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                    void *recvbuf, int recvcount, MPI_Datatype recvtype,
                    int root, MPI_Comm comm)
  int retval = MPI_SUCCESS;
  int system_tag = 666;

  rank = smpi_mpi_comm_rank(comm);

  if (rank != root) {
    // send my buffer to root
    smpi_mpi_request_t request;

    retval = smpi_create_request(sendbuf, sendcount, sendtype,
                                 rank, root, system_tag, comm, &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  } else {
    // receive one buffer from each sender
    smpi_mpi_request_t *requests;

    requests = xbt_malloc((size - 1) * sizeof(smpi_mpi_request_t));
    for (src = 0; src < size; src++) {
      if (src == root) {
        // local copy from root's own send buffer
        memcpy(&((char *) recvbuf)[src * recvcount * recvtype->size],
               sendbuf, sendcount * sendtype->size * sizeof(char));
      } else {
        int index = src < root ? src : src - 1;
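        /* requests[] has only size - 1 slots since root receives nothing from
         * itself, so ranks above root are shifted down by one. */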
        retval = smpi_create_request(&((char *) recvbuf)[src * recvcount * recvtype->size],
                                     recvcount, recvtype, src, root, system_tag,
                                     comm, &requests[index]);
        if (NULL != requests[index] && MPI_SUCCESS == retval) {
          smpi_mpi_irecv(requests[index]);
        }
      }
    }
    // wait for the completion of all irecv's
    for (src = 0; src < size - 1; src++) {
      int index = MPI_UNDEFINED;
      smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
      xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
    }
int SMPI_MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                     void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype,
                     int root, MPI_Comm comm)
  int retval = MPI_SUCCESS;
  int system_tag = 666;

  rank = smpi_mpi_comm_rank(comm);

  if (rank != root) {
    // send my buffer to root
    smpi_mpi_request_t request;

    retval = smpi_create_request(sendbuf, sendcount, sendtype,
                                 rank, root, system_tag, comm, &request);
    smpi_mpi_isend(request);
    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  } else {
    // receive one buffer from each sender, at its displacement in recvbuf
    smpi_mpi_request_t *requests;

    requests = xbt_malloc((size - 1) * sizeof(smpi_mpi_request_t));
    for (src = 0; src < size; src++) {
      if (src == root) {
        // local copy from root's own send buffer
        // FIXME: MPI counts displs in units of the recvtype extent, but they
        // are used as byte offsets here
        memcpy(&((char *) recvbuf)[displs[src]],
               sendbuf, sendcount * sendtype->size * sizeof(char));
      } else {
        int index = src < root ? src : src - 1;
        retval = smpi_create_request(&((char *) recvbuf)[displs[src]],
                                     recvcounts[src], recvtype, src, root, system_tag,
                                     comm, &requests[index]);
        if (NULL != requests[index] && MPI_SUCCESS == retval) {
          smpi_mpi_irecv(requests[index]);
        }
      }
    }
    // wait for the completion of all irecv's
    for (src = 0; src < size - 1; src++) {
      int index = MPI_UNDEFINED;
      smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
      xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
    }
int SMPI_MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      int root, MPI_Comm comm)
  int retval = MPI_SUCCESS;
  int system_tag = 666;

  rank = smpi_mpi_comm_rank(comm);

  if (rank != root) {
    // receive my buffer from root
    smpi_mpi_request_t request;

    retval = smpi_create_request(recvbuf, recvcount, recvtype,
                                 root, rank, system_tag, comm, &request);
    smpi_mpi_irecv(request);
    smpi_mpi_wait(request, MPI_STATUS_IGNORE);
    xbt_mallocator_release(smpi_global->request_mallocator, request);
  } else {
    // send one buffer to each receiver, taken at its displacement in sendbuf
    smpi_mpi_request_t *requests;

    requests = xbt_malloc((size - 1) * sizeof(smpi_mpi_request_t));
    for (dst = 0; dst < size; dst++) {
      if (dst == root) {
        // local copy from root's own send buffer
        // FIXME: MPI counts displs in units of the sendtype extent, but they
        // are used as byte offsets here
        memcpy(recvbuf, &((char *) sendbuf)[displs[dst]],
               sendcounts[dst] * sendtype->size * sizeof(char));
      } else {
        int index = dst < root ? dst : dst - 1;
        retval = smpi_create_request(&((char *) sendbuf)[displs[dst]], sendcounts[dst], sendtype,
                                     root, dst, system_tag, comm, &requests[index]);
        if (NULL != requests[index] && MPI_SUCCESS == retval) {
          smpi_mpi_isend(requests[index]);
        }
      }
    }
    // wait for the completion of all isend's
    for (dst = 0; dst < size - 1; dst++) {
      int index = MPI_UNDEFINED;
      smpi_mpi_waitany(size - 1, requests, &index, MPI_STATUS_IGNORE);
      xbt_mallocator_release(smpi_global->request_mallocator, requests[index]);
    }
int SMPI_MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
                            MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  // FIXME: suboptimal implementation
  int retval = MPI_SUCCESS;

  rank = smpi_mpi_comm_rank(comm);
  displs = xbt_new(int, comm->size);
  for (i = 0; i < comm->size; i++) {
    count += recvcounts[i];

  retval = smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
  retval = SMPI_MPI_Scatterv(recvbuf, recvcounts, displs, datatype, recvbuf,
                             recvcounts[rank], datatype, root, comm);
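  /* Implemented as a full reduce to the root followed by a scatterv of the
   * result segments; for that to be correct, displs must hold the prefix sums
   * of recvcounts. A dedicated reduce-scatter algorithm would avoid
   * concentrating the whole reduced vector at the root first. */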
int SMPI_MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
  // FIXME: suboptimal implementation

  retval = SMPI_MPI_Gather(sendbuf, sendcount, sendtype,
                           recvbuf, recvcount, recvtype, root, comm);
  if (retval == MPI_SUCCESS) {
    retval = SMPI_MPI_Bcast(recvbuf, comm->size * recvcount, recvtype, root, comm);
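    /* note: the broadcast must cover the whole gathered buffer, i.e.
     * comm->size * recvcount elements, so that every member ends up with all
     * the blocks, not just its own. */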
int SMPI_MPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                        void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype,
                        MPI_Comm comm)
  // FIXME: suboptimal implementation

  retval = SMPI_MPI_Gatherv(sendbuf, sendcount, sendtype,
                            recvbuf, recvcounts, displs, recvtype, root, comm);
  if (retval == MPI_SUCCESS) {
    last = comm->size - 1;
    retval = SMPI_MPI_Bcast(recvbuf, displs[last] + recvcounts[last], recvtype, root, comm);
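    /* displs[last] + recvcounts[last] gives the total size of the gathered
     * buffer, assuming displacements increase monotonically (the usual
     * layout); with overlapping or reordered displacements it undercounts. */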