/* Copyright (c) 2007, 2008, 2009, 2010. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "private.h"   /* SMPI internals (MPI_Request, request flags, ...) used throughout */
#include "xbt/replay.h"
#include <errno.h>     /* for strerror(errno) in smpi_action_trace_run() */
#include "mc/mc.h"     /* for MC_is_active() in smpi_mpi_waitall() */
#include "simix/smx_private.h"
#include "surf/surf.h"
#include "simgrid/sg_config.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
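/* Matching callbacks handed to SIMIX below: a posted receive matches a send
 * when both source and tag agree, with MPI_ANY_SOURCE and MPI_ANY_TAG acting
 * as wildcards. For example, a receive posted with (src=MPI_ANY_SOURCE,
 * tag=42) matches a send carrying tag 42 from any rank, and the actual
 * source is then recorded in the request's real_src field. */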
static int match_recv(void* a, void* b, smx_action_t ignored) {
   MPI_Request ref = (MPI_Request)a;
   MPI_Request req = (MPI_Request)b;

   xbt_assert(ref, "Cannot match recv against null reference");
   xbt_assert(req, "Cannot match recv against null request");
   XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);

   if((ref->src == MPI_ANY_SOURCE || req->src == ref->src)
      && (ref->tag == MPI_ANY_TAG || req->tag == ref->tag)){
     //we match, we can transfer some values
     // FIXME : move this to the copy function ?
     if(ref->src == MPI_ANY_SOURCE)ref->real_src = req->src;
     if(ref->tag == MPI_ANY_TAG)ref->real_tag = req->tag;
     if(ref->real_size < req->real_size) ref->truncated = 1;
     if(req->detached==1){
       ref->detached_sender=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
     }
     return 1;
   }
   return 0;
}
static int match_send(void* a, void* b,smx_action_t ignored) {
   MPI_Request ref = (MPI_Request)a;
   MPI_Request req = (MPI_Request)b;

   xbt_assert(ref, "Cannot match send against null reference");
   xbt_assert(req, "Cannot match send against null request");
   XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);

   if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
      && (req->tag == MPI_ANY_TAG || req->tag == ref->tag)){
     if(req->src == MPI_ANY_SOURCE)req->real_src = ref->src;
     if(req->tag == MPI_ANY_TAG)req->real_tag = ref->tag;
     if(req->real_size < ref->real_size) req->truncated = 1;
     if(ref->detached==1){
       req->detached_sender=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
     }
     return 1;
   }
   return 0;
}
typedef struct s_smpi_factor *smpi_factor_t;
typedef struct s_smpi_factor {
  long factor;        // message-size threshold this entry applies up to
  int nb_values;
  double values[4];   // arbitrarily set to 4
} s_smpi_factor_t;

xbt_dynar_t smpi_os_values = NULL;
xbt_dynar_t smpi_or_values = NULL;
// Methods used to parse and store the values for timing injections in smpi
// These are taken from surf/network.c and generalized to have more factors
// These methods should be merged with those in surf/network.c (moved somewhere in xbt ?)
static int factor_cmp(const void *pa, const void *pb)
{
  /* return negative/zero/positive as a qsort comparator must, not a boolean */
  long fa = ((s_smpi_factor_t*)pa)->factor, fb = ((s_smpi_factor_t*)pb)->factor;
  return (fa > fb) - (fa < fb);
}
static xbt_dynar_t parse_factor(const char *smpi_coef_string)
{
  char *value = NULL;
  unsigned int iter = 0;
  s_smpi_factor_t fact;
  int i = 0;
  xbt_dynar_t smpi_factor, radical_elements, radical_elements2 = NULL;

  smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_t), NULL);
  radical_elements = xbt_str_split(smpi_coef_string, ";");
  xbt_dynar_foreach(radical_elements, iter, value) {
    fact.nb_values = 0;
    radical_elements2 = xbt_str_split(value, ":");
    if (xbt_dynar_length(radical_elements2) <2 || xbt_dynar_length(radical_elements2) > 5)
      xbt_die("Malformed radical for smpi factor!");
    for(i =0; i<xbt_dynar_length(radical_elements2);i++ ){
      if (i==0) {   // first field: the size threshold
        fact.factor = atol(xbt_dynar_get_as(radical_elements2, i, char *));
      } else {      // remaining fields: the associated values
        fact.values[fact.nb_values] = atof(xbt_dynar_get_as(radical_elements2, i, char *));
        fact.nb_values++;
      }
    }
    xbt_dynar_push_as(smpi_factor, s_smpi_factor_t, fact);
    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
    xbt_dynar_free(&radical_elements2);
  }
  xbt_dynar_free(&radical_elements);
  xbt_dynar_sort(smpi_factor, &factor_cmp);
  xbt_dynar_foreach(smpi_factor, iter, fact) {
    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
  }
  return smpi_factor;
}
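/* A sketch of the expected configuration syntax (the numbers here are made
 * up, not SimGrid defaults): setting "smpi/os" to
 *     "0:0:0;65536:1e-6:4e-10"
 * yields two factors; with the lookup loop of smpi_os() below, a message of
 * at most 65536 bytes costs 0 seconds of overhead while a larger one costs
 * 1e-6 + 4e-10 * size seconds. */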
static double smpi_os(double size)
{
  if (!smpi_os_values)
    smpi_os_values = parse_factor(sg_cfg_get_string("smpi/os"));

  unsigned int iter = 0;
  s_smpi_factor_t fact;
  double current = 0.0;
  xbt_dynar_foreach(smpi_os_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("os : %lf <= %ld return %f", size, fact.factor, current);
      return current;
    } else
      current = fact.values[0] + fact.values[1] * size;
  }
  XBT_DEBUG("os : %lf > %ld return %f", size, fact.factor, current);
  return current;
}
static double smpi_or(double size)
{
  if (!smpi_or_values)
    smpi_or_values = parse_factor(sg_cfg_get_string("smpi/or"));

  unsigned int iter = 0;
  s_smpi_factor_t fact;
  double current = 0.0;
  xbt_dynar_foreach(smpi_or_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("or : %lf <= %ld return %f", size, fact.factor, current);
      return current;
    } else
      current = fact.values[0] + fact.values[1] * size;
  }
  XBT_DEBUG("or : %lf > %ld return %f", size, fact.factor, current);
  return current;
}
static MPI_Request build_request(void *buf, int count,
                                 MPI_Datatype datatype, int src, int dst,
                                 int tag, MPI_Comm comm, unsigned flags)
{
  MPI_Request request;
  void *old_buf = NULL;

  request = xbt_new(s_smpi_mpi_request_t, 1);

  s_smpi_subtype_t *subtype = datatype->substruct;

  if(datatype->has_subtype == 1){
    // This part handles the problem of non-contiguous memory
    old_buf = buf;
    buf = xbt_malloc(count*smpi_datatype_size(datatype));
    if (flags & SEND) {
      subtype->serialize(old_buf, buf, count, datatype->substruct);
    }
  }

  request->buf = buf;
  // This part handles the problem of non-contiguous memory (for the
  // unserialization at the reception)
  request->old_buf = old_buf;
  request->old_type = datatype;

  request->size = smpi_datatype_size(datatype) * count;
  request->src = src;
  request->dst = dst;
  request->tag = tag;
  request->comm = comm;
  request->action = NULL;
  request->flags = flags;
  request->detached = 0;
  request->detached_sender = NULL;
  request->real_src = 0;
  request->truncated = 0;
  request->real_size = 0;
  request->real_tag = 0;
  request->refcount = 1;

  if (flags & SEND) smpi_datatype_unuse(datatype);

  return request;
}
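/* The flags combine a lifetime (PERSISTENT or NON_PERSISTENT) with a
 * direction (SEND or RECV, plus the SSEND/ISEND variants). For instance,
 * smpi_mpi_send_init() below builds a PERSISTENT | SEND request that does
 * nothing until smpi_mpi_start() fires it. */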
void smpi_empty_status(MPI_Status * status) {
  if(status != MPI_STATUS_IGNORE) {
    status->MPI_SOURCE = MPI_ANY_SOURCE;
    status->MPI_TAG = MPI_ANY_TAG;
    status->MPI_ERROR = MPI_SUCCESS;
    status->count = 0;
  }
}
void smpi_action_trace_run(char *path)
{
  char *name;
  xbt_dynar_t todo;
  xbt_dict_cursor_t cursor;

  if (path) {
    action_fp = fopen(path, "r");
    xbt_assert(action_fp != NULL, "Cannot open %s: %s", path,
               strerror(errno));
  }
  if (!xbt_dict_is_empty(action_queues)) {
    XBT_WARN
        ("Not all actions got consumed. If the simulation ended successfully (without deadlock), you may want to add new processes to your deployment file.");
    xbt_dict_foreach(action_queues, cursor, name, todo) {
      XBT_WARN("Still %lu actions for %s", xbt_dynar_length(todo), name);
    }
  }
  if (path)
    fclose(action_fp);
  xbt_dict_free(&action_queues);
  action_queues = xbt_dict_new_homogeneous(NULL);
}
static void smpi_mpi_request_free_voidp(void* request)
{
  MPI_Request req = request;
  smpi_mpi_request_free(&req);
}
/* MPI Low level calls */
MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
                               int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, PERSISTENT | SEND);

  return request;
}

MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
                                int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, PERSISTENT | SSEND | SEND);

  return request;
}

MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
                               int src, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
                    comm, PERSISTENT | RECV);

  return request;
}
void smpi_mpi_start(MPI_Request request)
{
  smx_rdv_t mailbox;

  xbt_assert(!request->action,
             "Cannot (re)start a non-finished communication");
  if(request->flags & RECV) {
    print_request("New recv", request);
    if (request->size < sg_cfg_get_int("smpi/async_small_thres"))
      mailbox = smpi_process_mailbox_small();
    else
      mailbox = smpi_process_mailbox();
    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    smpi_datatype_use(request->old_type);
    request->action = simcall_comm_irecv(mailbox, request->buf, &request->real_size, &match_recv, request);

    double sleeptime = smpi_or(request->size);
    if(sleeptime != 0.0){
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %lf ", request->size, smpi_or(request->size));
    }
  } else {
    int receiver = smpi_group_index(smpi_comm_group(request->comm), request->dst);
/*    if(receiver == MPI_UNDEFINED) {*/
/*      XBT_WARN("Trying to send a message to a wrong rank");*/
/*      return;*/
/*    }*/
    print_request("New send", request);
    if (request->size < sg_cfg_get_int("smpi/async_small_thres")) { // eager mode
      mailbox = smpi_process_remote_mailbox_small(receiver);
    }else{
      XBT_DEBUG("Send request %p is not in the permanent receive mailbox (buf: %p)",request,request->buf);
      mailbox = smpi_process_remote_mailbox(receiver);
    }
    if ( (! (request->flags & SSEND)) && (request->size < sg_cfg_get_int("smpi/send_is_detached_thres"))) {
      void *oldbuf = NULL;
      request->detached = 1;
      request->refcount++;
      if(request->old_type->has_subtype == 0){
        oldbuf = request->buf;
        if (oldbuf){
          request->buf = xbt_malloc(request->size);
          memcpy(request->buf,oldbuf,request->size);
        }
      }
      XBT_DEBUG("Send request %p is detached; buf %p copied into %p",request,oldbuf,request->buf);
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    smpi_datatype_use(request->old_type);

    double sleeptime = smpi_os(request->size);
    if(sleeptime != 0.0){
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %lf ", request->size, smpi_os(request->size));
    }

    request->action =
        simcall_comm_isend(mailbox, request->size, -1.0,
                           request->buf, request->real_size,
                           &match_send,
                           &smpi_mpi_request_free_voidp, // how to free the userdata if a detached send fails
                           request,
                           // detach if msg size < eager/rdv switch limit
                           request->detached);

    /* FIXME: detached sends are not traceable (request->action == NULL) */
    if (request->action)
      simcall_set_category(request->action, TRACE_internal_smpi_get_category());
  }
}
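/* Protocol summary for smpi_mpi_start(): messages smaller than
 * "smpi/async_small_thres" go through a dedicated small-message mailbox
 * (eager delivery), and non-synchronous sends below
 * "smpi/send_is_detached_thres" are detached: the payload is copied and
 * control returns to the sender without waiting for the matching receive;
 * anything larger behaves like a rendezvous. Both thresholds are runtime
 * options, e.g. (value made up): --cfg=smpi/async_small_thres:65536 */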
void smpi_mpi_startall(int count, MPI_Request * requests)
{
  int i;
  for(i = 0; i < count; i++) {
    smpi_mpi_start(requests[i]);
  }
}
void smpi_mpi_request_free(MPI_Request * request)
{
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount--;
    if((*request)->refcount<0) xbt_die("wrong refcount");

    if((*request)->refcount==0){
      xbt_free(*request);
      *request = MPI_REQUEST_NULL;
    }
  }else{
    xbt_die("freeing an already free request");
  }
}
MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype,
                            int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, NON_PERSISTENT | SEND);

  return request;
}

MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype,
                           int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, NON_PERSISTENT | ISEND | SEND);

  smpi_mpi_start(request);
  return request;
}

MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype,
                            int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
                    comm, NON_PERSISTENT | ISEND | SSEND | SEND);
  smpi_mpi_start(request);
  return request;
}

MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
                            int src, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
                    comm, NON_PERSISTENT | RECV);

  return request;
}

MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype,
                           int src, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf, count, datatype, src, smpi_comm_rank(comm), tag,
                    comm, NON_PERSISTENT | RECV);

  smpi_mpi_start(request);
  return request;
}

void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request;

  request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
  smpi_mpi_wait(&request, status);
}

void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm)
{
  MPI_Request request;

  request = smpi_mpi_isend(buf, count, datatype, dst, tag, comm);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
}

void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype,
                    int dst, int tag, MPI_Comm comm)
{
  MPI_Request request = smpi_mpi_issend(buf, count, datatype, dst, tag, comm);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
}
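/* All blocking point-to-point calls above are just their nonblocking
 * counterpart followed by smpi_mpi_wait(); e.g. smpi_mpi_send() is
 * smpi_mpi_isend() + smpi_mpi_wait() on the resulting request. */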
void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                       int dst, int sendtag, void *recvbuf, int recvcount,
                       MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  MPI_Request requests[2];
  MPI_Status stats[2];

  requests[0] =
      smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] =
      smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  smpi_mpi_startall(2, requests);
  smpi_mpi_waitall(2, requests, stats);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    memcpy(status, &stats[1], sizeof(MPI_Status));
  }
}
int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
{
  return status->count / smpi_datatype_size(datatype);
}
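/* status->count is filled with the received size in bytes (see
 * finish_wait() below), so dividing by the datatype size yields the
 * element count: e.g. 40 received bytes of 4-byte MPI_INTs give 10. */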
static void finish_wait(MPI_Request * request, MPI_Status * status)
{
  MPI_Request req = *request;
  // a detached send has no status to fill in and no buffer to unserialize
  if(!(req->detached && req->flags & SEND)){
    if(status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
      status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
      if(req->truncated)
        status->MPI_ERROR = MPI_ERR_TRUNCATE;
      else status->MPI_ERROR = MPI_SUCCESS;
      // this handles the case where size in receive differs from size in send
      // FIXME: really this should just contain the count of receive-type blocks,
      // right?
      status->count = req->real_size;
    }

    print_request("Finishing", req);
    MPI_Datatype datatype = req->old_type;

    if(datatype->has_subtype == 1){
      // This part handles the problem of non-contiguous memory:
      // the unserialization at the reception
      s_smpi_subtype_t *subtype = datatype->substruct;
      if(req->flags & RECV) {
        subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype), datatype->substruct);
      }
      if(req->detached == 0) free(req->buf);
    }
    smpi_datatype_unuse(datatype);
  }

  if(req->detached_sender != NULL){
    smpi_mpi_request_free(&(req->detached_sender));
  }

  if(req->flags & NON_PERSISTENT) {
    smpi_mpi_request_free(request);
  } else {
    req->action = NULL;
  }
}
int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
  int flag;
  //assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)
  if ((*request)->action == NULL)
    flag = 1;
  else
    flag = simcall_comm_test((*request)->action);
  if (flag) {
    (*request)->refcount++;
    finish_wait(request, status);
  }else{
    smpi_empty_status(status);
  }
  return flag;
}
int smpi_mpi_testany(int count, MPI_Request requests[], int *index,
                     MPI_Status * status)
{
  xbt_dynar_t comms;
  int i, flag, size;
  int* map;

  *index = MPI_UNDEFINED;
  flag = 0;
  if(count > 0) {
    // Build the array of active comms, remembering the MPI index of each
    comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
    map = xbt_new(int, count);
    size = 0;
    for(i = 0; i < count; i++) {
      if((requests[i]!=MPI_REQUEST_NULL) && requests[i]->action) {
        xbt_dynar_push(comms, &requests[i]->action);
        map[size] = i;
        size++;
      }
    }
    if(size > 0) {
      i = simcall_comm_testany(comms);
      // i is an index in comms, not MPI_UNDEFINED, as this is a simix return code
      if(i != -1) {
        *index = map[i];
        finish_wait(&requests[*index], status);
        flag = 1;
      }
    }else{
      //all requests are null or inactive, return true
      flag = 1;
      smpi_empty_status(status);
    }
    xbt_free(map);
    xbt_dynar_free(&comms);
  }
  return flag;
}
int smpi_mpi_testall(int count, MPI_Request requests[],
                     MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag = 1;
  int i;
  for(i=0; i<count; i++){
    if(requests[i] != MPI_REQUEST_NULL){
      if (smpi_mpi_test(&requests[i], pstat) != 1){
        flag = 0;
      }
    }else{
      smpi_empty_status(pstat);
    }
    if(status != MPI_STATUSES_IGNORE) {
      memcpy(&status[i], pstat, sizeof(*pstat));
    }
  }
  return flag;
}
void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  //FIXME find another way to avoid busy waiting ?
  // the issue here is that we have to wait on a nonexistent comm
  while(flag == 0){
    smpi_mpi_iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
    if (!flag)
      simcall_process_sleep(0.0001);
  }
}
void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  MPI_Request request = build_request(NULL, 0, MPI_CHAR, source, smpi_comm_rank(comm), tag,
                                      comm, NON_PERSISTENT | RECV);

  // behave like a receive, but don't do it
  smx_rdv_t mailbox;

  print_request("New iprobe", request);
  // We have to test both mailboxes as we don't know in which one the message will arrive
  if (sg_cfg_get_int("smpi/async_small_thres")>0){
    mailbox = smpi_process_mailbox_small();
    XBT_DEBUG("trying to probe the perm recv mailbox");
    request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
  }
  if (request->action==NULL){
    mailbox = smpi_process_mailbox();
    XBT_DEBUG("trying to probe the other mailbox");
    request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
  }

  if(request->action){
    MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
    *flag = 1;
    if(status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = req->src;
      status->MPI_TAG = req->tag;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = req->real_size;
    }
  }else{
    *flag = 0;
  }
  smpi_mpi_request_free(&request);
}
void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
{
  print_request("Waiting", *request);
  if ((*request)->action != NULL) { // this is not a detached send
    simcall_comm_wait((*request)->action, -1.0);
  }
  finish_wait(request, status);
  // FIXME for a detached send, finish_wait is not called:
}
int smpi_mpi_waitany(int count, MPI_Request requests[],
                     MPI_Status * status)
{
  xbt_dynar_t comms;
  int i, size, index;
  int *map;

  index = MPI_UNDEFINED;
  if(count > 0) {
    // Wait for a request to complete
    comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
    map = xbt_new(int, count);
    size = 0;
    XBT_DEBUG("Wait for one of %d", count);
    for(i = 0; i < count; i++) {
      if(requests[i] != MPI_REQUEST_NULL) {
        if (requests[i]->action != NULL) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          xbt_dynar_push(comms, &requests[i]->action);
          map[size] = i;
          size++;
        }else{
          //This is a finished detached request, let's return this one
          size = 0;//so we free the dynar but don't do the waitany call
          index = i;
          finish_wait(&requests[i], status);//cleanup if refcount = 0
          requests[i] = MPI_REQUEST_NULL;//set to null
          break;
        }
      }
    }
    if(size > 0) {
      i = simcall_comm_waitany(comms);
      // i is an index in comms, not MPI_UNDEFINED, as this is a simix return code
      if (i != -1) {
        index = map[i];
        finish_wait(&requests[index], status);
      }
    }
    xbt_free(map);
    xbt_dynar_free(&comms);
  }

  if (index==MPI_UNDEFINED)
    smpi_empty_status(status);

  return index;
}
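/* Note on the map[] indirection above: SIMIX only sees the dense dynar of
 * active comms, so the index it returns has to be translated back to the
 * caller's request array; an already-finished detached request
 * short-circuits the simcall entirely (size is forced to 0). */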
int smpi_mpi_waitall(int count, MPI_Request requests[],
                     MPI_Status status[])
{
  int index, c;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int retvalue = MPI_SUCCESS;
  //tag invalid requests in the set
  for(c = 0; c < count; c++) {
    if(requests[c]==MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL ){
      if(status != MPI_STATUSES_IGNORE)
        smpi_empty_status(&status[c]);
    }else if(requests[c]->src == MPI_PROC_NULL ){
      if(status != MPI_STATUSES_IGNORE) {
        smpi_empty_status(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for(c = 0; c < count; c++) {
    if (MC_is_active()) {
      smpi_mpi_wait(&requests[c], pstat);
      index = c;
    } else {
      index = smpi_mpi_waitany(count, requests, pstat);
      if(index == MPI_UNDEFINED)
        break;
    }
    if(status != MPI_STATUSES_IGNORE) {
      memcpy(&status[index], pstat, sizeof(*pstat));
      if(status[index].MPI_ERROR==MPI_ERR_TRUNCATE) retvalue = MPI_ERR_IN_STATUS;
    }
  }

  return retvalue;
}
int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices,
                      MPI_Status status[])
{
  int i, count, index;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  count = 0;
  for(i = 0; i < incount; i++) {
    index = smpi_mpi_waitany(incount, requests, pstat);
    if(index != MPI_UNDEFINED){
      indices[count] = index;
      count++;
      if(status != MPI_STATUSES_IGNORE) {
        memcpy(&status[index], pstat, sizeof(*pstat));
      }
    }else{
      return MPI_UNDEFINED;
    }
  }
  return count;
}
int smpi_mpi_testsome(int incount, MPI_Request requests[], int *indices,
                      MPI_Status status[])
{
  int i, count, count_dead;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  count = 0;
  count_dead = 0;
  for(i = 0; i < incount; i++) {
    if((requests[i] != MPI_REQUEST_NULL)) {
      if(smpi_mpi_test(&requests[i], pstat)) {
        indices[count] = i;
        count++;
        if(status != MPI_STATUSES_IGNORE) {
          memcpy(&status[i], pstat, sizeof(*pstat));
        }
      }
    }else{
      count_dead++;
    }
  }
  if(count_dead==incount) return MPI_UNDEFINED;
  else return count;
}
void smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                    MPI_Comm comm)
{
  // arity=2: a binary tree; arity=4 seems to be a good setting (see P2P-MPI)
  nary_tree_bcast(buf, count, datatype, root, comm, 4);
}

void smpi_mpi_barrier(MPI_Comm comm)
{
  // arity=2: a binary tree; arity=4 seems to be a good setting (see P2P-MPI)
  nary_tree_barrier(comm, 4);
}
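/* With arity 4 and, say, 16 ranks, the root only sends to 4 children (each
 * forwarding to up to 4 more) instead of performing 15 sequential sends as
 * a flat broadcast would. */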
void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype,
                     int root, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, src, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype,
                       (char *)recvbuf + root * recvcount * recvext, recvcount, recvtype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        requests[index] = smpi_irecv_init((char *)recvbuf + src * recvcount * recvext,
                                          recvcount, recvtype,
                                          src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
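/* Layout of the linear gather above: rank src's block lands at
 * recvbuf + src * recvcount * recvext. E.g. (made-up sizes) with
 * recvcount = 4 and a 4-byte recvtype, rank 2's data starts at byte 32. */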
void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int *recvcounts, int *displs,
                      MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, src, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype,
                       (char *)recvbuf + displs[root] * recvext,
                       recvcounts[root], recvtype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        requests[index] =
            smpi_irecv_init((char *)recvbuf + displs[src] * recvext,
                            recvcounts[src], recvtype, src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_allgather(void *sendbuf, int sendcount,
                        MPI_Datatype sendtype, void *recvbuf,
                        int recvcount, MPI_Datatype recvtype,
                        MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, other, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  // FIXME: check for errors
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype,
                     (char *)recvbuf + rank * recvcount * recvext, recvcount,
                     recvtype);
  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, 2 * (size - 1));
  index = 0;
  for(other = 0; other < size; other++) {
    if(other != rank) {
      requests[index] =
          smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
                          comm);
      index++;
      requests[index] = smpi_irecv_init((char *)recvbuf + other * recvcount * recvext,
                                        recvcount, recvtype, other,
                                        system_tag, comm);
      index++;
    }
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
void smpi_mpi_allgatherv(void *sendbuf, int sendcount,
                         MPI_Datatype sendtype, void *recvbuf,
                         int *recvcounts, int *displs,
                         MPI_Datatype recvtype, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, other, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  // FIXME: check for errors
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype,
                     (char *)recvbuf + displs[rank] * recvext,
                     recvcounts[rank], recvtype);
  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, 2 * (size - 1));
  index = 0;
  for(other = 0; other < size; other++) {
    if(other != rank) {
      requests[index] =
          smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
                          comm);
      index++;
      requests[index] =
          smpi_irecv_init((char *)recvbuf + displs[other] * recvext, recvcounts[other],
                          recvtype, other, system_tag, comm);
      index++;
    }
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      int root, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, dst, index;
  MPI_Aint lb = 0, sendext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
                  MPI_STATUS_IGNORE);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
                       sendcount, sendtype, recvbuf, recvcount, recvtype);
    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(dst = 0; dst < size; dst++) {
      if(dst != root) {
        requests[index] = smpi_isend_init((char *)sendbuf + dst * sendcount * sendext,
                                          sendcount, sendtype, dst,
                                          system_tag, comm);
        index++;
      }
    }
    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs,
                       MPI_Datatype sendtype, void *recvbuf, int recvcount,
                       MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, dst, index;
  MPI_Aint lb = 0, sendext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
                  MPI_STATUS_IGNORE);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
                       sendtype, recvbuf, recvcount, recvtype);
    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(dst = 0; dst < size; dst++) {
      if(dst != root) {
        requests[index] =
            smpi_isend_init((char *)sendbuf + displs[dst] * sendext, sendcounts[dst],
                            sendtype, dst, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
                     MPI_Datatype datatype, MPI_Op op, int root,
                     MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, src, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;
  void **tmpbufs;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, count, datatype, root, system_tag, comm);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(datatype, &lb, &dataext);
    // Local copy from root
    if (sendbuf && recvbuf)
      smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
    // Receive buffers from senders
    //TODO: make a MPI_barrier here ?
    requests = xbt_new(MPI_Request, size - 1);
    tmpbufs = xbt_new(void *, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        // FIXME: possibly overkill when we have contiguous/noncontiguous data
        tmpbufs[index] = xbt_malloc(count * dataext);
        requests[index] =
            smpi_irecv_init(tmpbufs[index], count, datatype, src,
                            system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    for(src = 0; src < size - 1; src++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      XBT_DEBUG("finished waiting any request with index %d", index);
      if(index == MPI_UNDEFINED) {
        break;
      }
      if(op) /* op can be MPI_OP_NULL that does nothing */
        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    }
    for(index = 0; index < size - 1; index++) {
      xbt_free(tmpbufs[index]);
    }
    xbt_free(tmpbufs);
    xbt_free(requests);
  }
}
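/* Caveat: the loop above applies op in completion order (whatever
 * smpi_mpi_waitany() returns first), which is only valid for commutative
 * operations; a non-commutative op would require combining the
 * contributions in fixed rank order. */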
void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count,
                        MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
  smpi_mpi_bcast(recvbuf, count, datatype, 0, comm);
}
void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count,
                   MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = 666;
  int rank, size, other, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;
  void **tmpbufs;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

  // FIXME: check for errors
  smpi_datatype_extent(datatype, &lb, &dataext);

  // Local copy from self
  smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, size - 1);
  tmpbufs = xbt_new(void *, rank);
  index = 0;
  for(other = 0; other < rank; other++) {
    // FIXME: possibly overkill when we have contiguous/noncontiguous data
    tmpbufs[index] = xbt_malloc(count * dataext);
    requests[index] =
        smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,
                        comm);
    index++;
  }
  for(other = rank + 1; other < size; other++) {
    requests[index] =
        smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(size - 1, requests);
  for(other = 0; other < size - 1; other++) {
    index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
    if(index == MPI_UNDEFINED) {
      break;
    }
    if(index < rank) {
      // request index is below rank: it's an irecv
      smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    }
  }
  for(index = 0; index < rank; index++) {
    xbt_free(tmpbufs[index]);
  }
  xbt_free(tmpbufs);
  xbt_free(requests);
}
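/* smpi_mpi_scan() implements an inclusive prefix reduction: rank r receives
 * the contributions of ranks 0..r-1 and folds them into its own recvbuf,
 * while its sendbuf goes out to ranks r+1..size-1. */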