/* Copyright (c) 2007, 2008, 2009, 2010. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "xbt/replay.h"
#include "simix/smx_private.h"
#include "surf/surf.h"
#include "simgrid/sg_config.h"
#include "colls/colls.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
static int match_recv(void* a, void* b, smx_action_t ignored) {
  MPI_Request ref = (MPI_Request)a;
  MPI_Request req = (MPI_Request)b;
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d", ref->src, req->src, ref->tag, req->tag);

  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  if((ref->src == MPI_ANY_SOURCE || req->src == ref->src)
     && ((ref->tag == MPI_ANY_TAG && req->tag >= 0) || req->tag == ref->tag)){
    //we match, we can transfer some values
    // FIXME: move this to the copy function?
    if(ref->src == MPI_ANY_SOURCE) ref->real_src = req->src;
    if(ref->tag == MPI_ANY_TAG) ref->real_tag = req->tag;
    if(ref->real_size < req->real_size) ref->truncated = 1;
    ref->detached_sender = req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
static int match_send(void* a, void* b, smx_action_t ignored) {
  MPI_Request ref = (MPI_Request)a;
  MPI_Request req = (MPI_Request)b;
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d", ref->src, req->src, ref->tag, req->tag);
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");

  if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
     && ((req->tag == MPI_ANY_TAG && ref->tag >= 0) || req->tag == ref->tag)){
    if(req->src == MPI_ANY_SOURCE) req->real_src = ref->src;
    if(req->tag == MPI_ANY_TAG) req->real_tag = ref->tag;
    if(req->real_size < ref->real_size) req->truncated = 1;
    req->detached_sender = ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
    XBT_DEBUG("match succeeded");
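/* Both matching callbacks above implement MPI envelope matching: a receive and
 * a send match when their sources agree (or the receive used MPI_ANY_SOURCE)
 * and their tags agree (or MPI_ANY_TAG was used, which only matches
 * non-negative, i.e. user-level, tags). On a match the wildcard fields are
 * resolved into real_src/real_tag and a too-short receive buffer is flagged as
 * truncated. */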
typedef struct s_smpi_factor *smpi_factor_t;
typedef struct s_smpi_factor {
  long factor;
  int nb_values;
  double values[4]; //arbitrarily set to 4
} s_smpi_factor_t;

xbt_dynar_t smpi_os_values = NULL;
xbt_dynar_t smpi_or_values = NULL;
xbt_dynar_t smpi_ois_values = NULL;
// Methods used to parse and store the values for timing injections in smpi
// These are taken from surf/network.c and generalized to have more factors
// These methods should be merged with those in surf/network.c (moved somewhere in xbt?)
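// A factor string, as parsed below, is a ';'-separated list of brackets, each
// of the form "factor:value1[:value2[:value3[:value4]]]" (2 to 5 ':'-separated
// fields): the first field is a message-size threshold, the remaining ones are
// the coefficients attached to it. For instance (illustrative values only),
// "0:0:0;65536:1e-6:5e-10" defines two brackets with thresholds 0 and 65536.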
static int factor_cmp(const void *pa, const void *pb)
{
  return (((s_smpi_factor_t*)pa)->factor > ((s_smpi_factor_t*)pb)->factor);
}

static xbt_dynar_t parse_factor(const char *smpi_coef_string)
{
  char *value = NULL;
  unsigned int iter = 0;
  unsigned int i = 0;
  s_smpi_factor_t fact;
  xbt_dynar_t smpi_factor, radical_elements, radical_elements2 = NULL;

  smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_t), NULL);
  radical_elements = xbt_str_split(smpi_coef_string, ";");
  xbt_dynar_foreach(radical_elements, iter, value) {
    fact.nb_values = 0;
    radical_elements2 = xbt_str_split(value, ":");
    if (xbt_dynar_length(radical_elements2) < 2 || xbt_dynar_length(radical_elements2) > 5)
      xbt_die("Malformed radical for smpi factor!");
    for(i = 0; i < xbt_dynar_length(radical_elements2); i++){
      if (i == 0) {
        fact.factor = atol(xbt_dynar_get_as(radical_elements2, i, char *));
      } else {
        fact.values[fact.nb_values] = atof(xbt_dynar_get_as(radical_elements2, i, char *));
        fact.nb_values++;
      }
    }
    xbt_dynar_push_as(smpi_factor, s_smpi_factor_t, fact);
    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values, fact.values[0]);
    xbt_dynar_free(&radical_elements2);
  }
  xbt_dynar_free(&radical_elements);
  xbt_dynar_sort(smpi_factor, &factor_cmp);
  xbt_dynar_foreach(smpi_factor, iter, fact) {
    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values, fact.values[0]);
  }
  return smpi_factor;
}
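// The dynar returned by parse_factor is sorted by increasing size threshold.
// The smpi_os/smpi_ois/smpi_or helpers below walk it and inject a time of the
// form values[0] + values[1] * size, using the coefficients of the last
// bracket whose threshold is still below the message size.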
static double smpi_os(double size)
{
  if (!smpi_os_values) {
    smpi_os_values = parse_factor(sg_cfg_get_string("smpi/os"));
    smpi_register_static(smpi_os_values, xbt_dynar_free_voidp);
  }
  unsigned int iter = 0;
  s_smpi_factor_t fact;
  double current = 0.0;
  xbt_dynar_foreach(smpi_os_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("os : %lf <= %ld return %f", size, fact.factor, current);
      return current;
    } else {
      current = fact.values[0] + fact.values[1] * size;
    }
  }
  XBT_DEBUG("os : %lf > %ld return %f", size, fact.factor, current);

  return current;
}
static double smpi_ois(double size)
{
  if (!smpi_ois_values) {
    smpi_ois_values = parse_factor(sg_cfg_get_string("smpi/ois"));
    smpi_register_static(smpi_ois_values, xbt_dynar_free_voidp);
  }
  unsigned int iter = 0;
  s_smpi_factor_t fact;
  double current = 0.0;
  xbt_dynar_foreach(smpi_ois_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("ois : %lf <= %ld return %f", size, fact.factor, current);
      return current;
    } else {
      current = fact.values[0] + fact.values[1] * size;
    }
  }
  XBT_DEBUG("ois : %lf > %ld return %f", size, fact.factor, current);

  return current;
}
static double smpi_or(double size)
{
  if (!smpi_or_values) {
    smpi_or_values = parse_factor(sg_cfg_get_string("smpi/or"));
    smpi_register_static(smpi_or_values, xbt_dynar_free_voidp);
  }
  unsigned int iter = 0;
  s_smpi_factor_t fact;
  double current = 0.0;
  xbt_dynar_foreach(smpi_or_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("or : %lf <= %ld return %f", size, fact.factor, current);
      return current;
    } else {
      current = fact.values[0] + fact.values[1] * size;
    }
  }
  XBT_DEBUG("or : %lf > %ld return %f", size, fact.factor, current);

  return current;
}
static MPI_Request build_request(void *buf, int count,
                                 MPI_Datatype datatype, int src, int dst,
                                 int tag, MPI_Comm comm, unsigned flags)
{
  MPI_Request request;
  void *old_buf = NULL;

  request = xbt_new(s_smpi_mpi_request_t, 1);

  s_smpi_subtype_t *subtype = datatype->substruct;

  if(datatype->has_subtype == 1){
    // This part handles the problem of non-contiguous memory
    old_buf = buf;
    buf = count == 0 ? NULL : xbt_malloc(count * smpi_datatype_size(datatype));
    subtype->serialize(old_buf, buf, count, datatype->substruct);
  }

  request->buf = buf;
  // This part handles the problem of non-contiguous memory (for the
  // deserialization at reception)
  request->old_buf = old_buf;
  request->old_type = datatype;

  request->size = smpi_datatype_size(datatype) * count;
  request->src = src;
  request->dst = dst;
  request->tag = tag;
  request->comm = comm;
  request->action = NULL;
  request->flags = flags;
  request->detached = 0;
  request->detached_sender = NULL;
  request->truncated = 0;
  request->real_size = 0;
  request->real_tag = 0;
  request->refcount = 1;

  if (flags & SEND) smpi_datatype_unuse(datatype);

  return request;
}
void smpi_empty_status(MPI_Status * status)
{
  if(status != MPI_STATUS_IGNORE) {
    status->MPI_SOURCE = MPI_ANY_SOURCE;
    status->MPI_TAG = MPI_ANY_TAG;
    status->MPI_ERROR = MPI_SUCCESS;
  }
}
void smpi_action_trace_run(char *path)
{
  char *name;
  xbt_dynar_t todo;
  xbt_dict_cursor_t cursor;

  action_fp = fopen(path, "r");
  xbt_assert(action_fp != NULL, "Cannot open %s: %s", path,
             strerror(errno));

  if (!xbt_dict_is_empty(action_queues)) {
    XBT_WARN
        ("Not all actions got consumed. If the simulation ended successfully (without deadlock), you may want to add new processes to your deployment file.");

    xbt_dict_foreach(action_queues, cursor, name, todo) {
      XBT_WARN("Still %lu actions for %s", xbt_dynar_length(todo), name);
    }
  }

  xbt_dict_free(&action_queues);
  action_queues = xbt_dict_new_homogeneous(NULL);
}
static void smpi_mpi_request_free_voidp(void* request)
{
  MPI_Request req = request;
  smpi_mpi_request_free(&req);
}
/* MPI Low level calls */
MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
                               int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                    smpi_group_index(smpi_comm_group(comm), dst), tag,
                    comm, PERSISTENT | SEND | PREPARED);
  return request;
}
MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
                                int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                    smpi_group_index(smpi_comm_group(comm), dst), tag,
                    comm, PERSISTENT | SSEND | SEND | PREPARED);
  return request;
}
MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
                               int src, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype,
                    src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                    smpi_process_index(), tag,
                    comm, PERSISTENT | RECV | PREPARED);
  return request;
}
void smpi_mpi_start(MPI_Request request)
{
  smx_rdv_t mailbox;

  xbt_assert(!request->action,
             "Cannot (re)start a non-finished communication");
  if(request->flags & PREPARED) request->flags &= ~PREPARED;
  if(request->flags & RECV) {
    print_request("New recv", request);
    if (request->size < sg_cfg_get_int("smpi/async_small_thres"))
      mailbox = smpi_process_mailbox_small();
    else
      mailbox = smpi_process_mailbox();
    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    smpi_datatype_use(request->old_type);
    smpi_comm_use(request->comm);
    request->action = simcall_comm_irecv(mailbox, request->buf, &request->real_size, &match_recv, request);

    //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    double sleeptime = request->detached ? smpi_or(request->size) : 0.0;
    if(sleeptime != 0.0){
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %lf ", request->size, smpi_or(request->size));
    }
  } else { // the request is a send
    int receiver = request->dst; //smpi_group_index(smpi_comm_group(request->comm), request->dst);

    int rank = smpi_process_index();
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, receiver);
    }
/*    if(receiver == MPI_UNDEFINED) {*/
/*      XBT_WARN("Trying to send a message to a wrong rank");*/
    print_request("New send", request);
    if (request->size < sg_cfg_get_int("smpi/async_small_thres")) { // eager mode
      mailbox = smpi_process_remote_mailbox_small(receiver);
    } else {
      XBT_DEBUG("Send request %p is not in the permanent receive mailbox (buf: %p)", request, request->buf);
      mailbox = smpi_process_remote_mailbox(receiver);
    }
    if ((!(request->flags & SSEND)) && (request->size < sg_cfg_get_int("smpi/send_is_detached_thres"))) {
      void *oldbuf = NULL;
      request->detached = 1;
      if(request->old_type->has_subtype == 0){
        oldbuf = request->buf;
        if (oldbuf && request->size != 0){
          request->buf = xbt_malloc(request->size);
          memcpy(request->buf, oldbuf, request->size);
        }
      }
      XBT_DEBUG("Send request %p is detached; buf %p copied into %p", request, oldbuf, request->buf);
    }

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    smpi_datatype_use(request->old_type);
    smpi_comm_use(request->comm);

    //if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if(request->detached || (request->flags & (ISEND|SSEND))){ // issend should be treated as isend
      //isend and send timings may be different
      sleeptime = (request->flags & ISEND) ? smpi_ois(request->size) : smpi_os(request->size);
    }
    if(sleeptime != 0.0){
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %lf ", request->size, smpi_os(request->size));
    }

    request->action =
        simcall_comm_isend(mailbox, request->size, -1.0,
                           request->buf, request->real_size,
                           &match_send,
                           &smpi_mpi_request_free_voidp, // how to free the userdata if a detached send fails
                           request,
                           // detach if msg size < eager/rdv switch limit
                           request->detached);

    /* FIXME: detached sends are not traceable (request->action == NULL) */
    if (request->action)
      simcall_set_category(request->action, TRACE_internal_smpi_get_category());
  }
}
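/* Note on the send path of smpi_mpi_start: messages smaller than
 * smpi/async_small_thres go through the permanent "small" mailbox of the
 * receiver (eager-style delivery); non-synchronous sends below
 * smpi/send_is_detached_thres are detached, i.e. the user buffer is copied so
 * that control can return immediately; smpi_os/smpi_ois model the sender-side
 * overhead as a sleep before the actual isend simcall. */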
void smpi_mpi_startall(int count, MPI_Request * requests)
{
  int i;

  for(i = 0; i < count; i++) {
    smpi_mpi_start(requests[i]);
  }
}
void smpi_mpi_request_free(MPI_Request * request)
{
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount--;
    if((*request)->refcount < 0) xbt_die("wrong refcount");

    if((*request)->refcount == 0){
      print_request("Destroying", (*request));
      *request = MPI_REQUEST_NULL;
    }else{
      print_request("Decrementing", (*request));
    }
  }else{
    xbt_die("freeing an already free request");
  }
}
MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype,
                            int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                    smpi_group_index(smpi_comm_group(comm), dst), tag,
                    comm, NON_PERSISTENT | SEND | PREPARED);
  return request;
}
MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype,
                           int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                    smpi_group_index(smpi_comm_group(comm), dst), tag,
                    comm, NON_PERSISTENT | ISEND | SEND);

  smpi_mpi_start(request);
  return request;
}
MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype,
                            int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                    smpi_group_index(smpi_comm_group(comm), dst), tag,
                    comm, NON_PERSISTENT | ISEND | SSEND | SEND);
  smpi_mpi_start(request);
  return request;
}
MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
                            int src, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype,
                    src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                    smpi_process_index(), tag,
                    comm, NON_PERSISTENT | RECV | PREPARED);
  return request;
}
MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype,
                           int src, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype,
                    src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                    smpi_process_index(), tag,
                    comm, NON_PERSISTENT | RECV);

  smpi_mpi_start(request);
  return request;
}
void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Status * status)
{
  MPI_Request request;

  request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
  smpi_mpi_wait(&request, status);
}
void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                    smpi_group_index(smpi_comm_group(comm), dst), tag,
                    comm, NON_PERSISTENT | SEND);
  smpi_mpi_start(request);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
}
void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype,
                    int dst, int tag, MPI_Comm comm)
{
  MPI_Request request =
      build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                    smpi_group_index(smpi_comm_group(comm), dst), tag,
                    comm, NON_PERSISTENT | SSEND | SEND);

  smpi_mpi_start(request);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
}
void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                       int dst, int sendtag, void *recvbuf, int recvcount,
                       MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
{
  MPI_Request requests[2];
  MPI_Status stats[2];
  int myid = smpi_process_index();
  if ((smpi_group_index(smpi_comm_group(comm), dst) == myid) && (smpi_group_index(smpi_comm_group(comm), src) == myid)) {
    smpi_datatype_copy(sendbuf, sendcount, sendtype,
                       recvbuf, recvcount, recvtype);
    return;
  }
  requests[0] =
      smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] =
      smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  smpi_mpi_startall(2, requests);
  smpi_mpi_waitall(2, requests, stats);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status
    *status = stats[1];
  }
}
int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
{
  return status->count / smpi_datatype_size(datatype);
}
static void finish_wait(MPI_Request * request, MPI_Status * status)
{
  MPI_Request req = *request;
  if(status != MPI_STATUS_IGNORE)
    smpi_empty_status(status);

  if(!(req->detached && req->flags & SEND) && !(req->flags & PREPARED)){
    if(status != MPI_STATUS_IGNORE) {
      int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
      status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src);
      status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
      status->MPI_ERROR = req->truncated ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size in the receive differs from the size in the send
      // FIXME: really this should just contain the count of receive-type blocks
      status->count = req->real_size;
    }

    print_request("Finishing", req);
    MPI_Datatype datatype = req->old_type;

    if(datatype->has_subtype == 1){
      // This part handles the problem of non-contiguous memory:
      // the deserialization at reception
      s_smpi_subtype_t *subtype = datatype->substruct;
      if(req->flags & RECV) {
        subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype), datatype->substruct);
      }
      if(req->detached == 0) free(req->buf);
    }
    smpi_comm_unuse(req->comm);
    smpi_datatype_unuse(datatype);
  }

  if (TRACE_smpi_view_internals()) {
    if(req->flags & RECV){
      int rank = smpi_process_index();
      int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
      TRACE_smpi_recv(rank, src_traced, rank);
    }
  }

  if(req->detached_sender != NULL){
    smpi_mpi_request_free(&(req->detached_sender));
  }
  if(req->flags & NON_PERSISTENT) {
    smpi_mpi_request_free(request);
  }
}
int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
  int flag;

  //assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)
  if ((*request)->action == NULL)
    flag = 1;
  else
    flag = simcall_comm_test((*request)->action);
  if (flag) {
    finish_wait(request, status);
    request = MPI_REQUEST_NULL;
  } else {
    smpi_empty_status(status);
  }
  return flag;
}
int smpi_mpi_testany(int count, MPI_Request requests[], int *index,
                     MPI_Status * status)
{
  xbt_dynar_t comms;
  int i, flag = 0, size = 0;
  int *map;

  *index = MPI_UNDEFINED;

  comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
  map = xbt_new(int, count);

  for(i = 0; i < count; i++) {
    if((requests[i] != MPI_REQUEST_NULL) && requests[i]->action) {
      xbt_dynar_push(comms, &requests[i]->action);
      map[size++] = i;
    }
  }
  if(size > 0){
    i = simcall_comm_testany(comms);
    // not MPI_UNDEFINED, as this is a simix return code
    if(i != -1){
      *index = map[i];
      finish_wait(&requests[*index], status);
      flag = 1;
    }
  }else{
    //all requests are null or inactive, return true
    flag = 1;
    smpi_empty_status(status);
  }
  xbt_free(map);
  xbt_dynar_free(&comms);

  return flag;
}
int smpi_mpi_testall(int count, MPI_Request requests[],
                     MPI_Status status[])
{
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int flag = 1;
  int i;

  for(i = 0; i < count; i++){
    if(requests[i] != MPI_REQUEST_NULL){
      if (smpi_mpi_test(&requests[i], pstat) != 1){
        flag = 0;
      }
    }else{
      smpi_empty_status(pstat);
    }
    if(status != MPI_STATUSES_IGNORE) {
      status[i] = *pstat;
    }
  }
  return flag;
}
void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  int flag = 0;
  //FIXME: find another way to avoid busy waiting?
  // the issue here is that we have to wait on a nonexistent comm
  while (flag == 0) {
    smpi_mpi_iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
  }
}
void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  smx_rdv_t mailbox;

  MPI_Request request = build_request(NULL, 0, MPI_CHAR,
                                      source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), source),
                                      smpi_comm_rank(comm), tag,
                                      comm, NON_PERSISTENT | RECV);

  //to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  double sleeptime = sg_cfg_get_double("smpi/iprobe");
  //multiplier for the sleep time; each failed iprobe increases it, to speed up execution
  static int nsleeps = 1;

  simcall_process_sleep(sleeptime);

  // behave like a receive, but don't do it
  print_request("New iprobe", request);
  // We have to test both mailboxes as we don't know if we will receive on one or the other
  if (sg_cfg_get_int("smpi/async_small_thres") > 0){
    mailbox = smpi_process_mailbox_small();
    XBT_DEBUG("trying to probe the perm recv mailbox");
    request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
  }
  if (request->action == NULL){
    mailbox = smpi_process_mailbox();
    XBT_DEBUG("trying to probe the other mailbox");
    request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
  }

  if(request->action){
    MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
    *flag = 1;
    if(status != MPI_STATUS_IGNORE && !(req->flags & PREPARED)) {
      status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
      status->MPI_TAG = req->tag;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = req->real_size;
    }
    nsleeps = 1; //reset the number of sleeps we will do next time
  }else{
    *flag = 0;
    nsleeps++;
  }
  smpi_mpi_request_free(&request);
}
void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
{
  print_request("Waiting", *request);
  if ((*request)->action != NULL) { // this is not a detached send
    simcall_comm_wait((*request)->action, -1.0);
  }
  finish_wait(request, status);

  // FIXME: for a detached send, finish_wait is not called
}
int smpi_mpi_waitany(int count, MPI_Request requests[],
                     MPI_Status * status)
{
  xbt_dynar_t comms;
  int i, size, index;
  int *map;

  index = MPI_UNDEFINED;

  // Wait for a request to complete
  comms = xbt_dynar_new(sizeof(smx_action_t), NULL);
  map = xbt_new(int, count);
  size = 0;
  XBT_DEBUG("Wait for one of %d", count);
  for(i = 0; i < count; i++) {
    if(requests[i] != MPI_REQUEST_NULL) {
      if (requests[i]->action != NULL) {
        XBT_DEBUG("Waiting any %p ", requests[i]);
        xbt_dynar_push(comms, &requests[i]->action);
        map[size++] = i;
      }else{
        //This is a finished detached request, let's return this one
        size = 0; //so we free the dynar but don't do the waitany call
        index = i;
        finish_wait(&requests[i], status); //cleanup if refcount = 0
        requests[i] = MPI_REQUEST_NULL; //set to null
        break;
      }
    }
  }
  if(size > 0) {
    i = simcall_comm_waitany(comms);

    // not MPI_UNDEFINED, as this is a simix return code
    if (i != -1) {
      index = map[i];
      finish_wait(&requests[index], status);
    }
  }

  xbt_free(map);
  xbt_dynar_free(&comms);

  if (index == MPI_UNDEFINED)
    smpi_empty_status(status);

  return index;
}
int smpi_mpi_waitall(int count, MPI_Request requests[],
                     MPI_Status status[])
{
  int index, c;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int retvalue = MPI_SUCCESS;
  //tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL) {
        smpi_empty_status(&status[c]);
      } else if (requests[c]->src == MPI_PROC_NULL) {
        smpi_empty_status(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;
      }
    }
  }
  for(c = 0; c < count; c++) {
    if (MC_is_active()) {
      smpi_mpi_wait(&requests[c], pstat);
      index = c;
    } else {
      index = smpi_mpi_waitany(count, requests, pstat);
      if (index == MPI_UNDEFINED)
        break;
      requests[index] = MPI_REQUEST_NULL;
    }
    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;
    }
  }

  return retvalue;
}
int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices,
                      MPI_Status status[])
{
  int i, count, index;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  count = 0;
  for(i = 0; i < incount; i++)
  {
    index = smpi_mpi_waitany(incount, requests, pstat);
    if(index != MPI_UNDEFINED){
      indices[count] = index;
      count++;
      if(status != MPI_STATUSES_IGNORE) {
        status[index] = *pstat;
      }
      requests[index] = MPI_REQUEST_NULL;
    }else{
      return MPI_UNDEFINED;
    }
  }
  return count;
}
int smpi_mpi_testsome(int incount, MPI_Request requests[], int *indices,
                      MPI_Status status[])
{
  int i, count, count_dead;
  MPI_Status stat;
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  count = 0;
  count_dead = 0;
  for(i = 0; i < incount; i++) {
    if((requests[i] != MPI_REQUEST_NULL)) {
      if(smpi_mpi_test(&requests[i], pstat)) {
        indices[count] = i;
        count++;
        if(status != MPI_STATUSES_IGNORE) {
          status[i] = *pstat;
        }
        requests[i] = MPI_REQUEST_NULL;
      }
    }else{
      count_dead++;
    }
  }
  if(count_dead == incount) return MPI_UNDEFINED;
  else return count;
}
void smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
                    MPI_Comm comm)
{
  // arity=2: a binary tree; arity=4 seems to be a good setting (see P2P-MPI)
  nary_tree_bcast(buf, count, datatype, root, comm, 4);
}

void smpi_mpi_barrier(MPI_Comm comm)
{
  // arity=2: a binary tree; arity=4 seems to be a good setting (see P2P-MPI)
  nary_tree_barrier(comm, 4);
}
void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype,
                     int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_GATHER;
  int rank, size, src, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype,
                       (char *)recvbuf + root * recvcount * recvext, recvcount, recvtype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        requests[index] = smpi_irecv_init((char *)recvbuf + src * recvcount * recvext,
                                          recvcount, recvtype,
                                          src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
                             MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int i, size, count = 0;
  int *displs;
  int rank = smpi_process_index();
  void *tmpbuf;

  /* arbitrarily choose root as rank 0 */
  size = smpi_comm_size(comm);
  displs = xbt_new(int, size);
  for (i = 0; i < size; i++) {
    displs[i] = count;
    count += recvcounts[i];
  }
  tmpbuf = (void*)xbt_malloc(count * smpi_datatype_get_extent(datatype));
  mpi_coll_reduce_fun(sendbuf, tmpbuf, count, datatype, op, 0, comm);
  smpi_mpi_scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf,
                    recvcounts[rank], datatype, 0, comm);
  xbt_free(displs);
  xbt_free(tmpbuf);
}
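// Note: this reduce_scatter is implemented naively: everything is first
// reduced to rank 0 (the arbitrarily chosen root), then the per-rank pieces
// are spread back with smpi_mpi_scatterv using displacements derived from
// recvcounts.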
void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int *recvcounts, int *displs,
                      MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_GATHERV;
  int rank, size, src, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype,
                       (char *)recvbuf + displs[root] * recvext,
                       recvcounts[root], recvtype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        requests[index] =
            smpi_irecv_init((char *)recvbuf + displs[src] * recvext,
                            recvcounts[src], recvtype, src, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_allgather(void *sendbuf, int sendcount,
                        MPI_Datatype sendtype, void *recvbuf,
                        int recvcount, MPI_Datatype recvtype,
                        MPI_Comm comm)
{
  int system_tag = COLL_TAG_ALLGATHER;
  int rank, size, other, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  // FIXME: check for errors
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype,
                     (char *)recvbuf + rank * recvcount * recvext, recvcount,
                     recvtype);
  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, 2 * (size - 1));
  index = 0;
  for(other = 0; other < size; other++) {
    if(other != rank) {
      requests[index] =
          smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
                          comm);
      index++;
      requests[index] = smpi_irecv_init((char *)recvbuf + other * recvcount * recvext,
                                        recvcount, recvtype, other,
                                        system_tag, comm);
      index++;
    }
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
void smpi_mpi_allgatherv(void *sendbuf, int sendcount,
                         MPI_Datatype sendtype, void *recvbuf,
                         int *recvcounts, int *displs,
                         MPI_Datatype recvtype, MPI_Comm comm)
{
  int system_tag = COLL_TAG_ALLGATHERV;
  int rank, size, other, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  // FIXME: check for errors
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype,
                     (char *)recvbuf + displs[rank] * recvext,
                     recvcounts[rank], recvtype);
  // Send buffers to others
  requests = xbt_new(MPI_Request, 2 * (size - 1));
  index = 0;
  for(other = 0; other < size; other++) {
    if(other != rank) {
      requests[index] =
          smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
                          comm);
      index++;
      requests[index] =
          smpi_irecv_init((char *)recvbuf + displs[other] * recvext, recvcounts[other],
                          recvtype, other, system_tag, comm);
      index++;
    }
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
  xbt_free(requests);
}
void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_SCATTER;
  int rank, size, dst, index;
  MPI_Aint lb = 0, sendext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
                  MPI_STATUS_IGNORE);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
                       sendcount, sendtype, recvbuf, recvcount, recvtype);
    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(dst = 0; dst < size; dst++) {
      if(dst != root) {
        requests[index] = smpi_isend_init((char *)sendbuf + dst * sendcount * sendext,
                                          sendcount, sendtype, dst,
                                          system_tag, comm);
        index++;
      }
    }
    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs,
                       MPI_Datatype sendtype, void *recvbuf, int recvcount,
                       MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int system_tag = COLL_TAG_SCATTERV;
  int rank, size, dst, index;
  MPI_Aint lb = 0, sendext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  if(rank != root) {
    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
                  MPI_STATUS_IGNORE);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
                       sendtype, recvbuf, recvcount, recvtype);
    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);
    index = 0;
    for(dst = 0; dst < size; dst++) {
      if(dst != root) {
        requests[index] =
            smpi_isend_init((char *)sendbuf + displs[dst] * sendext, sendcounts[dst],
                            sendtype, dst, system_tag, comm);
        index++;
      }
    }
    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    xbt_free(requests);
  }
}
void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
                     MPI_Datatype datatype, MPI_Op op, int root,
                     MPI_Comm comm)
{
  int system_tag = COLL_TAG_REDUCE;
  int rank, size, src, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;
  void **tmpbufs;

  char* sendtmpbuf = (char*) sendbuf;
  if( sendbuf == MPI_IN_PLACE ) {
    sendtmpbuf = (char *)recvbuf;
  }

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  //non-commutative case, use a working algorithm from OpenMPI
  if(!smpi_op_is_commute(op)){
    smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count,
                                             datatype, op, root, comm);
    return;
  }

  if(rank != root) {
    // Send buffer to root
    smpi_mpi_send(sendtmpbuf, count, datatype, root, system_tag, comm);
  } else {
    // FIXME: check for errors
    smpi_datatype_extent(datatype, &lb, &dataext);
    // Local copy from root
    if (sendtmpbuf && recvbuf)
      smpi_datatype_copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
    // Receive buffers from senders
    //TODO: make an MPI_Barrier here?
    requests = xbt_new(MPI_Request, size - 1);
    tmpbufs = xbt_new(void *, size - 1);
    index = 0;
    for(src = 0; src < size; src++) {
      if(src != root) {
        // FIXME: possibly overkill when we have contiguous/noncontiguous data
        tmpbufs[index] = xbt_malloc(count * dataext);
        requests[index] =
            smpi_irecv_init(tmpbufs[index], count, datatype, src,
                            system_tag, comm);
        index++;
      }
    }
    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    for(src = 0; src < size - 1; src++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      XBT_DEBUG("finished waiting any request with index %d", index);
      if(index == MPI_UNDEFINED) {
        break;
      }
      if(op) /* op can be MPI_OP_NULL that does nothing */
        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    }
    for(index = 0; index < size - 1; index++) {
      xbt_free(tmpbufs[index]);
    }
    xbt_free(tmpbufs);
    xbt_free(requests);
  }
}
void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count,
                        MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
  smpi_mpi_bcast(recvbuf, count, datatype, 0, comm);
}
void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count,
                   MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = 888;
  int rank, size, other, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;
  void **tmpbufs;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

  // FIXME: check for errors
  smpi_datatype_extent(datatype, &lb, &dataext);

  // Local copy from self
  smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, size - 1);
  tmpbufs = xbt_new(void *, rank);
  index = 0;
  for(other = 0; other < rank; other++) {
    // FIXME: possibly overkill when we have contiguous/noncontiguous data
    tmpbufs[index] = xbt_malloc(count * dataext);
    requests[index] =
        smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,
                        comm);
    index++;
  }
  for(other = rank + 1; other < size; other++) {
    requests[index] =
        smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(size - 1, requests);
  for(other = 0; other < size - 1; other++) {
    index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
    if(index == MPI_UNDEFINED) {
      break;
    }
    if(index < rank) {
      // #Request is below rank: it's an irecv
      smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    }
  }
  for(index = 0; index < rank; index++) {
    xbt_free(tmpbufs[index]);
  }
  xbt_free(tmpbufs);
  xbt_free(requests);
}
void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count,
                     MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  int system_tag = 888;
  int rank, size, other, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;
  void **tmpbufs;
  int recvbuf_is_empty = 1;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

  // FIXME: check for errors
  smpi_datatype_extent(datatype, &lb, &dataext);

  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, size - 1);
  tmpbufs = xbt_new(void *, rank);
  index = 0;
  for(other = 0; other < rank; other++) {
    // FIXME: possibly overkill when we have contiguous/noncontiguous data
    tmpbufs[index] = xbt_malloc(count * dataext);
    requests[index] =
        smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,
                        comm);
    index++;
  }
  for(other = rank + 1; other < size; other++) {
    requests[index] =
        smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
  smpi_mpi_startall(size - 1, requests);
  for(other = 0; other < size - 1; other++) {
    index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
    if(index == MPI_UNDEFINED) {
      break;
    }
    if(index < rank) {
      if(recvbuf_is_empty){
        smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
        recvbuf_is_empty = 0;
      }else
        // #Request is below rank: it's an irecv
        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
    }
  }
  for(index = 0; index < rank; index++) {
    xbt_free(tmpbufs[index]);
  }
  xbt_free(tmpbufs);
  xbt_free(requests);
}