/* Copyright (c) 2007-2014. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "mc/mc_replay.h"
#include "xbt/replay.h"
#include "simix/smx_private.h"
#include "surf/surf.h"
#include "simgrid/sg_config.h"
#include "colls/colls.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
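/* Matching callbacks handed to SIMIX: they decide whether a posted receive
 * (resp. send) request can be paired with a given communication, honoring the
 * MPI_ANY_SOURCE and MPI_ANY_TAG wildcards, and record the real source/tag and
 * truncation information on a successful match. */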
static int match_recv(void* a, void* b, smx_synchro_t ignored) {
  MPI_Request ref = (MPI_Request)a;
  MPI_Request req = (MPI_Request)b;
  XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d", ref->src, req->src, ref->tag, req->tag);

  xbt_assert(ref, "Cannot match recv against null reference");
  xbt_assert(req, "Cannot match recv against null request");
  if((ref->src == MPI_ANY_SOURCE || req->src == ref->src)
     && ((ref->tag == MPI_ANY_TAG && req->tag >= 0) || req->tag == ref->tag)){
    // we match, we can transfer some values
    // FIXME: move this to the copy function?
    if(ref->src == MPI_ANY_SOURCE) ref->real_src = req->src;
    if(ref->tag == MPI_ANY_TAG) ref->real_tag = req->tag;
    if(ref->real_size < req->real_size) ref->truncated = 1;

    ref->detached_sender = req; // tie the sender to the receiver, as it is detached and has to be freed in the receiver

    XBT_DEBUG("match succeeded");
    return 1;
  } else
    return 0;
}
static int match_send(void* a, void* b, smx_synchro_t ignored) {
  MPI_Request ref = (MPI_Request)a;
  MPI_Request req = (MPI_Request)b;
  XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d", ref->src, req->src, ref->tag, req->tag);
  xbt_assert(ref, "Cannot match send against null reference");
  xbt_assert(req, "Cannot match send against null request");

  if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
     && ((req->tag == MPI_ANY_TAG && ref->tag >= 0) || req->tag == ref->tag)){
    if(req->src == MPI_ANY_SOURCE) req->real_src = ref->src;
    if(req->tag == MPI_ANY_TAG) req->real_tag = ref->tag;
    if(req->real_size < ref->real_size) req->truncated = 1;

    req->detached_sender = ref; // tie the sender to the receiver, as it is detached and has to be freed in the receiver

    XBT_DEBUG("match succeeded");
    return 1;
  } else
    return 0;
}
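/* Piecewise timing factors: each entry maps a message-size threshold (factor)
 * to up to four coefficients. They are parsed from the "smpi/os", "smpi/ois"
 * and "smpi/or" configuration strings and used below to inject sender/receiver
 * overheads that depend on the message size. */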
typedef struct s_smpi_factor *smpi_factor_t;
typedef struct s_smpi_factor {
  long factor;       // message-size threshold
  int nb_values;     // number of coefficients actually parsed
  double values[4];  // arbitrarily set to 4
} s_smpi_factor_t;

xbt_dynar_t smpi_os_values = NULL;
xbt_dynar_t smpi_or_values = NULL;
xbt_dynar_t smpi_ois_values = NULL;

double smpi_wtime_sleep = 0.0;
double smpi_iprobe_sleep = 1e-4;
double smpi_test_sleep = 1e-4;
// Methods used to parse and store the values for timing injections in smpi
// These are taken from surf/network.c and generalized to have more factors
// These methods should be merged with those in surf/network.c (moved somewhere in xbt?)

static int factor_cmp(const void *pa, const void *pb)
{
  return (((s_smpi_factor_t*)pa)->factor > ((s_smpi_factor_t*)pb)->factor) ? 1 :
         (((s_smpi_factor_t*)pa)->factor < ((s_smpi_factor_t*)pb)->factor) ? -1 : 0;
}
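/* parse_factor() splits a configuration string of the form
 *   "<size1>:<c0>:<c1>[:...] ; <size2>:<c0>:<c1>[:...] ; ..."
 * into s_smpi_factor_t entries (2 to 5 fields per entry) and sorts them by
 * size threshold. For instance, a hypothetical string "65536:0:2e-9;1e9:0:1e-9"
 * would produce two entries whose coefficients are later combined as
 * values[0] + values[1] * size in smpi_os / smpi_ois / smpi_or below. */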
static xbt_dynar_t parse_factor(const char *smpi_coef_string)
  unsigned int iter = 0;

  xbt_dynar_t smpi_factor, radical_elements, radical_elements2 = NULL;

  smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_t), NULL);
  radical_elements = xbt_str_split(smpi_coef_string, ";");
  xbt_dynar_foreach(radical_elements, iter, value) {
    memset(&fact, 0, sizeof(s_smpi_factor_t));
    radical_elements2 = xbt_str_split(value, ":");
    if (xbt_dynar_length(radical_elements2) < 2 || xbt_dynar_length(radical_elements2) > 5)
      xbt_die("Malformed radical for smpi factor!");
    for(i = 0; i < xbt_dynar_length(radical_elements2); i++){
        fact.factor = atol(xbt_dynar_get_as(radical_elements2, i, char *));
        fact.values[fact.nb_values] = atof(xbt_dynar_get_as(radical_elements2, i, char *));
    xbt_dynar_push_as(smpi_factor, s_smpi_factor_t, fact);
    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values, fact.values[0]);
    xbt_dynar_free(&radical_elements2);
  xbt_dynar_free(&radical_elements);

  xbt_dynar_sort(smpi_factor, &factor_cmp);
  xbt_dynar_foreach(smpi_factor, iter, fact) {
    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values, fact.values[0]);
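/* smpi_os / smpi_ois / smpi_or return the overhead (in seconds) injected for a
 * blocking send, a non-blocking send and a receive of the given message size.
 * The sorted factor table is walked in increasing threshold order and the cost
 * values[0] + values[1] * size is taken from the last entry whose threshold is
 * below the message size. */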
static double smpi_os(double size)
  if (!smpi_os_values) {
    smpi_os_values = parse_factor(sg_cfg_get_string("smpi/os"));
    smpi_register_static(smpi_os_values, xbt_dynar_free_voidp);
  unsigned int iter = 0;
  s_smpi_factor_t fact;

  xbt_dynar_foreach(smpi_os_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("os : %f <= %ld return %f", size, fact.factor, current);
      current = fact.values[0] + fact.values[1] * size;
  XBT_DEBUG("os : %f > %ld return %f", size, fact.factor, current);

static double smpi_ois(double size)
  if (!smpi_ois_values) {
    smpi_ois_values = parse_factor(sg_cfg_get_string("smpi/ois"));
    smpi_register_static(smpi_ois_values, xbt_dynar_free_voidp);
  unsigned int iter = 0;
  s_smpi_factor_t fact;

  xbt_dynar_foreach(smpi_ois_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("ois : %f <= %ld return %f", size, fact.factor, current);
      current = fact.values[0] + fact.values[1] * size;
  XBT_DEBUG("ois : %f > %ld return %f", size, fact.factor, current);

static double smpi_or(double size)
  if (!smpi_or_values) {
    smpi_or_values = parse_factor(sg_cfg_get_string("smpi/or"));
    smpi_register_static(smpi_or_values, xbt_dynar_free_voidp);
  unsigned int iter = 0;
  s_smpi_factor_t fact;

  xbt_dynar_foreach(smpi_or_values, iter, fact) {
    if (size <= fact.factor) {
      XBT_DEBUG("or : %f <= %ld return %f", size, fact.factor, current);
      current = fact.values[0] + fact.values[1] * size;
  XBT_DEBUG("or : %f > %ld return %f", size, fact.factor, current);

double smpi_mpi_wtime(){
  if (smpi_process_initialized() && !smpi_process_finalized() && !smpi_process_get_sampling()) {
    time = SIMIX_get_clock();
    // to avoid deadlocks if used as a break condition, such as
    //   while (MPI_Wtime(...) < time_limit) {
    // because the time will not normally advance when only calls to MPI_Wtime
    // are made -> deadlock (MPI_Wtime never reaches the time limit)
    if(smpi_wtime_sleep > 0) simcall_process_sleep(smpi_wtime_sleep);
    time = SIMIX_get_clock();
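/* build_request() allocates an s_smpi_mpi_request_t and fills in the fields
 * shared by every kind of communication (buffers, datatype, src/dst, tag, comm,
 * flags). For sends of non-contiguous datatypes and for accumulate-receives,
 * the user buffer is serialized into a freshly allocated contiguous buffer. */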
static MPI_Request build_request(void *buf, int count,
                                 MPI_Datatype datatype, int src, int dst,
                                 int tag, MPI_Comm comm, unsigned flags)
  MPI_Request request = NULL;

  void *old_buf = NULL;

  request = xbt_new(s_smpi_mpi_request_t, 1);

  s_smpi_subtype_t *subtype = datatype->substruct;

  if(((flags & RECV) && (flags & ACCUMULATE)) || (datatype->has_subtype == 1)){
    // This part handles the problem of non-contiguous memory
    buf = count == 0 ? NULL : xbt_malloc(count * smpi_datatype_size(datatype));
    if ((datatype->has_subtype == 1) && (flags & SEND)) {
      subtype->serialize(old_buf, buf, count, datatype->substruct);

  // This part handles the problem of non-contiguous memory (for the
  // unserialization at the reception)
  request->old_buf = old_buf;
  request->old_type = datatype;

  request->size = smpi_datatype_size(datatype) * count;
  request->comm = comm;
  request->action = NULL;
  request->flags = flags;
  request->detached = 0;
  request->detached_sender = NULL;
  request->real_src = 0;
  request->truncated = 0;
  request->real_size = 0;
  request->real_tag = 0;
  if(flags & PERSISTENT)
    request->refcount = 1;
  else
    request->refcount = 0;
  request->op = MPI_REPLACE;

  if (flags & SEND) smpi_datatype_unuse(datatype);
void smpi_empty_status(MPI_Status * status)
{
  if(status != MPI_STATUS_IGNORE) {
    status->MPI_SOURCE = MPI_ANY_SOURCE;
    status->MPI_TAG = MPI_ANY_TAG;
    status->MPI_ERROR = MPI_SUCCESS;
  }
}

void smpi_action_trace_run(char *path)
  xbt_dict_cursor_t cursor;

  action_fp = fopen(path, "r");
  if (action_fp == NULL)
    xbt_die("Cannot open %s: %s", path, strerror(errno));

  if (!xbt_dict_is_empty(action_queues)) {
    XBT_WARN
        ("Not all actions got consumed. If the simulation ended successfully (without deadlock), you may want to add new processes to your deployment file.");

    xbt_dict_foreach(action_queues, cursor, name, todo) {
      XBT_WARN("Still %lu actions for %s", xbt_dynar_length(todo), name);

  xbt_dict_free(&action_queues);
  action_queues = xbt_dict_new_homogeneous(NULL);

static void smpi_mpi_request_free_voidp(void* request)
{
  MPI_Request req = request;
  smpi_mpi_request_free(&req);
}
/* MPI Low level calls */
MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
                               int dst, int tag, MPI_Comm comm)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag,
                          comm, PERSISTENT | SEND | PREPARED);

MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
                                int dst, int tag, MPI_Comm comm)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag,
                          comm, PERSISTENT | SSEND | SEND | PREPARED);

MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
                               int src, int tag, MPI_Comm comm)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype,
                          src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                          smpi_process_index(), tag,
                          comm, PERSISTENT | RECV | PREPARED);
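/* smpi_mpi_start() actually posts the communication described by a request.
 * Receives and sends below the "smpi/async_small_thres" threshold (and RMA
 * requests) go through the per-process "small" mailbox (eager mode), larger
 * ones through the regular mailbox; sends smaller than
 * "smpi/send_is_detached_thres" are detached, i.e. the payload is copied so the
 * sender does not have to wait for the matching receive. */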
void smpi_mpi_start(MPI_Request request)
  xbt_assert(!request->action, "Cannot (re)start a non-finished communication");

  request->flags &= ~PREPARED;
  request->flags &= ~FINISHED;

  if (request->flags & RECV) {
    print_request("New recv", request);

    xbt_mutex_t mut = smpi_process_mailboxes_mutex();
    xbt_mutex_acquire(mut);

    if (request->flags & RMA || request->size < sg_cfg_get_int("smpi/async_small_thres")){
      // We have to check both mailboxes (because SSEND messages are sent to the large mbox).
      // Begin with the more appropriate one: the small one.
      mailbox = smpi_process_mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
      smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, (void*)request);

        mailbox = smpi_process_mailbox();
        XBT_DEBUG("No, nothing in the small mailbox, test the other one: %p", mailbox);
        action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, (void*)request);

          XBT_DEBUG("Still nothing, switch back to the small mailbox: %p", mailbox);
          mailbox = smpi_process_mailbox_small();

        XBT_DEBUG("yes there was something for us in the large mailbox");

      mailbox = smpi_process_mailbox_small();
      XBT_DEBUG("Is there a corresponding send already posted in the small mailbox?");
      smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, (void*)request);

        XBT_DEBUG("No, nothing in the permanent receive mailbox");
        mailbox = smpi_process_mailbox();

        XBT_DEBUG("yes there was something for us in the small mailbox");

    // integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
    double sleeptime = request->detached ? smpi_or(request->size) : 0.0;
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("receiving size of %zu : sleep %f ", request->size, smpi_or(request->size));

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    smpi_datatype_use(request->old_type);
    smpi_comm_use(request->comm);
    request->action = simcall_comm_irecv(mailbox, request->buf,
                                         &request->real_size, &match_recv,
                                         !smpi_process_get_replaying() ? &smpi_comm_copy_buffer_callback
                                                                       : &smpi_comm_null_copy_buffer_callback,
    XBT_DEBUG("recv simcall posted");

    xbt_mutex_release(mut);

    int receiver = request->dst;

    int rank = request->src;
    if (TRACE_smpi_view_internals()) {
      TRACE_smpi_send(rank, rank, receiver, request->size);

    print_request("New send", request);

    // if we are giving back the control to the user without waiting for completion, we have to inject timings
    double sleeptime = 0.0;
    if(request->detached || (request->flags & (ISEND|SSEND))){ // issend should be treated as isend
      // isend and send timings may be different
      sleeptime = (request->flags & ISEND) ? smpi_ois(request->size) : smpi_os(request->size);

    if(sleeptime != 0.0){
      simcall_process_sleep(sleeptime);
      XBT_DEBUG("sending size of %zu : sleep %f ", request->size, smpi_os(request->size));

    xbt_mutex_t mut = smpi_process_remote_mailboxes_mutex(receiver);
    xbt_mutex_acquire(mut);

    if (request->flags & RMA || request->size < sg_cfg_get_int("smpi/async_small_thres")) { // eager mode
      mailbox = smpi_process_remote_mailbox(receiver);
      XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
      smx_synchro_t action = simcall_comm_iprobe(mailbox, 1, request->dst, request->tag, &match_send, (void*)request);

        if (! (request->flags & SSEND)){
          mailbox = smpi_process_remote_mailbox_small(receiver);
          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);

          mailbox = smpi_process_remote_mailbox_small(receiver);
          XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
          action = simcall_comm_iprobe(mailbox, 1, request->dst, request->tag, &match_send, (void*)request);

            XBT_DEBUG("No, we are first, send to large mailbox");
            mailbox = smpi_process_remote_mailbox(receiver);

        XBT_DEBUG("Yes there was something for us in the large mailbox");

      mailbox = smpi_process_remote_mailbox(receiver);
      XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)", request, mailbox, request->buf);

    void* buf = request->buf;
    if ( (! (request->flags & SSEND)) && (request->size < sg_cfg_get_int("smpi/send_is_detached_thres"))) {
      request->detached = 1;
      XBT_DEBUG("Send request %p is detached", request);

      if(request->old_type->has_subtype == 0){
        oldbuf = request->buf;
        if (!smpi_process_get_replaying() && oldbuf && request->size != 0){
          if((smpi_privatize_global_variables)
             && ((char*) request->buf >= smpi_start_data_exe)
             && ((char*)request->buf < smpi_start_data_exe + smpi_size_data_exe )){
            XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
            smpi_switch_data_segment(request->src);

          buf = xbt_malloc(request->size);
          memcpy(buf, oldbuf, request->size);
          XBT_DEBUG("buf %p copied into %p", oldbuf, buf);

    // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
    request->real_size = request->size;
    smpi_datatype_use(request->old_type);
    smpi_comm_use(request->comm);

    simcall_comm_isend(SIMIX_process_from_PID(request->src+1), mailbox, request->size, -1.0,
                       buf, request->real_size,
                       &xbt_free_f, // how to free the userdata if a detached send fails
                       !smpi_process_get_replaying() ? &smpi_comm_copy_buffer_callback
                                                     : &smpi_comm_null_copy_buffer_callback,
                       // detach if msg size < eager/rdv switch limit
    XBT_DEBUG("send simcall posted");

    /* FIXME: detached sends are not traceable (request->action == NULL) */
    simcall_set_category(request->action, TRACE_internal_smpi_get_category());

    xbt_mutex_release(mut);
void smpi_mpi_startall(int count, MPI_Request * requests)
  if(requests == NULL) return;

  for(i = 0; i < count; i++) {
    smpi_mpi_start(requests[i]);

void smpi_mpi_request_free(MPI_Request * request)
  if((*request) != MPI_REQUEST_NULL){
    (*request)->refcount--;
    if((*request)->refcount < 0) xbt_die("wrong refcount");

    if((*request)->refcount == 0){
      print_request("Destroying", (*request));
      *request = MPI_REQUEST_NULL;

      print_request("Decrementing", (*request));

    xbt_die("freeing an already free request");
MPI_Request smpi_rma_send_init(void *buf, int count, MPI_Datatype datatype,
                               int src, int dst, int tag, MPI_Comm comm, MPI_Op op)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */

  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
                          comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);

  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
                          comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);

MPI_Request smpi_rma_recv_init(void *buf, int count, MPI_Datatype datatype,
                               int src, int dst, int tag, MPI_Comm comm, MPI_Op op)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */

  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
                          comm, RMA | NON_PERSISTENT | RECV | PREPARED);

  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
                          comm, RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);

MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype,
                            int dst, int tag, MPI_Comm comm)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag,
                          comm, PERSISTENT | ISEND | SEND | PREPARED);

MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype,
                           int dst, int tag, MPI_Comm comm)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag,
                          comm, NON_PERSISTENT | ISEND | SEND);
  smpi_mpi_start(request);

MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype,
                            int dst, int tag, MPI_Comm comm)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag,
                          comm, NON_PERSISTENT | ISEND | SSEND | SEND);
  smpi_mpi_start(request);

MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
                            int src, int tag, MPI_Comm comm)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype,
                          src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                          smpi_process_index(), tag,
                          comm, PERSISTENT | RECV | PREPARED);

MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype,
                           int src, int tag, MPI_Comm comm)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype,
                          src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                          smpi_process_index(), tag,
                          comm, NON_PERSISTENT | RECV);
  smpi_mpi_start(request);
void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src,
                   int tag, MPI_Comm comm, MPI_Status * status)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
  smpi_mpi_wait(&request, status);

void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst,
                   int tag, MPI_Comm comm)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag,
                          comm, NON_PERSISTENT | SEND);

  smpi_mpi_start(request);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);

void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype,
                    int dst, int tag, MPI_Comm comm)
  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
  request = build_request(buf == MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(),
                          smpi_group_index(smpi_comm_group(comm), dst), tag,
                          comm, NON_PERSISTENT | SSEND | SEND);

  smpi_mpi_start(request);
  smpi_mpi_wait(&request, MPI_STATUS_IGNORE);

void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                       int dst, int sendtag, void *recvbuf, int recvcount,
                       MPI_Datatype recvtype, int src, int recvtag,
                       MPI_Comm comm, MPI_Status * status)
  MPI_Request requests[2];
  MPI_Status stats[2];

  int myid = smpi_process_index();
  if ((smpi_group_index(smpi_comm_group(comm), dst) == myid) && (smpi_group_index(smpi_comm_group(comm), src) == myid)) {
    smpi_datatype_copy(sendbuf, sendcount, sendtype,
                       recvbuf, recvcount, recvtype);

  requests[0] = smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
  requests[1] = smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
  smpi_mpi_startall(2, requests);
  smpi_mpi_waitall(2, requests, stats);
  smpi_mpi_request_free(&requests[0]);
  smpi_mpi_request_free(&requests[1]);
  if(status != MPI_STATUS_IGNORE) {
    // Copy receive status

int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
{
  return status->count / smpi_datatype_size(datatype);
}
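/* finish_wait() is the common completion path: it fills the user-visible
 * MPI_Status (source, tag, truncation error, byte count), unserializes
 * non-contiguous data or applies the accumulate operation on the receive side,
 * drops the datatype/communicator references and frees detached senders. */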
static void finish_wait(MPI_Request * request, MPI_Status * status)
  MPI_Request req = *request;
  smpi_empty_status(status);

  if(!(req->detached && req->flags & SEND)
     && !(req->flags & PREPARED)){
    if(status != MPI_STATUS_IGNORE) {
      int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
      status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src);
      status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
      status->MPI_ERROR = req->truncated ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
      // this handles the case where the size in the receive differs from the size in the send
      // FIXME: really this should just contain the count of receive-type blocks,
      status->count = req->real_size;

    print_request("Finishing", req);
    MPI_Datatype datatype = req->old_type;

    if((req->flags & ACCUMULATE) || (datatype->has_subtype == 1)){
      if (!smpi_process_get_replaying()){
        if( smpi_privatize_global_variables
            && ((char*)req->old_buf >= smpi_start_data_exe)
            && ((char*)req->old_buf < smpi_start_data_exe + smpi_size_data_exe )
          XBT_VERB("Privatization : We are unserializing to a zone in global memory - Switch data segment ");
          smpi_switch_data_segment(smpi_process_index());

      if(datatype->has_subtype == 1){
        // This part handles the problem of non-contiguous memory:
        // the unserialization at the reception
        s_smpi_subtype_t *subtype = datatype->substruct;
        if(req->flags & RECV)
          subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype), datatype->substruct, req->op);
        if(req->detached == 0) free(req->buf);
      }else if(req->flags & RECV){ // apply op on contiguous buffer for accumulate
        int n = req->real_size/smpi_datatype_size(datatype);
        smpi_op_apply(req->op, req->buf, req->old_buf, &n, &datatype);

    smpi_comm_unuse(req->comm);
    smpi_datatype_unuse(datatype);

  if (TRACE_smpi_view_internals()) {
    if(req->flags & RECV){
      int rank = smpi_process_index();
      int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
      TRACE_smpi_recv(rank, src_traced, rank);

  if(req->detached_sender != NULL){
    smpi_mpi_request_free(&(req->detached_sender));

  if(req->flags & PERSISTENT)
    req->action = NULL;
  req->flags |= FINISHED;

  smpi_mpi_request_free(request);
int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
  // assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)

  // to avoid deadlocks if used as a break condition, such as
  //   while (MPI_Test(request, flag, status) && flag) {
  // because the time will not normally advance when only calls to MPI_Test
  // are made -> deadlock
  // multiplier for the sleep time: each failed test increases it, to speed up execution
  static int nsleeps = 1;
  if(smpi_test_sleep > 0) simcall_process_sleep(nsleeps*smpi_test_sleep);

  smpi_empty_status(status);
  if (!((*request)->flags & PREPARED)) {
    if ((*request)->action != NULL)
      flag = simcall_comm_test((*request)->action);
      finish_wait(request, status);
      nsleeps = 1; // reset the number of sleeps we will do next time
      if (*request != MPI_REQUEST_NULL && !((*request)->flags & PERSISTENT))
        *request = MPI_REQUEST_NULL;

int smpi_mpi_testany(int count, MPI_Request requests[], int *index,
  *index = MPI_UNDEFINED;

  comms = xbt_dynar_new(sizeof(smx_synchro_t), NULL);
  map = xbt_new(int, count);

  for(i = 0; i < count; i++) {
    if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action &&
        !(requests[i]->flags & PREPARED)) {
      xbt_dynar_push(comms, &requests[i]->action);

    // multiplier for the sleep time: each failed testany increases it, to speed up execution
    static int nsleeps = 1;
    if(smpi_test_sleep > 0) simcall_process_sleep(nsleeps*smpi_test_sleep);

    i = simcall_comm_testany(comms);
    // not MPI_UNDEFINED, as this is a simix return code
      finish_wait(&requests[*index], status);
      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT))
        requests[*index] = MPI_REQUEST_NULL;

    // all requests are null or inactive, return true
    smpi_empty_status(status);

  xbt_dynar_free(&comms);
int smpi_mpi_testall(int count, MPI_Request requests[],
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for(i = 0; i < count; i++){
    if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
      if (smpi_mpi_test(&requests[i], pstat) != 1){
        requests[i] = MPI_REQUEST_NULL;
      smpi_empty_status(pstat);
    if(status != MPI_STATUSES_IGNORE) {

void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
  // FIXME: find another way to avoid busy waiting?
  // the issue here is that we have to wait on a nonexistent comm
    smpi_mpi_iprobe(source, tag, comm, &flag, status);
    XBT_DEBUG("Busy Waiting on probing : %d", flag);
void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
  MPI_Request request = build_request(NULL, 0, MPI_CHAR,
                                      source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), source),
                                      smpi_comm_rank(comm), tag, comm, PERSISTENT | RECV);

  // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
  // (especially when used as a break condition, such as while(MPI_Iprobe(...)) ... )
  // multiplier for the sleep time: each failed iprobe increases it, to speed up execution
  static int nsleeps = 1;
  if(smpi_iprobe_sleep > 0) simcall_process_sleep(nsleeps*smpi_iprobe_sleep);
  // behave like a receive, but don't do it

  print_request("New iprobe", request);
  // We have to test both mailboxes as we don't know whether the message will arrive on one or the other
  if (sg_cfg_get_int("smpi/async_small_thres") > 0){
    mailbox = smpi_process_mailbox_small();
    XBT_DEBUG("trying to probe the perm recv mailbox");
    request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, (void*)request);

  if (request->action == NULL){
    mailbox = smpi_process_mailbox();
    XBT_DEBUG("trying to probe the other mailbox");
    request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, (void*)request);

  if (request->action){
    MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
    if(status != MPI_STATUS_IGNORE && !(req->flags & PREPARED)) {
      status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
      status->MPI_TAG = req->tag;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = req->real_size;
    nsleeps = 1; // reset the number of sleeps we will do next time

  smpi_mpi_request_free(&request);
void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
  print_request("Waiting", *request);
  if ((*request)->flags & PREPARED) {
    smpi_empty_status(status);

  if ((*request)->action != NULL) { // this is not a detached send
    simcall_comm_wait((*request)->action, -1.0);

    if((MC_is_active() || MC_record_replay_is_active()) && (*request)->action)
      (*request)->action->comm.dst_data = NULL; // dangling pointer: dst_data is freed with a wait, need to set it to NULL for system state comparison

  finish_wait(request, status);
  if (*request != MPI_REQUEST_NULL && ((*request)->flags & NON_PERSISTENT))
    *request = MPI_REQUEST_NULL;
  // FIXME for a detached send, finish_wait is not called:

int smpi_mpi_waitany(int count, MPI_Request requests[],
  index = MPI_UNDEFINED;

  // Wait for a request to complete
  comms = xbt_dynar_new(sizeof(smx_synchro_t), NULL);
  map = xbt_new(int, count);

  XBT_DEBUG("Wait for one of %d", count);
  for(i = 0; i < count; i++) {
    if (requests[i] != MPI_REQUEST_NULL
        && !(requests[i]->flags & PREPARED)
        && !(requests[i]->flags & FINISHED)) {
      if (requests[i]->action != NULL) {
        XBT_DEBUG("Waiting any %p ", requests[i]);
        xbt_dynar_push(comms, &requests[i]->action);

        // This is a finished detached request, let's return this one
        size = 0; // so we free the dynar but don't do the waitany call
        finish_wait(&requests[i], status); // cleanup if refcount = 0
        if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags & NON_PERSISTENT))
          requests[i] = MPI_REQUEST_NULL; // set to null

    i = simcall_comm_waitany(comms);

    // not MPI_UNDEFINED, as this is a simix return code
      finish_wait(&requests[index], status);
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;

  xbt_dynar_free(&comms);

  if (index == MPI_UNDEFINED)
    smpi_empty_status(status);
int smpi_mpi_waitall(int count, MPI_Request requests[],
  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
  int retvalue = MPI_SUCCESS;
  // tag invalid requests in the set
  if (status != MPI_STATUSES_IGNORE) {
    for (c = 0; c < count; c++) {
      if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL ||
          (requests[c]->flags & PREPARED)) {
        smpi_empty_status(&status[c]);
      } else if (requests[c]->src == MPI_PROC_NULL) {
        smpi_empty_status(&status[c]);
        status[c].MPI_SOURCE = MPI_PROC_NULL;

  for(c = 0; c < count; c++) {
    if (MC_is_active() || MC_record_replay_is_active()) {
      smpi_mpi_wait(&requests[c], pstat);

      index = smpi_mpi_waitany(count, requests, pstat);
      if (index == MPI_UNDEFINED)
      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;

    if (status != MPI_STATUSES_IGNORE) {
      status[index] = *pstat;
      if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
        retvalue = MPI_ERR_IN_STATUS;

int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices,
                      MPI_Status status[])
  int i, count, index;

  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for(i = 0; i < incount; i++)
    index = smpi_mpi_waitany(incount, requests, pstat);
    if(index != MPI_UNDEFINED){
      indices[count] = index;

      if(status != MPI_STATUSES_IGNORE) {
        status[index] = *pstat;

      if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
        requests[index] = MPI_REQUEST_NULL;

      return MPI_UNDEFINED;
int smpi_mpi_testsome(int incount, MPI_Request requests[], int *indices,
                      MPI_Status status[])
  int i, count, count_dead;

  MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;

  for(i = 0; i < incount; i++) {
    if((requests[i] != MPI_REQUEST_NULL)) {
      if(smpi_mpi_test(&requests[i], pstat)) {
        if(status != MPI_STATUSES_IGNORE) {
        if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->flags & NON_PERSISTENT)
          requests[i] = MPI_REQUEST_NULL;

  if(count_dead == incount) return MPI_UNDEFINED;

void smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
  // arity=2 would be a binary tree; arity=4 seems to be a good setting (see P2P-MPI)
  nary_tree_bcast(buf, count, datatype, root, comm, 4);

void smpi_mpi_barrier(MPI_Comm comm)
  // arity=2 would be a binary tree; arity=4 seems to be a good setting (see P2P-MPI)
  nary_tree_barrier(comm, 4);
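/* Default (linear) implementations of the collectives: the root posts size-1
 * non-blocking receives (or sends, for scatter-like calls) while every other
 * rank performs a single send or receive to/from the root. Tuned, selectable
 * algorithms are provided separately (see colls/colls.h). */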
void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                     void *recvbuf, int recvcount, MPI_Datatype recvtype,
                     int root, MPI_Comm comm)
  int system_tag = COLL_TAG_GATHER;
  int rank, size, src, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);

    // FIXME: check for errors
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype,
                       (char *)recvbuf + root * recvcount * recvext, recvcount, recvtype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);

    for(src = 0; src < size; src++) {
        requests[index] = smpi_irecv_init((char *)recvbuf + src * recvcount * recvext,
                                          recvcount, recvtype,
                                          src, system_tag, comm);

    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    for(src = 0; src < size-1; src++) {
      smpi_mpi_request_free(&requests[src]);

void smpi_mpi_reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
                             MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  int rank = smpi_process_index();

  /* arbitrarily choose root as rank 0 */
  size = smpi_comm_size(comm);

  displs = xbt_new(int, size);
  for (i = 0; i < size; i++) {
    count += recvcounts[i];

  tmpbuf = (void*)smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype));

  mpi_coll_reduce_fun(sendbuf, tmpbuf, count, datatype, op, 0, comm);
  smpi_mpi_scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf,
                    recvcounts[rank], datatype, 0, comm);

  smpi_free_tmp_buffer(tmpbuf);
void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int *recvcounts, int *displs,
                      MPI_Datatype recvtype, int root, MPI_Comm comm)
  int system_tag = COLL_TAG_GATHERV;
  int rank, size, src, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

    // Send buffer to root
    smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);

    // FIXME: check for errors
    smpi_datatype_extent(recvtype, &lb, &recvext);
    // Local copy from root
    smpi_datatype_copy(sendbuf, sendcount, sendtype,
                       (char *)recvbuf + displs[root] * recvext,
                       recvcounts[root], recvtype);
    // Receive buffers from senders
    requests = xbt_new(MPI_Request, size - 1);

    for(src = 0; src < size; src++) {
            smpi_irecv_init((char *)recvbuf + displs[src] * recvext,
                            recvcounts[src], recvtype, src, system_tag, comm);

    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    for(src = 0; src < size-1; src++) {
      smpi_mpi_request_free(&requests[src]);

void smpi_mpi_allgather(void *sendbuf, int sendcount,
                        MPI_Datatype sendtype, void *recvbuf,
                        int recvcount, MPI_Datatype recvtype,
  int system_tag = COLL_TAG_ALLGATHER;
  int rank, size, other, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  // FIXME: check for errors
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype,
                     (char *)recvbuf + rank * recvcount * recvext, recvcount,
  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, 2 * (size - 1));

  for(other = 0; other < size; other++) {
          smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,

      requests[index] = smpi_irecv_init((char *)recvbuf + other * recvcount * recvext,
                                        recvcount, recvtype, other,

  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
  for(other = 0; other < 2*(size-1); other++) {
    smpi_mpi_request_free(&requests[other]);

void smpi_mpi_allgatherv(void *sendbuf, int sendcount,
                         MPI_Datatype sendtype, void *recvbuf,
                         int *recvcounts, int *displs,
                         MPI_Datatype recvtype, MPI_Comm comm)
  int system_tag = COLL_TAG_ALLGATHERV;
  int rank, size, other, index;
  MPI_Aint lb = 0, recvext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  // FIXME: check for errors
  smpi_datatype_extent(recvtype, &lb, &recvext);
  // Local copy from self
  smpi_datatype_copy(sendbuf, sendcount, sendtype,
                     (char *)recvbuf + displs[rank] * recvext,
                     recvcounts[rank], recvtype);
  // Send buffers to others
  requests = xbt_new(MPI_Request, 2 * (size - 1));

  for(other = 0; other < size; other++) {
          smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,

          smpi_irecv_init((char *)recvbuf + displs[other] * recvext, recvcounts[other],
                          recvtype, other, system_tag, comm);

  // Wait for completion of all comms.
  smpi_mpi_startall(2 * (size - 1), requests);
  smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
  for(other = 0; other < 2*(size-1); other++) {
    smpi_mpi_request_free(&requests[other]);
void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, int recvcount, MPI_Datatype recvtype,
                      int root, MPI_Comm comm)
  int system_tag = COLL_TAG_SCATTER;
  int rank, size, dst, index;
  MPI_Aint lb = 0, sendext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,

    // FIXME: check for errors
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    if(recvbuf != MPI_IN_PLACE){
      smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
                         sendcount, sendtype, recvbuf, recvcount, recvtype);

    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);

    for(dst = 0; dst < size; dst++) {
        requests[index] = smpi_isend_init((char *)sendbuf + dst * sendcount * sendext,
                                          sendcount, sendtype, dst,

    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    for(dst = 0; dst < size-1; dst++) {
      smpi_mpi_request_free(&requests[dst]);

void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs,
                       MPI_Datatype sendtype, void *recvbuf, int recvcount,
                       MPI_Datatype recvtype, int root, MPI_Comm comm)
  int system_tag = COLL_TAG_SCATTERV;
  int rank, size, dst, index;
  MPI_Aint lb = 0, sendext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

    // Recv buffer from root
    smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,

    // FIXME: check for errors
    smpi_datatype_extent(sendtype, &lb, &sendext);
    // Local copy from root
    if(recvbuf != MPI_IN_PLACE){
      smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
                         sendtype, recvbuf, recvcount, recvtype);

    // Send buffers to receivers
    requests = xbt_new(MPI_Request, size - 1);

    for(dst = 0; dst < size; dst++) {
          smpi_isend_init((char *)sendbuf + displs[dst] * sendext, sendcounts[dst],
                          sendtype, dst, system_tag, comm);

    // Wait for completion of isend's.
    smpi_mpi_startall(size - 1, requests);
    smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
    for(dst = 0; dst < size-1; dst++) {
      smpi_mpi_request_free(&requests[dst]);
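/* Reduce: the root receives every contribution into a temporary buffer and
 * applies the operation as requests complete (waitany order), which is only
 * valid for commutative operations; non-commutative operations are delegated
 * to smpi_coll_tuned_reduce_ompi_basic_linear at the top of the function. */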
void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
                     MPI_Datatype datatype, MPI_Op op, int root,
  int system_tag = COLL_TAG_REDUCE;
  int rank, size, src, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;

  char* sendtmpbuf = (char*) sendbuf;
  if( sendbuf == MPI_IN_PLACE ) {
    sendtmpbuf = (char *)smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype));
    smpi_datatype_copy(recvbuf, count, datatype, sendtmpbuf, count, datatype);

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  // non-commutative case, use a working algorithm from OpenMPI
  if(!smpi_op_is_commute(op)){
    smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count,
                                             datatype, op, root, comm);

    // Send buffer to root
    smpi_mpi_send(sendtmpbuf, count, datatype, root, system_tag, comm);

    // FIXME: check for errors
    smpi_datatype_extent(datatype, &lb, &dataext);
    // Local copy from root
    if (sendtmpbuf && recvbuf)
      smpi_datatype_copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
    // Receive buffers from senders
    // TODO: make a MPI_barrier here?
    requests = xbt_new(MPI_Request, size - 1);
    tmpbufs = xbt_new(void *, size - 1);

    for(src = 0; src < size; src++) {
        // FIXME: possibly overkill when we have contiguous/noncontiguous data
        if (!smpi_process_get_replaying())
          tmpbufs[index] = xbt_malloc(count * dataext);
        else
          tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);

            smpi_irecv_init(tmpbufs[index], count, datatype, src,

    // Wait for completion of irecv's.
    smpi_mpi_startall(size - 1, requests);
    for(src = 0; src < size - 1; src++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      XBT_DEBUG("finished waiting any request with index %d", index);
      if(index == MPI_UNDEFINED) {
        smpi_mpi_request_free(&requests[index]);

      if(op) /* op can be MPI_OP_NULL that does nothing */
        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);

    for(index = 0; index < size - 1; index++) {
      smpi_free_tmp_buffer(tmpbufs[index]);

  if( sendbuf == MPI_IN_PLACE ) {
    smpi_free_tmp_buffer(sendtmpbuf);

void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count,
                        MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
  smpi_mpi_bcast(recvbuf, count, datatype, 0, comm);
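/* Scan / Exscan: rank r receives the contributions of ranks 0..r-1 into
 * temporary buffers and sends its own contribution to ranks r+1..size-1.
 * Commutative operations are applied in completion order, non-commutative ones
 * in rank order. Exscan differs in that the local contribution is not included
 * in the result (recvbuf starts empty and is first filled by a plain copy). */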
void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count,
                   MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  int system_tag = -888;
  int rank, size, other, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

  // FIXME: check for errors
  smpi_datatype_extent(datatype, &lb, &dataext);

  // Local copy from self
  smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);

  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, size - 1);
  tmpbufs = xbt_new(void *, rank);

  for(other = 0; other < rank; other++) {
    // FIXME: possibly overkill when we have contiguous/noncontiguous data
    tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
        smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,

  for(other = rank + 1; other < size; other++) {
        smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);

  // Wait for completion of all comms.
  smpi_mpi_startall(size - 1, requests);

  if(smpi_op_is_commute(op)){
    for(other = 0; other < size - 1; other++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if(index == MPI_UNDEFINED) {
        // Request is below rank: it's an irecv
        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);

    // non-commutative case, wait in order
    for(other = 0; other < size - 1; other++) {
      smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
        smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);

  for(index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);

  for(index = 0; index < size-1; index++) {
    smpi_mpi_request_free(&requests[index]);

void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count,
                     MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  int system_tag = -888;
  int rank, size, other, index;
  MPI_Aint lb = 0, dataext = 0;
  MPI_Request *requests;

  int recvbuf_is_empty = 1;
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);

  // FIXME: check for errors
  smpi_datatype_extent(datatype, &lb, &dataext);

  // Send/Recv buffers to/from others
  requests = xbt_new(MPI_Request, size - 1);
  tmpbufs = xbt_new(void *, rank);

  for(other = 0; other < rank; other++) {
    // FIXME: possibly overkill when we have contiguous/noncontiguous data
    tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
        smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,

  for(other = rank + 1; other < size; other++) {
        smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);

  // Wait for completion of all comms.
  smpi_mpi_startall(size - 1, requests);
  if(smpi_op_is_commute(op)){
    for(other = 0; other < size - 1; other++) {
      index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
      if(index == MPI_UNDEFINED) {
        if(recvbuf_is_empty){
          smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
          // Request is below rank: it's an irecv
          smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);

    // non-commutative case, wait in order
    for(other = 0; other < size - 1; other++) {
      smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
        if(recvbuf_is_empty){
          smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
        }else smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);

  for(index = 0; index < rank; index++) {
    smpi_free_tmp_buffer(tmpbufs[index]);

  for(index = 0; index < size-1; index++) {
    smpi_mpi_request_free(&requests[index]);