1 /* Copyright (c) 2007-2015. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
10 #include "src/mc/mc_replay.h"
11 #include "xbt/replay.h"
13 #include "src/simix/smx_private.h"
14 #include "surf/surf.h"
15 #include "simgrid/sg_config.h"
16 #include "colls/colls.h"
18 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
21 static int match_recv(void* a, void* b, smx_synchro_t ignored) {
22 MPI_Request ref = (MPI_Request)a;
23 MPI_Request req = (MPI_Request)b;
24 XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
26 xbt_assert(ref, "Cannot match recv against null reference");
27 xbt_assert(req, "Cannot match recv against null request");
28 if((ref->src == MPI_ANY_SOURCE || req->src == ref->src)
29 && ((ref->tag == MPI_ANY_TAG && req->tag >=0) || req->tag == ref->tag)){
30 //we match, we can transfer some values
31 // FIXME : move this to the copy function ?
32 if(ref->src == MPI_ANY_SOURCE)ref->real_src = req->src;
33 if(ref->tag == MPI_ANY_TAG)ref->real_tag = req->tag;
34 if(ref->real_size < req->real_size) ref->truncated = 1;
36 ref->detached_sender=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
38 XBT_DEBUG("match succeeded");
43 static int match_send(void* a, void* b,smx_synchro_t ignored) {
44 MPI_Request ref = (MPI_Request)a;
45 MPI_Request req = (MPI_Request)b;
46 XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
47 xbt_assert(ref, "Cannot match send against null reference");
48 xbt_assert(req, "Cannot match send against null request");
50 if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
51 && ((req->tag == MPI_ANY_TAG && ref->tag >=0)|| req->tag == ref->tag))
53 if(req->src == MPI_ANY_SOURCE)req->real_src = ref->src;
54 if(req->tag == MPI_ANY_TAG)req->real_tag = ref->tag;
55 if(req->real_size < ref->real_size) req->truncated = 1;
57 req->detached_sender=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
59 XBT_DEBUG("match succeeded");
65 // Methods used to parse and store the values for timing injections in smpi
66 // These are taken from surf/network.c and generalized to have more values for each factor
67 typedef struct s_smpi_factor_multival *smpi_os_factor_multival_t;
68 typedef struct s_smpi_factor_multival { // FIXME: this should be merged (deduplicated) with s_smpi_factor defined in network_smpi.c
71 double values[4]; // arbitrarily set to 4 values per factor
72 } s_smpi_factor_multival_t;
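/* A hedged sketch of the expected configuration format (the values below are made
 * up, not SimGrid defaults): a string such as
 *   "0:0.0:0.0;65536:1.5e-6:2.5e-10"
 * given for "smpi/os", "smpi/ois" or "smpi/or" is split on ';' into chunks and on
 * ':' within each chunk; the first field of a chunk becomes fact.factor (the
 * message-size threshold) and the remaining fields (up to 4) fill fact.values[].
 * With the string above, messages smaller than 65536 bytes would use the first
 * pair of coefficients and larger ones the second. */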
73 xbt_dynar_t smpi_os_values = NULL;
74 xbt_dynar_t smpi_or_values = NULL;
75 xbt_dynar_t smpi_ois_values = NULL;
77 double smpi_wtime_sleep = 0.0;
78 double smpi_iprobe_sleep = 1e-4;
79 double smpi_test_sleep = 1e-4;
81 static int factor_cmp(const void *pa, const void *pb)
83 return (((s_smpi_factor_multival_t*)pa)->factor > ((s_smpi_factor_multival_t*)pb)->factor) ? 1 :
84 (((s_smpi_factor_multival_t*)pa)->factor < ((s_smpi_factor_multival_t*)pb)->factor) ? -1 : 0;
88 static xbt_dynar_t parse_factor(const char *smpi_coef_string)
91 unsigned int iter = 0;
92 s_smpi_factor_multival_t fact;
95 xbt_dynar_t smpi_factor, radical_elements, radical_elements2 = NULL;
97 smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_multival_t), NULL);
98 radical_elements = xbt_str_split(smpi_coef_string, ";");
99 xbt_dynar_foreach(radical_elements, iter, value) {
100 memset(&fact, 0, sizeof(s_smpi_factor_multival_t));
101 radical_elements2 = xbt_str_split(value, ":");
102 if (xbt_dynar_length(radical_elements2) <2 || xbt_dynar_length(radical_elements2) > 5)
103 xbt_die("Malformed radical for smpi factor: '%s'", smpi_coef_string);
104 for(i =0; i<xbt_dynar_length(radical_elements2);i++ ){
106 fact.factor = xbt_str_parse_int(xbt_dynar_get_as(radical_elements2, i, char *),
107 bprintf("Invalid factor in chunk #%d: %%s", iter+1));
109 fact.values[fact.nb_values] = xbt_str_parse_double(xbt_dynar_get_as(radical_elements2, i, char *),
110 bprintf("Invalid factor value %d in chunk #%d: %%s", i, iter+1));
115 xbt_dynar_push_as(smpi_factor, s_smpi_factor_multival_t, fact);
116 XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
117 xbt_dynar_free(&radical_elements2);
119 xbt_dynar_free(&radical_elements);
121 xbt_dynar_sort(smpi_factor, &factor_cmp);
122 xbt_dynar_foreach(smpi_factor, iter, fact) {
123 XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
128 static double smpi_os(double size)
130 if (!smpi_os_values) {
131 smpi_os_values = parse_factor(sg_cfg_get_string("smpi/os"));
132 smpi_register_static(smpi_os_values, xbt_dynar_free_voidp);
134 unsigned int iter = 0;
135 s_smpi_factor_multival_t fact;
137 // Iterate over all the sections that were specified and find the right
138 // value. (fact.factor represents the interval sizes; we want to find the
139 // section with the largest fact.factor that is still <= size.)
140 // Note: parse_factor() (used before) already sorts the dynar we iterate over!
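// Worked example of this lookup (hypothetical factors): with sorted thresholds
// {0, 1024, 65536} and size = 2000, the loop saves the cost computed from the
// 1024 entry (the largest factor <= 2000) and returns it as soon as it reaches
// the 65536 entry.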
141 xbt_dynar_foreach(smpi_os_values, iter, fact) {
142 if (size <= fact.factor) { // Values already too large, use the previously
143 // computed value of current!
144 XBT_DEBUG("os : %f <= %ld return %f", size, fact.factor, current);
147 // If the next section is too large, the current section must be used.
148 // Hence, save the cost, as we might have to use it.
149 current = fact.values[0]+fact.values[1]*size;
152 XBT_DEBUG("os : %f > %ld return %f", size, fact.factor, current);
157 static double smpi_ois(double size)
159 if (!smpi_ois_values) {
160 smpi_ois_values = parse_factor(sg_cfg_get_string("smpi/ois"));
161 smpi_register_static(smpi_ois_values, xbt_dynar_free_voidp);
163 unsigned int iter = 0;
164 s_smpi_factor_multival_t fact;
166 // Iterate over all the sections that were specified and find the right
167 // value. (fact.factor represents the interval sizes; we want to find the
168 // section with the largest fact.factor that is still <= size.)
169 // Note: parse_factor() (used before) already sorts the dynar we iterate over!
170 xbt_dynar_foreach(smpi_ois_values, iter, fact) {
171 if (size <= fact.factor) { // Values already too large, use the previously
172 // computed value of current!
173 XBT_DEBUG("ois : %f <= %ld return %f", size, fact.factor, current);
176 // If the next section is too large, the current section must be used.
177 // Hence, save the cost, as we might have to use it.
178 current = fact.values[0]+fact.values[1]*size;
181 XBT_DEBUG("ois : %f > %ld return %f", size, fact.factor, current);
186 static double smpi_or(double size)
188 if (!smpi_or_values) {
189 smpi_or_values = parse_factor(sg_cfg_get_string("smpi/or"));
190 smpi_register_static(smpi_or_values, xbt_dynar_free_voidp);
192 unsigned int iter = 0;
193 s_smpi_factor_multival_t fact;
195 // Iterate over all the sections that were specified and find the right
196 // value. (fact.factor represents the interval sizes; we want to find the
197 // section with the largest fact.factor that is still <= size.)
198 // Note: parse_factor() (used before) already sorts the dynar we iterate over!
199 xbt_dynar_foreach(smpi_or_values, iter, fact) {
200 if (size <= fact.factor) { // Values already too large, use the previously
201 // computed value of current!
202 XBT_DEBUG("or : %f <= %ld return %f", size, fact.factor, current);
205 // If the next section is too large, the current section must be used.
206 // Hence, save the cost, as we might have to use it.
207 current=fact.values[0]+fact.values[1]*size;
210 XBT_DEBUG("or : %f > %ld return %f", size, fact.factor, current);
215 double smpi_mpi_wtime(){
217 if (smpi_process_initialized() && !smpi_process_finalized() && !smpi_process_get_sampling()) {
219 time = SIMIX_get_clock();
220 // to avoid deadlocks if used as a break condition, such as
221 // while (MPI_Wtime(...) < time_limit) {
224 // because the time will not normally advance when only calls to MPI_Wtime
225 // are made -> deadlock (MPI_Wtime never reaches the time limit)
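// User-side sketch of the pattern this protects against (not code from this file):
//   double deadline = MPI_Wtime() + 10.0;
//   while (MPI_Wtime() < deadline) { /* poll */ }
// Without the sleep below, simulated time would never advance inside such a loop;
// with it, every call moves the clock forward by smpi_wtime_sleep.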
226 if(smpi_wtime_sleep > 0) simcall_process_sleep(smpi_wtime_sleep);
229 time = SIMIX_get_clock();
234 static MPI_Request build_request(void *buf, int count,
235 MPI_Datatype datatype, int src, int dst,
236 int tag, MPI_Comm comm, unsigned flags)
238 MPI_Request request = NULL;
240 void *old_buf = NULL;
242 request = xbt_new(s_smpi_mpi_request_t, 1);
244 s_smpi_subtype_t *subtype = static_cast<s_smpi_subtype_t*>(datatype->substruct);
246 if(((flags & RECV) && (flags & ACCUMULATE)) || (datatype->has_subtype == 1)){
247 // This part handles the problem of non-contiguous memory
249 buf = count==0 ? NULL : xbt_malloc(count*smpi_datatype_size(datatype));
250 if ((datatype->has_subtype == 1) && (flags & SEND)) {
251 subtype->serialize(old_buf, buf, count, datatype->substruct);
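/* Hypothetical illustration: for a strided datatype (e.g. one created with
 * MPI_Type_vector), old_buf points at the user's scattered elements and
 * serialize() packs them into the freshly allocated contiguous 'buf', so that
 * SIMIX only ever ships a flat chunk of count * smpi_datatype_size(datatype)
 * bytes. */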
256 // This part handles the problem of non-contiguous memory (for the
257 // unserialization on the receive side)
258 request->old_buf = old_buf;
259 request->old_type = datatype;
261 request->size = smpi_datatype_size(datatype) * count;
265 request->comm = comm;
266 request->action = NULL;
267 request->flags = flags;
268 request->detached = 0;
269 request->detached_sender = NULL;
270 request->real_src = 0;
272 request->truncated = 0;
273 request->real_size = 0;
274 request->real_tag = 0;
275 if(flags & PERSISTENT)
276 request->refcount = 1;
278 request->refcount = 0;
279 request->op = MPI_REPLACE;
282 if (flags & SEND) smpi_datatype_unuse(datatype);
288 void smpi_empty_status(MPI_Status * status)
290 if(status != MPI_STATUS_IGNORE) {
291 status->MPI_SOURCE = MPI_ANY_SOURCE;
292 status->MPI_TAG = MPI_ANY_TAG;
293 status->MPI_ERROR = MPI_SUCCESS;
298 static void smpi_mpi_request_free_voidp(void* request)
300 MPI_Request req = static_cast<MPI_Request>(request);
301 smpi_mpi_request_free(&req);
304 /* MPI Low level calls */
305 MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
306 int dst, int tag, MPI_Comm comm)
308 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
309 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
310 comm, PERSISTENT | SEND | PREPARED);
314 MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
315 int dst, int tag, MPI_Comm comm)
317 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
318 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
319 comm, PERSISTENT | SSEND | SEND | PREPARED);
323 MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
324 int src, int tag, MPI_Comm comm)
326 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
327 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
328 comm, PERSISTENT | RECV | PREPARED);
332 void smpi_mpi_start(MPI_Request request)
336 xbt_assert(!request->action, "Cannot (re)start a non-finished communication");
337 request->flags &= ~PREPARED;
338 request->flags &= ~FINISHED;
341 if (request->flags & RECV) {
342 print_request("New recv", request);
344 int async_small_thresh = sg_cfg_get_int("smpi/async_small_thresh");
346 xbt_mutex_t mut = smpi_process_mailboxes_mutex();
347 if (async_small_thresh != 0 ||request->flags & RMA)
348 xbt_mutex_acquire(mut);
350 if (async_small_thresh == 0 && !(request->flags & RMA)) {
351 mailbox = smpi_process_mailbox();
353 else if (request->flags & RMA || static_cast<int>(request->size) < async_small_thresh){
354 // We have to check both mailboxes (because SSEND messages are sent to the large mailbox); begin with the more likely one: the small one.
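/* Rough picture of the protocol implemented here (thresholds come from
 * "smpi/async_small_thresh" and "smpi/send_is_detached_thresh"; the sizes below are
 * only illustrative): a small asynchronous message, say 100 bytes, is typically
 * pushed detached into the small mailbox (eager mode), whereas an SSEND or a
 * multi-megabyte message goes through the large mailbox and waits for its matching
 * receive (rendezvous mode); the receiver therefore probes the small mailbox first
 * and falls back to the large one. */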
355 mailbox = smpi_process_mailbox_small();
356 XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
357 smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
360 mailbox = smpi_process_mailbox();
361 XBT_DEBUG("No, nothing in the small mailbox test the other one : %p", mailbox);
362 action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
364 XBT_DEBUG("Still nothing, switch back to the small mailbox : %p", mailbox);
365 mailbox = smpi_process_mailbox_small();
368 XBT_DEBUG("yes there was something for us in the large mailbox");
371 mailbox = smpi_process_mailbox_small();
372 XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
373 smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
376 XBT_DEBUG("No, nothing in the permanent receive mailbox");
377 mailbox = smpi_process_mailbox();
379 XBT_DEBUG("yes there was something for us in the small mailbox");
383 // integrate pseudo-timing for the buffering of small messages; do not bother executing the simcall if it is 0
384 double sleeptime = request->detached ? smpi_or(request->size) : 0.0;
386 simcall_process_sleep(sleeptime);
387 XBT_DEBUG("receiving size of %zu : sleep %f ", request->size, smpi_or(request->size));
390 // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
391 request->real_size=request->size;
392 smpi_datatype_use(request->old_type);
393 smpi_comm_use(request->comm);
394 request->action = simcall_comm_irecv(SIMIX_process_self(), mailbox, request->buf,
395 &request->real_size, &match_recv,
396 !smpi_process_get_replaying()? &smpi_comm_copy_buffer_callback
397 : &smpi_comm_null_copy_buffer_callback,
399 XBT_DEBUG("recv simcall posted");
401 if (async_small_thresh != 0 || (request->flags & RMA))
402 xbt_mutex_release(mut);
406 int receiver = request->dst;
408 int rank = request->src;
409 if (TRACE_smpi_view_internals()) {
410 TRACE_smpi_send(rank, rank, receiver,request->size);
412 print_request("New send", request);
414 //if we are giving back the control to the user without waiting for completion, we have to inject timings
415 double sleeptime = 0.0;
416 if(request->detached || (request->flags & (ISEND|SSEND))){// issend should be treated as isend
417 //isend and send timings may be different
418 sleeptime = (request->flags & ISEND)? smpi_ois(request->size) : smpi_os(request->size);
421 if(sleeptime != 0.0){
422 simcall_process_sleep(sleeptime);
423 XBT_DEBUG("sending size of %zu : sleep %f ", request->size, smpi_os(request->size));
426 int async_small_thresh = sg_cfg_get_int("smpi/async_small_thresh");
428 xbt_mutex_t mut=smpi_process_remote_mailboxes_mutex(receiver);
430 if (async_small_thresh != 0 || (request->flags & RMA))
431 xbt_mutex_acquire(mut);
433 if (!(async_small_thresh != 0 || (request->flags & RMA))) {
434 mailbox = smpi_process_remote_mailbox(receiver);
436 else if (request->flags & RMA || static_cast<int>(request->size) < async_small_thresh) { // eager mode
437 mailbox = smpi_process_remote_mailbox(receiver);
438 XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
439 smx_synchro_t action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, (void*)request);
441 if (! (request->flags & SSEND)){
442 mailbox = smpi_process_remote_mailbox_small(receiver);
443 XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
445 mailbox = smpi_process_remote_mailbox_small(receiver);
446 XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
447 action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, (void*)request);
449 XBT_DEBUG("No, we are first, send to large mailbox");
450 mailbox = smpi_process_remote_mailbox(receiver);
454 XBT_DEBUG("Yes there was something for us in the large mailbox");
457 mailbox = smpi_process_remote_mailbox(receiver);
458 XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)",mailbox, request,request->buf);
461 void* buf = request->buf;
462 if ( (! (request->flags & SSEND)) && (static_cast<int>(request->size) < sg_cfg_get_int("smpi/send_is_detached_thresh"))) {
464 request->detached = 1;
465 XBT_DEBUG("Send request %p is detached", request);
467 if(request->old_type->has_subtype == 0){
468 oldbuf = request->buf;
469 if (!smpi_process_get_replaying() && oldbuf && request->size!=0){
470 if((smpi_privatize_global_variables)
471 && ((char*) request->buf >= smpi_start_data_exe)
472 && ((char*)request->buf < smpi_start_data_exe + smpi_size_data_exe )){
473 XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
474 smpi_switch_data_segment(request->src);
476 buf = xbt_malloc(request->size);
477 memcpy(buf,oldbuf,request->size);
478 XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
483 // we make a copy here, as the size is modified by simix, and we may reuse the request in another send later
484 request->real_size=request->size;
485 smpi_datatype_use(request->old_type);
486 smpi_comm_use(request->comm);
488 simcall_comm_isend(SIMIX_process_from_PID(request->src+1), mailbox, request->size, -1.0,
489 buf, request->real_size,
491 &xbt_free_f, // how to free the userdata if a detached send fails
492 !smpi_process_get_replaying()? &smpi_comm_copy_buffer_callback
493 : &smpi_comm_null_copy_buffer_callback,
495 // detach if msg size < eager/rdv switch limit
497 XBT_DEBUG("send simcall posted");
501 /* FIXME: detached sends are not traceable (request->action == NULL) */
503 simcall_set_category(request->action, TRACE_internal_smpi_get_category());
505 if (async_small_thresh != 0 || request->flags & RMA)
506 xbt_mutex_release(mut);
511 void smpi_mpi_startall(int count, MPI_Request * requests)
514 if(requests==NULL) return;
516 for(i = 0; i < count; i++) {
517 smpi_mpi_start(requests[i]);
521 void smpi_mpi_request_free(MPI_Request * request)
523 if((*request) != MPI_REQUEST_NULL){
524 (*request)->refcount--;
525 if((*request)->refcount<0) xbt_die("wrong refcount");
527 if((*request)->refcount==0){
528 print_request("Destroying", (*request));
530 *request = MPI_REQUEST_NULL;
532 print_request("Decrementing", (*request));
535 xbt_die("freeing an already free request");
540 MPI_Request smpi_rma_send_init(void *buf, int count, MPI_Datatype datatype,
541 int src, int dst, int tag, MPI_Comm comm, MPI_Op op)
543 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
545 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf , count, datatype, src, dst, tag,
546 comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
548 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
549 comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
555 MPI_Request smpi_rma_recv_init(void *buf, int count, MPI_Datatype datatype,
556 int src, int dst, int tag, MPI_Comm comm, MPI_Op op)
558 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
560 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
561 comm, RMA | NON_PERSISTENT | RECV | PREPARED);
563 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
564 comm, RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
571 MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype,
572 int dst, int tag, MPI_Comm comm)
574 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
575 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf , count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
576 comm, PERSISTENT | ISEND | SEND | PREPARED);
580 MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype,
581 int dst, int tag, MPI_Comm comm)
583 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
584 request = build_request(buf==MPI_BOTTOM?(void*)0:buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
585 comm, NON_PERSISTENT | ISEND | SEND);
586 smpi_mpi_start(request);
590 MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype,
591 int dst, int tag, MPI_Comm comm)
593 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
594 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
595 comm, NON_PERSISTENT | ISEND | SSEND | SEND);
596 smpi_mpi_start(request);
600 MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
601 int src, int tag, MPI_Comm comm)
603 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
604 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
605 comm, PERSISTENT | RECV | PREPARED);
609 MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype,
610 int src, int tag, MPI_Comm comm)
612 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
613 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
614 comm, NON_PERSISTENT | RECV);
615 smpi_mpi_start(request);
619 void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src,
620 int tag, MPI_Comm comm, MPI_Status * status)
622 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
623 request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
624 smpi_mpi_wait(&request, status);
630 void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst,
631 int tag, MPI_Comm comm)
633 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
634 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
635 comm, NON_PERSISTENT | SEND);
637 smpi_mpi_start(request);
638 smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
642 void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype,
643 int dst, int tag, MPI_Comm comm)
645 MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
646 request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, smpi_process_index(), smpi_group_index(smpi_comm_group(comm), dst), tag,
647 comm, NON_PERSISTENT | SSEND | SEND);
649 smpi_mpi_start(request);
650 smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
654 void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
655 int dst, int sendtag, void *recvbuf, int recvcount,
656 MPI_Datatype recvtype, int src, int recvtag,
657 MPI_Comm comm, MPI_Status * status)
659 MPI_Request requests[2];
661 int myid=smpi_process_index();
662 if ((smpi_group_index(smpi_comm_group(comm), dst) == myid) && (smpi_group_index(smpi_comm_group(comm), src) == myid)) {
663 smpi_datatype_copy(sendbuf, sendcount, sendtype,
664 recvbuf, recvcount, recvtype);
668 smpi_isend_init(sendbuf, sendcount, sendtype, dst, sendtag, comm);
670 smpi_irecv_init(recvbuf, recvcount, recvtype, src, recvtag, comm);
671 smpi_mpi_startall(2, requests);
672 smpi_mpi_waitall(2, requests, stats);
673 smpi_mpi_request_free(&requests[0]);
674 smpi_mpi_request_free(&requests[1]);
675 if(status != MPI_STATUS_IGNORE) {
676 // Copy receive status
681 int smpi_mpi_get_count(MPI_Status * status, MPI_Datatype datatype)
683 return status->count / smpi_datatype_size(datatype);
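/* Hypothetical caller-side usage of this helper: after
 *   MPI_Recv(buf, 10, MPI_INT, src, tag, comm, &st);
 *   MPI_Get_count(&st, MPI_INT, &n);
 * n ends up as st.count / smpi_datatype_size(MPI_INT), i.e. the number of MPI_INT
 * elements actually received (finish_wait below stores the received byte count in
 * status->count). */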
686 static void finish_wait(MPI_Request * request, MPI_Status * status)
688 MPI_Request req = *request;
689 smpi_empty_status(status);
691 if(!(req->detached && req->flags & SEND)
692 && !(req->flags & PREPARED)){
693 if(status != MPI_STATUS_IGNORE) {
694 int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
695 status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src);
696 status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
697 status->MPI_ERROR = req->truncated ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
698 // this handles the case where the size in the receive differs from the size in the send
699 // FIXME: really this should just contain the count of receive-type blocks,
701 status->count = req->real_size;
704 print_request("Finishing", req);
705 MPI_Datatype datatype = req->old_type;
707 if((req->flags & ACCUMULATE) || (datatype->has_subtype == 1)){
708 if (!smpi_process_get_replaying()){
709 if( smpi_privatize_global_variables
710 && ((char*)req->old_buf >= smpi_start_data_exe)
711 && ((char*)req->old_buf < smpi_start_data_exe + smpi_size_data_exe )
713 XBT_VERB("Privatization : We are unserializing to a zone in global memory - Switch data segment ");
714 smpi_switch_data_segment(smpi_process_index());
718 if(datatype->has_subtype == 1){
719 // This part handles the problem of non-contiguous memory
720 // (i.e., the unserialization on the receive side)
721 s_smpi_subtype_t *subtype = static_cast<s_smpi_subtype_t*>(datatype->substruct);
722 if(req->flags & RECV)
723 subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype) , datatype->substruct, req->op);
724 if(req->detached == 0) free(req->buf);
725 }else if(req->flags & RECV){//apply op on contiguous buffer for accumulate
726 int n =req->real_size/smpi_datatype_size(datatype);
727 smpi_op_apply(req->op, req->buf, req->old_buf, &n, &datatype);
730 smpi_comm_unuse(req->comm);
731 smpi_datatype_unuse(datatype);
735 if (TRACE_smpi_view_internals()) {
736 if(req->flags & RECV){
737 int rank = smpi_process_index();
738 int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
739 TRACE_smpi_recv(rank, src_traced, rank);
743 if(req->detached_sender!=NULL){
744 smpi_mpi_request_free(&(req->detached_sender));
746 if(req->flags & PERSISTENT)
748 req->flags |= FINISHED;
750 smpi_mpi_request_free(request);
754 int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
757 //assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)
759 // to avoid deadlocks if used as a break condition, such as
760 // while (MPI_Test(request, flag, status) && flag) {
762 // because the time will not normally advance when only calls to MPI_Test
763 // are made -> deadlock
764 // multiplier for the sleep time; each failed test increases it, to speed up the simulation
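// Illustrative numbers, assuming the counter grows by one per failed test: with
// smpi_test_sleep = 1e-4, the first unsuccessful test sleeps 1e-4 simulated
// seconds, the next 2e-4, then 3e-4, ..., and nsleeps drops back to 1 as soon as a
// test succeeds, so a tight "while (!flag) MPI_Test(...)" loop iterates less often
// per unit of simulated time.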
765 static int nsleeps = 1;
766 if(smpi_test_sleep > 0) simcall_process_sleep(nsleeps*smpi_test_sleep);
768 smpi_empty_status(status);
770 if (!((*request)->flags & PREPARED)) {
771 if ((*request)->action != NULL)
772 flag = simcall_comm_test((*request)->action);
774 finish_wait(request, status);
775 nsleeps=1;//reset the number of sleeps we will do next time
776 if (*request != MPI_REQUEST_NULL && !((*request)->flags & PERSISTENT))
777 *request = MPI_REQUEST_NULL;
785 int smpi_mpi_testany(int count, MPI_Request requests[], int *index,
792 *index = MPI_UNDEFINED;
794 comms = xbt_dynar_new(sizeof(smx_synchro_t), NULL);
795 map = xbt_new(int, count);
797 for(i = 0; i < count; i++) {
798 if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action &&
799 !(requests[i]->flags & PREPARED)) {
800 xbt_dynar_push(comms, &requests[i]->action);
806 // multiplier for the sleep time; each failed testany increases it, to speed up the simulation
807 static int nsleeps = 1;
808 if(smpi_test_sleep > 0) simcall_process_sleep(nsleeps*smpi_test_sleep);
810 i = simcall_comm_testany(comms);
811 // not MPI_UNDEFINED, as this is a simix return code
814 finish_wait(&requests[*index], status);
815 if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT))
816 requests[*index] = MPI_REQUEST_NULL;
823 //all requests are null or inactive, return true
825 smpi_empty_status(status);
828 xbt_dynar_free(&comms);
834 int smpi_mpi_testall(int count, MPI_Request requests[],
838 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
841 for(i=0; i<count; i++){
842 if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
843 if (smpi_mpi_test(&requests[i], pstat)!=1){
846 requests[i]=MPI_REQUEST_NULL;
849 smpi_empty_status(pstat);
851 if(status != MPI_STATUSES_IGNORE) {
858 void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
860 // FIXME: find another way to avoid busy waiting?
861 // the issue here is that we have to wait on a nonexistent comm
863 smpi_mpi_iprobe(source, tag, comm, &flag, status);
864 XBT_DEBUG("Busy Waiting on probing : %d", flag);
868 void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
870 MPI_Request request =build_request(NULL, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag,
871 comm, PERSISTENT | RECV);
873 // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
874 // (especially when used as a break condition, such as while(MPI_Iprobe(...)) ... )
875 // multiplier for the sleep time; each failed iprobe increases it, to speed up the simulation
876 static int nsleeps = 1;
877 if(smpi_iprobe_sleep > 0) simcall_process_sleep(nsleeps*smpi_iprobe_sleep);
878 // behave like a receive, but don't do it
881 print_request("New iprobe", request);
882 // We have to test both mailboxes, as we don't know in which one we will receive
883 if (sg_cfg_get_int("smpi/async_small_thresh")>0){
884 mailbox = smpi_process_mailbox_small();
885 XBT_DEBUG("trying to probe the perm recv mailbox");
886 request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, (void*)request);
888 if (request->action==NULL){
889 mailbox = smpi_process_mailbox();
890 XBT_DEBUG("trying to probe the other mailbox");
891 request->action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
894 if (request->action){
895 MPI_Request req = (MPI_Request)SIMIX_comm_get_src_data(request->action);
897 if(status != MPI_STATUS_IGNORE && !(req->flags & PREPARED)) {
898 status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
899 status->MPI_TAG = req->tag;
900 status->MPI_ERROR = MPI_SUCCESS;
901 status->count = req->real_size;
903 nsleeps=1;//reset the number of sleeps we will do next time
909 smpi_mpi_request_free(&request);
914 void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
916 print_request("Waiting", *request);
917 if ((*request)->flags & PREPARED) {
918 smpi_empty_status(status);
922 if ((*request)->action != NULL) { // this is not a detached send
923 simcall_comm_wait((*request)->action, -1.0);
925 if((MC_is_active() || MC_record_replay_is_active()) && (*request)->action)
926 (*request)->action->comm.dst_data = NULL; // dangling pointer : dst_data is freed with a wait, need to set it to NULL for system state comparison
929 finish_wait(request, status);
930 if (*request != MPI_REQUEST_NULL && ((*request)->flags & NON_PERSISTENT))
931 *request = MPI_REQUEST_NULL;
932 // FIXME for a detached send, finish_wait is not called:
935 int smpi_mpi_waitany(int count, MPI_Request requests[],
942 index = MPI_UNDEFINED;
944 // Wait for a request to complete
945 comms = xbt_dynar_new(sizeof(smx_synchro_t), NULL);
946 map = xbt_new(int, count);
948 XBT_DEBUG("Wait for one of %d", count);
949 for(i = 0; i < count; i++) {
950 if (requests[i] != MPI_REQUEST_NULL
951 && !(requests[i]->flags & PREPARED)
952 && !(requests[i]->flags & FINISHED)) {
953 if (requests[i]->action != NULL) {
954 XBT_DEBUG("Waiting any %p ", requests[i]);
955 xbt_dynar_push(comms, &requests[i]->action);
959 //This is a finished detached request, let's return this one
960 size=0;//so we free the dynar but don't do the waitany call
962 finish_wait(&requests[i], status);//cleanup if refcount = 0
963 if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags & NON_PERSISTENT))
964 requests[i]=MPI_REQUEST_NULL;//set to null
970 i = simcall_comm_waitany(comms);
972 // not MPI_UNDEFINED, as this is a simix return code
975 finish_wait(&requests[index], status);
976 if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
977 requests[index] = MPI_REQUEST_NULL;
981 xbt_dynar_free(&comms);
984 if (index==MPI_UNDEFINED)
985 smpi_empty_status(status);
990 int smpi_mpi_waitall(int count, MPI_Request requests[],
995 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
996 int retvalue = MPI_SUCCESS;
997 //tag invalid requests in the set
998 if (status != MPI_STATUSES_IGNORE) {
999 for (c = 0; c < count; c++) {
1000 if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL ||
1001 (requests[c]->flags & PREPARED)) {
1002 smpi_empty_status(&status[c]);
1003 } else if (requests[c]->src == MPI_PROC_NULL) {
1004 smpi_empty_status(&status[c]);
1005 status[c].MPI_SOURCE = MPI_PROC_NULL;
1009 for(c = 0; c < count; c++) {
1011 if (MC_is_active() || MC_record_replay_is_active()) {
1012 smpi_mpi_wait(&requests[c], pstat);
1015 index = smpi_mpi_waitany(count, requests, pstat);
1016 if (index == MPI_UNDEFINED)
1018 if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
1019 requests[index]=MPI_REQUEST_NULL;
1021 if (status != MPI_STATUSES_IGNORE) {
1022 status[index] = *pstat;
1023 if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
1024 retvalue = MPI_ERR_IN_STATUS;
1031 int smpi_mpi_waitsome(int incount, MPI_Request requests[], int *indices,
1032 MPI_Status status[])
1034 int i, count, index;
1036 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
1039 for(i = 0; i < incount; i++)
1041 index=smpi_mpi_waitany(incount, requests, pstat);
1042 if(index!=MPI_UNDEFINED){
1043 indices[count] = index;
1045 if(status != MPI_STATUSES_IGNORE) {
1046 status[index] = *pstat;
1048 if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags & NON_PERSISTENT))
1049 requests[index]=MPI_REQUEST_NULL;
1051 return MPI_UNDEFINED;
1057 int smpi_mpi_testsome(int incount, MPI_Request requests[], int *indices,
1058 MPI_Status status[])
1060 int i, count, count_dead;
1062 MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
1066 for(i = 0; i < incount; i++) {
1067 if((requests[i] != MPI_REQUEST_NULL)) {
1068 if(smpi_mpi_test(&requests[i], pstat)) {
1071 if(status != MPI_STATUSES_IGNORE) {
1074 if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->flags & NON_PERSISTENT)
1075 requests[i]=MPI_REQUEST_NULL;
1081 if(count_dead==incount)return MPI_UNDEFINED;
1085 void smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root,
1088 smpi_coll_tuned_bcast_binomial_tree(buf, count, datatype, root, comm);
1091 void smpi_mpi_barrier(MPI_Comm comm)
1093 smpi_coll_tuned_barrier_ompi_basic_linear(comm);
1096 void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
1097 void *recvbuf, int recvcount, MPI_Datatype recvtype,
1098 int root, MPI_Comm comm)
1100 int system_tag = COLL_TAG_GATHER;
1101 int rank, size, src, index;
1102 MPI_Aint lb = 0, recvext = 0;
1103 MPI_Request *requests;
1105 rank = smpi_comm_rank(comm);
1106 size = smpi_comm_size(comm);
1108 // Send buffer to root
1109 smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
1111 // FIXME: check for errors
1112 smpi_datatype_extent(recvtype, &lb, &recvext);
1113 // Local copy from root
1114 smpi_datatype_copy(sendbuf, sendcount, sendtype,
1115 (char *)recvbuf + root * recvcount * recvext, recvcount, recvtype);
1116 // Receive buffers from senders
1117 requests = xbt_new(MPI_Request, size - 1);
1119 for(src = 0; src < size; src++) {
1121 requests[index] = smpi_irecv_init((char *)recvbuf + src * recvcount * recvext,
1122 recvcount, recvtype,
1123 src, system_tag, comm);
1127 // Wait for completion of irecv's.
1128 smpi_mpi_startall(size - 1, requests);
1129 smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
1130 for(src = 0; src < size-1; src++) {
1131 smpi_mpi_request_free(&requests[src]);
1138 void smpi_mpi_reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
1139 MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
1143 int rank = smpi_process_index();
1146 /* arbitrarily choose root as rank 0 */
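/* Hedged worked example of the strategy used here: with 2 ranks and
 * recvcounts = {2, 3}, rank 0 first reduces all 5 elements into tmpbuf, then the
 * scatterv below (with displs = {0, 2}) hands elements 0-1 of the reduced vector
 * back to rank 0 and elements 2-4 to rank 1. */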
1147 size = smpi_comm_size(comm);
1149 displs = xbt_new(int, size);
1150 for (i = 0; i < size; i++) {
1152 count += recvcounts[i];
1154 tmpbuf=(void*)smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype));
1156 mpi_coll_reduce_fun(sendbuf, tmpbuf, count, datatype, op, 0, comm);
1157 smpi_mpi_scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf,
1158 recvcounts[rank], datatype, 0, comm);
1160 smpi_free_tmp_buffer(tmpbuf);
1163 void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
1164 void *recvbuf, int *recvcounts, int *displs,
1165 MPI_Datatype recvtype, int root, MPI_Comm comm)
1167 int system_tag = COLL_TAG_GATHERV;
1168 int rank, size, src, index;
1169 MPI_Aint lb = 0, recvext = 0;
1170 MPI_Request *requests;
1172 rank = smpi_comm_rank(comm);
1173 size = smpi_comm_size(comm);
1175 // Send buffer to root
1176 smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
1178 // FIXME: check for errors
1179 smpi_datatype_extent(recvtype, &lb, &recvext);
1180 // Local copy from root
1181 smpi_datatype_copy(sendbuf, sendcount, sendtype,
1182 (char *)recvbuf + displs[root] * recvext,
1183 recvcounts[root], recvtype);
1184 // Receive buffers from senders
1185 requests = xbt_new(MPI_Request, size - 1);
1187 for(src = 0; src < size; src++) {
1190 smpi_irecv_init((char *)recvbuf + displs[src] * recvext,
1191 recvcounts[src], recvtype, src, system_tag, comm);
1195 // Wait for completion of irecv's.
1196 smpi_mpi_startall(size - 1, requests);
1197 smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
1198 for(src = 0; src < size-1; src++) {
1199 smpi_mpi_request_free(&requests[src]);
1205 void smpi_mpi_allgather(void *sendbuf, int sendcount,
1206 MPI_Datatype sendtype, void *recvbuf,
1207 int recvcount, MPI_Datatype recvtype,
1210 int system_tag = COLL_TAG_ALLGATHER;
1211 int rank, size, other, index;
1212 MPI_Aint lb = 0, recvext = 0;
1213 MPI_Request *requests;
1215 rank = smpi_comm_rank(comm);
1216 size = smpi_comm_size(comm);
1217 // FIXME: check for errors
1218 smpi_datatype_extent(recvtype, &lb, &recvext);
1219 // Local copy from self
1220 smpi_datatype_copy(sendbuf, sendcount, sendtype,
1221 (char *)recvbuf + rank * recvcount * recvext, recvcount,
1223 // Send/Recv buffers to/from others;
1224 requests = xbt_new(MPI_Request, 2 * (size - 1));
1226 for(other = 0; other < size; other++) {
1229 smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
1232 requests[index] = smpi_irecv_init((char *)recvbuf + other * recvcount * recvext,
1233 recvcount, recvtype, other,
1238 // Wait for completion of all comms.
1239 smpi_mpi_startall(2 * (size - 1), requests);
1240 smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
1241 for(other = 0; other < 2*(size-1); other++) {
1242 smpi_mpi_request_free(&requests[other]);
1247 void smpi_mpi_allgatherv(void *sendbuf, int sendcount,
1248 MPI_Datatype sendtype, void *recvbuf,
1249 int *recvcounts, int *displs,
1250 MPI_Datatype recvtype, MPI_Comm comm)
1252 int system_tag = COLL_TAG_ALLGATHERV;
1253 int rank, size, other, index;
1254 MPI_Aint lb = 0, recvext = 0;
1255 MPI_Request *requests;
1257 rank = smpi_comm_rank(comm);
1258 size = smpi_comm_size(comm);
1259 // FIXME: check for errors
1260 smpi_datatype_extent(recvtype, &lb, &recvext);
1261 // Local copy from self
1262 smpi_datatype_copy(sendbuf, sendcount, sendtype,
1263 (char *)recvbuf + displs[rank] * recvext,
1264 recvcounts[rank], recvtype);
1265 // Send/Recv buffers to/from others
1266 requests = xbt_new(MPI_Request, 2 * (size - 1));
1268 for(other = 0; other < size; other++) {
1271 smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,
1275 smpi_irecv_init((char *)recvbuf + displs[other] * recvext, recvcounts[other],
1276 recvtype, other, system_tag, comm);
1280 // Wait for completion of all comms.
1281 smpi_mpi_startall(2 * (size - 1), requests);
1282 smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
1283 for(other = 0; other < 2*(size-1); other++) {
1284 smpi_mpi_request_free(&requests[other]);
1289 void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
1290 void *recvbuf, int recvcount, MPI_Datatype recvtype,
1291 int root, MPI_Comm comm)
1293 int system_tag = COLL_TAG_SCATTER;
1294 int rank, size, dst, index;
1295 MPI_Aint lb = 0, sendext = 0;
1296 MPI_Request *requests;
1298 rank = smpi_comm_rank(comm);
1299 size = smpi_comm_size(comm);
1301 // Recv buffer from root
1302 smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
1305 // FIXME: check for errors
1306 smpi_datatype_extent(sendtype, &lb, &sendext);
1307 // Local copy from root
1308 if(recvbuf!=MPI_IN_PLACE){
1309 smpi_datatype_copy((char *)sendbuf + root * sendcount * sendext,
1310 sendcount, sendtype, recvbuf, recvcount, recvtype);
1312 // Send buffers to receivers
1313 requests = xbt_new(MPI_Request, size - 1);
1315 for(dst = 0; dst < size; dst++) {
1317 requests[index] = smpi_isend_init((char *)sendbuf + dst * sendcount * sendext,
1318 sendcount, sendtype, dst,
1323 // Wait for completion of isend's.
1324 smpi_mpi_startall(size - 1, requests);
1325 smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
1326 for(dst = 0; dst < size-1; dst++) {
1327 smpi_mpi_request_free(&requests[dst]);
1333 void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs,
1334 MPI_Datatype sendtype, void *recvbuf, int recvcount,
1335 MPI_Datatype recvtype, int root, MPI_Comm comm)
1337 int system_tag = COLL_TAG_SCATTERV;
1338 int rank, size, dst, index;
1339 MPI_Aint lb = 0, sendext = 0;
1340 MPI_Request *requests;
1342 rank = smpi_comm_rank(comm);
1343 size = smpi_comm_size(comm);
1345 // Recv buffer from root
1346 smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm,
1349 // FIXME: check for errors
1350 smpi_datatype_extent(sendtype, &lb, &sendext);
1351 // Local copy from root
1352 if(recvbuf!=MPI_IN_PLACE){
1353 smpi_datatype_copy((char *)sendbuf + displs[root] * sendext, sendcounts[root],
1354 sendtype, recvbuf, recvcount, recvtype);
1356 // Send buffers to receivers
1357 requests = xbt_new(MPI_Request, size - 1);
1359 for(dst = 0; dst < size; dst++) {
1362 smpi_isend_init((char *)sendbuf + displs[dst] * sendext, sendcounts[dst],
1363 sendtype, dst, system_tag, comm);
1367 // Wait for completion of isend's.
1368 smpi_mpi_startall(size - 1, requests);
1369 smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
1370 for(dst = 0; dst < size-1; dst++) {
1371 smpi_mpi_request_free(&requests[dst]);
1377 void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count,
1378 MPI_Datatype datatype, MPI_Op op, int root,
1381 int system_tag = COLL_TAG_REDUCE;
1382 int rank, size, src, index;
1383 MPI_Aint lb = 0, dataext = 0;
1384 MPI_Request *requests;
1388 char* sendtmpbuf = (char*) sendbuf;
1389 if( sendbuf == MPI_IN_PLACE ) {
1390 sendtmpbuf = (char *)smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype));
1391 smpi_datatype_copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
1394 rank = smpi_comm_rank(comm);
1395 size = smpi_comm_size(comm);
1396 // non-commutative case: use a working algorithm from OpenMPI
1397 if(!smpi_op_is_commute(op)){
1398 smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count,
1399 datatype, op, root, comm);
1404 // Send buffer to root
1405 smpi_mpi_send(sendtmpbuf, count, datatype, root, system_tag, comm);
1407 // FIXME: check for errors
1408 smpi_datatype_extent(datatype, &lb, &dataext);
1409 // Local copy from root
1410 if (sendtmpbuf && recvbuf)
1411 smpi_datatype_copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
1412 // Receive buffers from senders
1413 //TODO: make a MPI_barrier here ?
1414 requests = xbt_new(MPI_Request, size - 1);
1415 tmpbufs = xbt_new(void *, size - 1);
1417 for(src = 0; src < size; src++) {
1419 // FIXME: possibly overkill when we have contiguous/non-contiguous data
1421 if (!smpi_process_get_replaying())
1422 tmpbufs[index] = xbt_malloc(count * dataext);
1424 tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
1426 smpi_irecv_init(tmpbufs[index], count, datatype, src,
1431 // Wait for completion of irecv's.
1432 smpi_mpi_startall(size - 1, requests);
1433 for(src = 0; src < size - 1; src++) {
1434 index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
1435 XBT_DEBUG("finished waiting any request with index %d", index);
1436 if(index == MPI_UNDEFINED) {
1439 smpi_mpi_request_free(&requests[index]);
1441 if(op) /* op can be MPI_OP_NULL that does nothing */
1442 smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
1444 for(index = 0; index < size - 1; index++) {
1445 smpi_free_tmp_buffer(tmpbufs[index]);
1450 if( sendbuf == MPI_IN_PLACE ) {
1451 smpi_free_tmp_buffer(sendtmpbuf);
1456 void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count,
1457 MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
1459 smpi_mpi_reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
1460 smpi_mpi_bcast(recvbuf, count, datatype, 0, comm);
1463 void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count,
1464 MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
1466 int system_tag = -888;
1467 int rank, size, other, index;
1468 MPI_Aint lb = 0, dataext = 0;
1469 MPI_Request *requests;
1472 rank = smpi_comm_rank(comm);
1473 size = smpi_comm_size(comm);
1475 // FIXME: check for errors
1476 smpi_datatype_extent(datatype, &lb, &dataext);
1478 // Local copy from self
1479 smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
1481 // Send/Recv buffers to/from others;
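/* Small illustration of the communication pattern set up below (op = sum, 3 ranks):
 * rank 2 posts irecvs for the contributions of ranks 0 and 1 and folds them into
 * its own copy, ending with x0 + x1 + x2; rank 0 receives nothing and keeps x0.
 * In general each rank receives from every lower rank and sends to every higher one. */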
1482 requests = xbt_new(MPI_Request, size - 1);
1483 tmpbufs = xbt_new(void *, rank);
1485 for(other = 0; other < rank; other++) {
1486 // FIXME: possibly overkill when we have contiguous/non-contiguous data
1488 tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
1490 smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,
1494 for(other = rank + 1; other < size; other++) {
1496 smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
1499 // Wait for completion of all comms.
1500 smpi_mpi_startall(size - 1, requests);
1502 if(smpi_op_is_commute(op)){
1503 for(other = 0; other < size - 1; other++) {
1504 index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
1505 if(index == MPI_UNDEFINED) {
1509 // The request index is below rank: it's an irecv
1510 smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
1514 // non-commutative case: wait in order
1515 for(other = 0; other < size - 1; other++) {
1516 smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
1518 smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
1522 for(index = 0; index < rank; index++) {
1523 smpi_free_tmp_buffer(tmpbufs[index]);
1525 for(index = 0; index < size-1; index++) {
1526 smpi_mpi_request_free(&requests[index]);
1532 void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count,
1533 MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
1535 int system_tag = -888;
1536 int rank, size, other, index;
1537 MPI_Aint lb = 0, dataext = 0;
1538 MPI_Request *requests;
1540 int recvbuf_is_empty=1;
1541 rank = smpi_comm_rank(comm);
1542 size = smpi_comm_size(comm);
1544 // FIXME: check for errors
1545 smpi_datatype_extent(datatype, &lb, &dataext);
1547 // Send/Recv buffers to/from others;
1548 requests = xbt_new(MPI_Request, size - 1);
1549 tmpbufs = xbt_new(void *, rank);
1551 for(other = 0; other < rank; other++) {
1552 // FIXME: possibly overkill when we have contiguous/non-contiguous data
1554 tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
1556 smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag,
1560 for(other = rank + 1; other < size; other++) {
1562 smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
1565 // Wait for completion of all comms.
1566 smpi_mpi_startall(size - 1, requests);
1567 if(smpi_op_is_commute(op)){
1568 for(other = 0; other < size - 1; other++) {
1569 index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
1570 if(index == MPI_UNDEFINED) {
1574 if(recvbuf_is_empty){
1575 smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
1578 // The request index is below rank: it's an irecv
1579 smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
1583 // non-commutative case: wait in order
1584 for(other = 0; other < size - 1; other++) {
1585 smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
1587 if(recvbuf_is_empty){
1588 smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
1590 }else smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
1594 for(index = 0; index < rank; index++) {
1595 smpi_free_tmp_buffer(tmpbufs[index]);
1597 for(index = 0; index < size-1; index++) {
1598 smpi_mpi_request_free(&requests[index]);