X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/3fa78c2064c144ac3b4035142261bda6cead8211..9c300852ad4204e21d42402f7d1bbb266cf0b27f:/src/smpi/smpi_base.cpp

diff --git a/src/smpi/smpi_base.cpp b/src/smpi/smpi_base.cpp
index 9ad417780f..bd20dacbc0 100644
--- a/src/smpi/smpi_base.cpp
+++ b/src/smpi/smpi_base.cpp
@@ -5,6 +5,8 @@
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
 #include <xbt/config.hpp>
+#include <algorithm>
+#include <boost/tokenizer.hpp>
 
 #include "private.h"
 #include "xbt/virtu.h"
@@ -17,11 +19,11 @@
 #include "simgrid/sg_config.h"
 #include "colls/colls.h"
 
-#include "src/simix/SynchroComm.hpp"
+#include "src/kernel/activity/SynchroComm.hpp"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
 
-static int match_recv(void* a, void* b, smx_synchro_t ignored) {
+static int match_recv(void* a, void* b, smx_activity_t ignored) {
   MPI_Request ref = static_cast<MPI_Request>(a);
   MPI_Request req = static_cast<MPI_Request>(b);
   XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
@@ -44,7 +46,7 @@ static int match_recv(void* a, void* b, smx_synchro_t ignored) {
   }else return 0;
 }
 
-static int match_send(void* a, void* b,smx_synchro_t ignored) {
+static int match_send(void* a, void* b,smx_activity_t ignored) {
   MPI_Request ref = static_cast<MPI_Request>(a);
   MPI_Request req = static_cast<MPI_Request>(b);
   XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
@@ -71,86 +73,94 @@ static int match_send(void* a, void* b,smx_synchro_t ignored) {
 // These are taken from surf/network.c and generalized to have more values for each factor
 typedef struct s_smpi_factor_multival *smpi_os_factor_multival_t;
 typedef struct s_smpi_factor_multival { // FIXME: this should be merged (deduplicated) with s_smpi_factor defined in network_smpi.c
-  long factor;
-  int nb_values;
-  double values[4];//arbitrary set to 4
+  size_t factor=0;
+  std::vector<double> values;
 } s_smpi_factor_multival_t;
 
-xbt_dynar_t smpi_os_values = NULL;
-xbt_dynar_t smpi_or_values = NULL;
-xbt_dynar_t smpi_ois_values = NULL;
+std::vector<s_smpi_factor_multival_t> smpi_os_values;
+std::vector<s_smpi_factor_multival_t> smpi_or_values;
+std::vector<s_smpi_factor_multival_t> smpi_ois_values;
 
 static simgrid::config::Flag<double> smpi_wtime_sleep(
   "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
+static simgrid::config::Flag<double> smpi_init_sleep(
+  "smpi/init", "Time to inject inside a call to MPI_Init", 0.0);
 static simgrid::config::Flag<double> smpi_iprobe_sleep(
   "smpi/iprobe", "Minimum time to inject inside a call to MPI_Iprobe", 1e-4);
 static simgrid::config::Flag<double> smpi_test_sleep(
   "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
 
-static int factor_cmp(const void *pa, const void *pb)
+static std::vector<s_smpi_factor_multival_t> parse_factor(const char *smpi_coef_string)
 {
-  return ((static_cast<const s_smpi_factor_multival_t*>(pa))->factor > (static_cast<const s_smpi_factor_multival_t*>(pb))->factor) ? 1 :
-         ((static_cast<const s_smpi_factor_multival_t*>(pa))->factor < (static_cast<const s_smpi_factor_multival_t*>(pb))->factor) ? -1 : 0;
-}
-
-static xbt_dynar_t parse_factor(const char *smpi_coef_string)
-{
-  char *value = NULL;
-  unsigned int iter = 0;
-  s_smpi_factor_multival_t fact;
-  fact.nb_values=0;
-  unsigned int i=0;
-  xbt_dynar_t radical_elements2 = NULL;
-
-  xbt_dynar_t smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_multival_t), NULL);
-  xbt_dynar_t radical_elements = xbt_str_split(smpi_coef_string, ";");
-  xbt_dynar_foreach(radical_elements, iter, value) {
-    memset(&fact, 0, sizeof(s_smpi_factor_multival_t));
-    radical_elements2 = xbt_str_split(value, ":");
-    if (xbt_dynar_length(radical_elements2) <2 || xbt_dynar_length(radical_elements2) > 5)
+  std::vector<s_smpi_factor_multival_t> smpi_factor;
+
+  /** Setup the tokenizer that parses the string **/
+  typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
+  boost::char_separator<char> sep(";");
+  boost::char_separator<char> factor_separator(":");
+  std::string tmp_string(smpi_coef_string);
+  Tokenizer tokens(tmp_string, sep);
+
+  /** 
+   * Iterate over patterns like A:B:C:D;E:F;G:H
+   * These will be broken down into:
+   * A --> B, C, D
+   * E --> F
+   * G --> H
+   */
+  for (Tokenizer::iterator token_iter = tokens.begin();
+       token_iter != tokens.end(); token_iter++) {
+XBT_DEBUG("token : %s", token_iter->c_str());
+    Tokenizer factor_values(*token_iter, factor_separator);
+    s_smpi_factor_multival_t fact;
+    if (factor_values.begin() == factor_values.end()) {
       xbt_die("Malformed radical for smpi factor: '%s'", smpi_coef_string);
-    for(i =0; i<xbt_dynar_length(radical_elements2); i++ ){
-      char *errmsg;
-      if (i==0) {
-        errmsg = bprintf("Invalid factor in chunk #%d: %%s", iter+1);
-        fact.factor = xbt_str_parse_int(xbt_dynar_get_as(radical_elements2, i, char *), errmsg);
-      } else {
-        errmsg = bprintf("Invalid factor value %d in chunk #%d: %%s", i, iter+1);
-        fact.values[fact.nb_values] = xbt_str_parse_double(xbt_dynar_get_as(radical_elements2, i, char *), errmsg);
-        fact.nb_values++;
+    }
+    unsigned int iteration = 0;
+    for (Tokenizer::iterator factor_iter = factor_values.begin();
+         factor_iter != factor_values.end(); factor_iter++, iteration++) {
+      char *errmsg;
+      if (factor_iter == factor_values.begin()) { /* first element */
+        errmsg = bprintf("Invalid factor in chunk #%zu: %%s", smpi_factor.size()+1);
+        fact.factor = xbt_str_parse_int(factor_iter->c_str(), errmsg);
+      }
+      else {
+        errmsg = bprintf("Invalid factor value %d in chunk #%zu: %%s", iteration, smpi_factor.size()+1);
+        fact.values.push_back(xbt_str_parse_double(factor_iter->c_str(), errmsg));
       }
       xbt_free(errmsg);
     }
-    xbt_dynar_push_as(smpi_factor, s_smpi_factor_multival_t, fact);
-    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
-    xbt_dynar_free(&radical_elements2);
+    smpi_factor.push_back(fact);
+    XBT_DEBUG("smpi_factor:\t%zu : %zu values, first: %f", fact.factor, smpi_factor.size(), fact.values[0]);
   }
-  xbt_dynar_free(&radical_elements);
-  xbt_dynar_sort(smpi_factor, &factor_cmp);
-  xbt_dynar_foreach(smpi_factor, iter, fact) {
-    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
+  std::sort(smpi_factor.begin(), smpi_factor.end(),
+            [](const s_smpi_factor_multival_t &pa,
+               const s_smpi_factor_multival_t &pb) {
+              return (pa.factor < pb.factor);
+            });
+  for (auto& fact : smpi_factor) {
+    XBT_DEBUG("smpi_factor:\t%zu : %zu values, first: %f", fact.factor, smpi_factor.size() ,fact.values[0]);
   }
+
+  return smpi_factor;
 }
 
-static double smpi_os(double size)
+static double smpi_os(size_t size)
 {
-  if (smpi_os_values == NULL) {
+  if (smpi_os_values.empty()) {
     smpi_os_values = parse_factor(xbt_cfg_get_string("smpi/os"));
-    smpi_register_static(smpi_os_values, xbt_dynar_free_voidp);
   }
-  unsigned int iter = 0;
-  s_smpi_factor_multival_t fact;
-  double current=0.0;
+  double current=smpi_os_values.empty()?0.0:smpi_os_values[0].values[0]+smpi_os_values[0].values[1]*size;
   // Iterate over all the sections that were specified and find the right
   // value. (fact.factor represents the interval sizes; we want to find the
   // section that has fact.factor <= size and no other such fact.factor <= size)
-  // Note: parse_factor() (used before) already sorts the dynar we iterate over!
-  xbt_dynar_foreach(smpi_os_values, iter, fact) {
+  // Note: parse_factor() (used before) already sorts the vector we iterate over!
+  for (auto& fact : smpi_os_values) {
     if (size <= fact.factor) { // Values already too large, use the previously
                                // computed value of current!
-      XBT_DEBUG("os : %f <= %ld return %f", size, fact.factor, current);
+      XBT_DEBUG("os : %zu <= %zu return %.10f", size, fact.factor, current);
       return current;
     }else{
       // If the next section is too large, the current section must be used.
@@ -158,26 +168,23 @@ static double smpi_os(double size)
       current = fact.values[0]+fact.values[1]*size;
     }
   }
-  XBT_DEBUG("os : %f > %ld return %f", size, fact.factor, current);
+  XBT_DEBUG("Searching for smpi/os: %zu is larger than the largest boundary, return %.10f", size, current);
 
   return current;
 }
 
-static double smpi_ois(double size)
+static double smpi_ois(size_t size)
 {
-  if (smpi_ois_values == NULL) {
+  if (smpi_ois_values.empty()) {
     smpi_ois_values = parse_factor(xbt_cfg_get_string("smpi/ois"));
-    smpi_register_static(smpi_ois_values, xbt_dynar_free_voidp);
   }
-  unsigned int iter = 0;
-  s_smpi_factor_multival_t fact;
-  double current=0.0;
+  double current=smpi_ois_values.empty()?0.0:smpi_ois_values[0].values[0]+smpi_ois_values[0].values[1]*size;
   // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
   // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
-  // Note: parse_factor() (used before) already sorts the dynar we iterate over!
-  xbt_dynar_foreach(smpi_ois_values, iter, fact) {
+  // Note: parse_factor() (used before) already sorts the vector we iterate over!
+  for (auto& fact : smpi_ois_values) {
     if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
-      XBT_DEBUG("ois : %f <= %ld return %f", size, fact.factor, current);
+      XBT_DEBUG("ois : %zu <= %zu return %.10f", size, fact.factor, current);
       return current;
     }else{
       // If the next section is too large, the current section must be used.
@@ -185,27 +192,25 @@ static double smpi_ois(double size)
       current = fact.values[0]+fact.values[1]*size;
     }
   }
-  XBT_DEBUG("ois : %f > %ld return %f", size, fact.factor, current);
+  XBT_DEBUG("Searching for smpi/ois: %zu is larger than the largest boundary, return %.10f", size, current);
 
   return current;
 }
 
-static double smpi_or(double size)
+static double smpi_or(size_t size)
 {
-  if (smpi_or_values == NULL) {
+  if (smpi_or_values.empty()) {
     smpi_or_values = parse_factor(xbt_cfg_get_string("smpi/or"));
-    smpi_register_static(smpi_or_values, xbt_dynar_free_voidp);
   }
-  unsigned int iter = 0;
-  s_smpi_factor_multival_t fact;
-  double current=0.0;
+
+  double current=smpi_or_values.empty()?0.0:smpi_or_values.front().values[0]+smpi_or_values.front().values[1]*size;
+
   // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
   // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
-  // Note: parse_factor() (used before) already sorts the dynar we iterate over!
-  xbt_dynar_foreach(smpi_or_values, iter, fact) {
-    if (size <= fact.factor) { // Values already too large, use the previously
-                               // computed value of current!
-      XBT_DEBUG("or : %f <= %ld return %f", size, fact.factor, current);
+  // Note: parse_factor() (used before) already sorts the vector we iterate over!
+  for (auto fact : smpi_or_values) {
+    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
+ XBT_DEBUG("or : %zu <= %zu return %.10f", size, fact.factor, current); return current; } else { // If the next section is too large, the current section must be used. @@ -213,16 +218,19 @@ static double smpi_or(double size) current=fact.values[0]+fact.values[1]*size; } } - XBT_DEBUG("or : %f > %ld return %f", size, fact.factor, current); + XBT_DEBUG("smpi_or: %zu is larger than largest boundary, return %.10f", size, current); return current; } +void smpi_mpi_init() { + if(smpi_init_sleep > 0) + simcall_process_sleep(smpi_init_sleep); +} + double smpi_mpi_wtime(){ double time; - if (smpi_process_initialized() != 0 && - smpi_process_finalized() == 0 && - smpi_process_get_sampling() == 0) { + if (smpi_process_initialized() != 0 && smpi_process_finalized() == 0 && smpi_process_get_sampling() == 0) { smpi_bench_end(); time = SIMIX_get_clock(); // to avoid deadlocks if used as a break condition, such as @@ -243,9 +251,9 @@ double smpi_mpi_wtime(){ static MPI_Request build_request(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags) { - MPI_Request request = NULL; + MPI_Request request = nullptr; - void *old_buf = NULL; + void *old_buf = nullptr; request = xbt_new(s_smpi_mpi_request_t, 1); @@ -254,38 +262,37 @@ static MPI_Request build_request(void *buf, int count, MPI_Datatype datatype, in if((((flags & RECV) != 0) && ((flags & ACCUMULATE) !=0)) || (datatype->sizeof_substruct != 0)){ // This part handles the problem of non-contiguous memory old_buf = buf; - buf = count==0 ? NULL : xbt_malloc(count*smpi_datatype_size(datatype)); + buf = count==0 ? nullptr : xbt_malloc(count*smpi_datatype_size(datatype)); if ((datatype->sizeof_substruct != 0) && ((flags & SEND) != 0)) { subtype->serialize(old_buf, buf, count, datatype->substruct); } } - request->buf = buf; + request->buf = buf; // This part handles the problem of non-contiguous memory (for the unserialisation at the reception) - request->old_buf = old_buf; + request->old_buf = old_buf; request->old_type = datatype; request->size = smpi_datatype_size(datatype) * count; smpi_datatype_use(datatype); - request->src = src; - request->dst = dst; - request->tag = tag; + request->src = src; + request->dst = dst; + request->tag = tag; request->comm = comm; smpi_comm_use(request->comm); - request->action = NULL; - request->flags = flags; - request->detached = 0; - request->detached_sender = NULL; - request->real_src = 0; - - request->truncated = 0; - request->real_size = 0; - request->real_tag = 0; - if(flags & PERSISTENT) + request->action = nullptr; + request->flags = flags; + request->detached = 0; + request->detached_sender = nullptr; + request->real_src = 0; + request->truncated = 0; + request->real_size = 0; + request->real_tag = 0; + if (flags & PERSISTENT) request->refcount = 1; else request->refcount = 0; - request->op = MPI_REPLACE; + request->op = MPI_REPLACE; request->send = 0; request->recv = 0; @@ -312,8 +319,8 @@ static void smpi_mpi_request_free_voidp(void* request) MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { - MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */ - request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(), + MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ + request = build_request(buf==MPI_BOTTOM ? 
                           smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | SEND | PREPARED);
   return request;
 }
 
@@ -321,8 +328,8 @@ MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
 MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
                                 int dst, int tag, MPI_Comm comm)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
-  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                           smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
   return request;
 }
 
@@ -330,8 +337,8 @@ MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
 MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
                                int src, int tag, MPI_Comm comm)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
-  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype,
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype,
                           src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src),
                           smpi_process_index(), tag, comm, PERSISTENT | RECV | PREPARED);
   return request;
@@ -341,7 +348,7 @@ void smpi_mpi_start(MPI_Request request)
 {
   smx_mailbox_t mailbox;
 
-  xbt_assert(!request->action, "Cannot (re)start a non-finished communication");
+  xbt_assert(request->action == nullptr, "Cannot (re-)start unfinished communication");
   request->flags &= ~PREPARED;
   request->flags &= ~FINISHED;
   request->refcount++;
@@ -357,44 +364,41 @@ void smpi_mpi_start(MPI_Request request)
 
     if (async_small_thresh == 0 && (request->flags & RMA) == 0 ) {
       mailbox = smpi_process_mailbox();
-    } else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh){
-      //We have to check both mailboxes (because SSEND messages are sent to the large mbox).
-      //begin with the more appropriate one : the small one.
+    }
+    else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) {
+      //We have to check both mailboxes (because SSEND messages are sent to the large mbox).
+      //begin with the more appropriate one : the small one.
       mailbox = smpi_process_mailbox_small();
       XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
-      smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
+      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
 
-      if(action ==NULL){
+      if (action == nullptr) {
         mailbox = smpi_process_mailbox();
         XBT_DEBUG("No, nothing in the small mailbox test the other one : %p", mailbox);
         action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
-        if(action ==NULL){
+        if (action == nullptr) {
           XBT_DEBUG("Still nothing, switch back to the small mailbox : %p", mailbox);
           mailbox = smpi_process_mailbox_small();
-        }
-      }else{
+        }
+      }
+      else {
        XBT_DEBUG("yes there was something for us in the large mailbox");
      }
-    }else{
+    }
+    else {
       mailbox = smpi_process_mailbox_small();
       XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
-      smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
+      smx_activity_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
 
-      if(action ==NULL){
+      if (action == nullptr) {
         XBT_DEBUG("No, nothing in the permanent receive mailbox");
         mailbox = smpi_process_mailbox();
-      }else{
+      }
+      else {
        XBT_DEBUG("yes there was something for us in the small mailbox");
      }
    }
 
-    //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
-    double sleeptime = (request->detached != 0) ? smpi_or(request->size) : 0.0;
-    if(sleeptime >= 0.0){
-      simcall_process_sleep(sleeptime);
-      XBT_DEBUG("receiving size of %zu : sleep %f ", request->size, smpi_or(request->size));
-    }
-
     // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
     request->real_size=request->size;
     request->action = simcall_comm_irecv(SIMIX_process_self(), mailbox, request->buf, &request->real_size, &match_recv,
@@ -404,7 +408,8 @@ void smpi_mpi_start(MPI_Request request)
 
     if (async_small_thresh != 0 || (request->flags & RMA) != 0 )
       xbt_mutex_release(mut);
-  } else {
+  }
+  else { /* the RECV flag was not set, so this is a send */
     int receiver = request->dst;
 
     int rank = request->src;
@@ -413,17 +418,40 @@ void smpi_mpi_start(MPI_Request request)
     }
     print_request("New send", request);
 
+    void* buf = request->buf;
+    if ( (request->flags & SSEND) == 0
+         && ( (request->flags & RMA) != 0 || static_cast<int>(request->size) < xbt_cfg_get_int("smpi/send-is-detached-thresh") ) ) {
+      void *oldbuf = nullptr;
+      request->detached = 1;
+      XBT_DEBUG("Send request %p is detached", request);
+      request->refcount++;
+      if(request->old_type->sizeof_substruct == 0){
+        oldbuf = request->buf;
+        if (!smpi_process_get_replaying() && oldbuf != nullptr && request->size!=0){
+          if((smpi_privatize_global_variables != 0)
+            && (static_cast<char*>(request->buf) >= smpi_start_data_exe)
+            && (static_cast<char*>(request->buf) < smpi_start_data_exe + smpi_size_data_exe )){
+            XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
+            smpi_switch_data_segment(request->src);
+          }
+          buf = xbt_malloc(request->size);
+          memcpy(buf,oldbuf,request->size);
+          XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
+        }
+      }
+    }
+
     //if we are giving back the control to the user without waiting for completion, we have to inject timings
     double sleeptime = 0.0;
-    if(request->detached != 0 || (request->flags & (ISEND|SSEND))){// issend should be treated as isend
+    if(request->detached != 0 || ((request->flags & (ISEND|SSEND)) != 0)){// issend should be treated as isend
       //isend and send timings may be different
-      sleeptime = ((request->flags & ISEND) != 0)? smpi_ois(request->size) : smpi_os(request->size);
+      sleeptime = ((request->flags & ISEND) != 0) ? smpi_ois(request->size) : smpi_os(request->size);
     }
 
-    if(sleeptime != 0.0){
+    if(sleeptime > 0.0){
       simcall_process_sleep(sleeptime);
-      XBT_DEBUG("sending size of %zu : sleep %f ", request->size, smpi_os(request->size));
-    }
+      XBT_DEBUG("sending size of %zu : sleep %f ", request->size, sleeptime);
+    }
 
     int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
 
@@ -438,51 +466,31 @@ void smpi_mpi_start(MPI_Request request)
     else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) { // eager mode
       mailbox = smpi_process_remote_mailbox(receiver);
       XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
-      smx_synchro_t action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
-      if(action ==NULL){
-        if ((request->flags & SSEND) == 0){
-          mailbox = smpi_process_remote_mailbox_small(receiver);
-          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
-        } else{
-          mailbox = smpi_process_remote_mailbox_small(receiver);
-          XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
-          action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
-          if(action ==NULL){
-            XBT_DEBUG("No, we are first, send to large mailbox");
-            mailbox = smpi_process_remote_mailbox(receiver);
-          }
-        }
-      }else{
+      smx_activity_t action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
+      if (action == nullptr) {
+        if ((request->flags & SSEND) == 0){
+          mailbox = smpi_process_remote_mailbox_small(receiver);
+          XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
+        }
+        else {
+          mailbox = smpi_process_remote_mailbox_small(receiver);
+          XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
+          action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
+          if (action == nullptr) {
+            XBT_DEBUG("No, we are first, send to large mailbox");
+            mailbox = smpi_process_remote_mailbox(receiver);
+          }
+        }
+      }
+      else {
        XBT_DEBUG("Yes there was something for us in the large mailbox");
      }
-    }else{
+    }
+    else {
       mailbox = smpi_process_remote_mailbox(receiver);
       XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)",mailbox, request,request->buf);
     }
 
-    void* buf = request->buf;
-    if ( ((request->flags & SSEND) == 0) && (((request->flags & RMA) != 0) ||
-         (static_cast<int>(request->size) < xbt_cfg_get_int("smpi/send-is-detached-thresh")))) {
-      void *oldbuf = NULL;
-      request->detached = 1;
-      XBT_DEBUG("Send request %p is detached", request);
-      request->refcount++;
-      if(request->old_type->sizeof_substruct == 0){
-        oldbuf = request->buf;
-        if (!smpi_process_get_replaying() && oldbuf != NULL && request->size!=0){
-          if((smpi_privatize_global_variables != 0)
-            && (static_cast<char*>(request->buf) >= smpi_start_data_exe)
-            && (static_cast<char*>(request->buf) < smpi_start_data_exe + smpi_size_data_exe )){
-            XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
-            smpi_switch_data_segment(request->src);
-          }
-          buf = xbt_malloc(request->size);
-          memcpy(buf,oldbuf,request->size);
-          XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
-        }
-      }
-    }
-
     // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
     request->real_size=request->size;
     request->action = simcall_comm_isend(SIMIX_process_from_PID(request->src+1), mailbox, request->size, -1.0,
@@ -494,8 +502,8 @@ void smpi_mpi_start(MPI_Request request)
                                          request->detached);
     XBT_DEBUG("send simcall posted");
 
-    /* FIXME: detached sends are not traceable (request->action == NULL) */
-    if (request->action)
+    /* FIXME: detached sends are not traceable (request->action == nullptr) */
+    if (request->action != nullptr)
       simcall_set_category(request->action, TRACE_internal_smpi_get_category());
 
     if (async_small_thresh != 0 || ((request->flags & RMA)!=0))
@@ -505,7 +513,7 @@ void smpi_mpi_start(MPI_Request request)
 
 void smpi_mpi_startall(int count, MPI_Request * requests)
 {
-  if(requests==NULL)
+  if(requests== nullptr)
     return;
 
   for(int i = 0; i < count; i++) {
@@ -536,12 +544,12 @@ void smpi_mpi_request_free(MPI_Request * request)
 MPI_Request smpi_rma_send_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                MPI_Op op)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   if(op==MPI_OP_NULL){
-    request = build_request(buf==MPI_BOTTOM ? NULL : buf , count, datatype, src, dst, tag,
+    request = build_request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, src, dst, tag,
                             comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
   }else{
-    request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src, dst, tag,
+    request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag,
                             comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
     request->op = op;
   }
@@ -551,12 +559,12 @@ MPI_Request smpi_rma_send_init(void *buf, int count, MPI_Datatype datatype, int
 MPI_Request smpi_rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
                                MPI_Op op)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   if(op==MPI_OP_NULL){
-    request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src, dst, tag,
+    request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag,
                             comm, RMA | NON_PERSISTENT | RECV | PREPARED);
   }else{
-    request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src, dst, tag,
+    request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag,
                            comm, RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
    request->op = op;
  }
@@ -565,16 +573,16 @@ MPI_Request smpi_rma_recv_init(void *buf, int count, MPI_Datatype datatype, int
 
 MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
-  request = build_request(buf==MPI_BOTTOM ? NULL : buf , count, datatype, smpi_process_index(),
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = build_request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, smpi_process_index(),
                           smpi_group_index(smpi_comm_group(comm), dst), tag,comm, PERSISTENT | ISEND | SEND | PREPARED);
   return request;
 }
 
 MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
-  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                           smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | ISEND | SEND);
   smpi_mpi_start(request);
   return request;
@@ -582,8 +590,8 @@ MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype, int dst,
 
 MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
-  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                           smpi_group_index(smpi_comm_group(comm), dst), tag,comm, NON_PERSISTENT | ISEND | SSEND | SEND);
   smpi_mpi_start(request);
   return request;
@@ -591,8 +599,8 @@ MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype, int dst
 
 MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
-  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
                           smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag,
                           comm, PERSISTENT | RECV | PREPARED);
   return request;
@@ -600,8 +608,8 @@ MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype, int src
 
 MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
-  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
                           smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag, comm,
                           NON_PERSISTENT | RECV);
   smpi_mpi_start(request);
@@ -610,32 +618,32 @@
 
 void smpi_mpi_recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
   request = smpi_mpi_irecv(buf, count, datatype, src, tag, comm);
   smpi_mpi_wait(&request, status);
-  request = NULL;
+  request = nullptr;
 }
 
 void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
-  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                           smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | SEND);
 
   smpi_mpi_start(request);
   smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
-  request = NULL;
+  request = nullptr;
 }
 
 void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
-  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
-  request = build_request(buf==MPI_BOTTOM ? NULL : buf, count, datatype, smpi_process_index(),
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
                           smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | SSEND | SEND);
 
   smpi_mpi_start(request);
   smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
-  request = NULL;
+  request = nullptr;
 }
 
 void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,int dst, int sendtag,
@@ -671,7 +679,7 @@ static void finish_wait(MPI_Request * request, MPI_Status * status)
   MPI_Request req = *request;
   smpi_empty_status(status);
 
-  if(!((req->detached != 0) && req->flags & SEND) && ((req->flags & PREPARED) == 0)){
+  if(!((req->detached != 0) && ((req->flags & SEND) != 0)) && ((req->flags & PREPARED) == 0)){
     if(status != MPI_STATUS_IGNORE) {
       int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
       status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src);
@@ -708,17 +716,24 @@ static void finish_wait(MPI_Request * request, MPI_Status * status)
     }
   }
 
-  if (TRACE_smpi_view_internals() && req->flags & RECV){
+  if (TRACE_smpi_view_internals() && ((req->flags & RECV) != 0)){
     int rank = smpi_process_index();
     int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
     TRACE_smpi_recv(rank, src_traced, rank);
   }
 
-  if(req->detached_sender!=NULL){
+  if(req->detached_sender != nullptr){
+
+    //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
+    double sleeptime = smpi_or(req->real_size);
+    if(sleeptime > 0.0){
+      simcall_process_sleep(sleeptime);
+      XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size, sleeptime);
+    }
     smpi_mpi_request_free(&(req->detached_sender));
   }
   if(req->flags & PERSISTENT)
-    req->action = NULL;
+    req->action = nullptr;
   req->flags |= FINISHED;
 
   smpi_mpi_request_free(request);
@@ -739,7 +754,7 @@ int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
   smpi_empty_status(status);
   int flag = 1;
   if (((*request)->flags & PREPARED) == 0) {
-    if ((*request)->action != NULL)
+    if ((*request)->action != nullptr)
      flag = simcall_comm_test((*request)->action);
    if (flag) {
      finish_wait(request, status);
@@ -755,47 +770,44 @@ int smpi_mpi_test(MPI_Request * request, MPI_Status * status) {
 
 int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
 {
-  xbt_dynar_t comms;
+  std::vector<smx_activity_t> comms;
+  comms.reserve(count);
+
   int i;
-  int* map;
   int flag = 0;
-  int size = 0;
 
   *index = MPI_UNDEFINED;
-  comms = xbt_dynar_new(sizeof(smx_synchro_t), NULL);
-  map = xbt_new(int, count);
+
+  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
   for(i = 0; i < count; i++) {
     if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action && !(requests[i]->flags & PREPARED)) {
-      xbt_dynar_push(comms, &requests[i]->action);
-      map[size] = i;
-      size++;
+      comms.push_back(requests[i]->action);
+      map.push_back(i);
     }
   }
-  if(size > 0) {
+  if(!map.empty()) {
    //multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if(smpi_test_sleep > 0)
      simcall_process_sleep(nsleeps*smpi_test_sleep);
 
-    i = simcall_comm_testany(comms);
-    // not MPI_UNDEFINED, as this is a simix return code
-    if(i != -1) {
-      *index = map[i];
+    i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
+    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
+      *index = map[i];
       finish_wait(&requests[*index], status);
-      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT))
-        requests[*index] = MPI_REQUEST_NULL;
-      flag = 1;
-      nsleeps=1;
-    }else{
+      flag = 1;
+      nsleeps = 1;
+      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT)) {
+        requests[*index] = MPI_REQUEST_NULL;
+      }
+    } else {
      nsleeps++;
    }
  } else {
      //all requests are null or inactive, return true
-      flag=1;
+      flag = 1;
      smpi_empty_status(status);
  }
-  xbt_free(map);
-  xbt_dynar_free(&comms);
 
  return flag;
}
@@ -805,8 +817,7 @@ int smpi_mpi_testall(int count, MPI_Request requests[], MPI_Status status[])
   MPI_Status stat;
   MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
   int flag=1;
-  int i;
-  for(i=0; i<count; i++) {
+  for(int i=0; i<count; i++) {
     if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
       if (smpi_mpi_test(&requests[i], pstat)!=1){
         flag=0;
@@ -835,7 +846,7 @@ void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){
 
 void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
-  MPI_Request request =build_request(NULL, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
+  MPI_Request request = build_request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
                 smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag, comm, PERSISTENT | RECV);
 
   // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
@@ -849,28 +860,29 @@ void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status*
   print_request("New iprobe", request);
   // We have to test both mailboxes as we don't know if we will receive one one or another
-  if (xbt_cfg_get_int("smpi/async-small-thresh")>0){
+  if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
     mailbox = smpi_process_mailbox_small();
-    XBT_DEBUG("trying to probe the perm recv mailbox");
+    XBT_DEBUG("Trying to probe the perm recv mailbox");
     request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));
   }
-  if (request->action==NULL){
-    mailbox = smpi_process_mailbox();
-    XBT_DEBUG("trying to probe the other mailbox");
-    request->action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
+
+  if (request->action == nullptr){
+    mailbox = smpi_process_mailbox();
+    XBT_DEBUG("trying to probe the other mailbox");
+    request->action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
   }
 
-  if (request->action){
-    simgrid::simix::Comm *sync_comm = static_cast<simgrid::simix::Comm*>(request->action);
-    MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
+  if (request->action != nullptr){
+    simgrid::kernel::activity::Comm *sync_comm = static_cast<simgrid::kernel::activity::Comm*>(request->action);
+    MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
     *flag = 1;
-    if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED)==0) {
+    if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED) == 0) {
       status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
       status->MPI_TAG = req->tag;
      status->MPI_ERROR = MPI_SUCCESS;
      status->count = req->real_size;
    }
-    nsleeps=1;//reset the number of sleeps we will do next time
+    nsleeps = 1;//reset the number of sleeps we will do next time
  }
  else {
      *flag = 0;
@@ -889,7 +901,7 @@ void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
     return;
   }
 
-  if ((*request)->action != NULL)
+  if ((*request)->action != nullptr)
    // this is not a detached send
    simcall_comm_wait((*request)->action, -1.0);
 
@@ -908,12 +920,12 @@ int smpi_mpi_waitany(int count, MPI_Request requests[], MPI_Status * status)
 
   if(count > 0) {
     // Wait for a request to complete
-    comms = xbt_dynar_new(sizeof(smx_synchro_t), NULL);
+    comms = xbt_dynar_new(sizeof(smx_activity_t), nullptr);
     map = xbt_new(int, count);
     XBT_DEBUG("Wait for one of %d", count);
    for(i = 0; i < count; i++) {
      if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED) && !(requests[i]->flags & FINISHED)) {
-        if (requests[i]->action != NULL) {
+        if (requests[i]->action != nullptr) {
          XBT_DEBUG("Waiting any %p ", requests[i]);
          xbt_dynar_push(comms, &requests[i]->action);
          map[size] = i;
@@ -930,7 +942,7 @@ int smpi_mpi_waitany(int count, MPI_Request requests[], MPI_Status * status)
     }
   }
   if(size > 0) {
-    i = simcall_comm_waitany(comms);
+    i = simcall_comm_waitany(comms, -1);
 
     // not MPI_UNDEFINED, as this is a simix return code
     if (i != -1) {
@@ -1226,12 +1238,13 @@ void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                       void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
 {
   int system_tag = COLL_TAG_SCATTER;
-  int rank, size, dst, index;
-  MPI_Aint lb = 0, sendext = 0;
+  int dst;
+  MPI_Aint lb = 0;
+  MPI_Aint sendext = 0;
   MPI_Request *requests;
 
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+  int rank = smpi_comm_rank(comm);
+  int size = smpi_comm_size(comm);
   if(rank != root) {
     // Recv buffer from root
     smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
@@ -1244,7 +1257,7 @@ void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
     }
     // Send buffers to receivers
     requests = xbt_new(MPI_Request, size - 1);
-    index = 0;
+    int index = 0;
     for(dst = 0; dst < size; dst++) {
       if(dst != root) {
         requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + dst * sendcount * sendext, sendcount, sendtype, dst,
@@ -1266,12 +1279,13 @@ void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype
                        MPI_Datatype recvtype, int root, MPI_Comm comm)
 {
   int system_tag = COLL_TAG_SCATTERV;
-  int rank, size, dst, index;
-  MPI_Aint lb = 0, sendext = 0;
+  int dst;
+  MPI_Aint lb = 0;
+  MPI_Aint sendext = 0;
   MPI_Request *requests;
 
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+  int rank = smpi_comm_rank(comm);
+  int size = smpi_comm_size(comm);
   if(rank != root) {
     // Recv buffer from root
     smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
@@ -1284,7 +1298,7 @@ void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype
     }
     // Send buffers to receivers
     requests = xbt_new(MPI_Request, size - 1);
-    index = 0;
+    int index = 0;
     for(dst = 0; dst < size; dst++) {
       if(dst != root) {
         requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + displs[dst] * sendext, sendcounts[dst],
@@ -1306,16 +1320,16 @@ void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat
                      MPI_Comm comm)
 {
   int system_tag = COLL_TAG_REDUCE;
-  int rank, size, src, index;
-  MPI_Aint lb = 0, dataext = 0;
+  int src, index;
+  MPI_Aint lb = 0;
+  MPI_Aint dataext = 0;
   MPI_Request *requests;
   void **tmpbufs;
 
   char* sendtmpbuf = static_cast<char *>(sendbuf);
 
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+  int rank = smpi_comm_rank(comm);
+  int size = smpi_comm_size(comm);
   //non commutative case, use a working algo from openmpi
   if(!smpi_op_is_commute(op)){
     smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count, datatype, op, root, comm);
@@ -1333,7 +1347,7 @@ void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat
   } else {
     smpi_datatype_extent(datatype, &lb, &dataext);
     // Local copy from root
-    if (sendtmpbuf != NULL && recvbuf != NULL)
+    if (sendtmpbuf != nullptr && recvbuf != nullptr)
       smpi_datatype_copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
     // Receive buffers from senders
     requests = xbt_new(MPI_Request, size - 1);
@@ -1384,13 +1398,13 @@ void smpi_mpi_allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype da
 void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
   int system_tag = -888;
-  int rank, size, other, index;
+  int other, index;
   MPI_Aint lb = 0, dataext = 0;
   MPI_Request *requests;
   void **tmpbufs;
 
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+  int rank = smpi_comm_rank(comm);
+  int size = smpi_comm_size(comm);
 
   smpi_datatype_extent(datatype, &lb, &dataext);
 
@@ -1446,13 +1460,13 @@ void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatyp
 void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 {
   int system_tag = -888;
-  int rank, size, other, index;
+  int other, index;
   MPI_Aint lb = 0, dataext = 0;
   MPI_Request *requests;
   void **tmpbufs;
   int recvbuf_is_empty=1;
 
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+  int rank = smpi_comm_rank(comm);
+  int size = smpi_comm_size(comm);
 
   smpi_datatype_extent(datatype, &lb, &dataext);
 
@@ -1462,13 +1476,11 @@ void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat
   index = 0;
   for(other = 0; other < rank; other++) {
     tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
-    requests[index] =
-      smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
+    requests[index] = smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
     index++;
   }
   for(other = rank + 1; other < size; other++) {
-    requests[index] =
-      smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
+    requests[index] = smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
    index++;
  }
  // Wait for completion of all comms.
@@ -1483,9 +1495,9 @@ void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat
       if(recvbuf_is_empty){
         smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
         recvbuf_is_empty=0;
-      }else
-        // #Request is below rank: it's a irecv
-        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+      } else
+        // #Request is below rank: it's a irecv
+        smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
     }
   }
   }else{
@@ -1496,7 +1508,8 @@ void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat
       if(recvbuf_is_empty){
         smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
         recvbuf_is_empty=0;
-      }else smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
+      } else
+        smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
     }
   }
 }
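
Editor's appendix (illustration only, not part of the patch): the parse_factor() / smpi_os() / smpi_ois() / smpi_or() code changed above implements SMPI's piecewise-linear overhead model. A configuration string such as "0:1e-6:1e-9;4096:5e-6:5e-10" is split on ';' into chunks, each chunk on ':' into an interval boundary followed by its coefficients, and the sorted result is scanned for the last interval whose boundary is <= the message size. The standalone sketch below mirrors that parse-and-lookup logic under stated assumptions: std::istringstream stands in for boost::tokenizer, assert() for xbt_die()/xbt_str_parse_*(), and the numeric values are invented.

// Sketch of SMPI's factor-string parsing and piecewise-linear cost lookup.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct FactorMultival {          // counterpart of s_smpi_factor_multival_t
  size_t factor = 0;             // lower boundary of the size interval
  std::vector<double> values;    // per-interval coefficients (base cost, cost per byte)
};

// Parse "A:B:C;D:E:F;..." where A,D are interval boundaries and B,C,E,F are values.
static std::vector<FactorMultival> parse_factor(const std::string& spec)
{
  std::vector<FactorMultival> result;
  std::istringstream chunks(spec);
  std::string chunk;
  while (std::getline(chunks, chunk, ';')) {   // one chunk per interval
    std::istringstream fields(chunk);
    std::string field;
    FactorMultival fact;
    bool first = true;
    while (std::getline(fields, field, ':')) {
      if (first) {                             // first field: the boundary
        fact.factor = std::stoul(field);
        first = false;
      } else {                                 // remaining fields: coefficients
        fact.values.push_back(std::stod(field));
      }
    }
    assert(!first && fact.values.size() >= 2 && "malformed factor chunk");
    result.push_back(fact);
  }
  // Sort by interval boundary, like the std::sort call introduced by the patch.
  std::sort(result.begin(), result.end(),
            [](const FactorMultival& a, const FactorMultival& b) { return a.factor < b.factor; });
  return result;
}

// Piecewise-linear lookup mirroring smpi_os()/smpi_ois()/smpi_or(): keep the
// coefficients of the last interval whose boundary is <= size.
static double cost(const std::vector<FactorMultival>& factors, size_t size)
{
  double current = factors.empty() ? 0.0 : factors[0].values[0] + factors[0].values[1] * size;
  for (const auto& fact : factors) {
    if (size <= fact.factor)     // past the matching interval: use the previous value
      return current;
    current = fact.values[0] + fact.values[1] * size;
  }
  return current;                // size is beyond the largest boundary
}

int main()
{
  // e.g. an smpi/os-style string with (base, per-byte) pairs -- values made up.
  auto factors = parse_factor("0:1e-6:1e-9;4096:5e-6:5e-10");
  std::cout << cost(factors, 1024) << " " << cost(factors, 65536) << "\n";
}

With the example string, cost(factors, 1024) uses the first interval's coefficients and cost(factors, 65536) the second's, matching the "use the previously computed value of current" comments in the patched functions.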
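A second hedged sketch: the send path of smpi_mpi_start() reworked above makes two independent decisions that are easy to miss in the diff. First, a non-synchronous send below "smpi/send-is-detached-thresh" is buffered and detached (the payload is memcpy'ed so the caller regains control immediately); second, small messages are posted to the small mailbox (eager protocol) while large ones use the large mailbox (rendezvous), governed by "smpi/async-small-thresh". The names below (Mailbox, SendPlan, plan_send) are invented for illustration, and the real code additionally probes both mailboxes with simcall_comm_iprobe() before choosing.

// Condensed model of the detach/mailbox decisions in smpi_mpi_start()'s send path.
#include <cstddef>

enum class Mailbox { Small, Large };

struct SendPlan {
  Mailbox mailbox;   // where the message would be posted
  bool    detached;  // true: payload copied, sender returns immediately
};

// ssend: synchronous send (never detached); rma: one-sided operation.
static SendPlan plan_send(size_t size, bool ssend, bool rma,
                          int detach_thresh /* smpi/send-is-detached-thresh */,
                          int async_thresh  /* smpi/async-small-thresh */)
{
  SendPlan plan;
  // Buffer-and-detach small, non-synchronous sends (simplified from the patch's
  // condition on SSEND, RMA and request->size).
  plan.detached = !ssend && (rma || static_cast<int>(size) < detach_thresh);
  // Eager (small mailbox) vs rendezvous (large mailbox); the real code also
  // probes for an already-posted receive before committing.
  plan.mailbox = (rma || static_cast<int>(size) < async_thresh) ? Mailbox::Small : Mailbox::Large;
  return plan;
}

int main()
{
  SendPlan p = plan_send(1024, /*ssend=*/false, /*rma=*/false, 65536, 65536);
  return (p.detached && p.mailbox == Mailbox::Small) ? 0 : 1;
}

Note also the behavioral change hidden in this reorganization: the smpi_or() receive-side sleep that used to be injected when posting the receive is moved into finish_wait() (hunk @@ -708,17 +716,24 @@ above), so it is now charged only for detached senders and uses req->real_size.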