* under the terms of the license (GNU LGPL) which comes with this package. */
#include <xbt/config.hpp>
+#include <algorithm>
#include "private.h"
#include "xbt/virtu.h"
#include "src/simix/smx_private.h"
#include "surf/surf.h"
#include "simgrid/sg_config.h"
+#include "smpi/smpi_utils.hpp"
#include "colls/colls.h"
-#include "src/simix/SynchroComm.hpp"
+#include "src/kernel/activity/SynchroComm.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
-static int match_recv(void* a, void* b, smx_synchro_t ignored) {
- MPI_Request ref = static_cast<MPI_Request>(a);
- MPI_Request req = static_cast<MPI_Request>(b);
- XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
+static int match_recv(void* a, void* b, smx_activity_t ignored) {
+ MPI_Request ref = static_cast<MPI_Request>(a);
+ MPI_Request req = static_cast<MPI_Request>(b);
+ XBT_DEBUG("Trying to match a recv of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
xbt_assert(ref, "Cannot match recv against null reference");
xbt_assert(req, "Cannot match recv against null request");
&& ((ref->tag == MPI_ANY_TAG && req->tag >=0) || req->tag == ref->tag)){
//we match, we can transfer some values
if(ref->src == MPI_ANY_SOURCE)
- ref->real_src = req->src;
+ ref->real_src = req->src;
if(ref->tag == MPI_ANY_TAG)
- ref->real_tag = req->tag;
+ ref->real_tag = req->tag;
if(ref->real_size < req->real_size)
- ref->truncated = 1;
+ ref->truncated = 1;
if(req->detached==1)
- ref->detached_sender=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
+ ref->detached_sender=req; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
XBT_DEBUG("match succeeded");
return 1;
}else return 0;
}
-static int match_send(void* a, void* b,smx_synchro_t ignored) {
- MPI_Request ref = static_cast<MPI_Request>(a);
- MPI_Request req = static_cast<MPI_Request>(b);
- XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
- xbt_assert(ref, "Cannot match send against null reference");
- xbt_assert(req, "Cannot match send against null request");
-
- if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
- && ((req->tag == MPI_ANY_TAG && ref->tag >=0)|| req->tag == ref->tag))
- {
- if(req->src == MPI_ANY_SOURCE)
- req->real_src = ref->src;
- if(req->tag == MPI_ANY_TAG)
- req->real_tag = ref->tag;
- if(req->real_size < ref->real_size)
- req->truncated = 1;
- if(ref->detached==1)
- req->detached_sender=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
- XBT_DEBUG("match succeeded");
- return 1;
- } else return 0;
+static int match_send(void* a, void* b,smx_activity_t ignored) {
+ MPI_Request ref = static_cast<MPI_Request>(a);
+ MPI_Request req = static_cast<MPI_Request>(b);
+ XBT_DEBUG("Trying to match a send of src %d against %d, tag %d against %d",ref->src,req->src, ref->tag, req->tag);
+ xbt_assert(ref, "Cannot match send against null reference");
+ xbt_assert(req, "Cannot match send against null request");
+
+ if((req->src == MPI_ANY_SOURCE || req->src == ref->src)
+ && ((req->tag == MPI_ANY_TAG && ref->tag >=0)|| req->tag == ref->tag)){
+ if(req->src == MPI_ANY_SOURCE)
+ req->real_src = ref->src;
+ if(req->tag == MPI_ANY_TAG)
+ req->real_tag = ref->tag;
+ if(req->real_size < ref->real_size)
+ req->truncated = 1;
+ if(ref->detached==1)
+ req->detached_sender=ref; //tie the sender to the receiver, as it is detached and has to be freed in the receiver
+ XBT_DEBUG("match succeeded");
+ return 1;
+ } else
+ return 0;
}
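/* A minimal standalone sketch of the matching rule used by match_recv() and
 * match_send() above, with plain ints instead of MPI_Request and an invented
 * wildcard value standing in for MPI_ANY_SOURCE / MPI_ANY_TAG (the real code
 * additionally records real_src/real_tag, truncation and detached senders): */
static bool sketch_matches(int recv_src, int recv_tag, int send_src, int send_tag) {
  const int ANY = -1; // hypothetical wildcard, for illustration only
  return (recv_src == ANY || send_src == recv_src)
      && ((recv_tag == ANY && send_tag >= 0) || send_tag == recv_tag);
}
// e.g. sketch_matches(-1, 42, 3, 42) is true; match_recv() would then copy
// src 3 into ref->real_src before reporting the match.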
-// Methods used to parse and store the values for timing injections in smpi
-// These are taken from surf/network.c and generalized to have more values for each factor
-typedef struct s_smpi_factor_multival *smpi_os_factor_multival_t;
-typedef struct s_smpi_factor_multival { // FIXME: this should be merged (deduplicated) with s_smpi_factor defined in network_smpi.c
- long factor;
- int nb_values;
- double values[4];//arbitrary set to 4
-} s_smpi_factor_multival_t;
-
-xbt_dynar_t smpi_os_values = nullptr;
-xbt_dynar_t smpi_or_values = nullptr;
-xbt_dynar_t smpi_ois_values = nullptr;
+std::vector<s_smpi_factor_t> smpi_os_values;
+std::vector<s_smpi_factor_t> smpi_or_values;
+std::vector<s_smpi_factor_t> smpi_ois_values;
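/* The strings parsed by parse_factor() (now provided by smpi/smpi_utils.hpp)
 * keep the format handled by the parser removed below: semicolon-separated
 * sections "factor:value1[:value2[:value3[:value4]]]", sorted by ascending
 * factor. A hypothetical configuration, with invented numbers:
 *   --cfg=smpi/os:1:1.5e-6:2e-10;65536:3.0e-6:1e-10
 * Sketch of the resulting table, using a local struct so as not to depend on
 * the exact layout of s_smpi_factor_t: */
struct smpi_factor_sketch {
  size_t factor;    // section boundary (message size in bytes)
  double values[2]; // per-message overhead and per-byte slope
};
static const smpi_factor_sketch sketch_table[] = {
    {1, {1.5e-6, 2e-10}}, {65536, {3.0e-6, 1e-10}}};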
static simgrid::config::Flag<double> smpi_wtime_sleep(
"smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
static simgrid::config::Flag<double> smpi_test_sleep(
"smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
-static int factor_cmp(const void *pa, const void *pb)
-{
- return ((static_cast<const s_smpi_factor_multival_t*>(pa))->factor > (static_cast<const s_smpi_factor_multival_t*>(pb))->factor) ? 1 :
- ((static_cast<const s_smpi_factor_multival_t*>(pa))->factor < (static_cast<const s_smpi_factor_multival_t*>(pb))->factor) ? -1 : 0;
-}
-static xbt_dynar_t parse_factor(const char *smpi_coef_string)
+static double smpi_os(size_t size)
{
- s_smpi_factor_multival_t fact;
- char *value = nullptr;
- unsigned int iter = 0;
- fact.nb_values = 0;
- unsigned int i = 0;
- xbt_dynar_t radical_elements2 = nullptr;
-
- xbt_dynar_t smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_multival_t), nullptr);
- xbt_dynar_t radical_elements = xbt_str_split(smpi_coef_string, ";");
- xbt_dynar_foreach(radical_elements, iter, value) {
- memset(&fact, 0, sizeof(s_smpi_factor_multival_t));
- radical_elements2 = xbt_str_split(value, ":");
- if (xbt_dynar_length(radical_elements2) <2 || xbt_dynar_length(radical_elements2) > 5)
- xbt_die("Malformed radical for smpi factor: '%s'", smpi_coef_string);
- for(i =0; i<xbt_dynar_length(radical_elements2);i++ ){
- char *errmsg;
- if (i==0) {
- errmsg = bprintf("Invalid factor in chunk #%d: %%s", iter+1);
- fact.factor = xbt_str_parse_int(xbt_dynar_get_as(radical_elements2, i, char *), errmsg);
- } else {
- errmsg = bprintf("Invalid factor value %d in chunk #%d: %%s", i, iter+1);
- fact.values[fact.nb_values] = xbt_str_parse_double(xbt_dynar_get_as(radical_elements2, i, char *), errmsg);
- fact.nb_values++;
- }
- xbt_free(errmsg);
- }
-
- xbt_dynar_push_as(smpi_factor, s_smpi_factor_multival_t, fact);
- XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
- xbt_dynar_free(&radical_elements2);
- }
- xbt_dynar_free(&radical_elements);
- xbt_dynar_sort(smpi_factor, &factor_cmp);
- xbt_dynar_foreach(smpi_factor, iter, fact) {
- XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
- }
- return smpi_factor;
-}
-
-static double smpi_os(double size)
-{
- if (smpi_os_values == nullptr) {
+ if (smpi_os_values.empty()) {
smpi_os_values = parse_factor(xbt_cfg_get_string("smpi/os"));
- smpi_register_static(smpi_os_values, xbt_dynar_free_voidp);
}
- unsigned int iter = 0;
- s_smpi_factor_multival_t fact;
- double current=0.0;
+ double current = smpi_os_values.empty() ? 0.0 : smpi_os_values[0].values[0] + smpi_os_values[0].values[1]*size;
  // Iterate over all the sections that were specified and find the right value. (fact.factor holds the section
  // boundaries, sorted by increasing size; we want the last section whose boundary lies below size)
- // Note: parse_factor() (used before) already sorts the dynar we iterate over!
- xbt_dynar_foreach(smpi_os_values, iter, fact) {
- if (size <= fact.factor) { // Values already too large, use the previously
- // computed value of current!
- XBT_DEBUG("os : %f <= %ld return %f", size, fact.factor, current);
+ // Note: parse_factor() (used before) already sorts the vector we iterate over!
+ for (auto& fact : smpi_os_values) {
+ if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
+ XBT_DEBUG("os : %zu <= %zu return %.10f", size, fact.factor, current);
return current;
}else{
// If the next section is too large, the current section must be used.
current = fact.values[0]+fact.values[1]*size;
}
}
- XBT_DEBUG("os : %f > %ld return %f", size, fact.factor, current);
+ XBT_DEBUG("Searching for smpi/os: %zu is larger than the largest boundary, return %.10f", size, current);
return current;
}
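/* Sketch of the lookup that smpi_os(), smpi_ois() and smpi_or() all perform on
 * their sorted tables, written against the sketch_table defined above: the
 * result is values[0] + values[1]*size of the last section whose boundary lies
 * below size (sizes at or below the first boundary use the first section). */
static double sketch_piecewise_eval(size_t size) {
  double current = sketch_table[0].values[0] + sketch_table[0].values[1] * size;
  for (auto const& fact : sketch_table) {
    if (size <= fact.factor) // boundary reached: the previous section applies
      return current;
    current = fact.values[0] + fact.values[1] * size; // move to this section
  }
  return current; // beyond the last boundary: the last section applies
}
// e.g. sketch_piecewise_eval(1024) = 1.5e-6 + 2e-10*1024 (first section),
//      sketch_piecewise_eval(100000) = 3.0e-6 + 1e-10*100000 (second section).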
-static double smpi_ois(double size)
+static double smpi_ois(size_t size)
{
- if (smpi_ois_values == nullptr) {
+ if (smpi_ois_values.empty()) {
smpi_ois_values = parse_factor(xbt_cfg_get_string("smpi/ois"));
- smpi_register_static(smpi_ois_values, xbt_dynar_free_voidp);
}
- unsigned int iter = 0;
- s_smpi_factor_multival_t fact;
- double current=0.0;
+ double current = smpi_ois_values.empty() ? 0.0 : smpi_ois_values[0].values[0] + smpi_ois_values[0].values[1]*size;
  // Iterate over all the sections that were specified and find the right value. (fact.factor holds the section
  // boundaries, sorted by increasing size; we want the last section whose boundary lies below size)
- // Note: parse_factor() (used before) already sorts the dynar we iterate over!
- xbt_dynar_foreach(smpi_ois_values, iter, fact) {
+ // Note: parse_factor() (used before) already sorts the vector we iterate over!
+ for (auto& fact : smpi_ois_values) {
if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
- XBT_DEBUG("ois : %f <= %ld return %f", size, fact.factor, current);
+ XBT_DEBUG("ois : %zu <= %zu return %.10f", size, fact.factor, current);
return current;
}else{
// If the next section is too large, the current section must be used.
current = fact.values[0]+fact.values[1]*size;
}
}
- XBT_DEBUG("ois : %f > %ld return %f", size, fact.factor, current);
+ XBT_DEBUG("Searching for smpi/ois: %zu is larger than the largest boundary, return %.10f", size, current);
return current;
}
-static double smpi_or(double size)
+static double smpi_or(size_t size)
{
- if (smpi_or_values == nullptr) {
+ if (smpi_or_values.empty()) {
smpi_or_values = parse_factor(xbt_cfg_get_string("smpi/or"));
- smpi_register_static(smpi_or_values, xbt_dynar_free_voidp);
}
- unsigned int iter = 0;
- s_smpi_factor_multival_t fact;
- double current=0.0;
+
+ double current = smpi_or_values.empty() ? 0.0 : smpi_or_values.front().values[0] + smpi_or_values.front().values[1]*size;
+
  // Iterate over all the sections that were specified and find the right value. (fact.factor holds the section
  // boundaries, sorted by increasing size; we want the last section whose boundary lies below size)
- // Note: parse_factor() (used before) already sorts the dynar we iterate over!
- xbt_dynar_foreach(smpi_or_values, iter, fact) {
- if (size <= fact.factor) { // Values already too large, use the previously
- // computed value of current!
- XBT_DEBUG("or : %f <= %ld return %f", size, fact.factor, current);
+ // Note: parse_factor() (used before) already sorts the vector we iterate over!
+ for (auto& fact : smpi_or_values) {
+ if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
+ XBT_DEBUG("or : %zu <= %zu return %.10f", size, fact.factor, current);
return current;
} else {
// If the next section is too large, the current section must be used.
current=fact.values[0]+fact.values[1]*size;
}
}
- XBT_DEBUG("or : %f > %ld return %f", size, fact.factor, current);
+ XBT_DEBUG("smpi_or: %zu is larger than largest boundary, return %.10f", size, current);
return current;
}
double smpi_mpi_wtime(){
double time;
- if (smpi_process_initialized() != 0 &&
- smpi_process_finalized() == 0 &&
- smpi_process_get_sampling() == 0) {
+ if (smpi_process_initialized() != 0 && smpi_process_finalized() == 0 && smpi_process_get_sampling() == 0) {
smpi_bench_end();
time = SIMIX_get_clock();
// to avoid deadlocks if used as a break condition, such as
}
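/* Hypothetical user pattern that the smpi/wtime injection above protects
 * against: if MPI_Wtime() (backed by smpi_mpi_wtime()) never advanced the
 * simulated clock, this break condition would spin forever. */
static void sketch_busy_wait(double timeout) {
  double start = smpi_mpi_wtime();
  while (smpi_mpi_wtime() - start < timeout) {
    // each call injects at least smpi/wtime seconds of simulated time
  }
}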
/* MPI Low level calls */
-MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype,
- int dst, int tag, MPI_Comm comm)
+MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
return request;
}
-MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
- int dst, int tag, MPI_Comm comm)
+MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(),
return request;
}
-MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
- int src, int tag, MPI_Comm comm)
+MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype,
//begin with the more appropriate one: the small one.
mailbox = smpi_process_mailbox_small();
XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
- smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
-
+ smx_activity_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv,
+ static_cast<void*>(request));
+
if (action == nullptr) {
mailbox = smpi_process_mailbox();
XBT_DEBUG("No, nothing in the small mailbox test the other one : %p", mailbox);
XBT_DEBUG("Still nothing, switch back to the small mailbox : %p", mailbox);
mailbox = smpi_process_mailbox_small();
}
- }
- else {
+ } else {
XBT_DEBUG("yes there was something for us in the large mailbox");
}
- }
- else {
+ } else {
mailbox = smpi_process_mailbox_small();
XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
- smx_synchro_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
-
+ smx_activity_t action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
+
if (action == nullptr) {
XBT_DEBUG("No, nothing in the permanent receive mailbox");
mailbox = smpi_process_mailbox();
- }
- else {
+ } else {
XBT_DEBUG("yes there was something for us in the small mailbox");
}
}
request->action = simcall_comm_irecv(SIMIX_process_self(), mailbox, request->buf, &request->real_size, &match_recv,
! smpi_process_get_replaying()? &smpi_comm_copy_buffer_callback
: &smpi_comm_null_copy_buffer_callback, request, -1.0);
- XBT_DEBUG("recv simcall posted");
+ XBT_DEBUG("recv simcall posted");
if (async_small_thresh != 0 || (request->flags & RMA) != 0 )
xbt_mutex_release(mut);
- }
- else { /* the RECV flag was not set, so this is a send */
+ } else { /* the RECV flag was not set, so this is a send */
int receiver = request->dst;
int rank = request->src;
if (TRACE_smpi_view_internals()) {
- TRACE_smpi_send(rank, rank, receiver,request->size);
+ TRACE_smpi_send(rank, rank, receiver, request->tag, request->size);
}
print_request("New send", request);
void* buf = request->buf;
- if ( (request->flags & SSEND) == 0
- && ( (request->flags & RMA) != 0 || static_cast<int>(request->size) < xbt_cfg_get_int("smpi/send-is-detached-thresh") ) ) {
+ if ((request->flags & SSEND) == 0 && ( (request->flags & RMA) != 0
+ || static_cast<int>(request->size) < xbt_cfg_get_int("smpi/send-is-detached-thresh") ) ) {
void *oldbuf = nullptr;
request->detached = 1;
XBT_DEBUG("Send request %p is detached", request);
}
if(sleeptime > 0.0){
- simcall_process_sleep(sleeptime);
- XBT_DEBUG("sending size of %zu : sleep %f ", request->size, sleeptime);
- }
+ simcall_process_sleep(sleeptime);
+ XBT_DEBUG("sending size of %zu : sleep %f ", request->size, sleeptime);
+ }
int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
if (!(async_small_thresh != 0 || (request->flags & RMA) !=0)) {
mailbox = smpi_process_remote_mailbox(receiver);
- }
- else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) { // eager mode
+ } else if (((request->flags & RMA) != 0) || static_cast<int>(request->size) < async_small_thresh) { // eager mode
mailbox = smpi_process_remote_mailbox(receiver);
XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
- smx_synchro_t action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
+ smx_activity_t action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send,
+ static_cast<void*>(request));
if (action == nullptr) {
if ((request->flags & SSEND) == 0){
mailbox = smpi_process_remote_mailbox_small(receiver);
XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
- }
- else {
+ } else {
mailbox = smpi_process_remote_mailbox_small(receiver);
XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
action = simcall_comm_iprobe(mailbox, 1,request->dst, request->tag, &match_send, static_cast<void*>(request));
mailbox = smpi_process_remote_mailbox(receiver);
}
}
- }
- else {
+ } else {
XBT_DEBUG("Yes there was something for us in the large mailbox");
}
- }
- else {
+ } else {
mailbox = smpi_process_remote_mailbox(receiver);
XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)",mailbox, request,request->buf);
}
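/* Sketch of the decision taken above when posting a send, with the threshold
 * passed as a parameter instead of read through xbt_cfg: synchronous sends are
 * never detached; RMA requests and messages below smpi/send-is-detached-thresh
 * are detached, i.e. buffered and sent eagerly without waiting for the
 * receiver. */
static bool sketch_send_is_detached(bool ssend, bool rma, int size, int detach_thresh) {
  return !ssend && (rma || size < detach_thresh);
}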
xbt_free(*request);
*request = MPI_REQUEST_NULL;
}else{
- print_request("Decrementing", (*request));
+ print_request("Decrementing", (*request));
}
}else{
- xbt_die("freeing an already free request");
+ xbt_die("freeing an already free request");
}
}
if (TRACE_smpi_view_internals() && ((req->flags & RECV) != 0)){
int rank = smpi_process_index();
int src_traced = (req->src == MPI_ANY_SOURCE ? req->real_src : req->src);
- TRACE_smpi_recv(rank, src_traced, rank);
+ TRACE_smpi_recv(rank, src_traced, rank,req->tag);
}
if(req->detached_sender != nullptr){
-
//integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
double sleeptime = smpi_or(req->real_size);
if(sleeptime > 0.0){
- simcall_process_sleep(sleeptime);
- XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size, sleeptime);
+ simcall_process_sleep(sleeptime);
+ XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size, sleeptime);
}
smpi_mpi_request_free(&(req->detached_sender));
}
nsleeps=1;//reset the number of sleeps we will do next time
if (*request != MPI_REQUEST_NULL && ((*request)->flags & PERSISTENT)==0)
*request = MPI_REQUEST_NULL;
- }else{
+ } else if (xbt_cfg_get_boolean("smpi/grow-injected-times")){
nsleeps++;
}
}
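/* Sketch of the back-off implemented above, with the flags passed in rather
 * than read from xbt_cfg: every unsuccessful test injects nsleeps * smpi/test
 * seconds of simulated time, and nsleeps grows by one per failure when
 * smpi/grow-injected-times is set, so tight test loops keep the clock moving. */
static double sketch_next_test_injection(int& nsleeps, bool matched, double base, bool grow) {
  double injected = nsleeps * base; // what simcall_process_sleep() would receive
  if (matched)
    nsleeps = 1; // success: reset the multiplier
  else if (grow)
    nsleeps++;   // failure: inject a little more next time
  return injected;
}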
int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
{
- xbt_dynar_t comms;
+ std::vector<simgrid::kernel::activity::ActivityImpl*> comms;
+ comms.reserve(count);
+
int i;
- int* map;
int flag = 0;
- int size = 0;
*index = MPI_UNDEFINED;
- comms = xbt_dynar_new(sizeof(smx_synchro_t), nullptr);
- map = xbt_new(int, count);
+
+ std::vector<int> map; /** Maps all matching comms back to their location in requests **/
for(i = 0; i < count; i++) {
if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action && !(requests[i]->flags & PREPARED)) {
- xbt_dynar_push(comms, &requests[i]->action);
- map[size] = i;
- size++;
+ comms.push_back(requests[i]->action);
+ map.push_back(i);
}
}
- if(size > 0) {
+ if(!map.empty()) {
//multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
static int nsleeps = 1;
if(smpi_test_sleep > 0)
simcall_process_sleep(nsleeps*smpi_test_sleep);
- i = simcall_comm_testany(comms);
- // not MPI_UNDEFINED, as this is a simix return code
- if(i != -1) {
- *index = map[i];
+ i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
+ if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
+ *index = map[i];
finish_wait(&requests[*index], status);
- if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT))
- requests[*index] = MPI_REQUEST_NULL;
- flag = 1;
- nsleeps=1;
- }else{
+ flag = 1;
+ nsleeps = 1;
+ if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT)) {
+ requests[*index] = MPI_REQUEST_NULL;
+ }
+ } else {
nsleeps++;
}
- }else{
+ } else {
//all requests are null or inactive, return true
- flag=1;
+ flag = 1;
smpi_empty_status(status);
}
- xbt_free(map);
- xbt_dynar_free(&comms);
return flag;
}
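/* Worked example of the map above (values invented): with requests =
 * {done, MPI_REQUEST_NULL, pending, PREPARED, pending}, comms holds the
 * actions of indices {0, 2, 4} and map = {0, 2, 4}; if simcall_comm_testany()
 * answers 1, the matching request is requests[map[1]] == requests[2]. */
static int sketch_translate_testany(const std::vector<int>& map, int simix_result) {
  return simix_result == -1 ? MPI_UNDEFINED : map[simix_result];
}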
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
int flag=1;
- int i;
- for(i=0; i<count; i++){
+ for(int i=0; i<count; i++){
if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags & PREPARED)) {
if (smpi_mpi_test(&requests[i], pstat)!=1){
flag=0;
}
void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){
-
MPI_Request request = build_request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag, comm, PERSISTENT | RECV);
if (xbt_cfg_get_int("smpi/async-small-thresh") > 0){
mailbox = smpi_process_mailbox_small();
XBT_DEBUG("Trying to probe the perm recv mailbox");
- request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, static_cast<void*>(request));
+ request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv,
+ static_cast<void*>(request));
}
if (request->action == nullptr){
mailbox = smpi_process_mailbox();
XBT_DEBUG("trying to probe the other mailbox");
- request->action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, static_cast<void*>(request));
+ request->action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv,
+ static_cast<void*>(request));
}
if (request->action != nullptr){
- simgrid::simix::Comm *sync_comm = static_cast<simgrid::simix::Comm*>(request->action);
- MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
+ simgrid::kernel::activity::Comm *sync_comm = static_cast<simgrid::kernel::activity::Comm*>(request->action);
+ MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
*flag = 1;
if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED) == 0) {
status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
}
else {
*flag = 0;
- nsleeps++;
+ if (xbt_cfg_get_boolean("smpi/grow-injected-times"))
+ nsleeps++;
}
smpi_mpi_request_free(&request);
-
- return;
}
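/* Hypothetical usage of the helper above: a probe peeks at the mailboxes
 * through a temporary PERSISTENT|RECV request and never consumes the message,
 * so a matching receive can still be posted afterwards. */
static bool sketch_probe_ready(int source, int tag, MPI_Comm comm) {
  int flag = 0;
  MPI_Status status;
  smpi_mpi_iprobe(source, tag, comm, &flag, &status);
  return flag != 0; // on a hit, status was filled from the sender's request
}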
void smpi_mpi_wait(MPI_Request * request, MPI_Status * status)
finish_wait(request, status);
if (*request != MPI_REQUEST_NULL && (((*request)->flags & NON_PERSISTENT)!=0))
- *request = MPI_REQUEST_NULL;
+ *request = MPI_REQUEST_NULL;
}
int smpi_mpi_waitany(int count, MPI_Request requests[], MPI_Status * status)
if(count > 0) {
// Wait for a request to complete
- comms = xbt_dynar_new(sizeof(smx_synchro_t), nullptr);
+ comms = xbt_dynar_new(sizeof(smx_activity_t), nullptr);
map = xbt_new(int, count);
XBT_DEBUG("Wait for one of %d", count);
for(i = 0; i < count; i++) {
}
}
if(size > 0) {
- i = simcall_comm_waitany(comms);
+ i = simcall_comm_waitany(comms, -1);
// not MPI_UNDEFINED, as this is a simix return code
if (i != -1) {
void smpi_mpi_bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
{
- smpi_coll_tuned_bcast_binomial_tree(buf, count, datatype, root, comm);
+ smpi_coll_tuned_bcast_binomial_tree(buf, count, datatype, root, comm);
}
void smpi_mpi_barrier(MPI_Comm comm)
{
- smpi_coll_tuned_barrier_ompi_basic_linear(comm);
+ smpi_coll_tuned_barrier_ompi_basic_linear(comm);
}
void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
int system_tag = COLL_TAG_GATHER;
- int rank, size, src, index;
- MPI_Aint lb = 0, recvext = 0;
- MPI_Request *requests;
+ MPI_Aint lb = 0;
+ MPI_Aint recvext = 0;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
if(rank != root) {
// Send buffer to root
smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
} else {
smpi_datatype_extent(recvtype, &lb, &recvext);
// Local copy from root
- smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + root * recvcount * recvext, recvcount, recvtype);
+ smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + root * recvcount * recvext,
+ recvcount, recvtype);
// Receive buffers from senders
- requests = xbt_new(MPI_Request, size - 1);
- index = 0;
- for(src = 0; src < size; src++) {
+ MPI_Request *requests = xbt_new(MPI_Request, size - 1);
+ int index = 0;
+ for (int src = 0; src < size; src++) {
if(src != root) {
requests[index] = smpi_irecv_init(static_cast<char*>(recvbuf) + src * recvcount * recvext, recvcount, recvtype,
src, system_tag, comm);
// Wait for completion of irecv's.
smpi_mpi_startall(size - 1, requests);
smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
- for(src = 0; src < size-1; src++) {
+ for (int src = 0; src < size-1; src++) {
smpi_mpi_request_free(&requests[src]);
}
xbt_free(requests);
void smpi_mpi_reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op,
MPI_Comm comm)
{
- int i, size, count;
- int *displs;
- int rank = smpi_comm_rank(comm);
- void *tmpbuf;
-
- /* arbitrarily choose root as rank 0 */
- size = smpi_comm_size(comm);
- count = 0;
- displs = xbt_new(int, size);
- for (i = 0; i < size; i++) {
- displs[i] = count;
- count += recvcounts[i];
- }
- tmpbuf=static_cast<void*>(smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype)));
+ int rank = smpi_comm_rank(comm);
- mpi_coll_reduce_fun(sendbuf, tmpbuf, count, datatype, op, 0, comm);
- smpi_mpi_scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf, recvcounts[rank], datatype, 0, comm);
- xbt_free(displs);
- smpi_free_tmp_buffer(tmpbuf);
+ /* arbitrarily choose root as rank 0 */
+ int size = smpi_comm_size(comm);
+ int count = 0;
+ int *displs = xbt_new(int, size);
+ for (int i = 0; i < size; i++) {
+ displs[i] = count;
+ count += recvcounts[i];
+ }
+ void *tmpbuf = static_cast<void*>(smpi_get_tmp_sendbuffer(count*smpi_datatype_get_extent(datatype)));
+
+ mpi_coll_reduce_fun(sendbuf, tmpbuf, count, datatype, op, 0, comm);
+ smpi_mpi_scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf, recvcounts[rank], datatype, 0, comm);
+ xbt_free(displs);
+ smpi_free_tmp_buffer(tmpbuf);
}
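/* Worked example for the reduce+scatterv scheme above (invented counts): with
 * recvcounts = {2, 3, 1} on 3 ranks, the loop yields displs = {0, 2, 5} and
 * count = 6, so rank 0 reduces all 6 elements and the scatterv then delivers
 * elements [0,2) to rank 0, [2,5) to rank 1 and [5,6) to rank 2. */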
void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs,
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
int system_tag = COLL_TAG_GATHERV;
- int rank, size, src, index;
- MPI_Aint lb = 0, recvext = 0;
- MPI_Request *requests;
+ MPI_Aint lb = 0;
+ MPI_Aint recvext = 0;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
- if(rank != root) {
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
+ if (rank != root) {
// Send buffer to root
smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm);
} else {
smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + displs[root] * recvext,
recvcounts[root], recvtype);
// Receive buffers from senders
- requests = xbt_new(MPI_Request, size - 1);
- index = 0;
- for(src = 0; src < size; src++) {
+ MPI_Request *requests = xbt_new(MPI_Request, size - 1);
+ int index = 0;
+ for (int src = 0; src < size; src++) {
if(src != root) {
requests[index] = smpi_irecv_init(static_cast<char*>(recvbuf) + displs[src] * recvext,
recvcounts[src], recvtype, src, system_tag, comm);
// Wait for completion of irecv's.
smpi_mpi_startall(size - 1, requests);
smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
- for(src = 0; src < size-1; src++) {
+ for (int src = 0; src < size-1; src++) {
smpi_mpi_request_free(&requests[src]);
}
xbt_free(requests);
void *recvbuf,int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
int system_tag = COLL_TAG_ALLGATHER;
- int rank, size, other, index;
- MPI_Aint lb = 0, recvext = 0;
+ MPI_Aint lb = 0;
+ MPI_Aint recvext = 0;
MPI_Request *requests;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
// FIXME: check for errors
smpi_datatype_extent(recvtype, &lb, &recvext);
// Local copy from self
- smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char *>(recvbuf) + rank * recvcount * recvext, recvcount, recvtype);
+ smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char *>(recvbuf) + rank * recvcount * recvext, recvcount,
+ recvtype);
// Send/Recv buffers to/from others;
requests = xbt_new(MPI_Request, 2 * (size - 1));
- index = 0;
- for(other = 0; other < size; other++) {
+ int index = 0;
+ for (int other = 0; other < size; other++) {
if(other != rank) {
requests[index] = smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag,comm);
index++;
- requests[index] = smpi_irecv_init(static_cast<char *>(recvbuf) + other * recvcount * recvext, recvcount, recvtype, other,
- system_tag, comm);
+ requests[index] = smpi_irecv_init(static_cast<char *>(recvbuf) + other * recvcount * recvext, recvcount, recvtype,
+ other, system_tag, comm);
index++;
}
}
// Wait for completion of all comms.
smpi_mpi_startall(2 * (size - 1), requests);
smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
- for(other = 0; other < 2*(size-1); other++) {
+ for (int other = 0; other < 2*(size-1); other++) {
smpi_mpi_request_free(&requests[other]);
}
xbt_free(requests);
int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm)
{
int system_tag = COLL_TAG_ALLGATHERV;
- int rank, size, other, index;
- MPI_Aint lb = 0, recvext = 0;
- MPI_Request *requests;
+ MPI_Aint lb = 0;
+ MPI_Aint recvext = 0;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
smpi_datatype_extent(recvtype, &lb, &recvext);
// Local copy from self
- smpi_datatype_copy(sendbuf, sendcount, sendtype, static_cast<char *>(recvbuf) + displs[rank] * recvext,recvcounts[rank], recvtype);
+ smpi_datatype_copy(sendbuf, sendcount, sendtype,
+ static_cast<char *>(recvbuf) + displs[rank] * recvext, recvcounts[rank], recvtype);
// Send buffers to others;
- requests = xbt_new(MPI_Request, 2 * (size - 1));
- index = 0;
- for(other = 0; other < size; other++) {
+ MPI_Request *requests = xbt_new(MPI_Request, 2 * (size - 1));
+ int index = 0;
+ for (int other = 0; other < size; other++) {
if(other != rank) {
requests[index] =
smpi_isend_init(sendbuf, sendcount, sendtype, other, system_tag, comm);
// Wait for completion of all comms.
smpi_mpi_startall(2 * (size - 1), requests);
smpi_mpi_waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
- for(other = 0; other < 2*(size-1); other++) {
+ for (int other = 0; other < 2*(size-1); other++) {
smpi_mpi_request_free(&requests[other]);
}
xbt_free(requests);
void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
int system_tag = COLL_TAG_SCATTER;
- int rank, size, dst, index;
- MPI_Aint lb = 0, sendext = 0;
+ MPI_Aint lb = 0;
+ MPI_Aint sendext = 0;
MPI_Request *requests;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
if(rank != root) {
// Recv buffer from root
smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
}
// Send buffers to receivers
requests = xbt_new(MPI_Request, size - 1);
- index = 0;
- for(dst = 0; dst < size; dst++) {
+ int index = 0;
+ for(int dst = 0; dst < size; dst++) {
if(dst != root) {
- requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + dst * sendcount * sendext, sendcount, sendtype, dst,
- system_tag, comm);
+ requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + dst * sendcount * sendext, sendcount, sendtype,
+ dst, system_tag, comm);
index++;
}
}
// Wait for completion of isend's.
smpi_mpi_startall(size - 1, requests);
smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
- for(dst = 0; dst < size-1; dst++) {
+ for (int dst = 0; dst < size-1; dst++) {
smpi_mpi_request_free(&requests[dst]);
}
xbt_free(requests);
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
int system_tag = COLL_TAG_SCATTERV;
- int rank, size, dst, index;
- MPI_Aint lb = 0, sendext = 0;
- MPI_Request *requests;
+ MPI_Aint lb = 0;
+ MPI_Aint sendext = 0;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
if(rank != root) {
// Recv buffer from root
smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
sendtype, recvbuf, recvcount, recvtype);
}
// Send buffers to receivers
- requests = xbt_new(MPI_Request, size - 1);
- index = 0;
- for(dst = 0; dst < size; dst++) {
- if(dst != root) {
+ MPI_Request *requests = xbt_new(MPI_Request, size - 1);
+ int index = 0;
+ for (int dst = 0; dst < size; dst++) {
+ if (dst != root) {
requests[index] = smpi_isend_init(static_cast<char *>(sendbuf) + displs[dst] * sendext, sendcounts[dst],
sendtype, dst, system_tag, comm);
index++;
// Wait for completion of isend's.
smpi_mpi_startall(size - 1, requests);
smpi_mpi_waitall(size - 1, requests, MPI_STATUS_IGNORE);
- for(dst = 0; dst < size-1; dst++) {
+ for (int dst = 0; dst < size-1; dst++) {
smpi_mpi_request_free(&requests[dst]);
}
xbt_free(requests);
MPI_Comm comm)
{
int system_tag = COLL_TAG_REDUCE;
- int rank, size, src, index;
- MPI_Aint lb = 0, dataext = 0;
- MPI_Request *requests;
- void **tmpbufs;
+ MPI_Aint lb = 0;
+ MPI_Aint dataext = 0;
char* sendtmpbuf = static_cast<char *>(sendbuf);
-
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
//non commutative case, use a working algo from openmpi
if(!smpi_op_is_commute(op)){
smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count, datatype, op, root, comm);
if (sendtmpbuf != nullptr && recvbuf != nullptr)
smpi_datatype_copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
// Receive buffers from senders
- requests = xbt_new(MPI_Request, size - 1);
- tmpbufs = xbt_new(void *, size - 1);
- index = 0;
- for(src = 0; src < size; src++) {
- if(src != root) {
+ MPI_Request *requests = xbt_new(MPI_Request, size - 1);
+ void **tmpbufs = xbt_new(void *, size - 1);
+ int index = 0;
+ for (int src = 0; src < size; src++) {
+ if (src != root) {
if (!smpi_process_get_replaying())
tmpbufs[index] = xbt_malloc(count * dataext);
else
}
// Wait for completion of irecv's.
smpi_mpi_startall(size - 1, requests);
- for(src = 0; src < size - 1; src++) {
+ for (int src = 0; src < size - 1; src++) {
index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
XBT_DEBUG("finished waiting any request with index %d", index);
if(index == MPI_UNDEFINED) {
void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
int system_tag = -888;
- int rank, size, other, index;
MPI_Aint lb = 0, dataext = 0;
- MPI_Request *requests;
- void **tmpbufs;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
smpi_datatype_extent(datatype, &lb, &dataext);
smpi_datatype_copy(sendbuf, count, datatype, recvbuf, count, datatype);
// Send/Recv buffers to/from others;
- requests = xbt_new(MPI_Request, size - 1);
- tmpbufs = xbt_new(void *, rank);
- index = 0;
- for(other = 0; other < rank; other++) {
+ MPI_Request *requests = xbt_new(MPI_Request, size - 1);
+ void **tmpbufs = xbt_new(void *, rank);
+ int index = 0;
+ for (int other = 0; other < rank; other++) {
tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
requests[index] = smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
index++;
}
- for(other = rank + 1; other < size; other++) {
+ for (int other = rank + 1; other < size; other++) {
requests[index] = smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
index++;
}
smpi_mpi_startall(size - 1, requests);
if(smpi_op_is_commute(op)){
- for(other = 0; other < size - 1; other++) {
+ for (int other = 0; other < size - 1; other++) {
index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
if(index == MPI_UNDEFINED) {
break;
}
}else{
//non commutative case, wait in order
- for(other = 0; other < size - 1; other++) {
+ for (int other = 0; other < size - 1; other++) {
smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
if(other < rank) {
smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
int system_tag = -888;
- int rank, size, other, index;
MPI_Aint lb = 0, dataext = 0;
- MPI_Request *requests;
- void **tmpbufs;
int recvbuf_is_empty=1;
- rank = smpi_comm_rank(comm);
- size = smpi_comm_size(comm);
+ int rank = smpi_comm_rank(comm);
+ int size = smpi_comm_size(comm);
smpi_datatype_extent(datatype, &lb, &dataext);
// Send/Recv buffers to/from others;
- requests = xbt_new(MPI_Request, size - 1);
- tmpbufs = xbt_new(void *, rank);
- index = 0;
- for(other = 0; other < rank; other++) {
+ MPI_Request *requests = xbt_new(MPI_Request, size - 1);
+ void **tmpbufs = xbt_new(void *, rank);
+ int index = 0;
+ for (int other = 0; other < rank; other++) {
tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
- requests[index] =
- smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
+ requests[index] = smpi_irecv_init(tmpbufs[index], count, datatype, other, system_tag, comm);
index++;
}
- for(other = rank + 1; other < size; other++) {
- requests[index] =
- smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
+ for (int other = rank + 1; other < size; other++) {
+ requests[index] = smpi_isend_init(sendbuf, count, datatype, other, system_tag, comm);
index++;
}
// Wait for completion of all comms.
smpi_mpi_startall(size - 1, requests);
if(smpi_op_is_commute(op)){
- for(other = 0; other < size - 1; other++) {
+ for (int other = 0; other < size - 1; other++) {
index = smpi_mpi_waitany(size - 1, requests, MPI_STATUS_IGNORE);
if(index == MPI_UNDEFINED) {
break;
if(recvbuf_is_empty){
smpi_datatype_copy(tmpbufs[index], count, datatype, recvbuf, count, datatype);
recvbuf_is_empty=0;
- }else
- // #Request is below rank: it's a irecv
- smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
+ } else
+ // #Request is below rank: it's an irecv
+ smpi_op_apply(op, tmpbufs[index], recvbuf, &count, &datatype);
}
}
}else{
//non commutative case, wait in order
- for(other = 0; other < size - 1; other++) {
+ for (int other = 0; other < size - 1; other++) {
smpi_mpi_wait(&(requests[other]), MPI_STATUS_IGNORE);
if(other < rank) {
if(recvbuf_is_empty){
smpi_datatype_copy(tmpbufs[other], count, datatype, recvbuf, count, datatype);
recvbuf_is_empty=0;
- }else smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
+ } else
+ smpi_op_apply(op, tmpbufs[other], recvbuf, &count, &datatype);
}
}
}
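/* Worked example of the two prefix operations above, with op = MPI_SUM and
 * invented per-rank inputs v = {1, 2, 3, 4}: scan leaves op(v_0..v_r) in
 * recvbuf on rank r, i.e. {1, 3, 6, 10}, while exscan leaves op(v_0..v_{r-1}),
 * i.e. {-, 1, 3, 6}, where '-' means that, thanks to the recvbuf_is_empty
 * flag, recvbuf on rank 0 is never touched. */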