#include "simgrid/sg_config.h"
#include "colls/colls.h"
-#include "src/simix/SynchroComm.hpp"
+#include "src/kernel/activity/SynchroComm.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi, "Logging specific to SMPI (base)");
// Minimum simulated duration (in seconds) injected into every MPI_Test call,
// so that busy-wait loops polling MPI_Test still advance simulated time.
// User-configurable at runtime through the "smpi/test" option.
static simgrid::config::Flag<double> smpi_test_sleep(
"smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
-static int factor_cmp(const s_smpi_factor_multival_t& pa, const s_smpi_factor_multival_t& pb)
-{
- return (pa.factor > pb.factor) ? 1 :
- (pa.factor < pb.factor) ? -1 : 0;
-}
-
static std::vector<s_smpi_factor_multival_t> parse_factor(const char *smpi_coef_string)
{
std::vector<s_smpi_factor_multival_t> smpi_factor;
- s_smpi_factor_multival_t fact;
/** Setup the tokenizer that parses the string **/
typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
token_iter != tokens.end(); token_iter++) {
XBT_DEBUG("token : %s", token_iter->c_str());
Tokenizer factor_values(*token_iter, factor_separator);
-
+ s_smpi_factor_multival_t fact;
if (factor_values.begin() == factor_values.end()) {
xbt_die("Malformed radical for smpi factor: '%s'", smpi_coef_string);
}
}
else {
errmsg = bprintf("Invalid factor value %d in chunk #%zu: %%s", iteration, smpi_factor.size()+1);
- fact.values.push_back(xbt_str_parse_double((*factor_iter).c_str(), errmsg));
+ fact.values.push_back(xbt_str_parse_double(factor_iter->c_str(), errmsg));
}
xbt_free(errmsg);
}
smpi_factor.push_back(fact);
- XBT_DEBUG("smpi_factor:\t%ld : %zu values, first: %f", fact.factor, smpi_factor.size(), fact.values[0]);
+ XBT_DEBUG("smpi_factor:\t%zu : %zu values, first: %f", fact.factor, smpi_factor.size(), fact.values[0]);
}
- std::sort(smpi_factor.begin(), smpi_factor.end(), &factor_cmp);
+ std::sort(smpi_factor.begin(), smpi_factor.end(),
+ [](const s_smpi_factor_multival_t &pa,
+ const s_smpi_factor_multival_t &pb) {
+ return (pa.factor < pb.factor);
+ });
for (auto& fact : smpi_factor) {
- XBT_DEBUG("smpi_factor:\t%ld : %zu values, first: %f", fact.factor, smpi_factor.size() ,fact.values[0]);
+ XBT_DEBUG("smpi_factor:\t%zu : %zu values, first: %f", fact.factor, smpi_factor.size() ,fact.values[0]);
}
return smpi_factor;
for (auto& fact : smpi_os_values) {
if (size <= fact.factor) { // Values already too large, use the previously
// computed value of current!
- XBT_DEBUG("os : %zu <= %ld return %.10f", size, fact.factor, current);
+ XBT_DEBUG("os : %zu <= %zu return %.10f", size, fact.factor, current);
return current;
}else{
// If the next section is too large, the current section must be used.
// Note: parse_factor() (used before) already sorts the dynar we iterate over!
for (auto& fact : smpi_ois_values) {
if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
- XBT_DEBUG("ois : %zu <= %ld return %.10f", size, fact.factor, current);
+ XBT_DEBUG("ois : %zu <= %zu return %.10f", size, fact.factor, current);
return current;
}else{
// If the next section is too large, the current section must be used.
smpi_or_values = parse_factor(xbt_cfg_get_string("smpi/or"));
}
- double current=smpi_or_values.empty()?0.0:smpi_or_values[0].values[0]+smpi_or_values[0].values[1]*size;
+ double current=smpi_or_values.empty()?0.0:smpi_or_values.front().values[0]+smpi_or_values.front().values[1]*size;
+
// Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
// sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
// Note: parse_factor() (used before) already sorts the dynar we iterate over!
for (auto fact : smpi_or_values) {
if (size <= fact.factor) { // Values already too large, use the previously
// computed value of current!
- XBT_DEBUG("or : %zu <= %ld return %.10f", size, fact.factor, current);
+ XBT_DEBUG("or : %zu <= %zu return %.10f", size, fact.factor, current);
return current;
} else {
// If the next section is too large, the current section must be used.
int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
{
- xbt_dynar_t comms;
+ std::vector<simgrid::kernel::activity::ActivityImpl*> comms;
+ comms.reserve(count);
+
int i;
int flag = 0;
*index = MPI_UNDEFINED;
- comms = xbt_dynar_new(sizeof(smx_synchro_t), nullptr);
+
std::vector<int> map; /** Maps all matching comms back to their location in requests **/
for(i = 0; i < count; i++) {
if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action && !(requests[i]->flags & PREPARED)) {
- xbt_dynar_push(comms, &requests[i]->action);
+ comms.push_back(requests[i]->action);
map.push_back(i);
}
}
if(smpi_test_sleep > 0)
simcall_process_sleep(nsleeps*smpi_test_sleep);
- i = simcall_comm_testany(comms); // The i-th element in comms matches!
+ i = simcall_comm_testany(comms.data(), comms.size()); // The i-th element in comms matches!
if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
*index = map[i];
finish_wait(&requests[*index], status);
flag = 1;
smpi_empty_status(status);
}
- xbt_dynar_free(&comms);
return flag;
}
}
if (request->action != nullptr){
- simgrid::simix::Comm *sync_comm = static_cast<simgrid::simix::Comm*>(request->action);
- MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
+ simgrid::kernel::activity::Comm *sync_comm = static_cast<simgrid::kernel::activity::Comm*>(request->action);
+ MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
*flag = 1;
if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED) == 0) {
status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src);
}
}
if(size > 0) {
- i = simcall_comm_waitany(comms);
+ i = simcall_comm_waitany(comms, -1);
// not MPI_UNDEFINED, as this is a simix return code
if (i != -1) {