X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/08ad8db588e16ee3640e7f5df6bbbcdfa0bea901..1dab349dd1ee15cf92ba34bdf609a57a1b9287bf:/src/smpi/smpi_base.cpp

diff --git a/src/smpi/smpi_base.cpp b/src/smpi/smpi_base.cpp
index 3a686805e1..011c539613 100644
--- a/src/smpi/smpi_base.cpp
+++ b/src/smpi/smpi_base.cpp
@@ -5,6 +5,8 @@
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
 #include <xbt/config.hpp>
+#include <algorithm>
+#include <boost/tokenizer.hpp>
 
 #include "private.h"
 #include "xbt/virtu.h"
@@ -71,14 +73,13 @@ static int match_send(void* a, void* b,smx_synchro_t ignored) {
 // These are taken from surf/network.c and generalized to have more values for each factor
 typedef struct s_smpi_factor_multival *smpi_os_factor_multival_t;
 typedef struct s_smpi_factor_multival { // FIXME: this should be merged (deduplicated) with s_smpi_factor defined in network_smpi.c
-  long factor;
-  int nb_values;
-  double values[4];//arbitrary set to 4
+  long factor=0;
+  std::vector<double> values;
 } s_smpi_factor_multival_t;
 
-xbt_dynar_t smpi_os_values = nullptr;
-xbt_dynar_t smpi_or_values = nullptr;
-xbt_dynar_t smpi_ois_values = nullptr;
+std::vector<s_smpi_factor_multival_t> smpi_os_values;
+std::vector<s_smpi_factor_multival_t> smpi_or_values;
+std::vector<s_smpi_factor_multival_t> smpi_ois_values;
 
 static simgrid::config::Flag<double> smpi_wtime_sleep(
   "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
@@ -89,67 +90,76 @@ static simgrid::config::Flag<double> smpi_iprobe_sleep(
 static simgrid::config::Flag<double> smpi_test_sleep(
   "smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
 
-static int factor_cmp(const void *pa, const void *pb)
+static int factor_cmp(const s_smpi_factor_multival_t& pa, const s_smpi_factor_multival_t& pb)
 {
-  return ((static_cast<const s_smpi_factor_multival_t*>(pa))->factor > (static_cast<const s_smpi_factor_multival_t*>(pb))->factor) ? 1 :
-         ((static_cast<const s_smpi_factor_multival_t*>(pa))->factor < (static_cast<const s_smpi_factor_multival_t*>(pb))->factor) ? -1 : 0;
+  return (pa.factor > pb.factor) ? 1 :
+         (pa.factor < pb.factor) ? -1 : 0;
 }
 
-static xbt_dynar_t parse_factor(const char *smpi_coef_string)
+static std::vector<s_smpi_factor_multival_t> parse_factor(const char *smpi_coef_string)
 {
+  std::vector<s_smpi_factor_multival_t> smpi_factor;
   s_smpi_factor_multival_t fact;
-  char *value = nullptr;
-  unsigned int iter = 0;
-  fact.nb_values = 0;
-  unsigned int i = 0;
-  xbt_dynar_t radical_elements2 = nullptr;
-
-  xbt_dynar_t smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_multival_t), nullptr);
-  xbt_dynar_t radical_elements = xbt_str_split(smpi_coef_string, ";");
-  xbt_dynar_foreach(radical_elements, iter, value) {
-    memset(&fact, 0, sizeof(s_smpi_factor_multival_t));
-    radical_elements2 = xbt_str_split(value, ":");
-    if (xbt_dynar_length(radical_elements2) <2 || xbt_dynar_length(radical_elements2) > 5)
+
+  /** Setup the tokenizer that parses the string **/
+  typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
+  boost::char_separator<char> sep(";");
+  boost::char_separator<char> factor_separator(":");
+  std::string tmp_string(smpi_coef_string);
+  Tokenizer tokens(tmp_string, sep);
+
+  /**
+   * Iterate over patterns like A:B:C:D;E:F;G:H
+   * These will be broken down into:
+   * A --> B, C, D
+   * E --> F
+   * G --> H
+   */
+  for (Tokenizer::iterator token_iter = tokens.begin();
+       token_iter != tokens.end(); token_iter++) {
+    Tokenizer factor_values(*token_iter, factor_separator);
+
+    if (factor_values.begin() == factor_values.end()) {
       xbt_die("Malformed radical for smpi factor: '%s'", smpi_coef_string);
-    for(i =0; i<xbt_dynar_length(radical_elements2);i++){
-      char *errmsg;
-      if (i==0) {
-        errmsg = bprintf("Invalid factor in chunk #%d: %%s", iter+1);
-        fact.factor = xbt_str_parse_int(xbt_dynar_get_as(radical_elements2, i, char *), errmsg);
-      } else {
-        errmsg = bprintf("Invalid factor value %d in chunk #%d: %%s", i, iter+1);
-        fact.values[fact.nb_values] = xbt_str_parse_double(xbt_dynar_get_as(radical_elements2, i, char *), errmsg);
-        fact.nb_values++;
-      }
+    }
+    unsigned int iteration = 0;
+    for (Tokenizer::iterator factor_iter = factor_values.begin();
+         factor_iter != factor_values.end(); factor_iter++, iteration++) {
+      char *errmsg;
+
+      if (factor_iter == factor_values.begin()) { /* first element */
+        errmsg = bprintf("Invalid factor in chunk #%zu: %%s", smpi_factor.size()+1);
+        fact.factor = xbt_str_parse_int(factor_iter->c_str(), errmsg);
+      }
+      else {
+        errmsg = bprintf("Invalid factor value %d in chunk #%zu: %%s", iteration, smpi_factor.size()+1);
+        fact.values.push_back(xbt_str_parse_double((*factor_iter).c_str(), errmsg));
+      }
       xbt_free(errmsg);
     }
 
-    xbt_dynar_push_as(smpi_factor, s_smpi_factor_multival_t, fact);
-    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
-    xbt_dynar_free(&radical_elements2);
+    smpi_factor.push_back(fact);
+    XBT_DEBUG("smpi_factor:\t%ld : %zu values, first: %f", fact.factor, smpi_factor.size(), fact.values[0]);
   }
-  xbt_dynar_free(&radical_elements);
-  xbt_dynar_sort(smpi_factor, &factor_cmp);
-  xbt_dynar_foreach(smpi_factor, iter, fact) {
-    XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
+  std::sort(smpi_factor.begin(), smpi_factor.end(), &factor_cmp);
+  for (auto& fact : smpi_factor) {
+    XBT_DEBUG("smpi_factor:\t%ld : %zu values, first: %f", fact.factor, smpi_factor.size() ,fact.values[0]);
   }
+
   return smpi_factor;
 }
 
 static double smpi_os(double size)
 {
-  if (smpi_os_values == nullptr) {
+  if (smpi_os_values.empty()) {
     smpi_os_values = parse_factor(xbt_cfg_get_string("smpi/os"));
-    smpi_register_static(smpi_os_values, xbt_dynar_free_voidp);
   }
-  unsigned int iter = 0;
-  s_smpi_factor_multival_t fact;
   double current=0.0;
   // Iterate over all the sections that were specified and find the right
   // value. (fact.factor represents the interval sizes; we want to find the
   // section that has fact.factor <= size and no other such fact.factor <= size)
   // Note: parse_factor() (used before) already sorts the dynar we iterate over!
-  xbt_dynar_foreach(smpi_os_values, iter, fact) {
+  for (auto& fact : smpi_os_values) {
     if (size <= fact.factor) { // Values already too large, use the previously
                                // computed value of current!
       XBT_DEBUG("os : %f <= %ld return %f", size, fact.factor, current);
@@ -160,24 +170,21 @@ static double smpi_os(double size)
       current = fact.values[0]+fact.values[1]*size;
     }
   }
-  XBT_DEBUG("os : %f > %ld return %f", size, fact.factor, current);
+  XBT_DEBUG("Searching for smpi/os: %f is larger than the largest boundary, return %f", size, current);
 
   return current;
 }
 
 static double smpi_ois(double size)
 {
-  if (smpi_ois_values == nullptr) {
+  if (smpi_ois_values.empty()) {
     smpi_ois_values = parse_factor(xbt_cfg_get_string("smpi/ois"));
-    smpi_register_static(smpi_ois_values, xbt_dynar_free_voidp);
   }
-  unsigned int iter = 0;
-  s_smpi_factor_multival_t fact;
   double current=0.0;
   // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
   // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
   // Note: parse_factor() (used before) already sorts the dynar we iterate over!
-  xbt_dynar_foreach(smpi_ois_values, iter, fact) {
+  for (auto& fact : smpi_ois_values) {
     if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
       XBT_DEBUG("ois : %f <= %ld return %f", size, fact.factor, current);
       return current;
@@ -187,24 +194,21 @@ static double smpi_ois(double size)
       current = fact.values[0]+fact.values[1]*size;
     }
   }
-  XBT_DEBUG("ois : %f > %ld return %f", size, fact.factor, current);
+  XBT_DEBUG("Searching for smpi/ois: %f is larger than the largest boundary, return %f", size, current);
 
   return current;
 }
 
 static double smpi_or(double size)
 {
-  if (smpi_or_values == nullptr) {
+  if (smpi_or_values.empty()) {
     smpi_or_values = parse_factor(xbt_cfg_get_string("smpi/or"));
-    smpi_register_static(smpi_or_values, xbt_dynar_free_voidp);
   }
-  unsigned int iter = 0;
-  s_smpi_factor_multival_t fact;
   double current=0.0;
   // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
   // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
   // Note: parse_factor() (used before) already sorts the dynar we iterate over!
-  xbt_dynar_foreach(smpi_or_values, iter, fact) {
+  for (auto fact : smpi_or_values) {
     if (size <= fact.factor) { // Values already too large, use the previously
                                // computed value of current!
       XBT_DEBUG("or : %f <= %ld return %f", size, fact.factor, current);
@@ -215,7 +219,7 @@ static double smpi_or(double size)
       current=fact.values[0]+fact.values[1]*size;
     }
   }
-  XBT_DEBUG("or : %f > %ld return %f", size, fact.factor, current);
+  XBT_DEBUG("smpi_or: %f is larger than largest boundary, return %f", size, current);
 
   return current;
 }
@@ -771,44 +775,40 @@ int smpi_mpi_testany(int count, MPI_Request requests[], int *index, MPI_Status *
 {
   xbt_dynar_t comms;
   int i;
-  int* map;
   int flag = 0;
-  int size = 0;
 
   *index = MPI_UNDEFINED;
   comms = xbt_dynar_new(sizeof(smx_synchro_t), nullptr);
-  map = xbt_new(int, count);
+  std::vector<int> map; /** Maps all matching comms back to their location in requests **/
   for(i = 0; i < count; i++) {
     if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action && !(requests[i]->flags & PREPARED)) {
       xbt_dynar_push(comms, &requests[i]->action);
-      map[size] = i;
-      size++;
+      map.push_back(i);
     }
   }
-  if(size > 0) {
+  if(!map.empty()) {
    //multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
    static int nsleeps = 1;
    if(smpi_test_sleep > 0)
      simcall_process_sleep(nsleeps*smpi_test_sleep);
 
-    i = simcall_comm_testany(comms);
-    // not MPI_UNDEFINED, as this is a simix return code
-    if(i != -1) {
-      *index = map[i];
+    i = simcall_comm_testany(comms); // The i-th element in comms matches!
+    if (i != -1) { // -1 is not MPI_UNDEFINED but a SIMIX return code. (nothing matches)
+      *index = map[i];
       finish_wait(&requests[*index], status);
-      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT))
-      requests[*index] = MPI_REQUEST_NULL;
-      flag = 1;
-      nsleeps=1;
-    }else{
+      flag = 1;
+      nsleeps = 1;
+      if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT)) {
+        requests[*index] = MPI_REQUEST_NULL;
+      }
+    } else {
      nsleeps++;
     }
-  }else{
+  } else {
     //all requests are null or inactive, return true
-    flag=1;
+    flag = 1;
     smpi_empty_status(status);
   }
-  xbt_free(map);
  xbt_dynar_free(&comms);
 
  return flag;