summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
6890b92)
This commit introduces the possibility to use a csv file that
contains sections of the code that should be sped up or slowed down
according to the second column in that specific file.
One line could look like this:
exchange_1.f:30:exchange_1.f:130,1.18244559422142
This means that computation time between line numbers 30 and 130 of file exchange_1.f
should be slowed down by a factor of 1.18.
Of course, this only makes sense if computation time is calculated.
Activation via the newly introduced smpi/comp-adjustment-file CLI cfg param:
--cfg=smpi/comp-adjustment-file:/path/to/file.csv
#ifndef INSTR_SMPI_H_
#define INSTR_SMPI_H_
#ifdef __cplusplus
#ifndef INSTR_SMPI_H_
#define INSTR_SMPI_H_
#ifdef __cplusplus
const char* previous_filename;
int previous_linenumber;
const char* previous_filename;
int previous_linenumber;
+
+#ifdef __cplusplus
+  // Builds the key "prev_file:prev_line:file:line" used to look up this call
+  // location in the speedup/slowdown map loaded from smpi/comp-adjustment-file.
+  // NOTE(review): filename/linenumber are presumably sibling members of this
+  // struct — their declarations are not visible in this excerpt; confirm.
+  std::string get_composed_key() {
+    return std::string(previous_filename) + ':' + std::to_string(previous_linenumber) + ':' + filename + ':' + std::to_string(linenumber);
+  }
+#endif
+
} smpi_trace_call_location_t;
smpi_trace_call_location_t* smpi_trace_get_call_location();
} smpi_trace_call_location_t;
smpi_trace_call_location_t* smpi_trace_get_call_location();
#if HAVE_PAPI
xbt_cfg_register_string("smpi/papi-events", nullptr, NULL, "This switch enables tracking the specified counters with PAPI");
#endif
#if HAVE_PAPI
xbt_cfg_register_string("smpi/papi-events", nullptr, NULL, "This switch enables tracking the specified counters with PAPI");
#endif
+ xbt_cfg_register_string("smpi/comp-adjustment-file", nullptr, NULL, "A file containing speedups or slowdowns for some parts of the code.");
xbt_cfg_register_string("smpi/os", "1:0:0:0:0", NULL, "Small messages timings (MPI_Send minimum time for small messages)");
xbt_cfg_register_string("smpi/ois", "1:0:0:0:0", NULL, "Small messages timings (MPI_Isend minimum time for small messages)");
xbt_cfg_register_string("smpi/or", "1:0:0:0:0", NULL, "Small messages timings (MPI_Recv minimum time for small messages)");
xbt_cfg_register_string("smpi/os", "1:0:0:0:0", NULL, "Small messages timings (MPI_Send minimum time for small messages)");
xbt_cfg_register_string("smpi/ois", "1:0:0:0:0", NULL, "Small messages timings (MPI_Isend minimum time for small messages)");
xbt_cfg_register_string("smpi/or", "1:0:0:0:0", NULL, "Small messages timings (MPI_Recv minimum time for small messages)");
XBT_PRIVATE int smpi_process_event_set(void);
#endif
XBT_PRIVATE int smpi_process_event_set(void);
#endif
+extern std::map<std::string, double> location2speedup;
if (MC_is_active() || MC_record_replay_is_active())
return;
if (MC_is_active() || MC_record_replay_is_active())
return;
xbt_os_timer_t timer = smpi_process_timer();
xbt_os_threadtimer_stop(timer);
// smpi_switch_data_segment(smpi_process_count());
xbt_os_timer_t timer = smpi_process_timer();
xbt_os_threadtimer_stop(timer);
// smpi_switch_data_segment(smpi_process_count());
xbt_backtrace_display_current();
xbt_die("Aborting.");
}
xbt_backtrace_display_current();
xbt_die("Aborting.");
}
+
+ if (xbt_cfg_get_string("smpi/comp-adjustment-file")[0] != '\0') { // Maybe we need to artificially speed up or slow
+ // down our computation based on our statistical analysis.
+
+ smpi_trace_call_location_t* loc = smpi_process_get_call_location();
+ std::string key = loc->get_composed_key();
+ std::map<std::string, double>::const_iterator it = location2speedup.find(key);
+ if (it != location2speedup.end()) {
+ speedup = it->second;
+ }
+ }
+
// Simulate the benchmarked computation unless disabled via command-line argument
// Simulate the benchmarked computation unless disabled via command-line argument
- if (xbt_cfg_get_boolean("smpi/simulate-computation"))
- smpi_execute(xbt_os_timer_elapsed(timer));
+ if (xbt_cfg_get_boolean("smpi/simulate-computation")) {
+ smpi_execute(xbt_os_timer_elapsed(timer)/speedup);
+ }
smpi_total_benched_time += xbt_os_timer_elapsed(timer);
}
smpi_total_benched_time += xbt_os_timer_elapsed(timer);
}
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi, "Logging specific to SMPI (kernel)");
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi, "Logging specific to SMPI (kernel)");
+#include <boost/tokenizer.hpp>
+#include <boost/algorithm/string.hpp> /* trim_right / trim_left */
+
+std::map<std::string, double> location2speedup;
typedef struct s_smpi_process_data {
double simulated;
typedef struct s_smpi_process_data {
double simulated;
global_timer = xbt_os_timer_new();
xbt_os_walltimer_start(global_timer);
}
global_timer = xbt_os_timer_new();
xbt_os_walltimer_start(global_timer);
}
+
+ if (xbt_cfg_get_string("smpi/comp-adjustment-file")[0] != '\0') {
+ std::string filename {xbt_cfg_get_string("smpi/comp-adjustment-file")};
+ std::ifstream fstream(filename);
+ if (!fstream.is_open()) {
+ xbt_die("Could not open file %s. Does it exist?", filename.c_str());
+ }
+
+ std::string line;
+ typedef boost::tokenizer< boost::escaped_list_separator<char>> Tokenizer;
+ std::getline(fstream, line); // Skip the header line
+ while (std::getline(fstream, line)) {
+ Tokenizer tok(line);
+ Tokenizer::iterator it = tok.begin();
+ Tokenizer::iterator end = std::next(tok.begin());
+
+ std::string location = *it;
+ boost::trim(location);
+ location2speedup.insert(std::pair<std::string, double>(location, std::stod(*end)));
+ }
+
+ }
+
if (process_count == 0){
process_count = SIMIX_process_count();
smpirun=1;
if (process_count == 0){
process_count = SIMIX_process_count();
smpirun=1;
process_data[i]->sampling = 0;
process_data[i]->finalization_barrier = NULL;
process_data[i]->return_value = 0;
process_data[i]->sampling = 0;
process_data[i]->finalization_barrier = NULL;
process_data[i]->return_value = 0;
+ process_data[i]->trace_call_loc = xbt_new(smpi_trace_call_location_t, 1);
}
//if the process was launched through smpirun script we generate a global mpi_comm_world
//if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
}
//if the process was launched through smpirun script we generate a global mpi_comm_world
//if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance