double smpi_cpu_threshold = -1;
double smpi_host_speed;
-shared_malloc_type smpi_cfg_shared_malloc = shmalloc_global;
+SharedMallocType smpi_cfg_shared_malloc = SharedMallocType::GLOBAL;
double smpi_total_benched_time = 0;
extern "C" XBT_PUBLIC void smpi_execute_flops_(double* flops);
void smpi_execute_flops(double flops) {
xbt_assert(flops >= 0, "You're trying to execute a negative amount of flops (%f)!", flops);
XBT_DEBUG("Handle real computation time: %f flops", flops);
- smx_activity_t action = simcall_execution_start("computation", flops, 1, 0, smpi_process()->process()->get_host());
+ smx_activity_t action = simcall_execution_start("computation", flops, 1, 0, smpi_process()->get_actor()->get_host());
simcall_set_category (action, TRACE_internal_smpi_get_category());
simcall_execution_wait(action);
smpi_switch_data_segment(simgrid::s4u::Actor::self());
void smpi_bench_begin()
{
- if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
+ if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
smpi_switch_data_segment(simgrid::s4u::Actor::self());
}
* An MPI function has been called and now is the right time to update
* our PAPI counters for this process.
*/
- if (simgrid::config::get_value<std::string>("smpi/papi-events")[0] != '\0') {
+ if (not simgrid::config::get_value<std::string>("smpi/papi-events").empty()) {
papi_counter_t& counter_data = smpi_process()->papi_counters();
int event_set = smpi_process()->papi_event_set();
std::vector<long long> event_values = std::vector<long long>(counter_data.size());
}
#if HAVE_PAPI
- if (simgrid::config::get_value<std::string>("smpi/papi-events")[0] != '\0' && TRACE_smpi_is_enabled()) {
+ if (not simgrid::config::get_value<std::string>("smpi/papi-events").empty() && TRACE_smpi_is_enabled()) {
container_t container =
- new simgrid::instr::Container(std::string("rank-") + std::to_string(simgrid::s4u::this_actor::get_pid()));
+ simgrid::instr::Container::by_name(std::string("rank-") + std::to_string(simgrid::s4u::this_actor::get_pid()));
papi_counter_t& counter_data = smpi_process()->papi_counters();
for (auto const& pair : counter_data) {
- new simgrid::instr::SetVariableEvent(
- surf_get_clock(), container, PJ_type_get(/* countername */ pair.first.c_str(), container->type), pair.second);
+ simgrid::instr::VariableType* variable = static_cast<simgrid::instr::VariableType*>(container->type_->by_name(pair.first));
+ variable->set_event(SIMIX_get_clock(), pair.second);
}
}
#endif
smpi_bench_end();
XBT_DEBUG("Sleep for: %lf secs", secs);
- int rank = MPI_COMM_WORLD->rank();
+ int rank = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_sleeping_in(rank, secs);
simcall_process_sleep(secs);
bool need_more_benchs() const;
};
-}
-
-std::unordered_map<SampleLocation, LocalData, std::hash<std::string>> samples;
bool LocalData::need_more_benchs() const
{
return res;
}
+std::unordered_map<SampleLocation, LocalData, std::hash<std::string>> samples;
+}
+
void smpi_sample_1(int global, const char *file, int line, int iters, double threshold)
{
SampleLocation loc(global, file, line);
samples.clear();
}
+// Wrapper around getopt_long_only(3) that virtualizes the libc global
+// `optind` per simulated MPI process: each rank's parsing position is
+// saved in its smpi process data and restored before scanning, so ranks
+// running in the same OS process don't trample each other's option state.
+// Parameters and return value are exactly those of getopt_long_only(3).
+// NOTE(review): only `optind` is virtualized here — `optarg`/`opterr`/
+// `optopt` remain shared globals; presumably callers consume `optarg`
+// immediately after the call. Confirm this is sufficient.
+int smpi_getopt_long_only (int argc, char *const *argv, const char *options,
+                           const struct option * long_options, int *opt_index)
+{
+  if (smpi_process())
+    optind = smpi_process()->get_optind();  // restore this rank's saved scan position
+  int ret = getopt_long_only (argc, argv, options, long_options, opt_index);
+  if (smpi_process())
+    smpi_process()->set_optind(optind);     // persist position for this rank's next call
+  return ret;
+}
+
int smpi_getopt_long (int argc, char *const *argv, const char *options,
const struct option * long_options, int *opt_index)
{