X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/198b09ec16ca1b8fc05053bcae9e75c0ad689711..f50c785a8f726657dc3b4e32de522a7b3baca707:/src/smpi/smpi_global.cpp

diff --git a/src/smpi/smpi_global.cpp b/src/smpi/smpi_global.cpp
index 3501498fe7..c5b7734db1 100644
--- a/src/smpi/smpi_global.cpp
+++ b/src/smpi/smpi_global.cpp
@@ -1,41 +1,52 @@
-/* Copyright (c) 2007-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
+#include "mc/mc.h"
 #include "private.h"
 #include "private.hpp"
-#include "smpi_mpi_dt_private.h"
-#include "mc/mc.h"
-#include "src/mc/mc_record.h"
-#include "xbt/replay.h"
-#include "surf/surf.h"
-#include "src/simix/smx_private.h"
+#include "simgrid/s4u/Mailbox.hpp"
 #include "simgrid/sg_config.h"
+#include "src/kernel/activity/SynchroComm.hpp"
+#include "src/mc/mc_record.h"
 #include "src/mc/mc_replay.h"
 #include "src/msg/msg_private.h"
-#include "src/simix/SynchroComm.hpp"
-
+#include "src/simix/smx_private.h"
+#include "surf/surf.h"
+#include "xbt/replay.hpp"
 
-#include <float.h> /* DBL_MAX */
+#include <float.h> /* DBL_MAX */
+#include
+#include
 #include
 #include
 #include
-#include
+#include
+#include
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi, "Logging specific to SMPI (kernel)");
 
 #include <boost/tokenizer.hpp>
 #include <boost/algorithm/string.hpp> /* trim_right / trim_left */
 
+#if HAVE_PAPI
+#include "papi.h"
+const char* papi_default_config_name = "default";
+
+struct papi_process_data {
+  papi_counter_t counter_data;
+  int event_set;
+};
+
+#endif
 std::unordered_map<std::string, double> location2speedup;
 
 typedef struct s_smpi_process_data {
   double simulated;
   int *argc;
   char ***argv;
-  smx_mailbox_t mailbox;
-  smx_mailbox_t mailbox_small;
+  simgrid::s4u::MailboxPtr mailbox;
+  simgrid::s4u::MailboxPtr mailbox_small;
   xbt_mutex_t mailboxes_mutex;
   xbt_os_timer_t timer;
   MPI_Comm comm_self;
@@ -47,9 +58,14 @@ typedef struct s_smpi_process_data {
   int sampling; /* inside an SMPI_SAMPLE_ block? */
   char* instance_id;
   bool replaying; /* is the process replaying a trace */
-  xbt_bar_t finalization_barrier;
+  msg_bar_t finalization_barrier;
   int return_value;
-  smpi_trace_call_location_t* trace_call_loc;
+  smpi_trace_call_location_t trace_call_loc;
+#if HAVE_PAPI
+  /** Contains hardware data as read by PAPI **/
+  int papi_event_set;
+  papi_counter_t papi_counter_data;
+#endif
 } s_smpi_process_data_t;
 
 static smpi_process_data_t *process_data = nullptr;
@@ -63,6 +79,8 @@ MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
 MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
 MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;
 
+void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t) = &smpi_comm_copy_buffer_callback;
+
 #define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
 
 static char *get_mailbox_name(char *str, int index)
@@ -79,16 +97,17 @@ static char *get_mailbox_name_small(char *str, int index)
 void smpi_process_init(int *argc, char ***argv)
 {
-  int index=-1;
-  smpi_process_data_t data;
-  smx_process_t proc;
+  if (process_data == nullptr){
+    printf("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
+    exit(1);
+  }
   if (argc != nullptr && argv != nullptr) {
-    proc = SIMIX_process_self();
-    SIMIX_process_set_cleanup_function(proc, MSG_process_cleanup_from_SIMIX);
+    smx_actor_t proc = SIMIX_process_self();
+    proc->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
 
     char* instance_id = (*argv)[1];
     int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
-    index = smpi_process_index_of_smx_process(proc);
+    int index = smpi_process_index_of_smx_process(proc);
 
     if(index_to_process_data == nullptr){
       index_to_process_data=static_cast<int*>(xbt_malloc(SIMIX_process_count()*sizeof(int)));
     }
@@ -102,22 +121,19 @@ void smpi_process_init(int *argc, char ***argv)
     }
     MPI_Comm* temp_comm_world;
-    xbt_bar_t temp_bar;
+    msg_bar_t temp_bar;
     smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
-    data = smpi_process_remote_data(index);
-    data->comm_world = temp_comm_world;
+    smpi_process_data_t data = smpi_process_remote_data(index);
+    data->comm_world         = temp_comm_world;
     if(temp_bar != nullptr)
       data->finalization_barrier = temp_bar;
     data->index = index;
     data->instance_id = instance_id;
    data->replaying = false;
-    simdata_process_t simdata = static_cast<simdata_process_t>(simcall_process_get_data(proc));
-    simdata->data = data;
+    static_cast<simgrid::MsgActorExt*>(proc->data)->data = data;
 
     if (*argc > 3) {
-      free((*argv)[0]);
-      free((*argv)[1]);
       memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
       (*argv)[(*argc) - 1] = nullptr;
       (*argv)[(*argc) - 2] = nullptr;
@@ -126,12 +142,12 @@ void smpi_process_init(int *argc, char ***argv)
     data->argc = argc;
     data->argv = argv;
     // set the process attached to the mailbox
-    simcall_mbox_set_receiver(data->mailbox_small, proc);
+    data->mailbox_small->setReceiver(simgrid::s4u::Actor::self());
     XBT_DEBUG("<%d> New process in the game: %p", index, proc);
   }
   xbt_assert(smpi_process_data(),
-             "smpi_process_data() returned nullptr. You probably gave a nullptr parameter to MPI_Init. Although it's required by "
-             "MPI-2, this is currently not supported by SMPI.");
+             "smpi_process_data() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
" + "Although it's required by MPI-2, this is currently not supported by SMPI."); } void smpi_process_destroy() @@ -153,7 +169,7 @@ void smpi_process_finalize() int index = smpi_process_index(); // wait for all pending asynchronous comms to finish - xbt_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier); + MSG_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier); } /** @brief Check if a process is finalized */ @@ -195,7 +211,8 @@ bool smpi_process_get_replaying(){ int index = smpi_process_index(); if (index != MPI_UNDEFINED) return process_data[index_to_process_data[index]]->replaying; - else return (_xbt_replay_is_active() != 0); + else + return !simgrid::xbt::replay_is_active(); } int smpi_global_size() @@ -208,8 +225,8 @@ int smpi_global_size() smpi_process_data_t smpi_process_data() { - simdata_process_t simdata = static_cast(SIMIX_process_self_get_data()); - return static_cast(simdata->data); + simgrid::MsgActorExt* msgExt = static_cast(SIMIX_process_self()->data); + return static_cast(msgExt->data); } smpi_process_data_t smpi_process_remote_data(int index) @@ -243,7 +260,7 @@ int smpi_process_count() smpi_trace_call_location_t* smpi_process_get_call_location() { smpi_process_data_t process_data = smpi_process_data(); - return process_data->trace_call_loc; + return &process_data->trace_call_loc; } int smpi_process_index() @@ -263,13 +280,13 @@ MPI_Comm smpi_process_comm_world() smx_mailbox_t smpi_process_mailbox() { smpi_process_data_t data = smpi_process_data(); - return data->mailbox; + return data->mailbox->getImpl(); } smx_mailbox_t smpi_process_mailbox_small() { smpi_process_data_t data = smpi_process_data(); - return data->mailbox_small; + return data->mailbox_small->getImpl(); } xbt_mutex_t smpi_process_mailboxes_mutex() @@ -281,13 +298,13 @@ xbt_mutex_t smpi_process_mailboxes_mutex() smx_mailbox_t smpi_process_remote_mailbox(int index) { smpi_process_data_t data = smpi_process_remote_data(index); - return data->mailbox; + return data->mailbox->getImpl(); } smx_mailbox_t smpi_process_remote_mailbox_small(int index) { smpi_process_data_t data = smpi_process_remote_data(index); - return data->mailbox_small; + return data->mailbox_small->getImpl(); } xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index) @@ -296,6 +313,20 @@ xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index) return data->mailboxes_mutex; } +#if HAVE_PAPI +int smpi_process_papi_event_set(void) +{ + smpi_process_data_t data = smpi_process_data(); + return data->papi_event_set; +} + +papi_counter_t& smpi_process_papi_counters(void) +{ + smpi_process_data_t data = smpi_process_data(); + return data->papi_counter_data; +} +#endif + xbt_os_timer_t smpi_process_timer() { smpi_process_data_t data = smpi_process_data(); @@ -318,9 +349,9 @@ MPI_Comm smpi_process_comm_self() { smpi_process_data_t data = smpi_process_data(); if(data->comm_self==MPI_COMM_NULL){ - MPI_Group group = smpi_group_new(1); - data->comm_self = smpi_comm_new(group, nullptr); - smpi_group_set_mapping(group, smpi_process_index(), 0); + MPI_Group group = new Group(1); + data->comm_self = new Comm(group, nullptr); + group->set_mapping(smpi_process_index(), 0); } return data->comm_self; @@ -350,25 +381,24 @@ int smpi_process_get_sampling() return data->sampling; } -void print_request(const char *message, MPI_Request request) +void smpi_comm_set_copy_data_callback(void (*callback) (smx_activity_t, void*, size_t)) { - XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, 
-           message, request, request->buf, request->size, request->src, request->dst, request->tag, request->flags);
+  smpi_comm_copy_data_callback = callback;
 }
 
-void smpi_comm_copy_buffer_callback(smx_synchro_t synchro, void *buff, size_t buff_size)
+void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
 {
   XBT_DEBUG("Copy the data over");
   void* tmpbuff=buff;
-  simgrid::simix::Comm *comm = dynamic_cast<simgrid::simix::Comm*>(synchro);
+  simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);
 
   if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
       && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )
     ){
       XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
-
-      smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->src_proc))->data))->index));
+      smpi_switch_data_segment(
+          (static_cast<smpi_process_data_t>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index));
       tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
       memcpy(tmpbuff, buff, buff_size);
   }
@@ -376,7 +406,8 @@ void smpi_comm_copy_buffer_callback(smx_synchro_t synchro, void *buff, size_t bu
   if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
       && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
     XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
-    smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->dst_proc))->data))->index));
+    smpi_switch_data_segment(
+        (static_cast<smpi_process_data_t>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index));
   }
 
   memcpy(comm->dst_buff, tmpbuff, buff_size);
@@ -392,9 +423,9 @@ void smpi_comm_copy_buffer_callback(smx_synchro_t synchro, void *buff, size_t bu
   if(tmpbuff!=buff)xbt_free(tmpbuff);
 }
 
-void smpi_comm_null_copy_buffer_callback(smx_synchro_t comm, void *buff, size_t buff_size)
+void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
 {
-  return;
+  /* nothing done in this version */
 }
 
 static void smpi_check_options(){
@@ -402,12 +433,16 @@ static void smpi_check_options(){
 
   xbt_assert(xbt_cfg_get_int("smpi/async-small-thresh") <= xbt_cfg_get_int("smpi/send-is-detached-thresh"));
 
-  if (xbt_cfg_is_default_value("smpi/running-power")) {
+  if (xbt_cfg_is_default_value("smpi/host-speed")) {
     XBT_INFO("You did not set the power of the host running the simulation. "
             "The timings will certainly not be accurate. "
-            "Use the option \"--cfg=smpi/running-power:\" to set its value."
+            "Use the option \"--cfg=smpi/host-speed:\" to set its value."
            "Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information.");
   }
+
+  xbt_assert(xbt_cfg_get_double("smpi/cpu-threshold") >=0,
+             "The 'smpi/cpu-threshold' option cannot have negative values [anymore]. If you want to discard "
+             "the simulation of any computation, please use 'smpi/simulate-computation:no' instead.");
 }
 
 int smpi_enabled() {
@@ -447,18 +482,87 @@ void smpi_global_init()
     }
   }
 
+#if HAVE_PAPI
+  // This map holds for each computation unit (such as "default" or "process1" etc.)
+  // the configuration as given by the user (counter data as a pair of (counter_name, counter_counter))
+  // and the (computed) event_set.
+  std::map<std::string, papi_process_data> units2papi_setup;
+
+  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
+    if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
+      XBT_ERROR("Could not initialize PAPI library; is it correctly installed and linked?"
+                " Expected version is %i",
+                PAPI_VER_CURRENT);
+
+    typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
+    boost::char_separator<char> separator_units(";");
+    std::string str = std::string(xbt_cfg_get_string("smpi/papi-events"));
+    Tokenizer tokens(str, separator_units);
+
+    // Iterate over all the computational units. This could be
+    // processes, hosts, threads, ranks... You name it. I'm not exactly
+    // sure what we will support eventually, so I'll leave it at the
+    // general term "units".
+    for (auto& unit_it : tokens) {
+      boost::char_separator<char> separator_events(":");
+      Tokenizer event_tokens(unit_it, separator_events);
+
+      int event_set = PAPI_NULL;
+      if (PAPI_create_eventset(&event_set) != PAPI_OK) {
+        // TODO: Should this let the whole simulation die?
+        XBT_CRITICAL("Could not create PAPI event set during init.");
+      }
+
+      // NOTE: We cannot use a map here, as we must obey the order of the counters
+      // This is important for PAPI: We need to map the values of counters back
+      // to the event_names (so, when PAPI_read() has finished)!
+      papi_counter_t counters2values;
+
+      // Iterate over all counters that were specified for this specific
+      // unit.
+      // Note that we need to remove the name of the unit
+      // (that could also be the "default" value), which always comes first.
+      // Hence, we start at ++(events.begin())!
+      for (Tokenizer::iterator events_it = ++(event_tokens.begin()); events_it != event_tokens.end(); events_it++) {
+
+        int event_code = PAPI_NULL;
+        char* event_name = const_cast<char*>((*events_it).c_str());
+        if (PAPI_event_name_to_code(event_name, &event_code) == PAPI_OK) {
+          if (PAPI_add_event(event_set, event_code) != PAPI_OK) {
+            XBT_ERROR("Could not add PAPI event '%s'. Skipping.", event_name);
+            continue;
+          } else {
+            XBT_DEBUG("Successfully added PAPI event '%s' to the event set.", event_name);
+          }
+        } else {
+          XBT_CRITICAL("Could not find PAPI event '%s'. Skipping.", event_name);
Skipping.", event_name); + continue; + } + + counters2values.push_back( + // We cannot just pass *events_it, as this is of type const basic_string + std::make_pair(std::string(*events_it), 0)); + } + + std::string unit_name = *(event_tokens.begin()); + papi_process_data config = {.counter_data = std::move(counters2values), .event_set = event_set}; + + units2papi_setup.insert(std::make_pair(unit_name, std::move(config))); + } + } +#endif if (process_count == 0){ process_count = SIMIX_process_count(); smpirun=1; } smpi_universe_size = process_count; - process_data = xbt_new0(smpi_process_data_t, process_count); + process_data = new smpi_process_data_t[process_count]; for (i = 0; i < process_count; i++) { - process_data[i] = xbt_new(s_smpi_process_data_t, 1); + process_data[i] = new s_smpi_process_data_t; process_data[i]->argc = nullptr; process_data[i]->argv = nullptr; - process_data[i]->mailbox = simcall_mbox_create(get_mailbox_name(name, i)); - process_data[i]->mailbox_small = simcall_mbox_create(get_mailbox_name_small(name, i)); + process_data[i]->mailbox = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, i)); + process_data[i]->mailbox_small = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, i)); process_data[i]->mailboxes_mutex = xbt_mutex_init(); process_data[i]->timer = xbt_os_timer_new(); if (MC_is_active()) @@ -471,20 +575,33 @@ void smpi_global_init() process_data[i]->finalization_barrier = nullptr; process_data[i]->return_value = 0; - if (xbt_cfg_get_boolean("smpi/trace-call-location")) { - process_data[i]->trace_call_loc = xbt_new(smpi_trace_call_location_t, 1); +#if HAVE_PAPI + if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') { + // TODO: Implement host/process/thread based counters. This implementation + // just always takes the values passed via "default", like this: + // "default:COUNTER1:COUNTER2:COUNTER3;". 
+      auto it = units2papi_setup.find(papi_default_config_name);
+      if (it != units2papi_setup.end()) {
+        process_data[i]->papi_event_set = it->second.event_set;
+        process_data[i]->papi_counter_data = it->second.counter_data;
+        XBT_DEBUG("Setting PAPI set for process %i", i);
+      } else {
+        process_data[i]->papi_event_set = PAPI_NULL;
+        XBT_DEBUG("No PAPI set for process %i", i);
+      }
     }
+#endif
   }
 
   //if the process was launched through smpirun script we generate a global mpi_comm_world
   //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
   if(smpirun){
-    group = smpi_group_new(process_count);
-    MPI_COMM_WORLD = smpi_comm_new(group, nullptr);
+    group = new Group(process_count);
+    MPI_COMM_WORLD = new Comm(group, nullptr);
     MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
-    xbt_bar_t bar=xbt_barrier_init(process_count);
+    msg_bar_t bar = MSG_barrier_init(process_count);
     for (i = 0; i < process_count; i++) {
-      smpi_group_set_mapping(group, i, i);
+      group->set_mapping(i, i);
       process_data[i]->finalization_barrier = bar;
     }
   }
@@ -493,38 +610,34 @@ void smpi_global_destroy()
 {
   int count = smpi_process_count();
-  int i;
 
   smpi_bench_destroy();
   if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-    while (smpi_group_unuse(smpi_comm_group(MPI_COMM_WORLD)) > 0);
-    xbt_barrier_destroy(process_data[0]->finalization_barrier);
+    delete MPI_COMM_WORLD->group();
+    MSG_barrier_destroy(process_data[0]->finalization_barrier);
  }else{
    smpi_deployment_cleanup_instances();
  }
-  for (i = 0; i < count; i++) {
+  for (int i = 0; i < count; i++) {
    if(process_data[i]->comm_self!=MPI_COMM_NULL){
-      smpi_comm_destroy(process_data[i]->comm_self);
+      Comm::destroy(process_data[i]->comm_self);
    }
    if(process_data[i]->comm_intra!=MPI_COMM_NULL){
-      smpi_comm_destroy(process_data[i]->comm_intra);
+      Comm::destroy(process_data[i]->comm_intra);
    }
    xbt_os_timer_free(process_data[i]->timer);
    xbt_mutex_destroy(process_data[i]->mailboxes_mutex);
-    if (xbt_cfg_get_boolean("smpi/trace-call-location")) {
-      xbt_free(process_data[i]->trace_call_loc);
-    }
-    xbt_free(process_data[i]);
+    delete process_data[i];
  }
-  xbt_free(process_data);
+  delete[] process_data;
  process_data = nullptr;
 
  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-    smpi_comm_cleanup_smp(MPI_COMM_WORLD);
-    smpi_comm_cleanup_attributes(MPI_COMM_WORLD);
-    if(smpi_coll_cleanup_callback!=nullptr)
-      smpi_coll_cleanup_callback();
-    xbt_free(MPI_COMM_WORLD);
+    MPI_COMM_WORLD->cleanup_smp();
+    MPI_COMM_WORLD->cleanup_attr();
+    if(Colls::smpi_coll_cleanup_callback!=nullptr)
+      Colls::smpi_coll_cleanup_callback();
+    delete MPI_COMM_WORLD;
  }
  MPI_COMM_WORLD = MPI_COMM_NULL;
@@ -539,12 +652,13 @@ void smpi_global_destroy()
   smpi_free_static();
 }
 
+extern "C" {
+
 #ifndef WIN32
 
 void __attribute__ ((weak)) user_main_()
 {
   xbt_die("Should not be in this smpi_simulated_main");
-  return;
 }
 
 int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
@@ -570,7 +684,6 @@ int __attribute__ ((weak)) main(int argc, char **argv)
 
 #endif
 
-extern "C" {
 static void smpi_init_logs(){
 
   /* Connect log categories.  See xbt/log.c */
@@ -583,75 +696,42 @@ static void smpi_init_logs(){
   XBT_LOG_CONNECT(smpi_coll);
   XBT_LOG_CONNECT(smpi_colls);
   XBT_LOG_CONNECT(smpi_comm);
+  XBT_LOG_CONNECT(smpi_datatype);
   XBT_LOG_CONNECT(smpi_dvfs);
   XBT_LOG_CONNECT(smpi_group);
   XBT_LOG_CONNECT(smpi_kernel);
   XBT_LOG_CONNECT(smpi_mpi);
-  XBT_LOG_CONNECT(smpi_mpi_dt);
+  XBT_LOG_CONNECT(smpi_memory);
+  XBT_LOG_CONNECT(smpi_op);
   XBT_LOG_CONNECT(smpi_pmpi);
+  XBT_LOG_CONNECT(smpi_request);
   XBT_LOG_CONNECT(smpi_replay);
   XBT_LOG_CONNECT(smpi_rma);
+  XBT_LOG_CONNECT(smpi_utils);
 }
 }
 
 static void smpi_init_options(){
-  int gather_id = find_coll_description(mpi_coll_gather_description, xbt_cfg_get_string("smpi/gather"),"gather");
-  mpi_coll_gather_fun = reinterpret_cast
-      (mpi_coll_gather_description[gather_id].coll);
-
-  int allgather_id = find_coll_description(mpi_coll_allgather_description,
-                                           xbt_cfg_get_string("smpi/allgather"),"allgather");
-  mpi_coll_allgather_fun = reinterpret_cast
-      (mpi_coll_allgather_description[allgather_id].coll);
-
-  int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
-                                            xbt_cfg_get_string("smpi/allgatherv"),"allgatherv");
-  mpi_coll_allgatherv_fun = reinterpret_cast
-      (mpi_coll_allgatherv_description[allgatherv_id].coll);
-
-  int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
-                                           xbt_cfg_get_string("smpi/allreduce"),"allreduce");
-  mpi_coll_allreduce_fun = reinterpret_cast
-      (mpi_coll_allreduce_description[allreduce_id].coll);
-
-  int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
-                                          xbt_cfg_get_string("smpi/alltoall"),"alltoall");
-  mpi_coll_alltoall_fun = reinterpret_cast
-      (mpi_coll_alltoall_description[alltoall_id].coll);
-
-  int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
-                                           xbt_cfg_get_string("smpi/alltoallv"),"alltoallv");
-  mpi_coll_alltoallv_fun = reinterpret_cast
-      (mpi_coll_alltoallv_description[alltoallv_id].coll);
-
-  int bcast_id = find_coll_description(mpi_coll_bcast_description, xbt_cfg_get_string("smpi/bcast"),"bcast");
-  mpi_coll_bcast_fun = reinterpret_cast
-      (mpi_coll_bcast_description[bcast_id].coll);
-
-  int reduce_id = find_coll_description(mpi_coll_reduce_description, xbt_cfg_get_string("smpi/reduce"),"reduce");
-  mpi_coll_reduce_fun = reinterpret_cast
-      (mpi_coll_reduce_description[reduce_id].coll);
-
-  int reduce_scatter_id =
-      find_coll_description(mpi_coll_reduce_scatter_description,
-                            xbt_cfg_get_string("smpi/reduce-scatter"),"reduce_scatter");
-  mpi_coll_reduce_scatter_fun = reinterpret_cast
-      (mpi_coll_reduce_scatter_description[reduce_scatter_id].coll);
-
-  int scatter_id = find_coll_description(mpi_coll_scatter_description, xbt_cfg_get_string("smpi/scatter"),"scatter");
-  mpi_coll_scatter_fun = reinterpret_cast
-      (mpi_coll_scatter_description[scatter_id].coll);
-
-  int barrier_id = find_coll_description(mpi_coll_barrier_description, xbt_cfg_get_string("smpi/barrier"),"barrier");
-  mpi_coll_barrier_fun = reinterpret_cast
-      (mpi_coll_barrier_description[barrier_id].coll);
-
-  smpi_coll_cleanup_callback=nullptr;
+
+  Colls::set_collectives();
+  Colls::smpi_coll_cleanup_callback=nullptr;
 
   smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
-  smpi_running_power = xbt_cfg_get_double("smpi/running-power");
+  smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
   smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
   if (smpi_cpu_threshold < 0)
     smpi_cpu_threshold = DBL_MAX;
+
+  char* val = xbt_cfg_get_string("smpi/shared-malloc");
+  if (!strcasecmp(val, "yes") || !strcmp(val, "1") || !strcasecmp(val, "on") || !strcasecmp(val, "global")) {
+    smpi_cfg_shared_malloc = shmalloc_global;
+  } else if (!strcasecmp(val, "local")) {
+    smpi_cfg_shared_malloc = shmalloc_local;
+  } else if (!strcasecmp(val, "no") || !strcmp(val, "0") || !strcasecmp(val, "off")) {
+    smpi_cfg_shared_malloc = shmalloc_none;
+  } else {
+    xbt_die("Invalid value '%s' for option smpi/shared-malloc. Possible values: 'on' or 'global', 'local', 'off'",
+            val);
+  }
 }
 
 int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
@@ -672,13 +752,13 @@ int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
 
   SIMIX_global_init(&argc, argv);
   MSG_init(&argc,argv);
 
-  SMPI_switch_data_segment = smpi_switch_data_segment;
+  SMPI_switch_data_segment = &smpi_switch_data_segment;
 
   smpi_init_options();
 
   // parse the platform file: get the host list
   SIMIX_create_environment(argv[1]);
-  SIMIX_comm_set_copy_data_callback(&smpi_comm_copy_buffer_callback);
+  SIMIX_comm_set_copy_data_callback(smpi_comm_copy_data_callback);
 
   SIMIX_function_register_default(realmain);
   SIMIX_launch_application(argv[2]);