X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/e18a7b043d161b29d0bf3f5743cb7a570241576a..f685334597e6b8bcdfe353d69be908914eed4dd6:/src/smpi/internals/smpi_global.cpp

diff --git a/src/smpi/internals/smpi_global.cpp b/src/smpi/internals/smpi_global.cpp
index aa46baa987..fa1605beb0 100644
--- a/src/smpi/internals/smpi_global.cpp
+++ b/src/smpi/internals/smpi_global.cpp
@@ -1,23 +1,23 @@
-/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2018. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
-#include "mc/mc.h"
-#include "simgrid/s4u/Mailbox.hpp"
-#include "simgrid/s4u/Host.hpp"
-#include "src/msg/msg_private.h"
-#include "src/simix/smx_private.h"
-#include "src/surf/surf_interface.hpp"
 #include "SmpiHost.hpp"
-#include "xbt/config.hpp"
-#include "private.h"
+#include "mc/mc.h"
 #include "private.hpp"
+#include "simgrid/s4u/Host.hpp"
+#include "simgrid/s4u/Mailbox.hpp"
+#include "simgrid/s4u/forward.hpp"
 #include "smpi_coll.hpp"
 #include "smpi_comm.hpp"
 #include "smpi_group.hpp"
 #include "smpi_info.hpp"
 #include "smpi_process.hpp"
+#include "src/msg/msg_private.hpp"
+#include "src/simix/smx_private.hpp"
+#include "src/surf/surf_interface.hpp"
+#include "xbt/config.hpp"
 
 #include <cfloat> /* DBL_MAX */
 #include <fstream>
@@ -51,18 +51,32 @@ struct papi_process_data {
 };
 #endif
+using simgrid::s4u::Actor;
+using simgrid::s4u::ActorPtr;
 std::unordered_map<std::string, double> location2speedup;
 
-simgrid::smpi::Process **process_data = nullptr;
+static std::map<ActorPtr, simgrid::smpi::Process*> process_data;
 int process_count = 0;
 int smpi_universe_size = 0;
-int* index_to_process_data = nullptr;
 extern double smpi_total_benched_time;
 xbt_os_timer_t global_timer;
+/**
+ * Setting MPI_COMM_WORLD to MPI_COMM_UNINITIALIZED (it's a variable) is
+ * important because the implementation of MPI_Comm checks whether
+ * "this == MPI_COMM_UNINITIALIZED"; if so, it uses smpi_process()->comm_world()
+ * instead of "this".
+ * This is how we can have a single global variable even though every process
+ * gets a different communicator (the one its SMPI instance uses).
+ *
+ * See smpi_comm.cpp and the functions therein for details.
+ */
 MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
 MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
 MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
 MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;
+// No instance gets manually created; check also the smpirun.in script as
+// this default name is used there as well (when the deployment tag is generated).
+static const char* smpi_default_instance_name = "smpirun";
 static simgrid::config::Flag<double> smpi_wtime_sleep(
   "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
 static simgrid::config::Flag<double> smpi_init_sleep(
@@ -70,6 +84,12 @@ static simgrid::config::Flag<double> smpi_init_sleep(
 
 void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t) = &smpi_comm_copy_buffer_callback;
 
+void smpi_add_process(ActorPtr actor)
+{
+  process_data.insert({actor, new simgrid::smpi::Process(actor, nullptr)});
+  // smpi_deployment_register_process("master_mpi", 0, actor);
+}
+
 int smpi_process_count()
 {
   return process_count;
@@ -77,16 +97,16 @@ int smpi_process_count()
 
 simgrid::smpi::Process* smpi_process()
 {
-  smx_actor_t me = SIMIX_process_self();
+  ActorPtr me = Actor::self();
   if (me == nullptr) // This happens sometimes (eg, when linking against NS3 because it pulls openMPI...)
     return nullptr;
-  simgrid::msg::ActorExt* msgExt = static_cast<simgrid::msg::ActorExt*>(me->userdata);
+  simgrid::msg::ActorExt* msgExt = static_cast<simgrid::msg::ActorExt*>(me->getImpl()->userdata);
   return static_cast<simgrid::smpi::Process*>(msgExt->data);
 }
 
-simgrid::smpi::Process* smpi_process_remote(int index)
+simgrid::smpi::Process* smpi_process_remote(ActorPtr actor)
 {
-  return process_data[index_to_process_data[index]];
+  return process_data.at(actor);
 }
 
 MPI_Comm smpi_process_comm_self(){
@@ -173,24 +193,23 @@ void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
   auto private_blocks = merge_private_blocks(src_private_blocks, dst_private_blocks);
   check_blocks(private_blocks, buff_size);
   void* tmpbuff=buff;
-  if((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && (static_cast<char*>(buff) >= smpi_start_data_exe)
-      && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )
-      ){
-       XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
-
-       smpi_switch_data_segment(
-           static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->src_proc->userdata)->data))
-               ->index());
-       tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
-       memcpy_private(tmpbuff, buff, private_blocks);
+  if ((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && (static_cast<char*>(buff) >= smpi_data_exe_start) &&
+      (static_cast<char*>(buff) < smpi_data_exe_start + smpi_data_exe_size)) {
+    XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
+
+    smpi_switch_data_segment(
+        static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->src_proc->userdata)->data))
+            ->index());
+    tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
+    memcpy_private(tmpbuff, buff, private_blocks);
   }
-  if((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && ((char*)comm->dst_buff >= smpi_start_data_exe)
-      && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
-       XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
-       smpi_switch_data_segment(
-           static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->dst_proc->userdata)->data))
-               ->index());
+  if ((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && ((char*)comm->dst_buff >= smpi_data_exe_start) &&
+      ((char*)comm->dst_buff < smpi_data_exe_start + smpi_data_exe_size)) {
+    XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
+    smpi_switch_data_segment(
+        static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->dst_proc->userdata)->data))
+            ->index());
   }
   XBT_DEBUG("Copying %zu bytes from %p to %p", buff_size, tmpbuff,comm->dst_buff);
   memcpy_private(comm->dst_buff, tmpbuff, private_blocks);
@@ -230,20 +249,18 @@ static void smpi_check_options(){
 }
 
 int smpi_enabled() {
-  return process_data != nullptr;
+  return not process_data.empty();
 }
 
 void smpi_global_init()
 {
-  MPI_Group group;
-
   if (not MC_is_active()) {
     global_timer = xbt_os_timer_new();
     xbt_os_walltimer_start(global_timer);
   }
 
-  if (xbt_cfg_get_string("smpi/comp-adjustment-file")[0] != '\0') {
-    std::string filename {xbt_cfg_get_string("smpi/comp-adjustment-file")};
+  std::string filename = xbt_cfg_get_string("smpi/comp-adjustment-file");
+  if (not filename.empty()) {
     std::ifstream fstream(filename);
     if (not fstream.is_open()) {
       xbt_die("Could not open file %s. Does it exist?", filename.c_str());
@@ -269,7 +286,7 @@ void smpi_global_init()
   // and the (computed) event_set.
   std::map<std::string, papi_process_data> units2papi_setup;
 
-  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
+  if (not xbt_cfg_get_string("smpi/papi-events").empty()) {
     if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT)
       XBT_ERROR("Could not initialize PAPI library; is it correctly installed and linked?"
                 " Expected version is %i",
@@ -277,7 +294,7 @@ void smpi_global_init()
 
     typedef boost::tokenizer<boost::char_separator<char>> Tokenizer;
     boost::char_separator<char> separator_units(";");
-    std::string str = std::string(xbt_cfg_get_string("smpi/papi-events"));
+    std::string str = xbt_cfg_get_string("smpi/papi-events");
     Tokenizer tokens(str, separator_units);
 
     // Iterate over all the computational units. This could be processes, hosts, threads, ranks... You name it.
@@ -330,64 +347,28 @@ void smpi_global_init()
     }
   }
 #endif
-
-  int smpirun = 0;
-  msg_bar_t finalization_barrier = nullptr;
-  if (process_count == 0){
-    process_count = SIMIX_process_count();
-    smpirun=1;
-    finalization_barrier = MSG_barrier_init(process_count);
-  }
-  smpi_universe_size = process_count;
-  process_data = new simgrid::smpi::Process*[process_count];
-  for (int i = 0; i < process_count; i++) {
-    process_data[i] = new simgrid::smpi::Process(i, finalization_barrier);
-  }
-  //if the process was launched through smpirun script we generate a global mpi_comm_world
-  //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
-  if (smpirun) {
-    group = new simgrid::smpi::Group(process_count);
-    MPI_COMM_WORLD = new simgrid::smpi::Comm(group, nullptr);
-    MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void*>(process_count));
-
-    for (int i = 0; i < process_count; i++)
-      group->set_mapping(i, i);
-  }
 }
 
 void smpi_global_destroy()
 {
-  int count = smpi_process_count();
-
   smpi_bench_destroy();
   smpi_shared_destroy();
-  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-    delete MPI_COMM_WORLD->group();
-    MSG_barrier_destroy(process_data[0]->finalization_barrier());
-  }else{
-    smpi_deployment_cleanup_instances();
-  }
-  for (int i = 0; i < count; i++) {
-    if(process_data[i]->comm_self()!=MPI_COMM_NULL){
-      simgrid::smpi::Comm::destroy(process_data[i]->comm_self());
+  smpi_deployment_cleanup_instances();
+  for (auto& pair : process_data) {
+    auto& process = pair.second;
+    if (process->comm_self() != MPI_COMM_NULL) {
+      simgrid::smpi::Comm::destroy(process->comm_self());
     }
-    if(process_data[i]->comm_intra()!=MPI_COMM_NULL){
-      simgrid::smpi::Comm::destroy(process_data[i]->comm_intra());
+    if (process->comm_intra() != MPI_COMM_NULL) {
+      simgrid::smpi::Comm::destroy(process->comm_intra());
    }
-    xbt_os_timer_free(process_data[i]->timer());
-    xbt_mutex_destroy(process_data[i]->mailboxes_mutex());
-    delete process_data[i];
-  }
-  delete[] process_data;
-  process_data = nullptr;
-
-  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-    MPI_COMM_WORLD->cleanup_smp();
-    MPI_COMM_WORLD->cleanup_attr();
-    if(simgrid::smpi::Colls::smpi_coll_cleanup_callback!=nullptr)
-      simgrid::smpi::Colls::smpi_coll_cleanup_callback();
-    delete MPI_COMM_WORLD;
+    xbt_os_timer_free(process->timer());
+    xbt_mutex_destroy(process->mailboxes_mutex());
   }
+  process_data.clear();
+
+  if (simgrid::smpi::Colls::smpi_coll_cleanup_callback != nullptr)
+    simgrid::smpi::Colls::smpi_coll_cleanup_callback();
 
   MPI_COMM_WORLD = MPI_COMM_NULL;
 
@@ -395,69 +376,30 @@ void smpi_global_destroy()
     xbt_os_timer_free(global_timer);
   }
 
-  xbt_free(index_to_process_data);
   if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
     smpi_destroy_global_memory_segments();
   smpi_free_static();
 }
 
-extern "C" {
-
-static void smpi_init_logs(){
-
-  /* Connect log categories. See xbt/log.c */
-
-  XBT_LOG_CONNECT(smpi); /* Keep this line as soon as possible in this function: xbt_log_appender_file.c depends on it
-                            DO NOT connect this in XBT or so, or it will be useless to xbt_log_appender_file.c */
-  XBT_LOG_CONNECT(instr_smpi);
-  XBT_LOG_CONNECT(smpi_bench);
-  XBT_LOG_CONNECT(smpi_coll);
-  XBT_LOG_CONNECT(smpi_colls);
-  XBT_LOG_CONNECT(smpi_comm);
-  XBT_LOG_CONNECT(smpi_datatype);
-  XBT_LOG_CONNECT(smpi_dvfs);
-  XBT_LOG_CONNECT(smpi_group);
-  XBT_LOG_CONNECT(smpi_host);
-  XBT_LOG_CONNECT(smpi_kernel);
-  XBT_LOG_CONNECT(smpi_mpi);
-  XBT_LOG_CONNECT(smpi_memory);
-  XBT_LOG_CONNECT(smpi_op);
-  XBT_LOG_CONNECT(smpi_pmpi);
-  XBT_LOG_CONNECT(smpi_process);
-  XBT_LOG_CONNECT(smpi_request);
-  XBT_LOG_CONNECT(smpi_replay);
-  XBT_LOG_CONNECT(smpi_rma);
-  XBT_LOG_CONNECT(smpi_shared);
-  XBT_LOG_CONNECT(smpi_utils);
-}
-}
-
 static void smpi_init_options(){
-  //return if already called
-  if (smpi_cpu_threshold > -1)
-    return;
-  simgrid::smpi::Colls::set_collectives();
-  simgrid::smpi::Colls::smpi_coll_cleanup_callback=nullptr;
-  smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
-  smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
-  const char* smpi_privatize_option = xbt_cfg_get_string("smpi/privatization");
-  if (std::strcmp(smpi_privatize_option, "no") == 0)
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
-  else if (std::strcmp(smpi_privatize_option, "yes") == 0)
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
-  else if (std::strcmp(smpi_privatize_option, "mmap") == 0)
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_MMAP;
-  else if (std::strcmp(smpi_privatize_option, "dlopen") == 0)
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_DLOPEN;
-
-  // Some compatibility stuff:
-  else if (std::strcmp(smpi_privatize_option, "1") == 0)
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
-  else if (std::strcmp(smpi_privatize_option, "0") == 0)
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
-
-  else
-    xbt_die("Invalid value for smpi/privatization: '%s'", smpi_privatize_option);
+  // return if already called
+  if (smpi_cpu_threshold > -1)
+    return;
+  simgrid::smpi::Colls::set_collectives();
+  simgrid::smpi::Colls::smpi_coll_cleanup_callback = nullptr;
+  smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
+  smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
+  std::string smpi_privatize_option = xbt_cfg_get_string("smpi/privatization");
+  if (smpi_privatize_option == "no" || smpi_privatize_option == "0")
+    smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
+  else if (smpi_privatize_option == "yes" || smpi_privatize_option == "1")
+    smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
+  else if (smpi_privatize_option == "mmap")
+    smpi_privatize_global_variables = SMPI_PRIVATIZE_MMAP;
+  else if (smpi_privatize_option == "dlopen")
+    smpi_privatize_global_variables = SMPI_PRIVATIZE_DLOPEN;
+  else
+    xbt_die("Invalid value for smpi/privatization: '%s'", smpi_privatize_option.c_str());
 
 #if defined(__FreeBSD__)
   if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
@@ -469,17 +411,16 @@ static void smpi_init_options(){
   if (smpi_cpu_threshold < 0)
     smpi_cpu_threshold = DBL_MAX;
 
-  char* val = xbt_cfg_get_string("smpi/shared-malloc");
-  if (not strcasecmp(val, "yes") || not strcmp(val, "1") || not strcasecmp(val, "on") ||
-      not strcasecmp(val, "global")) {
+  std::string val = xbt_cfg_get_string("smpi/shared-malloc");
+  if ((val == "yes") || (val == "1") || (val == "on") || (val == "global")) {
|| (val == "on") || (val == "global")) { smpi_cfg_shared_malloc = shmalloc_global; - } else if (not strcasecmp(val, "local")) { + } else if (val == "local") { smpi_cfg_shared_malloc = shmalloc_local; - } else if (not strcasecmp(val, "no") || not strcmp(val, "0") || not strcasecmp(val, "off")) { + } else if ((val == "no") || (val == "0") || (val == "off")) { smpi_cfg_shared_malloc = shmalloc_none; } else { xbt_die("Invalid value '%s' for option smpi/shared-malloc. Possible values: 'on' or 'global', 'local', 'off'", - val); + val.c_str()); } } @@ -541,6 +482,9 @@ int smpi_main(const char* executable, int argc, char *argv[]) SMPI_switch_data_segment = &smpi_switch_data_segment; + // TODO This will not be executed in the case where smpi_main is not called, + // e.g., not for smpi_msg_masterslave. This should be moved to another location + // that is always called -- maybe close to Actor::onCreation? simgrid::s4u::Host::onCreation.connect([](simgrid::s4u::Host& host) { host.extension_set(new simgrid::smpi::SmpiHost(&host)); }); @@ -550,7 +494,6 @@ int smpi_main(const char* executable, int argc, char *argv[]) SIMIX_comm_set_copy_data_callback(smpi_comm_copy_buffer_callback); smpi_init_options(); - if (smpi_privatize_global_variables == SMPI_PRIVATIZE_DLOPEN) { std::string executable_copy = executable; @@ -605,10 +548,11 @@ int smpi_main(const char* executable, int argc, char *argv[]) // Load the copy and resolve the entry point: void* handle = dlopen(target_executable.c_str(), RTLD_LAZY | RTLD_LOCAL | RTLD_DEEPBIND); + int saved_errno = errno; if (xbt_cfg_get_boolean("smpi/keep-temps") == false) unlink(target_executable.c_str()); if (handle == nullptr) - xbt_die("dlopen failed: %s (errno: %d -- %s)", dlerror(), errno, strerror(errno)); + xbt_die("dlopen failed: %s (errno: %d -- %s)", dlerror(), saved_errno, strerror(saved_errno)); smpi_entry_point_type entry_point = smpi_resolve_function(handle); if (not entry_point) xbt_die("Could not resolve entry point"); @@ -638,9 +582,13 @@ int smpi_main(const char* executable, int argc, char *argv[]) } + SMPI_init(); SIMIX_launch_application(argv[2]); + SMPI_app_instance_register(smpi_default_instance_name, nullptr, + SIMIX_process_count()); // This call has a side effect on process_count... 
+  MPI_COMM_WORLD = *smpi_deployment_comm_world(smpi_default_instance_name);
+  smpi_universe_size = process_count;
 
-  SMPI_init();
 
   /* Clean IO before the run */
   fflush(stdout);
@@ -665,11 +613,11 @@ int smpi_main(const char* executable, int argc, char *argv[])
                "You may want to use sampling functions or trace replay to reduce this.");
     }
   }
-  int count = smpi_process_count();
   int ret = 0;
-  for (int i = 0; i < count; i++) {
-    if(process_data[i]->return_value()!=0){
-      ret=process_data[i]->return_value();//return first non 0 value
+  for (auto& pair : process_data) {
+    auto& smpi_process = pair.second;
+    if (smpi_process->return_value() != 0) {
+      ret = smpi_process->return_value(); // return first non 0 value
       break;
     }
   }
@@ -682,14 +630,16 @@ int smpi_main(const char* executable, int argc, char *argv[])
 
 // Called either directly from the user code, or from the code called by smpirun
 void SMPI_init(){
-  smpi_init_logs();
+  simgrid::s4u::Actor::onCreation.connect([](simgrid::s4u::ActorPtr actor) {
+    smpi_add_process(actor);
+  });
   smpi_init_options();
   smpi_global_init();
   smpi_check_options();
   TRACE_smpi_alloc();
   simgrid::surf::surfExitCallbacks.connect(TRACE_smpi_release);
   if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
-    smpi_initialize_global_memory_segments();
+    smpi_backup_global_memory_segment();
 }
 
 void SMPI_finalize(){
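
A few illustrative sketches follow; none of them is SimGrid code, just minimal stand-alone C++ under stated assumptions. First, the central bookkeeping change of this commit: the index-addressed process_data array (and its index_to_process_data table) becomes a std::map keyed directly by the actor, so smpi_process_remote() turns into a plain process_data.at(actor) lookup. A minimal sketch of that shape, with hypothetical Actor/Process placeholders standing in for simgrid::s4u::ActorPtr and simgrid::smpi::Process:

#include <cassert>
#include <map>
#include <memory>

// Placeholder types: Actor stands in for simgrid::s4u::Actor (ActorPtr is a
// smart pointer in s4u), Process for simgrid::smpi::Process.
struct Actor {};
using ActorPtr = std::shared_ptr<Actor>;

struct Process {
  explicit Process(ActorPtr a) : actor(std::move(a)) {}
  ActorPtr actor;
};

// The map replaces "Process** process_data" + "int* index_to_process_data":
// the actor itself is the key, so no index translation table is needed.
static std::map<ActorPtr, Process*> process_data;

void add_process(ActorPtr actor)
{
  process_data.insert({actor, new Process(actor)});
}

Process* process_remote(ActorPtr actor)
{
  return process_data.at(actor); // throws std::out_of_range for unknown actors
}

int main()
{
  ActorPtr actor = std::make_shared<Actor>();
  add_process(actor);
  assert(process_remote(actor)->actor == actor);

  // Cleanup mirrors the new smpi_global_destroy(): iterate, free, clear.
  for (auto& pair : process_data)
    delete pair.second;
  process_data.clear();
}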
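
Second, the new comment above MPI_COMM_WORLD describes a sentinel trick: methods of Comm compare "this" against the MPI_COMM_UNINITIALIZED sentinel and, on a match, forward to the calling process's own world communicator. A simplified, hypothetical sketch of that pattern (Comm, COMM_WORLD and process_local_world() are illustrative names, not SimGrid's API):

#include <cstdio>

class Comm {
public:
  explicit Comm(int rank) : rank_(rank) {}
  int rank();

private:
  int rank_;
};

static Comm comm_uninitialized(-1);           // the single global sentinel object
Comm* const COMM_WORLD = &comm_uninitialized; // what user code sees

static Comm* process_local_world()
{
  // Stand-in for smpi_process()->comm_world(): would return the communicator
  // of the calling SMPI process; here just a fixed object with rank 42.
  static Comm world(42);
  return &world;
}

int Comm::rank()
{
  if (this == &comm_uninitialized)           // "this == MPI_COMM_UNINITIALIZED"
    return process_local_world()->rank();    // forward to the process's own world
  return rank_;
}

int main()
{
  std::printf("rank via global COMM_WORLD: %d\n", COMM_WORLD->rank()); // prints 42
}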
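
Finally, the saved_errno hunk in smpi_main() guards against errno clobbering: the unlink() that removes the temporary copy sits between the dlopen() and the error report, and can overwrite errno on its own failure. A self-contained illustration of the pattern (file names are made up; note that POSIX does not strictly guarantee that dlopen() sets errno):

#include <dlfcn.h>
#include <unistd.h>

#include <cerrno>
#include <cstdio>
#include <cstring>

int main()
{
  // dlopen() of a missing library fails; on most platforms a meaningful errno
  // is left behind by the underlying open().
  void* handle = dlopen("./no-such-plugin.so", RTLD_LAZY | RTLD_LOCAL);
  int saved_errno = errno; // snapshot BEFORE any cleanup call

  // Cleanup between the failing call and the error report: unlink() of a
  // nonexistent file fails with ENOENT and overwrites the live errno.
  unlink("./no-such-temp-file");

  if (handle == nullptr) {
    // Reporting saved_errno (not errno) keeps the message about dlopen,
    // matching the fix in the hunk above.
    std::fprintf(stderr, "dlopen failed: %s (errno: %d -- %s)\n", dlerror(), saved_errno,
                 std::strerror(saved_errno));
    return 1;
  }
  dlclose(handle);
  return 0;
}

Build with "-ldl" on glibc systems older than 2.34; newer ones fold dlopen into libc.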