X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/74154653779f9c914ca6759c9e7230d4c052315f..611d822b02f836d7abe031cced6adc4281ef4356:/src/smpi/internals/smpi_global.cpp

diff --git a/src/smpi/internals/smpi_global.cpp b/src/smpi/internals/smpi_global.cpp
index 0407a77a54..ebf13d8d52 100644
--- a/src/smpi/internals/smpi_global.cpp
+++ b/src/smpi/internals/smpi_global.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2018. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -6,8 +6,10 @@
 #include "SmpiHost.hpp"
 #include "mc/mc.h"
 #include "private.hpp"
+#include "simgrid/s4u/Engine.hpp"
 #include "simgrid/s4u/Host.hpp"
 #include "simgrid/s4u/Mailbox.hpp"
+#include "simgrid/s4u/forward.hpp"
 #include "smpi_coll.hpp"
 #include "smpi_comm.hpp"
 #include "smpi_group.hpp"
@@ -50,12 +52,14 @@ struct papi_process_data {
 };
 #endif
 
+using simgrid::s4u::Actor;
+using simgrid::s4u::ActorPtr;
 std::unordered_map<std::string, double> location2speedup;
 
-static simgrid::smpi::Process** process_data = nullptr;
+static std::map<simgrid::s4u::ActorPtr, simgrid::smpi::Process*> process_data;
 int process_count = 0;
+static int smpi_exit_status = 0;
 int smpi_universe_size = 0;
-int* index_to_process_data = nullptr;
 extern double smpi_total_benched_time;
 xbt_os_timer_t global_timer;
 /**
@@ -64,7 +68,7 @@ xbt_os_timer_t global_timer;
  * "this == MPI_COMM_UNINITIALIZED"? If yes, it uses smpi_process()->comm_world()
  * instead of "this".
  * This is basically how we only have one global variable but all processes have
- * different communicators (basically, the one their SMPI instance uses).
+ * different communicators (the one their SMPI instance uses).
  *
  * See smpi_comm.cpp and the functions therein for details.
  */
@@ -74,7 +78,7 @@ MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
 MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;
 // No instance gets manually created; check also the smpirun.in script as
 // this default name is used there as well (when the tag is generated).
-static const char* smpi_default_instance_name = "smpirun";
+static const std::string smpi_default_instance_name("smpirun");
 static simgrid::config::Flag<double> smpi_wtime_sleep(
   "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
 static simgrid::config::Flag<double> smpi_init_sleep(
@@ -89,16 +93,16 @@ int smpi_process_count()
 
 simgrid::smpi::Process* smpi_process()
 {
-  smx_actor_t me = SIMIX_process_self();
+  ActorPtr me = Actor::self();
   if (me == nullptr) // This happens sometimes (eg, when linking against NS3 because it pulls openMPI...)
    return nullptr;
-  simgrid::msg::ActorExt* msgExt = static_cast<simgrid::msg::ActorExt*>(me->userdata);
+  simgrid::msg::ActorExt* msgExt = static_cast<simgrid::msg::ActorExt*>(me->getImpl()->userdata);
   return static_cast<simgrid::smpi::Process*>(msgExt->data);
 }
 
-simgrid::smpi::Process* smpi_process_remote(int index)
+simgrid::smpi::Process* smpi_process_remote(ActorPtr actor)
 {
-  return process_data[index_to_process_data[index]];
+  return process_data.at(actor);
 }
 
 MPI_Comm smpi_process_comm_self(){
@@ -110,7 +114,7 @@ void smpi_process_init(int *argc, char ***argv){
 }
 
 int smpi_process_index(){
-  return smpi_process()->index();
+  return simgrid::s4u::this_actor::getPid();
 }
 
 void * smpi_process_get_user_data(){
@@ -185,23 +189,19 @@ void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t b
   auto private_blocks = merge_private_blocks(src_private_blocks, dst_private_blocks);
   check_blocks(private_blocks, buff_size);
   void* tmpbuff=buff;
-  if ((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && (static_cast<char*>(buff) >= smpi_data_exe_start) &&
+  if ((smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) &&
+      (static_cast<char*>(buff) >= smpi_data_exe_start) &&
       (static_cast<char*>(buff) < smpi_data_exe_start + smpi_data_exe_size)) {
     XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
-
-    smpi_switch_data_segment(
-        static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->src_proc->userdata)->data))
-            ->index());
+    smpi_switch_data_segment(comm->src_proc->iface());
     tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
     memcpy_private(tmpbuff, buff, private_blocks);
   }
-  if ((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && ((char*)comm->dst_buff >= smpi_data_exe_start) &&
+  if ((smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) && ((char*)comm->dst_buff >= smpi_data_exe_start) &&
       ((char*)comm->dst_buff < smpi_data_exe_start + smpi_data_exe_size)) {
     XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
-    smpi_switch_data_segment(
-        static_cast<simgrid::smpi::Process*>((static_cast<simgrid::msg::ActorExt*>(comm->dst_proc->userdata)->data))
-            ->index());
+    smpi_switch_data_segment(comm->dst_proc->iface());
   }
   XBT_DEBUG("Copying %zu bytes from %p to %p", buff_size, tmpbuff,comm->dst_buff);
   memcpy_private(comm->dst_buff, tmpbuff, private_blocks);
@@ -241,7 +241,7 @@ static void smpi_check_options(){
 }
 
 int smpi_enabled() {
-  return process_data != nullptr;
+  return not process_data.empty();
 }
 
 void smpi_global_init()
@@ -339,31 +339,6 @@ void smpi_global_init()
     }
   }
 #endif
-
-  if (index_to_process_data == nullptr) {
-    index_to_process_data = new int[SIMIX_process_count()];
-  }
-
-  bool smpirun = 0;
-  if (process_count == 0) { // The program has been dispatched but no other
-                            // SMPI instances have been registered. We're using smpirun.
-    smpirun = true;
-    SMPI_app_instance_register(smpi_default_instance_name, nullptr,
-                               SIMIX_process_count()); // This call has a side effect on process_count...
-    MPI_COMM_WORLD = *smpi_deployment_comm_world(smpi_default_instance_name);
-  }
-  smpi_universe_size = process_count;
-  process_data = new simgrid::smpi::Process*[process_count];
-  for (int i = 0; i < process_count; i++) {
-    if (smpirun) {
-      process_data[i] = new simgrid::smpi::Process(i, smpi_deployment_finalization_barrier(smpi_default_instance_name));
-      smpi_deployment_register_process(smpi_default_instance_name, i, i);
-    } else {
-      // TODO We can pass a nullptr here because Process::set_data() assigns the
-      // barrier from the instance anyway. This is ugly and should be changed
-      process_data[i] = new simgrid::smpi::Process(i, nullptr);
-    }
-  }
 }
 
 void smpi_global_destroy()
@@ -371,20 +346,6 @@ void smpi_global_destroy()
   smpi_bench_destroy();
   smpi_shared_destroy();
   smpi_deployment_cleanup_instances();
-  int count = smpi_process_count();
-  for (int i = 0; i < count; i++) {
-    if(process_data[i]->comm_self()!=MPI_COMM_NULL){
-      simgrid::smpi::Comm::destroy(process_data[i]->comm_self());
-    }
-    if(process_data[i]->comm_intra()!=MPI_COMM_NULL){
-      simgrid::smpi::Comm::destroy(process_data[i]->comm_intra());
-    }
-    xbt_os_timer_free(process_data[i]->timer());
-    xbt_mutex_destroy(process_data[i]->mailboxes_mutex());
-    delete process_data[i];
-  }
-  delete[] process_data;
-  process_data = nullptr;
 
   if (simgrid::smpi::Colls::smpi_coll_cleanup_callback != nullptr)
     simgrid::smpi::Colls::smpi_coll_cleanup_callback();
@@ -395,43 +356,11 @@ void smpi_global_destroy()
     xbt_os_timer_free(global_timer);
   }
 
-  delete[] index_to_process_data;
-  if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
+  if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap)
     smpi_destroy_global_memory_segments();
   smpi_free_static();
 }
 
-extern "C" {
-
-static void smpi_init_logs(){
-
-  /* Connect log categories. See xbt/log.c */
-
-  XBT_LOG_CONNECT(smpi); /* Keep this line as soon as possible in this function: xbt_log_appender_file.c depends on it
-                            DO NOT connect this in XBT or so, or it will be useless to xbt_log_appender_file.c */
-  XBT_LOG_CONNECT(instr_smpi);
-  XBT_LOG_CONNECT(smpi_bench);
-  XBT_LOG_CONNECT(smpi_coll);
-  XBT_LOG_CONNECT(smpi_colls);
-  XBT_LOG_CONNECT(smpi_comm);
-  XBT_LOG_CONNECT(smpi_datatype);
-  XBT_LOG_CONNECT(smpi_dvfs);
-  XBT_LOG_CONNECT(smpi_group);
-  XBT_LOG_CONNECT(smpi_host);
-  XBT_LOG_CONNECT(smpi_kernel);
-  XBT_LOG_CONNECT(smpi_mpi);
-  XBT_LOG_CONNECT(smpi_memory);
-  XBT_LOG_CONNECT(smpi_op);
-  XBT_LOG_CONNECT(smpi_pmpi);
-  XBT_LOG_CONNECT(smpi_process);
-  XBT_LOG_CONNECT(smpi_request);
-  XBT_LOG_CONNECT(smpi_replay);
-  XBT_LOG_CONNECT(smpi_rma);
-  XBT_LOG_CONNECT(smpi_shared);
-  XBT_LOG_CONNECT(smpi_utils);
-}
-}
-
 static void smpi_init_options(){
   // return if already called
   if (smpi_cpu_threshold > -1)
@@ -440,23 +369,28 @@ static void smpi_init_options(){
   simgrid::smpi::Colls::smpi_coll_cleanup_callback = nullptr;
   smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
   smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
+  xbt_assert(smpi_host_speed >= 0, "You're trying to set the host_speed to a negative value (%f)", smpi_host_speed);
   std::string smpi_privatize_option = xbt_cfg_get_string("smpi/privatization");
   if (smpi_privatize_option == "no" || smpi_privatize_option == "0")
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
+    smpi_privatize_global_variables = SmpiPrivStrategies::None;
   else if (smpi_privatize_option == "yes" || smpi_privatize_option == "1")
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
+    smpi_privatize_global_variables = SmpiPrivStrategies::Default;
   else if (smpi_privatize_option == "mmap")
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_MMAP;
+    smpi_privatize_global_variables = SmpiPrivStrategies::Mmap;
   else if (smpi_privatize_option == "dlopen")
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_DLOPEN;
+    smpi_privatize_global_variables = SmpiPrivStrategies::Dlopen;
   else
     xbt_die("Invalid value for smpi/privatization: '%s'", smpi_privatize_option.c_str());
 
+  if (not SMPI_switch_data_segment) {
+    XBT_DEBUG("Running without smpi_main(); disable smpi/privatization.");
+    smpi_privatize_global_variables = SmpiPrivStrategies::None;
+  }
 
 #if defined(__FreeBSD__)
-  if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
-    XBT_INFO("Mixing mmap privatization is broken on FreeBSD, switching to dlopen privatization instead.");
-    smpi_privatize_global_variables = SMPI_PRIVATIZE_DLOPEN;
-  }
+  if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
+    XBT_INFO("mmap privatization is broken on FreeBSD, switching to dlopen privatization instead.");
+    smpi_privatize_global_variables = SmpiPrivStrategies::Dlopen;
+  }
 #endif
 
   if (smpi_cpu_threshold < 0)
@@ -491,7 +425,8 @@ static int smpi_run_entry_point(smpi_entry_point_type entry_point, std::vector<std::string> args)
-    smpi_process()->set_return_value(res);
+    if (smpi_exit_status == 0)
+      smpi_exit_status = res;
   }
   return 0;
 }
@@ -533,6 +468,9 @@ int smpi_main(const char* executable, int argc, char *argv[])
 
   SMPI_switch_data_segment = &smpi_switch_data_segment;
 
+  // TODO This will not be executed in the case where smpi_main is not called,
+  // e.g., not for smpi_msg_masterslave. This should be moved to another location
+  // that is always called -- maybe close to Actor::onCreation?
   simgrid::s4u::Host::onCreation.connect([](simgrid::s4u::Host& host) {
     host.extension_set(new simgrid::smpi::SmpiHost(&host));
   });
@@ -542,8 +480,7 @@ int smpi_main(const char* executable, int argc, char *argv[])
   SIMIX_comm_set_copy_data_callback(smpi_comm_copy_buffer_callback);
 
   smpi_init_options();
-
-  if (smpi_privatize_global_variables == SMPI_PRIVATIZE_DLOPEN) {
+  if (smpi_privatize_global_variables == SmpiPrivStrategies::Dlopen) {
 
     std::string executable_copy = executable;
 
@@ -562,17 +499,18 @@ int smpi_main(const char* executable, int argc, char *argv[])
                                     + "_" + std::to_string(rank++) + ".so";
 
     int fdin = open(executable_copy.c_str(), O_RDONLY);
-    xbt_assert(fdin >= 0, "Cannot read from %s", executable_copy.c_str());
+    xbt_assert(fdin >= 0, "Cannot read from %s. Please make sure that the file exists and is executable.",
+               executable_copy.c_str());
     int fdout = open(target_executable.c_str(), O_CREAT | O_RDWR, S_IRWXU);
     xbt_assert(fdout >= 0, "Cannot write into %s", target_executable.c_str());
 
+    XBT_DEBUG("Copy %ld bytes into %s", static_cast<long>(fdin_size), target_executable.c_str());
 #if HAVE_SENDFILE
     ssize_t sent_size = sendfile(fdout, fdin, NULL, fdin_size);
     xbt_assert(sent_size == fdin_size,
                "Error while copying %s: only %zd bytes copied instead of %ld (errno: %d -- %s)",
                target_executable.c_str(), sent_size, fdin_size, errno, strerror(errno));
 #else
-    XBT_VERB("Copy %d bytes into %s", static_cast<int>(fdin_size), target_executable.c_str());
     const int bufsize = 1024 * 1024 * 4;
     char buf[bufsize];
     while (int got = read(fdin, buf, bufsize)) {
@@ -609,12 +547,10 @@ int smpi_main(const char* executable, int argc, char *argv[])
         smpi_run_entry_point(entry_point, args);
       });
     };
-
   } else {
-
     // Load the dynamic library and resolve the entry point:
-    void* handle = dlopen(executable, RTLD_LAZY | RTLD_LOCAL | RTLD_DEEPBIND);
+    void* handle = dlopen(executable, RTLD_LAZY | RTLD_LOCAL);
     if (handle == nullptr)
       xbt_die("dlopen failed for %s: %s (errno: %d -- %s)", executable, dlerror(), errno, strerror(errno));
     smpi_entry_point_type entry_point = smpi_resolve_function(handle);
@@ -628,12 +564,15 @@ int smpi_main(const char* executable, int argc, char *argv[])
       smpi_run_entry_point(entry_point, args);
     });
   };
-
   }
 
+  SMPI_init();
   SIMIX_launch_application(argv[2]);
+  SMPI_app_instance_register(smpi_default_instance_name.c_str(), nullptr,
+                             process_data.size()); // This call has a side effect on process_count...
+  MPI_COMM_WORLD = *smpi_deployment_comm_world(smpi_default_instance_name);
+  smpi_universe_size = process_count;
 
-  SMPI_init();
   /* Clean IO before the run */
   fflush(stdout);
@@ -658,30 +597,34 @@ int smpi_main(const char* executable, int argc, char *argv[])
                "You may want to use sampling functions or trace replay to reduce this.");
     }
   }
-  int ret = 0;
-  int count = smpi_process_count();
-  for (int i = 0; i < count; i++) {
-    if(process_data[i]->return_value()!=0){
-      ret=process_data[i]->return_value();//return first non 0 value
-      break;
-    }
-  }
   smpi_global_destroy();
 
   TRACE_end();
 
-  return ret;
+  return smpi_exit_status;
 }
 
 // Called either directly from the user code, or from the code called by smpirun
 void SMPI_init(){
-  smpi_init_logs();
+  simgrid::s4u::Actor::on_creation.connect([](simgrid::s4u::ActorPtr actor) {
+    if (not actor->is_daemon()) {
+      process_data.insert({actor, new simgrid::smpi::Process(actor, nullptr)});
+    }
+  });
+  simgrid::s4u::Actor::on_destruction.connect([](simgrid::s4u::ActorPtr actor) {
+    auto it = process_data.find(actor);
+    if (it != process_data.end()) {
+      delete it->second;
+      process_data.erase(it);
+    }
+  });
+
   smpi_init_options();
   smpi_global_init();
   smpi_check_options();
   TRACE_smpi_alloc();
-  simgrid::surf::surfExitCallbacks.connect(TRACE_smpi_release);
-  if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
+  simgrid::s4u::onSimulationEnd.connect(TRACE_smpi_release);
+  if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap)
    smpi_backup_global_memory_segment();
 }
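
Illustration (not part of the patch): the refactoring above replaces the dense, index-based process_data array with a std::map keyed by the actor handle, fills that map from actor creation/destruction callbacks connected in SMPI_init(), and collapses per-process return values into a single smpi_exit_status that keeps the first non-zero code. The standalone C++ sketch below mimics that registry-plus-callback pattern with hypothetical stand-in types (Actor, ActorPtr, ProcessState, on_actor_creation, on_actor_destruction); it does not use SimGrid's API and only illustrates the data-structure choice.

// registry_sketch.cpp -- hypothetical stand-ins, not SimGrid code (C++14).
#include <cstdio>
#include <map>
#include <memory>
#include <vector>

struct Actor {        // stand-in for an actor handle
  int pid;
  bool daemon;
};
using ActorPtr = std::shared_ptr<Actor>;

struct ProcessState { // stand-in for the per-process SMPI state
  explicit ProcessState(ActorPtr a) : actor(std::move(a)) {}
  ActorPtr actor;
};

// Keyed by the actor handle instead of a dense integer index.
static std::map<ActorPtr, std::unique_ptr<ProcessState>> process_data;
static int exit_status = 0; // keeps the first non-zero return value

// Hooks analogous to connecting creation/destruction signals.
static void on_actor_creation(const ActorPtr& actor)
{
  if (not actor->daemon) // daemon actors never get per-process state
    process_data.emplace(actor, std::make_unique<ProcessState>(actor));
}
static void on_actor_destruction(const ActorPtr& actor)
{
  process_data.erase(actor); // the unique_ptr releases the state
}

static void record_return_value(int res)
{
  if (exit_status == 0 && res != 0)
    exit_status = res;
}

int main()
{
  std::vector<ActorPtr> actors;
  for (int pid = 1; pid <= 3; pid++) {
    actors.push_back(std::make_shared<Actor>(Actor{pid, /*daemon=*/pid == 3}));
    on_actor_creation(actors.back());
  }
  std::printf("registered %zu non-daemon actors\n", process_data.size());

  record_return_value(0);  // a rank that returned 0 does not change the status
  record_return_value(42); // the first non-zero value wins

  for (auto const& a : actors)
    on_actor_destruction(a);
  std::printf("exit status: %d, remaining entries: %zu\n", exit_status, process_data.size());
  return exit_status;
}

Keying the registry by the owning handle removes the need for a separate index_to_process_data translation table and lets each entry be dropped as soon as the corresponding actor goes away, which is the same trade-off the patch makes.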