X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/fbf96d959e4912fe445823b75cf102a0e89131e2..d2fd3818a5e681f28d2ee11aa2c20c2d7dbabc03:/src/smpi/smpi_global.cpp

diff --git a/src/smpi/smpi_global.cpp b/src/smpi/smpi_global.cpp
index 560bce26e1..1ad2a306b9 100644
--- a/src/smpi/smpi_global.cpp
+++ b/src/smpi/smpi_global.cpp
@@ -1,20 +1,22 @@
-/* Copyright (c) 2007-2015. The SimGrid Team.
- * All rights reserved.                                                     */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved.          */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
+#include "mc/mc.h"
 #include "private.h"
 #include "private.hpp"
-#include "smpi_mpi_dt_private.h"
-#include "mc/mc.h"
-#include "src/mc/mc_record.h"
-#include "xbt/replay.h"
-#include "surf/surf.h"
-#include "src/simix/smx_private.h"
+#include "simgrid/s4u/Mailbox.hpp"
+#include "smpi/smpi_shared_malloc.hpp"
 #include "simgrid/sg_config.h"
-#include "src/mc/mc_replay.h"
 #include "src/kernel/activity/SynchroComm.hpp"
+#include "src/mc/mc_record.h"
+#include "src/mc/mc_replay.h"
+#include "src/msg/msg_private.h"
+#include "src/simix/smx_private.h"
+#include "surf/surf.h"
+#include "xbt/replay.hpp"
+#include
 
 #include <float.h> /* DBL_MAX */
 #include
@@ -24,7 +26,6 @@
 #include
 #include
 #include
-#include "../msg/msg_private.hpp"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi, "Logging specific to SMPI (kernel)");
 #include
@@ -42,175 +43,53 @@ struct papi_process_data {
 #endif
 std::unordered_map<std::string, double> location2speedup;
 
-typedef struct s_smpi_process_data {
-  double simulated;
-  int *argc;
-  char ***argv;
-  smx_mailbox_t mailbox;
-  smx_mailbox_t mailbox_small;
-  xbt_mutex_t mailboxes_mutex;
-  xbt_os_timer_t timer;
-  MPI_Comm comm_self;
-  MPI_Comm comm_intra;
-  MPI_Comm* comm_world;
-  void *data; /* user data */
-  int index;
-  char state;
-  int sampling; /* inside an SMPI_SAMPLE_ block? */
-  char* instance_id;
-  bool replaying; /* is the process replaying a trace */
-  xbt_bar_t finalization_barrier;
-  int return_value;
-  smpi_trace_call_location_t trace_call_loc;
-#if HAVE_PAPI
-  /** Contains hardware data as read by PAPI **/
-  int papi_event_set;
-  papi_counter_t papi_counter_data;
-#endif
-} s_smpi_process_data_t;
-
-static smpi_process_data_t *process_data = nullptr;
+simgrid::smpi::Process **process_data = nullptr;
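
The commit's core move: the plain-C s_smpi_process_data record and its free-function accessors are folded into a simgrid::smpi::Process class declared in this commit's new headers. A minimal standalone sketch of that migration pattern — the class name matches the commit, but the member list and helpers below are illustrative only:

    // Sketch only: the real class (smpi_process.hpp in this commit) has many more members.
    #include <cstdio>

    namespace simgrid { namespace smpi {

    enum ProcessState : char { UNINITIALIZED, INITIALIZED, FINALIZED }; // stand-in for the SMPI_* state chars

    class Process {
      int index_;                        // was s_smpi_process_data::index
      ProcessState state_ = UNINITIALIZED;
      int sampling_ = 0;                 // inside an SMPI_SAMPLE_ block?
    public:
      explicit Process(int index) : index_(index) {}
      int index() const { return index_; }
      int sampling() const { return sampling_; }
      void set_sampling(int s) { sampling_ = s; }
    };

    }} // namespace simgrid::smpi

    int main() {
      simgrid::smpi::Process p(3);
      std::printf("rank %d\n", p.index()); // rank 3
    }
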
 int process_count = 0;
 int smpi_universe_size = 0;
 int* index_to_process_data = nullptr;
 extern double smpi_total_benched_time;
-extern xbt_dict_t smpi_type_keyvals;
-extern xbt_dict_t smpi_comm_keyvals;
 xbt_os_timer_t global_timer;
 MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
 MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
 MPI_Errhandler *MPI_ERRORS_ARE_FATAL = nullptr;
 MPI_Errhandler *MPI_ERRHANDLER_NULL = nullptr;
 
+static simgrid::config::Flag<double> smpi_wtime_sleep(
+    "smpi/wtime", "Minimum time to inject inside a call to MPI_Wtime", 0.0);
+static simgrid::config::Flag<double> smpi_init_sleep(
+    "smpi/init", "Time to inject inside a call to MPI_Init", 0.0);
 
-#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
+void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t) = &smpi_comm_copy_buffer_callback;
 
-static char *get_mailbox_name(char *str, int index)
-{
-  snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int>(sizeof(int) * 2), index);
-  return str;
-}
-
-static char *get_mailbox_name_small(char *str, int index)
-{
-  snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int>(sizeof(int) * 2), index);
-  return str;
-}
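
The deleted helpers named each rank's mailboxes by appending the index as fixed-width hex (2*sizeof(int) digits), so names stay unique and equal-length. A standalone illustration of that encoding:

    #include <cstdio>

    #define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1) // prefix + hex digits + '\0'

    int main() {
      char str[MAILBOX_NAME_MAXLEN];
      // the field width is passed through %0*x: 8 hex digits where int is 32 bits
      std::snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int>(sizeof(int) * 2), 42);
      std::printf("%s\n", str); // prints "SMPI-0000002a"
    }
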
" - "Although it's required by MPI-2, this is currently not supported by SMPI."); + return process_count; } -void smpi_process_destroy() +simgrid::smpi::Process* smpi_process() { - int index = smpi_process_index(); - if(smpi_privatize_global_variables){ - smpi_switch_data_segment(index); - } - process_data[index_to_process_data[index]]->state = SMPI_FINALIZED; - XBT_DEBUG("<%d> Process left the game", index); + simgrid::MsgActorExt* msgExt = static_cast(SIMIX_process_self()->data); + return static_cast(msgExt->data); } -/** @brief Prepares the current process for termination. */ -void smpi_process_finalize() +simgrid::smpi::Process* smpi_process_remote(int index) { - // This leads to an explosion of the search graph which cannot be reduced: - if(MC_is_active() || MC_record_replay_is_active()) - return; - - int index = smpi_process_index(); - // wait for all pending asynchronous comms to finish - xbt_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier); + return process_data[index_to_process_data[index]]; } -/** @brief Check if a process is finalized */ -int smpi_process_finalized() -{ - int index = smpi_process_index(); - if (index != MPI_UNDEFINED) - return (process_data[index_to_process_data[index]]->state == SMPI_FINALIZED); - else - return 0; +MPI_Comm smpi_process_comm_self(){ + return smpi_process()->comm_self(); } -/** @brief Check if a process is initialized */ -int smpi_process_initialized() -{ - if (index_to_process_data == nullptr){ - return false; - } else{ - int index = smpi_process_index(); - return ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state == SMPI_INITIALIZED)); - } +void smpi_process_init(int *argc, char ***argv){ + simgrid::smpi::Process::init(argc, argv); } -/** @brief Mark a process as initialized (=MPI_Init called) */ -void smpi_process_mark_as_initialized() -{ - int index = smpi_process_index(); - if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED)) - process_data[index_to_process_data[index]]->state = SMPI_INITIALIZED; +int smpi_process_index(){ + return smpi_process()->index(); } -void smpi_process_set_replaying(bool value){ - int index = smpi_process_index(); - if ((index != MPI_UNDEFINED) && (process_data[index_to_process_data[index]]->state != SMPI_FINALIZED)) - process_data[index_to_process_data[index]]->replaying = value; -} - -bool smpi_process_get_replaying(){ - int index = smpi_process_index(); - if (index != MPI_UNDEFINED) - return process_data[index_to_process_data[index]]->replaying; - else return (_xbt_replay_is_active() != 0); -} int smpi_global_size() { @@ -220,194 +99,71 @@ int smpi_global_size() return xbt_str_parse_int(value, "SMPI_GLOBAL_SIZE contains a non-numerical value: %s"); } -smpi_process_data_t smpi_process_data() -{ - simdata_process_t simdata = static_cast(SIMIX_process_self_get_data()); - return static_cast(simdata->data); -} - -smpi_process_data_t smpi_process_remote_data(int index) -{ - return process_data[index_to_process_data[index]]; -} - -void smpi_process_set_user_data(void *data) -{ - smpi_process_data_t process_data = smpi_process_data(); - process_data->data = data; -} - -void *smpi_process_get_user_data() -{ - smpi_process_data_t process_data = smpi_process_data(); - return process_data->data; -} - -int smpi_process_count() -{ - return process_count; -} - -/** - * \brief Returns a structure that stores the location (filename + linenumber) - * of the last calls to MPI_* functions. 
-smpi_process_data_t smpi_process_data()
-{
-  simdata_process_t simdata = static_cast<simdata_process_t>(SIMIX_process_self_get_data());
-  return static_cast<smpi_process_data_t>(simdata->data);
-}
-
-smpi_process_data_t smpi_process_remote_data(int index)
-{
-  return process_data[index_to_process_data[index]];
-}
-
-void smpi_process_set_user_data(void *data)
-{
-  smpi_process_data_t process_data = smpi_process_data();
-  process_data->data = data;
-}
-
-void *smpi_process_get_user_data()
-{
-  smpi_process_data_t process_data = smpi_process_data();
-  return process_data->data;
-}
-
-int smpi_process_count()
-{
-  return process_count;
-}
-
-/**
- * \brief Returns a structure that stores the location (filename + linenumber)
- *        of the last calls to MPI_* functions.
- *
- * \see smpi_trace_set_call_location
- */
-smpi_trace_call_location_t* smpi_process_get_call_location()
-{
-  smpi_process_data_t process_data = smpi_process_data();
-  return &process_data->trace_call_loc;
-}
-
-int smpi_process_index()
-{
-  smpi_process_data_t data = smpi_process_data();
-  //return -1 if not initialized
-  return data != nullptr ? data->index : MPI_UNDEFINED;
-}
-
-MPI_Comm smpi_process_comm_world()
-{
-  smpi_process_data_t data = smpi_process_data();
-  //return MPI_COMM_NULL if not initialized
-  return data != nullptr ? *data->comm_world : MPI_COMM_NULL;
-}
-
-smx_mailbox_t smpi_process_mailbox()
-{
-  smpi_process_data_t data = smpi_process_data();
-  return data->mailbox;
-}
-
-smx_mailbox_t smpi_process_mailbox_small()
-{
-  smpi_process_data_t data = smpi_process_data();
-  return data->mailbox_small;
-}
-
-xbt_mutex_t smpi_process_mailboxes_mutex()
-{
-  smpi_process_data_t data = smpi_process_data();
-  return data->mailboxes_mutex;
-}
-
-smx_mailbox_t smpi_process_remote_mailbox(int index)
-{
-  smpi_process_data_t data = smpi_process_remote_data(index);
-  return data->mailbox;
-}
-
-smx_mailbox_t smpi_process_remote_mailbox_small(int index)
-{
-  smpi_process_data_t data = smpi_process_remote_data(index);
-  return data->mailbox_small;
-}
-
-xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index)
-{
-  smpi_process_data_t data = smpi_process_remote_data(index);
-  return data->mailboxes_mutex;
-}
-
-#if HAVE_PAPI
-int smpi_process_papi_event_set(void)
-{
-  smpi_process_data_t data = smpi_process_data();
-  return data->papi_event_set;
-}
-
-papi_counter_t& smpi_process_papi_counters(void)
-{
-  smpi_process_data_t data = smpi_process_data();
-  return data->papi_counter_data;
-}
-#endif
-
-xbt_os_timer_t smpi_process_timer()
-{
-  smpi_process_data_t data = smpi_process_data();
-  return data->timer;
-}
+void smpi_comm_set_copy_data_callback(void (*callback) (smx_activity_t, void*, size_t))
+{
+  smpi_comm_copy_data_callback = callback;
+}
 
-void smpi_process_simulated_start()
-{
-  smpi_process_data_t data = smpi_process_data();
-  data->simulated = SIMIX_get_clock();
-}
-
-double smpi_process_simulated_elapsed()
-{
-  smpi_process_data_t data = smpi_process_data();
-  return SIMIX_get_clock() - data->simulated;
-}
+void print(std::vector<std::pair<size_t, size_t>> vec) {
+  fprintf(stderr, "{");
+  for(auto elt: vec) {
+    fprintf(stderr, "(0x%zx, 0x%zx),", elt.first, elt.second);
+  }
+  fprintf(stderr, "}\n");
+}
 
-MPI_Comm smpi_process_comm_self()
-{
-  smpi_process_data_t data = smpi_process_data();
-  if(data->comm_self==MPI_COMM_NULL){
-    MPI_Group group = smpi_group_new(1);
-    data->comm_self = smpi_comm_new(group, nullptr);
-    smpi_group_set_mapping(group, smpi_process_index(), 0);
-  }
-
-  return data->comm_self;
-}
+void memcpy_private(void *dest, const void *src, size_t n, std::vector<std::pair<size_t, size_t>> &private_blocks) {
+  for(auto block : private_blocks) {
+    memcpy((uint8_t*)dest+block.first, (uint8_t*)src+block.first, block.second-block.first);
+  }
+}
 
-MPI_Comm smpi_process_get_comm_intra()
-{
-  smpi_process_data_t data = smpi_process_data();
-  return data->comm_intra;
-}
-
-void smpi_process_set_comm_intra(MPI_Comm comm)
-{
-  smpi_process_data_t data = smpi_process_data();
-  data->comm_intra = comm;
-}
-
-void smpi_process_set_sampling(int s)
-{
-  smpi_process_data_t data = smpi_process_data();
-  data->sampling = s;
-}
-
-int smpi_process_get_sampling()
-{
-  smpi_process_data_t data = smpi_process_data();
-  return data->sampling;
-}
-
-void print_request(const char *message, MPI_Request request)
-{
-  XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
-           message, request, request->buf, request->size, request->src, request->dst, request->tag, request->flags);
-}
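
memcpy_private() copies only the byte ranges listed in private_blocks and leaves the rest of dest untouched, which is what lets shared (shmalloc-backed) regions skip the copy. A self-contained example: with blocks {(0,4), (8,12)} on a 12-byte buffer, bytes 4..7 keep their old value:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>
    #include <vector>
    #include <utility>

    int main() {
      uint8_t src[12], dst[12];
      std::memset(src, 0xAA, sizeof(src));
      std::memset(dst, 0x00, sizeof(dst));
      std::vector<std::pair<size_t, size_t>> blocks = {{0, 4}, {8, 12}};
      for (auto b : blocks)  // same loop as memcpy_private above
        std::memcpy(dst + b.first, src + b.first, b.second - b.first);
      for (auto byte : dst)
        std::printf("%02x ", byte);  // aa aa aa aa 00 00 00 00 aa aa aa aa
      std::printf("\n");
    }
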
 void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
 {
+  simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);
+  int src_shared=0, dst_shared=0;
+  int src_offset, dst_offset;
+  std::vector<std::pair<size_t, size_t>> src_private_blocks;
+  std::vector<std::pair<size_t, size_t>> dst_private_blocks;
   XBT_DEBUG("Copy the data over");
+  if((src_shared = smpi_is_shared(buff, src_private_blocks, &src_offset))) {
+    XBT_DEBUG("Sender %p is shared. Let's ignore it.", buff);
+    src_private_blocks = shift_private_blocks(src_private_blocks, src_offset);
+  }
+  else {
+    src_private_blocks.clear();
+    src_private_blocks.push_back(std::make_pair(0, buff_size));
+  }
+  if((dst_shared = smpi_is_shared((char*)comm->dst_buff, dst_private_blocks, &dst_offset))) {
+    XBT_DEBUG("Receiver %p is shared. Let's ignore it.", (char*)comm->dst_buff);
+    dst_private_blocks = shift_private_blocks(dst_private_blocks, dst_offset);
+  }
+  else {
+    dst_private_blocks.clear();
+    dst_private_blocks.push_back(std::make_pair(0, buff_size));
+  }
+  auto private_blocks = merge_private_blocks(src_private_blocks, dst_private_blocks);
   void* tmpbuff=buff;
-  simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);
-
   if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
       && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )){
     XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
-
-    smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->src_proc))->data))->index));
+    smpi_switch_data_segment(
+        (static_cast<simgrid::smpi::Process*>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index()));
     tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
-    memcpy(tmpbuff, buff, buff_size);
+    memcpy_private(tmpbuff, buff, buff_size, private_blocks);
   }
 
   if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
      && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
     XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
-    smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->dst_proc))->data))->index));
+    smpi_switch_data_segment(
+        (static_cast<simgrid::smpi::Process*>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index()));
   }
 
-  memcpy(comm->dst_buff, tmpbuff, buff_size);
+  XBT_DEBUG("Copying %zu bytes from %p to %p", buff_size, tmpbuff, comm->dst_buff);
+  memcpy_private(comm->dst_buff, tmpbuff, buff_size, private_blocks);
+
   if (comm->detached) {
     // if this is a detached send, the source buffer was duplicated by SMPI
     // sender to make the original buffer available to the application ASAP
@@ -416,13 +172,13 @@ void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t b
     //xbt_free(comm->comm.src_data);// inside SMPI the request is kept inside the user data and should be free
     comm->src_buff = nullptr;
   }
-  if(tmpbuff!=buff)xbt_free(tmpbuff);
+  if (tmpbuff != buff)
+    xbt_free(tmpbuff);
 }
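
merge_private_blocks() itself lives in the shared-malloc module. For the callback above, the useful mental model is an interval union: a byte gets copied when it is private on at least one side. A plausible sketch under that assumption — the real SMPI helper may differ in details:

    #include <vector>
    #include <utility>
    #include <algorithm>

    // Union of two sorted, pairwise-disjoint interval lists.
    static std::vector<std::pair<size_t, size_t>>
    interval_union(std::vector<std::pair<size_t, size_t>> a,
                   const std::vector<std::pair<size_t, size_t>>& b) {
      a.insert(a.end(), b.begin(), b.end());
      std::sort(a.begin(), a.end());
      std::vector<std::pair<size_t, size_t>> out;
      for (auto& iv : a) {
        if (!out.empty() && iv.first <= out.back().second)
          out.back().second = std::max(out.back().second, iv.second); // overlapping or touching: extend
        else
          out.push_back(iv);
      }
      return out;
    }

    int main() {
      auto m = interval_union({{0, 4}, {8, 12}}, {{2, 6}});
      // m == {(0,6), (8,12)}
    }
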
 void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
 {
-  return;
+  /* nothing done in this version */
 }
 
 static void smpi_check_options(){
@@ -450,7 +206,6 @@ void smpi_global_init()
 {
   int i;
   MPI_Group group;
-  char name[MAILBOX_NAME_MAXLEN];
   int smpirun=0;
 
   if (!MC_is_active()) {
@@ -553,53 +308,21 @@ void smpi_global_init()
     smpirun=1;
   }
   smpi_universe_size = process_count;
-  process_data = new smpi_process_data_t[process_count];
+  process_data = new simgrid::smpi::Process*[process_count];
   for (i = 0; i < process_count; i++) {
-    process_data[i] = new s_smpi_process_data_t;
-    process_data[i]->argc = nullptr;
-    process_data[i]->argv = nullptr;
-    process_data[i]->mailbox = simcall_mbox_create(get_mailbox_name(name, i));
-    process_data[i]->mailbox_small = simcall_mbox_create(get_mailbox_name_small(name, i));
-    process_data[i]->mailboxes_mutex = xbt_mutex_init();
-    process_data[i]->timer = xbt_os_timer_new();
-    if (MC_is_active())
-      MC_ignore_heap(process_data[i]->timer, xbt_os_timer_size());
-    process_data[i]->comm_self = MPI_COMM_NULL;
-    process_data[i]->comm_intra = MPI_COMM_NULL;
-    process_data[i]->comm_world = nullptr;
-    process_data[i]->state = SMPI_UNINITIALIZED;
-    process_data[i]->sampling = 0;
-    process_data[i]->finalization_barrier = nullptr;
-    process_data[i]->return_value = 0;
-
-#if HAVE_PAPI
-    if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
-      // TODO: Implement host/process/thread based counters. This implementation
-      // just always takes the values passed via "default", like this:
-      // "default:COUNTER1:COUNTER2:COUNTER3;".
-      auto it = units2papi_setup.find(papi_default_config_name);
-      if (it != units2papi_setup.end()) {
-        process_data[i]->papi_event_set = it->second.event_set;
-        process_data[i]->papi_counter_data = it->second.counter_data;
-        XBT_DEBUG("Setting PAPI set for process %i", i);
-      } else {
-        process_data[i]->papi_event_set = PAPI_NULL;
-        XBT_DEBUG("No PAPI set for process %i", i);
-      }
-    }
-#endif
+    process_data[i] = new simgrid::smpi::Process(i);
   }
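
The removed TODO documents the accepted smpi/papi-events syntax: a unit name followed by colon-separated counter names, e.g. "default:COUNTER1:COUNTER2;". A standalone sketch of splitting one such unit specification — the real parsing happens where units2papi_setup is filled, and the counter names below are ordinary PAPI presets:

    #include <sstream>
    #include <string>
    #include <vector>
    #include <iostream>

    int main() {
      std::string spec = "default:PAPI_TOT_CYC:PAPI_TOT_INS"; // one unit, trailing ';' already stripped
      std::stringstream ss(spec);
      std::string field;
      std::vector<std::string> fields;
      while (std::getline(ss, field, ':'))
        fields.push_back(field);
      std::cout << "unit = " << fields[0] << "\n";          // "default"
      for (size_t i = 1; i < fields.size(); i++)
        std::cout << "counter = " << fields[i] << "\n";
    }
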
 
   //if the process was launched through smpirun script we generate a global mpi_comm_world
   //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
   if(smpirun){
-    group = smpi_group_new(process_count);
-    MPI_COMM_WORLD = smpi_comm_new(group, nullptr);
+    group = new simgrid::smpi::Group(process_count);
+    MPI_COMM_WORLD = new simgrid::smpi::Comm(group, nullptr);
     MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void*>(process_count));
-    xbt_bar_t bar=xbt_barrier_init(process_count);
+    msg_bar_t bar = MSG_barrier_init(process_count);
 
     for (i = 0; i < process_count; i++) {
-      smpi_group_set_mapping(group, i, i);
-      process_data[i]->finalization_barrier = bar;
+      group->set_mapping(i, i);
+      process_data[i]->set_finalization_barrier(bar);
     }
   }
 }
 
@@ -607,35 +330,35 @@ void smpi_global_init()
 void smpi_global_destroy()
 {
   int count = smpi_process_count();
-  int i;
 
   smpi_bench_destroy();
+  smpi_shared_destroy();
   if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-    while (smpi_group_unuse(smpi_comm_group(MPI_COMM_WORLD)) > 0);
-    xbt_barrier_destroy(process_data[0]->finalization_barrier);
+    delete MPI_COMM_WORLD->group();
+    MSG_barrier_destroy(process_data[0]->finalization_barrier());
   }else{
     smpi_deployment_cleanup_instances();
   }
-  for (i = 0; i < count; i++) {
-    if(process_data[i]->comm_self!=MPI_COMM_NULL){
-      smpi_comm_destroy(process_data[i]->comm_self);
+  for (int i = 0; i < count; i++) {
+    if(process_data[i]->comm_self()!=MPI_COMM_NULL){
+      simgrid::smpi::Comm::destroy(process_data[i]->comm_self());
     }
-    if(process_data[i]->comm_intra!=MPI_COMM_NULL){
-      smpi_comm_destroy(process_data[i]->comm_intra);
+    if(process_data[i]->comm_intra()!=MPI_COMM_NULL){
+      simgrid::smpi::Comm::destroy(process_data[i]->comm_intra());
     }
-    xbt_os_timer_free(process_data[i]->timer);
-    xbt_mutex_destroy(process_data[i]->mailboxes_mutex);
+    xbt_os_timer_free(process_data[i]->timer());
+    xbt_mutex_destroy(process_data[i]->mailboxes_mutex());
     delete process_data[i];
   }
   delete[] process_data;
   process_data = nullptr;
 
   if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
-    smpi_comm_cleanup_smp(MPI_COMM_WORLD);
-    smpi_comm_cleanup_attributes(MPI_COMM_WORLD);
-    if(smpi_coll_cleanup_callback!=nullptr)
-      smpi_coll_cleanup_callback();
-    xbt_free(MPI_COMM_WORLD);
+    MPI_COMM_WORLD->cleanup_smp();
+    MPI_COMM_WORLD->cleanup_attr();
+    if(simgrid::smpi::Colls::smpi_coll_cleanup_callback!=nullptr)
+      simgrid::smpi::Colls::smpi_coll_cleanup_callback();
+    delete MPI_COMM_WORLD;
   }
 
   MPI_COMM_WORLD = MPI_COMM_NULL;
@@ -645,26 +368,23 @@ void smpi_global_destroy()
   }
 
   xbt_free(index_to_process_data);
-  if(smpi_type_keyvals!=nullptr)
-    xbt_dict_free(&smpi_type_keyvals);
-  if(smpi_comm_keyvals!=nullptr)
-    xbt_dict_free(&smpi_comm_keyvals);
   if(smpi_privatize_global_variables)
     smpi_destroy_global_memory_segments();
   smpi_free_static();
 }
 
+extern "C" {
+
 #ifndef WIN32
 void __attribute__ ((weak)) user_main_()
 {
   xbt_die("Should not be in this smpi_simulated_main");
-  return;
 }
 
 int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
 {
-  smpi_process_init(&argc, &argv);
+  simgrid::smpi::Process::init(&argc, &argv);
   user_main_();
   return 0;
 }
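
user_main_ and smpi_simulated_main_ are weak symbols: the strong definitions generated when the user's MPI code is compiled override them at link time, and the xbt_die() fallback only fires when no user code was linked in. The mechanism in isolation (GCC/Clang on ELF platforms; names are illustrative):

    // fallback.cpp -- linked into the library
    #include <cstdio>
    extern "C" int __attribute__((weak)) entry_point() {
      std::puts("fallback: no user code linked");
      return 1;
    }

    // user.cpp -- if this strong definition is also linked, it wins:
    // extern "C" int entry_point() { return 0; }

    int main() { return entry_point(); }
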
xbt_cfg_get_string("smpi/alltoall"),"alltoall"); - mpi_coll_alltoall_fun = reinterpret_cast - (mpi_coll_alltoall_description[alltoall_id].coll); - - int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description, - xbt_cfg_get_string("smpi/alltoallv"),"alltoallv"); - mpi_coll_alltoallv_fun = reinterpret_cast - (mpi_coll_alltoallv_description[alltoallv_id].coll); - - int bcast_id = find_coll_description(mpi_coll_bcast_description, xbt_cfg_get_string("smpi/bcast"),"bcast"); - mpi_coll_bcast_fun = reinterpret_cast - (mpi_coll_bcast_description[bcast_id].coll); - - int reduce_id = find_coll_description(mpi_coll_reduce_description, xbt_cfg_get_string("smpi/reduce"),"reduce"); - mpi_coll_reduce_fun = reinterpret_cast - (mpi_coll_reduce_description[reduce_id].coll); - - int reduce_scatter_id = - find_coll_description(mpi_coll_reduce_scatter_description, - xbt_cfg_get_string("smpi/reduce-scatter"),"reduce_scatter"); - mpi_coll_reduce_scatter_fun = reinterpret_cast - (mpi_coll_reduce_scatter_description[reduce_scatter_id].coll); - - int scatter_id = find_coll_description(mpi_coll_scatter_description, xbt_cfg_get_string("smpi/scatter"),"scatter"); - mpi_coll_scatter_fun = reinterpret_cast - (mpi_coll_scatter_description[scatter_id].coll); - - int barrier_id = find_coll_description(mpi_coll_barrier_description, xbt_cfg_get_string("smpi/barrier"),"barrier"); - mpi_coll_barrier_fun = reinterpret_cast - (mpi_coll_barrier_description[barrier_id].coll); - - smpi_coll_cleanup_callback=nullptr; + + simgrid::smpi::Colls::set_collectives(); + simgrid::smpi::Colls::smpi_coll_cleanup_callback=nullptr; smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold"); smpi_host_speed = xbt_cfg_get_double("smpi/host-speed"); smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables"); if (smpi_cpu_threshold < 0) smpi_cpu_threshold = DBL_MAX; + + char* val = xbt_cfg_get_string("smpi/shared-malloc"); + if (!strcasecmp(val, "yes") || !strcmp(val, "1") || !strcasecmp(val, "on") || !strcasecmp(val, "global")) { + smpi_cfg_shared_malloc = shmalloc_global; + } else if (!strcasecmp(val, "local")) { + smpi_cfg_shared_malloc = shmalloc_local; + } else if (!strcasecmp(val, "no") || !strcmp(val, "0") || !strcasecmp(val, "off")) { + smpi_cfg_shared_malloc = shmalloc_none; + } else { + xbt_die("Invalid value '%s' for option smpi/shared-malloc. 
 
 int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
 {
@@ -787,13 +473,13 @@ int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
   SIMIX_global_init(&argc, argv);
   MSG_init(&argc,argv);
 
-  SMPI_switch_data_segment = smpi_switch_data_segment;
+  SMPI_switch_data_segment = &smpi_switch_data_segment;
 
   smpi_init_options();
 
   // parse the platform file: get the host list
   SIMIX_create_environment(argv[1]);
-  SIMIX_comm_set_copy_data_callback(&smpi_comm_copy_buffer_callback);
+  SIMIX_comm_set_copy_data_callback(smpi_comm_copy_data_callback);
   SIMIX_function_register_default(realmain);
   SIMIX_launch_application(argv[2]);
 
@@ -830,8 +516,8 @@ int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
   int count = smpi_process_count();
   int i, ret=0;
   for (i = 0; i < count; i++) {
-    if(process_data[i]->return_value!=0){
-      ret=process_data[i]->return_value;//return first non-zero value
+    if(process_data[i]->return_value()!=0){
+      ret=process_data[i]->return_value();//return first non-zero value
       break;
     }
   }
@@ -858,3 +544,29 @@ void SMPI_init(){
 void SMPI_finalize(){
   smpi_global_destroy();
 }
+
+void smpi_mpi_init() {
+  if(smpi_init_sleep > 0)
+    simcall_process_sleep(smpi_init_sleep);
+}
+
+double smpi_mpi_wtime(){
+  double time;
+  if (smpi_process()->initialized() != 0 && smpi_process()->finalized() == 0 && smpi_process()->sampling() == 0) {
+    smpi_bench_end();
+    time = SIMIX_get_clock();
+    // to avoid deadlocks if used as a break condition, such as
+    //     while (MPI_Wtime(...) < time_limit) {
+    //       ....
+    //     }
+    // because the time will not normally advance when only calls to MPI_Wtime
+    // are made -> deadlock (MPI_Wtime never reaches the time limit)
+    if(smpi_wtime_sleep > 0)
+      simcall_process_sleep(smpi_wtime_sleep);
+    smpi_bench_begin();
+  } else {
+    time = SIMIX_get_clock();
+  }
+  return time;
+}
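
The comment in smpi_mpi_wtime() deserves spelling out: simulated time only advances when the application computes or communicates, so a loop that merely polls MPI_Wtime() would spin forever at a constant clock unless smpi/wtime injects a small delay. The pattern being guarded against, assuming SimGrid's usual --cfg=key:value syntax for setting the flag:

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      double deadline = MPI_Wtime() + 1.0;
      // Under SMPI with smpi/wtime left at 0, this loop never terminates:
      // MPI_Wtime() keeps returning the same simulated clock because nothing
      // advances it. With e.g. --cfg=smpi/wtime:1e-6, each call injects a
      // tiny simulated sleep and the loop eventually exits.
      while (MPI_Wtime() < deadline) { /* spin */ }
      MPI_Finalize();
      return 0;
    }
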