X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/e64baa30ebcf76e39a3b3994b9364f52e2b93cd5..e9decd04ae6f5fb586c56fd5473f8b9b321143d2:/src/smpi/smpi_global.cpp

diff --git a/src/smpi/smpi_global.cpp b/src/smpi/smpi_global.cpp
index 595b87954f..f6094d53e3 100644
--- a/src/smpi/smpi_global.cpp
+++ b/src/smpi/smpi_global.cpp
@@ -4,18 +4,19 @@
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
+#include "mc/mc.h"
 #include "private.h"
 #include "private.hpp"
+#include "simgrid/s4u/Mailbox.hpp"
+#include "simgrid/sg_config.h"
 #include "smpi_mpi_dt_private.h"
-#include "mc/mc.h"
+#include "src/kernel/activity/SynchroComm.hpp"
 #include "src/mc/mc_record.h"
-#include "xbt/replay.h"
-#include "surf/surf.h"
-#include "src/simix/smx_private.h"
-#include "simgrid/sg_config.h"
 #include "src/mc/mc_replay.h"
 #include "src/msg/msg_private.h"
-#include "src/simix/SynchroComm.hpp"
+#include "src/simix/smx_private.h"
+#include "surf/surf.h"
+#include "xbt/replay.h"
 
 #include <float.h> /* DBL_MAX */
 #include <stdint.h>
@@ -46,8 +47,8 @@ typedef struct s_smpi_process_data {
   double simulated;
   int *argc;
   char ***argv;
-  smx_mailbox_t mailbox;
-  smx_mailbox_t mailbox_small;
+  simgrid::s4u::MailboxPtr mailbox;
+  simgrid::s4u::MailboxPtr mailbox_small;
   xbt_mutex_t mailboxes_mutex;
   xbt_os_timer_t timer;
   MPI_Comm comm_self;
@@ -59,7 +60,7 @@ typedef struct s_smpi_process_data {
   int sampling; /* inside an SMPI_SAMPLE_ block? */
   char* instance_id;
   bool replaying; /* is the process replaying a trace */
-  xbt_bar_t finalization_barrier;
+  msg_bar_t finalization_barrier;
   int return_value;
   smpi_trace_call_location_t trace_call_loc;
 #if HAVE_PAPI
@@ -74,6 +75,8 @@ int process_count = 0;
 int smpi_universe_size = 0;
 int* index_to_process_data = nullptr;
 extern double smpi_total_benched_time;
+extern xbt_dict_t smpi_type_keyvals;
+extern xbt_dict_t smpi_comm_keyvals;
 xbt_os_timer_t global_timer;
 MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
 MPI_Errhandler *MPI_ERRORS_RETURN = nullptr;
@@ -96,16 +99,17 @@ static char *get_mailbox_name_small(char *str, int index)
 
 void smpi_process_init(int *argc, char ***argv)
 {
-  int index=-1;
-  smpi_process_data_t data;
-  smx_process_t proc;
+  if (process_data == nullptr){
+    printf("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
+    exit(1);
+  }
 
   if (argc != nullptr && argv != nullptr) {
-    proc = SIMIX_process_self();
-    SIMIX_process_set_cleanup_function(proc, MSG_process_cleanup_from_SIMIX);
+    smx_actor_t proc = SIMIX_process_self();
+    proc->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
     char* instance_id = (*argv)[1];
     int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
-    index = smpi_process_index_of_smx_process(proc);
+    int index = smpi_process_index_of_smx_process(proc);
 
     if(index_to_process_data == nullptr){
       index_to_process_data=static_cast<int*>(xbt_malloc(SIMIX_process_count()*sizeof(int)));
@@ -119,18 +123,17 @@ void smpi_process_init(int *argc, char ***argv)
     }
 
     MPI_Comm* temp_comm_world;
-    xbt_bar_t temp_bar;
+    msg_bar_t temp_bar;
     smpi_deployment_register_process(instance_id, rank, index, &temp_comm_world, &temp_bar);
-    data = smpi_process_remote_data(index);
-    data->comm_world = temp_comm_world;
+    smpi_process_data_t data = smpi_process_remote_data(index);
+    data->comm_world = temp_comm_world;
     if(temp_bar != nullptr)
       data->finalization_barrier = temp_bar;
     data->index = index;
     data->instance_id = instance_id;
     data->replaying = false;
 
-    simdata_process_t simdata = static_cast<simdata_process_t>(simcall_process_get_data(proc));
-    simdata->data = data;
+    static_cast<MsgActorExt*>(proc->data)->data = data;
 
     if (*argc > 3) {
       memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
@@ -141,12 +144,12 @@ void smpi_process_init(int *argc, char ***argv)
     data->argc = argc;
     data->argv = argv;
     // set the process attached to the mailbox
-    simcall_mbox_set_receiver(data->mailbox_small, proc);
+    data->mailbox_small->setReceiver(simgrid::s4u::Actor::self());
     XBT_DEBUG("<%d> New process in the game: %p", index, proc);
   }
   xbt_assert(smpi_process_data(),
-             "smpi_process_data() returned nullptr. You probably gave a nullptr parameter to MPI_Init. Although it's required by "
-             "MPI-2, this is currently not supported by SMPI.");
+             "smpi_process_data() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
+             "Although it's required by MPI-2, this is currently not supported by SMPI.");
 }
 
 void smpi_process_destroy()
@@ -168,7 +171,7 @@ void smpi_process_finalize()
 
   int index = smpi_process_index();
   // wait for all pending asynchronous comms to finish
-  xbt_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier);
+  MSG_barrier_wait(process_data[index_to_process_data[index]]->finalization_barrier);
 }
 
 /** @brief Check if a process is finalized */
@@ -223,8 +226,8 @@ int smpi_global_size()
 
 smpi_process_data_t smpi_process_data()
 {
-  simdata_process_t simdata = static_cast<simdata_process_t>(SIMIX_process_self_get_data());
-  return static_cast<smpi_process_data_t>(simdata->data);
+  MsgActorExt* msgExt = static_cast<MsgActorExt*>(SIMIX_process_self()->data);
+  return static_cast<smpi_process_data_t>(msgExt->data);
 }
 
 smpi_process_data_t smpi_process_remote_data(int index)
@@ -278,13 +281,13 @@ MPI_Comm smpi_process_comm_world()
 smx_mailbox_t smpi_process_mailbox()
 {
   smpi_process_data_t data = smpi_process_data();
-  return data->mailbox;
+  return data->mailbox->getImpl();
 }
 
 smx_mailbox_t smpi_process_mailbox_small()
 {
   smpi_process_data_t data = smpi_process_data();
-  return data->mailbox_small;
+  return data->mailbox_small->getImpl();
 }
 
 xbt_mutex_t smpi_process_mailboxes_mutex()
@@ -296,13 +299,13 @@ xbt_mutex_t smpi_process_mailboxes_mutex()
 smx_mailbox_t smpi_process_remote_mailbox(int index)
 {
   smpi_process_data_t data = smpi_process_remote_data(index);
-  return data->mailbox;
+  return data->mailbox->getImpl();
 }
 
 smx_mailbox_t smpi_process_remote_mailbox_small(int index)
 {
   smpi_process_data_t data = smpi_process_remote_data(index);
-  return data->mailbox_small;
+  return data->mailbox_small->getImpl();
 }
 
 xbt_mutex_t smpi_process_remote_mailboxes_mutex(int index)
@@ -385,19 +388,19 @@ void print_request(const char *message, MPI_Request request)
        message, request, request->buf, request->size, request->src, request->dst, request->tag, request->flags);
 }
 
-void smpi_comm_copy_buffer_callback(smx_synchro_t synchro, void *buff, size_t buff_size)
+void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
 {
   XBT_DEBUG("Copy the data over");
   void* tmpbuff=buff;
-  simgrid::simix::Comm *comm = dynamic_cast<simgrid::simix::Comm*>(synchro);
+  simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);
 
   if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
       && (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )
     ){
       XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
-
-      smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->src_proc))->data))->index));
+      smpi_switch_data_segment(
+          (static_cast<smpi_process_data_t>((static_cast<MsgActorExt*>(comm->src_proc->data)->data))->index));
       tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
       memcpy(tmpbuff, buff, buff_size);
   }
@@ -405,7 +408,8 @@ void smpi_comm_copy_buffer_callback(smx_synchro_t synchro, void *buff, size_t bu
   if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe) &&
       ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
     XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
-    smpi_switch_data_segment((static_cast<smpi_process_data_t>((static_cast<simdata_process_t>(SIMIX_process_get_data(comm->dst_proc))->data))->index));
+    smpi_switch_data_segment(
+        (static_cast<smpi_process_data_t>((static_cast<MsgActorExt*>(comm->dst_proc->data)->data))->index));
   }
 
   memcpy(comm->dst_buff, tmpbuff, buff_size);
@@ -421,9 +425,9 @@ void smpi_comm_copy_buffer_callback(smx_synchro_t synchro, void *buff, size_t bu
   if(tmpbuff!=buff)xbt_free(tmpbuff);
 }
 
-void smpi_comm_null_copy_buffer_callback(smx_synchro_t comm, void *buff, size_t buff_size)
+void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
 {
-  return;
+  /* nothing done in this version */
 }
 
 static void smpi_check_options(){
@@ -431,12 +435,16 @@ static void smpi_check_options(){
 
   xbt_assert(xbt_cfg_get_int("smpi/async-small-thresh") <= xbt_cfg_get_int("smpi/send-is-detached-thresh"));
 
-  if (xbt_cfg_is_default_value("smpi/running-power")) {
+  if (xbt_cfg_is_default_value("smpi/host-speed")) {
     XBT_INFO("You did not set the power of the host running the simulation. "
              "The timings will certainly not be accurate. "
-             "Use the option \"--cfg=smpi/running-power:\" to set its value."
+             "Use the option \"--cfg=smpi/host-speed:\" to set its value."
             "Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information.");
   }
+
+  xbt_assert(xbt_cfg_get_double("smpi/cpu-threshold") >=0,
+             "The 'smpi/cpu-threshold' option cannot have negative values [anymore]. If you want to discard "
+             "the simulation of any computation, please use 'smpi/simulate-computation:no' instead.");
 }
 
 int smpi_enabled() {
@@ -555,8 +563,8 @@ void smpi_global_init()
     process_data[i] = new s_smpi_process_data_t;
     process_data[i]->argc = nullptr;
     process_data[i]->argv = nullptr;
-    process_data[i]->mailbox = simcall_mbox_create(get_mailbox_name(name, i));
-    process_data[i]->mailbox_small = simcall_mbox_create(get_mailbox_name_small(name, i));
+    process_data[i]->mailbox = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, i));
+    process_data[i]->mailbox_small = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, i));
     process_data[i]->mailboxes_mutex = xbt_mutex_init();
     process_data[i]->timer = xbt_os_timer_new();
     if (MC_is_active())
@@ -592,7 +600,7 @@ void smpi_global_init()
   group = smpi_group_new(process_count);
   MPI_COMM_WORLD = smpi_comm_new(group, nullptr);
   MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void*>(process_count));
-  xbt_bar_t bar=xbt_barrier_init(process_count);
+  msg_bar_t bar = MSG_barrier_init(process_count);
 
   for (i = 0; i < process_count; i++) {
     smpi_group_set_mapping(group, i, i);
@@ -604,16 +612,15 @@ void smpi_global_init()
 void smpi_global_destroy()
 {
   int count = smpi_process_count();
-  int i;
 
   smpi_bench_destroy();
   if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
     while (smpi_group_unuse(smpi_comm_group(MPI_COMM_WORLD)) > 0);
-    xbt_barrier_destroy(process_data[0]->finalization_barrier);
+    MSG_barrier_destroy(process_data[0]->finalization_barrier);
   }else{
     smpi_deployment_cleanup_instances();
   }
-  for (i = 0; i < count; i++) {
+  for (int i = 0; i < count; i++) {
     if(process_data[i]->comm_self!=MPI_COMM_NULL){
       smpi_comm_destroy(process_data[i]->comm_self);
     }
@@ -642,6 +649,10 @@ void smpi_global_destroy()
   }
 
   xbt_free(index_to_process_data);
+  if(smpi_type_keyvals!=nullptr)
+    xbt_dict_free(&smpi_type_keyvals);
+  if(smpi_comm_keyvals!=nullptr)
+    xbt_dict_free(&smpi_comm_keyvals);
   if(smpi_privatize_global_variables)
     smpi_destroy_global_memory_segments();
   smpi_free_static();
@@ -652,7 +663,6 @@ void smpi_global_destroy()
 void __attribute__ ((weak)) user_main_()
 {
   xbt_die("Should not be in this smpi_simulated_main");
-  return;
 }
 
 int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
@@ -756,7 +766,7 @@ static void smpi_init_options(){
 
   smpi_coll_cleanup_callback=nullptr;
   smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
-  smpi_running_power = xbt_cfg_get_double("smpi/running-power");
+  smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
   smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
   if (smpi_cpu_threshold < 0)
     smpi_cpu_threshold = DBL_MAX;
@@ -780,7 +790,7 @@ int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
   SIMIX_global_init(&argc, argv);
   MSG_init(&argc,argv);
 
-  SMPI_switch_data_segment = smpi_switch_data_segment;
+  SMPI_switch_data_segment = &smpi_switch_data_segment;
 
   smpi_init_options();