X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/d7344bf4cd4b75fe33e19f2ac1994b200962aa7e..4c753f8d4cabd4104f3f7109823f16be2ebdcce3:/src/smpi/bindings/smpi_pmpi.cpp

diff --git a/src/smpi/bindings/smpi_pmpi.cpp b/src/smpi/bindings/smpi_pmpi.cpp
index ff6a24857d..684b283a86 100644
--- a/src/smpi/bindings/smpi_pmpi.cpp
+++ b/src/smpi/bindings/smpi_pmpi.cpp
@@ -1,15 +1,21 @@
-/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2022. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
+#include "private.hpp"
+#include "simgrid/host.h"
+#include "simgrid/instr.h"
 #include "simgrid/s4u/Engine.hpp"
 #include "simgrid/s4u/Host.hpp"
-#include "private.h"
+#include "simgrid/version.h"
+#include "smpi_coll.hpp"
 #include "smpi_comm.hpp"
 #include "smpi_datatype_derived.hpp"
-#include "smpi_process.hpp"
 #include "smpi_status.hpp"
+#include "src/kernel/EngineImpl.hpp"
+#include "src/kernel/actor/ActorImpl.hpp"
+#include "src/smpi/include/smpi_actor.hpp"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_pmpi, smpi, "Logging specific to SMPI (pmpi)");
 
@@ -17,36 +23,42 @@ XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_pmpi, smpi, "Logging specific to SMPI (pmpi
 void TRACE_smpi_set_category(const char *category)
 {
   //need to end bench otherwise categories for execution tasks are wrong
-  smpi_bench_end();
-  TRACE_internal_smpi_set_category (category);
-  //begin bench after changing process's category
-  smpi_bench_begin();
+  const SmpiBenchGuard suspend_bench;
+
+  if (category != nullptr) {
+    // declare category
+    simgrid::instr::declare_tracing_category(category);
+    smpi_process()->set_tracing_category(category);
+  }
 }
 
 /* PMPI User level calls */
-extern "C" { // Obviously, the C MPI interface should use the C linkage
 
-int PMPI_Init(int *argc, char ***argv)
+int PMPI_Init(int*, char***)
 {
-  xbt_assert(simgrid::s4u::Engine::isInitialized(),
+  xbt_assert(simgrid::s4u::Engine::is_initialized(),
              "Your MPI program was not properly initialized. The easiest is to use smpirun to start it.");
-  // PMPI_Init is called only once per SMPI process
-  int already_init;
-  MPI_Initialized(&already_init);
-  if(already_init == 0){
-    simgrid::smpi::Process::init(argc, argv);
-    smpi_process()->mark_as_initialized();
-    int rank = smpi_process()->index();
-    TRACE_smpi_init(rank);
-    TRACE_smpi_computing_init(rank);
-    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
-    extra->type = TRACING_INIT;
-    TRACE_smpi_collective_in(rank, __FUNCTION__, extra);
-    TRACE_smpi_collective_out(rank, __FUNCTION__);
-    smpi_bench_begin();
+
+  if(smpi_process()->initializing()){
+    XBT_WARN("SMPI is already initializing - MPI_Init called twice ?");
+    return MPI_ERR_OTHER;
+  }
+  if(smpi_process()->initialized()){
+    XBT_WARN("SMPI already initialized once - MPI_Init called twice ?");
+    return MPI_ERR_OTHER;
+  }
+  if(smpi_process()->finalized()){
+    XBT_WARN("SMPI already finalized");
+    return MPI_ERR_OTHER;
   }
 
+  simgrid::smpi::ActorExt::init();
+  TRACE_smpi_init(simgrid::s4u::this_actor::get_pid(), __func__);
   smpi_mpi_init();
+  smpi_bench_begin();
+  smpi_process()->mark_as_initialized();
+
+  CHECK_COLLECTIVE(smpi_process()->comm_world(), "MPI_Init")
   return MPI_SUCCESS;
 }
 
@@ -54,15 +66,17 @@ int PMPI_Init(int *argc, char ***argv)
 int PMPI_Finalize()
 {
   smpi_bench_end();
-  int rank = smpi_process()->index();
-  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
-  extra->type = TRACING_FINALIZE;
-  TRACE_smpi_collective_in(rank, __FUNCTION__, extra);
+  CHECK_COLLECTIVE(smpi_process()->comm_world(), "MPI_Finalize")
+  aid_t rank_traced = simgrid::s4u::this_actor::get_pid();
+  smpi_process()->mark_as_finalizing();
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::NoOpTIData("finalize"));
+
+  if (simgrid::config::get_value<bool>("smpi/barrier-finalization"))
+    simgrid::smpi::colls::barrier(MPI_COMM_WORLD);
 
   smpi_process()->finalize();
 
-  TRACE_smpi_collective_out(rank, __FUNCTION__);
-  TRACE_smpi_finalize(smpi_process()->index());
+  TRACE_smpi_comm_out(rank_traced);
   return MPI_SUCCESS;
 }
 
@@ -79,15 +93,13 @@ int PMPI_Get_version (int *version,int *subversion){
 }
 
 int PMPI_Get_library_version (char *version,int *len){
-  smpi_bench_end();
-  snprintf(version, MPI_MAX_LIBRARY_VERSION_STRING, "SMPI Version %d.%d. Copyright The Simgrid Team 2007-2017",
+  snprintf(version, MPI_MAX_LIBRARY_VERSION_STRING, "SMPI Version %d.%d. Copyright The SimGrid Team 2007-2022",
            SIMGRID_VERSION_MAJOR, SIMGRID_VERSION_MINOR);
-  *len = strlen(version) > MPI_MAX_LIBRARY_VERSION_STRING ? MPI_MAX_LIBRARY_VERSION_STRING : strlen(version);
-  smpi_bench_begin();
+  *len = std::min(static_cast<int>(strlen(version)), MPI_MAX_LIBRARY_VERSION_STRING);
   return MPI_SUCCESS;
 }
 
-int PMPI_Init_thread(int *argc, char ***argv, int required, int *provided)
+int PMPI_Init_thread(int* argc, char*** argv, int /*required*/, int* provided)
 {
   if (provided != nullptr) {
     *provided = MPI_THREAD_SINGLE;
@@ -107,19 +119,31 @@ int PMPI_Query_thread(int *provided)
 
 int PMPI_Is_thread_main(int *flag)
 {
+  // FIXME: The MPI standard seems to say that fatal errors need to be triggered
+  // if MPI has been finalized or not yet been initialized
   if (flag == nullptr) {
     return MPI_ERR_ARG;
   } else {
-    *flag = smpi_process()->index() == 0;
+    *flag = simgrid::s4u::this_actor::get_pid() ==
+            1; // FIXME: I don't think this is correct: This just returns true if the process ID is 1,
+               // regardless of whether this process called MPI_Thread_Init() or not.
     return MPI_SUCCESS;
   }
 }
 
-int PMPI_Abort(MPI_Comm comm, int errorcode)
+int PMPI_Abort(MPI_Comm comm, int /*errorcode*/)
 {
   smpi_bench_end();
-  // FIXME: should kill all processes in comm instead
-  simcall_process_kill(SIMIX_process_self());
+  CHECK_COMM(1)
+  XBT_WARN("MPI_Abort was called, something went probably wrong in this simulation ! Killing all processes sharing the same MPI_COMM_WORLD");
+  auto myself = simgrid::kernel::actor::ActorImpl::self();
+  for (int i = 0; i < comm->size(); i++){
+    auto actor = simgrid::kernel::EngineImpl::get_instance()->get_actor_by_pid(comm->group()->actor(i));
+    if (actor != nullptr && actor != myself)
+      simgrid::kernel::actor::simcall_answered([actor] { actor->exit(); });
+  }
+  // now ourself
+  simgrid::kernel::actor::simcall_answered([myself] { myself->exit(); });
   return MPI_SUCCESS;
 }
 
@@ -134,7 +158,7 @@ double PMPI_Wtick()
   return sg_maxmin_precision;
 }
 
-int PMPI_Address(void *location, MPI_Aint * address)
+int PMPI_Address(const void* location, MPI_Aint* address)
 {
   if (address==nullptr) {
     return MPI_ERR_ARG;
@@ -144,38 +168,48 @@ int PMPI_Address(void *location, MPI_Aint * address)
   }
 }
 
-int PMPI_Get_address(void *location, MPI_Aint * address)
+int PMPI_Get_address(const void *location, MPI_Aint * address)
 {
   return PMPI_Address(location, address);
 }
 
+MPI_Aint PMPI_Aint_add(MPI_Aint address, MPI_Aint disp)
+{
+  xbt_assert(address <= PTRDIFF_MAX - disp, "overflow in MPI_Aint_add");
+  return address + disp;
+}
+
+MPI_Aint PMPI_Aint_diff(MPI_Aint address, MPI_Aint disp)
+{
+  xbt_assert(address >= PTRDIFF_MIN + disp, "underflow in MPI_Aint_diff");
+  return address - disp;
+}
+
 int PMPI_Get_processor_name(char *name, int *resultlen)
 {
-  strncpy(name, sg_host_self()->getCname(), strlen(sg_host_self()->getCname()) < MPI_MAX_PROCESSOR_NAME - 1
-                                                ? strlen(sg_host_self()->getCname()) + 1
-                                                : MPI_MAX_PROCESSOR_NAME - 1);
-  *resultlen = strlen(name) > MPI_MAX_PROCESSOR_NAME ? MPI_MAX_PROCESSOR_NAME : strlen(name);
+  int len = std::min(static_cast<int>(sg_host_self()->get_name().size()), MPI_MAX_PROCESSOR_NAME - 1);
+  sg_host_self()->get_name().copy(name, len);
+  name[len] = '\0';
+  *resultlen = len;
 
   return MPI_SUCCESS;
 }
 
-int PMPI_Get_count(MPI_Status * status, MPI_Datatype datatype, int *count)
+int PMPI_Get_count(const MPI_Status * status, MPI_Datatype datatype, int *count)
 {
   if (status == nullptr || count == nullptr) {
     return MPI_ERR_ARG;
   } else if (not datatype->is_valid()) {
     return MPI_ERR_TYPE;
   } else {
-    size_t size = datatype->size();
-    if (size == 0) {
+    if (datatype->size() == 0) {
       *count = 0;
-      return MPI_SUCCESS;
-    } else if (status->count % size != 0) {
-      return MPI_UNDEFINED;
+    } else if (status->count % datatype->size() != 0) {
+      *count = MPI_UNDEFINED;
     } else {
       *count = simgrid::smpi::Status::get_count(status, datatype);
-      return MPI_SUCCESS;
     }
+    return MPI_SUCCESS;
   }
 }
 
@@ -184,14 +218,12 @@ int PMPI_Initialized(int* flag) {
   return MPI_SUCCESS;
 }
 
-int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr){
+int PMPI_Alloc_mem(MPI_Aint size, MPI_Info /*info*/, void* baseptr)
+{
+  CHECK_NEGATIVE(1, MPI_ERR_COUNT, size)
   void *ptr = xbt_malloc(size);
-  if(ptr==nullptr)
-    return MPI_ERR_NO_MEM;
-  else {
-    *static_cast<void**>(baseptr) = ptr;
-    return MPI_SUCCESS;
-  }
+  *static_cast<void**>(baseptr) = ptr;
+  return MPI_SUCCESS;
 }
 
 int PMPI_Free_mem(void *baseptr){
@@ -205,14 +237,38 @@ int PMPI_Error_class(int errorcode, int* errorclass) {
   return MPI_SUCCESS;
 }
 
+int PMPI_Error_string(int errorcode, char* string, int* resultlen)
+{
+  static const std::vector<const char*> smpi_error_string = {FOREACH_ERROR(GENERATE_STRING)};
+  if (errorcode < 0 || static_cast<size_t>(errorcode) >= smpi_error_string.size() || string == nullptr)
+    return MPI_ERR_ARG;
+
+  int len = snprintf(string, MPI_MAX_ERROR_STRING, "%s", smpi_error_string[errorcode]);
+  *resultlen = std::min(len, MPI_MAX_ERROR_STRING - 1);
+  return MPI_SUCCESS;
+}
+
 int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) {
-  smpi_copy_fn _copy_fn={copy_fn,nullptr,nullptr};
-  smpi_delete_fn _delete_fn={delete_fn,nullptr,nullptr};
+  smpi_copy_fn _copy_fn={copy_fn,nullptr,nullptr,nullptr,nullptr,nullptr};
+  smpi_delete_fn _delete_fn={delete_fn,nullptr,nullptr,nullptr,nullptr,nullptr};
   return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Comm>(_copy_fn, _delete_fn, keyval, extra_state);
 }
 
 int PMPI_Keyval_free(int* keyval) {
+  CHECK_NULL(1, MPI_ERR_ARG, keyval)
+  CHECK_VAL(1, MPI_KEYVAL_INVALID, MPI_ERR_KEYVAL, *keyval)
   return simgrid::smpi::Keyval::keyval_free(keyval);
 }
 
-} // extern "C"
+int PMPI_Buffer_attach(void *buf, int size){
+  if(buf==nullptr)
+    return MPI_ERR_BUFFER;
+  if(size<0)
+    return MPI_ERR_ARG;
+  return smpi_process()->set_bsend_buffer(buf, size);
+}
+
+int PMPI_Buffer_detach(void* buffer, int* size){
+  smpi_process()->bsend_buffer((void**)buffer, size);
+  return smpi_process()->set_bsend_buffer(nullptr, 0);
+}
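
The new PMPI_Buffer_attach / PMPI_Buffer_detach entry points at the end of this diff are what back MPI's buffered-send mode in SMPI, via smpi_process()->set_bsend_buffer(). A minimal caller-side sketch of how the pair is exercised follows; it uses only standard MPI calls, sizes the buffer with MPI_Pack_size plus MPI_BSEND_OVERHEAD as the MPI standard prescribes, and all variable names are illustrative. Run with at least two ranks (e.g. smpirun -np 2):

    #include <mpi.h>
    #include <stdlib.h>

    int main(int argc, char* argv[])
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      /* Size the buffer as the standard requires: packed payload + overhead. */
      int packed_size;
      MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &packed_size);
      int buf_size = packed_size + MPI_BSEND_OVERHEAD;
      MPI_Buffer_attach(malloc(buf_size), buf_size); /* lands in PMPI_Buffer_attach */

      int payload = 42;
      if (rank == 0)
        MPI_Bsend(&payload, 1, MPI_INT, 1, 0, MPI_COMM_WORLD); /* copied to the buffer */
      else if (rank == 1)
        MPI_Recv(&payload, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      /* Detach waits until buffered sends drain, then hands the buffer back. */
      void* detached;
      int detached_size;
      MPI_Buffer_detach(&detached, &detached_size);
      free(detached);

      MPI_Finalize();
      return 0;
    }

Detaching before MPI_Finalize is deliberate: MPI_Buffer_detach blocks until all pending buffered sends complete, so the buffer can be freed safely.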
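
Likewise, the PMPI_Aint_add / PMPI_Aint_diff functions added above implement the MPI-3.1 portable address-arithmetic helpers, here guarded by overflow assertions. A short usage sketch under the same assumptions (standard MPI calls only; array and variable names are illustrative):

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char* argv[])
    {
      MPI_Init(&argc, &argv);

      double array[8];
      MPI_Aint base, second;
      MPI_Get_address(&array[0], &base);
      MPI_Get_address(&array[1], &second);

      /* Arithmetic on absolute addresses, without casting through char*. */
      MPI_Aint stride = MPI_Aint_diff(second, base);    /* sizeof(double) here */
      MPI_Aint third  = MPI_Aint_add(base, 2 * stride); /* address of array[2] */
      printf("stride=%ld, offset of third=%ld\n", (long)stride,
             (long)MPI_Aint_diff(third, base));

      MPI_Finalize();
      return 0;
    }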