-/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2022. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "simgrid/instr.h"
#include "simgrid/s4u/Engine.hpp"
#include "simgrid/s4u/Host.hpp"
+#include "simgrid/version.h"
#include "smpi_comm.hpp"
#include "smpi_datatype_derived.hpp"
#include "smpi_status.hpp"
+#include "smpi_coll.hpp"
#include "src/kernel/actor/ActorImpl.hpp"
#include "src/smpi/include/smpi_actor.hpp"
void TRACE_smpi_set_category(const char *category)
{
//need to end bench otherwise categories for execution tasks are wrong
- smpi_bench_end();
+ const SmpiBenchGuard suspend_bench;
+
if (category != nullptr) {
// declare category
TRACE_category(category);
smpi_process()->set_tracing_category(category);
}
- //begin bench after changing process's category
- smpi_bench_begin();
}
/* PMPI User level calls */
xbt_assert(simgrid::s4u::Engine::is_initialized(),
"Your MPI program was not properly initialized. The easiest is to use smpirun to start it.");
- xbt_assert(not smpi_process()->initializing());
- xbt_assert(not smpi_process()->initialized());
+ if(smpi_process()->initializing()){
+ XBT_WARN("SMPI is already initializing - MPI_Init called twice ?");
+ return MPI_ERR_OTHER;
+ }
+ if(smpi_process()->initialized()){
+ XBT_WARN("SMPI already initialized once - MPI_Init called twice ?");
+ return MPI_ERR_OTHER;
+ }
+ if(smpi_process()->finalized()){
+ XBT_WARN("SMPI already finalized");
+ return MPI_ERR_OTHER;
+ }
simgrid::smpi::ActorExt::init();
- int rank_traced = simgrid::s4u::this_actor::get_pid();
- TRACE_smpi_init(rank_traced);
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::NoOpTIData("init"));
- TRACE_smpi_comm_out(rank_traced);
- TRACE_smpi_computing_init(rank_traced);
- TRACE_smpi_sleeping_init(rank_traced);
+ TRACE_smpi_init(simgrid::s4u::this_actor::get_pid(), __func__);
smpi_bench_begin();
smpi_process()->mark_as_initialized();
int PMPI_Finalize()
{
smpi_bench_end();
- int rank_traced = simgrid::s4u::this_actor::get_pid();
+ aid_t rank_traced = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::NoOpTIData("finalize"));
+ if(simgrid::config::get_value<bool>("smpi/finalization-barrier"))
+ simgrid::smpi::colls::barrier(MPI_COMM_WORLD);
+
smpi_process()->finalize();
TRACE_smpi_comm_out(rank_traced);
}
/** Fill 'version' with a human-readable description of the SMPI library.
 *  'len' receives the resulting length, capped at MPI_MAX_LIBRARY_VERSION_STRING. */
int PMPI_Get_library_version (char *version,int *len){
  snprintf(version, MPI_MAX_LIBRARY_VERSION_STRING, "SMPI Version %d.%d. Copyright The SimGrid Team 2007-2022",
           SIMGRID_VERSION_MAJOR, SIMGRID_VERSION_MINOR);
  const int written = static_cast<int>(strlen(version));
  *len = (written < MPI_MAX_LIBRARY_VERSION_STRING) ? written : MPI_MAX_LIBRARY_VERSION_STRING;
  return MPI_SUCCESS;
}
}
}
// MPI_Abort: kill every actor belonging to 'comm', then the caller itself.
// The error code is ignored — SMPI simulates the abort rather than propagating a status.
int PMPI_Abort(MPI_Comm comm, int /*errorcode*/)
{
  smpi_bench_end();
  CHECK_COMM(1)
  XBT_WARN("MPI_Abort was called, something went probably wrong in this simulation ! Killing all processes sharing the same MPI_COMM_WORLD");
  auto myself = simgrid::kernel::actor::ActorImpl::self();
  // Kill the other members of the communicator first; skipping ourself here
  // lets this loop run to completion before we exit.
  for (int i = 0; i < comm->size(); i++){
    // by_pid may return nullptr if the actor already terminated — tolerate that.
    auto actor = simgrid::kernel::actor::ActorImpl::by_pid(comm->group()->actor(i));
    if (actor != nullptr && actor != myself)
      simgrid::kernel::actor::simcall([actor] { actor->exit(); });
  }
  // now ourself
  simgrid::kernel::actor::simcall([myself] { myself->exit(); });
  return MPI_SUCCESS;
}
return PMPI_Address(location, address);
}
+MPI_Aint PMPI_Aint_add(MPI_Aint address, MPI_Aint disp)
+{
+ xbt_assert(address <= PTRDIFF_MAX - disp, "overflow in MPI_Aint_add");
+ return address + disp;
+}
+
+MPI_Aint PMPI_Aint_diff(MPI_Aint address, MPI_Aint disp)
+{
+ xbt_assert(address >= PTRDIFF_MIN + disp, "underflow in MPI_Aint_diff");
+ return address - disp;
+}
+
/** MPI_Get_processor_name: copy the current host's name into 'name',
 *  truncated to MPI_MAX_PROCESSOR_NAME - 1 characters plus a NUL terminator.
 *  'resultlen' receives the number of characters written (without the NUL). */
int PMPI_Get_processor_name(char *name, int *resultlen)
{
  // Borrow the host name by reference instead of materializing a temporary
  // std::string just to call .copy() on it.
  const auto& host_name = sg_host_self()->get_name();
  int len = std::min(static_cast<int>(host_name.size()), MPI_MAX_PROCESSOR_NAME - 1);
  host_name.copy(name, len);
  name[len] = '\0';
  *resultlen = len;
  return MPI_SUCCESS;
}
size_t size = datatype->size();
if (size == 0) {
*count = 0;
- return MPI_SUCCESS;
} else if (status->count % size != 0) {
- return MPI_UNDEFINED;
+ *count = MPI_UNDEFINED;
} else {
*count = simgrid::smpi::Status::get_count(status, datatype);
- return MPI_SUCCESS;
}
+ return MPI_SUCCESS;
}
}
/** MPI_Alloc_mem: allocate 'size' bytes and store the pointer in *baseptr.
 *  The result of xbt_malloc is stored unchecked — presumably it never returns
 *  nullptr (it aborts on failure); verify against its implementation. */
int PMPI_Alloc_mem(MPI_Aint size, MPI_Info /*info*/, void* baseptr)
{
  CHECK_NEGATIVE(1, MPI_ERR_COUNT, size)
  *static_cast<void**>(baseptr) = xbt_malloc(size);
  return MPI_SUCCESS;
}
int PMPI_Free_mem(void *baseptr){
int PMPI_Error_string(int errorcode, char* string, int* resultlen)
{
- static const char* smpi_error_string[] = {FOREACH_ERROR(GENERATE_STRING)};
- constexpr int nerrors = (sizeof smpi_error_string) / (sizeof smpi_error_string[0]);
- if (errorcode < 0 || errorcode >= nerrors || string == nullptr)
+ static const std::vector<const char*> smpi_error_string = {FOREACH_ERROR(GENERATE_STRING)};
+ if (errorcode < 0 || static_cast<size_t>(errorcode) >= smpi_error_string.size() || string == nullptr)
return MPI_ERR_ARG;
int len = snprintf(string, MPI_MAX_ERROR_STRING, "%s", smpi_error_string[errorcode]);
}
// MPI_Keyval_free: release a communicator attribute keyval after validating
// that the pointer is non-null and does not hold the invalid sentinel.
int PMPI_Keyval_free(int* keyval) {
  CHECK_NULL(1, MPI_ERR_ARG, keyval)
  CHECK_VAL(1, MPI_KEYVAL_INVALID, MPI_ERR_KEYVAL, *keyval)
  return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Comm>(keyval);
}
-MPI_Errhandler PMPI_Errhandler_f2c(MPI_Fint errhan){
- if(errhan==-1)
- return MPI_ERRHANDLER_NULL;
- return static_cast<MPI_Errhandler>(simgrid::smpi::Errhandler::f2c(errhan));
-}
-
-MPI_Fint PMPI_Errhandler_c2f(MPI_Errhandler errhan){
- if(errhan==MPI_ERRHANDLER_NULL)
- return -1;
- return errhan->c2f();
-}
-
/** MPI_Buffer_attach: register a user buffer for buffered sends (MPI_Bsend).
 *  A null buffer is rejected before the size is examined, matching the
 *  original check order. */
int PMPI_Buffer_attach(void *buf, int size){
  if (buf == nullptr)
    return MPI_ERR_BUFFER;
  return (size < 0) ? MPI_ERR_ARG : smpi_process()->set_bsend_buffer(buf, size);
}
/** MPI_Buffer_detach: report the currently attached bsend buffer and its size
 *  through 'buffer'/'size', then clear the attachment. */
int PMPI_Buffer_detach(void* buffer, int* size){
  auto process = smpi_process();           // hoist the repeated lookup
  // 'buffer' is an out-parameter holding a void**, per the MPI convention;
  // use a named cast instead of the original C-style cast.
  process->bsend_buffer(static_cast<void**>(buffer), size);
  return process->set_bsend_buffer(nullptr, 0);
}