-/* Copyright (c) 2009-2018. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2009-2022. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/smpi/include/smpi_actor.hpp"
#include "mc/mc.h"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Mutex.hpp"
#include "smpi_comm.hpp"
+#include "smpi_info.hpp"
#include "src/mc/mc_replay.hpp"
-#include "src/simix/smx_private.hpp"
+#include "xbt/str.h"
#if HAVE_PAPI
#include "papi.h"
-extern std::string papi_default_config_name;
#endif
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
namespace simgrid {
namespace smpi {
+simgrid::xbt::Extension<simgrid::s4u::Actor, ActorExt> ActorExt::EXTENSION_ID;
-using simgrid::s4u::Actor;
-using simgrid::s4u::ActorPtr;
-
-ActorExt::ActorExt(ActorPtr actor, simgrid::s4u::Barrier* finalization_barrier)
- : finalization_barrier_(finalization_barrier), actor_(actor)
+ActorExt::ActorExt(s4u::Actor* actor) : actor_(actor)
{
- mailbox_ = simgrid::s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
- mailbox_small_ = simgrid::s4u::Mailbox::by_name("small-" + std::to_string(actor_->get_pid()));
- mailboxes_mutex_ = xbt_mutex_init();
+ if (not simgrid::smpi::ActorExt::EXTENSION_ID.valid())
+ simgrid::smpi::ActorExt::EXTENSION_ID = simgrid::s4u::Actor::extension_create<simgrid::smpi::ActorExt>();
+
+ mailbox_ = s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
+ mailbox_small_ = s4u::Mailbox::by_name("small-" + std::to_string(actor_->get_pid()));
+ mailboxes_mutex_ = s4u::Mutex::create();
timer_ = xbt_os_timer_new();
state_ = SmpiProcessState::UNINITIALIZED;
+ info_env_ = MPI_INFO_NULL;
if (MC_is_active())
MC_ignore_heap(timer_, xbt_os_timer_size());
#if HAVE_PAPI
- if (not simgrid::config::get_value<std::string>("smpi/papi-events").empty()) {
+ if (not smpi_cfg_papi_events_file().empty()) {
// TODO: Implement host/process/thread based counters. This implementation
// just always takes the values passed via "default", like this:
// "default:COUNTER1:COUNTER2:COUNTER3;".
- auto it = units2papi_setup.find(papi_default_config_name);
+ auto it = units2papi_setup.find("default");
if (it != units2papi_setup.end()) {
papi_event_set_ = it->second.event_set;
papi_counter_data_ = it->second.counter_data;
ActorExt::~ActorExt()
{
- if (comm_self_ != MPI_COMM_NULL)
- simgrid::smpi::Comm::destroy(comm_self_);
- if (comm_intra_ != MPI_COMM_NULL)
- simgrid::smpi::Comm::destroy(comm_intra_);
xbt_os_timer_free(timer_);
- xbt_mutex_destroy(mailboxes_mutex_);
-}
-
-void ActorExt::set_data(int* argc, char*** argv)
-{
- instance_id_ = std::string((*argv)[1]);
- comm_world_ = smpi_deployment_comm_world(instance_id_);
- simgrid::s4u::Barrier* barrier = smpi_deployment_finalization_barrier(instance_id_);
- if (barrier != nullptr) // don't overwrite the current one if the instance has none
- finalization_barrier_ = barrier;
-
- if (*argc > 3) {
- memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
- (*argv)[(*argc) - 1] = nullptr;
- (*argv)[(*argc) - 2] = nullptr;
- }
- (*argc) -= 2;
- argc_ = argc;
- argv_ = argv;
- // set the process attached to the mailbox
- mailbox_small_->set_receiver(actor_);
- XBT_DEBUG("<%ld> SMPI process has been initialized: %p", actor_->get_pid(), actor_.get());
}
/** @brief Prepares the current process for termination. */
{
state_ = SmpiProcessState::FINALIZED;
XBT_DEBUG("<%ld> Process left the game", actor_->get_pid());
-
- // This leads to an explosion of the search graph which cannot be reduced:
- if (MC_is_active() || MC_record_replay_is_active())
- return;
- // wait for all pending asynchronous comms to finish
- finalization_barrier_->wait();
+ if (info_env_ != MPI_INFO_NULL)
+ simgrid::smpi::Info::unref(info_env_);
+ if (comm_self_ != MPI_COMM_NULL)
+ simgrid::smpi::Comm::destroy(comm_self_);
+ if (comm_intra_ != MPI_COMM_NULL)
+ simgrid::smpi::Comm::destroy(comm_intra_);
+ smpi_deployment_unregister_process(instance_id_);
}
/** @brief Check if a process is finalized */
-int ActorExt::finalized()
+int ActorExt::finalized() const
{
return (state_ == SmpiProcessState::FINALIZED);
}
+/** @brief Check if a process is partially initialized already */
+int ActorExt::initializing() const
+{
+ return (state_ == SmpiProcessState::INITIALIZING);
+}
+
/** @brief Check if a process is initialized */
-int ActorExt::initialized()
+int ActorExt::initialized() const
{
// TODO cheinrich: Check if we still need this. This should be a global condition, not for a
// single process ... ?
state_ = SmpiProcessState::INITIALIZED;
}
-void ActorExt::set_replaying(bool value)
+/** @brief Mark a process as finalizing (=MPI_Finalize called) */
+void ActorExt::mark_as_finalizing()
{
if (state_ != SmpiProcessState::FINALIZED)
- replaying_ = value;
+ state_ = SmpiProcessState::FINALIZING;
}
-bool ActorExt::replaying()
+/** @brief Check if a process is finalizing */
+int ActorExt::finalizing() const
{
- return replaying_;
+ return (state_ == SmpiProcessState::FINALIZING);
}
-void ActorExt::set_user_data(void* data)
+void ActorExt::set_replaying(bool value)
{
- data_ = data;
+ if (state_ != SmpiProcessState::FINALIZED)
+ replaying_ = value;
}
-void* ActorExt::get_user_data()
+bool ActorExt::replaying() const
{
- return data_;
+ return replaying_;
}
-ActorPtr ActorExt::get_actor()
+s4u::ActorPtr ActorExt::get_actor()
{
return actor_;
}
/**
- * \brief Returns a structure that stores the location (filename + linenumber) of the last calls to MPI_* functions.
+ * @brief Returns a structure that stores the location (filename + linenumber) of the last calls to MPI_* functions.
*
- * \see smpi_trace_set_call_location
+ * @see smpi_trace_set_call_location
*/
smpi_trace_call_location_t* ActorExt::call_location()
{
privatized_region_ = region;
}
-smpi_privatization_region_t ActorExt::privatized_region()
+smpi_privatization_region_t ActorExt::privatized_region() const
{
return privatized_region_;
}
-MPI_Comm ActorExt::comm_world()
+MPI_Comm ActorExt::comm_world() const
{
return comm_world_ == nullptr ? MPI_COMM_NULL : *comm_world_;
}
-smx_mailbox_t ActorExt::mailbox()
-{
- return mailbox_->get_impl();
-}
-
-smx_mailbox_t ActorExt::mailbox_small()
-{
- return mailbox_small_->get_impl();
-}
-
-xbt_mutex_t ActorExt::mailboxes_mutex()
+s4u::MutexPtr ActorExt::mailboxes_mutex() const
{
return mailboxes_mutex_;
}
#if HAVE_PAPI
-int ActorExt::papi_event_set()
+int ActorExt::papi_event_set() const
{
return papi_event_set_;
}
void ActorExt::simulated_start()
{
- simulated_ = SIMIX_get_clock();
+ simulated_ = s4u::Engine::get_clock();
}
-double ActorExt::simulated_elapsed()
+double ActorExt::simulated_elapsed() const
{
- return SIMIX_get_clock() - simulated_;
+ return s4u::Engine::get_clock() - simulated_;
}
MPI_Comm ActorExt::comm_self()
{
if (comm_self_ == MPI_COMM_NULL) {
- MPI_Group group = new Group(1);
- comm_self_ = new Comm(group, nullptr);
- group->set_mapping(actor_, 0);
+ auto* group = new Group(1);
+ comm_self_ = new Comm(group, nullptr);
+ comm_self_->set_name("MPI_COMM_SELF");
+ group->set_mapping(actor_->get_pid(), 0);
}
return comm_self_;
}
+MPI_Info ActorExt::info_env()
+{
+ if (info_env_ == MPI_INFO_NULL)
+ info_env_ = new Info();
+ return info_env_;
+}
+
MPI_Comm ActorExt::comm_intra()
{
return comm_intra_;
sampling_ = s;
}
-int ActorExt::sampling()
+int ActorExt::sampling() const
{
return sampling_;
}
-void ActorExt::init(int* argc, char*** argv)
+void ActorExt::init()
{
+ xbt_assert(smpi_get_universe_size() != 0, "SimGrid was not initialized properly before entering MPI_Init. "
+ "Aborting, please check compilation process and use smpirun.");
- if (smpi_process_count() == 0) {
- xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process "
- "and use smpirun\n");
- }
- if (argc != nullptr && argv != nullptr) {
- simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
- proc->get_impl()->context_->set_cleanup(&SIMIX_process_cleanup);
-
- char* instance_id = (*argv)[1];
- try {
- int rank = std::stoi(std::string((*argv)[2]));
- smpi_deployment_register_process(instance_id, rank, proc);
- } catch (std::invalid_argument& ia) {
- throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
- }
+ ActorExt* ext = smpi_process();
+ // Nothing to do if we already went through MPI_Init and the argc/argv handling has been done.
+ if (ext->initialized())
+ return;
- // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
- // this up here so that I can set the privatized region before the switch.
- ActorExt* process = smpi_process_remote(proc);
- if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
- /* Now using the segment index of this process */
- process->set_privatized_region(smpi_init_global_memory_segment_process());
- /* Done at the process's creation */
- SMPI_switch_data_segment(proc);
- }
+ const simgrid::s4u::Actor* self = simgrid::s4u::Actor::self();
+ ext->instance_id_ = self->get_property("instance_id");
+ const int rank = static_cast<int>(xbt_str_parse_int(self->get_property("rank"), "Cannot parse rank"));
- process->set_data(argc, argv);
- }
- xbt_assert(smpi_process(), "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
- "Although it's required by MPI-2, this is currently not supported by SMPI. "
- "Please use MPI_Init(&argc, &argv) as usual instead.");
+ ext->state_ = SmpiProcessState::INITIALIZING;
+ smpi_deployment_register_process(ext->instance_id_, rank, self);
+
+ ext->comm_world_ = smpi_deployment_comm_world(ext->instance_id_);
+
+ // set the process attached to the mailbox
+ ext->mailbox_small_->set_receiver(ext->actor_);
+ XBT_DEBUG("<%ld> SMPI process has been initialized: %p", ext->actor_->get_pid(), static_cast<void*>(ext->actor_));
}
-int ActorExt::get_optind()
+int ActorExt::get_optind() const
{
- return optind;
+ return optind_;
}
+
void ActorExt::set_optind(int new_optind)
{
- optind = new_optind;
+ optind_ = new_optind;
+}
+
+void ActorExt::bsend_buffer(void** buf, int* size)
+{
+ *buf = bsend_buffer_;
+ *size = bsend_buffer_size_;
+}
+
+int ActorExt::set_bsend_buffer(void* buf, int size)
+{
+ if (buf != nullptr && bsend_buffer_ != nullptr)
+ return MPI_ERR_BUFFER;
+ bsend_buffer_ = buf;
+ bsend_buffer_size_ = size;
+ return MPI_SUCCESS;
}
} // namespace smpi