#include "smpi_process.hpp"
#include "mc/mc.h"
-#include "private.hpp"
-#include "simgrid/s4u/forward.hpp"
#include "smpi_comm.hpp"
-#include "smpi_group.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/msg/msg_private.hpp"
#include "src/simix/smx_private.hpp"
+#if HAVE_PAPI
+#include "papi.h"
+extern std::string papi_default_config_name;
+#endif
+
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
namespace simgrid{
using simgrid::s4u::ActorPtr;
Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
- : finalization_barrier_(finalization_barrier), process_(actor)
+ : finalization_barrier_(finalization_barrier), actor_(actor)
{
- mailbox_ = simgrid::s4u::Mailbox::byName("SMPI-" + std::to_string(process_->getPid()));
- mailbox_small_ = simgrid::s4u::Mailbox::byName("small-" + std::to_string(process_->getPid()));
+ mailbox_ = simgrid::s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
+ mailbox_small_ = simgrid::s4u::Mailbox::by_name("small-" + std::to_string(actor_->get_pid()));
mailboxes_mutex_ = xbt_mutex_init();
timer_ = xbt_os_timer_new();
- state_ = SMPI_UNINITIALIZED;
+ state_ = SmpiProcessState::UNINITIALIZED;
if (MC_is_active())
MC_ignore_heap(timer_, xbt_os_timer_size());
#if HAVE_PAPI
- if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
+ if (not simgrid::config::get_value<std::string>("smpi/papi-events").empty()) {
// TODO: Implement host/process/thread based counters. This implementation
// just always takes the values passed via "default", like this:
// "default:COUNTER1:COUNTER2:COUNTER3;".
if (it != units2papi_setup.end()) {
papi_event_set_ = it->second.event_set;
papi_counter_data_ = it->second.counter_data;
- XBT_DEBUG("Setting PAPI set for process %i", i);
+ XBT_DEBUG("Setting PAPI set for process %li", actor->get_pid());
} else {
papi_event_set_ = PAPI_NULL;
- XBT_DEBUG("No PAPI set for process %i", i);
+ XBT_DEBUG("No PAPI set for process %li", actor->get_pid());
}
}
#endif
if (barrier != nullptr) // don't overwrite the current one if the instance has none
finalization_barrier_ = barrier;
- process_ = simgrid::s4u::Actor::self();
- static_cast<simgrid::msg::ActorExt*>(process_->getImpl()->userdata)->data = this;
+ actor_ = simgrid::s4u::Actor::self();
+ static_cast<simgrid::msg::ActorExt*>(actor_->get_impl()->get_user_data())->data = this;
if (*argc > 3) {
memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
argc_ = argc;
argv_ = argv;
// set the process attached to the mailbox
- mailbox_small_->setReceiver(process_);
- XBT_DEBUG("<%ld> SMPI process has been initialized: %p", process_->getPid(), process_.get());
+ mailbox_small_->set_receiver(actor_);
+ XBT_DEBUG("<%ld> SMPI process has been initialized: %p", actor_->get_pid(), actor_.get());
}
/** @brief Prepares the current process for termination. */
void Process::finalize()
{
- state_ = SMPI_FINALIZED;
- XBT_DEBUG("<%ld> Process left the game", process_->getPid());
+ state_ = SmpiProcessState::FINALIZED;
+ XBT_DEBUG("<%ld> Process left the game", actor_->get_pid());
// This leads to an explosion of the search graph which cannot be reduced:
if(MC_is_active() || MC_record_replay_is_active())
/** @brief Check if a process is finalized */
int Process::finalized()
{
- return (state_ == SMPI_FINALIZED);
+ return (state_ == SmpiProcessState::FINALIZED);
}
/** @brief Check if a process is initialized */
{
// TODO cheinrich: Check if we still need this. This should be a global condition, not for a
// single process ... ?
- return (state_ == SMPI_INITIALIZED);
+ return (state_ == SmpiProcessState::INITIALIZED);
}
/** @brief Mark a process as initialized (=MPI_Init called) */
void Process::mark_as_initialized()
{
- if (state_ != SMPI_FINALIZED)
- state_ = SMPI_INITIALIZED;
+ if (state_ != SmpiProcessState::FINALIZED)
+ state_ = SmpiProcessState::INITIALIZED;
}
void Process::set_replaying(bool value){
- if (state_ != SMPI_FINALIZED)
+ if (state_ != SmpiProcessState::FINALIZED)
replaying_ = value;
}
return data_;
}
-ActorPtr Process::process(){
- return process_;
+ActorPtr Process::get_actor()
+{
+ return actor_;
}
/**
/** @brief Kernel-level handle of the mailbox used for regular-size messages. */
smx_mailbox_t Process::mailbox()
{
  return mailbox_->get_impl();
}
/** @brief Kernel-level handle of the mailbox used for small (eager) messages. */
smx_mailbox_t Process::mailbox_small()
{
  return mailbox_small_->get_impl();
}
xbt_mutex_t Process::mailboxes_mutex()
if(comm_self_==MPI_COMM_NULL){
MPI_Group group = new Group(1);
comm_self_ = new Comm(group, nullptr);
- group->set_mapping(process_, 0);
+ group->set_mapping(actor_, 0);
}
return comm_self_;
}
return sampling_;
}
-msg_bar_t Process::finalization_barrier(){
- return finalization_barrier_;
-}
-
void Process::init(int *argc, char ***argv){
if (smpi_process_count() == 0) {
}
if (argc != nullptr && argv != nullptr) {
simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
- proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
+ proc->get_impl()->context_->set_cleanup(&MSG_process_cleanup_from_SIMIX);
char* instance_id = (*argv)[1];
try {
// cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
// this up here so that I can set the privatized region before the switch.
Process* process = smpi_process_remote(proc);
- if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
+ if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
/* Now using the segment index of this process */
process->set_privatized_region(smpi_init_global_memory_segment_process());
/* Done at the process's creation */
"Please use MPI_Init(&argc, &argv) as usual instead.");
}
+int Process::get_optind(){
+ return optind;
+}
+void Process::set_optind(int new_optind){
+ optind=new_optind;
+}
+
}
}