src/smpi/internals/smpi_actor.cpp

/* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved.          */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "src/smpi/include/smpi_actor.hpp"
#include "mc/mc.h"
#include "smpi_comm.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/simix/smx_private.hpp"

#if HAVE_PAPI
#include "papi.h"
extern std::string papi_default_config_name;
#endif

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");

namespace simgrid {
namespace smpi {

ActorExt::ActorExt(s4u::ActorPtr actor, s4u::Barrier* finalization_barrier)
    : finalization_barrier_(finalization_barrier), actor_(actor)
{
  mailbox_         = s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
  mailbox_small_   = s4u::Mailbox::by_name("small-" + std::to_string(actor_->get_pid()));
  mailboxes_mutex_ = s4u::Mutex::create();
  timer_           = xbt_os_timer_new();
  state_           = SmpiProcessState::UNINITIALIZED;
  info_env_        = MPI_INFO_NULL;
  // The wall-clock timer is not part of the simulated state; hide it from the
  // model checker so that it does not perturb state comparison.
  if (MC_is_active())
    MC_ignore_heap(timer_, xbt_os_timer_size());

#if HAVE_PAPI
  if (not simgrid::config::get_value<std::string>("smpi/papi-events").empty()) {
    // TODO: Implement host/process/thread based counters. This implementation
    // just always takes the values passed via "default", like this:
    // "default:COUNTER1:COUNTER2:COUNTER3;".
    auto it = units2papi_setup.find(papi_default_config_name);
    if (it != units2papi_setup.end()) {
      papi_event_set_    = it->second.event_set;
      papi_counter_data_ = it->second.counter_data;
      XBT_DEBUG("Setting PAPI set for process %li", actor->get_pid());
    } else {
      papi_event_set_ = PAPI_NULL;
      XBT_DEBUG("No PAPI set for process %li", actor->get_pid());
    }
  }
#endif
}
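
/* Illustration (hypothetical snippet, not part of this file): since the mailbox
 * names above are derived from the pid alone, a peer can rendezvous with this
 * actor knowing nothing but its pid:
 *
 *   aid_t dst_pid = ...; // pid of the destination rank
 *   auto mbox  = simgrid::s4u::Mailbox::by_name("SMPI-" + std::to_string(dst_pid));
 *   auto small = simgrid::s4u::Mailbox::by_name("small-" + std::to_string(dst_pid));
 *
 * The actual send/receive paths are assumed to live elsewhere in SMPI. */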

ActorExt::~ActorExt()
{
  if (comm_self_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_self_);
  if (comm_intra_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_intra_);
  xbt_os_timer_free(timer_);
}

/** @brief Prepares the current process for termination. */
void ActorExt::finalize()
{
  state_ = SmpiProcessState::FINALIZED;
  XBT_DEBUG("<%ld> Process left the game", actor_->get_pid());

  // Under model checking or replay, waiting on the barrier would explode the
  // search graph beyond reduction, so skip the synchronization:
  if (MC_is_active() || MC_record_replay_is_active())
    return;
  // wait for all pending asynchronous comms to finish
  finalization_barrier_->wait();
}
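
/* Sketch of what the barrier buys (hypothetical timeline, not code): every rank
 * of an instance blocks in MPI_Finalize until all ranks of that instance arrive,
 * so no pending asynchronous communication can outlive its peer:
 *
 *   rank 0: ... MPI_Isend ... MPI_Finalize -> finalization_barrier_->wait() --+
 *   rank 1: ... MPI_Recv  ... MPI_Finalize -> finalization_barrier_->wait() --+--> all exit
 */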

/** @brief Check if a process is finalized */
int ActorExt::finalized()
{
  return (state_ == SmpiProcessState::FINALIZED);
}

/** @brief Check if a process is partially initialized already */
int ActorExt::initializing()
{
  return (state_ == SmpiProcessState::INITIALIZING);
}

/** @brief Check if a process is initialized */
int ActorExt::initialized()
{
  // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
  // single process ... ?
  return (state_ == SmpiProcessState::INITIALIZED);
}

/** @brief Mark a process as initialized (=MPI_Init called) */
void ActorExt::mark_as_initialized()
{
  if (state_ != SmpiProcessState::FINALIZED)
    state_ = SmpiProcessState::INITIALIZED;
}

void ActorExt::set_replaying(bool value)
{
  if (state_ != SmpiProcessState::FINALIZED)
    replaying_ = value;
}

bool ActorExt::replaying()
{
  return replaying_;
}

s4u::ActorPtr ActorExt::get_actor()
{
  return actor_;
}

/**
 * @brief Returns a structure that stores the location (filename + line number) of the last call to an MPI_* function.
 *
 * @see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* ActorExt::call_location()
{
  return &trace_call_loc_;
}

void ActorExt::set_privatized_region(smpi_privatization_region_t region)
{
  privatized_region_ = region;
}

smpi_privatization_region_t ActorExt::privatized_region()
{
  return privatized_region_;
}

MPI_Comm ActorExt::comm_world()
{
  return comm_world_ == nullptr ? MPI_COMM_NULL : *comm_world_;
}

s4u::MutexPtr ActorExt::mailboxes_mutex()
{
  return mailboxes_mutex_;
}

#if HAVE_PAPI
int ActorExt::papi_event_set()
{
  return papi_event_set_;
}

papi_counter_t& ActorExt::papi_counters()
{
  return papi_counter_data_;
}
#endif

xbt_os_timer_t ActorExt::timer()
{
  return timer_;
}

void ActorExt::simulated_start()
{
  simulated_ = SIMIX_get_clock();
}

double ActorExt::simulated_elapsed()
{
  return SIMIX_get_clock() - simulated_;
}
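
/* Usage sketch (hypothetical caller): the two functions above bracket a
 * measurement against the simulated clock, MPI_Wtime-style:
 *
 *   ext->simulated_start();
 *   // ... simulated work ...
 *   double seconds = ext->simulated_elapsed(); // elapsed simulated time, in seconds
 */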

MPI_Comm ActorExt::comm_self()
{
  if (comm_self_ == MPI_COMM_NULL) {
    MPI_Group group = new Group(1);
    comm_self_      = new Comm(group, nullptr);
    group->set_mapping(actor_, 0);
  }
  return comm_self_;
}
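
/* Note: comm_self_ backs MPI_COMM_SELF and is built lazily on first access, so
 * ranks that never touch MPI_COMM_SELF do not pay for the Group/Comm pair. A
 * hypothetical caller (the exact accessor name is an assumption):
 *
 *   MPI_Comm self = smpi_process()->comm_self(); // first call allocates
 */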

MPI_Info ActorExt::info_env()
{
  return info_env_;
}

MPI_Comm ActorExt::comm_intra()
{
  return comm_intra_;
}

void ActorExt::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}

void ActorExt::set_sampling(int s)
{
  sampling_ = s;
}

int ActorExt::sampling()
{
  return sampling_;
}

void ActorExt::init()
{
  xbt_assert(smpi_get_universe_size() != 0, "SimGrid was not initialized properly before entering MPI_Init. "
                                            "Aborting, please check compilation process and use smpirun.");

  simgrid::s4u::ActorPtr self = simgrid::s4u::Actor::self();
  // cheinrich: I'm not sure what the impact of SMPI_switch_data_segment on this call is. I moved
  // this up here so that I can set the privatized region before the switch.
  ActorExt* ext = smpi_process_remote(self);
  // Nothing to do if we are already in MPI_Init and argc handling has been done.
  if (ext->initialized())
    return;

  if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
    /* Now using the segment index of this process */
    ext->set_privatized_region(smpi_init_global_memory_segment_process());
    /* Done at the process's creation */
    SMPI_switch_data_segment(self);
  }

  std::string instance_id = self->get_property("instance_id");
  const int rank          = xbt_str_parse_int(self->get_property("rank"), "Cannot parse rank");

  ext->state_ = SmpiProcessState::INITIALIZING;
  smpi_deployment_register_process(instance_id, rank, self);

  ext->instance_id_              = instance_id;
  ext->comm_world_               = smpi_deployment_comm_world(instance_id);
  simgrid::s4u::Barrier* barrier = smpi_deployment_finalization_barrier(instance_id);
  if (barrier != nullptr) // don't overwrite the current one if the instance has none
    ext->finalization_barrier_ = barrier;

  // declare this actor as the receiver of its small-message mailbox
  ext->mailbox_small_->set_receiver(ext->actor_);
  XBT_DEBUG("<%ld> SMPI process has been initialized: %p", ext->actor_->get_pid(), ext->actor_.get());
}
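
/* Call-path sketch (an assumption about the surrounding glue, for orientation):
 * init() is expected to run early in the MPI_Init implementation, after smpirun
 * has attached the "instance_id" and "rank" properties to the actor:
 *
 *   int MPI_Init(int* argc, char*** argv)
 *   {
 *     simgrid::smpi::ActorExt::init(); // registers the rank, binds the mailboxes
 *     // ... argc/argv handling, tracing setup, etc.
 *   }
 */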

int ActorExt::get_optind()
{
  return optind_;
}

void ActorExt::set_optind(int new_optind)
{
  optind_ = new_optind;
}

} // namespace smpi
} // namespace simgrid