Logo AND Algorithmique Numérique Distribuée

Public GIT Repository
e657ab901b55f2b946bfac594702c044f6255090
[simgrid.git] / src / smpi / internals / smpi_actor.cpp
1 /* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved.          */
2
3 /* This program is free software; you can redistribute it and/or modify it
4  * under the terms of the license (GNU LGPL) which comes with this package. */
5
6 #include "src/smpi/include/smpi_actor.hpp"
7 #include "mc/mc.h"
8 #include "smpi_comm.hpp"
9 #include "src/mc/mc_replay.hpp"
10 #include "src/simix/smx_private.hpp"
11
12 #if HAVE_PAPI
13 #include "papi.h"
14 extern std::string papi_default_config_name;
15 #endif
16
// Declare the default logging channel for this file: "smpi_process", child of the "smpi" category.
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
18
19 namespace simgrid {
20 namespace smpi {
21
/** @brief Build the SMPI extension attached to one simulated MPI rank.
 *
 *  @param actor                the s4u actor this extension decorates
 *  @param finalization_barrier barrier all ranks join in finalize(); may later be
 *                              replaced by the instance's own barrier in set_data()
 */
ActorExt::ActorExt(s4u::ActorPtr actor, s4u::Barrier* finalization_barrier)
    : finalization_barrier_(finalization_barrier), actor_(actor)
{
  // Two per-rank mailboxes keyed by pid: one generic, one for small payloads.
  mailbox_         = s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
  mailbox_small_   = s4u::Mailbox::by_name("small-" + std::to_string(actor_->get_pid()));
  mailboxes_mutex_ = s4u::Mutex::create();
  timer_           = xbt_os_timer_new();
  state_           = SmpiProcessState::UNINITIALIZED;
  // The OS timer holds host (wall-clock) measurements that differ between runs;
  // hide it from the model checker so it does not inflate the state space.
  if (MC_is_active())
    MC_ignore_heap(timer_, xbt_os_timer_size());

#if HAVE_PAPI
  if (not simgrid::config::get_value<std::string>("smpi/papi-events").empty()) {
    // TODO: Implement host/process/thread based counters. This implementation
    // just always takes the values passed via "default", like this:
    // "default:COUNTER1:COUNTER2:COUNTER3;".
    auto it = units2papi_setup.find(papi_default_config_name);
    if (it != units2papi_setup.end()) {
      papi_event_set_    = it->second.event_set;
      papi_counter_data_ = it->second.counter_data;
      XBT_DEBUG("Setting PAPI set for process %li", actor->get_pid());
    } else {
      // No configuration for this unit: mark the event set as absent.
      papi_event_set_ = PAPI_NULL;
      XBT_DEBUG("No PAPI set for process %li", actor->get_pid());
    }
  }
#endif
}
50
/** @brief Release the per-rank resources: both owned communicators, then the OS timer. */
ActorExt::~ActorExt()
{
  if (comm_self_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_self_);
  if (comm_intra_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_intra_);
  xbt_os_timer_free(timer_);
}
59
60 void ActorExt::set_data(const char* instance_id)
61 {
62   instance_id_                   = std::string(instance_id);
63   comm_world_                    = smpi_deployment_comm_world(instance_id_);
64   simgrid::s4u::Barrier* barrier = smpi_deployment_finalization_barrier(instance_id_);
65   if (barrier != nullptr) // don't overwrite the current one if the instance has none
66     finalization_barrier_ = barrier;
67
68   // set the process attached to the mailbox
69   mailbox_small_->set_receiver(actor_);
70   XBT_DEBUG("<%ld> SMPI process has been initialized: %p", actor_->get_pid(), actor_.get());
71 }
72
73 /** @brief Prepares the current process for termination. */
74 void ActorExt::finalize()
75 {
76   state_ = SmpiProcessState::FINALIZED;
77   XBT_DEBUG("<%ld> Process left the game", actor_->get_pid());
78
79   // This leads to an explosion of the search graph which cannot be reduced:
80   if (MC_is_active() || MC_record_replay_is_active())
81     return;
82   // wait for all pending asynchronous comms to finish
83   finalization_barrier_->wait();
84 }
85
86 /** @brief Check if a process is finalized */
87 int ActorExt::finalized()
88 {
89   return (state_ == SmpiProcessState::FINALIZED);
90 }
91
92 /** @brief Check if a process is partially initialized already */
93 int ActorExt::initializing()
94 {
95   return (state_ == SmpiProcessState::INITIALIZING);
96 }
97
98 /** @brief Check if a process is initialized */
99 int ActorExt::initialized()
100 {
101   // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
102   // single process ... ?
103   return (state_ == SmpiProcessState::INITIALIZED);
104 }
105
106 /** @brief Mark a process as initialized (=MPI_Init called) */
107 void ActorExt::mark_as_initialized()
108 {
109   if (state_ != SmpiProcessState::FINALIZED)
110     state_ = SmpiProcessState::INITIALIZED;
111 }
112
113 void ActorExt::set_replaying(bool value)
114 {
115   if (state_ != SmpiProcessState::FINALIZED)
116     replaying_ = value;
117 }
118
/** @brief Whether this process is currently in replay mode (see set_replaying()). */
bool ActorExt::replaying()
{
  return replaying_;
}
123
/** @brief The s4u actor this SMPI extension is attached to. */
s4u::ActorPtr ActorExt::get_actor()
{
  return actor_;
}
128
/**
 * @brief Returns a structure that stores the location (filename + linenumber) of the last calls to MPI_* functions.
 *
 * The returned pointer refers to a member of this ActorExt and stays valid as long as the extension lives.
 *
 * @see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* ActorExt::call_location()
{
  return &trace_call_loc_;
}
138
/** @brief Record the memory region used for this rank's privatized globals
 *  (set from init() under the MMAP privatization strategy). */
void ActorExt::set_privatized_region(smpi_privatization_region_t region)
{
  privatized_region_ = region;
}
143
/** @brief The privatized-globals region of this rank, as stored by set_privatized_region(). */
smpi_privatization_region_t ActorExt::privatized_region()
{
  return privatized_region_;
}
148
149 MPI_Comm ActorExt::comm_world()
150 {
151   return comm_world_ == nullptr ? MPI_COMM_NULL : *comm_world_;
152 }
153
/** @brief The mutex protecting this rank's mailboxes. */
s4u::MutexPtr ActorExt::mailboxes_mutex()
{
  return mailboxes_mutex_;
}
158
#if HAVE_PAPI
/** @brief The PAPI event set configured for this process (PAPI_NULL when none was set up). */
int ActorExt::papi_event_set()
{
  return papi_event_set_;
}

/** @brief Mutable reference to the PAPI counter data gathered for this process. */
papi_counter_t& ActorExt::papi_counters()
{
  return papi_counter_data_;
}
#endif
170
/** @brief The OS (wall-clock) timer owned by this rank; freed in the destructor. */
xbt_os_timer_t ActorExt::timer()
{
  return timer_;
}
175
/** @brief Record the current simulated clock as the reference point for simulated_elapsed(). */
void ActorExt::simulated_start()
{
  simulated_ = SIMIX_get_clock();
}
180
181 double ActorExt::simulated_elapsed()
182 {
183   return SIMIX_get_clock() - simulated_;
184 }
185
186 MPI_Comm ActorExt::comm_self()
187 {
188   if (comm_self_ == MPI_COMM_NULL) {
189     MPI_Group group = new Group(1);
190     comm_self_      = new Comm(group, nullptr);
191     group->set_mapping(actor_, 0);
192   }
193   return comm_self_;
194 }
195
/** @brief The intra-communicator stored by set_comm_intra() (MPI_COMM_NULL until then). */
MPI_Comm ActorExt::comm_intra()
{
  return comm_intra_;
}
200
/** @brief Store the intra-communicator of this rank (destroyed with the ActorExt). */
void ActorExt::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}
205
/** @brief Set the sampling flag — presumably whether we are inside an SMPI_SAMPLE
 *  region; NOTE(review): confirm against the callers of sampling(). */
void ActorExt::set_sampling(int s)
{
  sampling_ = s;
}
210
/** @brief Current value of the sampling flag (see set_sampling()). */
int ActorExt::sampling()
{
  return sampling_;
}
215
216 void ActorExt::init()
217 {
218   if (smpi_process_count() == 0) {
219     xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process "
220             "and use smpirun\n");
221   }
222
223   simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
224   // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
225   // this up here so that I can set the privatized region before the switch.
226   ActorExt* process = smpi_process_remote(proc);
227   // if we are in MPI_Init and argc handling has already been done.
228   if (process->initialized())
229     return;
230
231   if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
232     /* Now using the segment index of this process  */
233     process->set_privatized_region(smpi_init_global_memory_segment_process());
234     /* Done at the process's creation */
235     SMPI_switch_data_segment(proc);
236   }
237
238   const char* instance_id = simgrid::s4u::Actor::self()->get_property("instance_id");
239   const int rank          = xbt_str_parse_int(simgrid::s4u::Actor::self()->get_property("rank"), "Cannot parse rank");
240
241   process->state_ = SmpiProcessState::INITIALIZING;
242   smpi_deployment_register_process(instance_id, rank, proc);
243
244   process->set_data(instance_id);
245 }
246
/** @brief Return the current value of the global getopt cursor `optind`.
 *  NOTE(review): `optind` is process-global state shared by all simulated ranks;
 *  these accessors appear to save/restore it around option parsing — confirm. */
int ActorExt::get_optind()
{
  return optind;
}
/** @brief Restore the global getopt cursor `optind` to a previously saved value. */
void ActorExt::set_optind(int new_optind)
{
  optind = new_optind;
}
255
256 } // namespace smpi
257 } // namespace simgrid