Logo AND Algorithmique Numérique Distribuée

Public GIT Repository
c4c7e81e5e5848e86448b3e0b66546c02c2ae9d0
[simgrid.git] / src / smpi / internals / smpi_actor.cpp
1 /* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved.          */
2
3 /* This program is free software; you can redistribute it and/or modify it
4  * under the terms of the license (GNU LGPL) which comes with this package. */
5
6 #include "src/smpi/include/smpi_actor.hpp"
7 #include "mc/mc.h"
8 #include "smpi_comm.hpp"
9 #include "src/mc/mc_replay.hpp"
10 #include "src/simix/smx_private.hpp"
11
12 #if HAVE_PAPI
13 #include "papi.h"
14 extern std::string papi_default_config_name;
15 #endif
16
17 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
18
19 namespace simgrid {
20 namespace smpi {
21
/** @brief Builds the SMPI extension attached to one s4u actor.
 *
 * @param actor                the actor this extension decorates
 * @param finalization_barrier barrier every process waits on in finalize() (may be replaced later by set_data())
 */
ActorExt::ActorExt(s4u::ActorPtr actor, s4u::Barrier* finalization_barrier)
    : finalization_barrier_(finalization_barrier), actor_(actor)
{
  // Two mailboxes per process, both keyed by PID: a regular one and one reserved for small payloads.
  mailbox_         = s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
  mailbox_small_   = s4u::Mailbox::by_name("small-" + std::to_string(actor_->get_pid()));
  mailboxes_mutex_ = s4u::Mutex::create();
  timer_           = xbt_os_timer_new();
  state_           = SmpiProcessState::UNINITIALIZED;
  info_env_        = MPI_INFO_NULL;
  // The OS timer content varies between runs; tell the model checker to ignore that
  // heap region so it does not perturb state comparison.
  if (MC_is_active())
    MC_ignore_heap(timer_, xbt_os_timer_size());

#if HAVE_PAPI
  // PAPI counters are only wired up when the user configured "smpi/papi-events".
  if (not simgrid::config::get_value<std::string>("smpi/papi-events").empty()) {
    // TODO: Implement host/process/thread based counters. This implementation
    // just always takes the values passed via "default", like this:
    // "default:COUNTER1:COUNTER2:COUNTER3;".
    auto it = units2papi_setup.find(papi_default_config_name);
    if (it != units2papi_setup.end()) {
      papi_event_set_    = it->second.event_set;
      papi_counter_data_ = it->second.counter_data;
      XBT_DEBUG("Setting PAPI set for process %li", actor->get_pid());
    } else {
      // No setup found for the default config name: leave this process without an event set.
      papi_event_set_ = PAPI_NULL;
      XBT_DEBUG("No PAPI set for process %li", actor->get_pid());
    }
  }
#endif
}
51
52 ActorExt::~ActorExt()
53 {
54   if (comm_self_ != MPI_COMM_NULL)
55     simgrid::smpi::Comm::destroy(comm_self_);
56   if (comm_intra_ != MPI_COMM_NULL)
57     simgrid::smpi::Comm::destroy(comm_intra_);
58   xbt_os_timer_free(timer_);
59 }
60
61 void ActorExt::set_data(const char* instance_id)
62 {
63   instance_id_                   = std::string(instance_id);
64   comm_world_                    = smpi_deployment_comm_world(instance_id_);
65   simgrid::s4u::Barrier* barrier = smpi_deployment_finalization_barrier(instance_id_);
66   if (barrier != nullptr) // don't overwrite the current one if the instance has none
67     finalization_barrier_ = barrier;
68
69   // set the process attached to the mailbox
70   mailbox_small_->set_receiver(actor_);
71   XBT_DEBUG("<%ld> SMPI process has been initialized: %p", actor_->get_pid(), actor_.get());
72 }
73
74 /** @brief Prepares the current process for termination. */
75 void ActorExt::finalize()
76 {
77   state_ = SmpiProcessState::FINALIZED;
78   XBT_DEBUG("<%ld> Process left the game", actor_->get_pid());
79
80   // This leads to an explosion of the search graph which cannot be reduced:
81   if (MC_is_active() || MC_record_replay_is_active())
82     return;
83   // wait for all pending asynchronous comms to finish
84   finalization_barrier_->wait();
85 }
86
87 /** @brief Check if a process is finalized */
88 int ActorExt::finalized()
89 {
90   return (state_ == SmpiProcessState::FINALIZED);
91 }
92
93 /** @brief Check if a process is partially initialized already */
94 int ActorExt::initializing()
95 {
96   return (state_ == SmpiProcessState::INITIALIZING);
97 }
98
99 /** @brief Check if a process is initialized */
100 int ActorExt::initialized()
101 {
102   // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
103   // single process ... ?
104   return (state_ == SmpiProcessState::INITIALIZED);
105 }
106
107 /** @brief Mark a process as initialized (=MPI_Init called) */
108 void ActorExt::mark_as_initialized()
109 {
110   if (state_ != SmpiProcessState::FINALIZED)
111     state_ = SmpiProcessState::INITIALIZED;
112 }
113
114 void ActorExt::set_replaying(bool value)
115 {
116   if (state_ != SmpiProcessState::FINALIZED)
117     replaying_ = value;
118 }
119
120 bool ActorExt::replaying()
121 {
122   return replaying_;
123 }
124
// The s4u actor this SMPI extension is attached to.
s4u::ActorPtr ActorExt::get_actor()
{
  return actor_;
}

/**
 * @brief Returns a structure that stores the location (filename + linenumber) of the last calls to MPI_* functions.
 *
 * @see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* ActorExt::call_location()
{
  return &trace_call_loc_;
}

// Install the privatized global-memory region of this process (set from init() under MMAP privatization).
void ActorExt::set_privatized_region(smpi_privatization_region_t region)
{
  privatized_region_ = region;
}

// Privatized global-memory region previously stored by set_privatized_region().
smpi_privatization_region_t ActorExt::privatized_region()
{
  return privatized_region_;
}
149
150 MPI_Comm ActorExt::comm_world()
151 {
152   return comm_world_ == nullptr ? MPI_COMM_NULL : *comm_world_;
153 }
154
155 s4u::MutexPtr ActorExt::mailboxes_mutex()
156 {
157   return mailboxes_mutex_;
158 }
159
#if HAVE_PAPI
// PAPI event-set identifier configured for this process (PAPI_NULL when none was set up).
int ActorExt::papi_event_set()
{
  return papi_event_set_;
}

// Mutable access to this process' PAPI counter storage.
papi_counter_t& ActorExt::papi_counters()
{
  return papi_counter_data_;
}
#endif

// Per-process OS timer allocated in the constructor.
xbt_os_timer_t ActorExt::timer()
{
  return timer_;
}

// Record the current simulated clock as the start of a measured interval.
void ActorExt::simulated_start()
{
  simulated_ = SIMIX_get_clock();
}

// Simulated time elapsed since the last call to simulated_start().
double ActorExt::simulated_elapsed()
{
  return SIMIX_get_clock() - simulated_;
}
186
187 MPI_Comm ActorExt::comm_self()
188 {
189   if (comm_self_ == MPI_COMM_NULL) {
190     MPI_Group group = new Group(1);
191     comm_self_      = new Comm(group, nullptr);
192     group->set_mapping(actor_, 0);
193   }
194   return comm_self_;
195 }
196
// MPI_Info object of the environment (set to MPI_INFO_NULL by the constructor).
MPI_Info ActorExt::info_env()
{
  return info_env_;
}

// Intra-host communicator, as stored by set_comm_intra() (checked against MPI_COMM_NULL in the destructor).
MPI_Comm ActorExt::comm_intra()
{
  return comm_intra_;
}

void ActorExt::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}

// Store the sampling flag of this process.
// NOTE(review): presumably toggled around sampled execution blocks — confirm with callers.
void ActorExt::set_sampling(int s)
{
  sampling_ = s;
}

int ActorExt::sampling()
{
  return sampling_;
}
221
/** @brief One-time SMPI initialization of the calling actor (entered from MPI_Init).
 *
 * Sets up privatization (under the MMAP strategy), registers the process with its
 * deployment instance, and attaches the instance data. Idempotent: returns early
 * if the process is already initialized.
 */
void ActorExt::init()
{
  xbt_assert(smpi_get_universe_size() != 0, "SimGrid was not initialized properly before entering MPI_Init. "
                                            "Aborting, please check compilation process and use smpirun.");

  simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
  // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
  // this up here so that I can set the privatized region before the switch.
  ActorExt* process = smpi_process_remote(proc);
  // if we are in MPI_Init and argc handling has already been done.
  if (process->initialized())
    return;

  if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
    /* Now using the segment index of this process  */
    process->set_privatized_region(smpi_init_global_memory_segment_process());
    /* Done at the process's creation */
    SMPI_switch_data_segment(proc);
  }

  // Instance id and rank come from the actor's deployment properties.
  const char* instance_id = proc->get_property("instance_id");
  const int rank          = xbt_str_parse_int(proc->get_property("rank"), "Cannot parse rank");

  // Register first, then attach the instance data (set_data() reads the deployment state).
  process->state_ = SmpiProcessState::INITIALIZING;
  smpi_deployment_register_process(instance_id, rank, proc);

  process->set_data(instance_id);
}
250
// Per-process saved value of 'optind' (each actor keeps its own copy of the getopt index).
int ActorExt::get_optind()
{
  return optind_;
}

void ActorExt::set_optind(int new_optind)
{
  optind_ = new_optind;
}
260
261 } // namespace smpi
262 } // namespace simgrid