src/smpi/internals/smpi_actor.cpp
/* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved.          */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "src/smpi/include/smpi_actor.hpp"
#include "mc/mc.h"
#include "smpi_comm.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/simix/smx_private.hpp"

#if HAVE_PAPI
#include "papi.h"
extern std::string papi_default_config_name;
#endif

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");

namespace simgrid {
namespace smpi {

using simgrid::s4u::Actor;
using simgrid::s4u::ActorPtr;

ActorExt::ActorExt(ActorPtr actor, simgrid::s4u::Barrier* finalization_barrier)
    : finalization_barrier_(finalization_barrier), actor_(actor)
{
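  /* Each MPI process gets two mailboxes: one for regular payloads and a
   * "small" one which, once a permanent receiver is attached in set_data(),
   * lets short messages be delivered eagerly instead of through a rendezvous
   * (presumably those below the smpi/async-small-thresh option). */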
  mailbox_         = s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
  mailbox_small_   = s4u::Mailbox::by_name("small-" + std::to_string(actor_->get_pid()));
  mailboxes_mutex_ = s4u::Mutex::create();
  timer_           = xbt_os_timer_new();
  state_           = SmpiProcessState::UNINITIALIZED;
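  /* The OS-level timer buffer holds host-dependent values that vary from one
   * run to the next, so it is hidden from the model checker's state
   * comparison. */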
  if (MC_is_active())
    MC_ignore_heap(timer_, xbt_os_timer_size());

#if HAVE_PAPI
  if (not simgrid::config::get_value<std::string>("smpi/papi-events").empty()) {
    // TODO: Implement host/process/thread based counters. This implementation
    // just always takes the values passed via "default", like this:
    // "default:COUNTER1:COUNTER2:COUNTER3;".
    auto it = units2papi_setup.find(papi_default_config_name);
    if (it != units2papi_setup.end()) {
      papi_event_set_    = it->second.event_set;
      papi_counter_data_ = it->second.counter_data;
      XBT_DEBUG("Setting PAPI set for process %li", actor->get_pid());
    } else {
      papi_event_set_ = PAPI_NULL;
      XBT_DEBUG("No PAPI set for process %li", actor->get_pid());
    }
  }
#endif
}

ActorExt::~ActorExt()
{
  if (comm_self_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_self_);
  if (comm_intra_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_intra_);
  xbt_os_timer_free(timer_);
}

void ActorExt::set_data(const char* instance_id)
{
  instance_id_                   = std::string(instance_id);
  comm_world_                    = smpi_deployment_comm_world(instance_id_);
  simgrid::s4u::Barrier* barrier = smpi_deployment_finalization_barrier(instance_id_);
  if (barrier != nullptr) // keep the current barrier if this instance declares none
    finalization_barrier_ = barrier;

  // attach this actor as the permanent receiver of its small-message mailbox
  mailbox_small_->set_receiver(actor_);
  XBT_DEBUG("<%ld> SMPI process has been initialized: %p", actor_->get_pid(), actor_.get());
}

/** @brief Prepares the current process for termination. */
void ActorExt::finalize()
{
  state_ = SmpiProcessState::FINALIZED;
  XBT_DEBUG("<%ld> Process left the game", actor_->get_pid());

  // Skip the barrier under model checking or replay: waiting there leads to
  // an explosion of the search graph which cannot be reduced.
  if (MC_is_active() || MC_record_replay_is_active())
    return;
  // wait for all pending asynchronous comms to finish
  finalization_barrier_->wait();
}
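
/* The shared finalization barrier makes every rank block in MPI_Finalize
 * until all ranks of the instance have reached it, so no rank tears down
 * state while a peer still has communications in flight. */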

/** @brief Check if a process is finalized */
int ActorExt::finalized()
{
  return (state_ == SmpiProcessState::FINALIZED);
}

/** @brief Check if a process is partially initialized already */
int ActorExt::initializing()
{
  return (state_ == SmpiProcessState::INITIALIZING);
}

/** @brief Check if a process is initialized */
int ActorExt::initialized()
{
  // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
  // single process ... ?
  return (state_ == SmpiProcessState::INITIALIZED);
}

/** @brief Mark a process as initialized (=MPI_Init called) */
void ActorExt::mark_as_initialized()
{
  if (state_ != SmpiProcessState::FINALIZED)
    state_ = SmpiProcessState::INITIALIZED;
}

void ActorExt::set_replaying(bool value)
{
  if (state_ != SmpiProcessState::FINALIZED)
    replaying_ = value;
}

bool ActorExt::replaying()
{
  return replaying_;
}

ActorPtr ActorExt::get_actor()
{
  return actor_;
}

/**
 * @brief Returns a structure that stores the location (file name + line number) of the last calls to MPI_* functions.
 *
 * @see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* ActorExt::call_location()
{
  return &trace_call_loc_;
}

void ActorExt::set_privatized_region(smpi_privatization_region_t region)
{
  privatized_region_ = region;
}

smpi_privatization_region_t ActorExt::privatized_region()
{
  return privatized_region_;
}

MPI_Comm ActorExt::comm_world()
{
  return comm_world_ == nullptr ? MPI_COMM_NULL : *comm_world_;
}

s4u::MutexPtr ActorExt::mailboxes_mutex()
{
  return mailboxes_mutex_;
}

#if HAVE_PAPI
int ActorExt::papi_event_set()
{
  return papi_event_set_;
}

papi_counter_t& ActorExt::papi_counters()
{
  return papi_counter_data_;
}
#endif

xbt_os_timer_t ActorExt::timer()
{
  return timer_;
}

void ActorExt::simulated_start()
{
  simulated_ = SIMIX_get_clock();
}

double ActorExt::simulated_elapsed()
{
  return SIMIX_get_clock() - simulated_;
}

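/* MPI_COMM_SELF is built lazily, on first use: a one-process group mapped to
 * this actor alone. It is destroyed along with the ActorExt, in the
 * destructor above. */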
MPI_Comm ActorExt::comm_self()
{
  if (comm_self_ == MPI_COMM_NULL) {
    MPI_Group group = new Group(1);
    comm_self_      = new Comm(group, nullptr);
    group->set_mapping(actor_, 0);
  }
  return comm_self_;
}

MPI_Comm ActorExt::comm_intra()
{
  return comm_intra_;
}

void ActorExt::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}

void ActorExt::set_sampling(int s)
{
  sampling_ = s;
}

int ActorExt::sampling()
{
  return sampling_;
}

void ActorExt::init()
{
  if (smpi_process_count() == 0) {
    xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process "
            "and use smpirun\n");
  }

  simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
  // cheinrich: I'm not sure what the impact of SMPI_switch_data_segment is on this call. I moved
  // this up here so that I can set the privatized region before the switch.
  ActorExt* process = smpi_process_remote(proc);
  // nothing to do if we are already in MPI_Init and argc handling has been done
  if (process->initialized())
    return;

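  /* With the MMAP privatization strategy, each process owns a private copy
   * of the application's global data segment, and SMPI_switch_data_segment()
   * maps the right copy in whenever the simulator schedules this actor. */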
  if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
    /* Now using the segment index of this process */
    process->set_privatized_region(smpi_init_global_memory_segment_process());
    /* Done at the process's creation */
    SMPI_switch_data_segment(proc);
  }

  const char* instance_id = simgrid::s4u::Actor::self()->get_property("instance_id");
  const int rank          = xbt_str_parse_int(simgrid::s4u::Actor::self()->get_property("rank"), "Cannot parse rank");

  process->state_ = SmpiProcessState::INITIALIZING;
  smpi_deployment_register_process(instance_id, rank, proc);

  process->set_data(instance_id);
}
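
/* Illustrative sketch, not part of this file: ActorExt::init() is the entry
 * point that MPI_Init is expected to reach for every rank. Assuming the
 * SMPI-internal helper smpi_process() (which returns the ActorExt of the
 * calling actor), the overall sequence is roughly:
 *
 *   int MPI_Init(int* argc, char*** argv)
 *   {
 *     simgrid::smpi::ActorExt::init();       // register this rank (INITIALIZING)
 *     smpi_process()->mark_as_initialized(); // state becomes INITIALIZED
 *     return MPI_SUCCESS;
 *   }
 *
 * The exact code in the PMPI layer differs (tracing, argument handling,
 * etc.); this only shows how the state machine above is driven. */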

int ActorExt::get_optind()
{
  return optind;
}

void ActorExt::set_optind(int new_optind)
{
  optind = new_optind;
}

} // namespace smpi
} // namespace simgrid