simgrid.git: src/smpi/internals/smpi_process.cpp
/* Copyright (c) 2009-2018. The SimGrid Team. All rights reserved.          */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_process.hpp"
#include "mc/mc.h"
#include "smpi_comm.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/msg/msg_private.hpp"
#include "src/simix/smx_private.hpp"

#if HAVE_PAPI
#include "papi.h"
extern std::string papi_default_config_name;
#endif

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");

namespace simgrid{
namespace smpi{

using simgrid::s4u::Actor;
using simgrid::s4u::ActorPtr;

Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
    : finalization_barrier_(finalization_barrier), actor_(actor)
{
  mailbox_         = simgrid::s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
  mailbox_small_   = simgrid::s4u::Mailbox::by_name("small-" + std::to_string(actor_->get_pid()));
  mailboxes_mutex_ = xbt_mutex_init();
  timer_           = xbt_os_timer_new();
  state_           = SmpiProcessState::UNINITIALIZED;
  if (MC_is_active())
    MC_ignore_heap(timer_, xbt_os_timer_size());

#if HAVE_PAPI
  if (not simgrid::config::get_value<std::string>("smpi/papi-events").empty()) {
    // TODO: Implement host/process/thread based counters. This implementation
    // just always takes the values passed via "default", like this:
    // "default:COUNTER1:COUNTER2:COUNTER3;".
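    // Illustrative example (not from upstream): with this format, a user could pass something
    // like --cfg=smpi/papi-events:"default:PAPI_TOT_INS:PAPI_TOT_CYC" on the command line;
    // the counter names here are just sample PAPI presets, not a fixed requirement.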
    auto it = units2papi_setup.find(papi_default_config_name);
    if (it != units2papi_setup.end()) {
      papi_event_set_    = it->second.event_set;
      papi_counter_data_ = it->second.counter_data;
      XBT_DEBUG("Setting PAPI set for process %li", actor->get_pid());
    } else {
      papi_event_set_ = PAPI_NULL;
      XBT_DEBUG("No PAPI set for process %li", actor->get_pid());
    }
  }
#endif
}

Process::~Process()
{
  if (comm_self_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_self_);
  if (comm_intra_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_intra_);
  xbt_os_timer_free(timer_);
  xbt_mutex_destroy(mailboxes_mutex_);
}

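/** @brief Binds this SMPI process to the calling actor and records its command line.
 *
 * The extra deployment arguments used by SMPI (instance id here, rank in Process::init()) are
 * consumed, so argc/argv are shifted before being stored. */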
void Process::set_data(int* argc, char*** argv)
{
  instance_id_      = std::string((*argv)[1]);
  comm_world_       = smpi_deployment_comm_world(instance_id_);
  msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id_);
  if (barrier != nullptr) // don't overwrite the current one if the instance has none
    finalization_barrier_ = barrier;

  actor_                                                                        = simgrid::s4u::Actor::self();
  static_cast<simgrid::msg::ActorExt*>(actor_->get_impl()->get_user_data())->data = this;

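  // Remove two entries from argv (the deployment-specific arguments consumed above and in
  // Process::init()) before handing argc/argv back to the application.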
  if (*argc > 3) {
    memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
    (*argv)[(*argc) - 1] = nullptr;
    (*argv)[(*argc) - 2] = nullptr;
  }
  (*argc) -= 2;
  argc_ = argc;
  argv_ = argv;
  // set the process attached to the mailbox
  mailbox_small_->set_receiver(actor_);
  XBT_DEBUG("<%ld> SMPI process has been initialized: %p", actor_->get_pid(), actor_.get());
}

/** @brief Prepares the current process for termination. */
void Process::finalize()
{
  state_ = SmpiProcessState::FINALIZED;
  XBT_DEBUG("<%ld> Process left the game", actor_->get_pid());

  // This leads to an explosion of the search graph which cannot be reduced:
  if(MC_is_active() || MC_record_replay_is_active())
    return;
  // wait for all pending asynchronous comms to finish
  MSG_barrier_wait(finalization_barrier_);
}

/** @brief Check if a process is finalized */
int Process::finalized()
{
  return (state_ == SmpiProcessState::FINALIZED);
}

/** @brief Check if a process is initialized */
int Process::initialized()
{
  // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
  // single process ... ?
  return (state_ == SmpiProcessState::INITIALIZED);
}

/** @brief Mark a process as initialized (=MPI_Init called) */
void Process::mark_as_initialized()
{
  if (state_ != SmpiProcessState::FINALIZED)
    state_ = SmpiProcessState::INITIALIZED;
}

void Process::set_replaying(bool value){
  if (state_ != SmpiProcessState::FINALIZED)
    replaying_ = value;
}

bool Process::replaying(){
  return replaying_;
}

void Process::set_user_data(void *data)
{
  data_ = data;
}

void *Process::get_user_data()
{
  return data_;
}

ActorPtr Process::get_actor()
{
  return actor_;
}

/**
 * \brief Returns a structure that stores the location (filename + line number) of the last calls to MPI_* functions.
 *
 * \see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* Process::call_location()
{
  return &trace_call_loc_;
}

void Process::set_privatized_region(smpi_privatization_region_t region)
{
  privatized_region_ = region;
}

smpi_privatization_region_t Process::privatized_region()
{
  return privatized_region_;
}

MPI_Comm Process::comm_world()
{
  return comm_world_==nullptr ? MPI_COMM_NULL : *comm_world_;
}

smx_mailbox_t Process::mailbox()
{
  return mailbox_->get_impl();
}

smx_mailbox_t Process::mailbox_small()
{
  return mailbox_small_->get_impl();
}

xbt_mutex_t Process::mailboxes_mutex()
{
  return mailboxes_mutex_;
}

#if HAVE_PAPI
int Process::papi_event_set()
{
  return papi_event_set_;
}

papi_counter_t& Process::papi_counters()
{
  return papi_counter_data_;
}
#endif

xbt_os_timer_t Process::timer()
{
  return timer_;
}

void Process::simulated_start()
{
  simulated_ = SIMIX_get_clock();
}

double Process::simulated_elapsed()
{
  return SIMIX_get_clock() - simulated_;
}

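/** @brief Returns MPI_COMM_SELF for this process, lazily creating on first use a one-member
 *         communicator whose rank 0 is mapped to this actor. */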
MPI_Comm Process::comm_self()
{
  if(comm_self_==MPI_COMM_NULL){
    MPI_Group group = new  Group(1);
    comm_self_ = new  Comm(group, nullptr);
    group->set_mapping(actor_, 0);
  }
  return comm_self_;
}

MPI_Comm Process::comm_intra()
{
  return comm_intra_;
}

void Process::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}

void Process::set_sampling(int s)
{
  sampling_ = s;
}

int Process::sampling()
{
  return sampling_;
}

void Process::init(int *argc, char ***argv){

  if (smpi_process_count() == 0) {
    xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
  }
  if (argc != nullptr && argv != nullptr) {
    simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
    proc->get_impl()->context_->set_cleanup(&MSG_process_cleanup_from_SIMIX);

    char* instance_id = (*argv)[1];
    try {
      int rank = std::stoi(std::string((*argv)[2]));
      smpi_deployment_register_process(instance_id, rank, proc);
    } catch (std::invalid_argument& ia) {
      throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
    }

    // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
    // this up here so that I can set the privatized region before the switch.
    Process* process = smpi_process_remote(proc);
    if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
      /* Now using the segment index of this process  */
      process->set_privatized_region(smpi_init_global_memory_segment_process());
      /* Done at the process's creation */
      SMPI_switch_data_segment(proc);
    }

    process->set_data(argc, argv);
  }
  xbt_assert(smpi_process(), "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
                             "Although it's required by MPI-2, this is currently not supported by SMPI. "
                             "Please use MPI_Init(&argc, &argv) as usual instead.");
}

int Process::get_optind(){
  return optind;
}
void Process::set_optind(int new_optind){
  optind=new_optind;
}

}
}