Logo AND Algorithmique Numérique Distribuée

Public GIT Repository
2e173de0c1e1059f1def6674735bf08e3b53c8a0
[simgrid.git] / src / smpi / internals / smpi_process.cpp
1 /* Copyright (c) 2009-2018. The SimGrid Team. All rights reserved.          */
2
3 /* This program is free software; you can redistribute it and/or modify it
4  * under the terms of the license (GNU LGPL) which comes with this package. */
5
6 #include "smpi_process.hpp"
7 #include "mc/mc.h"
8 #include "private.hpp"
9 #include "simgrid/s4u/forward.hpp"
10 #include "smpi_comm.hpp"
11 #include "smpi_group.hpp"
12 #include "src/mc/mc_replay.hpp"
13 #include "src/msg/msg_private.hpp"
14 #include "src/simix/smx_private.hpp"
15
16 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
17
18 namespace simgrid{
19 namespace smpi{
20
21 using simgrid::s4u::Actor;
22 using simgrid::s4u::ActorPtr;
23
24 Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
25     : finalization_barrier_(finalization_barrier), process_(actor)
26 {
27   mailbox_         = simgrid::s4u::Mailbox::byName("SMPI-" + std::to_string(process_->getPid()));
28   mailbox_small_   = simgrid::s4u::Mailbox::byName("small-" + std::to_string(process_->getPid()));
29   mailboxes_mutex_ = xbt_mutex_init();
30   timer_           = xbt_os_timer_new();
31   state_           = SMPI_UNINITIALIZED;
32   if (MC_is_active())
33     MC_ignore_heap(timer_, xbt_os_timer_size());
34
35 #if HAVE_PAPI
36   if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
37     // TODO: Implement host/process/thread based counters. This implementation
38     // just always takes the values passed via "default", like this:
39     // "default:COUNTER1:COUNTER2:COUNTER3;".
40     auto it = units2papi_setup.find(papi_default_config_name);
41     if (it != units2papi_setup.end()) {
42       papi_event_set_    = it->second.event_set;
43       papi_counter_data_ = it->second.counter_data;
44       XBT_DEBUG("Setting PAPI set for process %i", i);
45     } else {
46       papi_event_set_ = PAPI_NULL;
47       XBT_DEBUG("No PAPI set for process %i", i);
48     }
49   }
50 #endif
51 }
52
53 Process::~Process()
54 {
55   if (comm_self_ != MPI_COMM_NULL)
56     simgrid::smpi::Comm::destroy(comm_self_);
57   if (comm_intra_ != MPI_COMM_NULL)
58     simgrid::smpi::Comm::destroy(comm_intra_);
59   xbt_os_timer_free(timer_);
60   xbt_mutex_destroy(mailboxes_mutex_);
61 }
62
/** @brief Bind this SMPI process to the calling s4u actor and finish setup from the command line.
 *
 *  smpirun prepends two SMPI-specific arguments to the actor's argv: the instance id at
 *  (*argv)[1] and the rank at (*argv)[2]. This method records the instance id, then strips
 *  both arguments so user code only ever sees its own arguments.
 *
 *  @param argc  pointer to the actor's argument count (decremented by 2 in place)
 *  @param argv  pointer to the actor's argument vector (shifted in place)
 */
void Process::set_data(int* argc, char*** argv)
{
  instance_id_      = std::string((*argv)[1]);
  comm_world_       = smpi_deployment_comm_world(instance_id_.c_str());
  msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id_.c_str());
  if (barrier != nullptr) // don't overwrite the current one if the instance has none
    finalization_barrier_ = barrier;

  // Attach ourselves to the current actor's MSG extension so smpi_process() can find us back
  process_                                                                  = simgrid::s4u::Actor::self();
  static_cast<simgrid::msg::ActorExt*>(process_->getImpl()->userdata)->data = this;

  // Shift argv left by 2 to drop the instance id and rank; NOTE(review): this overwrites
  // (*argv)[0] with the old (*argv)[2] — presumably intentional for smpirun's layout, confirm.
  // When *argc == 3 there is nothing left to shift, hence the `> 3` guard.
  if (*argc > 3) {
    memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
    (*argv)[(*argc) - 1] = nullptr;
    (*argv)[(*argc) - 2] = nullptr;
  }
  (*argc) -= 2;
  argc_ = argc;
  argv_ = argv;
  // set the process attached to the mailbox
  mailbox_small_->setReceiver(process_);
  XBT_DEBUG("<%ld> SMPI process has been initialized: %p", process_->getPid(), process_.get());
}
86
87 /** @brief Prepares the current process for termination. */
88 void Process::finalize()
89 {
90   state_ = SMPI_FINALIZED;
91   XBT_DEBUG("<%ld> Process left the game", process_->getPid());
92
93   // This leads to an explosion of the search graph which cannot be reduced:
94   if(MC_is_active() || MC_record_replay_is_active())
95     return;
96   // wait for all pending asynchronous comms to finish
97   MSG_barrier_wait(finalization_barrier_);
98 }
99
100 /** @brief Check if a process is finalized */
101 int Process::finalized()
102 {
103   return (state_ == SMPI_FINALIZED);
104 }
105
106 /** @brief Check if a process is initialized */
107 int Process::initialized()
108 {
109   // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
110   // single process ... ?
111   return (state_ == SMPI_INITIALIZED);
112 }
113
114 /** @brief Mark a process as initialized (=MPI_Init called) */
115 void Process::mark_as_initialized()
116 {
117   if (state_ != SMPI_FINALIZED)
118     state_ = SMPI_INITIALIZED;
119 }
120
121 void Process::set_replaying(bool value){
122   if (state_ != SMPI_FINALIZED)
123     replaying_ = value;
124 }
125
126 bool Process::replaying(){
127   return replaying_;
128 }
129
// Store an opaque per-process payload on behalf of the user; ownership stays with the caller.
void Process::set_user_data(void *data)
{
  data_ = data;
}

// Retrieve the opaque payload previously stored via set_user_data() (nullptr if none).
void *Process::get_user_data()
{
  return data_;
}

// The s4u actor this SMPI process is bound to.
ActorPtr Process::process(){
  return process_;
}
143
/**
 * \brief Returns a structure that stores the location (filename + linenumber) of the last calls to MPI_* functions.
 *
 * The returned pointer refers to a member of this Process and stays valid for its lifetime.
 *
 * \see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* Process::call_location()
{
  return &trace_call_loc_;
}
153
154 void Process::set_privatized_region(smpi_privatization_region_t region)
155 {
156   privatized_region_ = region;
157 }
158
159 smpi_privatization_region_t Process::privatized_region()
160 {
161   return privatized_region_;
162 }
163
164 MPI_Comm Process::comm_world()
165 {
166   return comm_world_==nullptr ? MPI_COMM_NULL : *comm_world_;
167 }
168
// Kernel-level handle of this process's regular mailbox.
smx_mailbox_t Process::mailbox()
{
  return mailbox_->getImpl();
}

// Kernel-level handle of the mailbox dedicated to small (eager) messages.
smx_mailbox_t Process::mailbox_small()
{
  return mailbox_small_->getImpl();
}

// Mutex protecting accesses to this process's mailboxes.
xbt_mutex_t Process::mailboxes_mutex()
{
  return mailboxes_mutex_;
}
183
#if HAVE_PAPI
// PAPI event-set identifier configured for this process (PAPI_NULL when none was set up).
int Process::papi_event_set()
{
  return papi_event_set_;
}

// Mutable access to the per-process PAPI counter values.
papi_counter_t& Process::papi_counters()
{
  return papi_counter_data_;
}
#endif
195
196 xbt_os_timer_t Process::timer()
197 {
198   return timer_;
199 }
200
201 void Process::simulated_start()
202 {
203   simulated_ = SIMIX_get_clock();
204 }
205
206 double Process::simulated_elapsed()
207 {
208   return SIMIX_get_clock() - simulated_;
209 }
210
211 MPI_Comm Process::comm_self()
212 {
213   if(comm_self_==MPI_COMM_NULL){
214     MPI_Group group = new  Group(1);
215     comm_self_ = new  Comm(group, nullptr);
216     group->set_mapping(process_, 0);
217   }
218   return comm_self_;
219 }
220
// Intra-node communicator used by SMP-aware collectives (MPI_COMM_NULL until set).
MPI_Comm Process::comm_intra()
{
  return comm_intra_;
}

// Install the intra-node communicator; the caller keeps ownership responsibilities.
void Process::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}

// Set the sampling state flag (used by the SMPI_SAMPLE_* macros).
void Process::set_sampling(int s)
{
  sampling_ = s;
}

// Current sampling state flag.
int Process::sampling()
{
  return sampling_;
}

// Barrier that all processes of this instance join when finalizing.
msg_bar_t Process::finalization_barrier(){
  return finalization_barrier_;
}
244
245 void Process::init(int *argc, char ***argv){
246
247   if (smpi_process_count() == 0) {
248     xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
249   }
250   if (argc != nullptr && argv != nullptr) {
251     simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
252     proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
253
254     char* instance_id = (*argv)[1];
255     try {
256       int rank = std::stoi(std::string((*argv)[2]));
257       smpi_deployment_register_process(instance_id, rank, proc);
258     } catch (std::invalid_argument& ia) {
259       throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
260     }
261
262     // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
263     // this up here so that I can set the privatized region before the switch.
264     Process* process = smpi_process_remote(proc);
265     int my_proc_id   = proc->getPid();
266     if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
267       /* Now using the segment index of this process  */
268       process->set_privatized_region(smpi_init_global_memory_segment_process());
269       /* Done at the process's creation */
270       SMPI_switch_data_segment(my_proc_id);
271     }
272
273     process->set_data(argc, argv);
274   }
275   xbt_assert(smpi_process(), "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
276                              "Although it's required by MPI-2, this is currently not supported by SMPI. "
277                              "Please use MPI_Init(&argc, &argv) as usual instead.");
278 }
279
280 }
281 }