Logo AND Algorithmique Numérique Distribuée

Public GIT Repository
Make ActorImpl::userdata private.
[simgrid.git] / src / smpi / internals / smpi_process.cpp
1 /* Copyright (c) 2009-2018. The SimGrid Team. All rights reserved.          */
2
3 /* This program is free software; you can redistribute it and/or modify it
4  * under the terms of the license (GNU LGPL) which comes with this package. */
5
6 #include "smpi_process.hpp"
7 #include "mc/mc.h"
8 #include "smpi_comm.hpp"
9 #include "src/mc/mc_replay.hpp"
10 #include "src/msg/msg_private.hpp"
11 #include "src/simix/smx_private.hpp"
12
13 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
14
15 namespace simgrid{
16 namespace smpi{
17
18 using simgrid::s4u::Actor;
19 using simgrid::s4u::ActorPtr;
20
21 Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
22     : finalization_barrier_(finalization_barrier), process_(actor)
23 {
24   mailbox_         = simgrid::s4u::Mailbox::byName("SMPI-" + std::to_string(process_->get_pid()));
25   mailbox_small_   = simgrid::s4u::Mailbox::byName("small-" + std::to_string(process_->get_pid()));
26   mailboxes_mutex_ = xbt_mutex_init();
27   timer_           = xbt_os_timer_new();
28   state_           = SMPI_UNINITIALIZED;
29   if (MC_is_active())
30     MC_ignore_heap(timer_, xbt_os_timer_size());
31
32 #if HAVE_PAPI
33   if (simgrid::config::get_value<std::string>("smpi/papi-events")[0] != '\0') {
34     // TODO: Implement host/process/thread based counters. This implementation
35     // just always takes the values passed via "default", like this:
36     // "default:COUNTER1:COUNTER2:COUNTER3;".
37     auto it = units2papi_setup.find(papi_default_config_name);
38     if (it != units2papi_setup.end()) {
39       papi_event_set_    = it->second.event_set;
40       papi_counter_data_ = it->second.counter_data;
41       XBT_DEBUG("Setting PAPI set for process %i", i);
42     } else {
43       papi_event_set_ = PAPI_NULL;
44       XBT_DEBUG("No PAPI set for process %i", i);
45     }
46   }
47 #endif
48 }
49
/** Releases the per-process resources allocated in the constructor.
 *  comm_self_/comm_intra_ are only destroyed when they were actually created
 *  (comm_self() builds comm_self_ lazily; comm_intra_ is set via set_comm_intra()). */
Process::~Process()
{
  if (comm_self_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_self_);
  if (comm_intra_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_intra_);
  xbt_os_timer_free(timer_);
  xbt_mutex_destroy(mailboxes_mutex_);
}
59
/** Binds this Process to the currently-running actor and consumes the two
 *  SMPI-specific command-line arguments (argv[1] = instance id, argv[2] = rank),
 *  shifting the remaining arguments left so user code sees a clean argv. */
void Process::set_data(int* argc, char*** argv)
{
  instance_id_      = std::string((*argv)[1]);
  comm_world_       = smpi_deployment_comm_world(instance_id_);
  msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id_);
  if (barrier != nullptr) // don't overwrite the current one if the instance has none
    finalization_barrier_ = barrier;

  process_                                                                  = simgrid::s4u::Actor::self();
  // NOTE(review): reaches through the actor's userdata, assumed to be a msg::ActorExt,
  // to register this SMPI process — TODO confirm this stays valid once ActorImpl::userdata goes private.
  static_cast<simgrid::msg::ActorExt*>(process_->get_impl()->getUserData())->data = this;

  // Drop argv[1]/argv[2]: shift the (*argc - 2) remaining pointers to the front
  // and null out the now-unused tail slots.
  if (*argc > 3) {
    memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
    (*argv)[(*argc) - 1] = nullptr;
    (*argv)[(*argc) - 2] = nullptr;
  }
  (*argc) -= 2;
  argc_ = argc;
  argv_ = argv;
  // set the process attached to the mailbox
  mailbox_small_->setReceiver(process_);
  XBT_DEBUG("<%ld> SMPI process has been initialized: %p", process_->get_pid(), process_.get());
}
83
84 /** @brief Prepares the current process for termination. */
85 void Process::finalize()
86 {
87   state_ = SMPI_FINALIZED;
88   XBT_DEBUG("<%ld> Process left the game", process_->get_pid());
89
90   // This leads to an explosion of the search graph which cannot be reduced:
91   if(MC_is_active() || MC_record_replay_is_active())
92     return;
93   // wait for all pending asynchronous comms to finish
94   MSG_barrier_wait(finalization_barrier_);
95 }
96
97 /** @brief Check if a process is finalized */
98 int Process::finalized()
99 {
100   return (state_ == SMPI_FINALIZED);
101 }
102
103 /** @brief Check if a process is initialized */
104 int Process::initialized()
105 {
106   // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
107   // single process ... ?
108   return (state_ == SMPI_INITIALIZED);
109 }
110
111 /** @brief Mark a process as initialized (=MPI_Init called) */
112 void Process::mark_as_initialized()
113 {
114   if (state_ != SMPI_FINALIZED)
115     state_ = SMPI_INITIALIZED;
116 }
117
118 void Process::set_replaying(bool value){
119   if (state_ != SMPI_FINALIZED)
120     replaying_ = value;
121 }
122
123 bool Process::replaying(){
124   return replaying_;
125 }
126
127 void Process::set_user_data(void *data)
128 {
129   data_ = data;
130 }
131
132 void *Process::get_user_data()
133 {
134   return data_;
135 }
136
137 ActorPtr Process::process(){
138   return process_;
139 }
140
141 /**
142  * \brief Returns a structure that stores the location (filename + linenumber) of the last calls to MPI_* functions.
143  *
144  * \see smpi_trace_set_call_location
145  */
146 smpi_trace_call_location_t* Process::call_location()
147 {
148   return &trace_call_loc_;
149 }
150
151 void Process::set_privatized_region(smpi_privatization_region_t region)
152 {
153   privatized_region_ = region;
154 }
155
156 smpi_privatization_region_t Process::privatized_region()
157 {
158   return privatized_region_;
159 }
160
161 MPI_Comm Process::comm_world()
162 {
163   return comm_world_==nullptr ? MPI_COMM_NULL : *comm_world_;
164 }
165
166 smx_mailbox_t Process::mailbox()
167 {
168   return mailbox_->get_impl();
169 }
170
171 smx_mailbox_t Process::mailbox_small()
172 {
173   return mailbox_small_->get_impl();
174 }
175
176 xbt_mutex_t Process::mailboxes_mutex()
177 {
178   return mailboxes_mutex_;
179 }
180
#if HAVE_PAPI
/** PAPI event-set handle configured for this process (PAPI_NULL when none). */
int Process::papi_event_set()
{
  return papi_event_set_;
}

/** Mutable access to this process's PAPI counter data. */
papi_counter_t& Process::papi_counters()
{
  return papi_counter_data_;
}
#endif
192
193 xbt_os_timer_t Process::timer()
194 {
195   return timer_;
196 }
197
198 void Process::simulated_start()
199 {
200   simulated_ = SIMIX_get_clock();
201 }
202
203 double Process::simulated_elapsed()
204 {
205   return SIMIX_get_clock() - simulated_;
206 }
207
208 MPI_Comm Process::comm_self()
209 {
210   if(comm_self_==MPI_COMM_NULL){
211     MPI_Group group = new  Group(1);
212     comm_self_ = new  Comm(group, nullptr);
213     group->set_mapping(process_, 0);
214   }
215   return comm_self_;
216 }
217
218 MPI_Comm Process::comm_intra()
219 {
220   return comm_intra_;
221 }
222
223 void Process::set_comm_intra(MPI_Comm comm)
224 {
225   comm_intra_ = comm;
226 }
227
228 void Process::set_sampling(int s)
229 {
230   sampling_ = s;
231 }
232
233 int Process::sampling()
234 {
235   return sampling_;
236 }
237
238 msg_bar_t Process::finalization_barrier(){
239   return finalization_barrier_;
240 }
241
/** Entry point called from MPI_Init: registers the calling actor as an SMPI process.
 *  Expects smpirun's conventions: argv[1] is the instance id, argv[2] the rank.
 *  argc/argv may legitimately be null (MPI-2 allows it) but SMPI rejects that case. */
void Process::init(int *argc, char ***argv){

  if (smpi_process_count() == 0) {
    xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
  }
  if (argc != nullptr && argv != nullptr) {
    simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
    // Make sure the MSG cleanup runs when this actor's SIMIX context ends.
    proc->get_impl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);

    char* instance_id = (*argv)[1];
    try {
      int rank = std::stoi(std::string((*argv)[2]));
      smpi_deployment_register_process(instance_id, rank, proc);
    } catch (std::invalid_argument& ia) {
      // Re-throw with the offending argv value so the user sees what was wrong.
      throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
    }

    // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
    // this up here so that I can set the privatized region before the switch.
    Process* process = smpi_process_remote(proc);
    if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
      /* Now using the segment index of this process  */
      process->set_privatized_region(smpi_init_global_memory_segment_process());
      /* Done at the process's creation */
      SMPI_switch_data_segment(proc);
    }

    // Consumes argv[1]/argv[2] and binds the Process to the actor (see set_data()).
    process->set_data(argc, argv);
  }
  xbt_assert(smpi_process(), "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
                             "Although it's required by MPI-2, this is currently not supported by SMPI. "
                             "Please use MPI_Init(&argc, &argv) as usual instead.");
}
275
276 int Process::get_optind(){
277   return optind;
278 }
279 void Process::set_optind(int new_optind){
280   optind=new_optind;
281 }
282
283 }
284 }