[SMPI] Cosmetics in smpi_process.cpp: Correct indentation
[simgrid.git] src/smpi/internals/smpi_process.cpp
/* Copyright (c) 2009-2017. The SimGrid Team. All rights reserved.          */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_process.hpp"
#include "mc/mc.h"
#include "private.hpp"
#include "simgrid/s4u/forward.hpp"
#include "smpi_comm.hpp"
#include "smpi_group.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/msg/msg_private.hpp"
#include "src/simix/smx_private.hpp"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");

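/* Mailbox names are a 5-char prefix ("SMPI-" or "small") followed by the process
 * id rendered as sizeof(int) * 2 hex digits, plus the terminating NUL. */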
#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)

static char* get_mailbox_name(char* str, int process_id)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int>(sizeof(int) * 2), static_cast<unsigned>(process_id));
  return str;
}

static char* get_mailbox_name_small(char* str, int process_id)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int>(sizeof(int) * 2), static_cast<unsigned>(process_id));
  return str;
}

namespace simgrid {
namespace smpi {

using simgrid::s4u::Actor;
using simgrid::s4u::ActorPtr;

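/* Attach the SMPI bookkeeping to an actor: two per-process mailboxes (the "small"
 * one carries short messages sent eagerly), the mutex protecting them, an OS-level
 * timer, and optionally a PAPI hardware-counter set. */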
Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
  : finalization_barrier_(finalization_barrier)
{
  char name[MAILBOX_NAME_MAXLEN];
  process_              = actor;
  mailbox_              = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, process_->getPid()));
  mailbox_small_        = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, process_->getPid()));
  mailboxes_mutex_      = xbt_mutex_init();
  timer_                = xbt_os_timer_new();
  state_                = SMPI_UNINITIALIZED;
  if (MC_is_active()) // keep the model checker from comparing states on the timer's heap buffer
    MC_ignore_heap(timer_, xbt_os_timer_size());

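  /* When built with PAPI support, hardware performance counters can be attached
   * to each process through the "smpi/papi-events" configuration option. */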
#if HAVE_PAPI
  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
    // TODO: Implement host/process/thread based counters. This implementation
    // just always takes the values passed via "default", like this:
    // "default:COUNTER1:COUNTER2:COUNTER3;".
    auto it = units2papi_setup.find(papi_default_config_name);
    if (it != units2papi_setup.end()) {
      papi_event_set_    = it->second.event_set;
      papi_counter_data_ = it->second.counter_data;
      XBT_DEBUG("Setting PAPI set for process %lu", process_->getPid());
    } else {
      papi_event_set_ = PAPI_NULL;
      XBT_DEBUG("No PAPI set for process %lu", process_->getPid());
    }
  }
#endif
}

void Process::set_data(int* argc, char*** argv)
{
  instance_id_      = std::string((*argv)[1]);
  comm_world_       = smpi_deployment_comm_world(instance_id_.c_str());
  msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id_.c_str());
  if (barrier != nullptr) // don't overwrite the current one if the instance has none
    finalization_barrier_ = barrier;

  process_                                                                  = simgrid::s4u::Actor::self();
  static_cast<simgrid::msg::ActorExt*>(process_->getImpl()->userdata)->data = this;

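  /* Shift argv down by two entries, dropping the extra arguments added by the
   * SMPI deployment (see Process::init). */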
  if (*argc > 3) {
    memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
    (*argv)[(*argc) - 1] = nullptr;
    (*argv)[(*argc) - 2] = nullptr;
  }
  (*argc) -= 2;
  argc_ = argc;
  argv_ = argv;
  // set the process attached to the mailbox
  mailbox_small_->setReceiver(simgrid::s4u::Actor::self());
  XBT_DEBUG("<%lu> New process in the game: %p", process_->getPid(), process_.get());
}

/** @brief Prepares the current process for termination. */
void Process::finalize()
{
  state_ = SMPI_FINALIZED;
  XBT_DEBUG("<%lu> Process left the game", process_->getPid());

  // Under model checking, waiting on the barrier leads to an explosion of the search graph which cannot be reduced:
  if (MC_is_active() || MC_record_replay_is_active())
    return;
  // wait for all pending asynchronous comms to finish
  MSG_barrier_wait(finalization_barrier_);
}

/** @brief Check if a process is finalized */
int Process::finalized()
{
  return (state_ == SMPI_FINALIZED);
}

/** @brief Check if a process is initialized */
int Process::initialized()
{
  // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
  // single process ... ?
  return (state_ == SMPI_INITIALIZED);
}

/** @brief Mark a process as initialized (=MPI_Init called) */
void Process::mark_as_initialized()
{
  if (state_ != SMPI_FINALIZED)
    state_ = SMPI_INITIALIZED;
}

void Process::set_replaying(bool value)
{
  if (state_ != SMPI_FINALIZED)
    replaying_ = value;
}

bool Process::replaying()
{
  return replaying_;
}


void Process::set_user_data(void* data)
{
  data_ = data;
}

void* Process::get_user_data()
{
  return data_;
}

ActorPtr Process::process()
{
  return process_;
}

/**
 * \brief Returns a structure that stores the location (filename + line number) of the last calls to MPI_* functions.
 *
 * \see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* Process::call_location()
{
  return &trace_call_loc_;
}

void Process::set_privatized_region(smpi_privatization_region_t region)
{
  privatized_region_ = region;
}

smpi_privatization_region_t Process::privatized_region()
{
  return privatized_region_;
}

MPI_Comm Process::comm_world()
{
  return comm_world_ == nullptr ? MPI_COMM_NULL : *comm_world_;
}

smx_mailbox_t Process::mailbox()
{
  return mailbox_->getImpl();
}

smx_mailbox_t Process::mailbox_small()
{
  return mailbox_small_->getImpl();
}

xbt_mutex_t Process::mailboxes_mutex()
{
  return mailboxes_mutex_;
}

#if HAVE_PAPI
int Process::papi_event_set()
{
  return papi_event_set_;
}

papi_counter_t& Process::papi_counters()
{
  return papi_counter_data_;
}
#endif

xbt_os_timer_t Process::timer()
{
  return timer_;
}

void Process::simulated_start()
{
  simulated_ = SIMIX_get_clock();
}

double Process::simulated_elapsed()
{
  return SIMIX_get_clock() - simulated_;
}

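/* MPI_COMM_SELF is built lazily on first use: a one-process group with this
 * actor mapped to rank 0. */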
MPI_Comm Process::comm_self()
{
  if (comm_self_ == MPI_COMM_NULL) {
    MPI_Group group = new Group(1);
    comm_self_      = new Comm(group, nullptr);
    group->set_mapping(process_, 0);
  }
  return comm_self_;
}

MPI_Comm Process::comm_intra()
{
  return comm_intra_;
}

void Process::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}

void Process::set_sampling(int s)
{
  sampling_ = s;
}

int Process::sampling()
{
  return sampling_;
}

msg_bar_t Process::finalization_barrier()
{
  return finalization_barrier_;
}

int Process::return_value()
{
  return return_value_;
}

void Process::set_return_value(int val)
{
  return_value_ = val;
}

void Process::init(int* argc, char*** argv)
{
  if (smpi_process_count() == 0) {
    xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
  }
  if (argc != nullptr && argv != nullptr) {
    simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
    proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);

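    /* smpirun passes the instance id and the rank as the first two application
     * arguments; use them to register this process with its deployment instance. */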
    char* instance_id = (*argv)[1];
    try {
      int rank = std::stoi(std::string((*argv)[2]));
      smpi_deployment_register_process(instance_id, rank, proc);
    } catch (std::invalid_argument& ia) {
      throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
    }

    // cheinrich: I'm not sure what the impact of SMPI_switch_data_segment on this call is. I moved
    // this up here so that I can set the privatized region before the switch.
    Process* process = smpi_process_remote(proc);
    int my_proc_id   = proc->getPid();
    if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
      /* Now using the segment index of this process */
      process->set_privatized_region(smpi_init_global_memory_segment_process());
      /* Done at the process's creation */
      SMPI_switch_data_segment(my_proc_id);
    }

    process->set_data(argc, argv);
  }
  xbt_assert(smpi_process(),
             "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
             "Although it's required by MPI-2, this is currently not supported by SMPI.");
}

} // namespace smpi
} // namespace simgrid