Logo AND Algorithmique Numérique Distribuée

Public GIT Repository
313324acb9d5f31990a38e333bbaeaba34d2d536
[simgrid.git] / src / smpi / internals / smpi_process.cpp
1 /* Copyright (c) 2009-2017. The SimGrid Team. All rights reserved.          */
2
3 /* This program is free software; you can redistribute it and/or modify it
4  * under the terms of the license (GNU LGPL) which comes with this package. */
5
6 #include "smpi_process.hpp"
7 #include "mc/mc.h"
8 #include "private.hpp"
9 #include "simgrid/s4u/forward.hpp"
10 #include "smpi_comm.hpp"
11 #include "smpi_group.hpp"
12 #include "src/mc/mc_replay.hpp"
13 #include "src/msg/msg_private.hpp"
14 #include "src/simix/smx_private.hpp"
15
16 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
17
/* Mailbox names are a 5-char prefix ("SMPI-" or "small") followed by the pid
 * in zero-padded hex (two hex digits per byte of an int), plus the NUL. */
#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)

/** Write the large-message mailbox name ("SMPI-<hex pid>") into `str` and return `str`. */
static char* get_mailbox_name(char* str, int process_id)
{
  const int hex_digits = static_cast<int>(sizeof(int) * 2); // two hex digits per byte
  snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", hex_digits, static_cast<unsigned>(process_id));
  return str;
}
25
26 static char* get_mailbox_name_small(char* str, int process_id)
27 {
28   snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int>(sizeof(int) * 2), static_cast<unsigned>(process_id));
29   return str;
30 }
31
32 namespace simgrid{
33 namespace smpi{
34
35 using simgrid::s4u::Actor;
36 using simgrid::s4u::ActorPtr;
37
38 Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
39   : finalization_barrier_(finalization_barrier)
40 {
41   char name[MAILBOX_NAME_MAXLEN];
42   process_              = actor;
43   mailbox_              = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, process_->getPid()));
44   mailbox_small_        = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, process_->getPid()));
45   mailboxes_mutex_      = xbt_mutex_init();
46   timer_                = xbt_os_timer_new();
47   state_                = SMPI_UNINITIALIZED;
48   if (MC_is_active())
49     MC_ignore_heap(timer_, xbt_os_timer_size());
50
51 #if HAVE_PAPI
52   if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
53     // TODO: Implement host/process/thread based counters. This implementation
54     // just always takes the values passed via "default", like this:
55     // "default:COUNTER1:COUNTER2:COUNTER3;".
56     auto it = units2papi_setup.find(papi_default_config_name);
57     if (it != units2papi_setup.end()) {
58       papi_event_set_    = it->second.event_set;
59       papi_counter_data_ = it->second.counter_data;
60       XBT_DEBUG("Setting PAPI set for process %i", i);
61     } else {
62       papi_event_set_ = PAPI_NULL;
63       XBT_DEBUG("No PAPI set for process %i", i);
64     }
65   }
66 #endif
67 }
68
69 void Process::set_data(int index, int* argc, char*** argv)
70 {
71     char* instance_id = (*argv)[1];
72     comm_world_       = smpi_deployment_comm_world(instance_id);
73     msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id);
74     if (barrier != nullptr) // don't overwrite the current one if the instance has none
75       finalization_barrier_ = barrier;
76     instance_id_ = instance_id;
77     index_       = index;
78
79     process_                                                       = simgrid::s4u::Actor::self();
80     static_cast<simgrid::msg::ActorExt*>(process_->getImpl()->userdata)->data = this;
81
82     if (*argc > 3) {
83       memmove(&(*argv)[0], &(*argv)[2], sizeof(char *) * (*argc - 2));
84       (*argv)[(*argc) - 1] = nullptr;
85       (*argv)[(*argc) - 2] = nullptr;
86     }
87     (*argc)-=2;
88     argc_ = argc;
89     argv_ = argv;
90     // set the process attached to the mailbox
91     mailbox_small_->setReceiver(simgrid::s4u::Actor::self());
92     XBT_DEBUG("<%d> New process in the game: %p", index_, process_);
93 }
94
95 /** @brief Prepares the current process for termination. */
96 void Process::finalize()
97 {
98   state_ = SMPI_FINALIZED;
99   XBT_DEBUG("<%d> Process left the game", index_);
100
101     // This leads to an explosion of the search graph which cannot be reduced:
102     if(MC_is_active() || MC_record_replay_is_active())
103       return;
104     // wait for all pending asynchronous comms to finish
105     MSG_barrier_wait(finalization_barrier_);
106 }
107
108 /** @brief Check if a process is finalized */
109 int Process::finalized()
110 {
111     if (index_ != MPI_UNDEFINED)
112       return (state_ == SMPI_FINALIZED);
113     else
114       return 0;
115 }
116
117 /** @brief Check if a process is initialized */
118 int Process::initialized()
119 {
120   // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
121   // single process ... ?
122   return ((index_ != MPI_UNDEFINED) && (state_ == SMPI_INITIALIZED));
123 }
124
125 /** @brief Mark a process as initialized (=MPI_Init called) */
126 void Process::mark_as_initialized()
127 {
128   if ((index_ != MPI_UNDEFINED) && (state_ != SMPI_FINALIZED))
129     state_ = SMPI_INITIALIZED;
130 }
131
132 void Process::set_replaying(bool value){
133   if ((index_ != MPI_UNDEFINED) && (state_ != SMPI_FINALIZED))
134     replaying_ = value;
135 }
136
137 bool Process::replaying(){
138   if (index_ != MPI_UNDEFINED)
139     return replaying_;
140   else
141     return false;
142 }
143
144 void Process::set_user_data(void *data)
145 {
146   data_ = data;
147 }
148
149 void *Process::get_user_data()
150 {
151   return data_;
152 }
153
154 ActorPtr Process::process(){
155   return process_;
156 }
157
158 /**
159  * \brief Returns a structure that stores the location (filename + linenumber) of the last calls to MPI_* functions.
160  *
161  * \see smpi_trace_set_call_location
162  */
163 smpi_trace_call_location_t* Process::call_location()
164 {
165   return &trace_call_loc_;
166 }
167
168 void Process::set_privatized_region(smpi_privatization_region_t region)
169 {
170   privatized_region_ = region;
171 }
172
173 smpi_privatization_region_t Process::privatized_region()
174 {
175   return privatized_region_;
176 }
177
178 int Process::index()
179 {
180   return index_;
181 }
182
183 MPI_Comm Process::comm_world()
184 {
185   return comm_world_==nullptr ? MPI_COMM_NULL : *comm_world_;
186 }
187
188 smx_mailbox_t Process::mailbox()
189 {
190   return mailbox_->getImpl();
191 }
192
193 smx_mailbox_t Process::mailbox_small()
194 {
195   return mailbox_small_->getImpl();
196 }
197
198 xbt_mutex_t Process::mailboxes_mutex()
199 {
200   return mailboxes_mutex_;
201 }
202
#if HAVE_PAPI
// PAPI event set configured in the constructor (PAPI_NULL when no set applies).
int Process::papi_event_set()
{
  return papi_event_set_;
}

// NOTE(review): this is written as a free function but returns papi_counter_data_,
// which is a Process member (set in the constructor) — as written this cannot
// compile when HAVE_PAPI is enabled. It presumably should be a member function;
// confirm against the declaration in smpi_process.hpp before fixing.
papi_counter_t& smpi_process_papi_counters()
{
  return papi_counter_data_;
}
#endif
214
215 xbt_os_timer_t Process::timer()
216 {
217   return timer_;
218 }
219
220 void Process::simulated_start()
221 {
222   simulated_ = SIMIX_get_clock();
223 }
224
225 double Process::simulated_elapsed()
226 {
227   return SIMIX_get_clock() - simulated_;
228 }
229
// Lazily build and return the MPI_COMM_SELF of this process: a communicator over a
// one-member group mapping this actor to rank 0.
MPI_Comm Process::comm_self()
{
  if(comm_self_==MPI_COMM_NULL){
    MPI_Group group = new  Group(1);
    comm_self_ = new  Comm(group, nullptr);
    // Mapping is installed after the Comm is created; keep this ordering as-is.
    group->set_mapping(process_, 0);
  }
  return comm_self_;
}
239
240 MPI_Comm Process::comm_intra()
241 {
242   return comm_intra_;
243 }
244
245 void Process::set_comm_intra(MPI_Comm comm)
246 {
247   comm_intra_ = comm;
248 }
249
250 void Process::set_sampling(int s)
251 {
252   sampling_ = s;
253 }
254
255 int Process::sampling()
256 {
257   return sampling_;
258 }
259
260 msg_bar_t Process::finalization_barrier(){
261   return finalization_barrier_;
262 }
263
264 int Process::return_value(){
265   return return_value_;
266 }
267
268 void Process::set_return_value(int val){
269   return_value_=val;
270 }
271
272 void Process::init(int *argc, char ***argv){
273
274   if (smpi_process_count() == 0) {
275     xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
276   }
277   if (argc != nullptr && argv != nullptr) {
278     simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
279     proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
280
281     char* instance_id = (*argv)[1];
282     try {
283       int rank = std::stoi(std::string((*argv)[2]));
284       smpi_deployment_register_process(instance_id, rank, proc);
285     } catch (std::invalid_argument& ia) {
286       throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
287     }
288
289     // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
290     // this up here so that I can set the privatized region before the switch.
291     Process* process = smpi_process_remote(proc);
292     int my_proc_id   = proc->getPid();
293     if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
294       /* Now using the segment index of this process  */
295       process->set_privatized_region(smpi_init_global_memory_segment_process());
296       /* Done at the process's creation */
297       SMPI_switch_data_segment(my_proc_id);
298     }
299
300     process->set_data(my_proc_id, argc, argv);
301   }
302   xbt_assert(smpi_process(),
303       "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
304       "Although it's required by MPI-2, this is currently not supported by SMPI.");
305 }
306
307 }
308 }