src/smpi/internals/smpi_process.cpp
/* Copyright (c) 2009-2017. The SimGrid Team. All rights reserved.          */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_process.hpp"
#include "mc/mc.h"
#include "private.hpp"
#include "simgrid/s4u/forward.hpp"
#include "smpi_comm.hpp"
#include "smpi_group.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/msg/msg_private.hpp"
#include "src/simix/smx_private.hpp"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");

#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
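/* 5 chars for the "SMPI-"/"small" prefix, two hex digits per byte of an int, plus the NUL terminator */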

static char* get_mailbox_name(char* str, int process_id)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "SMPI-%0*x", static_cast<int>(sizeof(int) * 2), static_cast<unsigned>(process_id));
  return str;
}
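/* e.g. with a 32-bit int, process 42 gets the mailbox name "SMPI-0000002a" */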

static char* get_mailbox_name_small(char* str, int process_id)
{
  snprintf(str, MAILBOX_NAME_MAXLEN, "small%0*x", static_cast<int>(sizeof(int) * 2), static_cast<unsigned>(process_id));
  return str;
}

namespace simgrid{
namespace smpi{

using simgrid::s4u::Actor;
using simgrid::s4u::ActorPtr;

Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
  : finalization_barrier_(finalization_barrier)
{
  char name[MAILBOX_NAME_MAXLEN];
  process_              = actor;
  mailbox_              = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, process_->getPid()));
  mailbox_small_        = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, process_->getPid()));
  mailboxes_mutex_      = xbt_mutex_init();
  timer_                = xbt_os_timer_new();
  state_                = SMPI_UNINITIALIZED;
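  /* Timer values vary from one run to the next; hide that heap block from the model checker
   * so that state comparison does not report spurious differences. */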
  if (MC_is_active())
    MC_ignore_heap(timer_, xbt_os_timer_size());

#if HAVE_PAPI
  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
    // TODO: Implement host/process/thread based counters. This implementation
    // just always takes the values passed via "default", like this:
    // "default:COUNTER1:COUNTER2:COUNTER3;".
    auto it = units2papi_setup.find(papi_default_config_name);
    if (it != units2papi_setup.end()) {
      papi_event_set_    = it->second.event_set;
      papi_counter_data_ = it->second.counter_data;
      XBT_DEBUG("Setting PAPI set for process %lu", process_->getPid());
    } else {
      papi_event_set_ = PAPI_NULL;
      XBT_DEBUG("No PAPI set for process %lu", process_->getPid());
    }
  }
#endif
}

void Process::set_data(int* argc, char*** argv)
{
    char* instance_id = (*argv)[1];
    comm_world_       = smpi_deployment_comm_world(instance_id);
    msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id);
    if (barrier != nullptr) // don't overwrite the current one if the instance has none
      finalization_barrier_ = barrier;
    instance_id_ = instance_id;

    process_ = simgrid::s4u::Actor::self();
    static_cast<simgrid::msg::ActorExt*>(process_->getImpl()->userdata)->data = this;

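    /* Strip the two deployment-injected arguments (instance id in argv[1], rank in argv[2];
     * see also Process::init) so that the application sees a plain argv. */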
    if (*argc > 3) {
      memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
      (*argv)[(*argc) - 1] = nullptr;
      (*argv)[(*argc) - 2] = nullptr;
    }
    (*argc) -= 2;
    argc_ = argc;
    argv_ = argv;
    // Declare this actor as the permanent receiver on the small-messages mailbox
    mailbox_small_->setReceiver(simgrid::s4u::Actor::self());
    XBT_DEBUG("<%lu> New process in the game: %p", process_->getPid(), static_cast<void*>(process_.get()));
}

/** @brief Prepares the current process for termination. */
void Process::finalize()
{
  state_ = SMPI_FINALIZED;
  XBT_DEBUG("<%lu> Process left the game", process_->getPid());

  // Under model checking or replay, skip the barrier: waiting here leads to an explosion of the
  // search graph which cannot be reduced.
  if (MC_is_active() || MC_record_replay_is_active())
    return;
  // wait for all pending asynchronous comms to finish
  MSG_barrier_wait(finalization_barrier_);
}

/** @brief Check if a process is finalized */
int Process::finalized()
{
  return (state_ == SMPI_FINALIZED);
}

/** @brief Check if a process is initialized */
int Process::initialized()
{
  // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
  // single process ... ?
  return (state_ == SMPI_INITIALIZED);
}

/** @brief Mark a process as initialized (=MPI_Init called) */
void Process::mark_as_initialized()
{
  if (state_ != SMPI_FINALIZED)
    state_ = SMPI_INITIALIZED;
}

void Process::set_replaying(bool value){
  if (state_ != SMPI_FINALIZED)
    replaying_ = value;
}

bool Process::replaying(){
  return replaying_;
}

void Process::set_user_data(void *data)
{
  data_ = data;
}

void *Process::get_user_data()
{
  return data_;
}

ActorPtr Process::process(){
  return process_;
}

/**
 * \brief Returns the structure storing the location (file name + line number) of the last call to an MPI_* function.
 *
 * \see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* Process::call_location()
{
  return &trace_call_loc_;
}

void Process::set_privatized_region(smpi_privatization_region_t region)
{
  privatized_region_ = region;
}

smpi_privatization_region_t Process::privatized_region()
{
  return privatized_region_;
}

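/* comm_world_ is a pointer into the instance data, so all ranks of an instance share the same
 * MPI_COMM_WORLD object; it stays null for a process that is not part of any SMPI instance. */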
MPI_Comm Process::comm_world()
{
  return comm_world_ == nullptr ? MPI_COMM_NULL : *comm_world_;
}

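/* Each process owns two mailboxes: the regular one for data transfers, and a "small" one with a
 * permanent receiver (declared in set_data) so that short, eagerly-sent messages match earlier. */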
smx_mailbox_t Process::mailbox()
{
  return mailbox_->getImpl();
}

smx_mailbox_t Process::mailbox_small()
{
  return mailbox_small_->getImpl();
}

xbt_mutex_t Process::mailboxes_mutex()
{
  return mailboxes_mutex_;
}

#if HAVE_PAPI
int Process::papi_event_set()
{
  return papi_event_set_;
}

papi_counter_t& Process::papi_counters()
{
  return papi_counter_data_;
}
#endif

xbt_os_timer_t Process::timer()
{
  return timer_;
}

void Process::simulated_start()
{
  simulated_ = SIMIX_get_clock();
}

double Process::simulated_elapsed()
{
  return SIMIX_get_clock() - simulated_;
}

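/** @brief Returns this process' MPI_COMM_SELF, lazily built on first use from a group of size 1 */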
MPI_Comm Process::comm_self()
{
  if (comm_self_ == MPI_COMM_NULL) {
    MPI_Group group = new Group(1);
    comm_self_      = new Comm(group, nullptr);
    group->set_mapping(process_, 0);
  }
  return comm_self_;
}

MPI_Comm Process::comm_intra()
{
  return comm_intra_;
}

void Process::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}

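/* The sampling flag marks that execution is currently inside an SMPI_SAMPLE_* region
 * (see smpi_bench), where computation kernels are benchmarked a few times and then skipped. */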
void Process::set_sampling(int s)
{
  sampling_ = s;
}

int Process::sampling()
{
  return sampling_;
}

msg_bar_t Process::finalization_barrier(){
  return finalization_barrier_;
}

int Process::return_value(){
  return return_value_;
}

void Process::set_return_value(int val){
  return_value_ = val;
}

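/** @brief Entry hook run in each MPI actor: registers the actor into its SMPI instance
 *  (instance id and rank are taken from argv) and sets up privatization before user code runs. */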
void Process::init(int* argc, char*** argv)
{
  if (smpi_process_count() == 0) {
    xbt_die("SimGrid was not initialized properly before entering MPI_Init. "
            "Aborting: please check the compilation process and use smpirun.");
  }
  if (argc != nullptr && argv != nullptr) {
    simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
    proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);

    char* instance_id = (*argv)[1];
    try {
      int rank = std::stoi(std::string((*argv)[2]));
      smpi_deployment_register_process(instance_id, rank, proc);
    } catch (std::invalid_argument& ia) {
      throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
    }

    // cheinrich: I'm not sure what impact SMPI_switch_data_segment has on this call. I moved
    // this up here so that I can set the privatized region before the switch.
    Process* process = smpi_process_remote(proc);
    int my_proc_id   = proc->getPid();
    if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
      /* Now using the segment index of this process */
      process->set_privatized_region(smpi_init_global_memory_segment_process());
      /* Done at the process's creation */
      SMPI_switch_data_segment(my_proc_id);
    }

    process->set_data(argc, argv);
  }
  xbt_assert(smpi_process(),
             "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
             "Although MPI-2 requires supporting nullptr arguments, SMPI currently does not.");
}
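
/* A minimal sketch of how an application reaches this code; the exact wrapper chain varies
 * across SimGrid versions, so take the names below as an illustration only:
 *
 *   int MPI_Init(int* argc, char*** argv) {
 *     simgrid::smpi::Process::init(argc, argv);  // per-rank bookkeeping (this file)
 *     smpi_process()->mark_as_initialized();     // flips state_ to SMPI_INITIALIZED
 *     return MPI_SUCCESS;
 *   }
 */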

} // namespace smpi
} // namespace simgrid