
commit 90a265e3333d329a25d4db46558685cf1aa33347
[simgrid.git] / src / smpi / internals / smpi_process.cpp
/* Copyright (c) 2009-2017. The SimGrid Team. All rights reserved.          */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_process.hpp"
#include "mc/mc.h"
#include "private.hpp"
#include "simgrid/s4u/forward.hpp"
#include "smpi_comm.hpp"
#include "smpi_group.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/msg/msg_private.hpp"
#include "src/simix/smx_private.hpp"
#include <sstream>

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");

namespace simgrid{
namespace smpi{

using simgrid::s4u::Actor;
using simgrid::s4u::ActorPtr;

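/** @brief Creates the SMPI state of a process: its two mailboxes (a regular one and one reserved for small
 *  payloads), the mutex protecting them, and the OS-level timer used for time measurements. */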
Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
    : finalization_barrier_(finalization_barrier), process_(actor)
{
  std::stringstream mailboxname;
  std::stringstream mailboxname_small;

  mailboxname           << std::string("SMPI-")  << process_->getPid();
  mailboxname_small     << std::string("small-") << process_->getPid();
  mailbox_              = simgrid::s4u::Mailbox::byName(mailboxname.str());
  mailbox_small_        = simgrid::s4u::Mailbox::byName(mailboxname_small.str());
  mailboxes_mutex_      = xbt_mutex_init();
  timer_                = xbt_os_timer_new();
  state_                = SMPI_UNINITIALIZED;
  if (MC_is_active())
    MC_ignore_heap(timer_, xbt_os_timer_size());

#if HAVE_PAPI
  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
    // TODO: Implement host/process/thread based counters. This implementation
    // just always takes the values passed via "default", like this:
    // "default:COUNTER1:COUNTER2:COUNTER3;".
    auto it = units2papi_setup.find(papi_default_config_name);
    if (it != units2papi_setup.end()) {
      papi_event_set_    = it->second.event_set;
      papi_counter_data_ = it->second.counter_data;
      XBT_DEBUG("Setting PAPI set for process %lu", process_->getPid());
    } else {
      papi_event_set_ = PAPI_NULL;
      XBT_DEBUG("No PAPI set for process %lu", process_->getPid());
    }
  }
#endif
}

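/** @brief Binds this Process to the calling actor and consumes the SMPI-specific command-line arguments:
 *  (*argv)[1] holds the instance identifier, and the two extra arguments are then stripped from argc/argv. */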
void Process::set_data(int* argc, char*** argv)
{
  instance_id_      = std::string((*argv)[1]);
  comm_world_       = smpi_deployment_comm_world(instance_id_.c_str());
  msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id_.c_str());
  if (barrier != nullptr) // don't overwrite the current one if the instance has none
    finalization_barrier_ = barrier;

  process_                                                                  = simgrid::s4u::Actor::self();
  static_cast<simgrid::msg::ActorExt*>(process_->getImpl()->userdata)->data = this;

  if (*argc > 3) {
    memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
    (*argv)[(*argc) - 1] = nullptr;
    (*argv)[(*argc) - 2] = nullptr;
  }
  (*argc) -= 2;
  argc_ = argc;
  argv_ = argv;
  // attach this process as the receiver of its small-message mailbox
  mailbox_small_->setReceiver(process_);
  XBT_DEBUG("<%lu> New process in the game: %p", process_->getPid(), process_.get());
}

/** @brief Prepares the current process for termination. */
void Process::finalize()
{
  state_ = SMPI_FINALIZED;
  XBT_DEBUG("<%lu> Process left the game", process_->getPid());

  // This leads to an explosion of the search graph which cannot be reduced:
  if(MC_is_active() || MC_record_replay_is_active())
    return;
  // wait for all pending asynchronous comms to finish
  MSG_barrier_wait(finalization_barrier_);
}

/** @brief Check if a process is finalized */
int Process::finalized()
{
  return (state_ == SMPI_FINALIZED);
}

/** @brief Check if a process is initialized */
int Process::initialized()
{
  // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
  // single process ... ?
  return (state_ == SMPI_INITIALIZED);
}

/** @brief Mark a process as initialized (=MPI_Init called) */
void Process::mark_as_initialized()
{
  if (state_ != SMPI_FINALIZED)
    state_ = SMPI_INITIALIZED;
}

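/** @brief Replay flag: when true, this process is replaying a previously recorded trace instead of executing the
 *  corresponding calls for real. */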
void Process::set_replaying(bool value){
  if (state_ != SMPI_FINALIZED)
    replaying_ = value;
}

bool Process::replaying(){
  return replaying_;
}

void Process::set_user_data(void *data)
{
  data_ = data;
}

void *Process::get_user_data()
{
  return data_;
}

ActorPtr Process::process(){
  return process_;
}

/**
 * \brief Returns a structure that stores the location (filename + line number) of the last call to an MPI_* function.
 *
 * \see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* Process::call_location()
{
  return &trace_call_loc_;
}

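/** @brief Per-process copy of the global data segment, used when global-variable privatization
 *  (SMPI_PRIVATIZE_MMAP) is enabled. */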
void Process::set_privatized_region(smpi_privatization_region_t region)
{
  privatized_region_ = region;
}

smpi_privatization_region_t Process::privatized_region()
{
  return privatized_region_;
}

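/** @brief Returns the MPI_COMM_WORLD of this process's instance, or MPI_COMM_NULL if it has not been set yet. */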
MPI_Comm Process::comm_world()
{
  return comm_world_==nullptr ? MPI_COMM_NULL : *comm_world_;
}

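/** @brief Mailbox accessors: the regular mailbox carries ordinary point-to-point messages, while the "small"
 *  mailbox is reserved for small payloads (presumably delivered eagerly). */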
smx_mailbox_t Process::mailbox()
{
  return mailbox_->getImpl();
}

smx_mailbox_t Process::mailbox_small()
{
  return mailbox_small_->getImpl();
}

xbt_mutex_t Process::mailboxes_mutex()
{
  return mailboxes_mutex_;
}

#if HAVE_PAPI
int Process::papi_event_set()
{
  return papi_event_set_;
}

papi_counter_t& Process::papi_counters()
{
  return papi_counter_data_;
}
#endif

xbt_os_timer_t Process::timer()
{
  return timer_;
}

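/** @brief Stopwatch on the simulated clock: simulated_start() records the current simulated time and
 *  simulated_elapsed() returns the simulated time elapsed since that call. */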
void Process::simulated_start()
{
  simulated_ = SIMIX_get_clock();
}

double Process::simulated_elapsed()
{
  return SIMIX_get_clock() - simulated_;
}

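/** @brief Lazily creates MPI_COMM_SELF: a communicator whose group contains only this process, at rank 0. */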
MPI_Comm Process::comm_self()
{
  if(comm_self_==MPI_COMM_NULL){
    MPI_Group group = new Group(1);
    comm_self_ = new Comm(group, nullptr);
    group->set_mapping(process_, 0);
  }
  return comm_self_;
}

MPI_Comm Process::comm_intra()
{
  return comm_intra_;
}

void Process::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}

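/** @brief Sampling flag, presumably set by the SMPI_SAMPLE_* machinery while a sampled computation block is being
 *  benchmarked. */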
void Process::set_sampling(int s)
{
  sampling_ = s;
}

int Process::sampling()
{
  return sampling_;
}

msg_bar_t Process::finalization_barrier(){
  return finalization_barrier_;
}

int Process::return_value(){
  return return_value_;
}

void Process::set_return_value(int val){
  return_value_=val;
}

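/** @brief SMPI-side part of MPI_Init: registers the calling actor under its instance id and rank (taken from the
 *  extra command-line arguments), sets up memory privatization when requested, and hands argc/argv to set_data(). */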
void Process::init(int *argc, char ***argv){

  if (smpi_process_count() == 0) {
    xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
  }
  if (argc != nullptr && argv != nullptr) {
    simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
    proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);

    char* instance_id = (*argv)[1];
    try {
      int rank = std::stoi(std::string((*argv)[2]));
      smpi_deployment_register_process(instance_id, rank, proc);
    } catch (std::invalid_argument& ia) {
      throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
    }

    // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
    // this up here so that I can set the privatized region before the switch.
    Process* process = smpi_process_remote(proc);
    int my_proc_id   = proc->getPid();
    if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
      /* Now using the segment index of this process  */
      process->set_privatized_region(smpi_init_global_memory_segment_process());
      /* Done at the process's creation */
      SMPI_switch_data_segment(my_proc_id);
    }

    process->set_data(argc, argv);
  }
  xbt_assert(smpi_process(),
      "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
      "Although it's required by MPI-2, this is currently not supported by SMPI.");
}

}
}