// simgrid.git @ f5d50eaeaca9c531a753386d009d4704598be262 : src/smpi/internals/smpi_process.cpp
/* Copyright (c) 2009-2017. The SimGrid Team. All rights reserved.          */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_process.hpp"
#include "mc/mc.h"
#include "private.hpp"
#include "simgrid/s4u/forward.hpp"
#include "smpi_comm.hpp"
#include "smpi_group.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/msg/msg_private.hpp"
#include "src/simix/smx_private.hpp"
#include <sstream>

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");

namespace simgrid {
namespace smpi {

using simgrid::s4u::Actor;
using simgrid::s4u::ActorPtr;

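/* A Process instance carries the SMPI-side state of one simulated MPI rank: two per-rank
 * mailboxes named after the actor's pid ("SMPI-<pid>" and "small-<pid>", the latter
 * presumably dedicated to small/eager messages), a mutex protecting them, an OS timer,
 * the MPI lifecycle state and, when enabled, PAPI performance counters. */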
Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
  : finalization_barrier_(finalization_barrier)
{
  std::stringstream mailboxname, mailboxname_small;
  process_              = actor;
  mailboxname           << std::string("SMPI-")  << process_->getPid();
  mailboxname_small     << std::string("small-") << process_->getPid();
  mailbox_              = simgrid::s4u::Mailbox::byName(mailboxname.str());
  mailbox_small_        = simgrid::s4u::Mailbox::byName(mailboxname_small.str());
  mailboxes_mutex_      = xbt_mutex_init();
  timer_                = xbt_os_timer_new();
  state_                = SMPI_UNINITIALIZED;
  if (MC_is_active())
    MC_ignore_heap(timer_, xbt_os_timer_size()); // keep the OS timer out of the model checker's state comparison

#if HAVE_PAPI
  if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
    // TODO: Implement host/process/thread based counters. This implementation
    // just always takes the values passed via "default", like this:
    // "default:COUNTER1:COUNTER2:COUNTER3;".
    auto it = units2papi_setup.find(papi_default_config_name);
    if (it != units2papi_setup.end()) {
      papi_event_set_    = it->second.event_set;
      papi_counter_data_ = it->second.counter_data;
      XBT_DEBUG("Setting PAPI set for process %lu", process_->getPid());
    } else {
      papi_event_set_ = PAPI_NULL;
      XBT_DEBUG("No PAPI set for process %lu", process_->getPid());
    }
  }
#endif
}
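
/* Usage sketch (an assumption, mirroring how Process objects are looked up in
 * Process::init() at the bottom of this file): the instance bound to the current actor
 * is obtained through smpi_process(), the one bound to another actor through
 * smpi_process_remote(actor). For example:
 *
 *   Process* p = smpi_process();   // Process of the calling rank (assumed return type)
 *   if (!p->finalized())
 *     p->simulated_start();        // start measuring simulated time for this rank
 *
 * The snippet is only illustrative; the exact declarations live in the SMPI headers. */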

void Process::set_data(int* argc, char*** argv)
{
  instance_id_      = std::string((*argv)[1]);
  comm_world_       = smpi_deployment_comm_world(instance_id_.c_str());
  msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id_.c_str());
  if (barrier != nullptr) // don't overwrite the current one if the instance has none
    finalization_barrier_ = barrier;

  process_                                                                  = simgrid::s4u::Actor::self();
  static_cast<simgrid::msg::ActorExt*>(process_->getImpl()->userdata)->data = this;

  if (*argc > 3) {
    memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
    (*argv)[(*argc) - 1] = nullptr;
    (*argv)[(*argc) - 2] = nullptr;
  }
  (*argc) -= 2;
  argc_ = argc;
  argv_ = argv;
  // set the process attached to the mailbox
  mailbox_small_->setReceiver(simgrid::s4u::Actor::self());
  XBT_DEBUG("<%lu> New process in the game: %p", process_->getPid(), process_.get());
}
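
/* Worked example of the argument surgery performed by set_data() above (values are
 * hypothetical): the actor starts with the two SMPI bookkeeping arguments consumed by
 * Process::init() below (instance id in argv[1], rank in argv[2]), followed by the
 * application's own arguments. The memmove shifts everything two slots to the left and
 * argc shrinks by two:
 *
 *   before: argc = 4, argv = { "app", "instance0", "2", "--user-flag" }
 *   after : argc = 2, argv = { "2", "--user-flag", nullptr, nullptr }
 */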

/** @brief Prepares the current process for termination. */
void Process::finalize()
{
  state_ = SMPI_FINALIZED;
  XBT_DEBUG("<%lu> Process left the game", process_->getPid());

  // Waiting on the finalization barrier would blow up the model checker's search graph
  // beyond what reduction can handle, so skip it under MC or replay:
  if (MC_is_active() || MC_record_replay_is_active())
    return;
  // wait for all pending asynchronous comms to finish
  MSG_barrier_wait(finalization_barrier_);
}
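
/* Lifecycle of a rank as tracked by state_:
 *   SMPI_UNINITIALIZED  (set in the constructor)
 *     -> SMPI_INITIALIZED  (mark_as_initialized(), i.e. MPI_Init was called)
 *     -> SMPI_FINALIZED    (finalize() above, presumably reached from MPI_Finalize)
 * The accessors below only test the current state. */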

/** @brief Check if a process is finalized */
int Process::finalized()
{
  return (state_ == SMPI_FINALIZED);
}

/** @brief Check if a process is initialized */
int Process::initialized()
{
  // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
  // single process ... ?
  return (state_ == SMPI_INITIALIZED);
}

/** @brief Mark a process as initialized (=MPI_Init called) */
void Process::mark_as_initialized()
{
  if (state_ != SMPI_FINALIZED)
    state_ = SMPI_INITIALIZED;
}

void Process::set_replaying(bool value)
{
  if (state_ != SMPI_FINALIZED)
    replaying_ = value;
}

bool Process::replaying()
{
  return replaying_;
}

void Process::set_user_data(void* data)
{
  data_ = data;
}

void* Process::get_user_data()
{
  return data_;
}

ActorPtr Process::process()
{
  return process_;
}

/**
 * \brief Returns a structure that stores the location (filename + line number) of the last calls to MPI_* functions.
 *
 * \see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* Process::call_location()
{
  return &trace_call_loc_;
}

void Process::set_privatized_region(smpi_privatization_region_t region)
{
  privatized_region_ = region;
}

smpi_privatization_region_t Process::privatized_region()
{
  return privatized_region_;
}

MPI_Comm Process::comm_world()
{
  return comm_world_ == nullptr ? MPI_COMM_NULL : *comm_world_;
}

smx_mailbox_t Process::mailbox()
{
  return mailbox_->getImpl();
}

smx_mailbox_t Process::mailbox_small()
{
  return mailbox_small_->getImpl();
}

xbt_mutex_t Process::mailboxes_mutex()
{
  return mailboxes_mutex_;
}

#if HAVE_PAPI
int Process::papi_event_set()
{
  return papi_event_set_;
}

papi_counter_t& Process::papi_counters()
{
  return papi_counter_data_;
}
#endif

xbt_os_timer_t Process::timer()
{
  return timer_;
}

void Process::simulated_start()
{
  simulated_ = SIMIX_get_clock();
}

double Process::simulated_elapsed()
{
  return SIMIX_get_clock() - simulated_;
}
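
/* simulated_start()/simulated_elapsed() above measure a span of *simulated* time
 * (SIMIX_get_clock() is the simulation clock). Hypothetical use:
 *
 *   p->simulated_start();
 *   // ... some simulated computation or communication ...
 *   double secs = p->simulated_elapsed();   // elapsed simulated seconds
 */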

MPI_Comm Process::comm_self()
{
  if (comm_self_ == MPI_COMM_NULL) {
    MPI_Group group = new Group(1);
    comm_self_      = new Comm(group, nullptr);
    group->set_mapping(process_, 0);
  }
  return comm_self_;
}
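
/* MPI_COMM_SELF is thus built lazily on first request: a fresh group of size one,
 * mapping this actor to rank 0, wrapped into a new communicator and cached in
 * comm_self_ for subsequent calls. */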

MPI_Comm Process::comm_intra()
{
  return comm_intra_;
}

void Process::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}

void Process::set_sampling(int s)
{
  sampling_ = s;
}

int Process::sampling()
{
  return sampling_;
}

msg_bar_t Process::finalization_barrier()
{
  return finalization_barrier_;
}

int Process::return_value()
{
  return return_value_;
}

void Process::set_return_value(int val)
{
  return_value_ = val;
}

void Process::init(int* argc, char*** argv)
{
  if (smpi_process_count() == 0) {
    xbt_die("SimGrid was not initialized properly before entering MPI_Init. "
            "Aborting: please check your compilation process and use smpirun.\n");
  }
  if (argc != nullptr && argv != nullptr) {
    simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
    proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);

    char* instance_id = (*argv)[1];
    try {
      int rank = std::stoi(std::string((*argv)[2]));
      smpi_deployment_register_process(instance_id, rank, proc);
    } catch (const std::invalid_argument&) {
      throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
    }

    // cheinrich: I'm not sure what the impact of SMPI_switch_data_segment on this call is. I moved
    // this up here so that I can set the privatized region before the switch.
    Process* process = smpi_process_remote(proc);
    int my_proc_id   = proc->getPid();
    if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
      /* Now using the segment index of this process */
      process->set_privatized_region(smpi_init_global_memory_segment_process());
      /* Done at the process's creation */
      SMPI_switch_data_segment(my_proc_id);
    }

    process->set_data(argc, argv);
  }
  xbt_assert(smpi_process(),
             "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
             "Although it's required by MPI-2, this is currently not supported by SMPI.");
}
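
/* Presumably, Process::init() is reached from SMPI's MPI_Init implementation (see the
 * xbt_assert message above), with argv still carrying the instance id and rank that
 * smpirun's deployment passes to every rank; set_data() then strips them before the
 * application sees its own arguments. */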

} // namespace smpi
} // namespace simgrid