[simgrid.git] src/smpi/internals/smpi_actor.cpp
/* Copyright (c) 2009-2018. The SimGrid Team. All rights reserved.          */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "src/smpi/include/smpi_actor.hpp"
#include "mc/mc.h"
#include "smpi_comm.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/simix/smx_private.hpp"

#if HAVE_PAPI
#include "papi.h"
extern std::string papi_default_config_name;
#endif

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");

namespace simgrid {
namespace smpi {

using simgrid::s4u::Actor;
using simgrid::s4u::ActorPtr;

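/* Each simulated MPI rank gets two dedicated mailboxes named after its PID: a regular one ("SMPI-<pid>") for large
 * payloads and a small one ("small-<pid>") used for small messages. */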
ActorExt::ActorExt(ActorPtr actor, simgrid::s4u::Barrier* finalization_barrier)
    : finalization_barrier_(finalization_barrier), actor_(actor)
{
  mailbox_         = simgrid::s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
  mailbox_small_   = simgrid::s4u::Mailbox::by_name("small-" + std::to_string(actor_->get_pid()));
  mailboxes_mutex_ = xbt_mutex_init();
  timer_           = xbt_os_timer_new();
  state_           = SmpiProcessState::UNINITIALIZED;
  if (MC_is_active())
    MC_ignore_heap(timer_, xbt_os_timer_size());

#if HAVE_PAPI
  if (not simgrid::config::get_value<std::string>("smpi/papi-events").empty()) {
    // TODO: Implement host/process/thread based counters. This implementation
    // just always takes the values passed via "default", like this:
    // "default:COUNTER1:COUNTER2:COUNTER3;".
    auto it = units2papi_setup.find(papi_default_config_name);
    if (it != units2papi_setup.end()) {
      papi_event_set_    = it->second.event_set;
      papi_counter_data_ = it->second.counter_data;
      XBT_DEBUG("Setting PAPI set for process %li", actor->get_pid());
    } else {
      papi_event_set_ = PAPI_NULL;
      XBT_DEBUG("No PAPI set for process %li", actor->get_pid());
    }
  }
#endif
}

ActorExt::~ActorExt()
{
  if (comm_self_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_self_);
  if (comm_intra_ != MPI_COMM_NULL)
    simgrid::smpi::Comm::destroy(comm_intra_);
  xbt_os_timer_free(timer_);
  xbt_mutex_destroy(mailboxes_mutex_);
}

void ActorExt::set_data(int* argc, char*** argv)
{
  instance_id_                   = std::string((*argv)[1]);
  comm_world_                    = smpi_deployment_comm_world(instance_id_);
  simgrid::s4u::Barrier* barrier = smpi_deployment_finalization_barrier(instance_id_);
  if (barrier != nullptr) // don't overwrite the current one if the instance has none
    finalization_barrier_ = barrier;

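  /* smpirun passes the instance id and the rank as the first two arguments (see ActorExt::init below); strip two
   * entries from the argument vector so that the application only sees its own command line. */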
  if (*argc > 3) {
    memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
    (*argv)[(*argc) - 1] = nullptr;
    (*argv)[(*argc) - 2] = nullptr;
  }
  (*argc) -= 2;
  argc_ = argc;
  argv_ = argv;
  // set the process attached to the mailbox
  mailbox_small_->set_receiver(actor_);
  XBT_DEBUG("<%ld> SMPI process has been initialized: %p", actor_->get_pid(), actor_.get());
}

/** @brief Prepares the current process for termination. */
void ActorExt::finalize()
{
  state_ = SmpiProcessState::FINALIZED;
  XBT_DEBUG("<%ld> Process left the game", actor_->get_pid());

  // Under model checking or replay, waiting on the finalization barrier leads to an explosion of the search graph
  // which cannot be reduced:
  if (MC_is_active() || MC_record_replay_is_active())
    return;
  // wait for all pending asynchronous comms to finish
  finalization_barrier_->wait();
}

/** @brief Check if a process is finalized */
int ActorExt::finalized()
{
  return (state_ == SmpiProcessState::FINALIZED);
}

/** @brief Check if a process is initialized */
int ActorExt::initialized()
{
  // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
  // single process ... ?
  return (state_ == SmpiProcessState::INITIALIZED);
}

/** @brief Mark a process as initialized (=MPI_Init called) */
void ActorExt::mark_as_initialized()
{
  if (state_ != SmpiProcessState::FINALIZED)
    state_ = SmpiProcessState::INITIALIZED;
}

void ActorExt::set_replaying(bool value)
{
  if (state_ != SmpiProcessState::FINALIZED)
    replaying_ = value;
}

bool ActorExt::replaying()
{
  return replaying_;
}

ActorPtr ActorExt::get_actor()
{
  return actor_;
}

/**
 * @brief Returns a structure that stores the location (filename + linenumber) of the last calls to MPI_* functions.
 *
 * @see smpi_trace_set_call_location
 */
smpi_trace_call_location_t* ActorExt::call_location()
{
  return &trace_call_loc_;
}

void ActorExt::set_privatized_region(smpi_privatization_region_t region)
{
  privatized_region_ = region;
}

smpi_privatization_region_t ActorExt::privatized_region()
{
  return privatized_region_;
}

MPI_Comm ActorExt::comm_world()
{
  return comm_world_ == nullptr ? MPI_COMM_NULL : *comm_world_;
}

smx_mailbox_t ActorExt::mailbox()
{
  return mailbox_->get_impl();
}

smx_mailbox_t ActorExt::mailbox_small()
{
  return mailbox_small_->get_impl();
}

xbt_mutex_t ActorExt::mailboxes_mutex()
{
  return mailboxes_mutex_;
}

#if HAVE_PAPI
int ActorExt::papi_event_set()
{
  return papi_event_set_;
}

papi_counter_t& ActorExt::papi_counters()
{
  return papi_counter_data_;
}
#endif

xbt_os_timer_t ActorExt::timer()
{
  return timer_;
}

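/* simulated_start() records the current simulated clock; simulated_elapsed() then reports how much simulated time
 * has passed since that mark. */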
void ActorExt::simulated_start()
{
  simulated_ = SIMIX_get_clock();
}

double ActorExt::simulated_elapsed()
{
  return SIMIX_get_clock() - simulated_;
}

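/* MPI_COMM_SELF is built lazily, on first use: a one-member group mapping this actor to rank 0. */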
MPI_Comm ActorExt::comm_self()
{
  if (comm_self_ == MPI_COMM_NULL) {
    MPI_Group group = new Group(1);
    comm_self_      = new Comm(group, nullptr);
    group->set_mapping(actor_, 0);
  }
  return comm_self_;
}

MPI_Comm ActorExt::comm_intra()
{
  return comm_intra_;
}

void ActorExt::set_comm_intra(MPI_Comm comm)
{
  comm_intra_ = comm;
}

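/* Sampling flag, presumably toggled around SMPI_SAMPLE_* blocks, where computation kernels are benchmarked only a
 * few times and then extrapolated instead of being executed at every iteration. */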
void ActorExt::set_sampling(int s)
{
  sampling_ = s;
}

int ActorExt::sampling()
{
  return sampling_;
}

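/* Called when the application enters MPI_Init(): registers the calling actor as an MPI rank, using the instance id
 * and rank number that were passed as the first two command-line arguments. */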
void ActorExt::init(int* argc, char*** argv)
{
  if (smpi_process_count() == 0) {
    xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting: please check your compilation "
            "process and use smpirun.\n");
  }
  if (argc != nullptr && argv != nullptr) {
    simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
    proc->get_impl()->context_->set_cleanup(&SIMIX_process_cleanup);

    char* instance_id = (*argv)[1];
    try {
      int rank = std::stoi(std::string((*argv)[2]));
      smpi_deployment_register_process(instance_id, rank, proc);
    } catch (std::invalid_argument& ia) {
      throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
    }

    // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
    // this up here so that I can set the privatized region before the switch.
    ActorExt* process = smpi_process_remote(proc);
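    /* With MMAP privatization, every rank works on its own copy of the application's global variables: allocate this
     * rank's private segment and switch to it right away. */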
    if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
      /* Now using the segment index of this process  */
      process->set_privatized_region(smpi_init_global_memory_segment_process());
      /* Done at the process's creation */
      SMPI_switch_data_segment(proc);
    }

    process->set_data(argc, argv);
  }
  xbt_assert(smpi_process(), "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
                             "Although it's required by MPI-2, this is currently not supported by SMPI. "
                             "Please use MPI_Init(&argc, &argv) as usual instead.");
}

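/* Accessors for the libc global 'optind', so that each simulated rank can save and restore its own getopt state. */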
int ActorExt::get_optind()
{
  return optind;
}

void ActorExt::set_optind(int new_optind)
{
  optind = new_optind;
}

} // namespace smpi
} // namespace simgrid