/* Copyright (c) 2009-2017. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
6 #include "smpi_process.hpp"
9 #include "simgrid/s4u/forward.hpp"
10 #include "smpi_comm.hpp"
11 #include "smpi_group.hpp"
12 #include "src/mc/mc_replay.hpp"
13 #include "src/msg/msg_private.hpp"
14 #include "src/simix/smx_private.hpp"
17 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
22 using simgrid::s4u::Actor;
23 using simgrid::s4u::ActorPtr;
// Constructor: binds an SMPI process object to its s4u actor.
// Creates the two per-process mailboxes ("SMPI-<pid>" for regular transfers,
// "small-<pid>" for small/eager messages), the mutex protecting them, an OS
// timer (hidden from the model checker's heap inspection), and -- when the
// "smpi/papi-events" option is non-empty -- the process's PAPI counter set.
// NOTE(review): this excerpt is a truncated listing -- the function's opening
// brace, the 'else' branch between the two PAPI outcomes, and the closing
// braces are missing; confirm against the full file.
25 Process::Process(ActorPtr actor, msg_bar_t finalization_barrier)
26 : finalization_barrier_(finalization_barrier), process_(actor)
28 std::stringstream mailboxname;
29 std::stringstream mailboxname_small;
// Mailbox names are derived from the actor's pid, so they are unique per process.
31 mailboxname << std::string("SMPI-") << process_->getPid();
32 mailboxname_small << std::string("small-") << process_->getPid();
33 mailbox_ = simgrid::s4u::Mailbox::byName(mailboxname.str());
34 mailbox_small_ = simgrid::s4u::Mailbox::byName(mailboxname_small.str());
35 mailboxes_mutex_ = xbt_mutex_init();
36 timer_ = xbt_os_timer_new();
37 state_ = SMPI_UNINITIALIZED;
// Keep the timer out of the model checker's state comparison (its value is
// wall-clock noise that would needlessly distinguish states).
39 MC_ignore_heap(timer_, xbt_os_timer_size());
42 if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
43 // TODO: Implement host/process/thread based counters. This implementation
44 // just always takes the values passed via "default", like this:
45 // "default:COUNTER1:COUNTER2:COUNTER3;".
46 auto it = units2papi_setup.find(papi_default_config_name);
47 if (it != units2papi_setup.end()) {
48 papi_event_set_ = it->second.event_set;
49 papi_counter_data_ = it->second.counter_data;
// NOTE(review): '%i' is fed a variable 'i' that is not declared in this
// excerpt -- presumably the process index from the enclosing (missing) code.
50 XBT_DEBUG("Setting PAPI set for process %i", i);
// NOTE(review): the 'else' keyword for this fallback branch is missing from
// the excerpt; PAPI_NULL marks "no counter set configured".
52 papi_event_set_ = PAPI_NULL;
53 XBT_DEBUG("No PAPI set for process %i", i);
// NOTE(review): this appears to be the tail of Process::~Process() -- the
// signature line is missing from the excerpt. It releases the resources
// acquired in the constructor: the OS timer and the mailbox mutex.
61 xbt_os_timer_free(timer_);
62 xbt_mutex_destroy(mailboxes_mutex_);
// Attaches this Process to its actor's command line: records the instance id
// (argv[1]), resolves that instance's MPI_COMM_WORLD and finalization barrier,
// links 'this' into the underlying MSG actor's user data, then strips the two
// SMPI bookkeeping arguments (instance id and rank) out of argv so the
// application only sees its own arguments.
65 void Process::set_data(int* argc, char*** argv)
67 instance_id_ = std::string((*argv)[1]);
68 comm_world_ = smpi_deployment_comm_world(instance_id_.c_str());
69 msg_bar_t barrier = smpi_deployment_finalization_barrier(instance_id_.c_str());
70 if (barrier != nullptr) // don't overwrite the current one if the instance has none
71 finalization_barrier_ = barrier;
73 process_ = simgrid::s4u::Actor::self();
// Make smpi_process()-style lookups find this object from the MSG actor.
74 static_cast<simgrid::msg::ActorExt*>(process_->getImpl()->userdata)->data = this;
// Shift argv left by two slots (dropping argv[1]=instance id, argv[2]=rank)
// and null-terminate the shortened vector.
// NOTE(review): the line updating *argc itself is not visible in this excerpt
// -- confirm the count is decremented in the full file.
77 memmove(&(*argv)[0], &(*argv)[2], sizeof(char*) * (*argc - 2));
78 (*argv)[(*argc) - 1] = nullptr;
79 (*argv)[(*argc) - 2] = nullptr;
84 // set the process attached to the mailbox
85 mailbox_small_->setReceiver(process_);
86 XBT_DEBUG("<%lu> New process in the game: %p", process_->getPid(), process_.get());
89 /** @brief Prepares the current process for termination. */
90 void Process::finalize()
92 state_ = SMPI_FINALIZED;
93 XBT_DEBUG("<%lu> Process left the game", process_->getPid());
// Under model checking / replay, skip the collective finalization barrier:
95 // This leads to an explosion of the search graph which cannot be reduced:
// NOTE(review): the early-return statement of this guard is missing from the
// excerpt (presumably 'return;').
96 if(MC_is_active() || MC_record_replay_is_active())
98 // wait for all pending asynchronous comms to finish
99 MSG_barrier_wait(finalization_barrier_);
102 /** @brief Check if a process is finalized */
103 int Process::finalized()
// True once finalize() has flipped the state to SMPI_FINALIZED.
105 return (state_ == SMPI_FINALIZED);
108 /** @brief Check if a process is initialized */
109 int Process::initialized()
111 // TODO cheinrich: Check if we still need this. This should be a global condition, not for a
112 // single process ... ?
// True only in the SMPI_INITIALIZED state (i.e. after MPI_Init, before finalize).
113 return (state_ == SMPI_INITIALIZED);
116 /** @brief Mark a process as initialized (=MPI_Init called) */
117 void Process::mark_as_initialized()
// A finalized process must never transition back to initialized.
119 if (state_ != SMPI_FINALIZED)
120 state_ = SMPI_INITIALIZED;
// Toggle trace-replay mode for this process (guarded so a finalized process
// cannot be switched).
// NOTE(review): the bodies of the accessors below are missing from this
// truncated excerpt; only the signatures (and one guard line) are visible.
123 void Process::set_replaying(bool value){
124 if (state_ != SMPI_FINALIZED)
// Getter for the replay flag (body not visible in this excerpt).
128 bool Process::replaying(){
// Setter/getter pair for the opaque user-data pointer (bodies not visible).
132 void Process::set_user_data(void *data)
137 void *Process::get_user_data()
// Accessor for the underlying s4u actor (body not visible).
142 ActorPtr Process::process(){
// NOTE(review): the '/**' opener and '*/' closer of this doc block are
// missing from the excerpt.
147 * \brief Returns a structure that stores the location (filename + linenumber) of the last calls to MPI_* functions.
149 * \see smpi_trace_set_call_location
151 smpi_trace_call_location_t* Process::call_location()
153 return &trace_call_loc_;
// Setter/getter for the process's privatized global-memory region (used by
// the SMPI_PRIVATIZE_MMAP scheme, see Process::init below).
156 void Process::set_privatized_region(smpi_privatization_region_t region)
158 privatized_region_ = region;
161 smpi_privatization_region_t Process::privatized_region()
163 return privatized_region_;
// The instance's MPI_COMM_WORLD; MPI_COMM_NULL until set_data() resolved it.
166 MPI_Comm Process::comm_world()
168 return comm_world_==nullptr ? MPI_COMM_NULL : *comm_world_;
// Simix-level handle of the regular ("SMPI-<pid>") mailbox.
171 smx_mailbox_t Process::mailbox()
173 return mailbox_->getImpl();
// Simix-level handle of the small-message ("small-<pid>") mailbox.
176 smx_mailbox_t Process::mailbox_small()
178 return mailbox_small_->getImpl();
// Mutex serializing access to both mailboxes (created in the constructor).
181 xbt_mutex_t Process::mailboxes_mutex()
183 return mailboxes_mutex_;
// PAPI event-set handle configured in the constructor (PAPI_NULL if none).
187 int Process::papi_event_set()
189 return papi_event_set_;
// NOTE(review): declared as a free function yet it returns the member
// papi_counter_data_ -- in context this is presumably a Process method whose
// qualifier was lost in extraction; confirm against the full file.
192 papi_counter_t& smpi_process_papi_counters()
194 return papi_counter_data_;
// Per-process OS timer (body not visible in this truncated excerpt).
198 xbt_os_timer_t Process::timer()
// Records the current simulated clock as the reference point for
// simulated_elapsed().
203 void Process::simulated_start()
205 simulated_ = SIMIX_get_clock();
// Simulated seconds elapsed since the last simulated_start() call.
208 double Process::simulated_elapsed()
210 return SIMIX_get_clock() - simulated_;
// Lazily builds MPI_COMM_SELF for this process: a one-member communicator
// whose single group slot maps to this actor.
// NOTE(review): the 'return comm_self_;' tail and closing braces are missing
// from this truncated excerpt.
213 MPI_Comm Process::comm_self()
215 if(comm_self_==MPI_COMM_NULL){
216 MPI_Group group = new Group(1);
217 comm_self_ = new Comm(group, nullptr);
218 group->set_mapping(process_, 0);
// Accessors below: intra-node communicator, sampling flag, finalization
// barrier, and the process's return value.
// NOTE(review): several bodies are missing from this truncated excerpt; only
// signatures (and a few return statements) are visible.
223 MPI_Comm Process::comm_intra()
228 void Process::set_comm_intra(MPI_Comm comm)
233 void Process::set_sampling(int s)
238 int Process::sampling()
243 msg_bar_t Process::finalization_barrier(){
244 return finalization_barrier_;
247 int Process::return_value(){
248 return return_value_;
251 void Process::set_return_value(int val){
// Entry point called from MPI_Init: registers the calling actor with its SMPI
// instance (argv[1] = instance id, argv[2] = rank), sets up global-variable
// privatization when the MMAP scheme is active, then hands argc/argv to
// set_data() for final binding and argument cleanup.
// NOTE(review): this excerpt is truncated -- the 'try' opening the stoi/
// register block (the 'catch' at line 268 implies it) and several closing
// braces are missing; it also runs past the visible end of the chunk.
255 void Process::init(int *argc, char ***argv){
// smpirun is responsible for the pre-MPI_Init setup; bail out loudly if it
// did not run.
257 if (smpi_process_count() == 0) {
258 xbt_die("SimGrid was not initialized properly before entering MPI_Init. Aborting, please check compilation process and use smpirun\n");
260 if (argc != nullptr && argv != nullptr) {
261 simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
262 proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
264 char* instance_id = (*argv)[1];
// Parse the rank; a non-numeric argv[2] is rewrapped into a clearer error.
266 int rank = std::stoi(std::string((*argv)[2]));
267 smpi_deployment_register_process(instance_id, rank, proc);
268 } catch (std::invalid_argument& ia) {
269 throw std::invalid_argument(std::string("Invalid rank: ") + (*argv)[2]);
272 // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
273 // this up here so that I can set the privatized region before the switch.
274 Process* process = smpi_process_remote(proc);
275 int my_proc_id = proc->getPid();
276 if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
277 /* Now using the segment index of this process */
278 process->set_privatized_region(smpi_init_global_memory_segment_process());
279 /* Done at the process's creation */
280 SMPI_switch_data_segment(my_proc_id);
283 process->set_data(argc, argv);
// Sanity check: after set_data() the current actor must resolve to an SMPI
// process; a nullptr argc/argv from the caller would break that.
285 xbt_assert(smpi_process(),
286 "smpi_process() returned nullptr. You probably gave a nullptr parameter to MPI_Init. "
287 "Although it's required by MPI-2, this is currently not supported by SMPI.");