/* Copyright (c) 2010-2021. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_comm.hpp"
#include "smpi_coll.hpp"
#include "smpi_datatype.hpp"
#include "smpi_request.hpp"
#include "smpi_win.hpp"
#include "smpi_info.hpp"
#include "src/smpi/include/smpi_actor.hpp"
#include "src/surf/HostImpl.hpp"

#include <climits> // INT_MAX, used in find_intra_comm()

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");
simgrid::smpi::Comm smpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED=&smpi_MPI_COMM_UNINITIALIZED;
/* Setting MPI_COMM_WORLD to MPI_COMM_UNINITIALIZED (it's a variable)
 * is important because the implementation of MPI_Comm checks whether
 * "this == MPI_COMM_UNINITIALIZED"; if so, it uses smpi_process()->comm_world()
 * instead of "this".
 * This is basically how we only have one global variable but all processes have
 * different communicators (the one their SMPI instance uses).
 */
MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
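
// As a consequence, most methods below start with the same dispatch pattern:
//   if (this == MPI_COMM_UNINITIALIZED)
//     return smpi_process()->comm_world()->...;
// so that the shared placeholder transparently forwards to the calling process' own world.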
/* Support for cartesian topology was added, but there are 2 other types of topology: graph and dist graph. In order
 * to support them, we have to add a field SMPI_Topo_type, and replace the MPI_Topology field by a union. */

namespace simgrid {
namespace smpi {
std::unordered_map<int, smpi_key_elem> Comm::keyvals_;
int Comm::keyval_id_=0;
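
// A communicator id is agreed upon collectively: rank 0 draws a fresh value from
// a static counter and broadcasts it, so every member ends up storing the same id_.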
Comm::Comm(MPI_Group group, MPI_Topology topo, bool smp, int in_id)
    : group_(group), topo_(topo), is_smp_comm_(smp), id_(in_id)
{
  errhandler_->ref();
  // First creation of comm is done before SIMIX_run, so only do comms for others
  if (in_id == MPI_UNDEFINED && not smp && this->rank() != MPI_UNDEFINED) {
    int id;
    if (this->rank() == 0) {
      static int global_id_ = 0;
      id = global_id_;
      global_id_++;
    }
    colls::bcast(&id, 1, MPI_INT, 0, this);
    XBT_DEBUG("Communicator %p has id %d", this, id);
    id_ = id; // only set here, as we don't want to change it in the middle of the bcast
  }
}
void Comm::destroy(Comm* comm)
{
  if (comm == MPI_COMM_UNINITIALIZED) {
    Comm::destroy(smpi_process()->comm_world());
    return;
  }
  if (comm != MPI_COMM_WORLD)
    comm->mark_as_deleted();
  Comm::unref(comm);
}
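
// dup() copies the group and topology, then replays the user-registered attribute
// copy callbacks (C or Fortran flavor), as MPI_Comm_dup mandates. If a callback
// fails, the half-built duplicate is destroyed and the callback's error code returned.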
int Comm::dup(MPI_Comm* newcomm){
  if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(s4u::Actor::self());
  }
  auto* cp   = new Group(this->group());
  (*newcomm) = new Comm(cp, this->topo());

  int ret = MPI_SUCCESS;

  if (not attributes()->empty()) {
    int flag        = 0;
    void* value_out = nullptr;
    for (auto const& it : *attributes()) {
      smpi_key_elem elem = keyvals_.at(it.first);
      if (elem != nullptr) {
        if (elem->copy_fn.comm_copy_fn != MPI_NULL_COPY_FN &&
            elem->copy_fn.comm_copy_fn != MPI_COMM_DUP_FN)
          ret = elem->copy_fn.comm_copy_fn(this, it.first, elem->extra_state, it.second, &value_out, &flag);
        else if (elem->copy_fn.comm_copy_fn_fort != MPI_NULL_COPY_FN &&
                 *(int*)*elem->copy_fn.comm_copy_fn_fort != 1) {
          value_out = (int*)xbt_malloc(sizeof(int));
          elem->copy_fn.comm_copy_fn_fort(this, it.first, elem->extra_state, it.second, value_out, &flag, &ret);
        }
        if (ret != MPI_SUCCESS) {
          Comm::destroy(*newcomm);
          *newcomm = MPI_COMM_NULL;
          return ret;
        }
        if (elem->copy_fn.comm_copy_fn == MPI_COMM_DUP_FN ||
            ((elem->copy_fn.comm_copy_fn_fort != MPI_NULL_COPY_FN) && *(int*)*elem->copy_fn.comm_copy_fn_fort == 1)) {
          elem->refcount++;
          (*newcomm)->attributes()->insert({it.first, it.second});
        } else if (flag) {
          elem->refcount++;
          (*newcomm)->attributes()->insert({it.first, value_out});
        }
      }
    }
  }
  // duplicate info if present
  if (info_ != MPI_INFO_NULL)
    (*newcomm)->info_ = new simgrid::smpi::Info(info_);
  // duplicate errhandler
  if (errhandlers_ != nullptr) // MPI_COMM_WORLD, only grab our own
    (*newcomm)->set_errhandler(errhandlers_[this->rank()]);
  else
    (*newcomm)->set_errhandler(errhandler_);
  return ret;
}
int Comm::dup_with_info(MPI_Info info, MPI_Comm* newcomm){
  int ret = dup(newcomm);
  if (ret != MPI_SUCCESS)
    return ret;
  if ((*newcomm)->info_ != MPI_INFO_NULL) {
    simgrid::smpi::Info::unref((*newcomm)->info_);
    (*newcomm)->info_ = MPI_INFO_NULL;
  }
  if (info != MPI_INFO_NULL) {
    info->ref();
    (*newcomm)->info_ = info;
  }
  return ret;
}
MPI_Group Comm::group()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->group();
  return group_;
}
int Comm::size() const
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->size();
  return group_->size();
}
int Comm::rank() const
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->rank();
  return group_->rank(s4u::Actor::self());
}

int Comm::id() const
{
  return id_;
}
void Comm::get_name(char* name, int* len) const
{
  if (this == MPI_COMM_UNINITIALIZED) {
    smpi_process()->comm_world()->get_name(name, len);
    return;
  }
  if (this == MPI_COMM_WORLD && name_.empty()) {
    strncpy(name, "MPI_COMM_WORLD", 15);
    *len = 14;
  } else if (this == MPI_COMM_SELF && name_.empty()) {
    strncpy(name, "MPI_COMM_SELF", 14);
    *len = 13;
  } else {
    *len = snprintf(name, MPI_MAX_NAME_STRING+1, "%s", name_.c_str());
  }
}
std::string Comm::name() const
{
  int size;
  char name[MPI_MAX_NAME_STRING+1]; // +1: get_name() writes up to MPI_MAX_NAME_STRING chars plus the NUL
  this->get_name(name, &size);
  if (name[0] == '\0')
    return std::string("MPI_Comm");
  else
    return std::string(name);
}
void Comm::set_name (const char* name)
{
  if (this == MPI_COMM_UNINITIALIZED) {
    smpi_process()->comm_world()->set_name(name);
    return;
  }
  name_.replace (0, MPI_MAX_NAME_STRING+1, name);
}
void Comm::set_leaders_comm(MPI_Comm leaders){
  if (this == MPI_COMM_UNINITIALIZED) {
    smpi_process()->comm_world()->set_leaders_comm(leaders);
    return;
  }
  leaders_comm_ = leaders;
}
int* Comm::get_non_uniform_map() const
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_non_uniform_map();
  return non_uniform_map_;
}
int* Comm::get_leaders_map() const
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_leaders_map();
  return leaders_map_;
}
MPI_Comm Comm::get_leaders_comm() const
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_leaders_comm();
  return leaders_comm_;
}
MPI_Comm Comm::get_intra_comm() const
{
  if (this == MPI_COMM_UNINITIALIZED || this == MPI_COMM_WORLD)
    return smpi_process()->comm_intra();
  else
    return intra_comm_;
}
bool Comm::is_uniform() const
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->is_uniform();
  return is_uniform_ != 0;
}
bool Comm::is_blocked() const
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->is_blocked();
  return is_blocked_ != 0;
}
bool Comm::is_smp_comm() const
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->is_smp_comm();
  return is_smp_comm_;
}
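
// MPI_Comm_split: rank 0 gathers every (color, key) pair, builds one group per
// color (members ordered by key, ties broken by old rank), and ships each
// non-root participant its resulting group through point-to-point messages.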
MPI_Comm Comm::split(int color, int key)
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->split(color, key);
  int system_tag = -123;

  MPI_Group group_root = nullptr;
  MPI_Group group_out  = nullptr;
  MPI_Group group      = this->group();
  int myrank           = this->rank();
  int size             = this->size();
  /* Gather all colors and keys on rank 0 */
  const std::array<int, 2> sendbuf = {{color, key}};
  std::vector<int> recvbuf;
  if (myrank == 0)
    recvbuf.resize(2 * size);
  gather__default(sendbuf.data(), 2, MPI_INT, recvbuf.data(), 2, MPI_INT, 0, this);
  /* Do the actual job */
  if (myrank == 0) {
    std::vector<MPI_Group> group_snd(size);
    std::vector<std::pair<int, int>> rankmap;
    rankmap.reserve(size);
    for (int i = 0; i < size; i++) {
      if (recvbuf[2 * i] != MPI_UNDEFINED) {
        rankmap.clear();
        for (int j = i + 1; j < size; j++) {
          if (recvbuf[2 * i] == recvbuf[2 * j]) {
            recvbuf[2 * j] = MPI_UNDEFINED; // mark as already handled
            rankmap.emplace_back(recvbuf[2 * j + 1], j);
          }
        }
        /* Add self in the group */
        recvbuf[2 * i] = MPI_UNDEFINED;
        rankmap.emplace_back(recvbuf[2 * i + 1], i);
        std::sort(begin(rankmap), end(rankmap)); // order members by key, then by old rank
        group_out = new Group(rankmap.size());
        if (i == 0) {
          group_root = group_out; /* Save root's group */
        }
        for (unsigned j = 0; j < rankmap.size(); j++) {
          s4u::Actor* actor = group->actor(rankmap[j].second);
          group_out->set_mapping(actor, j);
        }
        std::vector<MPI_Request> requests(rankmap.size());
        int reqs = 0;
        for (auto const& rank : rankmap) {
          if (rank.second != 0) {
            group_snd[reqs] = new Group(group_out);
            requests[reqs]  = Request::isend(&(group_snd[reqs]), 1, MPI_PTR, rank.second, system_tag, this);
            reqs++;
          }
        }
        if (i != 0 && group_out != MPI_COMM_WORLD->group() && group_out != MPI_GROUP_EMPTY)
          Group::unref(group_out);

        Request::waitall(reqs, requests.data(), MPI_STATUS_IGNORE);
      }
    }
    group_out = group_root; /* exit with root's group */
  } else {
    if (color != MPI_UNDEFINED) {
      Request::recv(&group_out, 1, MPI_PTR, 0, system_tag, this, MPI_STATUS_IGNORE);
    } /* otherwise, exit with group_out == nullptr */
  }
  return group_out != nullptr ? new Comm(group_out, topo_) : MPI_COMM_NULL;
}
void Comm::ref(){
  if (this == MPI_COMM_UNINITIALIZED) {
    smpi_process()->comm_world()->ref();
    return;
  }
  group_->ref();
  refcount_++;
}
void Comm::cleanup_smp(){
  if (intra_comm_ != MPI_COMM_NULL)
    Comm::unref(intra_comm_);
  if (leaders_comm_ != MPI_COMM_NULL)
    Comm::unref(leaders_comm_);
  xbt_free(non_uniform_map_);
  delete[] leaders_map_;
}
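
// Communicators are reference-counted; the last unref() also releases the
// attached info, error handlers, group, and the SMP sub-communicators.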
void Comm::unref(Comm* comm){
  if (comm == MPI_COMM_UNINITIALIZED) {
    Comm::unref(smpi_process()->comm_world());
    return;
  }
  comm->refcount_--;

  if (comm->refcount_ == 0) {
    if (simgrid::smpi::F2C::lookup() != nullptr)
      F2C::free_f(comm->f2c_id());
    comm->cleanup_smp();
    comm->cleanup_attr<Comm>();
    if (comm->info_ != MPI_INFO_NULL)
      simgrid::smpi::Info::unref(comm->info_);
    if (comm->errhandlers_ != nullptr) {
      for (int i = 0; i < comm->size(); i++)
        if (comm->errhandlers_[i] != MPI_ERRHANDLER_NULL)
          simgrid::smpi::Errhandler::unref(comm->errhandlers_[i]);
      delete[] comm->errhandlers_;
    } else if (comm->errhandler_ != MPI_ERRHANDLER_NULL)
      simgrid::smpi::Errhandler::unref(comm->errhandler_);
  }
  Group::unref(comm->group_);
  if (comm->refcount_ == 0)
    delete comm;
}
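
// Build the communicator of all ranks of this communicator that run on the
// local host; the smallest pid on the node is reported as its leader.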
MPI_Comm Comm::find_intra_comm(int* leader){
  // get the indices of all processes sharing the same simix host
  int intra_comm_size = 0;
  int min_index       = INT_MAX; // the minimum index will be the leader
  sg_host_self()->get_impl()->foreach_actor([this, &intra_comm_size, &min_index](auto& actor) {
    int index = actor.get_pid();
    if (this->group()->rank(actor.get_ciface()) != MPI_UNDEFINED) { // Is this process in the current group?
      intra_comm_size++;
      if (index < min_index)
        min_index = index;
    }
  });
  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
  auto* group_intra = new Group(intra_comm_size);
  int i = 0;
  sg_host_self()->get_impl()->foreach_actor([this, group_intra, &i](auto& actor) {
    if (this->group()->rank(actor.get_ciface()) != MPI_UNDEFINED) {
      group_intra->set_mapping(actor.get_ciface(), i);
      i++;
    }
  });
  *leader = min_index;
  return new Comm(group_intra, nullptr, true);
}
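
// init_smp() computes the SMP-aware view of the communicator: it elects one
// leader per node, builds the intra-node and leaders communicators, then
// records whether the nodes are uniform (same process count everywhere) and
// whether the ranks are blocked (contiguous on each node) -- two properties
// used by the SMP-aware collective algorithms.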
void Comm::init_smp(){
  int leader = -1;
  int i      = 0;
  if (this == MPI_COMM_UNINITIALIZED)
    smpi_process()->comm_world()->init_smp();

  int comm_size = this->size();

  // If we are in replay - perform an ugly hack
  // tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls
  bool replaying = false; // cache data to set it back again after
  if (smpi_process()->replaying()) {
    replaying = true;
    smpi_process()->set_replaying(false);
  }

  if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(s4u::Actor::self());
  }
  // identify neighbors in comm
  MPI_Comm comm_intra = find_intra_comm(&leader);

  auto* leaders_map = new int[comm_size];
  auto* leader_list = new int[comm_size];
  std::fill_n(leaders_map, comm_size, 0);
  std::fill_n(leader_list, comm_size, -1);

  allgather__ring(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, this);

  if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(s4u::Actor::self());
  }

  if (leaders_map_ == nullptr) {
    leaders_map_ = leaders_map;
  } else {
    delete[] leaders_map;
  }
  int leader_group_size = 0;
  for (i = 0; i < comm_size; i++) {
    int already_done = 0;
    for (int j = 0; j < leader_group_size; j++) {
      if (leaders_map_[i] == leader_list[j]) {
        already_done = 1;
      }
    }
    if (already_done == 0) {
      leader_list[leader_group_size] = leaders_map_[i];
      leader_group_size++;
    }
  }
  xbt_assert(leader_group_size > 0);
  std::sort(leader_list, leader_list + leader_group_size);

  auto* leaders_group = new Group(leader_group_size);

  MPI_Comm leader_comm = MPI_COMM_NULL;
  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED && this != MPI_COMM_WORLD) {
    // create leader_communicator
    for (i = 0; i < leader_group_size; i++)
      leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
    leader_comm = new Comm(leaders_group, nullptr, true);
    this->set_leaders_comm(leader_comm);
    this->set_intra_comm(comm_intra);
  } else {
    // MPI_COMM_WORLD case: the intra-communicator is stored process-wide
    for (i = 0; i < leader_group_size; i++)
      leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);

    if (this->get_leaders_comm() == MPI_COMM_NULL) {
      leader_comm = new Comm(leaders_group, nullptr, true);
      this->set_leaders_comm(leader_comm);
    } else {
      leader_comm = this->get_leaders_comm();
      Group::unref(leaders_group);
    }
    smpi_process()->set_comm_intra(comm_intra);
  }

  // Are the nodes uniform? = same number of processes per node
  int my_local_size = comm_intra->size();
  if (comm_intra->rank() == 0) {
    int is_uniform       = 1;
    int* non_uniform_map = xbt_new0(int, leader_group_size);
    allgather__ring(&my_local_size, 1, MPI_INT, non_uniform_map, 1, MPI_INT, leader_comm);
    for (i = 0; i < leader_group_size; i++) {
      if (non_uniform_map[0] != non_uniform_map[i]) {
        is_uniform = 0;
        break;
      }
    }
    if (is_uniform == 0 && this->is_uniform()) {
      non_uniform_map_ = non_uniform_map;
    } else {
      xbt_free(non_uniform_map);
    }
    is_uniform_ = is_uniform;
  }
  bcast__scatter_LR_allgather(&is_uniform_, 1, MPI_INT, 0, comm_intra);

  if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(s4u::Actor::self());
  }
  // Are the ranks blocked? = allocated contiguously on the SMP nodes
  int is_blocked = 1;
  int prev       = this->group()->rank(comm_intra->group()->actor(0));
  for (i = 1; i < my_local_size; i++) {
    int that = this->group()->rank(comm_intra->group()->actor(i));
    if (that != prev + 1) {
      is_blocked = 0;
      break;
    }
    prev = that;
  }

  int global_blocked;
  allreduce__default(&is_blocked, &global_blocked, 1, MPI_INT, MPI_LAND, this);

  if (MPI_COMM_WORLD == MPI_COMM_UNINITIALIZED || this == MPI_COMM_WORLD) {
    if (this->rank() == 0) {
      is_blocked_ = global_blocked;
    }
  } else {
    is_blocked_ = global_blocked;
  }
  delete[] leader_list;

  if (replaying)
    smpi_process()->set_replaying(true);
}
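
// Fortran handle conversion: id 0 is reserved for MPI_COMM_WORLD and -2 for
// MPI_COMM_SELF; other ids are resolved through the F2C lookup table.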
MPI_Comm Comm::f2c(int id) {
  if (id == -2) {
    return MPI_COMM_SELF;
  } else if (id == 0) {
    return MPI_COMM_WORLD;
  } else if (F2C::lookup() != nullptr && id >= 0) {
    const auto& lookup = F2C::lookup();
    auto comm = lookup->find(id);
    return comm == lookup->end() ? MPI_COMM_NULL : static_cast<MPI_Comm>(comm->second);
  } else {
    return MPI_COMM_NULL;
  }
}
void Comm::free_f(int id) {
  F2C::lookup()->erase(id);
}

void Comm::add_rma_win(MPI_Win win){
  rma_wins_.push_back(win);
}

void Comm::remove_rma_win(MPI_Win win){
  rma_wins_.remove(win);
}
void Comm::finish_rma_calls() const
{
  for (auto const& it : rma_wins_) {
    if (it->rank() == this->rank()) { // is it ours (for MPI_COMM_WORLD)?
      int finished = it->finish_comms();
      XBT_DEBUG("Barrier for rank %d - Finished %d RMA calls", this->rank(), finished);
    }
  }
}
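
// The info object is created lazily on first access and handed out with one
// extra reference per call, which the caller is expected to release.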
MPI_Info Comm::info()
{
  if (info_ == MPI_INFO_NULL)
    info_ = new Info();
  info_->ref();
  return info_;
}
void Comm::set_info(MPI_Info info)
{
  if (info_ != MPI_INFO_NULL)
    simgrid::smpi::Info::unref(info_); // drop the previous info object, not the incoming one
  info_ = info;
  if (info_ != MPI_INFO_NULL)
    info->ref();
}
MPI_Errhandler Comm::errhandler()
{
  if (this != MPI_COMM_WORLD) {
    if (errhandler_ != MPI_ERRHANDLER_NULL)
      errhandler_->ref();
    return errhandler_;
  } else {
    if (errhandlers_ == nullptr)
      return MPI_ERRORS_ARE_FATAL;
    else {
      if (errhandlers_[this->rank()] != MPI_ERRHANDLER_NULL)
        errhandlers_[this->rank()]->ref();
      return errhandlers_[this->rank()];
    }
  }
}
void Comm::set_errhandler(MPI_Errhandler errhandler)
{
  if (this != MPI_COMM_WORLD) {
    if (errhandler_ != MPI_ERRHANDLER_NULL)
      simgrid::smpi::Errhandler::unref(errhandler_);
    errhandler_ = errhandler;
  } else {
    // MPI_COMM_WORLD is shared: each rank tracks its own handler in errhandlers_
    if (errhandlers_ == nullptr)
      errhandlers_ = new MPI_Errhandler[this->size()]{MPI_ERRHANDLER_NULL};
    if (errhandlers_[this->rank()] != MPI_ERRHANDLER_NULL)
      simgrid::smpi::Errhandler::unref(errhandlers_[this->rank()]);
    errhandlers_[this->rank()] = errhandler;
  }
  if (errhandler != MPI_ERRHANDLER_NULL)
    errhandler->ref();
}
MPI_Comm Comm::split_type(int type, int /*key*/, const Info*)
{
  // MPI_UNDEFINED can be given to some nodes... but we need them to still perform the smp part which is collective
  if (type != MPI_COMM_TYPE_SHARED && type != MPI_UNDEFINED) {
    return MPI_COMM_NULL;
  }
  int leader = 0;
  MPI_Comm res = this->find_intra_comm(&leader);
  if (type != MPI_UNDEFINED)
    return res;
  else {
    xbt_assert(res->refcount_ == 1); // ensure the next call to Comm::destroy really frees the comm
    Comm::destroy(res);
    return MPI_COMM_NULL;
  }
}
} // namespace smpi
} // namespace simgrid