/* Copyright (c) 2010-2018. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_comm.hpp"

#include "simgrid/s4u/Host.hpp"
#include "smpi_coll.hpp"
#include "smpi_datatype.hpp"
#include "smpi_process.hpp"
#include "smpi_request.hpp"
#include "smpi_status.hpp"
#include "smpi_win.hpp"
#include "src/simix/smx_host_private.hpp"
#include "src/simix/smx_private.hpp"

#include <algorithm> // std::sort, std::fill_n
#include <climits>   // INT_MAX
#include <vector>

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");

simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;

using simgrid::s4u::ActorPtr;

/* Support for cartesian topology was added, but there are 2 other types of topology: graph and dist graph. In order
 * to support them, we would have to add a field SMPI_Topo_type and replace the MPI_Topology field by a union. */

namespace simgrid{
namespace smpi{

std::unordered_map<int, smpi_key_elem> Comm::keyvals_;
int Comm::keyval_id_=0;

Comm::Comm(MPI_Group group, MPI_Topology topo) : group_(group), topo_(topo)
{
  refcount_        = 1;
  topoType_        = MPI_INVALID_TOPO;
  intra_comm_      = MPI_COMM_NULL;
  leaders_comm_    = MPI_COMM_NULL;
  is_uniform_      = 1;
  non_uniform_map_ = nullptr;
  leaders_map_     = nullptr;
  is_blocked_      = 0;
}
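
/* A freshly built Comm starts with refcount_ == 1 and takes ownership of both arguments:
 * the topology is deleted outright in Comm::destroy() (topologies are not refcounted),
 * while the group is released through Group::unref() once the last reference is gone. */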

void Comm::destroy(Comm* comm)
{
  if (comm == MPI_COMM_UNINITIALIZED){
    Comm::destroy(smpi_process()->comm_world());
    return;
  }
  delete comm->topo_; // there's no use count on topos
  Comm::unref(comm);
}

int Comm::dup(MPI_Comm* newcomm){
  if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(simgrid::s4u::Actor::self());
  }
  MPI_Group cp = new Group(this->group());
  (*newcomm)   = new Comm(cp, this->topo());
  int ret      = MPI_SUCCESS;

  if (not attributes()->empty()) {
    int flag;
    void* value_out;
    for (auto const& it : *attributes()) {
      smpi_key_elem elem = keyvals_.at(it.first);
      if (elem != nullptr && elem->copy_fn.comm_copy_fn != MPI_NULL_COPY_FN) {
        ret = elem->copy_fn.comm_copy_fn(this, it.first, nullptr, it.second, &value_out, &flag);
        if (ret != MPI_SUCCESS) {
          Comm::destroy(*newcomm);
          *newcomm = MPI_COMM_NULL;
          return ret;
        }
        if (flag)
          (*newcomm)->attributes()->insert({it.first, value_out});
      }
    }
  }
  return ret;
}
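
/* The attribute copy above implements MPI's attribute-caching interface. A minimal sketch of the
 * user-level calls that exercise it (illustrative user code, not part of this file):
 *
 *   int keyval;
 *   MPI_Comm_create_keyval(MPI_COMM_DUP_FN, MPI_COMM_NULL_DELETE_FN, &keyval, nullptr);
 *   MPI_Comm_set_attr(MPI_COMM_WORLD, keyval, &my_value); // stored in attributes()
 *   MPI_Comm newcomm;
 *   MPI_Comm_dup(MPI_COMM_WORLD, &newcomm); // Comm::dup() runs the registered copy_fn
 */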

MPI_Group Comm::group()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->group();
  return group_;
}

MPI_Topology Comm::topo() {
  return topo_;
}

int Comm::size()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->size();
  return group_->size();
}

int Comm::rank()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->rank();
  return group_->rank(simgrid::s4u::Actor::self());
}

void Comm::get_name(char* name, int* len)
{
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process()->comm_world()->get_name(name, len);
    return;
  }
  if(this == MPI_COMM_WORLD) {
    strncpy(name, "WORLD", 6); // copy the terminating NUL too
    *len = 5;
  } else {
    *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", this);
  }
}

void Comm::set_leaders_comm(MPI_Comm leaders){
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process()->comm_world()->set_leaders_comm(leaders);
    return;
  }
  leaders_comm_=leaders;
}

void Comm::set_intra_comm(MPI_Comm intra){
  intra_comm_=intra;
}

int* Comm::get_non_uniform_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_non_uniform_map();
  return non_uniform_map_;
}

int* Comm::get_leaders_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_leaders_map();
  return leaders_map_;
}

MPI_Comm Comm::get_leaders_comm(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->get_leaders_comm();
  return leaders_comm_;
}

MPI_Comm Comm::get_intra_comm(){
  if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD)
    return smpi_process()->comm_intra();
  else return intra_comm_;
}

int Comm::is_uniform(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->is_uniform();
  return is_uniform_;
}

int Comm::is_blocked(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->is_blocked();
  return is_blocked_;
}
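
/* split() below works as follows: rank 0 gathers every (color, key) pair, builds one group per
 * distinct color (members sorted by key, ties broken by original rank), and sends each non-root
 * member its group. For instance, with 4 ranks calling split(rank % 2, rank), ranks {0, 2} end
 * up in one communicator and ranks {1, 3} in the other, each ordered by key, i.e. by old rank. */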

MPI_Comm Comm::split(int color, int key)
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process()->comm_world()->split(color, key);
  int system_tag = 123;
  int* recvbuf;

  MPI_Group group_root = nullptr;
  MPI_Group group_out  = nullptr;
  MPI_Group group      = this->group();
  int rank             = this->rank();
  int size             = this->size();
  /* Gather all colors and keys on rank 0 */
  int* sendbuf = xbt_new(int, 2);
  sendbuf[0]   = color;
  sendbuf[1]   = key;
  if (rank == 0) {
    recvbuf = xbt_new(int, 2 * size);
  } else {
    recvbuf = nullptr;
  }
  Coll_gather_default::gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
  xbt_free(sendbuf);
  /* Do the actual job */
  if (rank == 0) {
    MPI_Group* group_snd = xbt_new(MPI_Group, size);
    std::vector<std::pair<int, int>> rankmap;
    rankmap.reserve(size);
    for (int i = 0; i < size; i++) {
      if (recvbuf[2 * i] != MPI_UNDEFINED) {
        rankmap.clear();
        for (int j = i + 1; j < size; j++) {
          if(recvbuf[2 * i] == recvbuf[2 * j]) {
            recvbuf[2 * j] = MPI_UNDEFINED;
            rankmap.push_back({recvbuf[2 * j + 1], j});
          }
        }
        /* Add self in the group */
        recvbuf[2 * i] = MPI_UNDEFINED;
        rankmap.push_back({recvbuf[2 * i + 1], i});
        std::sort(begin(rankmap), end(rankmap));
        group_out = new Group(rankmap.size());
        if (i == 0)
          group_root = group_out; /* Save root's group */
        for (unsigned j = 0; j < rankmap.size(); j++) {
          ActorPtr actor = group->actor(rankmap[j].second);
          group_out->set_mapping(actor, j);
        }
        MPI_Request* requests = xbt_new(MPI_Request, rankmap.size());
        int reqs              = 0;
        for (auto const& rank : rankmap) {
          if (rank.second != 0) {
            group_snd[reqs] = new Group(group_out);
            requests[reqs]  = Request::isend(&(group_snd[reqs]), 1, MPI_PTR, rank.second, system_tag, this);
            reqs++;
          }
        }
        if(i != 0 && group_out != MPI_COMM_WORLD->group() && group_out != MPI_GROUP_EMPTY)
          Group::unref(group_out);

        Request::waitall(reqs, requests, MPI_STATUS_IGNORE);
        xbt_free(requests);
      }
    }
    xbt_free(recvbuf);
    xbt_free(group_snd);
    group_out = group_root; /* exit with root's group */
  } else {
    if(color != MPI_UNDEFINED) {
      Request::recv(&group_out, 1, MPI_PTR, 0, system_tag, this, MPI_STATUS_IGNORE);
    } /* otherwise, exit with group_out == nullptr */
  }
  return group_out!=nullptr ? new Comm(group_out, nullptr) : MPI_COMM_NULL;
}
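
/* Note that split() ships raw Group* pointers across ranks using the internal MPI_PTR datatype.
 * This is only correct because all simulated ranks live inside the same simulator process, so a
 * pointer created on rank 0 can be dereferenced by every other rank. */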

void Comm::ref(){
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process()->comm_world()->ref();
    return;
  }
  group_->ref();
  refcount_++;
}

void Comm::cleanup_smp(){
  if (intra_comm_ != MPI_COMM_NULL)
    Comm::unref(intra_comm_);
  if (leaders_comm_ != MPI_COMM_NULL)
    Comm::unref(leaders_comm_);
  if (non_uniform_map_ != nullptr)
    xbt_free(non_uniform_map_);
  if (leaders_map_ != nullptr)
    delete[] leaders_map_;
}

void Comm::unref(Comm* comm){
  if (comm == MPI_COMM_UNINITIALIZED){
    Comm::unref(smpi_process()->comm_world());
    return;
  }
  comm->refcount_--;
  Group::unref(comm->group_);

  if(comm->refcount_==0){
    comm->cleanup_smp();
    comm->cleanup_attr<Comm>();
    delete comm;
  }
}
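
/* ref()/unref() keep the communicator and its group in lockstep: every ref() also takes a
 * reference on the group and every unref() drops one of each, so the group cannot be freed
 * while a communicator still points at it. */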

void Comm::init_smp(){
  int leader = -1;

  if (this == MPI_COMM_UNINITIALIZED)
    smpi_process()->comm_world()->init_smp();

  int comm_size = this->size();

  // If we are in replay - perform an ugly hack
  // tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls
  bool replaying = false; //cache data to set it back again after
  if(smpi_process()->replaying()){
    replaying = true;
    smpi_process()->set_replaying(false);
  }

  if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(simgrid::s4u::Actor::self());
  }
  //identify neighbours in comm
  //get the indices of all processes sharing the same simix host
  auto& process_list  = sg_host_self()->extension<simgrid::simix::Host>()->process_list;
  int intra_comm_size = 0;
  int min_index       = INT_MAX; // the minimum index will be the leader
  for (auto& actor : process_list) {
    int index = actor.pid;
    if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) { // Is this process in the current group?
      intra_comm_size++;
      if (index < min_index)
        min_index = index;
    }
  }
  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
  MPI_Group group_intra = new Group(intra_comm_size);
  int i = 0;
  for (auto& actor : process_list) {
    if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) {
      group_intra->set_mapping(actor.iface(), i);
      i++;
    }
  }

  MPI_Comm comm_intra = new Comm(group_intra, nullptr);
  leader=min_index;

  int* leaders_map = new int[comm_size];
  int* leader_list = new int[comm_size];
  std::fill_n(leaders_map, comm_size, 0);
  std::fill_n(leader_list, comm_size, -1);

  Coll_allgather_mpich::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);

  if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(simgrid::s4u::Actor::self());
  }

  if(leaders_map_==nullptr){
    leaders_map_= leaders_map;
  }else{
    delete[] leaders_map;
  }
  int leader_group_size = 0;
  for(i=0; i<comm_size; i++){
    int already_done = 0;
    for (int j = 0; j < leader_group_size; j++) {
      if (leaders_map_[i] == leader_list[j]) {
        already_done = 1;
      }
    }
    if (already_done == 0) {
      leader_list[leader_group_size] = leaders_map_[i];
      leader_group_size++;
    }
  }
  std::sort(leader_list, leader_list + leader_group_size);

  MPI_Group leaders_group = new Group(leader_group_size);

  MPI_Comm leader_comm = MPI_COMM_NULL;
  if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
    //create leader_communicator
    for (i=0; i< leader_group_size;i++)
      leaders_group->set_mapping(simgrid::s4u::Actor::by_pid(leader_list[i]), i);
    leader_comm = new Comm(leaders_group, nullptr);
    this->set_leaders_comm(leader_comm);
    this->set_intra_comm(comm_intra);

    // create intracommunicator
  }else{
    for (i=0; i< leader_group_size;i++)
      leaders_group->set_mapping(simgrid::s4u::Actor::by_pid(leader_list[i]), i);

    if(this->get_leaders_comm()==MPI_COMM_NULL){
      leader_comm = new Comm(leaders_group, nullptr);
      this->set_leaders_comm(leader_comm);
    }else{
      leader_comm=this->get_leaders_comm();
      Group::unref(leaders_group);
    }
    smpi_process()->set_comm_intra(comm_intra);
  }

  // Are the nodes uniform ? = same number of process/node
  int my_local_size=comm_intra->size();
  if(comm_intra->rank()==0) {
    int is_uniform       = 1;
    int* non_uniform_map = xbt_new0(int,leader_group_size);
    Coll_allgather_mpich::allgather(&my_local_size, 1, MPI_INT,
        non_uniform_map, 1, MPI_INT, leader_comm);
    for(i=0; i < leader_group_size; i++) {
      if(non_uniform_map[0] != non_uniform_map[i]) {
        is_uniform = 0;
        break;
      }
    }
    if(is_uniform==0 && this->is_uniform()!=0){
      non_uniform_map_ = non_uniform_map;
    }else{
      xbt_free(non_uniform_map);
    }
    is_uniform_=is_uniform;
  }
  Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );

  if (smpi_privatize_global_variables == SmpiPrivStrategies::Mmap) {
    // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(simgrid::s4u::Actor::self());
  }
  // Are the ranks blocked ? = allocated contiguously on the SMP nodes
  int is_blocked=1;
  int prev=this->group()->rank(comm_intra->group()->actor(0));
  for (i = 1; i < my_local_size; i++) {
    int that = this->group()->rank(comm_intra->group()->actor(i));
    if (that != prev + 1) {
      is_blocked = 0;
      break;
    }
    prev = that;
  }

  int global_blocked;
  Coll_allreduce_default::allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);

  if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
    if(this->rank()==0){
      is_blocked_ = global_blocked;
    }
  }else{
    is_blocked_=global_blocked;
  }
  delete[] leader_list;

  if(replaying)
    smpi_process()->set_replaying(true);
}
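
/* Worked example for the SMP detection above (illustrative pid numbering): with 4 ranks on
 * 2 hosts, ranks 0-1 on host A (pids 1 and 2) and ranks 2-3 on host B (pids 3 and 4), each rank
 * advertises the smallest pid of its host, so the allgather yields leaders_map = {1,1,3,3} and
 * deduplication gives leader_list = {1,3}. Both hosts run 2 processes, hence is_uniform_ = 1,
 * and since ranks are contiguous per host, is_blocked_ = 1 as well. */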

MPI_Comm Comm::f2c(int id) {
  if(id == -2) {
    return MPI_COMM_SELF;
  } else if(id==0){
    return MPI_COMM_WORLD;
  } else if(F2C::f2c_lookup() != nullptr && id >= 0) {
    char key[KEY_SIZE];
    const auto& lookup = F2C::f2c_lookup();
    auto comm          = lookup->find(get_key_id(key, id));
    return comm == lookup->end() ? MPI_COMM_NULL : static_cast<MPI_Comm>(comm->second);
  } else {
    return MPI_COMM_NULL;
  }
}

void Comm::free_f(int id) {
  char key[KEY_SIZE];
  F2C::f2c_lookup()->erase(id == 0 ? get_key(key, id) : get_key_id(key, id));
}

int Comm::add_f() {
  if(F2C::f2c_lookup()==nullptr){
    F2C::set_f2c_lookup(new std::unordered_map<std::string, F2C*>);
  }
  char key[KEY_SIZE];
  (*(F2C::f2c_lookup()))[this == MPI_COMM_WORLD ? get_key(key, F2C::f2c_id()) : get_key_id(key, F2C::f2c_id())] = this;
  f2c_id_increment();
  return F2C::f2c_id()-1;
}
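
/* Fortran handle translation in short: f2c(-2) yields MPI_COMM_SELF, f2c(0) yields
 * MPI_COMM_WORLD, and any other id is resolved through the lookup table that add_f()
 * fills, handing out ids in creation order. */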

void Comm::add_rma_win(MPI_Win win){
  rma_wins_.push_back(win);
}

void Comm::remove_rma_win(MPI_Win win){
  rma_wins_.remove(win);
}

void Comm::finish_rma_calls(){
  for (auto const& it : rma_wins_) {
    if(it->rank()==this->rank()){ //is it ours (for MPI_COMM_WORLD)?
      int finished = it->finish_comms();
      XBT_DEBUG("Barrier for rank %d - Finished %d RMA calls",this->rank(), finished);
    }
  }
}
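
/* Usage sketch for split_type() (illustrative user code, not part of this file):
 *
 *   MPI_Comm node_comm;
 *   MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &node_comm);
 *
 * In SMPI, every process running on the same simulated host ends up in the same node_comm,
 * which is the intra communicator computed by init_smp(); any other type yields MPI_COMM_NULL. */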

MPI_Comm Comm::split_type(int type, int key, MPI_Info info)
{
  if(type != MPI_COMM_TYPE_SHARED){
    return MPI_COMM_NULL;
  }
  this->init_smp();
  this->ref();
  this->get_intra_comm()->ref();
  return this->get_intra_comm();
}

}
}