/* Copyright (c) 2010-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include <climits>       /* INT_MAX, used below to pick the leader index */
#include <unordered_map> /* std::unordered_map, used for the keyval table */

#include <simgrid/s4u/host.hpp>

#include "src/simix/smx_private.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");
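/* MPI_COMM_UNINITIALIZED is a sentinel handed out before the process is fully set up:
 * every member function below starts by testing for it and, if found, re-dispatches
 * the call to the real MPI_COMM_WORLD of the calling process. */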
Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;
/* Support for cartesian topology was added, but there are 2 other types of topology: graph and dist graph. In order
 * to support them, we have to add a field MPIR_Topo_type, and replace the MPI_Topology field by a union. */
/* qsort() comparator for the (rank, key) pairs gathered by Comm::split(): order by key,
 * breaking ties by the original rank so that the resulting ordering is deterministic. */
static int smpi_compare_rankmap(const void *a, const void *b)
{
  const int* x = static_cast<const int*>(a);
  const int* y = static_cast<const int*>(b);

  if (x[1] != y[1])
    return (x[1] > y[1]) - (x[1] < y[1]);
  return (x[0] > y[0]) - (x[0] < y[0]);
}
/* Table of attribute keyvals shared by all communicators of this process */
std::unordered_map<int, smpi_key_elem> Comm::keyvals_;
int Comm::keyval_id_=0;
Comm::Comm(MPI_Group group, MPI_Topology topo) : group_(group), topo_(topo)
{
  refcount_        = 1;
  topoType_        = MPI_INVALID_TOPO;
  intra_comm_      = MPI_COMM_NULL;
  leaders_comm_    = MPI_COMM_NULL;
  is_uniform_      = 1;
  non_uniform_map_ = nullptr;
  leaders_map_     = nullptr;
  is_blocked_      = 0;
}
void Comm::destroy(Comm* comm)
{
  if (comm == MPI_COMM_UNINITIALIZED){
    Comm::destroy(smpi_process_comm_world());
    return;
  }
  delete comm->topo_; // there's no use count on topos
  Comm::unref(comm);
}
int Comm::dup(MPI_Comm* newcomm){
  if (smpi_privatize_global_variables) { //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  MPI_Group cp = new Group(this->group());
  (*newcomm)   = new Comm(cp, this->topo());
  int ret      = MPI_SUCCESS;

  if (attributes_ != nullptr) {
    (*newcomm)->attributes_  = xbt_dict_new_homogeneous(nullptr);
    xbt_dict_cursor_t cursor = nullptr;
    int* key;
    int flag;
    void* value_in;
    void* value_out;
    /* Run the user-provided copy callback (if any) on every attribute of the source communicator */
    xbt_dict_foreach (attributes_, cursor, key, value_in) {
      smpi_key_elem elem = keyvals_.at(*key);
      if (elem != nullptr && elem->copy_fn.comm_copy_fn != MPI_NULL_COPY_FN) {
        ret = elem->copy_fn.comm_copy_fn(this, *key, nullptr, value_in, &value_out, &flag);
        if (ret != MPI_SUCCESS) {
          Comm::destroy(*newcomm);
          *newcomm = MPI_COMM_NULL;
          xbt_dict_cursor_free(&cursor);
          return ret;
        }
        if (flag) /* the callback asked for this attribute to be copied to the new communicator */
          xbt_dict_set_ext((*newcomm)->attributes_, reinterpret_cast<const char*>(key), sizeof(int), value_out, nullptr);
      }
    }
  }
  return ret;
}
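/* A minimal sketch (not part of this file; names are illustrative) of the user-level
 * calls that drive the attribute-copy path above, assuming the standard MPI attribute API:
 *
 *   int my_copy(MPI_Comm c, int k, void* st, void* in, void* out, int* flag) {
 *     *static_cast<void**>(out) = in; // share the value with the duplicate
 *     *flag = 1;                      // 1 = copy this attribute; 0 = skip it
 *     return MPI_SUCCESS;
 *   }
 *   int keyval;
 *   MPI_Comm_create_keyval(my_copy, MPI_COMM_NULL_DELETE_FN, &keyval, nullptr);
 *   MPI_Comm_set_attr(MPI_COMM_WORLD, keyval, &some_value);
 *   MPI_Comm_dup(MPI_COMM_WORLD, &copy); // ends up in Comm::dup(), which runs my_copy
 */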
MPI_Group Comm::group()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->group();
  return group_;
}
MPI_Topology Comm::topo() {
  return topo_;
}
int Comm::size()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->size();
  return group_->size();
}
int Comm::rank()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->rank();
  return group_->rank(smpi_process_index());
}
void Comm::get_name (char* name, int* len)
{
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process_comm_world()->get_name(name, len);
    return;
  }
  if(this == MPI_COMM_WORLD) {
    strncpy(name, "WORLD", 5);
    name[5] = '\0'; // strncpy() does not NUL-terminate when it runs out of space
    *len = 5;
  } else {
    *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", this);
  }
}
void Comm::set_leaders_comm(MPI_Comm leaders){
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process_comm_world()->set_leaders_comm(leaders);
    return;
  }
  leaders_comm_=leaders;
}
void Comm::set_intra_comm(MPI_Comm leaders){
  intra_comm_=leaders;
}
int* Comm::get_non_uniform_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->get_non_uniform_map();
  return non_uniform_map_;
}
int* Comm::get_leaders_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->get_leaders_map();
  return leaders_map_;
}
MPI_Comm Comm::get_leaders_comm(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->get_leaders_comm();
  return leaders_comm_;
}
MPI_Comm Comm::get_intra_comm(){
  if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD)
    return smpi_process_get_comm_intra();
  else
    return intra_comm_;
}
int Comm::is_uniform(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->is_uniform();
  return is_uniform_;
}
int Comm::is_blocked(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->is_blocked();
  return is_blocked_;
}
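/* MPI_Comm_split: processes that pass the same color end up in the same new communicator,
 * ranked by their key (ties broken by their rank in the parent communicator). The scheme
 * below centralizes the work on rank 0: it gathers every (color, key) pair, builds one
 * group per color, and sends each non-root member its group as a raw pointer (MPI_PTR),
 * which only works because SMPI processes share the simulator's address space. */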
MPI_Comm Comm::split(int color, int key)
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->split(color, key);
  int system_tag = 123;
  int* recvbuf;

  MPI_Group group_root = nullptr;
  MPI_Group group_out  = nullptr;
  MPI_Group group      = this->group();
  int rank             = this->rank();
  int size             = this->size();
  /* Gather all colors and keys on rank 0 */
  int* sendbuf = xbt_new(int, 2);
  sendbuf[0]   = color;
  sendbuf[1]   = key;
  if (rank == 0) {
    recvbuf = xbt_new(int, 2 * size);
  } else {
    recvbuf = nullptr;
  }
  Coll_gather_default::gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
  xbt_free(sendbuf);
  /* Do the actual job */
  if (rank == 0) {
    MPI_Group* group_snd = xbt_new(MPI_Group, size);
    int* rankmap         = xbt_new(int, 2 * size);
    for (int i = 0; i < size; i++) {
      if (recvbuf[2 * i] != MPI_UNDEFINED) {
        /* Pull every later process with the same color into this group */
        int count = 0;
        for (int j = i + 1; j < size; j++) {
          if(recvbuf[2 * i] == recvbuf[2 * j]) {
            recvbuf[2 * j] = MPI_UNDEFINED; /* only handle each color once */
            rankmap[2 * count]     = j;
            rankmap[2 * count + 1] = recvbuf[2 * j + 1];
            count++;
          }
        }
        /* Add self in the group */
        recvbuf[2 * i]         = MPI_UNDEFINED;
        rankmap[2 * count]     = i;
        rankmap[2 * count + 1] = recvbuf[2 * i + 1];
        count++;
        qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap);
        group_out = new Group(count);
        if (i == 0)
          group_root = group_out; /* Save root's group */
        for (int j = 0; j < count; j++) {
          int index = group->index(rankmap[2 * j]);
          group_out->set_mapping(index, j);
        }
        /* Ship the group to its non-root members as a raw pointer */
        MPI_Request* requests = xbt_new(MPI_Request, count);
        int reqs              = 0;
        for (int j = 0; j < count; j++) {
          if(rankmap[2 * j] != 0) {
            group_snd[reqs]=new Group(group_out);
            requests[reqs] = Request::isend(&(group_snd[reqs]), 1, MPI_PTR, rankmap[2 * j], system_tag, this);
            reqs++;
          }
        }
        if(i != 0 && group_out != MPI_COMM_WORLD->group() && group_out != MPI_GROUP_EMPTY)
          Group::unref(group_out); /* don't release root's own group: it is returned below */

        Request::waitall(reqs, requests, MPI_STATUS_IGNORE);
        xbt_free(requests);
      }
    }
    xbt_free(recvbuf);
    xbt_free(rankmap);
    xbt_free(group_snd);
    group_out = group_root; /* exit with root's group */
  } else {
    if(color != MPI_UNDEFINED) {
      Request::recv(&group_out, 1, MPI_PTR, 0, system_tag, this, MPI_STATUS_IGNORE);
    } /* otherwise, exit with group_out == nullptr */
  }
  return group_out!=nullptr ? new Comm(group_out, nullptr) : MPI_COMM_NULL;
}
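/* For example, with 4 ranks each calling split(rank % 2, rank), the code above yields
 * two communicators, {0, 2} and {1, 3}, each re-ranked 0..1 in key order; a rank that
 * passes MPI_UNDEFINED as its color gets MPI_COMM_NULL back instead. */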
void Comm::ref(){
  if (this == MPI_COMM_UNINITIALIZED){
    smpi_process_comm_world()->ref();
    return;
  }
  group_->ref();
  refcount_++;
}
void Comm::cleanup_attributes(){
  if(attributes_ !=nullptr){
    xbt_dict_cursor_t cursor = nullptr;
    int* key;
    void* value;
    int flag;
    xbt_dict_foreach (attributes_, cursor, key, value) {
      try {
        smpi_key_elem elem = keyvals_.at(*key);
        if (elem != nullptr && elem->delete_fn.comm_delete_fn != nullptr)
          elem->delete_fn.comm_delete_fn(this, *key, value, &flag);
      }catch(const std::out_of_range& oor) {
        //already deleted, not a problem;
      }
    }
    xbt_dict_free(&attributes_);
  }
}
void Comm::cleanup_smp(){
  if (intra_comm_ != MPI_COMM_NULL)
    Comm::unref(intra_comm_);
  if (leaders_comm_ != MPI_COMM_NULL)
    Comm::unref(leaders_comm_);
  if (non_uniform_map_ != nullptr)
    xbt_free(non_uniform_map_);
  if (leaders_map_ != nullptr)
    xbt_free(leaders_map_);
}
void Comm::unref(Comm* comm){
  if (comm == MPI_COMM_UNINITIALIZED){
    Comm::unref(smpi_process_comm_world());
    return;
  }
  comm->refcount_--;
  Group::unref(comm->group_);

  if(comm->refcount_==0){
    comm->cleanup_smp();
    comm->cleanup_attributes();
    delete comm;
  }
}
static int compare_ints (const void *a, const void *b)
{
  const int *da = static_cast<const int *>(a);
  const int *db = static_cast<const int *>(b);

  return static_cast<int>(*da > *db) - static_cast<int>(*da < *db);
}
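/* init_smp() discovers the SMP structure of the platform for this communicator: every
 * process scans the actors running on its own (simulated) host, the smallest global
 * index found on each host becomes that host's leader, an intra-host communicator and
 * an inter-host leader communicator are built, and two properties are recorded for the
 * SMP-aware collectives: is_uniform_ (same number of processes on every node) and
 * is_blocked_ (ranks laid out contiguously on the nodes). */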
void Comm::init_smp(){
  int leader = -1;

  if (this == MPI_COMM_UNINITIALIZED)
    smpi_process_comm_world()->init_smp();

  int comm_size = this->size();

  // If we are in replay - perform an ugly hack
  // tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls
  bool replaying = false; //cache data to set it back again after
  if(smpi_process_get_replaying()){
    replaying = true;
    smpi_process_set_replaying(false);
  }

  if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  //identify neighbours in comm
  //get the indexes of all processes sharing the same simix host
  xbt_swag_t process_list = SIMIX_host_self()->extension<simgrid::simix::Host>()->process_list;
  int intra_comm_size = 0;
  int min_index = INT_MAX; //the minimum index will be the leader
  smx_actor_t actor = nullptr;
  xbt_swag_foreach(actor, process_list) {
    int index = actor->pid -1;

    if(this->group()->rank(index)!=MPI_UNDEFINED){
      intra_comm_size++;
      //the process is in the comm
      if(index < min_index)
        min_index = index;
    }
  }
  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
  MPI_Group group_intra = new Group(intra_comm_size);
  int i = 0;
  xbt_swag_foreach(actor, process_list) {
    int index = actor->pid -1;
    if(this->group()->rank(index)!=MPI_UNDEFINED){
      group_intra->set_mapping(index, i);
      i++;
    }
  }

  MPI_Comm comm_intra = new Comm(group_intra, nullptr);
  leader=min_index;

  int * leaders_map= static_cast<int*>(xbt_malloc0(sizeof(int)*comm_size));
  int * leader_list= static_cast<int*>(xbt_malloc0(sizeof(int)*comm_size));
  for(i=0; i<comm_size; i++){
    leader_list[i]=-1;
  }

  Coll_allgather_mpich::allgather(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, this);

  if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }

  if(leaders_map_==nullptr){
    leaders_map_= leaders_map;
  }else{
    xbt_free(leaders_map);
  }
  /* Deduplicate the gathered leader indexes to get one entry per node */
  int j=0;
  int leader_group_size = 0;
  for(i=0; i<comm_size; i++){
    int already_done = 0;
    for(j=0;j<leader_group_size; j++){
      if(leaders_map_[i]==leader_list[j]){
        already_done = 1;
      }
    }
    if(already_done == 0){
      leader_list[leader_group_size]=leaders_map_[i];
      leader_group_size++;
    }
  }
  qsort(leader_list, leader_group_size, sizeof(int), compare_ints);

  MPI_Group leaders_group = new Group(leader_group_size);

  MPI_Comm leader_comm = MPI_COMM_NULL;
  if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
    //create leader_communicator
    for (i=0; i< leader_group_size;i++)
      leaders_group->set_mapping(leader_list[i], i);
    leader_comm = new Comm(leaders_group, nullptr);
    this->set_leaders_comm(leader_comm);
    this->set_intra_comm(comm_intra);

    //create intracommunicator
  }else{
    for (i=0; i< leader_group_size;i++)
      leaders_group->set_mapping(leader_list[i], i);

    if(this->get_leaders_comm()==MPI_COMM_NULL){
      leader_comm = new Comm(leaders_group, nullptr);
      this->set_leaders_comm(leader_comm);
    }else{
      leader_comm=this->get_leaders_comm();
      Group::unref(leaders_group);
    }
    smpi_process_set_comm_intra(comm_intra);
  }

  // Are the nodes uniform ? = same number of process/node
  int is_uniform = 1;
  int my_local_size=comm_intra->size();
  if(comm_intra->rank()==0) {
    int* non_uniform_map = xbt_new0(int,leader_group_size);
    Coll_allgather_mpich::allgather(&my_local_size, 1, MPI_INT,
        non_uniform_map, 1, MPI_INT, leader_comm);
    for(i=0; i < leader_group_size; i++) {
      if(non_uniform_map[0] != non_uniform_map[i]) {
        is_uniform = 0;
        break;
      }
    }
    if(is_uniform==0 && this->is_uniform()!=0){
      non_uniform_map_= non_uniform_map;
    }else{
      xbt_free(non_uniform_map);
    }
    is_uniform_=is_uniform;
  }
  Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );

  if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  // Are the ranks blocked ? = allocated contiguously on the SMP nodes
  int is_blocked=1;
  int prev=this->group()->rank(comm_intra->group()->index(0));
  for (i=1; i<my_local_size; i++){
    int that=this->group()->rank(comm_intra->group()->index(i));
    if(that!=prev+1){
      is_blocked = 0;
      break;
    }
    prev = that;
  }

  int global_blocked;
  Coll_allreduce_default::allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);

  if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
    if(this->rank()==0){
      is_blocked_=global_blocked;
    }
  }else{
    is_blocked_=global_blocked;
  }
  xbt_free(leader_list);

  if(replaying)
    smpi_process_set_replaying(true);
}
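/* Attribute handling (MPI_Comm_set_attr and friends). Registered keyvals live in the
 * static keyvals_ map; per-communicator values live in the attributes_ dict, indexed
 * by the integer keyval. A minimal sketch of the flow (names are illustrative, and the
 * keyval is assumed to be already registered):
 *
 *   comm->attr_put(keyval, &payload);     // stores the pointer, deleting any old value
 *   void* out; int found;
 *   comm->attr_get(keyval, &out, &found); // found==1, out==&payload
 *   comm->attr_delete(keyval);            // runs the user delete callback, then removes
 */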
int Comm::attr_delete(int keyval){
  smpi_key_elem elem = keyvals_.at(keyval);
  if(elem==nullptr)
    return MPI_ERR_ARG;
  if(elem->delete_fn.comm_delete_fn!=MPI_NULL_DELETE_FN){
    void* value = nullptr;
    int flag;
    if(this->attr_get(keyval, &value, &flag)==MPI_SUCCESS){
      int ret = elem->delete_fn.comm_delete_fn(this, keyval, value, &flag);
      if(ret!=MPI_SUCCESS)
        return ret;
    }
  }
  if(attributes_==nullptr)
    return MPI_ERR_ARG;

  xbt_dict_remove_ext(attributes_, reinterpret_cast<const char*>(&keyval), sizeof(int));
  return MPI_SUCCESS;
}
int Comm::attr_get(int keyval, void* attr_value, int* flag){
  smpi_key_elem elem = keyvals_.at(keyval);
  if(elem==nullptr)
    return MPI_ERR_ARG;
  if(attributes_==nullptr){
    *flag=0;
    return MPI_SUCCESS;
  }
  try {
    *static_cast<void**>(attr_value) =
      xbt_dict_get_ext(attributes_, reinterpret_cast<const char*>(&keyval), sizeof(int));
    *flag=1;
  } catch (xbt_ex& ex) {
    *flag=0; /* attribute not set on this communicator */
  }
  return MPI_SUCCESS;
}
int Comm::attr_put(int keyval, void* attr_value){
  smpi_key_elem elem = keyvals_.at(keyval);
  if(elem==nullptr)
    return MPI_ERR_ARG;
  int flag;
  void* value = nullptr;
  this->attr_get(keyval, &value, &flag);
  if(flag!=0 && elem->delete_fn.comm_delete_fn!=MPI_NULL_DELETE_FN){
    int ret = elem->delete_fn.comm_delete_fn(this, keyval, value, &flag);
    if(ret!=MPI_SUCCESS)
      return ret;
  }
  if(attributes_==nullptr)
    attributes_ = xbt_dict_new_homogeneous(nullptr);

  xbt_dict_set_ext(attributes_, reinterpret_cast<const char*>(&keyval), sizeof(int), attr_value, nullptr);
  return MPI_SUCCESS;
}
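/* Fortran interoperability: Fortran sees communicators as plain integers, so f2c()
 * translates such an id back to a C handle through the F2C lookup dict (small fixed
 * ids map to MPI_COMM_SELF and MPI_COMM_WORLD), add_f() registers a handle and
 * returns its freshly allocated id, and free_f() drops the mapping. */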
MPI_Comm Comm::f2c(int id) {
  if(MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED && id == -2) {
    return MPI_COMM_SELF;
  } else if(MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED && id == 0){
    return MPI_COMM_WORLD;
  } else if(F2C::f2c_lookup_ != nullptr && id >= 0) {
    char key[KEY_SIZE];
    MPI_Comm tmp = static_cast<MPI_Comm>(xbt_dict_get_or_null(F2C::f2c_lookup_,get_key_id(key, id)));
    return tmp != nullptr ? tmp : MPI_COMM_NULL ;
  } else {
    return MPI_COMM_NULL;
  }
}
void Comm::free_f(int id) {
  char key[KEY_SIZE];
  xbt_dict_remove(F2C::f2c_lookup_, id==0? get_key(key, id) : get_key_id(key, id));
}
int Comm::add_f() {
  if(F2C::f2c_lookup_==nullptr){
    F2C::f2c_lookup_=xbt_dict_new_homogeneous(nullptr);
  }
  char key[KEY_SIZE];
  xbt_dict_set(F2C::f2c_lookup_, this==MPI_COMM_WORLD? get_key(key, F2C::f2c_id_) : get_key_id(key,F2C::f2c_id_), this, nullptr);
  f2c_id_++;
  return F2C::f2c_id_-1;
}