/* Copyright (c) 2010-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include <climits>

#include <simgrid/s4u/host.hpp>

#include "private.h"
#include "smpi_mpi_dt_private.h"
#include "src/simix/smx_private.h"
#include "colls/colls.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");

xbt_dict_t smpi_comm_keyvals = nullptr;
int comm_keyval_id = 0; // avoid collisions

/* Support for cartesian topology was added, but there are two other types of topology: graph and dist graph. In order
 * to support them, we have to add a field MPIR_Topo_type, and replace the MPI_Topology field by a union. */

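/* A minimal sketch of that change (the names below are hypothetical, loosely following
 * MPICH's MPIR_* conventions; only the cartesian payload exists in SMPI today):
 *
 *   typedef enum { MPIR_CART, MPIR_GRAPH, MPIR_DIST_GRAPH } MPIR_Topo_type;
 *
 *   struct s_smpi_mpi_topology {
 *     MPIR_Topo_type kind;   // discriminant: tells which union member is valid
 *     union {
 *       cart_topology_t       cart;        // already implemented
 *       graph_topology_t      graph;       // hypothetical, to be added
 *       dist_graph_topology_t dist_graph;  // hypothetical, to be added
 *     } topo;
 *   };
 */
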
static int smpi_compare_rankmap(const void *a, const void *b)
{
  const int* x = static_cast<const int*>(a);
  const int* y = static_cast<const int*>(b);
  // Entries are [rank, key] pairs: order by key first, then break ties by original rank.
  if (x[1] != y[1])
    return (x[1] < y[1]) ? -1 : 1;
  return (x[0] > y[0]) - (x[0] < y[0]);
}

Comm::Comm(MPI_Group group, MPI_Topology topo) : m_group(group), m_topo(topo)
{
  m_refcount = 1;
  m_topoType = MPI_INVALID_TOPO;
  m_intra_comm = MPI_COMM_NULL;
  m_leaders_comm = MPI_COMM_NULL;
  m_is_uniform = 1;
  m_non_uniform_map = nullptr;
  m_leaders_map = nullptr;
  m_is_blocked = 0;
  m_attributes = nullptr;
}

void Comm::destroy()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->destroy();
  smpi_topo_destroy(m_topo); // there's no use count on topos
  this->unuse();
}

int Comm::dup(MPI_Comm* newcomm){
  if(smpi_privatize_global_variables){ // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  MPI_Group cp = new simgrid::SMPI::Group(this->group());
  (*newcomm) = new simgrid::SMPI::Comm(cp, this->topo());
  int ret = MPI_SUCCESS;

  if(m_attributes != nullptr){
    (*newcomm)->m_attributes = xbt_dict_new_homogeneous(nullptr);
    xbt_dict_cursor_t cursor = nullptr;
    char* key;
    int flag;
    void* value_in;
    void* value_out;
    xbt_dict_foreach (m_attributes, cursor, key, value_in) {
      smpi_comm_key_elem elem =
          static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, key, sizeof(int)));
      if (elem != nullptr && elem->copy_fn != MPI_NULL_COPY_FN) {
        ret = elem->copy_fn(this, atoi(key), nullptr, value_in, &value_out, &flag);
        if (ret != MPI_SUCCESS) {
          (*newcomm)->destroy();
          *newcomm = MPI_COMM_NULL;
          xbt_dict_cursor_free(&cursor);
          return ret;
        }
        if (flag)
          xbt_dict_set_ext((*newcomm)->m_attributes, key, sizeof(int), value_out, nullptr);
      }
    }
  }
  return ret;
}

MPI_Group Comm::group()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->group();
  return m_group;
}

MPI_Topology Comm::topo() {
  if (this != MPI_COMM_NULL)
    return m_topo;
  return nullptr;
}

int Comm::size()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->size();
  return m_group->size();
}

int Comm::rank()
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->rank();
  return m_group->rank(smpi_process_index());
}

void Comm::get_name (char* name, int* len)
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->get_name(name, len);
  if (this == MPI_COMM_WORLD) {
    strncpy(name, "WORLD", 6); // copy 6 bytes so that the terminating NUL is included
    *len = 5;
  } else {
    *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", this);
  }
}

void Comm::set_leaders_comm(MPI_Comm leaders){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->set_leaders_comm(leaders);
  m_leaders_comm = leaders;
}

void Comm::set_intra_comm(MPI_Comm leaders){
  m_intra_comm = leaders;
}

int* Comm::get_non_uniform_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->get_non_uniform_map();
  return m_non_uniform_map;
}

int* Comm::get_leaders_map(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->get_leaders_map();
  return m_leaders_map;
}

MPI_Comm Comm::get_leaders_comm(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->get_leaders_comm();
  return m_leaders_comm;
}

MPI_Comm Comm::get_intra_comm(){
  if (this == MPI_COMM_UNINITIALIZED || this == MPI_COMM_WORLD)
    return smpi_process_get_comm_intra();
  else
    return m_intra_comm;
}

int Comm::is_uniform(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->is_uniform();
  return m_is_uniform;
}

int Comm::is_blocked(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->is_blocked();
  return m_is_blocked;
}

MPI_Comm Comm::split(int color, int key)
{
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->split(color, key);
  int system_tag = 123;
  int* recvbuf;

  MPI_Group group_root = nullptr;
  MPI_Group group_out  = nullptr;
  MPI_Group group      = this->group();
  int rank             = this->rank();
  int size             = this->size();
  /* Gather all colors and keys on rank 0 */
  int* sendbuf = xbt_new(int, 2);
  sendbuf[0] = color;
  sendbuf[1] = key;
  if (rank == 0) {
    recvbuf = xbt_new(int, 2 * size);
  } else {
    recvbuf = nullptr;
  }
  smpi_mpi_gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
  xbt_free(sendbuf);
  /* Do the actual job */
  if (rank == 0) {
    MPI_Group* group_snd = xbt_new(MPI_Group, size);
    int* rankmap         = xbt_new(int, 2 * size);
    for (int i = 0; i < size; i++) {
      if (recvbuf[2 * i] != MPI_UNDEFINED) {
        int count = 0;
        for (int j = i + 1; j < size; j++) {
          if(recvbuf[2 * i] == recvbuf[2 * j]) {
            recvbuf[2 * j] = MPI_UNDEFINED;
            rankmap[2 * count]     = j;
            rankmap[2 * count + 1] = recvbuf[2 * j + 1];
            count++;
          }
        }
        /* Add self in the group */
        recvbuf[2 * i] = MPI_UNDEFINED;
        rankmap[2 * count]     = i;
        rankmap[2 * count + 1] = recvbuf[2 * i + 1];
        count++;
        qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap);
        group_out = new simgrid::SMPI::Group(count);
        if (i == 0)
          group_root = group_out; /* Save root's group */
        for (int j = 0; j < count; j++) {
          int index = group->index(rankmap[2 * j]);
          group_out->set_mapping(index, j);
        }
        MPI_Request* requests = xbt_new(MPI_Request, count);
        int reqs              = 0;
        for (int j = 0; j < count; j++) {
          if(rankmap[2 * j] != 0) {
            group_snd[reqs] = new simgrid::SMPI::Group(group_out);
            requests[reqs]  = smpi_mpi_isend(&(group_snd[reqs]), 1, MPI_PTR, rankmap[2 * j], system_tag, this);
            reqs++;
          }
        }
        if (i != 0)
          group_out->destroy();
        smpi_mpi_waitall(reqs, requests, MPI_STATUS_IGNORE);
        xbt_free(requests);
      }
    }
    xbt_free(recvbuf);
    xbt_free(rankmap);
    xbt_free(group_snd);
    group_out = group_root; /* exit with root's group */
  } else {
    if(color != MPI_UNDEFINED) {
      smpi_mpi_recv(&group_out, 1, MPI_PTR, 0, system_tag, this, MPI_STATUS_IGNORE);
    } /* otherwise, exit with group_out == nullptr */
  }
  return group_out != nullptr ? new simgrid::SMPI::Comm(group_out, nullptr) : MPI_COMM_NULL;
}

void Comm::use(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->use();
  m_refcount++;
}

void Comm::cleanup_attributes(){
  if(m_attributes != nullptr){
    xbt_dict_cursor_t cursor = nullptr;
    char* key;
    void* value;
    int flag;
    xbt_dict_foreach (m_attributes, cursor, key, value) {
      smpi_comm_key_elem elem = static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null(smpi_comm_keyvals, key));
      if (elem != nullptr && elem->delete_fn != nullptr)
        elem->delete_fn(this, atoi(key), value, &flag);
    }
    xbt_dict_free(&m_attributes);
  }
}

void Comm::cleanup_smp(){
  if (m_intra_comm != MPI_COMM_NULL)
    m_intra_comm->unuse();
  if (m_leaders_comm != MPI_COMM_NULL)
    m_leaders_comm->unuse();
  if (m_non_uniform_map != nullptr)
    xbt_free(m_non_uniform_map);
  if (m_leaders_map != nullptr)
    xbt_free(m_leaders_map);
}

void Comm::unuse(){
  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->unuse();
  m_refcount--;

  if(m_refcount == 0){
    this->cleanup_smp();
    this->cleanup_attributes();
    delete this;
  }
}

static int compare_ints (const void *a, const void *b)
{
  const int *da = static_cast<const int *>(a);
  const int *db = static_cast<const int *>(b);

  // Returns -1, 0 or 1; unlike `*da - *db`, this cannot overflow.
  return static_cast<int>(*da > *db) - static_cast<int>(*da < *db);
}

void Comm::init_smp(){
  int leader = -1;

  if (this == MPI_COMM_UNINITIALIZED)
    return smpi_process_comm_world()->init_smp();

  int comm_size = this->size();

  // If we are in replay, perform an ugly hack:
  // tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls
  bool replaying = false; // cache the flag so that we can restore it afterwards
  if(smpi_process_get_replaying()){
    replaying = true;
    smpi_process_set_replaying(false);
  }

  if(smpi_privatize_global_variables){ // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  // identify neighbours in comm
  // get the indexes of all processes sharing the same simix host
  xbt_swag_t process_list = SIMIX_host_self()->processes();
  int intra_comm_size = 0;
  int i = 0;
  int min_index = INT_MAX; // the minimum index will be the leader
  smx_actor_t process = nullptr;
  xbt_swag_foreach(process, process_list) {
    int index = process->pid - 1;
    if(this->group()->rank(index) != MPI_UNDEFINED){
      intra_comm_size++;
      // the process is in the comm
      if(index < min_index)
        min_index = index;
    }
  }
  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
  MPI_Group group_intra = new simgrid::SMPI::Group(intra_comm_size);
  i = 0;
  xbt_swag_foreach(process, process_list) {
    int index = process->pid - 1;
    if(this->group()->rank(index) != MPI_UNDEFINED){
      group_intra->set_mapping(index, i);
      i++;
    }
  }

  MPI_Comm comm_intra = new simgrid::SMPI::Comm(group_intra, nullptr);
  leader = min_index;

  int* leaders_map = static_cast<int*>(xbt_malloc0(sizeof(int) * comm_size));
  int* leader_list = static_cast<int*>(xbt_malloc0(sizeof(int) * comm_size));
  for(i = 0; i < comm_size; i++){
    leader_list[i] = -1;
  }

  smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, this);

  if(smpi_privatize_global_variables){ // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }

  if(m_leaders_map == nullptr){
    m_leaders_map = leaders_map;
  }else{
    xbt_free(leaders_map);
  }
  int j = 0;
  int leader_group_size = 0;
  for(i = 0; i < comm_size; i++){
    int already_done = 0;
    for(j = 0; j < leader_group_size; j++){
      if(m_leaders_map[i] == leader_list[j]){
        already_done = 1;
      }
    }
    if(already_done == 0){
      leader_list[leader_group_size] = m_leaders_map[i];
      leader_group_size++;
    }
  }
  qsort(leader_list, leader_group_size, sizeof(int), compare_ints);

  MPI_Group leaders_group = new simgrid::SMPI::Group(leader_group_size);

  MPI_Comm leader_comm = MPI_COMM_NULL;
  if(MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED && this != MPI_COMM_WORLD){
    // create leader_communicator
    for (i = 0; i < leader_group_size; i++)
      leaders_group->set_mapping(leader_list[i], i);
    leader_comm = new simgrid::SMPI::Comm(leaders_group, nullptr);
    this->set_leaders_comm(leader_comm);
    this->set_intra_comm(comm_intra);

    // create intracommunicator
  }else{
    for (i = 0; i < leader_group_size; i++)
      leaders_group->set_mapping(leader_list[i], i);

    if(this->get_leaders_comm() == MPI_COMM_NULL){
      leader_comm = new simgrid::SMPI::Comm(leaders_group, nullptr);
      this->set_leaders_comm(leader_comm);
    }else{
      leader_comm = this->get_leaders_comm();
      leaders_group->unuse();
    }
    smpi_process_set_comm_intra(comm_intra);
  }

  int is_uniform = 1;

  // Are the nodes uniform? (i.e. is there the same number of processes on each node?)
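  // Example: 8 ranks on 2 nodes are uniform when deployed 4+4, but not when deployed 6+2.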
  int my_local_size = comm_intra->size();
  if(comm_intra->rank() == 0) {
    int* non_uniform_map = xbt_new0(int, leader_group_size);
    smpi_coll_tuned_allgather_mpich(&my_local_size, 1, MPI_INT,
                                    non_uniform_map, 1, MPI_INT, leader_comm);
    for(i = 0; i < leader_group_size; i++) {
      if(non_uniform_map[0] != non_uniform_map[i]) {
        is_uniform = 0;
        break;
      }
    }
    if(is_uniform == 0 && this->is_uniform() != 0){
      m_non_uniform_map = non_uniform_map;
    }else{
      xbt_free(non_uniform_map);
    }
    m_is_uniform = is_uniform;
  }
  smpi_coll_tuned_bcast_mpich(&(m_is_uniform), 1, MPI_INT, 0, comm_intra);

  if(smpi_privatize_global_variables){ // we need to switch as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  // Are the ranks blocked? (i.e. allocated contiguously on the SMP nodes)
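  // Example: with 4 processes per node, the placement {0,1,2,3}+{4,5,6,7} is blocked,
  // while a round-robin placement {0,2,4,6}+{1,3,5,7} is not.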
  int is_blocked = 1;
  int prev = this->group()->rank(comm_intra->group()->index(0));
  for (i = 1; i < my_local_size; i++){
    int that = this->group()->rank(comm_intra->group()->index(i));
    if(that != prev + 1){
      is_blocked = 0;
      break;
    }
    prev = that;
  }

  int global_blocked;
  smpi_mpi_allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);

  if(MPI_COMM_WORLD == MPI_COMM_UNINITIALIZED || this == MPI_COMM_WORLD){
    if(this->rank() == 0){
      m_is_blocked = global_blocked;
    }
  }else{
    m_is_blocked = global_blocked;
  }
  xbt_free(leader_list);

  if(replaying)
    smpi_process_set_replaying(true);
}

int Comm::attr_delete(int keyval){
  smpi_comm_key_elem elem =
      static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(&keyval), sizeof(int)));
  if(elem == nullptr)
    return MPI_ERR_ARG;
  if(elem->delete_fn != MPI_NULL_DELETE_FN){
    void* value = nullptr;
    int flag;
    if(this->attr_get(keyval, &value, &flag) == MPI_SUCCESS){
      int ret = elem->delete_fn(this, keyval, value, &flag);
      if(ret != MPI_SUCCESS)
        return ret;
    }
  }
  if(m_attributes == nullptr)
    return MPI_ERR_ARG;

  xbt_dict_remove_ext(m_attributes, reinterpret_cast<const char*>(&keyval), sizeof(int));
  return MPI_SUCCESS;
}

int Comm::attr_get(int keyval, void* attr_value, int* flag){
  smpi_comm_key_elem elem =
      static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(&keyval), sizeof(int)));
  if(elem == nullptr)
    return MPI_ERR_ARG;
  if(m_attributes == nullptr){
    *flag = 0;
    return MPI_SUCCESS;
  }
  try {
    *static_cast<void**>(attr_value) =
        xbt_dict_get_ext(m_attributes, reinterpret_cast<const char*>(&keyval), sizeof(int));
    *flag = 1;
  } catch (xbt_ex& ex) {
    *flag = 0; // attribute not set on this communicator
  }
  return MPI_SUCCESS;
}

int Comm::attr_put(int keyval, void* attr_value){
  if(smpi_comm_keyvals == nullptr)
    smpi_comm_keyvals = xbt_dict_new_homogeneous(nullptr);
  smpi_comm_key_elem elem =
      static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(&keyval), sizeof(int)));
  if(elem == nullptr)
    return MPI_ERR_ARG;
  int flag;
  void* value = nullptr;
  this->attr_get(keyval, &value, &flag);
  if(flag != 0 && elem->delete_fn != MPI_NULL_DELETE_FN){
    int ret = elem->delete_fn(this, keyval, value, &flag);
    if(ret != MPI_SUCCESS)
      return ret;
  }
  if(m_attributes == nullptr)
    m_attributes = xbt_dict_new_homogeneous(nullptr);

  xbt_dict_set_ext(m_attributes, reinterpret_cast<const char*>(&keyval), sizeof(int), attr_value, nullptr);
  return MPI_SUCCESS;
}

int smpi_comm_keyval_create(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval,
                            void* extra_state){
  if(smpi_comm_keyvals == nullptr)
    smpi_comm_keyvals = xbt_dict_new_homogeneous(nullptr);

  smpi_comm_key_elem value = static_cast<smpi_comm_key_elem>(xbt_new0(s_smpi_mpi_comm_key_elem_t, 1));

  value->copy_fn = copy_fn;
  value->delete_fn = delete_fn;

  *keyval = comm_keyval_id;
  xbt_dict_set_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(keyval), sizeof(int), static_cast<void*>(value), nullptr);
  comm_keyval_id++;
  return MPI_SUCCESS;
}

int smpi_comm_keyval_free(int* keyval){
  smpi_comm_key_elem elem =
      static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(keyval), sizeof(int)));
  if(elem == nullptr)
    return MPI_ERR_ARG;
  xbt_dict_remove_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(keyval), sizeof(int));
  xbt_free(elem);
  return MPI_SUCCESS;
}