/* Copyright (c) 2010-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include <stdlib.h>
#include <string.h>
#include <limits.h>

#include <xbt/dict.h>
#include <xbt/ex.hpp>

#include <simgrid/s4u/host.hpp>

#include "private.h"
#include "smpi_mpi_dt_private.h"
#include "src/simix/smx_private.h"
#include "colls/colls.h"

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");

xbt_dict_t smpi_comm_keyvals = nullptr;
int comm_keyval_id = 0; // avoid collisions

/* Support for Cartesian topology was added, but there are two other types of topology: graph and dist graph. In order
 * to support them, we have to add a field MPIR_Topo_type, and replace the MPI_Topology field with a union. */

typedef struct s_smpi_mpi_communicator {
  MPI_Group group;
  MPIR_Topo_type topoType;
  MPI_Topology topo;     // to be replaced by a union
  int refcount;
  MPI_Comm leaders_comm; // inter-node communicator
  MPI_Comm intra_comm;   // intra-node communicator. For MPI_COMM_WORLD this can't be used, as the variable is global;
                         // use the intracomm stored in the process data instead
  int* leaders_map;      // who is the leader of each process
  int is_uniform;        // are all nodes equal in number of processes deployed?
  int* non_uniform_map;  // set if SMP nodes have a different number of processes allocated
  int is_blocked;        // are ranks allocated on the same SMP node contiguous?
  xbt_dict_t attributes;
} s_smpi_mpi_communicator_t;
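
/* Illustrative sketch only (not part of SMPI): one possible shape for the tagged
 * union mentioned above. The graph descriptor types are hypothetical placeholders,
 * hence the block is compiled out. */
#if 0
typedef union u_smpi_mpi_topo_desc {
  MPI_Topology cart;                        // existing Cartesian descriptor
  struct s_graph_topology* graph;           // hypothetical graph descriptor
  struct s_dist_graph_topology* dist_graph; // hypothetical dist-graph descriptor
} u_smpi_mpi_topo_desc_t;
// The communicator would then hold this union next to the MPIR_Topo_type tag.
#endif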

static int smpi_compare_rankmap(const void *a, const void *b)
{
  const int* x = static_cast<const int*>(a);
  const int* y = static_cast<const int*>(b);

  /* Sort the (rank, key) pairs by key first, then break ties with the original rank. */
  if (x[1] < y[1])
    return -1;
  if (x[1] == y[1]) {
    if (x[0] < y[0])
      return -1;
    if (x[0] == y[0])
      return 0;
    return 1;
  }
  return 1;
}

MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo)
{
  MPI_Comm comm = xbt_new(s_smpi_mpi_communicator_t, 1);
  comm->group = group;
  comm->refcount = 1;
  comm->topoType = MPI_INVALID_TOPO;
  comm->topo = topo;
  comm->intra_comm = MPI_COMM_NULL;
  comm->leaders_comm = MPI_COMM_NULL;
  comm->is_uniform = 1;
  comm->non_uniform_map = nullptr;
  comm->leaders_map = nullptr;
  comm->is_blocked = 0;
  comm->attributes = nullptr;
  return comm;
}

void smpi_comm_destroy(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  smpi_topo_destroy(comm->topo); // there's no use count on topos
  smpi_comm_unuse(comm);
}

int smpi_comm_dup(MPI_Comm comm, MPI_Comm* newcomm){
  if (smpi_privatize_global_variables) { // we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  MPI_Group cp = smpi_group_copy(smpi_comm_group(comm));
  (*newcomm)   = smpi_comm_new(cp, smpi_comm_topo(comm));
  int ret      = MPI_SUCCESS;

  if (comm->attributes != nullptr) {
    (*newcomm)->attributes   = xbt_dict_new_homogeneous(nullptr);
    xbt_dict_cursor_t cursor = nullptr;
    char* key;
    int flag;
    void* value_in;
    void* value_out;
    xbt_dict_foreach (comm->attributes, cursor, key, value_in) {
      smpi_comm_key_elem elem =
          static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, key, sizeof(int)));
      if (elem != nullptr && elem->copy_fn != MPI_NULL_COPY_FN) {
        ret = elem->copy_fn(comm, atoi(key), nullptr, value_in, &value_out, &flag);
        if (ret != MPI_SUCCESS) {
          smpi_comm_destroy(*newcomm);
          *newcomm = MPI_COMM_NULL;
          xbt_dict_cursor_free(&cursor);
          return ret;
        }
        if (flag)
          xbt_dict_set_ext((*newcomm)->attributes, key, sizeof(int), value_out, nullptr);
      }
    }
  }
  return ret;
}
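
/* Illustrative sketch only (not part of SMPI): a user-side attribute copy callback
 * of the kind invoked by the loop in smpi_comm_dup() above, assuming the standard
 * MPI_Comm_copy_attr_function signature. It shares the pointer with the duplicated
 * communicator and sets *flag to request the copy. */
#if 0
static int example_copy_fn(MPI_Comm oldcomm, int keyval, void* extra_state,
                           void* attribute_val_in, void* attribute_val_out, int* flag)
{
  *static_cast<void**>(attribute_val_out) = attribute_val_in; // shallow copy: share the value
  *flag = 1;                                                  // yes, attach it to the new communicator
  return MPI_SUCCESS;
}
#endif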

MPI_Group smpi_comm_group(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->group;
}

MPI_Topology smpi_comm_topo(MPI_Comm comm) {
  if (comm != MPI_COMM_NULL)
    return comm->topo;
  return nullptr;
}

int smpi_comm_size(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return smpi_group_size(smpi_comm_group(comm));
}

int smpi_comm_rank(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return smpi_group_rank(smpi_comm_group(comm), smpi_process_index());
}

void smpi_comm_get_name(MPI_Comm comm, char* name, int* len)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  if (comm == MPI_COMM_WORLD) {
    strncpy(name, "WORLD", 6); // copy the terminating NUL as well
    *len = 5;
  } else {
    *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", comm);
  }
}

void smpi_comm_set_leaders_comm(MPI_Comm comm, MPI_Comm leaders){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  comm->leaders_comm = leaders;
}

void smpi_comm_set_intra_comm(MPI_Comm comm, MPI_Comm intra){
  comm->intra_comm = intra;
}

int* smpi_comm_get_non_uniform_map(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->non_uniform_map;
}

int* smpi_comm_get_leaders_map(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->leaders_map;
}

MPI_Comm smpi_comm_get_leaders_comm(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->leaders_comm;
}

MPI_Comm smpi_comm_get_intra_comm(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED || comm == MPI_COMM_WORLD)
    return smpi_process_get_comm_intra();
  else
    return comm->intra_comm;
}

int smpi_comm_is_uniform(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->is_uniform;
}

int smpi_comm_is_blocked(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->is_blocked;
}

MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  int system_tag = 123;
  int* recvbuf   = nullptr;

  MPI_Group group_root = nullptr;
  MPI_Group group_out  = nullptr;
  MPI_Group group      = smpi_comm_group(comm);
  int rank             = smpi_comm_rank(comm);
  int size             = smpi_comm_size(comm);
  /* Gather all colors and keys on rank 0 */
  int* sendbuf = xbt_new(int, 2);
  sendbuf[0]   = color;
  sendbuf[1]   = key;
  if (rank == 0)
    recvbuf = xbt_new(int, 2 * size);
  smpi_mpi_gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, comm);
  xbt_free(sendbuf);
  /* Do the actual job */
  if (rank == 0) {
    MPI_Group* group_snd = xbt_new(MPI_Group, size);
    int* rankmap         = xbt_new(int, 2 * size);
    for (int i = 0; i < size; i++) {
      if (recvbuf[2 * i] != MPI_UNDEFINED) {
        int count = 0;
        /* Collect every other rank with the same color, and mark it as handled */
        for (int j = i + 1; j < size; j++) {
          if (recvbuf[2 * i] == recvbuf[2 * j]) {
            recvbuf[2 * j]         = MPI_UNDEFINED;
            rankmap[2 * count]     = j;
            rankmap[2 * count + 1] = recvbuf[2 * j + 1];
            count++;
          }
        }
        /* Add self in the group */
        recvbuf[2 * i]         = MPI_UNDEFINED;
        rankmap[2 * count]     = i;
        rankmap[2 * count + 1] = recvbuf[2 * i + 1];
        count++;
        qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap);
        group_out = smpi_group_new(count);
        if (i == 0)
          group_root = group_out; /* Save root's group */
        for (int j = 0; j < count; j++) {
          int index = smpi_group_index(group, rankmap[2 * j]);
          smpi_group_set_mapping(group_out, index, j);
        }
        /* Send the new group to its members (except to root itself) */
        MPI_Request* requests = xbt_new(MPI_Request, count);
        int reqs              = 0;
        for (int j = 0; j < count; j++) {
          if (rankmap[2 * j] != 0) {
            group_snd[reqs] = smpi_group_copy(group_out);
            requests[reqs]  = smpi_mpi_isend(&(group_snd[reqs]), 1, MPI_PTR, rankmap[2 * j], system_tag, comm);
            reqs++;
          }
        }
        if (i != 0)
          smpi_group_destroy(group_out);
        smpi_mpi_waitall(reqs, requests, MPI_STATUS_IGNORE);
        xbt_free(requests);
      }
    }
    xbt_free(recvbuf);
    xbt_free(rankmap);
    xbt_free(group_snd);
    group_out = group_root; /* exit with root's group */
  } else {
    if (color != MPI_UNDEFINED) {
      smpi_mpi_recv(&group_out, 1, MPI_PTR, 0, system_tag, comm, MPI_STATUS_IGNORE);
    } /* otherwise, exit with group_out == nullptr */
  }
  return group_out != nullptr ? smpi_comm_new(group_out, nullptr) : MPI_COMM_NULL;
}
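
/* Illustrative usage sketch only (not part of SMPI): splitting a communicator
 * into "even" and "odd" sub-communicators. Using the original rank as the key
 * preserves the relative ordering inside each sub-communicator. */
#if 0
static MPI_Comm split_even_odd(MPI_Comm comm)
{
  int rank  = smpi_comm_rank(comm);
  int color = rank % 2; // processes with the same color land in the same sub-communicator
  int key   = rank;     // ranks in the sub-communicator are ordered by this key
  return smpi_comm_split(comm, color, key);
}
#endif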

void smpi_comm_use(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  smpi_group_use(comm->group);
  comm->refcount++;
}

void smpi_comm_cleanup_attributes(MPI_Comm comm){
  if (comm->attributes != nullptr) {
    xbt_dict_cursor_t cursor = nullptr;
    char* key;
    void* value;
    int flag;
    xbt_dict_foreach (comm->attributes, cursor, key, value) {
      smpi_comm_key_elem elem = static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null(smpi_comm_keyvals, key));
      if (elem != nullptr && elem->delete_fn != nullptr)
        elem->delete_fn(comm, atoi(key), value, &flag);
    }
    xbt_dict_free(&comm->attributes);
  }
}

void smpi_comm_cleanup_smp(MPI_Comm comm){
  if (comm->intra_comm != MPI_COMM_NULL)
    smpi_comm_unuse(comm->intra_comm);
  if (comm->leaders_comm != MPI_COMM_NULL)
    smpi_comm_unuse(comm->leaders_comm);
  if (comm->non_uniform_map != nullptr)
    xbt_free(comm->non_uniform_map);
  if (comm->leaders_map != nullptr)
    xbt_free(comm->leaders_map);
}

void smpi_comm_unuse(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  comm->refcount--;
  smpi_group_unuse(comm->group);

  if (comm->refcount == 0) {
    smpi_comm_cleanup_smp(comm);
    smpi_comm_cleanup_attributes(comm);
    xbt_free(comm);
  }
}
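
/* Illustrative sketch only (not part of SMPI): the reference-counting contract
 * implemented by smpi_comm_use()/smpi_comm_unuse(). Each use must be balanced
 * by exactly one unuse; the communicator is freed when the count reaches 0. */
#if 0
static void keep_comm_alive(MPI_Comm comm)
{
  smpi_comm_use(comm);   // refcount: n -> n+1, pins the communicator
  /* ... comm can be used safely here, even if the application destroys it ... */
  smpi_comm_unuse(comm); // refcount: n+1 -> n; cleanup happens when it drops to 0
}
#endif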

static int compare_ints(const void *a, const void *b)
{
  const int *da = static_cast<const int *>(a);
  const int *db = static_cast<const int *>(b);

  /* Returns -1, 0 or 1; unlike the classic `*da - *db` idiom, this cannot overflow. */
  return static_cast<int>(*da > *db) - static_cast<int>(*da < *db);
}

void smpi_comm_init_smp(MPI_Comm comm){
  int leader = -1;

  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();

  int comm_size = smpi_comm_size(comm);

  // If we are in replay, perform an ugly hack:
  // tell SimGrid that we are not replaying for a while, because we need the buffers to be copied for the following calls
  bool replaying = false; // cache the value to set it back afterwards
  if (smpi_process_get_replaying()) {
    replaying = true;
    smpi_process_set_replaying(false);
  }

  if (smpi_privatize_global_variables) { // we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  // identify neighbors in comm
  // get the indices of all processes sharing the same simix host
  xbt_swag_t process_list = SIMIX_host_self()->processes();
  int intra_comm_size     = 0;
  int i                   = 0;
  int min_index           = INT_MAX; // the minimum index will be the leader
  smx_actor_t process     = nullptr;
  xbt_swag_foreach(process, process_list) {
    int index = process->pid - 1;

    if (smpi_group_rank(smpi_comm_group(comm), index) != MPI_UNDEFINED) {
      // the process is in the comm
      intra_comm_size++;
      if (index < min_index)
        min_index = index;
    }
  }
  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
  MPI_Group group_intra = smpi_group_new(intra_comm_size);
  i       = 0;
  process = nullptr;
  xbt_swag_foreach(process, process_list) {
    int index = process->pid - 1;
    if (smpi_group_rank(smpi_comm_group(comm), index) != MPI_UNDEFINED) {
      smpi_group_set_mapping(group_intra, index, i);
      i++;
    }
  }

  MPI_Comm comm_intra = smpi_comm_new(group_intra, nullptr);
  leader = min_index;

  int* leaders_map = static_cast<int*>(xbt_malloc0(sizeof(int) * comm_size));
  int* leader_list = static_cast<int*>(xbt_malloc0(sizeof(int) * comm_size));
  for (i = 0; i < comm_size; i++)
    leader_list[i] = -1;

  smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, comm);

  if (smpi_privatize_global_variables) { // we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }

  if (comm->leaders_map == nullptr) {
    comm->leaders_map = leaders_map;
  } else {
    xbt_free(leaders_map);
  }
  int j                 = 0;
  int leader_group_size = 0;
  for (i = 0; i < comm_size; i++) {
    int already_done = 0;
    for (j = 0; j < leader_group_size; j++) {
      if (comm->leaders_map[i] == leader_list[j])
        already_done = 1;
    }
    if (already_done == 0) {
      leader_list[leader_group_size] = comm->leaders_map[i];
      leader_group_size++;
    }
  }
  qsort(leader_list, leader_group_size, sizeof(int), compare_ints);

  MPI_Group leaders_group = smpi_group_new(leader_group_size);

  MPI_Comm leader_comm = MPI_COMM_NULL;
  if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED && comm != MPI_COMM_WORLD) {
    // create the leader communicator
    for (i = 0; i < leader_group_size; i++)
      smpi_group_set_mapping(leaders_group, leader_list[i], i);
    leader_comm = smpi_comm_new(leaders_group, nullptr);
    smpi_comm_set_leaders_comm(comm, leader_comm);
    smpi_comm_set_intra_comm(comm, comm_intra);

    // create the intracommunicator
  } else {
    for (i = 0; i < leader_group_size; i++)
      smpi_group_set_mapping(leaders_group, leader_list[i], i);

    if (smpi_comm_get_leaders_comm(comm) == MPI_COMM_NULL) {
      leader_comm = smpi_comm_new(leaders_group, nullptr);
      smpi_comm_set_leaders_comm(comm, leader_comm);
    } else {
      leader_comm = smpi_comm_get_leaders_comm(comm);
      smpi_group_unuse(leaders_group);
    }
    smpi_process_set_comm_intra(comm_intra);
  }

  int is_uniform = 1;

  // Are the nodes uniform? (i.e., is the same number of processes deployed on each node?)
  int my_local_size = smpi_comm_size(comm_intra);
  if (smpi_comm_rank(comm_intra) == 0) {
    int* non_uniform_map = xbt_new0(int, leader_group_size);
    smpi_coll_tuned_allgather_mpich(&my_local_size, 1, MPI_INT, non_uniform_map, 1, MPI_INT, leader_comm);
    for (i = 0; i < leader_group_size; i++) {
      if (non_uniform_map[0] != non_uniform_map[i]) {
        is_uniform = 0;
        break;
      }
    }
    if (is_uniform == 0 && smpi_comm_is_uniform(comm) != 0) {
      comm->non_uniform_map = non_uniform_map;
    } else {
      xbt_free(non_uniform_map);
    }
    comm->is_uniform = is_uniform;
  }
  smpi_coll_tuned_bcast_mpich(&(comm->is_uniform), 1, MPI_INT, 0, comm_intra);

  if (smpi_privatize_global_variables) { // we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  // Are the ranks blocked? (i.e., allocated contiguously on the SMP nodes?)
  int is_blocked = 1;
  int prev = smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), 0));
  for (i = 1; i < my_local_size; i++) {
    int that = smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), i));
    if (that != prev + 1) {
      is_blocked = 0;
      break;
    }
    prev = that;
  }

  int global_blocked;
  smpi_mpi_allreduce(&is_blocked, &global_blocked, 1, MPI_INT, MPI_LAND, comm);

  if (MPI_COMM_WORLD == MPI_COMM_UNINITIALIZED || comm == MPI_COMM_WORLD) {
    if (smpi_comm_rank(comm) == 0)
      comm->is_blocked = global_blocked;
  } else {
    comm->is_blocked = global_blocked;
  }
  xbt_free(leader_list);

  if (replaying)
    smpi_process_set_replaying(true);
}
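
/* Illustrative sketch only (not part of SMPI): how the communicators built by
 * smpi_comm_init_smp() enable two-level (SMP-aware) collectives, assuming for
 * simplicity that the global root is rank 0 of the leaders communicator. A
 * broadcast can go to the node leaders first and then fan out inside each node;
 * smpi_coll_tuned_bcast_mpich is used only as a stand-in for any broadcast. */
#if 0
static void two_level_bcast(void* buf, int count, MPI_Datatype type, MPI_Comm comm)
{
  MPI_Comm intra   = smpi_comm_get_intra_comm(comm);   // ranks on my node
  MPI_Comm leaders = smpi_comm_get_leaders_comm(comm); // one rank per node
  if (smpi_comm_rank(intra) == 0)                      // node leaders only
    smpi_coll_tuned_bcast_mpich(buf, count, type, 0, leaders);
  smpi_coll_tuned_bcast_mpich(buf, count, type, 0, intra); // fan out on the node
}
#endif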

int smpi_comm_attr_delete(MPI_Comm comm, int keyval){
  smpi_comm_key_elem elem =
      static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(&keyval), sizeof(int)));
  if (elem == nullptr)
    return MPI_ERR_ARG;
  if (elem->delete_fn != MPI_NULL_DELETE_FN) {
    void* value = nullptr;
    int flag;
    if (smpi_comm_attr_get(comm, keyval, &value, &flag) == MPI_SUCCESS) {
      int ret = elem->delete_fn(comm, keyval, value, &flag);
      if (ret != MPI_SUCCESS)
        return ret;
    }
  }
  if (comm->attributes == nullptr)
    return MPI_ERR_ARG;

  xbt_dict_remove_ext(comm->attributes, reinterpret_cast<const char*>(&keyval), sizeof(int));
  return MPI_SUCCESS;
}

int smpi_comm_attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag){
  smpi_comm_key_elem elem =
      static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(&keyval), sizeof(int)));
  if (elem == nullptr)
    return MPI_ERR_ARG;
  if (comm->attributes == nullptr) {
    *flag = 0;
    return MPI_SUCCESS;
  }
  try {
    *static_cast<void**>(attr_value) =
        xbt_dict_get_ext(comm->attributes, reinterpret_cast<const char*>(&keyval), sizeof(int));
    *flag = 1;
  } catch (xbt_ex& ex) {
    *flag = 0; // no attribute stored under this keyval
  }
  return MPI_SUCCESS;
}

int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value){
  if (smpi_comm_keyvals == nullptr)
    smpi_comm_keyvals = xbt_dict_new_homogeneous(nullptr);
  smpi_comm_key_elem elem =
      static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(&keyval), sizeof(int)));
  if (elem == nullptr)
    return MPI_ERR_ARG;
  int flag;
  void* value = nullptr;
  smpi_comm_attr_get(comm, keyval, &value, &flag);
  if (flag != 0 && elem->delete_fn != MPI_NULL_DELETE_FN) {
    // an attribute was already set for this keyval: let its delete callback dispose of it first
    int ret = elem->delete_fn(comm, keyval, value, &flag);
    if (ret != MPI_SUCCESS)
      return ret;
  }
  if (comm->attributes == nullptr)
    comm->attributes = xbt_dict_new_homogeneous(nullptr);

  xbt_dict_set_ext(comm->attributes, reinterpret_cast<const char*>(&keyval), sizeof(int), attr_value, nullptr);
  return MPI_SUCCESS;
}

int smpi_comm_keyval_create(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval,
                            void* extra_state){
  if (smpi_comm_keyvals == nullptr)
    smpi_comm_keyvals = xbt_dict_new_homogeneous(nullptr);

  smpi_comm_key_elem value = static_cast<smpi_comm_key_elem>(xbt_new0(s_smpi_mpi_comm_key_elem_t, 1));

  value->copy_fn   = copy_fn;
  value->delete_fn = delete_fn;

  *keyval = comm_keyval_id;
  xbt_dict_set_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(keyval), sizeof(int), static_cast<void*>(value), nullptr);
  comm_keyval_id++;
  return MPI_SUCCESS;
}

int smpi_comm_keyval_free(int* keyval){
  smpi_comm_key_elem elem =
      static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(keyval), sizeof(int)));
  if (elem == nullptr)
    return MPI_ERR_ARG;
  xbt_dict_remove_ext(smpi_comm_keyvals, reinterpret_cast<const char*>(keyval), sizeof(int));
  xbt_free(elem);
  return MPI_SUCCESS;
}
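
/* Illustrative usage sketch only (not part of SMPI): the full keyval/attribute
 * life cycle, as exercised by MPI_Comm_create_keyval and friends. */
#if 0
static void attribute_lifecycle_example(MPI_Comm comm)
{
  int keyval;
  static int payload = 42;
  smpi_comm_keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &keyval, nullptr);
  smpi_comm_attr_put(comm, keyval, &payload); // attach the value to the communicator

  void* value = nullptr;
  int flag    = 0;
  smpi_comm_attr_get(comm, keyval, &value, &flag);
  // here flag == 1 and value == &payload

  smpi_comm_attr_delete(comm, keyval); // detach (would fire the delete callback)
  smpi_comm_keyval_free(&keyval);      // drop the keyval itself
}
#endif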