/* Copyright (c) 2010-2015. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#include "smpi_mpi_dt_private.h"
#include "src/simix/smx_private.h"
#include "colls/colls.h"
#include "xbt/ex.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi,
                                "Logging specific to SMPI (comm)");
xbt_dict_t smpi_comm_keyvals = NULL;
int comm_keyval_id = 0; // avoid collisions
/* Support for cartesian topology was added, but there are 2 other types of
 * topology: graph and dist graph. In order to support them, we have to add a
 * MPIR_Topo_type field, and replace the MPI_Topology field by a union. */
typedef struct s_smpi_mpi_communicator {
  MPI_Group group;
  MPIR_Topo_type topoType;
  MPI_Topology topo;     // to be replaced by a union
  int refcount;
  MPI_Comm leaders_comm; // inter-node communicator
  MPI_Comm intra_comm;   // intra-node communicator. For MPI_COMM_WORLD this
                         // can't be used, as the variable is global; use the
                         // intracomm stored in the process data instead
  int* leaders_map;      // who is the leader of each process
  int is_uniform;        // do all smp nodes run the same number of processes?
  int* non_uniform_map;  // set if smp nodes have a different number of processes allocated
  int is_blocked;        // are ranks allocated on the same smp node contiguous?
  xbt_dict_t attributes;
} s_smpi_mpi_communicator_t;
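
/* qsort() comparator for the (rank, key) pairs stored in the rank map built by
 * smpi_comm_split() below: sort by key first and break ties with the original
 * rank, as required by the MPI_Comm_split ordering rules. */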
static int smpi_compare_rankmap(const void *a, const void *b)
{
  const int* x = (const int*)a;
  const int* y = (const int*)b;

  if (x[1] != y[1])
    return (x[1] > y[1]) - (x[1] < y[1]);
  return (x[0] > y[0]) - (x[0] < y[0]);
}
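
/* Allocate a communicator for the given group and topology. The group is
 * referenced, and the new communicator starts with a reference count of 1. */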
MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo)
{
  MPI_Comm comm = xbt_new(s_smpi_mpi_communicator_t, 1);
  comm->group = group;
  smpi_group_use(comm->group);
  comm->refcount = 1;
  comm->topo = topo;
  comm->intra_comm = MPI_COMM_NULL;
  comm->leaders_comm = MPI_COMM_NULL;
  comm->is_uniform = 1;
  comm->non_uniform_map = NULL;
  comm->leaders_map = NULL;
  comm->is_blocked = 0;
  comm->attributes = NULL;
  return comm;
}
void smpi_comm_destroy(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  smpi_group_unuse(comm->group);
  smpi_topo_destroy(comm->topo); // there's no use count on topos
  smpi_comm_unuse(comm);
}
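
/* Duplicate a communicator: the group and topology are shared with the
 * original, while attributes are copied through their registered MPI copy
 * callbacks (MPI_NULL_COPY_FN entries are skipped). */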
int smpi_comm_dup(MPI_Comm comm, MPI_Comm* newcomm){
  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  (*newcomm) = smpi_comm_new(smpi_comm_group(comm), smpi_comm_topo(comm));
  int ret = MPI_SUCCESS;
  //todo: make this work with a duplicated communicator (redo an init_smp?)
  /* MPI_Comm tmp=smpi_comm_get_intra_comm(comm);
  if( tmp != MPI_COMM_NULL)
    smpi_comm_set_intra_comm((*newcomm), smpi_comm_dup(tmp));
  tmp=smpi_comm_get_leaders_comm(comm);
  if( tmp != MPI_COMM_NULL)
    smpi_comm_set_leaders_comm((*newcomm), smpi_comm_dup(tmp));
  if(comm->non_uniform_map !=NULL){
    (*newcomm)->non_uniform_map=
      xbt_malloc(smpi_comm_size(comm->leaders_comm)*sizeof(int));
    memcpy((*newcomm)->non_uniform_map,
      comm->non_uniform_map,smpi_comm_size(comm->leaders_comm)*sizeof(int) );
  }
  if(comm->leaders_map !=NULL){
    (*newcomm)->leaders_map=xbt_malloc(smpi_comm_size(comm)*sizeof(int));
    memcpy((*newcomm)->leaders_map,
      comm->leaders_map,smpi_comm_size(comm)*sizeof(int) );
  } */
  if(comm->attributes !=NULL){
    (*newcomm)->attributes=xbt_dict_new();
    xbt_dict_cursor_t cursor = NULL;
    int* key;
    int flag;
    void* value_in;
    void* value_out;
    xbt_dict_foreach(comm->attributes, cursor, key, value_in){
      smpi_comm_key_elem elem = xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)key, sizeof(int));
      if(elem && elem->copy_fn!=MPI_NULL_COPY_FN){
        ret = elem->copy_fn(comm, *key, NULL, value_in, &value_out, &flag );
        if(ret!=MPI_SUCCESS){
          smpi_comm_destroy(*newcomm);
          *newcomm=MPI_COMM_NULL;
          return ret;
        }
        if(flag)
          xbt_dict_set_ext((*newcomm)->attributes, (const char*)key, sizeof(int), value_out, NULL);
      }
    }
  }
  return ret;
}
MPI_Group smpi_comm_group(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->group;
}
MPI_Topology smpi_comm_topo(MPI_Comm comm) {
  if (comm != MPI_COMM_NULL)
    return comm->topo;
  return NULL;
}
int smpi_comm_size(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return smpi_group_size(smpi_comm_group(comm));
}
int smpi_comm_rank(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return smpi_group_rank(smpi_comm_group(comm), smpi_process_index());
}
void smpi_comm_get_name (MPI_Comm comm, char* name, int* len)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  if(comm == MPI_COMM_WORLD) {
    strcpy(name, "WORLD");
    *len = 5;
  } else {
    *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", comm);
  }
}
void smpi_comm_set_leaders_comm(MPI_Comm comm, MPI_Comm leaders){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  comm->leaders_comm=leaders;
}
void smpi_comm_set_intra_comm(MPI_Comm comm, MPI_Comm intra){
  comm->intra_comm=intra;
}
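
/* Accessors for the SMP-related data computed by smpi_comm_init_smp() below */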
int* smpi_comm_get_non_uniform_map(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->non_uniform_map;
}
int* smpi_comm_get_leaders_map(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->leaders_map;
}
MPI_Comm smpi_comm_get_leaders_comm(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->leaders_comm;
}
MPI_Comm smpi_comm_get_intra_comm(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED || comm==MPI_COMM_WORLD)
    return smpi_process_get_comm_intra();
  else return comm->intra_comm;
}
int smpi_comm_is_uniform(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->is_uniform;
}
int smpi_comm_is_blocked(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->is_blocked;
}
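
/* MPI_Comm_split: every process sends its (color, key) pair to rank 0, which
 * builds one group per color (members sorted by key) and sends each group back
 * to its members. Processes that passed MPI_UNDEFINED get MPI_COMM_NULL. */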
MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  int system_tag = 123;
  int index, rank, size, i, j, count, reqs;
  int* sendbuf;
  int* recvbuf;
  int* rankmap;
  MPI_Group group, group_root, group_out;
  MPI_Request* requests;

  group_root = group_out = NULL;
  group = smpi_comm_group(comm);
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  /* Gather all colors and keys on rank 0 */
  sendbuf = xbt_new(int, 2);
  sendbuf[0] = color;
  sendbuf[1] = key;
  recvbuf = NULL;
  if(rank == 0) {
    recvbuf = xbt_new(int, 2 * size);
  }
  smpi_mpi_gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, comm);
  xbt_free(sendbuf);
  /* Do the actual job */
  if(rank == 0) {
    rankmap = xbt_new(int, 2 * size);
    for(i = 0; i < size; i++) {
      if(recvbuf[2 * i] == MPI_UNDEFINED) {
        continue;
      }
      count = 0;
      for(j = i + 1; j < size; j++) {
        if(recvbuf[2 * i] == recvbuf[2 * j]) {
          recvbuf[2 * j] = MPI_UNDEFINED;
          rankmap[2 * count] = j;
          rankmap[2 * count + 1] = recvbuf[2 * j + 1];
          count++;
        }
      }
      /* Add self in the group */
      recvbuf[2 * i] = MPI_UNDEFINED;
      rankmap[2 * count] = i;
      rankmap[2 * count + 1] = recvbuf[2 * i + 1];
      count++;
      qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap);
      group_out = smpi_group_new(count);
      if(i == 0) {
        group_root = group_out; /* Save root's group */
      }
      for(j = 0; j < count; j++) {
        //increment refcounter in order to avoid freeing the group too quickly before copy
        index = smpi_group_index(group, rankmap[2 * j]);
        smpi_group_set_mapping(group_out, index, j);
      }
      requests = xbt_new(MPI_Request, count);
      reqs = 0;
      for(j = 0; j < count; j++) {
        if(rankmap[2 * j] != 0) {
          requests[reqs] = smpi_isend_init(&group_out, 1, MPI_PTR, rankmap[2 * j], system_tag, comm);
          reqs++;
        }
      }
      smpi_mpi_startall(reqs, requests);
      smpi_mpi_waitall(reqs, requests, MPI_STATUS_IGNORE);
      xbt_free(requests);
    }
    xbt_free(recvbuf);
    xbt_free(rankmap);
    group_out = group_root; /* exit with root's group */
  } else {
    if(color != MPI_UNDEFINED) {
      smpi_mpi_recv(&group_out, 1, MPI_PTR, 0, system_tag, comm, MPI_STATUS_IGNORE);
      if(group_out)
        group_out = smpi_group_copy(group_out);
    } /* otherwise, exit with group_out == NULL */
  }
  return group_out ? smpi_comm_new(group_out, NULL) : MPI_COMM_NULL;
}
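
/* Reference counting: when the count drops to zero, the communicator is freed
 * together with the sub-communicators and maps it owns. */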
void smpi_comm_use(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  comm->refcount++;
}
void smpi_comm_unuse(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  comm->refcount--;
  if(comm->refcount==0){
    if(comm->intra_comm != MPI_COMM_NULL)
      smpi_comm_unuse(comm->intra_comm);
    if(comm->leaders_comm != MPI_COMM_NULL)
      smpi_comm_unuse(comm->leaders_comm);
    if(comm->non_uniform_map !=NULL)
      xbt_free(comm->non_uniform_map);
    if(comm->leaders_map !=NULL)
      xbt_free(comm->leaders_map);
    if(comm->attributes !=NULL){
      xbt_dict_cursor_t cursor = NULL;
      int* key;
      void* value;
      int flag;
      xbt_dict_foreach(comm->attributes, cursor, key, value){
        smpi_comm_key_elem elem = xbt_dict_get_or_null(smpi_comm_keyvals, (const char*)key);
        if(elem && elem->delete_fn)
          elem->delete_fn(comm, *key, value, &flag);
      }
      xbt_dict_free(&comm->attributes);
    }
    xbt_free(comm);
  }
}
static int compare_ints (const void *a, const void *b)
{
  const int *da = (const int *) a;
  const int *db = (const int *) b;

  return (*da > *db) - (*da < *db);
}
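
/* Detect the SMP topology: the processes sharing a simix host form an
 * intra-node communicator, the lowest-indexed process of each node becomes the
 * node leader, and the leaders form an inter-node communicator. This also
 * records whether the nodes are uniform (same number of processes on each) and
 * whether the ranks are blocked (allocated contiguously on each node). */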
void smpi_comm_init_smp(MPI_Comm comm){
  int leader = -1;

  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();

  int comm_size = smpi_comm_size(comm);

  // If we are in replay - perform an ugly hack:
  // tell SimGrid that we are not in replay for a while, because we need
  // the buffers to be copied for the following calls
  int replaying = 0; //cache the value to set it back again afterwards
  if(smpi_process_get_replaying()){
    replaying = 1;
    smpi_process_set_replaying(0);
  }

  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  //identify neighbours in comm
  //get the indexes of all processes sharing the same simix host
  xbt_swag_t process_list = simcall_host_get_process_list(SIMIX_host_self());
  int intra_comm_size = 0;
  //only one process/node: disable SMP support and return
//  if(intra_comm_size==1){
//      smpi_comm_set_intra_comm(comm, MPI_COMM_SELF);
//      //smpi_comm_set_leaders_comm(comm, comm);
//      smpi_process_set_comm_intra(MPI_COMM_SELF);
//      return;
//  }

  int min_index = INT_MAX; //the minimum index will be the leader
  smx_process_t process = NULL;
  xbt_swag_foreach(process, process_list) {
    int index = SIMIX_process_get_PID(process) - 1;

    if(smpi_group_rank(smpi_comm_group(comm), index) != MPI_UNDEFINED){
      //the process is in the comm
      intra_comm_size++;
      if(index < min_index)
        min_index = index;
    }
  }
  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
  MPI_Group group_intra = smpi_group_new(intra_comm_size);
  int i = 0;
  xbt_swag_foreach(process, process_list) {
    int index = SIMIX_process_get_PID(process) - 1;
    if(smpi_group_rank(smpi_comm_group(comm), index) != MPI_UNDEFINED){
      smpi_group_set_mapping(group_intra, index, i);
      i++;
    }
  }

  MPI_Comm comm_intra = smpi_comm_new(group_intra, NULL);
  //MPI_Comm shmem_comm = smpi_process_comm_intra();
  //int intra_rank = smpi_comm_rank(shmem_comm);

  //if(smpi_process_index()==min_index)
  leader = min_index;

  int* leaders_map = (int*)xbt_malloc0(sizeof(int) * comm_size);
  int* leader_list = (int*)xbt_malloc0(sizeof(int) * comm_size);
  for(i = 0; i < comm_size; i++){
    leader_list[i] = -1;
  }

  smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, comm);

  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }

  if(!comm->leaders_map){
    comm->leaders_map = leaders_map;
  }else{
    xbt_free(leaders_map);
  }

  int j = 0;
  int leader_group_size = 0;
  for(i = 0; i < comm_size; i++){
    int already_done = 0;
    for(j = 0; j < leader_group_size; j++){
      if(comm->leaders_map[i] == leader_list[j]){
        already_done = 1;
      }
    }
    if(!already_done){
      leader_list[leader_group_size] = comm->leaders_map[i];
      leader_group_size++;
    }
  }
  qsort(leader_list, leader_group_size, sizeof(int), compare_ints);

  MPI_Group leaders_group = smpi_group_new(leader_group_size);

  MPI_Comm leader_comm = MPI_COMM_NULL;
  if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && comm!=MPI_COMM_WORLD){
    //create the leader communicator
    for (i = 0; i < leader_group_size; i++)
      smpi_group_set_mapping(leaders_group, leader_list[i], i);
    leader_comm = smpi_comm_new(leaders_group, NULL);
    smpi_comm_set_leaders_comm(comm, leader_comm);
    smpi_comm_set_intra_comm(comm, comm_intra);

    //create the intra communicator
    // smpi_comm_set_intra_comm(comm, smpi_comm_split(comm, *(int*)SIMIX_host_self(), comm_rank));
  }else{
    for (i = 0; i < leader_group_size; i++)
      smpi_group_set_mapping(leaders_group, leader_list[i], i);

    leader_comm = smpi_comm_new(leaders_group, NULL);
    if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL)
      smpi_comm_set_leaders_comm(comm, leader_comm);
    smpi_process_set_comm_intra(comm_intra);
  }

  // Are the nodes uniform? i.e. is the same number of processes deployed on each node?
  int is_uniform = 1;
  int my_local_size = smpi_comm_size(comm_intra);
  if(smpi_comm_rank(comm_intra)==0) {
    int* non_uniform_map = xbt_malloc0(sizeof(int) * leader_group_size);
    smpi_coll_tuned_allgather_mpich(&my_local_size, 1, MPI_INT,
        non_uniform_map, 1, MPI_INT, leader_comm);
    for(i = 0; i < leader_group_size; i++) {
      if(non_uniform_map[0] != non_uniform_map[i]) {
        is_uniform = 0;
        break;
      }
    }
    if(!is_uniform && smpi_comm_is_uniform(comm)){
      comm->non_uniform_map = non_uniform_map;
    }else{
      xbt_free(non_uniform_map);
    }
    comm->is_uniform = is_uniform;
  }
  smpi_coll_tuned_bcast_mpich(&(comm->is_uniform), 1, MPI_INT, 0, comm_intra);

  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  // Are the ranks blocked? i.e. are they allocated contiguously on the SMP nodes?
  int is_blocked = 1;
  int prev = smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), 0));
  for (i = 1; i < my_local_size; i++){
    int this = smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), i));
    if(this != prev + 1){
      is_blocked = 0;
      break;
    }
    prev = this;
  }

  int global_blocked;
  smpi_mpi_allreduce(&is_blocked, &global_blocked, 1, MPI_INT, MPI_LAND, comm);

  if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || comm==MPI_COMM_WORLD){
    if(smpi_comm_rank(comm)==0){
      comm->is_blocked = global_blocked;
    }
  }else{
    comm->is_blocked = global_blocked;
  }
  xbt_free(leader_list);

  if(replaying)
    smpi_process_set_replaying(1);
}
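
/* Communicator attributes (MPI_Comm_create_keyval and friends): keyvals live
 * in the global smpi_comm_keyvals dict, and each communicator stores its
 * attribute values in its own dict, indexed by keyval id. */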
int smpi_comm_attr_delete(MPI_Comm comm, int keyval){
  smpi_comm_key_elem elem = xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int));
  if(!elem)
    return MPI_ERR_ARG;
  if(elem->delete_fn!=MPI_NULL_DELETE_FN){
    void* value;
    int flag;
    if(smpi_comm_attr_get(comm, keyval, &value, &flag)==MPI_SUCCESS){
      int ret = elem->delete_fn(comm, keyval, value, &flag);
      if(ret!=MPI_SUCCESS) return ret;
    }
  }
  if(comm->attributes==NULL)
    return MPI_ERR_ARG;

  xbt_dict_remove_ext(comm->attributes, (const char*)&keyval, sizeof(int));
  return MPI_SUCCESS;
}
int smpi_comm_attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag){
  smpi_comm_key_elem elem = xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int));
  if(!elem)
    return MPI_ERR_ARG;
  if(comm->attributes==NULL){
    *flag = 0;
    return MPI_SUCCESS;
  }
  xbt_ex_t ex;
  TRY {
    *(void**)attr_value = xbt_dict_get_ext(comm->attributes, (const char*)&keyval, sizeof(int));
    *flag = 1;
  }
  CATCH(ex) {
    *flag = 0;
    xbt_ex_free(ex);
  }
  return MPI_SUCCESS;
}
int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value){
  if(!smpi_comm_keyvals)
    smpi_comm_keyvals = xbt_dict_new();
  smpi_comm_key_elem elem = xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int));
  if(!elem)
    return MPI_ERR_ARG;
  int flag;
  void* value;
  // if an attribute was already attached to this keyval, delete it first
  smpi_comm_attr_get(comm, keyval, &value, &flag);
  if(flag && elem->delete_fn!=MPI_NULL_DELETE_FN){
    int ret = elem->delete_fn(comm, keyval, value, &flag);
    if(ret!=MPI_SUCCESS) return ret;
  }
  if(comm->attributes==NULL)
    comm->attributes=xbt_dict_new();

  xbt_dict_set_ext(comm->attributes, (const char*)&keyval, sizeof(int), attr_value, NULL);
  return MPI_SUCCESS;
}
int smpi_comm_keyval_create(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval, void* extra_state){
  if(!smpi_comm_keyvals)
    smpi_comm_keyvals = xbt_dict_new();

  smpi_comm_key_elem value = (smpi_comm_key_elem) xbt_new0(s_smpi_mpi_comm_key_elem_t, 1);
  value->copy_fn = copy_fn;
  value->delete_fn = delete_fn;

  *keyval = comm_keyval_id;
  xbt_dict_set_ext(smpi_comm_keyvals, (const char*)keyval, sizeof(int), (void*)value, NULL);
  comm_keyval_id++;
  return MPI_SUCCESS;
}
int smpi_comm_keyval_free(int* keyval){
  smpi_comm_key_elem elem = xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)keyval, sizeof(int));
  if(!elem)
    return MPI_ERR_ARG;
  xbt_dict_remove_ext(smpi_comm_keyvals, (const char*)keyval, sizeof(int));
  xbt_free(elem);
  return MPI_SUCCESS;
}