/* Copyright (c) 2010-2014. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include <stdlib.h>   /* qsort */
#include <string.h>   /* strcpy, memcpy */
#include <limits.h>   /* INT_MAX */

#include "private.h"
#include "smpi_mpi_dt_private.h"
#include "simix/smx_private.h"
#include "colls/colls.h"
#include "xbt/ex.h"   /* TRY/CATCH, used in smpi_comm_attr_get */
extern xbt_dict_t smpi_keyvals;

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi,
                                "Logging specific to SMPI (comm)");
/* Support for Cartesian topology was added, but there are 2 other types of
 * topology: graph and distributed graph. In order to support them, we have to
 * add a field MPIR_Topo_type, and replace the MPI_Topology field by a union. */
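
/* A minimal sketch of that planned change (hypothetical, not implemented
 * here); the topo field would become a tagged union discriminated by
 * topoType, along these lines:
 *
 *   typedef union u_smpi_mpi_topology {
 *     MPIR_Cart_Topology       cart;
 *     MPIR_Graph_Topology      graph;
 *     MPIR_Dist_Graph_Topology dist_graph;
 *   } u_smpi_mpi_topology_t;
 *
 * The member type names above are assumptions for illustration only. */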
typedef struct s_smpi_mpi_communicator {
  MPI_Group group;
  MPIR_Topo_type topoType;
  MPI_Topology topo;      // to be replaced by a union
  int refcount;
  MPI_Comm leaders_comm;  // inter-node communicator
  MPI_Comm intra_comm;    // intra-node communicator. For MPI_COMM_WORLD this can't be used, as the variable is global;
                          // use the intracomm stored in the process data instead
  int* leaders_map;       // who is the leader of each process
  int is_uniform;         // are all nodes equal in number of processes?
  int* non_uniform_map;   // set if SMP nodes have a different number of processes allocated
  int is_blocked;         // are ranks allocated on the same SMP node contiguous?
  xbt_dict_t attributes;
} s_smpi_mpi_communicator_t;
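
/* Sketch of the two-level layout these fields describe (illustrative example,
 * the numbers are hypothetical): with 4 processes on node A and 2 on node B,
 *
 *   node A: ranks 0 1 2 3   -> intra_comm of size 4, leader = rank 0
 *   node B: ranks 4 5       -> intra_comm of size 2, leader = rank 4
 *
 * leaders_comm then contains {0, 4}; leaders_map[i] gives the leader of rank
 * i (here 0 0 0 0 4 4); is_uniform is 0 (4 != 2 processes per node); and
 * is_blocked is 1, because each node holds a contiguous rank range. */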
static int smpi_compare_rankmap(const void *a, const void *b)
{
  const int* x = (const int*)a;
  const int* y = (const int*)b;

  /* Each entry is a (rank, key) pair: sort by key, break ties by rank. */
  if (x[1] != y[1])
    return (x[1] > y[1]) - (x[1] < y[1]);
  return (x[0] > y[0]) - (x[0] < y[0]);
}
MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo)
{
  MPI_Comm comm;

  comm = xbt_new(s_smpi_mpi_communicator_t, 1);
  comm->group = group;
  smpi_group_use(comm->group);
  comm->refcount = 1;
  comm->topo = topo;
  comm->intra_comm = MPI_COMM_NULL;
  comm->leaders_comm = MPI_COMM_NULL;
  comm->is_uniform = 1;
  comm->non_uniform_map = NULL;
  comm->leaders_map = NULL;
  comm->is_blocked = 0;
  comm->attributes = NULL;
  return comm;
}
void smpi_comm_destroy(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  smpi_group_unuse(comm->group);
  smpi_topo_destroy(comm->topo); // there's no use count on topos
  smpi_comm_unuse(comm);
}
int smpi_comm_dup(MPI_Comm comm, MPI_Comm* newcomm){
  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  (*newcomm) = smpi_comm_new(smpi_comm_group(comm), smpi_comm_topo(comm));
  int ret = MPI_SUCCESS;
  //TODO: make this work with a duplicated communicator (redo an init_smp?)
  /* MPI_Comm tmp=smpi_comm_get_intra_comm(comm);
  if( tmp != MPI_COMM_NULL)
    smpi_comm_set_intra_comm((*newcomm), smpi_comm_dup(tmp));
  tmp=smpi_comm_get_leaders_comm(comm);
  if( tmp != MPI_COMM_NULL)
    smpi_comm_set_leaders_comm((*newcomm), smpi_comm_dup(tmp));
  if(comm->non_uniform_map !=NULL){
    (*newcomm)->non_uniform_map=
      xbt_malloc(smpi_comm_size(comm->leaders_comm)*sizeof(int));
    memcpy((*newcomm)->non_uniform_map,
           comm->non_uniform_map, smpi_comm_size(comm->leaders_comm)*sizeof(int));
  }
  if(comm->leaders_map !=NULL){
    (*newcomm)->leaders_map = xbt_malloc(smpi_comm_size(comm)*sizeof(int));
    memcpy((*newcomm)->leaders_map,
           comm->leaders_map, smpi_comm_size(comm)*sizeof(int));
  } */
  if(comm->attributes != NULL){
    (*newcomm)->attributes = xbt_dict_new();
    xbt_dict_cursor_t cursor = NULL;
    int* key;
    int flag;
    void* value_in;
    void* value_out;
    xbt_dict_foreach(comm->attributes, cursor, key, value_in){
      smpi_key_elem elem = xbt_dict_get_or_null(smpi_keyvals, (const char*)key);
      if(elem && elem->copy_fn != MPI_NULL_COPY_FN){
        ret = elem->copy_fn(comm, *key, NULL, value_in, &value_out, &flag);
        if(ret != MPI_SUCCESS){
          smpi_comm_destroy(*newcomm);
          *newcomm = MPI_COMM_NULL;
          return ret;
        }
        if(flag)
          xbt_dict_set((*newcomm)->attributes, (const char*)key, value_out, NULL);
      }
    }
  }
  return ret;
}
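
/* For context: per standard MPI semantics, duplicating a communicator
 * invokes each keyval's copy callback, which sets flag to decide whether the
 * attribute propagates to the new communicator. A minimal sketch of a
 * user-side copy callback that the loop above would invoke (the name is
 * illustrative, not part of this file):
 *
 *   static int my_copy_fn(MPI_Comm oldcomm, int keyval, void* extra_state,
 *                         void* value_in, void* value_out, int* flag) {
 *     *(void**)value_out = value_in;  // share the same payload
 *     *flag = 1;                      // 1: copy the attribute; 0: drop it
 *     return MPI_SUCCESS;
 *   }
 */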
MPI_Group smpi_comm_group(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();

  return comm->group;
}

MPI_Topology smpi_comm_topo(MPI_Comm comm) {
  if (comm != MPI_COMM_NULL)
    return comm->topo;
  return NULL;
}
int smpi_comm_size(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();

  return smpi_group_size(smpi_comm_group(comm));
}

int smpi_comm_rank(MPI_Comm comm)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return smpi_group_rank(smpi_comm_group(comm), smpi_process_index());
}
void smpi_comm_get_name (MPI_Comm comm, char* name, int* len)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  if(comm == MPI_COMM_WORLD) {
    strcpy(name, "WORLD");
    *len = 5;
  } else {
    *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", comm);
  }
}
void smpi_comm_set_leaders_comm(MPI_Comm comm, MPI_Comm leaders){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  comm->leaders_comm = leaders;
}

void smpi_comm_set_intra_comm(MPI_Comm comm, MPI_Comm leaders){
  comm->intra_comm = leaders;
}

int* smpi_comm_get_non_uniform_map(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->non_uniform_map;
}

int* smpi_comm_get_leaders_map(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->leaders_map;
}

MPI_Comm smpi_comm_get_leaders_comm(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->leaders_comm;
}

MPI_Comm smpi_comm_get_intra_comm(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED || comm == MPI_COMM_WORLD)
    return smpi_process_get_comm_intra();
  else
    return comm->intra_comm;
}

int smpi_comm_is_uniform(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->is_uniform;
}

int smpi_comm_is_blocked(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  return comm->is_blocked;
}
MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key)
{
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  int system_tag = 123;
  int index, rank, size, i, j, count, reqs;
  int* sendbuf;
  int* recvbuf;
  int* rankmap;
  MPI_Group group, group_root, group_out;
  MPI_Request* requests;

  group_root = group_out = NULL;
  group = smpi_comm_group(comm);
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  /* Gather all colors and keys on rank 0 */
  sendbuf = xbt_new(int, 2);
  sendbuf[0] = color;
  sendbuf[1] = key;
  if(rank == 0) {
    recvbuf = xbt_new(int, 2 * size);
  } else {
    recvbuf = NULL;
  }
  smpi_mpi_gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, comm);
  xbt_free(sendbuf);
  /* Do the actual job */
  if(rank == 0) {
    rankmap = xbt_new(int, 2 * size);
    for(i = 0; i < size; i++) {
      if(recvbuf[2 * i] == MPI_UNDEFINED) {
        continue;
      }
      count = 0;
      for(j = i + 1; j < size; j++) {
        if(recvbuf[2 * i] == recvbuf[2 * j]) {
          recvbuf[2 * j] = MPI_UNDEFINED;
          rankmap[2 * count] = j;
          rankmap[2 * count + 1] = recvbuf[2 * j + 1];
          count++;
        }
      }
      /* Add self in the group */
      recvbuf[2 * i] = MPI_UNDEFINED;
      rankmap[2 * count] = i;
      rankmap[2 * count + 1] = recvbuf[2 * i + 1];
      count++;
      qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap);
      group_out = smpi_group_new(count);
      if(i == 0) {
        group_root = group_out; /* Save root's group */
      }
      for(j = 0; j < count; j++) {
        //increment the refcounter in order to avoid freeing the group too quickly before the copy
        index = smpi_group_index(group, rankmap[2 * j]);
        smpi_group_set_mapping(group_out, index, j);
      }
      requests = xbt_new(MPI_Request, count);
      reqs = 0;
      for(j = 0; j < count; j++) {
        if(rankmap[2 * j] != 0) {
          requests[reqs] = smpi_isend_init(&group_out, 1, MPI_PTR, rankmap[2 * j], system_tag, comm);
          reqs++;
        }
      }
      smpi_mpi_startall(reqs, requests);
      smpi_mpi_waitall(reqs, requests, MPI_STATUS_IGNORE);
      xbt_free(requests);
    }
    xbt_free(recvbuf);
    xbt_free(rankmap);
    group_out = group_root; /* exit with root's group */
  } else {
    if(color != MPI_UNDEFINED) {
      smpi_mpi_recv(&group_out, 1, MPI_PTR, 0, system_tag, comm, MPI_STATUS_IGNORE);
      if(group_out)
        group_out = smpi_group_copy(group_out);
    } /* otherwise, exit with group_out == NULL */
  }
  return group_out ? smpi_comm_new(group_out, NULL) : MPI_COMM_NULL;
}
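
/* Usage sketch (standard MPI_Comm_split semantics, values illustrative):
 * splitting a 4-process communicator with color = rank % 2 and key = rank
 * yields two disjoint communicators, {0, 2} and {1, 3}, each ordered by key;
 * a process passing color == MPI_UNDEFINED gets MPI_COMM_NULL back.
 *
 *   MPI_Comm newcomm = smpi_comm_split(comm, rank % 2, rank);
 */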
void smpi_comm_use(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  comm->refcount++;
}
void smpi_comm_unuse(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
  comm->refcount--;
  if(comm->refcount == 0){
    if(comm->intra_comm != MPI_COMM_NULL)
      smpi_comm_unuse(comm->intra_comm);
    if(comm->leaders_comm != MPI_COMM_NULL)
      smpi_comm_unuse(comm->leaders_comm);
    if(comm->non_uniform_map != NULL)
      xbt_free(comm->non_uniform_map);
    if(comm->leaders_map != NULL)
      xbt_free(comm->leaders_map);
    if(comm->attributes != NULL){
      xbt_dict_cursor_t cursor = NULL;
      int* key;
      void* value;
      int flag;
      xbt_dict_foreach(comm->attributes, cursor, key, value){
        smpi_key_elem elem = xbt_dict_get_or_null(smpi_keyvals, (const char*)key);
        if(elem && elem->delete_fn)
          elem->delete_fn(comm, *key, &value, &flag);
      }
      xbt_dict_free(&comm->attributes);
    }
    xbt_free(comm);
  }
}
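
/* Note on the lifetime model: smpi_comm_new() returns with refcount == 1,
 * smpi_comm_use()/smpi_comm_unuse() adjust it, and the last unuse releases
 * the sub-communicators, maps and attributes above. A typical pairing:
 *
 *   smpi_comm_use(comm);    // take a reference before stashing the handle
 *   ...
 *   smpi_comm_unuse(comm);  // drop it; frees the struct when it hits zero
 */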
static int compare_ints (const void *a, const void *b)
{
  const int *da = (const int *) a;
  const int *db = (const int *) b;

  return (*da > *db) - (*da < *db);
}
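
/* The subtraction of comparisons above is the standard overflow-safe qsort
 * comparator idiom: unlike `*da - *db`, it cannot overflow for extreme int
 * values, and it yields exactly -1, 0 or 1. */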
void smpi_comm_init_smp(MPI_Comm comm){
  int leader = -1;

  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();

  int comm_size = smpi_comm_size(comm);

  // If we are in replay - perform an ugly hack:
  // tell SimGrid that we are not in replay for a while, because we need
  // the buffers to be copied for the following calls
  int replaying = 0; //cache the value so we can set it back afterwards
  if(smpi_process_get_replaying()){
    replaying = 1;
    smpi_process_set_replaying(0);
  }

  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  //identify neighbours in comm
  //get the indices of all processes sharing the same simix host
  xbt_swag_t process_list = simcall_host_get_process_list(SIMIX_host_self());
  int intra_comm_size = 0;
  //only one process/node: disable SMP support and return
//  if(intra_comm_size==1){
//      smpi_comm_set_intra_comm(comm, MPI_COMM_SELF);
//      //smpi_comm_set_leaders_comm(comm, comm);
//      smpi_process_set_comm_intra(MPI_COMM_SELF);
//      return;
//  }

  int i = 0;
  int j = 0;
  int min_index = INT_MAX; //the minimum index will be the leader
  msg_process_t process = NULL;
  xbt_swag_foreach(process, process_list) {
    int index = SIMIX_process_get_PID(process) - 1;

    if(smpi_group_rank(smpi_comm_group(comm), index) != MPI_UNDEFINED){
      intra_comm_size++;
      //the process is in the comm
      if(index < min_index)
        min_index = index;
    }
  }
  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
  MPI_Group group_intra = smpi_group_new(intra_comm_size);
  i = 0;
  xbt_swag_foreach(process, process_list) {
    int index = SIMIX_process_get_PID(process) - 1;
    if(smpi_group_rank(smpi_comm_group(comm), index) != MPI_UNDEFINED){
      smpi_group_set_mapping(group_intra, index, i);
      i++;
    }
  }

  MPI_Comm comm_intra = smpi_comm_new(group_intra, NULL);
  //MPI_Comm shmem_comm = smpi_process_comm_intra();
  //int intra_rank = smpi_comm_rank(shmem_comm);

  //if(smpi_process_index()==min_index)
  leader = min_index;

  int* leaders_map = (int*)xbt_malloc0(sizeof(int) * comm_size);
  int* leader_list = (int*)xbt_malloc0(sizeof(int) * comm_size);
  for(i = 0; i < comm_size; i++){
    leader_list[i] = -1;
  }

  smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, comm);

  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }

  if(!comm->leaders_map){
    comm->leaders_map = leaders_map;
  }else{
    xbt_free(leaders_map);
  }

  int leader_group_size = 0;
  for(i = 0; i < comm_size; i++){
    int already_done = 0;
    for(j = 0; j < leader_group_size; j++){
      if(comm->leaders_map[i] == leader_list[j]){
        already_done = 1;
      }
    }
    if(!already_done){
      leader_list[leader_group_size] = comm->leaders_map[i];
      leader_group_size++;
    }
  }
  qsort(leader_list, leader_group_size, sizeof(int), compare_ints);

  MPI_Group leaders_group = smpi_group_new(leader_group_size);

  MPI_Comm leader_comm = MPI_COMM_NULL;
  if(MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED && comm != MPI_COMM_WORLD){
    //create the leader communicator
    for (i = 0; i < leader_group_size; i++)
      smpi_group_set_mapping(leaders_group, leader_list[i], i);
    leader_comm = smpi_comm_new(leaders_group, NULL);
    smpi_comm_set_leaders_comm(comm, leader_comm);
    smpi_comm_set_intra_comm(comm, comm_intra);

    //create the intracommunicator
    // smpi_comm_set_intra_comm(comm, smpi_comm_split(comm, *(int*)SIMIX_host_self(), comm_rank));
  }else{
    for (i = 0; i < leader_group_size; i++)
      smpi_group_set_mapping(leaders_group, leader_list[i], i);

    leader_comm = smpi_comm_new(leaders_group, NULL);
    if(smpi_comm_get_leaders_comm(comm) == MPI_COMM_NULL)
      smpi_comm_set_leaders_comm(comm, leader_comm);
    smpi_process_set_comm_intra(comm_intra);
  }

  // Are the nodes uniform? (i.e., same number of processes per node)
  int my_local_size = smpi_comm_size(comm_intra);
  if(smpi_comm_rank(comm_intra) == 0) {
    int is_uniform = 1;
    int* non_uniform_map = xbt_malloc0(sizeof(int) * leader_group_size);
    smpi_coll_tuned_allgather_mpich(&my_local_size, 1, MPI_INT,
        non_uniform_map, 1, MPI_INT, leader_comm);
    for(i = 0; i < leader_group_size; i++) {
      if(non_uniform_map[0] != non_uniform_map[i]) {
        is_uniform = 0;
        break;
      }
    }
    if(!is_uniform && smpi_comm_is_uniform(comm)){
      comm->non_uniform_map = non_uniform_map;
    }else{
      xbt_free(non_uniform_map);
    }
    comm->is_uniform = is_uniform;
  }
  smpi_coll_tuned_bcast_mpich(&(comm->is_uniform), 1, MPI_INT, 0, comm_intra);

  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
    smpi_switch_data_segment(smpi_process_index());
  }
  // Are the ranks blocked? (i.e., allocated contiguously on the SMP nodes)
  int is_blocked = 1;
  int prev = smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), 0));
  for (i = 1; i < my_local_size; i++){
    int this = smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), i));
    if(this != prev + 1){
      is_blocked = 0;
      break;
    }
    prev = this;
  }

  int global_blocked;
  smpi_mpi_allreduce(&is_blocked, &(global_blocked), 1,
                     MPI_INT, MPI_LAND, comm);

  if(MPI_COMM_WORLD == MPI_COMM_UNINITIALIZED || comm == MPI_COMM_WORLD){
    if(smpi_comm_rank(comm) == 0){
      comm->is_blocked = global_blocked;
    }
  }else{
    comm->is_blocked = global_blocked;
  }
  xbt_free(leader_list);

  if(replaying == 1)
    smpi_process_set_replaying(1);
}
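
/* Walking through the detection above on a hypothetical deployment of 6
 * processes, 3 per node on 2 nodes: each process lists the simix processes
 * of its host to build comm_intra ({0,1,2} and {3,4,5}); the smallest global
 * index per host becomes the leader (0 and 3); an allgather publishes
 * leaders_map = {0,0,0,3,3,3}; deduplication yields leader_list = {0,3} for
 * the leaders' communicator; both nodes hold 3 processes, so is_uniform
 * stays 1; and the contiguous rank ranges make the allreduce keep
 * is_blocked = 1. */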
int smpi_comm_attr_delete(MPI_Comm comm, int keyval){
  if(comm->attributes == NULL)
    return MPI_ERR_ARG;

  xbt_dict_remove(comm->attributes, (const char*)&keyval);
  return MPI_SUCCESS;
}
int smpi_comm_attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag){
  xbt_ex_t ex;
  if(comm->attributes == NULL){
    *flag = 0;
    return MPI_SUCCESS;
  }
  TRY {
    *(void**)attr_value = xbt_dict_get(comm->attributes, (const char*)&keyval);
    *flag = 1;
  }
  CATCH(ex) { // xbt_dict_get throws when the key is absent
    *flag = 0;
    xbt_ex_free(ex);
  }
  return MPI_SUCCESS;
}
int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value){
  if(comm->attributes == NULL)
    comm->attributes = xbt_dict_new();

  xbt_dict_set(comm->attributes, (const char*)&keyval, attr_value, NULL);
  return MPI_SUCCESS;
}
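
/* The attribute helpers above key the dict with the raw bytes of the int
 * keyval cast to a string; note that this appears to rely on those bytes
 * forming a usable NUL-terminated key. A usage sketch (the keyval is assumed
 * to have been registered in smpi_keyvals beforehand):
 *
 *   void* out; int flag;
 *   smpi_comm_attr_put(comm, keyval, payload);
 *   smpi_comm_attr_get(comm, keyval, &out, &flag);  // flag==1, out==payload
 *   smpi_comm_attr_delete(comm, keyval);
 */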