X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/7c14726fd0862a988b835b24e85c6c5b83b81900..8a4ded6ee6330d304e1d3f69100eb7ec55c5fd9c:/src/smpi/smpi_comm.c

diff --git a/src/smpi/smpi_comm.c b/src/smpi/smpi_comm.c
index d71c475df1..d108f6d5f9 100644
--- a/src/smpi/smpi_comm.c
+++ b/src/smpi/smpi_comm.c
@@ -7,11 +7,15 @@
 #include 
 #include "private.h"
+#include "xbt/dict.h"
 #include "smpi_mpi_dt_private.h"
 #include "limits.h"
 #include "simix/smx_private.h"
 #include "colls/colls.h"
+#include "xbt/ex.h"
+
+extern xbt_dict_t smpi_keyvals;
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi,
                                 "Logging specific to SMPI (comm)");
@@ -33,6 +37,7 @@ typedef struct s_smpi_mpi_communicator {
   int is_uniform;
   int* non_uniform_map; //set if smp nodes have a different number of processes allocated
   int is_blocked;// are ranks allocated on the same smp node contiguous ?
+  xbt_dict_t attributes;
 } s_smpi_mpi_communicator_t;
 
 static int smpi_compare_rankmap(const void *a, const void *b)
@@ -71,6 +76,7 @@ MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo)
   comm->non_uniform_map = NULL;
   comm->leaders_map = NULL;
   comm->is_blocked=0;
+  comm->attributes=NULL;
   return comm;
 }
 
@@ -83,6 +89,32 @@ void smpi_comm_destroy(MPI_Comm comm)
   smpi_comm_unuse(comm);
 }
 
+MPI_Comm smpi_comm_dup(MPI_Comm comm){
+  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
+     smpi_switch_data_segment(smpi_process_index());
+  }
+  MPI_Comm newcomm = smpi_comm_new(smpi_comm_group(comm), smpi_comm_topo(comm));
+
+  if(comm->attributes !=NULL){
+    newcomm->attributes=xbt_dict_new();
+    xbt_dict_cursor_t cursor = NULL;
+    int *key;
+    int flag;
+    void* value_in;
+    void* value_out;
+    xbt_dict_foreach(comm->attributes, cursor, key, value_in){
+      smpi_key_elem elem = xbt_dict_get_or_null(smpi_keyvals, (const char*)key);
+      if(elem && elem->copy_fn!=MPI_NULL_COPY_FN){
+        elem->copy_fn(comm, *key, NULL, value_in, &value_out, &flag );
+        if(flag)
+          xbt_dict_set(newcomm->attributes, (const char*)key, value_out, NULL);
+      }
+    }
+  }
+  return newcomm;
+}
+
+
 MPI_Group smpi_comm_group(MPI_Comm comm)
 {
   if (comm == MPI_COMM_UNINITIALIZED)
@@ -125,6 +157,8 @@ void smpi_comm_get_name (MPI_Comm comm, char* name, int* len)
 }
 
 void smpi_comm_set_leaders_comm(MPI_Comm comm, MPI_Comm leaders){
+  if (comm == MPI_COMM_UNINITIALIZED)
+    comm = smpi_process_comm_world();
   comm->leaders_comm=leaders;
 }
 
@@ -133,27 +167,38 @@ void smpi_comm_set_intra_comm(MPI_Comm comm, MPI_Comm leaders){
 }
 
 int* smpi_comm_get_non_uniform_map(MPI_Comm comm){
+  if (comm == MPI_COMM_UNINITIALIZED)
+    comm = smpi_process_comm_world();
   return comm->non_uniform_map;
 }
 
 int* smpi_comm_get_leaders_map(MPI_Comm comm){
+  if (comm == MPI_COMM_UNINITIALIZED)
+    comm = smpi_process_comm_world();
   return comm->leaders_map;
 }
 
 MPI_Comm smpi_comm_get_leaders_comm(MPI_Comm comm){
+  if (comm == MPI_COMM_UNINITIALIZED)
+    comm = smpi_process_comm_world();
   return comm->leaders_comm;
 }
 
 MPI_Comm smpi_comm_get_intra_comm(MPI_Comm comm){
-  if(comm==MPI_COMM_WORLD) return smpi_process_get_comm_intra();
+  if (comm == MPI_COMM_UNINITIALIZED || comm==MPI_COMM_WORLD)
+    return smpi_process_get_comm_intra();
   else return comm->intra_comm;
 }
 
 int smpi_comm_is_uniform(MPI_Comm comm){
+  if (comm == MPI_COMM_UNINITIALIZED)
+    comm = smpi_process_comm_world();
   return comm->is_uniform;
 }
 
 int smpi_comm_is_blocked(MPI_Comm comm){
+  if (comm == MPI_COMM_UNINITIALIZED)
+    comm = smpi_process_comm_world();
   return comm->is_blocked;
 }
 
@@ -259,6 +304,17 @@ void smpi_comm_unuse(MPI_Comm comm){
       xbt_free(comm->non_uniform_map);
     if(comm->leaders_map !=NULL)
       xbt_free(comm->leaders_map);
+    if(comm->attributes !=NULL){
+      xbt_dict_cursor_t cursor = NULL;
+      char *key;
+      smpi_key_elem elem;
+      void * value;
+      int flag;
+      xbt_dict_foreach(comm->attributes, cursor, key, elem){
+        if(smpi_attr_get(comm, (int)key, &value, &flag)==MPI_SUCCESS)
+          elem->delete_fn(comm, (int)key, &value, &flag);
+      }
+    }
     xbt_free(comm);
   }
 }
@@ -274,7 +330,20 @@ compare_ints (const void *a, const void *b)
 void smpi_comm_init_smp(MPI_Comm comm){
   int leader = -1;
+
+  if (comm == MPI_COMM_UNINITIALIZED)
+    comm = smpi_process_comm_world();
+
   int comm_size =smpi_comm_size(comm);
+
+  // If we are in replay - perform an ugly hack
+  // say to SimGrid that we are not in replay for a while, because we need
+  // the buffers to be copied for the following calls
+  int replaying = 0; //cache data to set it back again after
+  if(smpi_process_get_replaying()){
+    replaying=1;
+    smpi_process_set_replaying(0);
+  }
 
   if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
      smpi_switch_data_segment(smpi_process_index());
@@ -290,7 +359,7 @@ void smpi_comm_init_smp(MPI_Comm comm){
 //  smpi_process_set_comm_intra(MPI_COMM_SELF);
 //  return;
 //  }
-  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
+
   int i =0;
   int min_index=INT_MAX;//the minimum index will be the leader
@@ -307,7 +376,7 @@ void smpi_comm_init_smp(MPI_Comm comm){
       i++;
     }
   }
-
+  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
   MPI_Group group_intra = smpi_group_new(intra_comm_size);
   i=0;
   process = NULL;
@@ -337,6 +406,10 @@ void smpi_comm_init_smp(MPI_Comm comm){
   smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, comm);
 
+  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
+     smpi_switch_data_segment(smpi_process_index());
+  }
+
   if(!comm->leaders_map){
     comm->leaders_map= leaders_map;
   }else{
@@ -362,7 +435,7 @@ void smpi_comm_init_smp(MPI_Comm comm){
   MPI_Comm leader_comm = MPI_COMM_NULL;
-  if(comm!=MPI_COMM_WORLD){
+  if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && comm!=MPI_COMM_WORLD){
     //create leader_communicator
     for (i=0; i< leader_group_size;i++)
       smpi_group_set_mapping(leaders_group, leader_list[i], i);
@@ -387,7 +460,7 @@ void smpi_comm_init_smp(MPI_Comm comm){
   // Are the nodes uniform ? = same number of process/node
   int my_local_size=smpi_comm_size(comm_intra);
   if(smpi_comm_rank(comm_intra)==0) {
-    int* non_uniform_map = xbt_malloc(sizeof(int)*leader_group_size);
+    int* non_uniform_map = xbt_malloc0(sizeof(int)*leader_group_size);
     smpi_coll_tuned_allgather_mpich(&my_local_size, 1, MPI_INT, non_uniform_map, 1, MPI_INT, leader_comm);
     for(i=0; i < leader_group_size; i++) {
@@ -405,7 +478,9 @@ void smpi_comm_init_smp(MPI_Comm comm){
   }
   smpi_coll_tuned_bcast_mpich(&(comm->is_uniform),1, MPI_INT, 0, comm_intra );
-
+  if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
+     smpi_switch_data_segment(smpi_process_index());
+  }
   // Are the ranks blocked ? = allocated contiguously on the SMP nodes
   int is_blocked=1;
   int prev=smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), 0));
@@ -422,7 +497,7 @@ void smpi_comm_init_smp(MPI_Comm comm){
 
   smpi_mpi_allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, comm);
-  if(comm==MPI_COMM_WORLD){
+  if(MPI_COMM_WORLD==SMPI_UNINITIALIZED || comm==MPI_COMM_WORLD){
     if(smpi_comm_rank(comm)==0){
         comm->is_blocked=global_blocked;
     }
@@ -430,5 +505,42 @@ void smpi_comm_init_smp(MPI_Comm comm){
       comm->is_blocked=global_blocked;
   }
   xbt_free(leader_list);
+
+  if(replaying==1)
+    smpi_process_set_replaying(1);
+}
+
+
+int smpi_comm_attr_delete(MPI_Comm comm, int keyval){
+  if(comm->attributes==NULL)
+    return MPI_ERR_ARG;
+
+  xbt_dict_remove(comm->attributes, (const char*)&keyval);
+  return MPI_SUCCESS;
+}
+int smpi_comm_attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag){
+  xbt_ex_t ex;
+  if(comm->attributes==NULL){
+    *flag=0;
+    return MPI_SUCCESS;
+  }
+  TRY {
+    *(void**)attr_value = xbt_dict_get(comm->attributes, (const char*)&keyval);
+    *flag=1;
+  }
+  CATCH(ex) {
+    *flag=0;
+    xbt_ex_free(ex);
+  }
+
+  return MPI_SUCCESS;
+}
+
+int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value){
+  if(comm->attributes==NULL)
+    comm->attributes=xbt_dict_new();
+
+  xbt_dict_set(comm->attributes, (const char*)&keyval, attr_value, NULL);
+  return MPI_SUCCESS;
 }
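The hunks above wire MPI-1 attribute caching into SMPI communicators: smpi_comm_attr_put/get/delete store values in a per-communicator xbt_dict, smpi_comm_dup runs the registered copy callbacks, and smpi_comm_unuse runs the delete callbacks. What follows is a minimal user-level sketch of the standard MPI attribute API that this machinery is meant to back; the smpi_comm_* names in the comments indicate the presumed internal entry points (an assumption, not something stated by the patch), while the MPI calls themselves are the standard MPI-1 ones.

/* Minimal sketch of MPI-1 attribute caching; under SMPI these calls are
 * presumably backed by the smpi_comm_* helpers added in the diff above. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

/* Copy callback, invoked once per cached attribute by MPI_Comm_dup. */
static int copy_cb(MPI_Comm oldcomm, int keyval, void *extra_state,
                   void *value_in, void *value_out, int *flag) {
  int *dup_val = malloc(sizeof(int));
  *dup_val = *(int *)value_in;
  *(void **)value_out = dup_val;  /* value attached to the new communicator */
  *flag = 1;                      /* 1 = do propagate this attribute */
  return MPI_SUCCESS;
}

/* Delete callback, invoked when the attribute or the communicator goes away. */
static int delete_cb(MPI_Comm comm, int keyval, void *value, void *extra_state) {
  free(value);
  return MPI_SUCCESS;
}

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int keyval;
  MPI_Keyval_create(copy_cb, delete_cb, &keyval, NULL);

  int *payload = malloc(sizeof(int));
  *payload = 42;
  MPI_Attr_put(MPI_COMM_WORLD, keyval, payload);   /* presumably smpi_comm_attr_put */

  MPI_Comm dupcomm;
  MPI_Comm_dup(MPI_COMM_WORLD, &dupcomm);          /* presumably smpi_comm_dup, runs copy_cb */

  void *got;
  int flag;
  MPI_Attr_get(dupcomm, keyval, &got, &flag);      /* presumably smpi_comm_attr_get */
  if (flag)
    printf("attribute on duplicated comm: %d\n", *(int *)got);

  MPI_Attr_delete(dupcomm, keyval);                /* presumably smpi_comm_attr_delete */
  MPI_Attr_delete(MPI_COMM_WORLD, keyval);
  MPI_Comm_free(&dupcomm);
  MPI_Keyval_free(&keyval);
  MPI_Finalize();
  return 0;
}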
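The copy/delete dispatch in smpi_comm_dup and smpi_comm_unuse relies on the global keyval registry declared at the top of the file (extern xbt_dict_t smpi_keyvals), whose entries expose copy_fn and delete_fn. That registry is defined elsewhere in SMPI; the sketch below is only a hypothetical reconstruction of its shape, with struct and field names inferred from how the hunks use elem->copy_fn and elem->delete_fn rather than taken from this commit.

/* Hypothetical sketch, not code from this commit: the keyval registry shape
 * assumed by smpi_comm_dup/smpi_comm_unuse above.  The real smpi_key_elem
 * definition and the code that populates smpi_keyvals live in other SMPI
 * sources; only the two callback fields are inferred from this diff. */
#include <mpi.h>        /* MPI_Copy_function, MPI_Delete_function (provided by smpicc) */
#include "xbt/dict.h"

typedef struct s_smpi_key_elem {
  MPI_Copy_function*   copy_fn;   /* run per cached attribute when a communicator is duplicated */
  MPI_Delete_function* delete_fn; /* run when the attribute or the communicator is destroyed    */
} *smpi_key_elem;

/* Keyvals and attributes are looked up by the raw bytes of the int keyval,
 * matching the (const char*)&keyval casts in smpi_comm_attr_put/get/delete. */
xbt_dict_t smpi_keyvals = NULL;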