#include <stdlib.h>
#include "private.h"
+#include "xbt/dict.h"
#include "smpi_mpi_dt_private.h"
#include "limits.h"
#include "simix/smx_private.h"
#include "colls/colls.h"
+#include "xbt/ex.h"
+
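+// Table of registered keyvals (defined elsewhere in SMPI): maps a keyval to
+// its smpi_key_elem holding the user copy/delete callbacks.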
+extern xbt_dict_t smpi_keyvals;
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi,
"Logging specific to SMPI (comm)");
int is_uniform;
int* non_uniform_map; //set if SMP nodes have different numbers of processes allocated
int is_blocked; // are the ranks allocated on the same SMP node contiguous?
+ xbt_dict_t attributes; // communicator attributes, keyed by keyval (see smpi_comm_attr_*)
} s_smpi_mpi_communicator_t;
static int smpi_compare_rankmap(const void *a, const void *b)
comm->non_uniform_map = NULL;
comm->leaders_map = NULL;
comm->is_blocked=0;
+ comm->attributes=NULL;
return comm;
}
smpi_comm_unuse(comm);
}
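+/* Duplicate a communicator: create a new one with the same group and topology,
+ * then run the attribute copy callback registered for each attached keyval
+ * (MPI_Comm_dup semantics). If a copy callback fails, the new communicator is
+ * destroyed and the callback's error code is returned. */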
+int smpi_comm_dup(MPI_Comm comm, MPI_Comm* newcomm){
+ if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
+ smpi_switch_data_segment(smpi_process_index());
+ }
+ (*newcomm) = smpi_comm_new(smpi_comm_group(comm), smpi_comm_topo(comm));
+ int ret = MPI_SUCCESS;
+  //TODO: make this work with a duplicated communicator (redo an init_smp?)
+
+ /* MPI_Comm tmp=smpi_comm_get_intra_comm(comm);
+ if( tmp != MPI_COMM_NULL)
+ smpi_comm_set_intra_comm((*newcomm), smpi_comm_dup(tmp));
+ tmp=smpi_comm_get_leaders_comm(comm);
+ if( tmp != MPI_COMM_NULL)
+ smpi_comm_set_leaders_comm((*newcomm), smpi_comm_dup(tmp));
+ if(comm->non_uniform_map !=NULL){
+ (*newcomm)->non_uniform_map=
+ xbt_malloc(smpi_comm_size(comm->leaders_comm)*sizeof(int));
+ memcpy((*newcomm)->non_uniform_map,
+ comm->non_uniform_map,smpi_comm_size(comm->leaders_comm)*sizeof(int) );
+ }
+ if(comm->leaders_map !=NULL){
+ (*newcomm)->leaders_map=xbt_malloc(smpi_comm_size(comm)*sizeof(int));
+ memcpy((*newcomm)->leaders_map,
+ comm->leaders_map,smpi_comm_size(comm)*sizeof(int) );
+ }*/
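+  /* Copy the attributes attached to the source communicator, honouring the
+   * copy callback registered with each keyval. */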
+ if(comm->attributes !=NULL){
+ (*newcomm)->attributes=xbt_dict_new();
+ xbt_dict_cursor_t cursor = NULL;
+ int *key;
+ int flag;
+ void* value_in;
+ void* value_out;
+ xbt_dict_foreach(comm->attributes, cursor, key, value_in){
+ smpi_key_elem elem = xbt_dict_get_or_null(smpi_keyvals, (const char*)key);
+ if(elem && elem->copy_fn!=MPI_NULL_COPY_FN){
+ ret = elem->copy_fn(comm, *key, NULL, value_in, &value_out, &flag );
+ if(ret!=MPI_SUCCESS){
+ smpi_comm_destroy(*newcomm);
+ *newcomm=MPI_COMM_NULL;
+ return ret;
+ }
+ if(flag)
+ xbt_dict_set((*newcomm)->attributes, (const char*)key,value_out, NULL);
+ }
+ }
+ }
+ return ret;
+}
+
+
MPI_Group smpi_comm_group(MPI_Comm comm)
{
if (comm == MPI_COMM_UNINITIALIZED)
}
void smpi_comm_set_leaders_comm(MPI_Comm comm, MPI_Comm leaders){
+ if (comm == MPI_COMM_UNINITIALIZED)
+ comm = smpi_process_comm_world();
comm->leaders_comm=leaders;
}
}
int* smpi_comm_get_non_uniform_map(MPI_Comm comm){
+ if (comm == MPI_COMM_UNINITIALIZED)
+ comm = smpi_process_comm_world();
return comm->non_uniform_map;
}
int* smpi_comm_get_leaders_map(MPI_Comm comm){
+ if (comm == MPI_COMM_UNINITIALIZED)
+ comm = smpi_process_comm_world();
return comm->leaders_map;
}
MPI_Comm smpi_comm_get_leaders_comm(MPI_Comm comm){
+ if (comm == MPI_COMM_UNINITIALIZED)
+ comm = smpi_process_comm_world();
return comm->leaders_comm;
}
MPI_Comm smpi_comm_get_intra_comm(MPI_Comm comm){
- if(comm==MPI_COMM_WORLD) return smpi_process_get_comm_intra();
+ if (comm == MPI_COMM_UNINITIALIZED || comm==MPI_COMM_WORLD)
+ return smpi_process_get_comm_intra();
else return comm->intra_comm;
}
int smpi_comm_is_uniform(MPI_Comm comm){
+ if (comm == MPI_COMM_UNINITIALIZED)
+ comm = smpi_process_comm_world();
return comm->is_uniform;
}
int smpi_comm_is_blocked(MPI_Comm comm){
+ if (comm == MPI_COMM_UNINITIALIZED)
+ comm = smpi_process_comm_world();
return comm->is_blocked;
}
xbt_free(comm->non_uniform_map);
if(comm->leaders_map !=NULL)
xbt_free(comm->leaders_map);
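+  /* Run the delete callback registered for each attached attribute before
+   * freeing the communicator. */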
+ if(comm->attributes !=NULL){
+ xbt_dict_cursor_t cursor = NULL;
+ int* key;
+ void * value;
+ int flag;
+ xbt_dict_foreach(comm->attributes, cursor, key, value){
+      smpi_key_elem elem = xbt_dict_get_or_null(smpi_keyvals, (const char*)key);
+      if(elem && elem->delete_fn!=MPI_NULL_DELETE_FN)
+        elem->delete_fn(comm, *key, value, &flag);
+ }
+ }
xbt_free(comm);
}
}
void smpi_comm_init_smp(MPI_Comm comm){
int leader = -1;
+
+ if (comm == MPI_COMM_UNINITIALIZED)
+ comm = smpi_process_comm_world();
+
int comm_size =smpi_comm_size(comm);
+
+  // If we are replaying, perform an ugly hack:
+  // tell SimGrid that we are not replaying for a while, because we need
+  // the buffers to be copied for the following calls
+  int replaying = 0; //remember the value so it can be restored afterwards
+ if(smpi_process_get_replaying()){
+ replaying=1;
+ smpi_process_set_replaying(0);
+ }
if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
- XBT_VERB("Applying operation, switch to the right data frame ");
- switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process_index());
}
//identify neighbours in comm
//get the indexes of all processes sharing the same simix host
// smpi_process_set_comm_intra(MPI_COMM_SELF);
// return;
// }
- XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
+
int i =0;
int min_index=INT_MAX;//the minimum index will be the leader
i++;
}
}
-
+ XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
MPI_Group group_intra = smpi_group_new(intra_comm_size);
i=0;
process = NULL;
smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, comm);
+ if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
+ smpi_switch_data_segment(smpi_process_index());
+ }
+
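+  // adopt the freshly gathered leaders_map only if none was set before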
if(!comm->leaders_map){
comm->leaders_map= leaders_map;
}else{
MPI_Comm leader_comm = MPI_COMM_NULL;
- if(comm!=MPI_COMM_WORLD){
+ if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && comm!=MPI_COMM_WORLD){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
smpi_group_set_mapping(leaders_group, leader_list[i], i);
// Are the nodes uniform ? = same number of process/node
int my_local_size=smpi_comm_size(comm_intra);
if(smpi_comm_rank(comm_intra)==0) {
- int* non_uniform_map = xbt_malloc(sizeof(int)*leader_group_size);
+ int* non_uniform_map = xbt_malloc0(sizeof(int)*leader_group_size);
smpi_coll_tuned_allgather_mpich(&my_local_size, 1, MPI_INT,
non_uniform_map, 1, MPI_INT, leader_comm);
for(i=0; i < leader_group_size; i++) {
}
comm->is_uniform=is_uniform;
}
- mpi_coll_bcast_fun(&(comm->is_uniform),1, MPI_INT, 0, comm_intra );
-
+ smpi_coll_tuned_bcast_mpich(&(comm->is_uniform),1, MPI_INT, 0, comm_intra );
+ if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
+ smpi_switch_data_segment(smpi_process_index());
+ }
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
int is_blocked=1;
int prev=smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), 0));
}
int global_blocked;
- mpi_coll_allreduce_fun(&is_blocked, &(global_blocked), 1,
+ smpi_mpi_allreduce(&is_blocked, &(global_blocked), 1,
MPI_INT, MPI_LAND, comm);
- if(comm==MPI_COMM_WORLD){
+  if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || comm==MPI_COMM_WORLD){
if(smpi_comm_rank(comm)==0){
comm->is_blocked=global_blocked;
}
comm->is_blocked=global_blocked;
}
xbt_free(leader_list);
+
+ if(replaying==1)
+ smpi_process_set_replaying(1);
+}
+
+
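+/* Communicator attribute handling, backing MPI_Attr_put/get/delete: values are
+ * stored in a per-communicator dict keyed by the byte representation of the
+ * integer keyval. */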
+int smpi_comm_attr_delete(MPI_Comm comm, int keyval){
+ if(comm->attributes==NULL)
+ return MPI_ERR_ARG;
+
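+  //TODO: per MPI semantics, the delete callback registered with this keyval
+  //should probably be invoked here before the attribute is removed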
+ xbt_dict_remove(comm->attributes, (const char*)&keyval);
+ return MPI_SUCCESS;
+}
+int smpi_comm_attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag){
+ xbt_ex_t ex;
+ if(comm->attributes==NULL){
+ *flag=0;
+ return MPI_SUCCESS;
+ }
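+  /* xbt_dict_get raises an exception when the key is absent; report this as
+   * *flag = 0 rather than as an error, as MPI_Attr_get expects. */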
+ TRY {
+ *(void**)attr_value = xbt_dict_get(comm->attributes, (const char*)&keyval);
+ *flag=1;
+ }
+ CATCH(ex) {
+ *flag=0;
+ xbt_ex_free(ex);
+ }
+
+ return MPI_SUCCESS;
+}
+
+int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value){
+ if(comm->attributes==NULL)
+ comm->attributes=xbt_dict_new();
+
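+  /* The dict key is the raw byte representation of the integer keyval,
+   * matching the convention used in attr_get/attr_delete above. */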
+ xbt_dict_set(comm->attributes, (const char*)&keyval, attr_value, NULL);
+ return MPI_SUCCESS;
}