#include "colls/colls.h"
#include "xbt/ex.h"
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi,
- "Logging specific to SMPI (comm)");
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");
xbt_dict_t smpi_comm_keyvals = NULL;
int comm_keyval_id = 0;//avoid collisions
-/* Support for cartesian topology was added, but there are 2 other types of
- * topology, graph et dist graph. In order to support them, we have to add a
- * field MPIR_Topo_type, and replace the MPI_Topology field by an union. */
+/* Support for cartesian topology was added, but there are 2 other types of topology, graph et dist graph. In order to
+ * support them, we have to add a field MPIR_Topo_type, and replace the MPI_Topology field by an union. */
typedef struct s_smpi_mpi_communicator {
MPI_Group group;
comm = xbt_new(s_smpi_mpi_communicator_t, 1);
comm->group = group;
- smpi_group_use(comm->group);
comm->refcount=1;
comm->topoType = MPI_INVALID_TOPO;
comm->topo = topo;
{
if (comm == MPI_COMM_UNINITIALIZED)
comm = smpi_process_comm_world();
- smpi_group_unuse(comm->group);
smpi_topo_destroy(comm->topo); // there's no use count on topos
smpi_comm_unuse(comm);
}
int smpi_comm_dup(MPI_Comm comm, MPI_Comm* newcomm){
- if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
+ if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process_index());
}
- (*newcomm) = smpi_comm_new(smpi_comm_group(comm), smpi_comm_topo(comm));
+ MPI_Group cp=smpi_group_copy(smpi_comm_group(comm));
+ (*newcomm) = smpi_comm_new(cp, smpi_comm_topo(comm));
int ret = MPI_SUCCESS;
//todo: faire en sorte que ça fonctionne avec un communicator dupliqué (refaire un init_smp ?)
void* value_in;
void* value_out;
xbt_dict_foreach(comm->attributes, cursor, key, value_in){
- smpi_comm_key_elem elem = static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)key, sizeof(int)));
+ smpi_comm_key_elem elem =
+ static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)key, sizeof(int)));
if(elem && elem->copy_fn!=MPI_NULL_COPY_FN){
ret = elem->copy_fn(comm, *key, NULL, value_in, &value_out, &flag );
if(ret!=MPI_SUCCESS){
return ret;
}
-
MPI_Group smpi_comm_group(MPI_Comm comm)
{
+  /* MPI_COMM_UNINITIALIZED is resolved lazily to the world communicator
+   * before any field of comm is dereferenced. */
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
-
  return comm->group;
}
{
if (comm == MPI_COMM_UNINITIALIZED)
comm = smpi_process_comm_world();
-
return smpi_group_size(smpi_comm_group(comm));
}
int* recvbuf;
int* rankmap;
MPI_Group group, group_root, group_out;
+ MPI_Group* group_snd;
MPI_Request* requests;
group_root = group_out = NULL;
xbt_free(sendbuf);
/* Do the actual job */
if(rank == 0) {
+ group_snd = xbt_new(MPI_Group, size);
rankmap = xbt_new(int, 2 * size);
for(i = 0; i < size; i++) {
if(recvbuf[2 * i] == MPI_UNDEFINED) {
group_root = group_out; /* Save root's group */
}
for(j = 0; j < count; j++) {
- //increment refcounter in order to avoid freeing the group too quick before copy
index = smpi_group_index(group, rankmap[2 * j]);
smpi_group_set_mapping(group_out, index, j);
}
reqs = 0;
for(j = 0; j < count; j++) {
if(rankmap[2 * j] != 0) {
- requests[reqs] = smpi_isend_init(&group_out, 1, MPI_PTR, rankmap[2 * j], system_tag, comm);
+ group_snd[reqs]=smpi_group_copy(group_out);
+ requests[reqs] = smpi_isend_init(&(group_snd[reqs]), 1, MPI_PTR, rankmap[2 * j], system_tag, comm);
reqs++;
}
}
+ if(i != 0) {
+ smpi_group_destroy(group_out);
+ }
smpi_mpi_startall(reqs, requests);
smpi_mpi_waitall(reqs, requests, MPI_STATUS_IGNORE);
xbt_free(requests);
}
xbt_free(recvbuf);
+ xbt_free(rankmap);
+ xbt_free(group_snd);
group_out = group_root; /* exit with root's group */
} else {
if(color != MPI_UNDEFINED) {
smpi_mpi_recv(&group_out, 1, MPI_PTR, 0, system_tag, comm, MPI_STATUS_IGNORE);
- if(group_out){
- group_out=smpi_group_copy(group_out);
- }
} /* otherwise, exit with group_out == NULL */
}
return group_out ? smpi_comm_new(group_out, NULL) : MPI_COMM_NULL;
void smpi_comm_use(MPI_Comm comm){
  if (comm == MPI_COMM_UNINITIALIZED)
    comm = smpi_process_comm_world();
+  /* Each communicator reference now also pins the underlying group;
+   * smpi_comm_unuse() releases it symmetrically on every call. */
+  smpi_group_use(comm->group);
  comm->refcount++;
}
-void smpi_comm_unuse(MPI_Comm comm){
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  comm->refcount--;
-  if(comm->refcount==0){
+/* Run the user-registered delete callbacks (MPI attribute caching) for every
+ * attribute attached to this communicator.
+ * NOTE(review): delete_fn return codes are ignored and the attributes dict
+ * itself is not freed here -- confirm both are intended. */
+void smpi_comm_cleanup_attributes(MPI_Comm comm){
+  if(comm->attributes !=NULL){
+    xbt_dict_cursor_t cursor = NULL;
+    int* key;
+    void * value;
+    int flag;
+    xbt_dict_foreach(comm->attributes, cursor, key, value){
+      smpi_comm_key_elem elem =
+         static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null(smpi_comm_keyvals, (const char*)key));
+      if(elem && elem->delete_fn)
+        elem->delete_fn(comm, *key, value, &flag);
+    }
+  }
+}
+
+/* Tear down the SMP-hierarchy state of a communicator (intra/leaders
+ * communicators and the helper maps). Split out of smpi_comm_unuse() so each
+ * teardown concern is independently readable. */
+void smpi_comm_cleanup_smp(MPI_Comm comm){
  if(comm->intra_comm != MPI_COMM_NULL)
    smpi_comm_unuse(comm->intra_comm);
  if(comm->leaders_comm != MPI_COMM_NULL)
    xbt_free(comm->non_uniform_map);
  if(comm->leaders_map !=NULL)
    xbt_free(comm->leaders_map);
-  if(comm->attributes !=NULL){
-    xbt_dict_cursor_t cursor = NULL;
-    int* key;
-    void * value;
-    int flag;
-    xbt_dict_foreach(comm->attributes, cursor, key, value){
-      smpi_comm_key_elem elem = static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null(smpi_comm_keyvals, (const char*)key));
-      if(elem && elem->delete_fn)
-        elem->delete_fn(comm, *key, value, &flag);
-    }
-  }
+}
+
+/* Drop one reference on the communicator; frees it (SMP state, attributes,
+ * then the struct) when the count reaches zero. */
+void smpi_comm_unuse(MPI_Comm comm){
+  if (comm == MPI_COMM_UNINITIALIZED)
+    comm = smpi_process_comm_world();
+  comm->refcount--;
+  /* Mirror of the smpi_group_use() performed in smpi_comm_use().
+   * NOTE(review): smpi_comm_new() starts refcount at 1 without taking a group
+   * use (the one it had was removed) -- verify use/unuse stay balanced over a
+   * communicator's whole lifetime. */
+  smpi_group_unuse(comm->group);
+
+  if(comm->refcount==0){
+    smpi_comm_cleanup_smp(comm);
+    smpi_comm_cleanup_attributes(comm);
+    xbt_free(comm);
+  }
+}
-static int
-compare_ints (const void *a, const void *b)
+static int compare_ints (const void *a, const void *b)
{
const int *da = (const int *) a;
const int *db = (const int *) b;
int comm_size =smpi_comm_size(comm);
// If we are in replay - perform an ugly hack
- // say to SimGrid that we are not in replay for a while, because we need
- // the buffers to be copied for the following calls
+ // tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls
int replaying = 0; //cache data to set it back again after
if(smpi_process_get_replaying()){
replaying=1;
smpi_process_set_replaying(0);
}
- if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
+ if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process_index());
}
//identify neighbours in comm
// smpi_process_set_comm_intra(MPI_COMM_SELF);
// return;
// }
-
-
int i =0;
int min_index=INT_MAX;//the minimum index will be the leader
smx_process_t process = NULL;
}
}
-
MPI_Comm comm_intra = smpi_comm_new(group_intra, NULL);
//MPI_Comm shmem_comm = smpi_process_comm_intra();
//int intra_rank = smpi_comm_rank(shmem_comm);
-
//if(smpi_process_index()==min_index)
leader=min_index;
smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, comm);
- if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
+ if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process_index());
}
-
+
if(!comm->leaders_map){
comm->leaders_map= leaders_map;
}else{
MPI_Group leaders_group = smpi_group_new(leader_group_size);
-
MPI_Comm leader_comm = MPI_COMM_NULL;
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && comm!=MPI_COMM_WORLD){
//create leader_communicator
smpi_comm_set_leaders_comm(comm, leader_comm);
smpi_comm_set_intra_comm(comm, comm_intra);
- //create intracommunicator
+ //create intracommunicator
// smpi_comm_set_intra_comm(comm, smpi_comm_split(comm, *(int*)SIMIX_host_self(), comm_rank));
}else{
for (i=0; i< leader_group_size;i++)
smpi_group_set_mapping(leaders_group, leader_list[i], i);
- leader_comm = smpi_comm_new(leaders_group, NULL);
- if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL)
+ if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){
+ leader_comm = smpi_comm_new(leaders_group, NULL);
smpi_comm_set_leaders_comm(comm, leader_comm);
+ }else{
+ leader_comm=smpi_comm_get_leaders_comm(comm);
+ smpi_group_unuse(leaders_group);
+ }
smpi_process_set_comm_intra(comm_intra);
}
}
smpi_coll_tuned_bcast_mpich(&(comm->is_uniform),1, MPI_INT, 0, comm_intra );
- if(smpi_privatize_global_variables){ //we need to switch here, as the called function may silently touch global variables
+ if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process_index());
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
}
int global_blocked;
- smpi_mpi_allreduce(&is_blocked, &(global_blocked), 1,
- MPI_INT, MPI_LAND, comm);
+ smpi_mpi_allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, comm);
if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || comm==MPI_COMM_WORLD){
if(smpi_comm_rank(comm)==0){
}
int smpi_comm_attr_delete(MPI_Comm comm, int keyval){
- smpi_comm_key_elem elem = static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int)));
+ smpi_comm_key_elem elem =
+ static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int)));
if(!elem)
return MPI_ERR_ARG;
if(elem->delete_fn!=MPI_NULL_DELETE_FN){
int ret = elem->delete_fn(comm, keyval, value, &flag);
if(ret!=MPI_SUCCESS) return ret;
}
- }
+ }
if(comm->attributes==NULL)
return MPI_ERR_ARG;
}
int smpi_comm_attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag){
- smpi_comm_key_elem elem = static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int)));
+ smpi_comm_key_elem elem =
+ static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int)));
if(!elem)
return MPI_ERR_ARG;
xbt_ex_t ex;
TRY {
*(void**)attr_value = xbt_dict_get_ext(comm->attributes, (const char*)&keyval, sizeof(int));
*flag=1;
- }
- CATCH(ex) {
+ } CATCH(ex) {
*flag=0;
xbt_ex_free(ex);
}
int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value){
if(!smpi_comm_keyvals)
smpi_comm_keyvals = xbt_dict_new();
- smpi_comm_key_elem elem = static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int)));
+ smpi_comm_key_elem elem =
+ static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int)));
if(!elem )
return MPI_ERR_ARG;
int flag;
return MPI_SUCCESS;
}
-int smpi_comm_keyval_create(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval, void* extra_state){
-
+int smpi_comm_keyval_create(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval,
+ void* extra_state){
if(!smpi_comm_keyvals)
smpi_comm_keyvals = xbt_dict_new();
-
+
smpi_comm_key_elem value = (smpi_comm_key_elem) xbt_new0(s_smpi_mpi_comm_key_elem_t,1);
-
+
value->copy_fn=copy_fn;
value->delete_fn=delete_fn;
-
+
*keyval = comm_keyval_id;
xbt_dict_set_ext(smpi_comm_keyvals, (const char*)keyval, sizeof(int),(void*)value, NULL);
comm_keyval_id++;
}
int smpi_comm_keyval_free(int* keyval){
- smpi_comm_key_elem elem = static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)keyval, sizeof(int)));
+ smpi_comm_key_elem elem =
+ static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)keyval, sizeof(int)));
if(!elem){
return MPI_ERR_ARG;
}