X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/f00ffb4ea8272126f9a81ec6b8adc6e021182bde..c5a48995c0e24c9ae38c3d14203388523c565a5b:/src/smpi/smpi_comm.cpp

diff --git a/src/smpi/smpi_comm.cpp b/src/smpi/smpi_comm.cpp
index 3513ba66ff..97997d46fb 100644
--- a/src/smpi/smpi_comm.cpp
+++ b/src/smpi/smpi_comm.cpp
@@ -5,42 +5,29 @@
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
 #include 
+#include 
+
+#include 
+#include 
+#include 
+
+#include 
 #include "private.h"
-#include "xbt/dict.h"
-#include "smpi_mpi_dt_private.h"
-#include "limits.h"
 #include "src/simix/smx_private.h"
-#include "colls/colls.h"
-#include "xbt/ex.h"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");
 
-xbt_dict_t smpi_comm_keyvals = NULL;
-int comm_keyval_id = 0;//avoid collisions
+ Comm mpi_MPI_COMM_UNINITIALIZED;
+MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;
 
 /* Support for cartesian topology was added, but there are 2 other types of topology, graph and dist graph. In order to
  * support them, we have to add a field MPIR_Topo_type, and replace the MPI_Topology field by an union. */
 
-typedef struct s_smpi_mpi_communicator {
-  MPI_Group group;
-  MPIR_Topo_type topoType;
-  MPI_Topology topo; // to be replaced by an union
-  int refcount;
-  MPI_Comm leaders_comm;//inter-node communicator
-  MPI_Comm intra_comm;//intra-node communicator . For MPI_COMM_WORLD this can't be used, as var is global.
-  //use an intracomm stored in the process data instead
-  int* leaders_map; //who is the leader of each process
-  int is_uniform;
-  int* non_uniform_map; //set if smp nodes have a different number of processes allocated
-  int is_blocked;// are ranks allocated on the same smp node contiguous ?
-  xbt_dict_t attributes;
-} s_smpi_mpi_communicator_t;
-
 static int smpi_compare_rankmap(const void *a, const void *b)
 {
-  const int* x = (const int*)a;
-  const int* y = (const int*)b;
+  const int* x = static_cast<const int*>(a);
+  const int* y = static_cast<const int*>(b);
 
   if (x[1] < y[1]) {
     return -1;
@@ -57,242 +44,218 @@ static int smpi_compare_rankmap(const void *a, const void *b)
   return 1;
 }
 
-MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo)
+namespace simgrid{
+namespace smpi{
+
+std::unordered_map<int, smpi_key_elem> Comm::keyvals_;
+int Comm::keyval_id_=0;
+
+Comm::Comm(MPI_Group group, MPI_Topology topo) : group_(group), topo_(topo)
 {
-  MPI_Comm comm;
-
-  comm = xbt_new(s_smpi_mpi_communicator_t, 1);
-  comm->group = group;
-  comm->refcount=1;
-  comm->topoType = MPI_INVALID_TOPO;
-  comm->topo = topo;
-  comm->intra_comm = MPI_COMM_NULL;
-  comm->leaders_comm = MPI_COMM_NULL;
-  comm->is_uniform=1;
-  comm->non_uniform_map = NULL;
-  comm->leaders_map = NULL;
-  comm->is_blocked=0;
-  comm->attributes=NULL;
-  return comm;
+  refcount_=1;
+  topoType_ = MPI_INVALID_TOPO;
+  intra_comm_ = MPI_COMM_NULL;
+  leaders_comm_ = MPI_COMM_NULL;
+  is_uniform_=1;
+  non_uniform_map_ = nullptr;
+  leaders_map_ = nullptr;
+  is_blocked_=0;
 }
 
-void smpi_comm_destroy(MPI_Comm comm)
+void Comm::destroy(Comm* comm)
 {
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  smpi_topo_destroy(comm->topo); // there's no use count on topos
-  smpi_comm_unuse(comm);
+  if (comm == MPI_COMM_UNINITIALIZED){
+    Comm::destroy(smpi_process()->comm_world());
+    return;
+  }
+  delete comm->topo_; // there's no use count on topos
+  Comm::unref(comm);
 }
 
-int smpi_comm_dup(MPI_Comm comm, MPI_Comm* newcomm){
+int Comm::dup(MPI_Comm* newcomm){
   if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
-    smpi_switch_data_segment(smpi_process_index());
+    smpi_switch_data_segment(smpi_process()->index());
   }
-  MPI_Group cp=smpi_group_copy(smpi_comm_group(comm));
-  (*newcomm) = smpi_comm_new(cp, smpi_comm_topo(comm));
+  MPI_Group cp = new Group(this->group());
+  (*newcomm) = new Comm(cp, this->topo());
   int ret = MPI_SUCCESS;
 
-  //todo: make this work with a duplicated communicator (redo an init_smp?)
-
-  /* MPI_Comm tmp=smpi_comm_get_intra_comm(comm);
-  if( tmp != MPI_COMM_NULL)
-    smpi_comm_set_intra_comm((*newcomm), smpi_comm_dup(tmp));
-  tmp=smpi_comm_get_leaders_comm(comm);
-  if( tmp != MPI_COMM_NULL)
-    smpi_comm_set_leaders_comm((*newcomm), smpi_comm_dup(tmp));
-  if(comm->non_uniform_map !=NULL){
-    (*newcomm)->non_uniform_map=
-      xbt_malloc(smpi_comm_size(comm->leaders_comm)*sizeof(int));
-    memcpy((*newcomm)->non_uniform_map,
-      comm->non_uniform_map,smpi_comm_size(comm->leaders_comm)*sizeof(int) );
-  }
-  if(comm->leaders_map !=NULL){
-    (*newcomm)->leaders_map=xbt_malloc(smpi_comm_size(comm)*sizeof(int));
-    memcpy((*newcomm)->leaders_map,
-      comm->leaders_map,smpi_comm_size(comm)*sizeof(int) );
-  }*/
-  if(comm->attributes !=NULL){
-    (*newcomm)->attributes=xbt_dict_new();
-    xbt_dict_cursor_t cursor = NULL;
-    int *key;
-    int flag;
-    void* value_in;
-    void* value_out;
-    xbt_dict_foreach(comm->attributes, cursor, key, value_in){
-      smpi_comm_key_elem elem =
-        static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)key, sizeof(int)));
-      if(elem && elem->copy_fn!=MPI_NULL_COPY_FN){
-        ret = elem->copy_fn(comm, *key, NULL, value_in, &value_out, &flag );
-        if(ret!=MPI_SUCCESS){
-          smpi_comm_destroy(*newcomm);
-          *newcomm=MPI_COMM_NULL;
-          return ret;
-        }
-        if(flag)
-          xbt_dict_set_ext((*newcomm)->attributes, (const char*)key, sizeof(int),value_out, NULL);
+
+  if(!attributes()->empty()){
+    int flag;
+    void* value_out;
+    for(auto it : *attributes()){
+      smpi_key_elem elem = keyvals_.at(it.first);
+      if (elem != nullptr && elem->copy_fn.comm_copy_fn != MPI_NULL_COPY_FN) {
+        ret = elem->copy_fn.comm_copy_fn(this, it.first, nullptr, it.second, &value_out, &flag);
+        if (ret != MPI_SUCCESS) {
+          Comm::destroy(*newcomm);
+          *newcomm = MPI_COMM_NULL;
+          return ret;
+        }
+        if (flag){
+          elem->refcount++;
+          (*newcomm)->attributes()->insert({it.first, value_out});
         }
       }
+    }
   }
   return ret;
 }
 
-MPI_Group smpi_comm_group(MPI_Comm comm)
+MPI_Group Comm::group()
 {
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  return comm->group;
+  if (this == MPI_COMM_UNINITIALIZED)
+    return smpi_process()->comm_world()->group();
+  return group_;
 }
 
-MPI_Topology smpi_comm_topo(MPI_Comm comm) {
-  if (comm != MPI_COMM_NULL)
-    return comm->topo;
-  return NULL;
+MPI_Topology Comm::topo() {
+  return topo_;
 }
 
-int smpi_comm_size(MPI_Comm comm)
+int Comm::size()
 {
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  return smpi_group_size(smpi_comm_group(comm));
+  if (this == MPI_COMM_UNINITIALIZED)
+    return smpi_process()->comm_world()->size();
+  return group_->size();
 }
 
-int smpi_comm_rank(MPI_Comm comm)
+int Comm::rank()
 {
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  return smpi_group_rank(smpi_comm_group(comm), smpi_process_index());
+  if (this == MPI_COMM_UNINITIALIZED)
+    return smpi_process()->comm_world()->rank();
+  return group_->rank(smpi_process()->index());
}
 
-void smpi_comm_get_name (MPI_Comm comm, char* name, int* len)
+void Comm::get_name (char* name, int* len)
 {
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  if(comm == MPI_COMM_WORLD) {
-    strcpy(name, "WORLD");
+  if (this == MPI_COMM_UNINITIALIZED){
+    smpi_process()->comm_world()->get_name(name, len);
+    return;
+  }
+  if(this == MPI_COMM_WORLD) {
+    strncpy(name, "WORLD",5);
     *len = 5;
   } else {
-    *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", comm);
+    *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", this);
   }
 }
 
-void smpi_comm_set_leaders_comm(MPI_Comm comm, MPI_Comm leaders){
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  comm->leaders_comm=leaders;
+void Comm::set_leaders_comm(MPI_Comm leaders){
+  if (this == MPI_COMM_UNINITIALIZED){
+    smpi_process()->comm_world()->set_leaders_comm(leaders);
+    return;
+  }
+  leaders_comm_=leaders;
 }
 
-void smpi_comm_set_intra_comm(MPI_Comm comm, MPI_Comm leaders){
-  comm->intra_comm=leaders;
+void Comm::set_intra_comm(MPI_Comm leaders){
+  intra_comm_=leaders;
 }
 
-int* smpi_comm_get_non_uniform_map(MPI_Comm comm){
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  return comm->non_uniform_map;
+int* Comm::get_non_uniform_map(){
+  if (this == MPI_COMM_UNINITIALIZED)
+    return smpi_process()->comm_world()->get_non_uniform_map();
+  return non_uniform_map_;
 }
 
-int* smpi_comm_get_leaders_map(MPI_Comm comm){
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  return comm->leaders_map;
+int* Comm::get_leaders_map(){
+  if (this == MPI_COMM_UNINITIALIZED)
+    return smpi_process()->comm_world()->get_leaders_map();
  return leaders_map_;
 }
 
-MPI_Comm smpi_comm_get_leaders_comm(MPI_Comm comm){
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  return comm->leaders_comm;
+MPI_Comm Comm::get_leaders_comm(){
+  if (this == MPI_COMM_UNINITIALIZED)
+    return smpi_process()->comm_world()->get_leaders_comm();
+  return leaders_comm_;
 }
 
-MPI_Comm smpi_comm_get_intra_comm(MPI_Comm comm){
-  if (comm == MPI_COMM_UNINITIALIZED || comm==MPI_COMM_WORLD)
-    return smpi_process_get_comm_intra();
-  else return comm->intra_comm;
+MPI_Comm Comm::get_intra_comm(){
+  if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD)
+    return smpi_process()->comm_intra();
+  else return intra_comm_;
 }
 
-int smpi_comm_is_uniform(MPI_Comm comm){
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  return comm->is_uniform;
+int Comm::is_uniform(){
+  if (this == MPI_COMM_UNINITIALIZED)
+    return smpi_process()->comm_world()->is_uniform();
+  return is_uniform_;
 }
 
-int smpi_comm_is_blocked(MPI_Comm comm){
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  return comm->is_blocked;
+int Comm::is_blocked(){
+  if (this == MPI_COMM_UNINITIALIZED)
+    return smpi_process()->comm_world()->is_blocked();
+  return is_blocked_;
 }
 
-MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key)
+MPI_Comm Comm::split(int color, int key)
 {
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
+  if (this == MPI_COMM_UNINITIALIZED)
+    return smpi_process()->comm_world()->split(color, key);
   int system_tag = 123;
-  int index, rank, size, i, j, count, reqs;
-  int* sendbuf;
   int* recvbuf;
-  int* rankmap;
-  MPI_Group group, group_root, group_out;
-  MPI_Group* group_snd;
-  MPI_Request* requests;
-
-  group_root = group_out = NULL;
-  group = smpi_comm_group(comm);
-  rank = smpi_comm_rank(comm);
-  size = smpi_comm_size(comm);
+
+  MPI_Group group_root = nullptr;
+  MPI_Group group_out = nullptr;
+  MPI_Group group = this->group();
+  int rank = this->rank();
+  int size = this->size();
   /* Gather all colors and keys on rank 0 */
-  sendbuf = xbt_new(int, 2);
+  int* sendbuf = xbt_new(int, 2);
   sendbuf[0] = color;
   sendbuf[1] = key;
   if(rank == 0) {
     recvbuf = xbt_new(int, 2 * size);
   } else {
-    recvbuf = NULL;
+    recvbuf = nullptr;
   }
-  smpi_mpi_gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, comm);
+  Coll_gather_default::gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
   xbt_free(sendbuf);
   /* Do the actual job */
   if(rank == 0) {
-    group_snd = xbt_new(MPI_Group, size);
-    rankmap = xbt_new(int, 2 * size);
-    for(i = 0; i < size; i++) {
-      if(recvbuf[2 * i] == MPI_UNDEFINED) {
-        continue;
-      }
-      count = 0;
-      for(j = i + 1; j < size; j++)  {
-        if(recvbuf[2 * i] == recvbuf[2 * j]) {
-          recvbuf[2 * j] = MPI_UNDEFINED;
-          rankmap[2 * count] = j;
-          rankmap[2 * count + 1] = recvbuf[2 * j + 1];
-          count++;
+    MPI_Group* group_snd = xbt_new(MPI_Group, size);
+    int* rankmap = xbt_new(int, 2 * size);
+    for (int i = 0; i < size; i++) {
+      if (recvbuf[2 * i] != MPI_UNDEFINED) {
+        int count = 0;
+        for (int j = i + 1; j < size; j++)  {
+          if(recvbuf[2 * i] == recvbuf[2 * j]) {
+            recvbuf[2 * j] = MPI_UNDEFINED;
+            rankmap[2 * count] = j;
+            rankmap[2 * count + 1] = recvbuf[2 * j + 1];
+            count++;
+          }
         }
-      }
-      /* Add self in the group */
-      recvbuf[2 * i] = MPI_UNDEFINED;
-      rankmap[2 * count] = i;
-      rankmap[2 * count + 1] = recvbuf[2 * i + 1];
-      count++;
-      qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap);
-      group_out = smpi_group_new(count);
-      if(i == 0) {
-        group_root = group_out; /* Save root's group */
-      }
-      for(j = 0; j < count; j++) {
-        index = smpi_group_index(group, rankmap[2 * j]);
-        smpi_group_set_mapping(group_out, index, j);
-      }
-      requests = xbt_new(MPI_Request, count);
-      reqs = 0;
-      for(j = 0; j < count; j++) {
-        if(rankmap[2 * j] != 0) {
-          group_snd[reqs]=smpi_group_copy(group_out);
-          requests[reqs] = smpi_mpi_isend(&(group_snd[reqs]), 1, MPI_PTR, rankmap[2 * j], system_tag, comm);
-          reqs++;
+        /* Add self in the group */
+        recvbuf[2 * i] = MPI_UNDEFINED;
+        rankmap[2 * count] = i;
+        rankmap[2 * count + 1] = recvbuf[2 * i + 1];
+        count++;
+        qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap);
+        group_out = new Group(count);
+        if (i == 0) {
+          group_root = group_out; /* Save root's group */
        }
+        for (int j = 0; j < count; j++) {
+          int index = group->index(rankmap[2 * j]);
+          group_out->set_mapping(index, j);
+        }
+        MPI_Request* requests = xbt_new(MPI_Request, count);
+        int reqs = 0;
+        for (int j = 0; j < count; j++) {
+          if(rankmap[2 * j] != 0) {
+            group_snd[reqs]=new Group(group_out);
+            requests[reqs] = Request::isend(&(group_snd[reqs]), 1, MPI_PTR, rankmap[2 * j], system_tag, this);
+            reqs++;
+          }
+        }
+        if(i != 0 && group_out != MPI_COMM_WORLD->group() && group_out != MPI_GROUP_EMPTY)
+          Group::unref(group_out);
+
+        Request::waitall(reqs, requests, MPI_STATUS_IGNORE);
+        xbt_free(requests);
       }
-      if(i != 0) {
-        smpi_group_destroy(group_out);
-      }
-      smpi_mpi_waitall(reqs, requests, MPI_STATUS_IGNORE);
-      xbt_free(requests);
     }
     xbt_free(recvbuf);
     xbt_free(rankmap);
@@ -300,146 +263,119 @@ MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key)
     group_out = group_root; /* exit with root's group */
   } else {
     if(color != MPI_UNDEFINED) {
-      smpi_mpi_recv(&group_out, 1, MPI_PTR, 0, system_tag, comm, MPI_STATUS_IGNORE);
-    } /* otherwise, exit with group_out == NULL */
+      Request::recv(&group_out, 1, MPI_PTR, 0, system_tag, this, MPI_STATUS_IGNORE);
+    } /* otherwise, exit with group_out == nullptr */
   }
-  return group_out ? smpi_comm_new(group_out, NULL) : MPI_COMM_NULL;
-}
-
-void smpi_comm_use(MPI_Comm comm){
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  smpi_group_use(comm->group);
-  comm->refcount++;
+  return group_out!=nullptr ? new Comm(group_out, nullptr) : MPI_COMM_NULL;
 }
 
-void smpi_comm_cleanup_attributes(MPI_Comm comm){
-  if(comm->attributes !=NULL){
-    xbt_dict_cursor_t cursor = NULL;
-    int* key;
-    void * value;
-    int flag;
-    xbt_dict_foreach(comm->attributes, cursor, key, value){
-      smpi_comm_key_elem elem =
-         static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null(smpi_comm_keyvals, (const char*)key));
-      if(elem && elem->delete_fn)
-        elem->delete_fn(comm, *key, value, &flag);
-    }
-    xbt_dict_free(&comm->attributes);
+void Comm::ref(){
+  if (this == MPI_COMM_UNINITIALIZED){
+    smpi_process()->comm_world()->ref();
+    return;
   }
+  group_->ref();
+  refcount_++;
 }
 
-void smpi_comm_cleanup_smp(MPI_Comm comm){
-  if(comm->intra_comm != MPI_COMM_NULL)
-    smpi_comm_unuse(comm->intra_comm);
-  if(comm->leaders_comm != MPI_COMM_NULL)
-    smpi_comm_unuse(comm->leaders_comm);
-  if(comm->non_uniform_map !=NULL)
-    xbt_free(comm->non_uniform_map);
-  if(comm->leaders_map !=NULL)
-    xbt_free(comm->leaders_map);
+void Comm::cleanup_smp(){
+  if (intra_comm_ != MPI_COMM_NULL)
+    Comm::unref(intra_comm_);
+  if (leaders_comm_ != MPI_COMM_NULL)
+    Comm::unref(leaders_comm_);
+  if (non_uniform_map_ != nullptr)
+    xbt_free(non_uniform_map_);
+  if (leaders_map_ != nullptr)
+    xbt_free(leaders_map_);
 }
 
-void smpi_comm_unuse(MPI_Comm comm){
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
-  comm->refcount--;
-  smpi_group_unuse(comm->group);
+void Comm::unref(Comm* comm){
+  if (comm == MPI_COMM_UNINITIALIZED){
+    Comm::unref(smpi_process()->comm_world());
+    return;
+  }
+  comm->refcount_--;
+  Group::unref(comm->group_);
 
-  if(comm->refcount==0){
-    smpi_comm_cleanup_smp(comm);
-    smpi_comm_cleanup_attributes(comm);
-    xbt_free(comm);
+  if(comm->refcount_==0){
+    comm->cleanup_smp();
+    comm->cleanup_attr();
+    delete comm;
  }
 }
 
 static int compare_ints (const void *a, const void *b)
 {
-  const int *da = (const int *) a;
-  const int *db = (const int *) b;
+  const int *da = static_cast<const int*>(a);
+  const int *db = static_cast<const int*>(b);
 
-  return (*da > *db) - (*da < *db);
+  return static_cast<int>(*da > *db) - static_cast<int>(*da < *db);
 }
 
-void smpi_comm_init_smp(MPI_Comm comm){
+void Comm::init_smp(){
   int leader = -1;
 
-  if (comm == MPI_COMM_UNINITIALIZED)
-    comm = smpi_process_comm_world();
+  if (this == MPI_COMM_UNINITIALIZED)
+    smpi_process()->comm_world()->init_smp();
 
-  int comm_size =smpi_comm_size(comm);
+  int comm_size = this->size();
 
   // If we are in replay - perform an ugly hack
  // tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls
-  int replaying = 0; //cache data to set it back again after
-  if(smpi_process_get_replaying()){
-    replaying=1;
-    smpi_process_set_replaying(0);
+  bool replaying = false; //cache data to set it back again after
+  if(smpi_process()->replaying()){
+    replaying=true;
+    smpi_process()->set_replaying(false);
  }
 
  if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
-    smpi_switch_data_segment(smpi_process_index());
+    smpi_switch_data_segment(smpi_process()->index());
  }
  //identify neighbours in comm
  //get the indexes of all processes sharing the same simix host
-  xbt_swag_t process_list = simcall_host_get_process_list(SIMIX_host_self());
-  int intra_comm_size = 0;
-  //only one process/node, disable SMP support and return
-//  if(intra_comm_size==1){
-//      smpi_comm_set_intra_comm(comm, MPI_COMM_SELF);
-//      //smpi_comm_set_leaders_comm(comm, comm);
-//      smpi_process_set_comm_intra(MPI_COMM_SELF);
-//      return;
-//  }
-  int i =0;
-  int min_index=INT_MAX;//the minimum index will be the leader
-  smx_process_t process = NULL;
-  xbt_swag_foreach(process, process_list) {
-    //is_in_comm=0;
-    int index = SIMIX_process_get_PID(process) -1;
-
-    if(smpi_group_rank(smpi_comm_group(comm), index)!=MPI_UNDEFINED){
-      intra_comm_size++;
+  xbt_swag_t process_list = SIMIX_host_self()->extension()->process_list;
+  int intra_comm_size = 0;
+  int min_index = INT_MAX;//the minimum index will be the leader
+  smx_actor_t actor = nullptr;
+  xbt_swag_foreach(actor, process_list) {
+    int index = actor->pid -1;
+
+    if(this->group()->rank(index)!=MPI_UNDEFINED){
+      intra_comm_size++; //the process is in the comm
      if(index < min_index)
        min_index=index;
-      i++;
    }
  }
  XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
-  MPI_Group group_intra = smpi_group_new(intra_comm_size);
-  i=0;
-  process = NULL;
-  xbt_swag_foreach(process, process_list) {
-    //is_in_comm=0;
-    int index = SIMIX_process_get_PID(process) -1;
-    if(smpi_group_rank(smpi_comm_group(comm), index)!=MPI_UNDEFINED){
-      smpi_group_set_mapping(group_intra, index, i);
+  MPI_Group group_intra = new Group(intra_comm_size);
+  int i = 0;
+  actor = nullptr;
+  xbt_swag_foreach(actor, process_list) {
+    int index = actor->pid -1;
+    if(this->group()->rank(index)!=MPI_UNDEFINED){
+      group_intra->set_mapping(index, i);
      i++;
    }
  }
 
-  MPI_Comm comm_intra = smpi_comm_new(group_intra, NULL);
-  //MPI_Comm shmem_comm = smpi_process_comm_intra();
-  //int intra_rank = smpi_comm_rank(shmem_comm);
-
-  //if(smpi_process_index()==min_index)
+  MPI_Comm comm_intra = new Comm(group_intra, nullptr);
  leader=min_index;
 
-  int * leaders_map= (int*)xbt_malloc0(sizeof(int)*comm_size);
-  int * leader_list= (int*)xbt_malloc0(sizeof(int)*comm_size);
+  int * leaders_map= static_cast<int*>(xbt_malloc0(sizeof(int)*comm_size));
+  int * leader_list= static_cast<int*>(xbt_malloc0(sizeof(int)*comm_size));
 
  for(i=0; i<comm_size; i++)
      leader_list[i]=-1;
 
-  smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, comm);
+  Coll_allgather_mpich::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
 
  if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
-    smpi_switch_data_segment(smpi_process_index());
+    smpi_switch_data_segment(smpi_process()->index());
  }
 
-  if(!comm->leaders_map){
-    comm->leaders_map= leaders_map;
+  if(leaders_map_==nullptr){
+    leaders_map_= leaders_map;
  }else{
    xbt_free(leaders_map);
  }
@@ -448,51 +384,50 @@ void smpi_comm_init_smp(MPI_Comm comm){
  for(i=0; i<comm_size; i++){
    int already_done=0;
    for(j=0;j<leader_group_size; j++){
-      if(comm->leaders_map[i]==leader_list[j]){
+      if(leaders_map_[i]==leader_list[j]){
        already_done=1;
      }
    }
-    if(!already_done){
-      leader_list[leader_group_size]=comm->leaders_map[i];
+    if(already_done==0){
+      leader_list[leader_group_size]=leaders_map_[i];
      leader_group_size++;
    }
  }
  qsort(leader_list, leader_group_size, sizeof(int),compare_ints);
 
-  MPI_Group leaders_group = smpi_group_new(leader_group_size);
+  MPI_Group leaders_group = new Group(leader_group_size);
 
  MPI_Comm leader_comm = MPI_COMM_NULL;
-  if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && comm!=MPI_COMM_WORLD){
+  if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
    //create leader_communicator
    for (i=0; i< leader_group_size;i++)
-      smpi_group_set_mapping(leaders_group, leader_list[i], i);
-    leader_comm = smpi_comm_new(leaders_group, NULL);
-    smpi_comm_set_leaders_comm(comm, leader_comm);
-    smpi_comm_set_intra_comm(comm, comm_intra);
+      leaders_group->set_mapping(leader_list[i], i);
+    leader_comm = new Comm(leaders_group, nullptr);
+    this->set_leaders_comm(leader_comm);
+    this->set_intra_comm(comm_intra);
 
    //create intracommunicator
-   // smpi_comm_set_intra_comm(comm, smpi_comm_split(comm, *(int*)SIMIX_host_self(), comm_rank));
  }else{
    for (i=0; i< leader_group_size;i++)
-      smpi_group_set_mapping(leaders_group, leader_list[i], i);
+      leaders_group->set_mapping(leader_list[i], i);
 
-    if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){
-      leader_comm = smpi_comm_new(leaders_group, NULL);
-      smpi_comm_set_leaders_comm(comm, leader_comm);
+    if(this->get_leaders_comm()==MPI_COMM_NULL){
+      leader_comm = new Comm(leaders_group, nullptr);
+      this->set_leaders_comm(leader_comm);
    }else{
-      leader_comm=smpi_comm_get_leaders_comm(comm);
-      smpi_group_unuse(leaders_group);
+      leader_comm=this->get_leaders_comm();
+      Group::unref(leaders_group);
    }
-    smpi_process_set_comm_intra(comm_intra);
+    smpi_process()->set_comm_intra(comm_intra);
  }
 
  int is_uniform = 1;
 
  // Are the nodes uniform ? = same number of process/node
-  int my_local_size=smpi_comm_size(comm_intra);
-  if(smpi_comm_rank(comm_intra)==0) {
+  int my_local_size=comm_intra->size();
+  if(comm_intra->rank()==0) {
    int* non_uniform_map = xbt_new0(int,leader_group_size);
-    smpi_coll_tuned_allgather_mpich(&my_local_size, 1, MPI_INT,
+    Coll_allgather_mpich::allgather(&my_local_size, 1, MPI_INT,
        non_uniform_map, 1, MPI_INT, leader_comm);
    for(i=0; i < leader_group_size; i++) {
      if(non_uniform_map[0] != non_uniform_map[i]) {
@@ -500,23 +435,23 @@ void smpi_comm_init_smp(MPI_Comm comm){
        break;
      }
    }
-    if(!is_uniform && smpi_comm_is_uniform(comm)){
-      comm->non_uniform_map= non_uniform_map;
+    if(is_uniform==0 && this->is_uniform()!=0){
+      non_uniform_map_= non_uniform_map;
    }else{
      xbt_free(non_uniform_map);
    }
-    comm->is_uniform=is_uniform;
+    is_uniform_=is_uniform;
  }
-  smpi_coll_tuned_bcast_mpich(&(comm->is_uniform),1, MPI_INT, 0, comm_intra );
+  Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
 
  if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
-    smpi_switch_data_segment(smpi_process_index());
+    smpi_switch_data_segment(smpi_process()->index());
  }
  // Are the ranks blocked ? = allocated contiguously on the SMP nodes
  int is_blocked=1;
-  int prev=smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), 0));
+  int prev=this->group()->rank(comm_intra->group()->index(0));
  for (i=1; i<my_local_size; i++){
-    int that=smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), i));
+    int that=this->group()->rank(comm_intra->group()->index(i));
    if(that!=prev+1){
      is_blocked=0;
      break;
@@ -525,105 +460,70 @@ void smpi_comm_init_smp(MPI_Comm comm){
    }
  }
 
  int global_blocked;
-  smpi_mpi_allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, comm);
+  Coll_allreduce_default::allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);
 
-  if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || comm==MPI_COMM_WORLD){
-    if(smpi_comm_rank(comm)==0){
-      comm->is_blocked=global_blocked;
+  if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
+    if(this->rank()==0){
+      is_blocked_=global_blocked;
    }
  }else{
-    comm->is_blocked=global_blocked;
+    is_blocked_=global_blocked;
  }
  xbt_free(leader_list);
 
-  if(replaying==1)
-    smpi_process_set_replaying(1);
+  if(replaying)
+    smpi_process()->set_replaying(true);
 }
 
-int smpi_comm_attr_delete(MPI_Comm comm, int keyval){
-  smpi_comm_key_elem elem =
-    static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int)));
-  if(!elem)
-    return MPI_ERR_ARG;
-  if(elem->delete_fn!=MPI_NULL_DELETE_FN){
-    void * value;
-    int flag;
-    if(smpi_comm_attr_get(comm, keyval, &value, &flag)==MPI_SUCCESS){
-      int ret = elem->delete_fn(comm, keyval, value, &flag);
-      if(ret!=MPI_SUCCESS) return ret;
-    }
+MPI_Comm Comm::f2c(int id) {
+  if(id == -2) {
+    return MPI_COMM_SELF;
+  } else if(id==0){
+    return MPI_COMM_WORLD;
+  } else if(F2C::f2c_lookup() != nullptr && id >= 0) {
+    char key[KEY_SIZE];
+    MPI_Comm tmp = static_cast<MPI_Comm>(xbt_dict_get_or_null(F2C::f2c_lookup(),get_key_id(key, id)));
+    return tmp != nullptr ? tmp : MPI_COMM_NULL ;
+  } else {
+    return MPI_COMM_NULL;
  }
-  if(comm->attributes==NULL)
-    return MPI_ERR_ARG;
+}
 
-  xbt_dict_remove_ext(comm->attributes, (const char*)&keyval, sizeof(int));
-  return MPI_SUCCESS;
+void Comm::free_f(int id) {
+  char key[KEY_SIZE];
+  xbt_dict_remove(F2C::f2c_lookup(), id==0? get_key(key, id) : get_key_id(key, id));
 }
 
-int smpi_comm_attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag){
-  smpi_comm_key_elem elem =
-    static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int)));
-  if(!elem)
-    return MPI_ERR_ARG;
-  xbt_ex_t ex;
-  if(comm->attributes==NULL){
-    *flag=0;
-    return MPI_SUCCESS;
-  }
-  TRY {
-    *(void**)attr_value = xbt_dict_get_ext(comm->attributes, (const char*)&keyval, sizeof(int));
-    *flag=1;
-  } CATCH(ex) {
-    *flag=0;
-    xbt_ex_free(ex);
+int Comm::add_f() {
+  if(F2C::f2c_lookup()==nullptr){
+    F2C::set_f2c_lookup(xbt_dict_new_homogeneous(nullptr));
  }
-  return MPI_SUCCESS;
+  char key[KEY_SIZE];
+  xbt_dict_set(F2C::f2c_lookup(), this==MPI_COMM_WORLD? get_key(key, F2C::f2c_id()) : get_key_id(key,F2C::f2c_id()), this, nullptr);
+  f2c_id_increment();
+  return F2C::f2c_id()-1;
 }
 
-int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value){
-  if(!smpi_comm_keyvals)
-    smpi_comm_keyvals = xbt_dict_new();
-  smpi_comm_key_elem elem =
-    static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)&keyval, sizeof(int)));
-  if(!elem )
-    return MPI_ERR_ARG;
-  int flag;
-  void* value;
-  smpi_comm_attr_get(comm, keyval, &value, &flag);
-  if(flag && elem->delete_fn!=MPI_NULL_DELETE_FN){
-    int ret = elem->delete_fn(comm, keyval, value, &flag);
-    if(ret!=MPI_SUCCESS) return ret;
-  }
-  if(comm->attributes==NULL)
-    comm->attributes=xbt_dict_new();
-  xbt_dict_set_ext(comm->attributes, (const char*)&keyval, sizeof(int), attr_value, NULL);
-  return MPI_SUCCESS;
+void Comm::add_rma_win(MPI_Win win){
+  rma_wins_.push_back(win);
 }
 
-int smpi_comm_keyval_create(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval,
-                            void* extra_state){
-  if(!smpi_comm_keyvals)
-    smpi_comm_keyvals = xbt_dict_new();
+void Comm::remove_rma_win(MPI_Win win){
+  rma_wins_.remove(win);
+}
 
-  smpi_comm_key_elem value = (smpi_comm_key_elem) xbt_new0(s_smpi_mpi_comm_key_elem_t,1);
+void Comm::finish_rma_calls(){
+  for(auto it : rma_wins_){
+    if(it->rank()==this->rank()){//is it ours (for MPI_COMM_WORLD)?
+      int finished = it->finish_comms();
+      XBT_DEBUG("Barrier for rank %d - Finished %d RMA calls",this->rank(), finished);
+    }
+  }
+}
 
-  value->copy_fn=copy_fn;
-  value->delete_fn=delete_fn;
-  *keyval = comm_keyval_id;
-  xbt_dict_set_ext(smpi_comm_keyvals, (const char*)keyval, sizeof(int),(void*)value, NULL);
-  comm_keyval_id++;
-  return MPI_SUCCESS;
 }
-
-int smpi_comm_keyval_free(int* keyval){
-  smpi_comm_key_elem elem =
-    static_cast<smpi_comm_key_elem>(xbt_dict_get_or_null_ext(smpi_comm_keyvals, (const char*)keyval, sizeof(int)));
-  if(!elem){
-    return MPI_ERR_ARG;
-  }
-  xbt_dict_remove_ext(smpi_comm_keyvals, (const char*)keyval, sizeof(int));
-  xbt_free(elem);
-  return MPI_SUCCESS;
 }
+
+
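The header comment of this file notes that cartesian topologies are supported while graph and dist-graph are not yet, and that supporting them would mean adding an MPIR_Topo_type discriminant and turning the single MPI_Topology member into a union. The sketch below only illustrates that idea; the type and field names (SmpiTopoType, CartTopoData, and so on) are hypothetical and are not the SimGrid/SMPI API shown in this diff.

// Sketch only (hypothetical names): a tagged record that could replace the
// single `MPI_Topology topo` member, selected by an MPIR_Topo_type-style enum.
enum class SmpiTopoType { Invalid, Cart, Graph, DistGraph };

struct CartTopoData  { int ndims;  };   // placeholder payload
struct GraphTopoData { int nnodes; };   // placeholder payload

struct SmpiTopology {
  SmpiTopoType type = SmpiTopoType::Invalid;
  union {                // only the member matching `type` is meaningful
    CartTopoData  cart;
    GraphTopoData graph;
  };
};

Code reading such a record would branch on `type` before touching `cart` or `graph`, which is exactly the role the comment assigns to the planned MPIR_Topo_type field.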