attributes_=nullptr;
}
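+// Destroy a communicator: the MPI_COMM_UNINITIALIZED sentinel is redirected
+// to this process's world communicator before anything is freed.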
-void Comm::destroy()
+void Comm::destroy(Comm* comm)
{
- if (this == MPI_COMM_UNINITIALIZED){
- smpi_process_comm_world()->destroy();
+ if (comm == MPI_COMM_UNINITIALIZED){
+ Comm::destroy(smpi_process_comm_world());
return;
}
- delete topo_; // there's no use count on topos
- this->unuse();
+ delete comm->topo_; // there's no use count on topos
+ Comm::unref(comm);
}
int Comm::dup(MPI_Comm* newcomm){
if (elem != nullptr && elem->copy_fn != MPI_NULL_COPY_FN) {
ret = elem->copy_fn(this, atoi(key), nullptr, value_in, &value_out, &flag);
if (ret != MPI_SUCCESS) {
- (*newcomm)->destroy();
+ Comm::destroy(*newcomm);
*newcomm = MPI_COMM_NULL;
xbt_dict_cursor_free(&cursor);
return ret;
} else {
recvbuf = nullptr;
}
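+  // Gather the two-int contribution of every rank on rank 0, which does the
+  // actual work below.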
- smpi_mpi_gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
+ Coll_gather_default::gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
xbt_free(sendbuf);
/* Do the actual job */
if(rank == 0) {
}
}
if(i != 0) {
- group_out->destroy();
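+    // Predefined groups (the world group and MPI_GROUP_EMPTY) are shared
+    // and must not be unreferenced here.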
+ if(group_out != MPI_COMM_WORLD->group() && group_out != MPI_GROUP_EMPTY)
+ Group::unref(group_out);
}
Request::waitall(reqs, requests, MPI_STATUS_IGNORE);
xbt_free(requests);
return group_out!=nullptr ? new Comm(group_out, nullptr) : MPI_COMM_NULL;
}
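+// Take one more reference on this communicator and on its underlying group.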
-void Comm::use(){
+void Comm::ref(){
if (this == MPI_COMM_UNINITIALIZED){
- smpi_process_comm_world()->use();
+ smpi_process_comm_world()->ref();
return;
}
- group_->use();
+ group_->ref();
refcount_++;
}
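+// Release the SMP-hierarchy data: the intra-node and leaders communicators,
+// plus the non-uniform and leaders maps.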
void Comm::cleanup_smp(){
if (intra_comm_ != MPI_COMM_NULL)
- intra_comm_->unuse();
+ Comm::unref(intra_comm_);
if (leaders_comm_ != MPI_COMM_NULL)
- leaders_comm_->unuse();
+ Comm::unref(leaders_comm_);
if (non_uniform_map_ != nullptr)
xbt_free(non_uniform_map_);
if (leaders_map_ != nullptr)
xbt_free(leaders_map_);
}
-void Comm::unuse(){
- if (this == MPI_COMM_UNINITIALIZED){
- smpi_process_comm_world()->unuse();
+void Comm::unref(Comm* comm){
+ if (comm == MPI_COMM_UNINITIALIZED){
+ Comm::unref(smpi_process_comm_world());
return;
}
- refcount_--;
- group_->unuse();
+ comm->refcount_--;
+ Group::unref(comm->group_);
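+  // Once the last reference is gone, tear down the SMP data and the
+  // attributes, then free the communicator object itself.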
- if(refcount_==0){
- this->cleanup_smp();
- this->cleanup_attributes();
- delete this;
+ if(comm->refcount_==0){
+ comm->cleanup_smp();
+ comm->cleanup_attributes();
+ delete comm;
}
}
}
//identify neighbours in comm
//get the indexes of all processes sharing the same simix host
- xbt_swag_t process_list = SIMIX_host_self()->processes();
- int intra_comm_size = 0;
- int i =0;
- int min_index=INT_MAX;//the minimum index will be the leader
- smx_actor_t process = nullptr;
- xbt_swag_foreach(process, process_list) {
- int index = process->pid -1;
+ xbt_swag_t process_list = SIMIX_host_self()->extension<simgrid::simix::Host>()->process_list;
+ int intra_comm_size = 0;
+ int min_index = INT_MAX;//the minimum index will be the leader
+ smx_actor_t actor = nullptr;
+ xbt_swag_foreach(actor, process_list) {
+    int index = actor->pid - 1;
if(this->group()->rank(index)!=MPI_UNDEFINED){
- intra_comm_size++;
+ intra_comm_size++;
//the process is in the comm
if(index < min_index)
min_index=index;
- i++;
}
}
XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
MPI_Group group_intra = new Group(intra_comm_size);
- i=0;
- process = nullptr;
- xbt_swag_foreach(process, process_list) {
- int index = process->pid -1;
+ int i = 0;
+ actor = nullptr;
+ xbt_swag_foreach(actor, process_list) {
+    int index = actor->pid - 1;
if(this->group()->rank(index)!=MPI_UNDEFINED){
group_intra->set_mapping(index, i);
i++;
leader_list[i]=-1;
}
- smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
+  Coll_allgather_mpich::allgather(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, this);
if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process_index());
this->set_leaders_comm(leader_comm);
}else{
leader_comm=this->get_leaders_comm();
- leaders_group->unuse();
+ Group::unref(leaders_group);
}
smpi_process_set_comm_intra(comm_intra);
}
int my_local_size=comm_intra->size();
if(comm_intra->rank()==0) {
int* non_uniform_map = xbt_new0(int,leader_group_size);
- smpi_coll_tuned_allgather_mpich(&my_local_size, 1, MPI_INT,
+ Coll_allgather_mpich::allgather(&my_local_size, 1, MPI_INT,
non_uniform_map, 1, MPI_INT, leader_comm);
for(i=0; i < leader_group_size; i++) {
if(non_uniform_map[0] != non_uniform_map[i]) {
}
is_uniform_=is_uniform;
}
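+  // Rank 0 of each intra-node communicator broadcasts the uniformity flag to
+  // the other local ranks.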
- smpi_coll_tuned_bcast_mpich(&(is_uniform_),1, MPI_INT, 0, comm_intra );
+  Coll_bcast_mpich::bcast(&is_uniform_, 1, MPI_INT, 0, comm_intra);
if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process_index());
}
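+  // Combine the local is_blocked flags with a logical AND so every rank
+  // learns whether the whole mapping is blocked.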
int global_blocked;
- smpi_mpi_allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);
+  Coll_allreduce_default::allreduce(&is_blocked, &global_blocked, 1, MPI_INT, MPI_LAND, this);
if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
if(this->rank()==0){
return MPI_SUCCESS;
}
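+// Translate a Fortran integer handle into its C communicator: -2 denotes
+// MPI_COMM_SELF, 0 denotes MPI_COMM_WORLD, and any other non-negative id is
+// resolved through the f2c lookup table.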
+MPI_Comm Comm::f2c(int id) {
+ if(id == -2) {
+ return MPI_COMM_SELF;
+ } else if(id==0){
+ return MPI_COMM_WORLD;
+ } else if(Comm::f2c_lookup_ != nullptr && id >= 0) {
+ char key[KEY_SIZE];
+    MPI_Comm tmp = static_cast<MPI_Comm>(xbt_dict_get_or_null(Comm::f2c_lookup_, get_key_id(key, id)));
+    return tmp != nullptr ? tmp : MPI_COMM_NULL;
+ } else {
+ return MPI_COMM_NULL;
+ }
+}
+
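+// Remove a freed communicator's entry from the Fortran lookup table.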
+void Comm::free_f(int id) {
+ char key[KEY_SIZE];
+  xbt_dict_remove(Comm::f2c_lookup_, id == 0 ? get_key(key, id) : get_key_id(key, id));
+}
+
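+// Register this communicator in the Fortran lookup table and return the
+// integer handle that Fortran callers will use for it.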
+int Comm::add_f() {
+ if(Comm::f2c_lookup_==nullptr){
+ Comm::f2c_lookup_=xbt_dict_new_homogeneous(nullptr);
+ }
+ char key[KEY_SIZE];
+  xbt_dict_set(Comm::f2c_lookup_, this == MPI_COMM_WORLD ? get_key(key, Comm::f2c_id_) : get_key_id(key, Comm::f2c_id_), this, nullptr);
+ Comm::f2c_id_++;
+ return Comm::f2c_id_-1;
+}
+
}
}