/* Copyright (c) 2010-2020. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
// Class-wide storage for attribute keyvals attached to communicators;
// keyval_id_ hands out the next fresh keyval id (starts at 0).
std::unordered_map<int, smpi_key_elem> Comm::keyvals_;
int Comm::keyval_id_=0;
// NOTE(review): this region still carries unresolved diff markers ('-'/'+'
// prefixed lines) and is truncated: the constructor body is cut short after
// 'int id;' and the trailing 'return group_;' belongs to a different accessor
// (presumably Comm::group() — confirm against the full file). The '+' lines
// are the intended newer version: 'smp' becomes bool, the removed per-member
// assignments are presumably replaced by in-class initializers, and the error
// handler is ref-counted on construction. Resolve the markers before building.
-Comm::Comm(MPI_Group group, MPI_Topology topo, int smp, int in_id) : group_(group), topo_(topo),is_smp_comm_(smp), id_(in_id)
+Comm::Comm(MPI_Group group, MPI_Topology topo, bool smp, int in_id)
+ : group_(group), topo_(topo), is_smp_comm_(smp), id_(in_id)
{
- refcount_ = 1;
- topoType_ = MPI_INVALID_TOPO;
- intra_comm_ = MPI_COMM_NULL;
- leaders_comm_ = MPI_COMM_NULL;
- is_uniform_ = 1;
- non_uniform_map_ = nullptr;
- leaders_map_ = nullptr;
- is_blocked_ = 0;
- info_ = MPI_INFO_NULL;
- errhandler_ = MPI_ERRORS_ARE_FATAL;
+ errhandler_->ref();
//First creation of comm is done before SIMIX_run, so only do comms for others
if(in_id==MPI_UNDEFINED && smp==0 && this->rank()!=MPI_UNDEFINED ){
int id;
return group_;
}
-int Comm::size()
+int Comm::size() const
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->size();
return group_->size();
}
-int Comm::rank()
+int Comm::rank() const
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->rank();
return group_->rank(s4u::Actor::self());
}
-int Comm::id()
+int Comm::id() const
{
return id_;
}
// NOTE(review): unresolved diff markers + truncated body. Only the
// MPI_COMM_UNINITIALIZED delegation of get_name() is visible; the line
// 'leaders_comm_=leaders;' belongs to a different member (presumably
// set_leaders_comm() — confirm against the full file). The '+' signature
// adds const, matching the other accessors in this file.
-void Comm::get_name (char* name, int* len)
+void Comm::get_name(char* name, int* len) const
{
if (this == MPI_COMM_UNINITIALIZED){
smpi_process()->comm_world()->get_name(name, len);
leaders_comm_=leaders;
}
-int* Comm::get_non_uniform_map(){
+int* Comm::get_non_uniform_map() const
+{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->get_non_uniform_map();
return non_uniform_map_;
}
-int* Comm::get_leaders_map(){
+int* Comm::get_leaders_map() const
+{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->get_leaders_map();
return leaders_map_;
}
-MPI_Comm Comm::get_leaders_comm(){
+MPI_Comm Comm::get_leaders_comm() const
+{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->get_leaders_comm();
return leaders_comm_;
}
-MPI_Comm Comm::get_intra_comm(){
+MPI_Comm Comm::get_intra_comm() const
+{
if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD)
return smpi_process()->comm_intra();
else return intra_comm_;
}
-int Comm::is_uniform(){
+bool Comm::is_uniform() const
+{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->is_uniform();
- return is_uniform_;
+ return is_uniform_ != 0;
}
-int Comm::is_blocked(){
+bool Comm::is_blocked() const
+{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->is_blocked();
- return is_blocked_;
+ return is_blocked_ != 0;
}
// NOTE(review): unresolved diff markers + truncation. The start of
// is_smp_comm() (bool-returning '+' version) is fused with the tail of a
// different function that receives a group via Request::recv and builds a new
// Comm from it (presumably a split/dup routine — confirm against the full
// file). The '+' return line propagates topo_ into the new Comm instead of
// dropping the topology with nullptr.
-int Comm::is_smp_comm(){
+bool Comm::is_smp_comm() const
+{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->is_smp_comm();
return is_smp_comm_;
Request::recv(&group_out, 1, MPI_PTR, 0, system_tag, this, MPI_STATUS_IGNORE);
} /* otherwise, exit with group_out == nullptr */
}
- return group_out!=nullptr ? new Comm(group_out, nullptr) : MPI_COMM_NULL;
+ return group_out!=nullptr ? new Comm(group_out, topo_) : MPI_COMM_NULL;
}
// NOTE(review): unresolved diff markers + truncation. Only the signature of
// ref() is visible; the following lines are the tail of the teardown path of
// a different function (presumably unref/destroy — confirm against the full
// file): attributes are cleaned up, the Info object is unreffed, and the '+'
// lines replace the raw 'delete topo_' with ref-counted release of the error
// handler before deleting the Comm itself.
void Comm::ref(){
comm->cleanup_attr<Comm>();
if (comm->info_ != MPI_INFO_NULL)
simgrid::smpi::Info::unref(comm->info_);
- delete comm->topo_; // there's no use count on topos
+ if (comm->errhandler_ != MPI_ERRHANDLER_NULL)
+ simgrid::smpi::Errhandler::unref(comm->errhandler_);
delete comm;
}
}
}
}
// NOTE(review): tail of a function whose start is outside this view; it
// reports the leader index through an out-parameter and returns a fresh
// SMP communicator. The '+' line passes 'true' for the now-bool smp
// constructor parameter instead of the old int literal 1.
*leader=min_index;
- return new Comm(group_intra, nullptr, 1);
+ return new Comm(group_intra, nullptr, true);
}
// NOTE(review): init_smp() is heavily truncated here and still carries
// unresolved diff markers. Visible intent of the '+' lines: assert the leader
// group is non-empty before sorting, pass 'true' instead of the int literal 1
// for the bool smp constructor parameter, and compare is_uniform() as a bool
// rather than '!=0'. Resolve the markers and consult the full file before
// changing anything in this function.
void Comm::init_smp(){
leader_group_size++;
}
}
+ xbt_assert(leader_group_size > 0);
std::sort(leader_list, leader_list + leader_group_size);
MPI_Group leaders_group = new Group(leader_group_size);
//create leader_communicator
for (i=0; i< leader_group_size;i++)
leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
- leader_comm = new Comm(leaders_group, nullptr,1);
+ leader_comm = new Comm(leaders_group, nullptr, true);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
- leader_comm = new Comm(leaders_group, nullptr,1);
+ leader_comm = new Comm(leaders_group, nullptr, true);
this->set_leaders_comm(leader_comm);
}else{
leader_comm=this->get_leaders_comm();
break;
}
}
- if(is_uniform==0 && this->is_uniform()!=0){
+ if (is_uniform == 0 && this->is_uniform()) {
non_uniform_map_ = non_uniform_map;
}else{
xbt_free(non_uniform_map);
info->ref();
}
-MPI_Errhandler Comm::errhandler(){
+MPI_Errhandler Comm::errhandler()
+{
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ errhandler_->ref();
return errhandler_;
}
-void Comm::set_errhandler(MPI_Errhandler errhandler){
- errhandler_=errhandler;
- if(errhandler_!= MPI_ERRHANDLER_NULL)
- errhandler->ref();
+void Comm::set_errhandler(MPI_Errhandler errhandler)
+{
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ simgrid::smpi::Errhandler::unref(errhandler_);
+ errhandler_ = errhandler;
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ errhandler_->ref();
}
// NOTE(review): unresolved diff markers; the body of split_type() continues
// past this view, so only the signature change is visible: the opaque
// MPI_Info parameter becomes an explicit 'const Info*'. Resolve the markers
// and review the remainder of the body in the full file.
-MPI_Comm Comm::split_type(int type, int /*key*/, MPI_Info)
+MPI_Comm Comm::split_type(int type, int /*key*/, const Info*)
{
//MPI_UNDEFINED can be given to some nodes... but we need them to still perform the smp part which is collective
if(type != MPI_COMM_TYPE_SHARED && type != MPI_UNDEFINED){