is_blocked_ = 0;
info_ = MPI_INFO_NULL;
errhandler_ = MPI_ERRORS_ARE_FATAL;
- static int global_id_=0;
// The first communicator is created before SIMIX_run, so only run the id-exchange collectives for the ones created later
if(in_id==MPI_UNDEFINED && smp==0 && this->rank()!=MPI_UNDEFINED ){
int id;
if(this->rank()==0){
+ static int global_id_ = 0;
id=global_id_;
global_id_++;
}
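+ // Only rank 0 reads and bumps the function-local counter; the others get the id from the bcast,
+ // and the closing barrier presumably keeps any rank from using the comm before all have it.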
- Colls::bcast(&id, 1, MPI_INT, 0, this);
+ colls::bcast(&id, 1, MPI_INT, 0, this);
XBT_DEBUG("Communicator %p has id %d", this, id);
id_ = id; // only set here, as we don't want to change it in the middle of the bcast
- Colls::barrier(this);
+ colls::barrier(this);
}
}
Comm::destroy(smpi_process()->comm_world());
return;
}
- delete comm->topo_; // there's no use count on topos
Comm::unref(comm);
}
int Comm::dup(MPI_Comm* newcomm){
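+ // (Under MMAP privatization each rank has its own mmap'ed copy of the global data segment;
+ // the switch below makes sure we operate on the calling rank's copy.)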
- if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
+ if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(s4u::Actor::self());
}
} else {
recvbuf = nullptr;
}
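+ // The root gathers two ints per rank (in Comm::split these would be the <color, key> pair;
+ // the buffers themselves are set up outside this hunk).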
- Coll_gather_default::gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
+ gather__default(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
xbt_free(sendbuf);
/* Do the actual job */
if (myrank == 0) {
group_root = group_out; /* Save root's group */
}
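+ // rankmap[j].second gives the parent-group position of the actor that becomes rank j here.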
for (unsigned j = 0; j < rankmap.size(); j++) {
- s4u::ActorPtr actor = group->actor(rankmap[j].second);
+ s4u::Actor* actor = group->actor(rankmap[j].second);
group_out->set_mapping(actor, j);
}
MPI_Request* requests = xbt_new(MPI_Request, rankmap.size());
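+ // Last reference gone: release the SMP helpers, the attributes, the topology, and the comm itself.
+ // Freeing the topology here instead of in Comm::destroy presumably lets it live as long as any
+ // reference to the comm does.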
if(comm->refcount_==0){
comm->cleanup_smp();
comm->cleanup_attr<Comm>();
+ delete comm->topo_; // there's no use count on topos
delete comm;
}
}
MPI_Comm Comm::find_intra_comm(int * leader){
// get the indices of all processes sharing the same SIMIX host
- auto& process_list = sg_host_self()->pimpl_->process_list_;
+ auto& actor_list = sg_host_self()->pimpl_->actor_list_;
int intra_comm_size = 0;
int min_index = INT_MAX; // the minimum index will be the leader
- for (auto& actor : process_list) {
+ for (auto& actor : actor_list) {
int index = actor.get_pid();
- if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) { // Is this process in the current group?
+ if (this->group()->rank(actor.ciface()) != MPI_UNDEFINED) { // Is this process in the current group?
intra_comm_size++;
if (index < min_index)
min_index = index;
XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size);
MPI_Group group_intra = new Group(intra_comm_size);
int i = 0;
- for (auto& actor : process_list) {
- if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) {
- group_intra->set_mapping(actor.iface(), i);
+ for (auto& actor : actor_list) {
+ if (this->group()->rank(actor.ciface()) != MPI_UNDEFINED) {
+ group_intra->set_mapping(actor.ciface(), i);
i++;
}
}
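+ // group_intra now holds every member of this comm running on the local host, in actor_list order.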
smpi_process()->set_replaying(false);
}
- if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
+ if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(s4u::Actor::self());
}
- //identify neighbours in comm
+ // identify neighbors in comm
MPI_Comm comm_intra = find_intra_comm(&leader);
std::fill_n(leaders_map, comm_size, 0);
std::fill_n(leader_list, comm_size, -1);
- Coll_allgather_ring::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
+ allgather__ring(&leader, 1, MPI_INT, leaders_map, 1, MPI_INT, this);
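+ // After the allgather, leaders_map[r] holds the leader PID announced by rank r.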
- if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
+ if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(s4u::Actor::self());
}
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]), i);
+ leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
leader_comm = new Comm(leaders_group, nullptr,1);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
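+ // Cache both communicators on this comm so SMP-aware collectives can reuse them later.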
// create intracommunicator
}else{
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]), i);
+ leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
leader_comm = new Comm(leaders_group, nullptr,1);
if(comm_intra->rank()==0) {
int is_uniform = 1;
int* non_uniform_map = xbt_new0(int,leader_group_size);
- Coll_allgather_ring::allgather(&my_local_size, 1, MPI_INT,
-                non_uniform_map, 1, MPI_INT, leader_comm);
+ allgather__ring(&my_local_size, 1, MPI_INT, non_uniform_map, 1, MPI_INT, leader_comm);
for(i=0; i < leader_group_size; i++) {
if(non_uniform_map[0] != non_uniform_map[i]) {
}
is_uniform_=is_uniform;
}
- Coll_bcast_scatter_LR_allgather::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
+ bcast__scatter_LR_allgather(&(is_uniform_), 1, MPI_INT, 0, comm_intra);
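+ // is_uniform_ ends up true only when every node hosts the same number of ranks: the leaders
+ // compare their local sizes, and each intra comm's rank 0 broadcasts the verdict to its node.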
- if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
+ if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(s4u::Actor::self());
}
}
int global_blocked;
- Coll_allreduce_default::allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);
+ allreduce__default(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this);
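+ // The logical AND keeps the comm flagged as blocked only if every node reports a
+ // blocked (contiguous-ranks) local layout.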
if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
if(this->rank()==0){
if(type != MPI_UNDEFINED)
return res;
else{
+ xbt_assert(res->refcount_ == 1); // ensure the next call to Comm::destroy really frees the comm
Comm::destroy(res);
return MPI_COMM_NULL;
}