-/* Copyright (c) 2010-2021. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2010-2022. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_comm.hpp"
+#include "simgrid/host.h"
#include "smpi_coll.hpp"
#include "smpi_datatype.hpp"
+#include "smpi_info.hpp"
#include "smpi_request.hpp"
#include "smpi_win.hpp"
-#include "smpi_info.hpp"
#include "src/smpi/include/smpi_actor.hpp"
#include "src/surf/HostImpl.hpp"
-#include <climits>
+#include <limits>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");
colls::bcast(&id, 1, MPI_INT, 0, this);
XBT_DEBUG("Communicator %p has id %d", this, id);
id_=id;//only set here, as we don't want to change it in the middle of the bcast
- colls::barrier(this);
}
}
Comm::destroy(smpi_process()->comm_world());
return;
}
- if(comm != MPI_COMM_WORLD)
+ if (comm != MPI_COMM_WORLD && not comm->deleted()) {
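+ // run the attribute delete callbacks while the communicator is still usable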
+ comm->cleanup_attr<Comm>();
comm->mark_as_deleted();
+ }
Comm::unref(comm);
}
int Comm::dup(MPI_Comm* newcomm){
- if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
- // we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(s4u::Actor::self());
- }
+ // we need to switch as the called function may silently touch global variables
+ smpi_switch_data_segment(s4u::Actor::self());
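+ // note: smpi_switch_data_segment is expected to check the privatization mode itself and be a no-op unless MMAP privatization is active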
+
auto* cp = new Group(this->group());
(*newcomm) = new Comm(cp, this->topo());
- int ret = MPI_SUCCESS;
-
- if (not attributes()->empty()) {
- int flag=0;
- void* value_out=nullptr;
- for (auto const& it : *attributes()) {
- smpi_key_elem elem = keyvals_.at(it.first);
- if (elem != nullptr){
- if( elem->copy_fn.comm_copy_fn != MPI_NULL_COPY_FN &&
- elem->copy_fn.comm_copy_fn != MPI_COMM_DUP_FN)
- ret = elem->copy_fn.comm_copy_fn(this, it.first, elem->extra_state, it.second, &value_out, &flag);
- else if ( elem->copy_fn.comm_copy_fn_fort != MPI_NULL_COPY_FN &&
- *(int*)*elem->copy_fn.comm_copy_fn_fort != 1){
- value_out=(int*)xbt_malloc(sizeof(int));
- elem->copy_fn.comm_copy_fn_fort(this, it.first, elem->extra_state, it.second, value_out, &flag,&ret);
- }
- if (ret != MPI_SUCCESS) {
- Comm::destroy(*newcomm);
- *newcomm = MPI_COMM_NULL;
- return ret;
- }
- if (elem->copy_fn.comm_copy_fn == MPI_COMM_DUP_FN ||
- ((elem->copy_fn.comm_copy_fn_fort != MPI_NULL_COPY_FN) && *(int*)*elem->copy_fn.comm_copy_fn_fort == 1)){
- elem->refcount++;
- (*newcomm)->attributes()->insert({it.first, it.second});
- }else if (flag){
- elem->refcount++;
- (*newcomm)->attributes()->insert({it.first, value_out});
- }
+
+ for (auto const& [key, value] : attributes()) {
+ auto elem_it = keyvals_.find(key);
+ xbt_assert(elem_it != keyvals_.end(), "Keyval not found for Comm: %d", key);
+
+ smpi_key_elem& elem = elem_it->second;
+ int ret = MPI_SUCCESS;
+ int flag = 0;
+ void* value_out = nullptr;
+ if (elem.copy_fn.comm_copy_fn == MPI_COMM_DUP_FN) {
+ value_out = value;
+ flag = 1;
+ } else if (elem.copy_fn.comm_copy_fn != MPI_NULL_COPY_FN) {
+ ret = elem.copy_fn.comm_copy_fn(this, key, elem.extra_state, value, &value_out, &flag);
+ }
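+ // Fortran copy callbacks: the predefined ones are apparently encoded as pointers to small integers, the value 1 standing for MPI_COMM_DUP_FN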
+ if (elem.copy_fn.comm_copy_fn_fort != MPI_NULL_COPY_FN) {
+ value_out = xbt_new(int, 1);
+ if (*(int*)*elem.copy_fn.comm_copy_fn_fort == 1) { // MPI_COMM_DUP_FN
+ memcpy(value_out, value, sizeof(int));
+ flag = 1;
+ } else { // not null, nor dup
+ elem.copy_fn.comm_copy_fn_fort(this, key, elem.extra_state, value, value_out, &flag, &ret);
}
+ if (ret != MPI_SUCCESS)
+ xbt_free(value_out);
+ }
+ if (ret != MPI_SUCCESS) {
+ Comm::destroy(*newcomm);
+ *newcomm = MPI_COMM_NULL;
+ return ret;
+ }
+ if (flag) {
+ elem.refcount++;
+ (*newcomm)->attributes().try_emplace(key, value_out);
}
}
//duplicate info if present
if (errhandlers_ != nullptr) // MPI_COMM_WORLD keeps one errhandler per rank; grab our own
(*newcomm)->set_errhandler(errhandlers_[this->rank()]);
else
(*newcomm)->set_errhandler(errhandler_);
- return ret;
+ return MPI_SUCCESS;
}
int Comm::dup_with_info(MPI_Info info, MPI_Comm* newcomm){
int Comm::rank() const
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->rank();
- return group_->rank(s4u::Actor::self());
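+ // look the rank up by actor id (pid): Group now maps aid_t rather than actor pointers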
+ return group_->rank(s4u::this_actor::get_pid());
}
int Comm::id() const
void Comm::get_name(char* name, int* len) const
{
if(this == MPI_COMM_WORLD && name_.empty()) {
strncpy(name, "MPI_COMM_WORLD", 15);
*len = 14;
- } else if(this == MPI_COMM_SELF && name_.empty()) {
- strncpy(name, "MPI_COMM_SELF", 14);
- *len = 13;
} else {
*len = snprintf(name, MPI_MAX_NAME_STRING+1, "%s", name_.c_str());
}
std::string Comm::name() const
{
int size;
- char name[MPI_MAX_NAME_STRING];
- this->get_name(name, &size);
- return std::string(name);
+ std::array<char, MPI_MAX_NAME_STRING + 1> name;
+ this->get_name(name.data(), &size);
+ if (name[0]=='\0')
+ return std::string("MPI_Comm");
+ else
+ return std::string(name.data());
}
bool Comm::is_uniform() const
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->is_uniform();
- return is_uniform_ != 0;
+ return is_uniform_;
}
bool Comm::is_blocked() const
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->is_blocked();
- return is_blocked_ != 0;
+ return is_blocked_;
}
bool Comm::is_smp_comm() const
MPI_Comm Comm::split(int color, int key)
{
MPI_Group group_root = nullptr;
MPI_Group group_out = nullptr;
- MPI_Group group = this->group();
+ const Group* group = this->group();
int myrank = this->rank();
int size = this->size();
/* Gather all colors and keys on rank 0 */
group_root = group_out; /* Save root's group */
}
for (unsigned j = 0; j < rankmap.size(); j++) {
- s4u::Actor* actor = group->actor(rankmap[j].second);
+ aid_t actor = group->actor(rankmap[j].second);
group_out->set_mapping(actor, j);
}
std::vector<MPI_Request> requests(rankmap.size());
int reqs = 0;
- for (auto const& rank : rankmap) {
- if (rank.second != 0) {
+ for (auto const& [_, rank] : rankmap) {
+ if (rank != 0) {
group_snd[reqs]=new Group(group_out);
- requests[reqs] = Request::isend(&(group_snd[reqs]), 1, MPI_PTR, rank.second, system_tag, this);
+ requests[reqs] = Request::isend(&(group_snd[reqs]), 1, MPI_PTR, rank, system_tag, this);
reqs++;
}
}
simgrid::smpi::Info::unref(comm->info_);
if(comm->errhandlers_!=nullptr){
for (int i=0; i<comm->size(); i++)
if (comm->errhandlers_[i]!=MPI_ERRHANDLER_NULL)
simgrid::smpi::Errhandler::unref(comm->errhandlers_[i]);
delete[] comm->errhandlers_;
} else if (comm->errhandler_ != MPI_ERRHANDLER_NULL)
MPI_Comm Comm::find_intra_comm(int * leader){
//get the indices of all processes sharing the same simix host
int intra_comm_size = 0;
- int min_index = INT_MAX; // the minimum index will be the leader
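+ // aid_t may be wider than int, so use its own maximum as the sentinel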
+ aid_t min_index = std::numeric_limits<aid_t>::max(); // the minimum index will be the leader
sg_host_self()->get_impl()->foreach_actor([this, &intra_comm_size, &min_index](auto& actor) {
- int index = actor.get_pid();
- if (this->group()->rank(actor.get_ciface()) != MPI_UNDEFINED) { // Is this process in the current group?
+ aid_t index = actor.get_pid();
+ if (this->group()->rank(index) != MPI_UNDEFINED) { // Is this process in the current group?
intra_comm_size++;
if (index < min_index)
min_index = index;
auto* group_intra = new Group(intra_comm_size);
int i = 0;
sg_host_self()->get_impl()->foreach_actor([this, group_intra, &i](auto& actor) {
- if (this->group()->rank(actor.get_ciface()) != MPI_UNDEFINED) {
- group_intra->set_mapping(actor.get_ciface(), i);
+ if (this->group()->rank(actor.get_pid()) != MPI_UNDEFINED) {
+ group_intra->set_mapping(actor.get_pid(), i);
i++;
}
});
smpi_process()->set_replaying(false);
}
- if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
- // we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(s4u::Actor::self());
- }
+ // we need to switch as the called function may silently touch global variables
+ smpi_switch_data_segment(s4u::Actor::self());
+
// identify neighbors in comm
MPI_Comm comm_intra = find_intra_comm(&leader);
allgather__ring(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
- if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
- // we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(s4u::Actor::self());
- }
-
if(leaders_map_==nullptr){
leaders_map_= leaders_map;
}else{
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
+ leaders_group->set_mapping(leader_list[i], i);
leader_comm = new Comm(leaders_group, nullptr, true);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
// create intracommunicator
}else{
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
+ leaders_group->set_mapping(leader_list[i], i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
leader_comm = new Comm(leaders_group, nullptr, true);
// Are the nodes uniform ? = same number of process/node
int my_local_size=comm_intra->size();
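+ // decided on the intra-comm root, then broadcast to the other local ranks below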
+ int is_uniform;
if(comm_intra->rank()==0) {
- int is_uniform = 1;
- int* non_uniform_map = xbt_new0(int,leader_group_size);
+ is_uniform = 1;
+ auto* non_uniform_map = xbt_new0(int, leader_group_size);
allgather__ring(&my_local_size, 1, MPI_INT,
non_uniform_map, 1, MPI_INT, leader_comm);
for(i=0; i < leader_group_size; i++) {
}else{
xbt_free(non_uniform_map);
}
- is_uniform_=is_uniform;
}
- bcast__scatter_LR_allgather(&is_uniform_, 1, MPI_INT, 0, comm_intra);
+ bcast__scatter_LR_allgather(&is_uniform, 1, MPI_INT, 0, comm_intra);
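+ // keep a bool; the collective above operates on raw int buffers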
+ is_uniform_ = (is_uniform != 0);
- if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
- // we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(s4u::Actor::self());
- }
+
+ // we need to switch as the called function may silently touch global variables
+ smpi_switch_data_segment(s4u::Actor::self());
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
int is_blocked=1;
- int prev=this->group()->rank(comm_intra->group()->actor(0));
+ int prev = this->group()->rank(comm_intra->group()->actor(0));
for (i = 1; i < my_local_size; i++) {
int that = this->group()->rank(comm_intra->group()->actor(i));
if (that != prev + 1) {
int global_blocked;
allreduce__default(&is_blocked, &global_blocked, 1, MPI_INT, MPI_LAND, this);
- if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){
- if(this->rank()==0){
- is_blocked_ = global_blocked;
- }
- }else{
- is_blocked_=global_blocked;
- }
+ if ((MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED && this != MPI_COMM_WORLD) || this->rank() == 0)
+ is_blocked_ = (global_blocked != 0);
delete[] leader_list;
if(replaying)
void Comm::finish_rma_calls() const
{
+ const int myrank = rank();
for (auto const& it : rma_wins_) {
- if(it->rank()==this->rank()){//is it ours (for MPI_COMM_WORLD)?
+ if (it->rank() == myrank) { // is it ours (for MPI_COMM_WORLD)?
int finished = it->finish_comms();
- XBT_DEBUG("Barrier for rank %d - Finished %d RMA calls",this->rank(), finished);
+ XBT_DEBUG("Barrier for rank %d - Finished %d RMA calls", myrank, finished);
}
}
}
MPI_Info Comm::info()
{
- if (info_ == MPI_INFO_NULL)
- info_ = new Info();
- info_->ref();
return info_;
}
errhandler_->ref();
return errhandler_;
} else {
- if(errhandlers_==nullptr)
- return MPI_ERRORS_ARE_FATAL;
- else {
+ if(errhandlers_==nullptr){
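+ // no per-rank table: fall back to the configured default handler (presumably governed by the smpi/errors-are-fatal option)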
+ if (_smpi_cfg_default_errhandler_is_error)
+ return MPI_ERRORS_ARE_FATAL;
+ else
+ return MPI_ERRORS_RETURN;
+ } else {
if(errhandlers_[this->rank()] != MPI_ERRHANDLER_NULL)
errhandlers_[this->rank()]->ref();
return errhandlers_[this->rank()];
}
}
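+// Build the key used by the message counters below: tag, src and dst joined
+// with '_' so that adjacent values cannot collide (e.g. src=1,dst=23 vs src=12,dst=3)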
+static inline std::string hash_message(int src, int dst, int tag){
+ return std::to_string(tag) + '_' + std::to_string(src) + '_' + std::to_string(dst);
+}
+
+unsigned int Comm::get_sent_messages_count(int src, int dst, int tag)
+{
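+ // operator[] default-inserts 0 for a triple never seen, so a first query is safe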
+ return sent_messages_[hash_message(src, dst, tag)];
+}
+
+void Comm::increment_sent_messages_count(int src, int dst, int tag)
+{
+ sent_messages_[hash_message(src, dst, tag)]++;
+}
+
+unsigned int Comm::get_received_messages_count(int src, int dst, int tag)
+{
+ return recv_messages_[hash_message(src, dst, tag)];
+}
+
+void Comm::increment_received_messages_count(int src, int dst, int tag)
+{
+ recv_messages_[hash_message(src, dst, tag)]++;
+}
+
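+// MPI_COMM_WORLD is a single object shared by all simulated ranks, so its
+// collective calls are counted per rank in collectives_counts_; other
+// communicators are created per rank and use the scalar collectives_count_.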
+unsigned int Comm::get_collectives_count()
+{
+ if (this==MPI_COMM_UNINITIALIZED){
+ return smpi_process()->comm_world()->get_collectives_count();
+ }else if(this == MPI_COMM_WORLD || this == smpi_process()->comm_world()){
+ if (collectives_counts_.empty())
+ collectives_counts_.resize(this->size());
+ return collectives_counts_[this->rank()];
+ }else{
+ return collectives_count_;
+ }
+}
+
+void Comm::increment_collectives_count()
+{
+ if (this==MPI_COMM_UNINITIALIZED){
+ smpi_process()->comm_world()->increment_collectives_count();
+ }else if (this == MPI_COMM_WORLD || this == smpi_process()->comm_world()){
+ if (collectives_counts_.empty())
+ collectives_counts_.resize(this->size());
+ collectives_counts_[this->rank()]++;
+ }else{
+ collectives_count_++;
+ }
+}
+
} // namespace smpi
} // namespace simgrid