-/* Copyright (c) 2010-2020. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2010-2021. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");
-simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED;
-MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;
+simgrid::smpi::Comm smpi_MPI_COMM_UNINITIALIZED;
+MPI_Comm MPI_COMM_UNINITIALIZED=&smpi_MPI_COMM_UNINITIALIZED;
+/**
+ * Setting MPI_COMM_WORLD to MPI_COMM_UNINITIALIZED (it is now a variable) is
+ * important because the implementation of MPI_Comm checks whether
+ * "this == MPI_COMM_UNINITIALIZED"; if so, it uses smpi_process()->comm_world()
+ * instead of "this".
+ * This way, the single global variable resolves to a different communicator in
+ * each process (the one of its SMPI instance).
+ */
+MPI_Comm MPI_COMM_WORLD = MPI_COMM_UNINITIALIZED;
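+/* Most methods of Comm therefore start with a check along these lines (a sketch
+ * of the pattern; see e.g. Comm::split() below):
+ *   if (this == MPI_COMM_UNINITIALIZED)
+ *     return smpi_process()->comm_world()->split(color, key);
+ */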
/* Support for cartesian topology was added, but there are 2 other types of topology: graph and dist graph. In order to
 * support them, we have to add a field SMPI_Topo_type, and replace the MPI_Topology field with a union. */
errhandler_->ref();
//The first communicator is created before SIMIX_run, so only do the communications for the others
if(in_id==MPI_UNDEFINED && smp==0 && this->rank()!=MPI_UNDEFINED ){
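+    // Register this communicator and its group in the C/Fortran handle translation table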
+ this->add_f();
+ group->c2f();
int id;
if(this->rank()==0){
static int global_id_ = 0;
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->split(color, key);
int system_tag = -123;
- int* recvbuf;
MPI_Group group_root = nullptr;
MPI_Group group_out = nullptr;
int myrank = this->rank();
int size = this->size();
/* Gather all colors and keys on rank 0 */
- int* sendbuf = xbt_new(int, 2);
- sendbuf[0] = color;
- sendbuf[1] = key;
- if (myrank == 0) {
- recvbuf = xbt_new(int, 2 * size);
- } else {
- recvbuf = nullptr;
- }
- gather__default(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this);
- xbt_free(sendbuf);
+ const std::array<int, 2> sendbuf = {{color, key}};
+ std::vector<int> recvbuf;
+ if (myrank == 0)
+ recvbuf.resize(2 * size);
+ gather__default(sendbuf.data(), 2, MPI_INT, recvbuf.data(), 2, MPI_INT, 0, this);
/* Do the actual job: rank 0 builds the new group of each color and sends it to its members */
if (myrank == 0) {
- MPI_Group* group_snd = xbt_new(MPI_Group, size);
+ std::vector<MPI_Group> group_snd(size);
std::vector<std::pair<int, int>> rankmap;
rankmap.reserve(size);
for (int i = 0; i < size; i++) {
s4u::Actor* actor = group->actor(rankmap[j].second);
group_out->set_mapping(actor, j);
}
- MPI_Request* requests = xbt_new(MPI_Request, rankmap.size());
+ std::vector<MPI_Request> requests(rankmap.size());
int reqs = 0;
for (auto const& rank : rankmap) {
if (rank.second != 0) {
if(i != 0 && group_out != MPI_COMM_WORLD->group() && group_out != MPI_GROUP_EMPTY)
Group::unref(group_out);
- Request::waitall(reqs, requests, MPI_STATUS_IGNORE);
- xbt_free(requests);
+ Request::waitall(reqs, requests.data(), MPI_STATUS_IGNORE);
}
}
- xbt_free(recvbuf);
- xbt_free(group_snd);
group_out = group_root; /* exit with root's group */
} else {
if(color != MPI_UNDEFINED) {
return;
}
comm->refcount_--;
- Group::unref(comm->group_);
if(comm->refcount_==0){
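+    // If the C/Fortran translation table still exists, remove this communicator's Fortran handle from it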
+ if(simgrid::smpi::F2C::lookup() != nullptr)
+ F2C::free_f(comm->c2f());
comm->cleanup_smp();
comm->cleanup_attr<Comm>();
if (comm->info_ != MPI_INFO_NULL)
simgrid::smpi::Info::unref(comm->info_);
- if (comm->errhandler_ != MPI_ERRHANDLER_NULL)
+ if(comm->errhandlers_!=nullptr){
+ for (int i=0; i<comm->size(); i++)
+ if (comm->errhandlers_[i]!=MPI_ERRHANDLER_NULL)
+ simgrid::smpi::Errhandler::unref(comm->errhandlers_[i]);
+ delete[] comm->errhandlers_;
+ } else if (comm->errhandler_ != MPI_ERRHANDLER_NULL)
simgrid::smpi::Errhandler::unref(comm->errhandler_);
- delete comm;
}
+ Group::unref(comm->group_);
+ if(comm->refcount_==0)
+ delete comm;
}
MPI_Comm Comm::find_intra_comm(int * leader){
//Get the indices of all processes sharing the same simulated host
- auto& actor_list = sg_host_self()->pimpl_->actor_list_;
int intra_comm_size = 0;
int min_index = INT_MAX; // the minimum index will be the leader
- for (auto& actor : actor_list) {
+ sg_host_self()->pimpl_->foreach_actor([this, &intra_comm_size, &min_index](auto& actor) {
int index = actor.get_pid();
- if (this->group()->rank(actor.ciface()) != MPI_UNDEFINED) { // Is this process in the current group?
+ if (this->group()->rank(actor.get_ciface()) != MPI_UNDEFINED) { // Is this process in the current group?
intra_comm_size++;
if (index < min_index)
min_index = index;
}
- }
+ });
XBT_DEBUG("number of processes deployed on my node: %d", intra_comm_size);
auto* group_intra = new Group(intra_comm_size);
int i = 0;
- for (auto& actor : actor_list) {
- if (this->group()->rank(actor.ciface()) != MPI_UNDEFINED) {
- group_intra->set_mapping(actor.ciface(), i);
+ sg_host_self()->pimpl_->foreach_actor([this, group_intra, &i](auto& actor) {
+ if (this->group()->rank(actor.get_ciface()) != MPI_UNDEFINED) {
+ group_intra->set_mapping(actor.get_ciface(), i);
i++;
}
- }
+ });
*leader=min_index;
return new Comm(group_intra, nullptr, true);
}
return MPI_COMM_SELF;
} else if(id==0){
return MPI_COMM_WORLD;
- } else if(F2C::f2c_lookup() != nullptr && id >= 0) {
- char key[KEY_SIZE];
- const auto& lookup = F2C::f2c_lookup();
- auto comm = lookup->find(get_key(key, id));
+ } else if (F2C::lookup() != nullptr && id >= 0) {
+ const auto& lookup = F2C::lookup();
+ auto comm = lookup->find(id);
return comm == lookup->end() ? MPI_COMM_NULL : static_cast<MPI_Comm>(comm->second);
} else {
return MPI_COMM_NULL;
}
void Comm::free_f(int id) {
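+  // Remove the given Fortran handle from the C/Fortran translation table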
- char key[KEY_SIZE];
- F2C::f2c_lookup()->erase(get_key(key, id));
+ F2C::lookup()->erase(id);
}
void Comm::add_rma_win(MPI_Win win){
MPI_Errhandler Comm::errhandler()
{
- if (errhandler_ != MPI_ERRHANDLER_NULL)
- errhandler_->ref();
- return errhandler_;
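+  /* The MPI_COMM_WORLD object is shared by every rank of a SMPI instance, while MPI
+   * error handlers are per process: MPI_COMM_WORLD stores one handler per rank in errhandlers_. */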
+ if (this != MPI_COMM_WORLD){
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ errhandler_->ref();
+ return errhandler_;
+ } else {
+ if(errhandlers_==nullptr)
+ return MPI_ERRORS_ARE_FATAL;
+ else {
+ if(errhandlers_[this->rank()] != MPI_ERRHANDLER_NULL)
+ errhandlers_[this->rank()]->ref();
+ return errhandlers_[this->rank()];
+ }
+ }
}
void Comm::set_errhandler(MPI_Errhandler errhandler)
{
- if (errhandler_ != MPI_ERRHANDLER_NULL)
- simgrid::smpi::Errhandler::unref(errhandler_);
- errhandler_ = errhandler;
- if (errhandler_ != MPI_ERRHANDLER_NULL)
- errhandler_->ref();
+ if(this != MPI_COMM_WORLD){
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
+ simgrid::smpi::Errhandler::unref(errhandler_);
+ errhandler_ = errhandler;
+ }else{
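+    // Lazily allocate one errhandler slot per rank the first time a handler is set on MPI_COMM_WORLD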
+ if(errhandlers_==nullptr)
+ errhandlers_= new MPI_Errhandler[this->size()]{MPI_ERRHANDLER_NULL};
+ if(errhandlers_[this->rank()] != MPI_ERRHANDLER_NULL)
+ simgrid::smpi::Errhandler::unref(errhandlers_[this->rank()]);
+ errhandlers_[this->rank()]=errhandler;
+ }
+ if (errhandler != MPI_ERRHANDLER_NULL)
+ errhandler->ref();
}
MPI_Comm Comm::split_type(int type, int /*key*/, const Info*)