namespace simgrid {
namespace smpi {
-using simgrid::s4u::Actor;
-using simgrid::s4u::ActorPtr;
-
-ActorExt::ActorExt(ActorPtr actor, simgrid::s4u::Barrier* finalization_barrier)
+ActorExt::ActorExt(s4u::ActorPtr actor, s4u::Barrier* finalization_barrier)
: finalization_barrier_(finalization_barrier), actor_(actor)
{
mailbox_ = s4u::Mailbox::by_name("SMPI-" + std::to_string(actor_->get_pid()));
return replaying_;
}
-ActorPtr ActorExt::get_actor()
+s4u::ActorPtr ActorExt::get_actor()
{
return actor_;
}
std::map</* computation unit name */ std::string, papi_process_data> units2papi_setup;
#endif
-using simgrid::s4u::Actor;
-using simgrid::s4u::ActorPtr;
std::unordered_map<std::string, double> location2speedup;
-static std::map</*process_id*/ ActorPtr, simgrid::smpi::ActorExt*> process_data;
+static std::map</*process_id*/ simgrid::s4u::ActorPtr, simgrid::smpi::ActorExt*> process_data;
int process_count = 0;
static int smpi_exit_status = 0;
int smpi_universe_size = 0;
simgrid::smpi::ActorExt* smpi_process()
{
  // Fetch the SMPI extension of the calling actor. Actor::self() may legitimately
  // be null when we are not inside a simulated actor (eg, when linking against NS3
  // because it pulls openMPI...), in which case no extension exists.
  simgrid::s4u::ActorPtr self = simgrid::s4u::Actor::self();
  return (self == nullptr) ? nullptr : process_data.at(self);
}
-simgrid::smpi::ActorExt* smpi_process_remote(ActorPtr actor)
+simgrid::smpi::ActorExt* smpi_process_remote(simgrid::s4u::ActorPtr actor)
{
return process_data.at(actor);
}
}
void * smpi_process_get_user_data(){
- return Actor::self()->get_impl()->get_user_data();
+ return simgrid::s4u::Actor::self()->get_impl()->get_user_data();
}
void smpi_process_set_user_data(void* data)
{
  // Store an opaque user pointer on the calling actor's implementation object.
  simgrid::s4u::Actor::self()->get_impl()->set_user_data(data);
}
simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;
-using simgrid::s4u::ActorPtr;
-
/* Support for cartesian topology was added, but there are 2 other types of topology, graph et dist graph. In order to
* support them, we have to add a field SMPI_Topo_type, and replace the MPI_Topology field by an union. */
int Comm::dup(MPI_Comm* newcomm){
if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
+ smpi_switch_data_segment(s4u::Actor::self());
}
MPI_Group cp = new Group(this->group());
(*newcomm) = new Comm(cp, this->topo());
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->rank();
- return group_->rank(simgrid::s4u::Actor::self());
+ return group_->rank(s4u::Actor::self());
}
void Comm::get_name (char* name, int* len)
group_root = group_out; /* Save root's group */
}
for (unsigned j = 0; j < rankmap.size(); j++) {
- ActorPtr actor = group->actor(rankmap[j].second);
+ s4u::ActorPtr actor = group->actor(rankmap[j].second);
group_out->set_mapping(actor, j);
}
MPI_Request* requests = xbt_new(MPI_Request, rankmap.size());
if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
+ smpi_switch_data_segment(s4u::Actor::self());
}
//identify neighbours in comm
//get the indices of all processes sharing the same simix host
if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
+ smpi_switch_data_segment(s4u::Actor::self());
}
if(leaders_map_==nullptr){
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(simgrid::s4u::Actor::by_pid(leader_list[i]), i);
+ leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]), i);
leader_comm = new Comm(leaders_group, nullptr,1);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
// create intracommunicator
}else{
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(simgrid::s4u::Actor::by_pid(leader_list[i]), i);
+ leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]), i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
leader_comm = new Comm(leaders_group, nullptr,1);
if (smpi_privatize_global_variables == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
- smpi_switch_data_segment(simgrid::s4u::Actor::self());
+ smpi_switch_data_segment(s4u::Actor::self());
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
int is_blocked=1;
namespace simgrid{
namespace smpi{
-using simgrid::s4u::ActorPtr;
-
Group::Group(Group* origin)
{
if (origin != MPI_GROUP_NULL && origin != MPI_GROUP_EMPTY) {
}
}
-void Group::set_mapping(simgrid::s4u::ActorPtr actor, int rank)
+void Group::set_mapping(s4u::ActorPtr actor, int rank)
{
if (0 <= rank && rank < size_) {
int index = actor->get_pid();
return rank;
}
-simgrid::s4u::ActorPtr Group::actor(int rank) {
+s4u::ActorPtr Group::actor(int rank)
+{
if (0 <= rank && rank < size_)
return rank_to_actor_map_[rank];
else
return nullptr;
}
-int Group::rank(const simgrid::s4u::ActorPtr actor) {
+int Group::rank(const s4u::ActorPtr actor)
+{
auto iterator = actor_to_rank_map_.find(actor);
return (iterator == actor_to_rank_map_.end()) ? MPI_UNDEFINED : (*iterator).second;
}
result = MPI_UNEQUAL;
} else {
for (int i = 0; i < size_; i++) {
- ActorPtr actor = this->actor(i);
+ s4u::ActorPtr actor = this->actor(i);
int rank = group2->rank(actor);
if (rank == MPI_UNDEFINED) {
result = MPI_UNEQUAL;
} else {
*newgroup = new Group(n);
for (i = 0; i < n; i++) {
- ActorPtr actor = this->actor(ranks[i]); // ranks[] was passed as a param!
+ s4u::ActorPtr actor = this->actor(ranks[i]); // ranks[] was passed as a param!
(*newgroup)->set_mapping(actor, i);
}
}
int size1 = size_;
int size2 = group2->size();
for (int i = 0; i < size2; i++) {
- ActorPtr actor = group2->actor(i);
+ s4u::ActorPtr actor = group2->actor(i);
int proc1 = this->rank(actor);
if (proc1 == MPI_UNDEFINED) {
size1++;
*newgroup = new Group(size1);
size2 = this->size();
for (int i = 0; i < size2; i++) {
- ActorPtr actor1 = this->actor(i);
+ s4u::ActorPtr actor1 = this->actor(i);
(*newgroup)->set_mapping(actor1, i);
}
for (int i = size2; i < size1; i++) {
- ActorPtr actor = group2->actor(i - size2);
+ s4u::ActorPtr actor = group2->actor(i - size2);
(*newgroup)->set_mapping(actor, i);
}
}
{
int size2 = group2->size();
for (int i = 0; i < size2; i++) {
- ActorPtr actor = group2->actor(i);
+ s4u::ActorPtr actor = group2->actor(i);
int proc1 = this->rank(actor);
if (proc1 == MPI_UNDEFINED) {
size2--;
*newgroup = new Group(size2);
int j=0;
for (int i = 0; i < group2->size(); i++) {
- ActorPtr actor = group2->actor(i);
+ s4u::ActorPtr actor = group2->actor(i);
int proc1 = this->rank(actor);
if (proc1 != MPI_UNDEFINED) {
(*newgroup)->set_mapping(actor, j);
int newsize = size_;
int size2 = size_;
for (int i = 0; i < size2; i++) {
- ActorPtr actor = this->actor(i);
+ s4u::ActorPtr actor = this->actor(i);
int proc2 = group2->rank(actor);
if (proc2 != MPI_UNDEFINED) {
newsize--;
} else {
*newgroup = new Group(newsize);
for (int i = 0; i < size2; i++) {
- ActorPtr actor = this->actor(i);
+ s4u::ActorPtr actor = this->actor(i);
int proc2 = group2->rank(actor);
if (proc2 == MPI_UNDEFINED) {
(*newgroup)->set_mapping(actor, i);
int j = 0;
for (int i = 0; i < oldsize; i++) {
if(to_exclude[i]==0){
- ActorPtr actor = this->actor(i);
+ s4u::ActorPtr actor = this->actor(i);
(*newgroup)->set_mapping(actor, j);
j++;
}
for (int rank = ranges[i][0]; /* First */
rank >= 0 && rank < size_; /* Last */
) {
- ActorPtr actor = this->actor(rank);
+ s4u::ActorPtr actor = this->actor(rank);
(*newgroup)->set_mapping(actor, j);
j++;
if(rank == ranges[i][1]){/*already last ?*/
}
}
if(add==1){
- ActorPtr actor = this->actor(oldrank);
+ s4u::ActorPtr actor = this->actor(oldrank);
(*newgroup)->set_mapping(actor, newrank);
newrank++;
}
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_rma, smpi, "Logging specific to SMPI (RMA operations)");
-using simgrid::s4u::Actor;
namespace simgrid{
namespace smpi{
connected_wins_[rank_] = this;
count_ = 0;
if(rank_==0){
- bar_ = new simgrid::s4u::Barrier(comm_size);
+ bar_ = new s4u::Barrier(comm_size);
}
mode_=0;
Colls::allgather(&(connected_wins_[rank_]), sizeof(MPI_Win), MPI_BYTE, connected_wins_, sizeof(MPI_Win),
MPI_BYTE, comm);
- Colls::bcast(&(bar_), sizeof(simgrid::s4u::Barrier*), MPI_BYTE, 0, comm);
+ Colls::bcast(&(bar_), sizeof(s4u::Barrier*), MPI_BYTE, 0, comm);
Colls::barrier(comm);
}