From: Arnaud Giersch Date: Sun, 18 Apr 2021 19:25:37 +0000 (+0200) Subject: Finally rename smpi::Group::actor_pid() back to actor(). X-Git-Tag: v3.28~455^2 X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/6b01c1856641f3a79c1eec8f4f0f02e941302090?hp=-c Finally rename smpi::Group::actor_pid() back to actor(). --- 6b01c1856641f3a79c1eec8f4f0f02e941302090 diff --git a/src/smpi/bindings/smpi_pmpi.cpp b/src/smpi/bindings/smpi_pmpi.cpp index 744b0a37a9..72cb977927 100644 --- a/src/smpi/bindings/smpi_pmpi.cpp +++ b/src/smpi/bindings/smpi_pmpi.cpp @@ -131,7 +131,7 @@ int PMPI_Abort(MPI_Comm comm, int /*errorcode*/) XBT_WARN("MPI_Abort was called, something went probably wrong in this simulation ! Killing all processes sharing the same MPI_COMM_WORLD"); smx_actor_t myself = SIMIX_process_self(); for (int i = 0; i < comm->size(); i++){ - smx_actor_t actor = simgrid::kernel::actor::ActorImpl::by_pid(comm->group()->actor_pid(i)); + smx_actor_t actor = simgrid::kernel::actor::ActorImpl::by_pid(comm->group()->actor(i)); if (actor != nullptr && actor != myself) simgrid::kernel::actor::simcall([actor] { actor->exit(); }); } diff --git a/src/smpi/bindings/smpi_pmpi_group.cpp b/src/smpi/bindings/smpi_pmpi_group.cpp index 09200e0871..9797acb8d7 100644 --- a/src/smpi/bindings/smpi_pmpi_group.cpp +++ b/src/smpi/bindings/smpi_pmpi_group.cpp @@ -55,7 +55,7 @@ int PMPI_Group_translate_ranks(MPI_Group group1, int n, const int *ranks1, MPI_G if(ranks1[i]==MPI_PROC_NULL){ ranks2[i]=MPI_PROC_NULL; }else{ - aid_t actor = group1->actor_pid(ranks1[i]); + aid_t actor = group1->actor(ranks1[i]); ranks2[i] = group2->rank(actor); } } diff --git a/src/smpi/bindings/smpi_pmpi_request.cpp b/src/smpi/bindings/smpi_pmpi_request.cpp index 33fb033b8b..4871b408dd 100644 --- a/src/smpi/bindings/smpi_pmpi_request.cpp +++ b/src/smpi/bindings/smpi_pmpi_request.cpp @@ -13,7 +13,7 @@ XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi); static int getPid(MPI_Comm comm, int id) { - 
return comm->group()->actor_pid(id); + return comm->group()->actor(id); } #define CHECK_SEND_INPUTS\ @@ -602,7 +602,7 @@ static void trace_smpi_recv_helper(MPI_Request* request, MPI_Status* status) int dst_traced = req->dst(); // the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) if (src_traced == MPI_ANY_SOURCE && status != MPI_STATUS_IGNORE) - src_traced = req->comm()->group()->actor_pid(status->MPI_SOURCE); + src_traced = req->comm()->group()->actor(status->MPI_SOURCE); TRACE_smpi_recv(src_traced, dst_traced, req->tag()); } } diff --git a/src/smpi/bindings/smpi_pmpi_win.cpp b/src/smpi/bindings/smpi_pmpi_win.cpp index 8c2cb9a9ad..ae2211d7fb 100644 --- a/src/smpi/bindings/smpi_pmpi_win.cpp +++ b/src/smpi/bindings/smpi_pmpi_win.cpp @@ -227,7 +227,7 @@ int PMPI_Put(const void *origin_addr, int origin_count, MPI_Datatype origin_data int my_proc_id = simgrid::s4u::this_actor::get_pid(); MPI_Group group; win->get_group(&group); - int dst_traced = group->actor_pid(target_rank); + int dst_traced = group->actor(target_rank); TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData("Put", target_rank, origin_datatype->is_replayable() ? 
origin_count @@ -258,7 +258,7 @@ int PMPI_Rput(const void *origin_addr, int origin_count, MPI_Datatype origin_dat int my_proc_id = simgrid::s4u::this_actor::get_pid(); MPI_Group group; win->get_group(&group); - int dst_traced = group->actor_pid(target_rank); + int dst_traced = group->actor(target_rank); TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData( "Rput", target_rank, diff --git a/src/smpi/include/smpi_group.hpp b/src/smpi/include/smpi_group.hpp index d16e40d15b..322cb6b97b 100644 --- a/src/smpi/include/smpi_group.hpp +++ b/src/smpi/include/smpi_group.hpp @@ -34,7 +34,7 @@ public: void set_mapping(aid_t pid, int rank); int rank(aid_t pid) const; - aid_t actor_pid(int rank) const; + aid_t actor(int rank) const; std::string name() const override {return std::string("MPI_Group");} void ref(); static void unref(MPI_Group group); diff --git a/src/smpi/internals/smpi_replay.cpp b/src/smpi/internals/smpi_replay.cpp index dfec134f00..97c73047c4 100644 --- a/src/smpi/internals/smpi_replay.cpp +++ b/src/smpi/internals/smpi_replay.cpp @@ -128,9 +128,8 @@ public: /* Sometimes we need to re-insert MPI_REQUEST_NULL but we still need src,dst and tag */ void addNullRequest(int src, int dst, int tag) { - store.insert( - {req_key_t(MPI_COMM_WORLD->group()->actor_pid(src) - 1, MPI_COMM_WORLD->group()->actor_pid(dst) - 1, tag), - MPI_REQUEST_NULL}); + store.insert({req_key_t(MPI_COMM_WORLD->group()->actor(src) - 1, MPI_COMM_WORLD->group()->actor(dst) - 1, tag), + MPI_REQUEST_NULL}); } }; @@ -433,7 +432,7 @@ void WaitAction::kernel(simgrid::xbt::ReplayAction& action) void SendAction::kernel(simgrid::xbt::ReplayAction&) { const SendRecvParser& args = get_args(); - int dst_traced = MPI_COMM_WORLD->group()->actor_pid(args.partner); + int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner); TRACE_smpi_comm_in( get_pid(), __func__, @@ -481,7 +480,7 @@ void RecvAction::kernel(simgrid::xbt::ReplayAction&) TRACE_smpi_comm_out(get_pid()); if (is_recv && not 
TRACE_smpi_view_internals()) { - int src_traced = MPI_COMM_WORLD->group()->actor_pid(status.MPI_SOURCE); + int src_traced = MPI_COMM_WORLD->group()->actor(status.MPI_SOURCE); TRACE_smpi_recv(src_traced, get_pid(), args.tag); } } @@ -587,8 +586,8 @@ void BcastAction::kernel(simgrid::xbt::ReplayAction&) { const BcastArgParser& args = get_args(); TRACE_smpi_comm_in(get_pid(), "action_bcast", - new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor_pid(args.root), -1.0, - args.size, -1, Datatype::encode(args.datatype1), "")); + new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root), -1.0, args.size, + -1, Datatype::encode(args.datatype1), "")); colls::bcast(send_buffer(args.size * args.datatype1->size()), args.size, args.datatype1, args.root, MPI_COMM_WORLD); @@ -599,9 +598,8 @@ void ReduceAction::kernel(simgrid::xbt::ReplayAction&) { const ReduceArgParser& args = get_args(); TRACE_smpi_comm_in(get_pid(), "action_reduce", - new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor_pid(args.root), - args.comp_size, args.comm_size, -1, - Datatype::encode(args.datatype1), "")); + new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root), args.comp_size, + args.comm_size, -1, Datatype::encode(args.datatype1), "")); colls::reduce(send_buffer(args.comm_size * args.datatype1->size()), recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL, diff --git a/src/smpi/mpi/smpi_comm.cpp b/src/smpi/mpi/smpi_comm.cpp index 7afc99450f..c1226526b0 100644 --- a/src/smpi/mpi/smpi_comm.cpp +++ b/src/smpi/mpi/smpi_comm.cpp @@ -294,7 +294,7 @@ MPI_Comm Comm::split(int color, int key) group_root = group_out; /* Save root's group */ } for (unsigned j = 0; j < rankmap.size(); j++) { - aid_t actor = group->actor_pid(rankmap[j].second); + aid_t actor = group->actor(rankmap[j].second); group_out->set_mapping(actor, j); } std::vector<MPI_Request> requests(rankmap.size()); @@ -501,9 +501,9
@@ void Comm::init_smp(){ } // Are the ranks blocked ? = allocated contiguously on the SMP nodes int is_blocked=1; - int prev = this->group()->rank(comm_intra->group()->actor_pid(0)); + int prev = this->group()->rank(comm_intra->group()->actor(0)); for (i = 1; i < my_local_size; i++) { - int that = this->group()->rank(comm_intra->group()->actor_pid(i)); + int that = this->group()->rank(comm_intra->group()->actor(i)); if (that != prev + 1) { is_blocked = 0; break; diff --git a/src/smpi/mpi/smpi_group.cpp b/src/smpi/mpi/smpi_group.cpp index 13d0d00fed..797fae4556 100644 --- a/src/smpi/mpi/smpi_group.cpp +++ b/src/smpi/mpi/smpi_group.cpp @@ -44,7 +44,7 @@ int Group::rank(aid_t pid) const return res; } -aid_t Group::actor_pid(int rank) const +aid_t Group::actor(int rank) const { return (0 <= rank && rank < size()) ? rank_to_pid_map_[rank] : -1; } @@ -73,7 +73,7 @@ int Group::compare(const Group* group2) const result = MPI_UNEQUAL; } else { for (int i = 0; i < size(); i++) { - int rank = group2->rank(actor_pid(i)); + int rank = group2->rank(actor(i)); if (rank == MPI_UNDEFINED) { result = MPI_UNEQUAL; break; @@ -95,7 +95,7 @@ int Group::incl(int n, const int* ranks, MPI_Group* newgroup) const *newgroup = new Group(n); for (int i = 0; i < n; i++) { - aid_t actor = this->actor_pid(ranks[i]); + aid_t actor = this->actor(ranks[i]); (*newgroup)->set_mapping(actor, i); } (*newgroup)->add_f(); @@ -121,7 +121,7 @@ int Group::group_union(const Group* group2, MPI_Group* newgroup) const { std::vector<int> ranks2; for (int i = 0; i < group2->size(); i++) { - aid_t actor = group2->actor_pid(i); + aid_t actor = group2->actor(i); if (rank(actor) == MPI_UNDEFINED) ranks2.push_back(i); } @@ -135,11 +135,11 @@ int Group::group_union(const Group* group2, MPI_Group* newgroup) const *newgroup = new Group(newsize); int i; for (i = 0; i < size(); i++) { - aid_t actor1 = actor_pid(i); + aid_t actor1 = actor(i); (*newgroup)->set_mapping(actor1, i); } for (int j : ranks2) { - aid_t actor2 =
group2->actor_pid(j); + aid_t actor2 = group2->actor(j); (*newgroup)->set_mapping(actor2, i); i++; } @@ -151,7 +151,7 @@ int Group::intersection(const Group* group2, MPI_Group* newgroup) const { std::vector<int> ranks2; for (int i = 0; i < group2->size(); i++) { - aid_t actor = group2->actor_pid(i); + aid_t actor = group2->actor(i); if (rank(actor) != MPI_UNDEFINED) ranks2.push_back(i); } @@ -162,7 +162,7 @@ int Group::difference(const Group* group2, MPI_Group* newgroup) const { std::vector<int> ranks; for (int i = 0; i < size(); i++) { - aid_t actor = this->actor_pid(i); + aid_t actor = this->actor(i); if (group2->rank(actor) == MPI_UNDEFINED) ranks.push_back(i); } diff --git a/src/smpi/mpi/smpi_request.cpp b/src/smpi/mpi/smpi_request.cpp index ec43755892..e37e60d8ff 100644 --- a/src/smpi/mpi/smpi_request.cpp +++ b/src/smpi/mpi/smpi_request.cpp @@ -180,28 +180,28 @@ void Request::print_request(const char* message) const MPI_Request Request::bsend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_BSEND); } MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED); } MPI_Request Request::ssend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { return new Request(buf == MPI_BOTTOM ?
nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED); } MPI_Request Request::isend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED); } @@ -211,12 +211,12 @@ MPI_Request Request::rma_send_init(const void *buf, int count, MPI_Datatype data { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ if(op==MPI_OP_NULL){ - request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor_pid(src), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src), + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED); }else{ - request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor_pid(src), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src), + dst != MPI_PROC_NULL ? 
comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE, op); @@ -230,7 +230,7 @@ MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int if (src == MPI_ANY_SOURCE) source = MPI_ANY_SOURCE; else if (src != MPI_PROC_NULL) - source = comm->group()->actor_pid(src); + source = comm->group()->actor(src); return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source, simgrid::s4u::this_actor::get_pid(), tag, comm, @@ -245,15 +245,15 @@ MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, if (src == MPI_ANY_SOURCE) source = MPI_ANY_SOURCE; else if (src != MPI_PROC_NULL) - source = comm->group()->actor_pid(src); + source = comm->group()->actor(src); if(op==MPI_OP_NULL){ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source, - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED); }else{ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source, - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE, op); } return request; @@ -265,7 +265,7 @@ MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int if (src == MPI_ANY_SOURCE) source = MPI_ANY_SOURCE; else if (src != MPI_PROC_NULL) - source = comm->group()->actor_pid(src); + source = comm->group()->actor(src); return new Request(buf == MPI_BOTTOM ? 
nullptr : buf, count, datatype, source, simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED); @@ -275,7 +275,7 @@ MPI_Request Request::ibsend(const void *buf, int count, MPI_Datatype datatype, i { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND); if(dst != MPI_PROC_NULL) request->start(); @@ -286,7 +286,7 @@ MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, in { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND); if(dst != MPI_PROC_NULL) request->start(); @@ -297,7 +297,7 @@ MPI_Request Request::issend(const void *buf, int count, MPI_Datatype datatype, i { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? 
comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND); if(dst != MPI_PROC_NULL) request->start(); @@ -312,7 +312,7 @@ MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, if (src == MPI_ANY_SOURCE) source = MPI_ANY_SOURCE; else if (src != MPI_PROC_NULL) - source = comm->group()->actor_pid(src); + source = comm->group()->actor(src); request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source, simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV); @@ -334,7 +334,7 @@ void Request::bsend(const void *buf, int count, MPI_Datatype datatype, int dst, { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND); if(dst != MPI_PROC_NULL) @@ -347,7 +347,7 @@ void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, i { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND); if(dst != MPI_PROC_NULL) request->start(); @@ -359,7 +359,7 @@ void Request::ssend(const void *buf, int count, MPI_Datatype datatype, int dst, { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = new Request(buf == MPI_BOTTOM ? 
nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(), - dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm, + dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND); if(dst != MPI_PROC_NULL) @@ -376,8 +376,8 @@ void Request::sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype if (src == MPI_ANY_SOURCE) source = MPI_ANY_SOURCE; else if (src != MPI_PROC_NULL) - source = comm->group()->actor_pid(src); - int destination = dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL; + source = comm->group()->actor(src); + int destination = dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL; std::array<MPI_Request, 2> requests; std::array<MPI_Status, 2> stats; @@ -804,7 +804,7 @@ void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* double speed = s4u::this_actor::get_host()->get_speed(); double maxrate = smpi_cfg_iprobe_cpu_usage(); auto request = - new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor_pid(source), + new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ?
MPI_ANY_SOURCE : comm->group()->actor(source), simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PROBE); if (smpi_iprobe_sleep > 0) { /** Compute the number of flops we will sleep **/ diff --git a/src/smpi/mpi/smpi_topo.cpp b/src/smpi/mpi/smpi_topo.cpp index 93cf8f03be..31864fb51e 100644 --- a/src/smpi/mpi/smpi_topo.cpp +++ b/src/smpi/mpi/smpi_topo.cpp @@ -68,7 +68,7 @@ Topo_Cart::Topo_Cart(MPI_Comm comm_old, int ndims, const int dims[], const int p const Group* oldGroup = comm_old->group(); auto* newGroup = new Group(newSize); for (int i = 0 ; i < newSize ; i++) { - newGroup->set_mapping(oldGroup->actor_pid(i), i); + newGroup->set_mapping(oldGroup->actor(i), i); } *comm_cart = new Comm(newGroup, std::shared_ptr<Topo_Cart>(this)); } diff --git a/src/smpi/mpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp index 52aff5ade2..30052361e1 100644 --- a/src/smpi/mpi/smpi_win.cpp +++ b/src/smpi/mpi/smpi_win.cpp @@ -346,7 +346,7 @@ int Win::accumulate(const void *origin_addr, int origin_count, MPI_Datatype orig // prepare receiver request MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, recv_win->comm_->rank(), - recv_win->comm_->group()->rank(comm_->group()->actor_pid(target_rank)), + recv_win->comm_->group()->rank(comm_->group()->actor(target_rank)), SMPI_RMA_TAG - 3 - count_, recv_win->comm_, op); count_++; @@ -443,7 +443,7 @@ int Win::start(MPI_Group group, int /*assert*/) XBT_DEBUG("Entering MPI_Win_Start"); while (j != size) { - int src = comm_->group()->rank(group->actor_pid(j)); + int src = comm_->group()->rank(group->actor(j)); if (src != rank_ && src != MPI_UNDEFINED) { // TODO cheinrich: The check of MPI_UNDEFINED should be useless here reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, comm_); i++; @@ -473,7 +473,7 @@ int Win::post(MPI_Group group, int /*assert*/) XBT_DEBUG("Entering MPI_Win_Post"); while(j!=size){ - int dst =
comm_->group()->rank(group->actor(j)); if (dst != rank_ && dst != MPI_UNDEFINED) { reqs[i] = Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG + 4, comm_); i++; @@ -504,7 +504,7 @@ int Win::complete(){ std::vector<MPI_Request> reqs(size); while(j!=size){ - int dst = comm_->group()->rank(group_->actor_pid(j)); + int dst = comm_->group()->rank(group_->actor(j)); if (dst != rank_ && dst != MPI_UNDEFINED) { reqs[i] = Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG + 5, comm_); i++; @@ -537,7 +537,7 @@ int Win::wait(){ std::vector<MPI_Request> reqs(size); while(j!=size){ - int src = comm_->group()->rank(group_->actor_pid(j)); + int src = comm_->group()->rank(group_->actor(j)); if (src != rank_ && src != MPI_UNDEFINED) { reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 5, comm_); i++; @@ -672,7 +672,7 @@ int Win::finish_comms(int rank){ // because we only wait for requests that we are responsible for. // Also use the process id here since the request itself returns from src() // and dst() the process id, NOT the rank (which only exists in the context of a communicator). - int proc_id = comm_->group()->actor_pid(rank); + int proc_id = comm_->group()->actor(rank); auto it = std::stable_partition(begin(requests_), end(requests_), [proc_id](const MPI_Request& req) { return (req == MPI_REQUEST_NULL || (req->src() != proc_id && req->dst() != proc_id)); });