/* Sometimes we need to re-insert MPI_REQUEST_NULL, but we still need src, dst and tag to build the lookup key. */
void addNullRequest(int src, int dst, int tag)
{
  // Key the null request exactly like real ones: (src id, dst id, tag).
  // NOTE(review): the "- 1" suggests actor ids are 1-based while keys are 0-based — confirm against req_key_t users.
  store.insert({req_key_t(MPI_COMM_WORLD->group()->actor(src) - 1, MPI_COMM_WORLD->group()->actor(dst) - 1, tag),
                MPI_REQUEST_NULL});
}
};
void SendAction::kernel(simgrid::xbt::ReplayAction&)
{
const SendRecvParser& args = get_args();
- int dst_traced = MPI_COMM_WORLD->group()->actor_pid(args.partner);
+ int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner);
TRACE_smpi_comm_in(
get_pid(), __func__,
TRACE_smpi_comm_out(get_pid());
if (is_recv && not TRACE_smpi_view_internals()) {
- int src_traced = MPI_COMM_WORLD->group()->actor_pid(status.MPI_SOURCE);
+ int src_traced = MPI_COMM_WORLD->group()->actor(status.MPI_SOURCE);
TRACE_smpi_recv(src_traced, get_pid(), args.tag);
}
}
{
const BcastArgParser& args = get_args();
TRACE_smpi_comm_in(get_pid(), "action_bcast",
- new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor_pid(args.root), -1.0,
- args.size, -1, Datatype::encode(args.datatype1), ""));
+ new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root), -1.0, args.size,
+ -1, Datatype::encode(args.datatype1), ""));
colls::bcast(send_buffer(args.size * args.datatype1->size()), args.size, args.datatype1, args.root, MPI_COMM_WORLD);
{
const ReduceArgParser& args = get_args();
TRACE_smpi_comm_in(get_pid(), "action_reduce",
- new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor_pid(args.root),
- args.comp_size, args.comm_size, -1,
- Datatype::encode(args.datatype1), ""));
+ new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root), args.comp_size,
+ args.comm_size, -1, Datatype::encode(args.datatype1), ""));
colls::reduce(send_buffer(args.comm_size * args.datatype1->size()),
recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL,
/** Create a persistent buffered-send request (MPI_Bsend_init): the request is prepared but not started.
 *  MPI_BOTTOM is normalized to nullptr; MPI_PROC_NULL destinations are kept as-is so start() can no-op on them. */
MPI_Request Request::bsend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_BSEND);
}
/** Create a persistent standard-send request (MPI_Send_init): prepared, not started.
 *  MPI_BOTTOM is normalized to nullptr; MPI_PROC_NULL destinations are preserved. */
MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
/** Create a persistent synchronous-send request (MPI_Ssend_init): prepared, not started.
 *  MPI_BOTTOM is normalized to nullptr; MPI_PROC_NULL destinations are preserved. */
MPI_Request Request::ssend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
/** Create a persistent immediate-send request: prepared, not started.
 *  MPI_BOTTOM is normalized to nullptr; MPI_PROC_NULL destinations are preserved. */
MPI_Request Request::isend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
                     dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
                     MPI_REQ_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
if(op==MPI_OP_NULL){
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor_pid(src),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src),
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED);
}else{
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor_pid(src),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src),
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_PREPARED |
MPI_REQ_ACCUMULATE,
op);
if (src == MPI_ANY_SOURCE)
source = MPI_ANY_SOURCE;
else if (src != MPI_PROC_NULL)
- source = comm->group()->actor_pid(src);
+ source = comm->group()->actor(src);
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
source,
simgrid::s4u::this_actor::get_pid(), tag, comm,
if (src == MPI_ANY_SOURCE)
source = MPI_ANY_SOURCE;
else if (src != MPI_PROC_NULL)
- source = comm->group()->actor_pid(src);
+ source = comm->group()->actor(src);
if(op==MPI_OP_NULL){
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}else{
request =
new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, source,
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_RMA | MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED | MPI_REQ_ACCUMULATE, op);
}
return request;
if (src == MPI_ANY_SOURCE)
source = MPI_ANY_SOURCE;
else if (src != MPI_PROC_NULL)
- source = comm->group()->actor_pid(src);
+ source = comm->group()->actor(src);
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
source, simgrid::s4u::this_actor::get_pid(), tag, comm,
MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND);
if(dst != MPI_PROC_NULL)
request->start();
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND);
if(dst != MPI_PROC_NULL)
request->start();
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SSEND | MPI_REQ_SEND);
if(dst != MPI_PROC_NULL)
request->start();
if (src == MPI_ANY_SOURCE)
source = MPI_ANY_SOURCE;
else if (src != MPI_PROC_NULL)
- source = comm->group()->actor_pid(src);
+ source = comm->group()->actor(src);
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
source, simgrid::s4u::this_actor::get_pid(), tag, comm,
MPI_REQ_NON_PERSISTENT | MPI_REQ_RECV);
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);
if(dst != MPI_PROC_NULL)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND);
if(dst != MPI_PROC_NULL)
request->start();
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
- dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL, tag, comm,
+ dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL, tag, comm,
MPI_REQ_NON_PERSISTENT | MPI_REQ_SSEND | MPI_REQ_SEND);
if(dst != MPI_PROC_NULL)
if (src == MPI_ANY_SOURCE)
source = MPI_ANY_SOURCE;
else if (src != MPI_PROC_NULL)
- source = comm->group()->actor_pid(src);
- int destination = dst != MPI_PROC_NULL ? comm->group()->actor_pid(dst) : MPI_PROC_NULL;
+ source = comm->group()->actor(src);
+ int destination = dst != MPI_PROC_NULL ? comm->group()->actor(dst) : MPI_PROC_NULL;
std::array<MPI_Request, 2> requests;
std::array<MPI_Status, 2> stats;
double speed = s4u::this_actor::get_host()->get_speed();
double maxrate = smpi_cfg_iprobe_cpu_usage();
auto request =
- new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor_pid(source),
+ new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source),
simgrid::s4u::this_actor::get_pid(), tag, comm, MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PROBE);
if (smpi_iprobe_sleep > 0) {
/** Compute the number of flops we will sleep **/
// prepare receiver request
MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, recv_win->comm_->rank(),
- recv_win->comm_->group()->rank(comm_->group()->actor_pid(target_rank)),
+ recv_win->comm_->group()->rank(comm_->group()->actor(target_rank)),
SMPI_RMA_TAG - 3 - count_, recv_win->comm_, op);
count_++;
XBT_DEBUG("Entering MPI_Win_Start");
while (j != size) {
- int src = comm_->group()->rank(group->actor_pid(j));
+ int src = comm_->group()->rank(group->actor(j));
if (src != rank_ && src != MPI_UNDEFINED) { // TODO cheinrich: The check of MPI_UNDEFINED should be useless here
reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, comm_);
i++;
XBT_DEBUG("Entering MPI_Win_Post");
while(j!=size){
- int dst = comm_->group()->rank(group->actor_pid(j));
+ int dst = comm_->group()->rank(group->actor(j));
if (dst != rank_ && dst != MPI_UNDEFINED) {
reqs[i] = Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG + 4, comm_);
i++;
std::vector<MPI_Request> reqs(size);
while(j!=size){
- int dst = comm_->group()->rank(group_->actor_pid(j));
+ int dst = comm_->group()->rank(group_->actor(j));
if (dst != rank_ && dst != MPI_UNDEFINED) {
reqs[i] = Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG + 5, comm_);
i++;
std::vector<MPI_Request> reqs(size);
while(j!=size){
- int src = comm_->group()->rank(group_->actor_pid(j));
+ int src = comm_->group()->rank(group_->actor(j));
if (src != rank_ && src != MPI_UNDEFINED) {
reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 5, comm_);
i++;
// because we only wait for requests that we are responsible for.
// Also use the process id here since the request itself returns from src()
// and dst() the process id, NOT the rank (which only exists in the context of a communicator).
- int proc_id = comm_->group()->actor_pid(rank);
+ int proc_id = comm_->group()->actor(rank);
auto it = std::stable_partition(begin(requests_), end(requests_), [proc_id](const MPI_Request& req) {
return (req == MPI_REQUEST_NULL || (req->src() != proc_id && req->dst() != proc_id));
});