if (flag == nullptr) {
return MPI_ERR_ARG;
} else {
- *flag = smpi_process()->index() == 0; // FIXME: I don't think this is correct: This just returns true if the process ID is 1,
+ *flag = smpi_process()->index() == 1; // FIXME: I don't think this is correct: This just returns true if the process ID is 1,
// regardless of whether this process called MPI_Thread_Init() or not.
return MPI_SUCCESS;
}
static int getPid(MPI_Comm comm, int id)
{
simgrid::s4u::ActorPtr actor = comm->group()->actor(id);
- return (actor == nullptr) ? MPI_UNDEFINED : actor->getPid() - 1;
+ return (actor == nullptr) ? MPI_UNDEFINED : actor->getPid();
}
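
For reference, the change above (and throughout this patch) replaces the old 0-based SMPI index, which was always computed as PID minus one, with the raw 1-based simulation PID; PID 0 belongs to the maestro process, as a removed comment later in this patch notes. A minimal standalone sketch of the two conventions, using hypothetical helper names rather than SimGrid code:

#include <cassert>

// Hypothetical helpers, not SimGrid code: the old convention stored index = pid - 1,
// the new convention keeps the raw 1-based PID everywhere.
static int old_index_from_pid(int pid) { return pid - 1; }
static int new_id_from_pid(int pid) { return pid; }

int main()
{
  int first_actor_pid = 1; // PID 0 is reserved for the maestro process
  assert(old_index_from_pid(first_actor_pid) == 0); // old tracing/mailbox key
  assert(new_id_from_pid(first_actor_pid) == 1);    // new tracing/mailbox key
  return 0;
}
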
/* PMPI User level calls */
retval = MPI_ERR_TAG;
} else {
- int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1;
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
int src_traced = getPid(comm, src);
TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("Irecv", src_traced,
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1;
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
int trace_dst = getPid(comm, dst);
TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("Isend", trace_dst,
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1;
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
int trace_dst = getPid(comm, dst);
TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("ISsend", trace_dst,
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1;
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
int src_traced = getPid(comm, src);
TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("recv", src_traced,
} else if(tag < 0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1;
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
int dst_traced = getPid(comm, dst);
TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("send", dst_traced,
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
- int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1;
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
int dst_traced = getPid(comm, dst);
TRACE_smpi_comm_in(my_proc_id, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("Ssend", dst_traced,
} else if((sendtag<0 && sendtag != MPI_ANY_TAG)||(recvtag<0 && recvtag != MPI_ANY_TAG)){
retval = MPI_ERR_TAG;
} else {
- int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1;
+ int my_proc_id = simgrid::s4u::Actor::self()->getPid();
int dst_traced = getPid(comm, dst);
int src_traced = getPid(comm, src);
retval = MPI_SUCCESS;
} else {
int my_proc_id = (*request)->comm() != MPI_COMM_NULL
- ? simgrid::s4u::Actor::self()->getPid() - 1
+ ? simgrid::s4u::Actor::self()->getPid()
: -1; // TODO: cheinrich: Check if this is correct or if it should be MPI_UNDEFINED
TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("wait"));
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_fence"));
retval = win->fence(assert);
TRACE_smpi_comm_out(rank);
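
Each instrumented call in this patch brackets the real operation between TRACE_smpi_comm_in and TRACE_smpi_comm_out, keyed by the caller's identifier, which is now the raw PID. A minimal standalone sketch of that bracketing, with hypothetical stand-ins for the tracing hooks:

#include <cstdio>

// Hypothetical stand-ins for the tracing hooks: record entering/leaving an MPI call for one process id.
static void trace_comm_in(int id, const char* op) { std::printf("[%d] -> %s\n", id, op); }
static void trace_comm_out(int id)                { std::printf("[%d] <-\n", id); }

static int traced_fence(int my_pid)
{
  trace_comm_in(my_pid, "Win_fence"); // id is the 1-based PID after this patch
  int retval = 0;                     // the real win->fence(assert) would run here
  trace_comm_out(my_pid);
  return retval;
}

int main() { return traced_fence(1); }
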
((not origin_datatype->is_valid()) || (not target_datatype->is_valid()))) {
retval = MPI_ERR_TYPE;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Get", target_rank,
} else if(request == nullptr){
retval = MPI_ERR_REQUEST;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Rget", target_rank,
((not origin_datatype->is_valid()) || (not target_datatype->is_valid()))) {
retval = MPI_ERR_TYPE;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- int dst_traced = group->actor(target_rank)->getPid()-1;
+ int dst_traced = group->actor(target_rank)->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Put", dst_traced,
origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
encode_datatype(origin_datatype)));
} else if(request == nullptr){
retval = MPI_ERR_REQUEST;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
- int dst_traced = group->actor(target_rank)->getPid()-1;
+ int dst_traced = group->actor(target_rank)->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Rput", dst_traced,
origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
encode_datatype(origin_datatype)));
} else if (op == MPI_OP_NULL) {
retval = MPI_ERR_OP;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Accumulate", target_rank,
} else if(request == nullptr){
retval = MPI_ERR_REQUEST;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Raccumulate", target_rank,
} else if (op == MPI_OP_NULL) {
retval = MPI_ERR_OP;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Get_accumulate", target_rank,
} else if(request == nullptr){
retval = MPI_ERR_REQUEST;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Rget_accumulate", target_rank,
} else if ((datatype == MPI_DATATYPE_NULL) || (not datatype->is_valid())) {
retval = MPI_ERR_TYPE;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
MPI_Group group;
win->get_group(&group);
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Compare_and_swap", target_rank,
} else if (group==MPI_GROUP_NULL){
retval = MPI_ERR_GROUP;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_post"));
retval = win->post(group,assert);
TRACE_smpi_comm_out(rank);
} else if (group==MPI_GROUP_NULL){
retval = MPI_ERR_GROUP;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_start"));
retval = win->start(group,assert);
TRACE_smpi_comm_out(rank);
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_complete"));
retval = win->complete();
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_wait"));
retval = win->wait();
} else if (rank == MPI_PROC_NULL){
retval = MPI_SUCCESS;
} else {
- int myrank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int myrank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(myrank, __func__, new simgrid::instr::NoOpTIData("Win_lock"));
retval = win->lock(lock_type,rank,assert);
TRACE_smpi_comm_out(myrank);
} else if (rank == MPI_PROC_NULL){
retval = MPI_SUCCESS;
} else {
- int myrank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int myrank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_unlock"));
retval = win->unlock(rank);
TRACE_smpi_comm_out(myrank);
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int myrank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int myrank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_lock_all"));
retval = win->lock_all(assert);
TRACE_smpi_comm_out(myrank);
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int myrank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int myrank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_unlock_all"));
retval = win->unlock_all();
TRACE_smpi_comm_out(myrank);
} else if (rank == MPI_PROC_NULL){
retval = MPI_SUCCESS;
} else {
- int myrank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int myrank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush"));
retval = win->flush(rank);
TRACE_smpi_comm_out(myrank);
} else if (rank == MPI_PROC_NULL){
retval = MPI_SUCCESS;
} else {
- int myrank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int myrank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_local"));
retval = win->flush_local(rank);
TRACE_smpi_comm_out(myrank);
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int myrank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int myrank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_all"));
retval = win->flush_all();
TRACE_smpi_comm_out(myrank);
if (win == MPI_WIN_NULL) {
retval = MPI_ERR_WIN;
} else {
- int myrank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int myrank = simgrid::s4u::Actor::self()->getPid();
TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_local_all"));
retval = win->flush_local_all();
TRACE_smpi_comm_out(myrank);
{
char name[MAILBOX_NAME_MAXLEN];
process_ = actor;
- int index = actor->getPid() - 1; // TODO cheinrich: This needs to be removed! Just a quick hack to make the following 2 lines work
+ int index = actor->getPid(); // TODO cheinrich: This needs to be removed! Just a quick hack to make the following 2 lines work
mailbox_ = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, index));
mailbox_small_ = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, index));
mailboxes_mutex_ = xbt_mutex_init();
simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self();
proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX);
- int my_proc_id = proc->getPid() - 1; // The maestro process has always ID 0 but we don't need that process here
-
char* instance_id = (*argv)[1];
try {
int rank = std::stoi(std::string((*argv)[2]));
// cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved
// this up here so that I can set the privatized region before the switch.
Process* process = smpi_process_remote(proc);
+ int my_proc_id = proc->getPid();
if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
/* Now using the segment index of this process */
my_proc_id = proc->getImpl()->segment_index;
int rank = smpi_process()->index();
TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(root)->getPid()-1, -1.0, size, -1,
+ new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(root)->getPid(), -1.0, size, -1,
encode_datatype(MPI_CURRENT_TYPE), ""));
void *sendbuf = smpi_get_tmp_sendbuffer(size* MPI_CURRENT_TYPE->size());
int rank = smpi_process()->index();
TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(root)->getPid()-1, comp_size,
+ new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(root)->getPid(), comp_size,
comm_size, -1, encode_datatype(MPI_CURRENT_TYPE), ""));
void *recvbuf = smpi_get_tmp_sendbuffer(comm_size* MPI_CURRENT_TYPE->size());
int intra_comm_size = 0;
int min_index = INT_MAX; // the minimum index will be the leader
for (auto& actor : process_list) {
- int index = actor.pid - 1;
+ int index = actor.pid;
if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) { // Is this process in the current group?
intra_comm_size++;
if (index < min_index)
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]+1), i);
+ leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]), i);
leader_comm = new Comm(leaders_group, nullptr);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
// create intracommunicator
}else{
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]+1), i);
+ leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]), i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
leader_comm = new Comm(leaders_group, nullptr);
void Group::set_mapping(simgrid::s4u::ActorPtr actor, int rank)
{
if (0 <= rank && rank < size_) {
- int index = actor->getPid()-1;
+ int index = actor->getPid();
rank_to_index_map_[rank] = index;
if (index != MPI_UNDEFINED) {
if ((unsigned)index >= index_to_rank_map_.size())
MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1,
- comm->group()->actor(dst)->getPid() - 1, tag, comm, PERSISTENT | SEND | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SEND | PREPARED);
}
MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1,
- comm->group()->actor(dst)->getPid() - 1, tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
}
MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1,
- comm->group()->actor(dst)->getPid() - 1, tag, comm, PERSISTENT | ISEND | SEND | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | ISEND | SEND | PREPARED);
}
MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
- src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid() - 1,
- simgrid::s4u::Actor::self()->getPid() - 1, tag, comm, PERSISTENT | RECV | PREPARED);
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
- src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid() - 1,
- simgrid::s4u::Actor::self()->getPid() - 1, tag, comm, PERSISTENT | RECV | PREPARED);
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV | PREPARED);
}
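
Note that the receive-side factories above translate only concrete ranks into PIDs; the MPI_ANY_SOURCE wildcard passes through untouched. A standalone sketch of that guard, with a hypothetical rank-to-PID table standing in for the communicator's group:

#include <cassert>
#include <vector>

constexpr int ANY_SOURCE = -555; // stand-in for MPI_ANY_SOURCE; the real value is implementation-defined

// Hypothetical translation: rank within the communicator -> simulation PID (1-based).
static int rank_to_pid(const std::vector<int>& group_pids, int src)
{
  return src == ANY_SOURCE ? ANY_SOURCE : group_pids[src];
}

int main()
{
  std::vector<int> group_pids = {1, 2, 3};              // ranks 0..2 map to PIDs 1..3
  assert(rank_to_pid(group_pids, 2) == 3);              // concrete rank gets translated
  assert(rank_to_pid(group_pids, ANY_SOURCE) == ANY_SOURCE); // wildcard passes through unchanged
  return 0;
}
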
MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1,
- comm->group()->actor(dst)->getPid() - 1, tag, comm, NON_PERSISTENT | ISEND | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SEND);
request->start();
return request;
}
MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1,
- comm->group()->actor(dst)->getPid() - 1, tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
request->start();
return request;
}
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
- src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid() - 1,
- simgrid::s4u::Actor::self()->getPid() - 1, tag, comm, NON_PERSISTENT | RECV);
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, NON_PERSISTENT | RECV);
request->start();
return request;
}
void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1,
- comm->group()->actor(dst)->getPid() - 1, tag, comm, NON_PERSISTENT | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SEND);
request->start();
wait(&request, MPI_STATUS_IGNORE);
void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1,
- comm->group()->actor(dst)->getPid() - 1, tag, comm, NON_PERSISTENT | SSEND | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SSEND | SEND);
request->start();
wait(&request,MPI_STATUS_IGNORE);
{
MPI_Request requests[2];
MPI_Status stats[2];
- unsigned int myid = simgrid::s4u::Actor::self()->getPid() - 1;
- if ((comm->group()->actor(dst)->getPid()-1 == myid) && (comm->group()->actor(src)->getPid()-1 == myid)){
+ unsigned int myid = simgrid::s4u::Actor::self()->getPid();
+ if ((comm->group()->actor(dst)->getPid() == myid) && (comm->group()->actor(src)->getPid() == myid)){
Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
return;
}
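
The block above short-circuits a sendrecv whose source and destination both resolve to the caller itself (now compared by PID) by copying the payload locally. A standalone sketch of that fast path, with plain memcpy standing in for Datatype::copy:

#include <cassert>
#include <cstring>

// Hypothetical self-message fast path: if src and dst both resolve to my own id,
// copy the buffer locally instead of posting a send and a receive.
static bool sendrecv_self_shortcut(int my_id, int dst_id, int src_id,
                                   const char* sendbuf, char* recvbuf, size_t bytes)
{
  if (dst_id == my_id && src_id == my_id) {
    std::memcpy(recvbuf, sendbuf, bytes);
    return true; // handled locally
  }
  return false; // caller must do the real send/recv
}

int main()
{
  char in[4]  = {'p', 'i', 'n', 'g'};
  char out[4] = {};
  assert(sendrecv_self_shortcut(7, 7, 7, in, out, sizeof(in)));
  assert(std::memcmp(in, out, sizeof(in)) == 0);
  return 0;
}
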
if ((flags_ & RECV) != 0) {
this->print_request("New recv");
- simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_+1));
+ simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));
int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
if (async_small_thresh != 0 || (flags_ & RMA) != 0 )
xbt_mutex_release(mut);
} else { /* the RECV flag was not set, so this is a send */
- simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_+1));
+ simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));
int rank = src_;
if (TRACE_smpi_view_internals()) {
TRACE_smpi_send(rank, rank, dst_, tag_, size_);
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
real_size_=size_;
action_ = simcall_comm_isend(
- simgrid::s4u::Actor::byPid(src_ + 1)->getImpl(), mailbox, size_, -1.0, buf, real_size_, &match_send,
+ simgrid::s4u::Actor::byPid(src_)->getImpl(), mailbox, size_, -1.0, buf, real_size_, &match_send,
&xbt_free_f, // how to free the userdata if a detached send fails
not process->replaying() ? smpi_comm_copy_data_callback : &smpi_comm_null_copy_buffer_callback, this,
// detach if msg size < eager/rdv switch limit
double speed = simgrid::s4u::Actor::self()->getHost()->getSpeed();
double maxrate = xbt_cfg_get_double("smpi/iprobe-cpu-usage");
MPI_Request request = new Request(
- nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->getPid() - 1,
- simgrid::s4u::Actor::self()->getPid() - 1, tag, comm, PERSISTENT | RECV);
+ nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV);
if (smpi_iprobe_sleep > 0) {
smx_activity_t iprobe_sleep = simcall_execution_start(
"iprobe", /* flops to executek*/ nsleeps * smpi_iprobe_sleep * speed * maxrate, /* priority */ 1.0,
static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment ");
- smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid() - 1);
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
if(datatype->flags() & DT_FLAG_DERIVED){
}
if (TRACE_smpi_view_internals() && ((req->flags_ & RECV) != 0)){
- int rank = simgrid::s4u::Actor::self()->getPid() - 1;
+ int rank = simgrid::s4u::Actor::self()->getPid();
int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
TRACE_smpi_recv(src_traced, rank,req->tag_);
}
if (target_rank != comm_->rank()) {
//prepare send_request
MPI_Request sreq =
- Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(),
- comm_->group()->actor(target_rank)->getPid() - 1, SMPI_RMA_TAG + 1, comm_, MPI_OP_NULL);
+ Request::rma_send_init(origin_addr, origin_count, origin_datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm_->group()->actor(target_rank)->getPid(), SMPI_RMA_TAG + 1, comm_, MPI_OP_NULL);
//prepare receiver request
- MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, comm_->rank(),
- comm_->group()->actor(target_rank)->getPid() - 1, SMPI_RMA_TAG + 1,
+ MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm_->group()->actor(target_rank)->getPid(), SMPI_RMA_TAG + 1,
recv_win->comm_, MPI_OP_NULL);
//start send
if(target_rank != comm_->rank()){
//prepare send_request
MPI_Request sreq = Request::rma_send_init(send_addr, target_count, target_datatype,
- comm_->group()->actor(target_rank)->getPid() - 1, comm_->rank(),
+ comm_->group()->actor(target_rank)->getPid(), simgrid::s4u::Actor::self()->getPid(),
SMPI_RMA_TAG + 2, send_win->comm_, MPI_OP_NULL);
//prepare receiver request
MPI_Request rreq = Request::rma_recv_init(origin_addr, origin_count, origin_datatype,
- comm_->group()->actor(target_rank)->getPid() - 1, comm_->rank(),
+ comm_->group()->actor(target_rank)->getPid(), simgrid::s4u::Actor::self()->getPid(),
SMPI_RMA_TAG + 2, comm_, MPI_OP_NULL);
//start the send, with another process than us as sender.
//prepare send_request
MPI_Request sreq =
- Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(),
- comm_->group()->actor(target_rank)->getPid() - 1, SMPI_RMA_TAG - 3 - count_, comm_, op);
+ Request::rma_send_init(origin_addr, origin_count, origin_datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm_->group()->actor(target_rank)->getPid(), SMPI_RMA_TAG - 3 - count_, comm_, op);
// prepare receiver request
- MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, comm_->rank(),
- comm_->group()->actor(target_rank)->getPid() - 1, SMPI_RMA_TAG - 3 - count_,
+ MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm_->group()->actor(target_rank)->getPid(), SMPI_RMA_TAG - 3 - count_,
recv_win->comm_, op);
count_++;
XBT_DEBUG("Entering MPI_Win_Start");
while (j != size) {
- int src = group->actor(j)->getPid()-1;
- if (src != comm_->rank() && src != MPI_UNDEFINED) {
+ int src = group->actor(j)->getPid();
+ if ((unsigned)src != simgrid::s4u::Actor::self()->getPid() && src != MPI_UNDEFINED) {
reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, MPI_COMM_WORLD);
i++;
}
XBT_DEBUG("Entering MPI_Win_Post");
while(j!=size){
- int dst=group->actor(j)->getPid()-1;
- if (dst != comm_->rank() && dst != MPI_UNDEFINED) {
+ int dst=group->actor(j)->getPid();
+ if ((unsigned)dst != simgrid::s4u::Actor::self()->getPid() && dst != MPI_UNDEFINED) {
reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+4, MPI_COMM_WORLD);
i++;
}
MPI_Request* reqs = xbt_new0(MPI_Request, size);
while(j!=size){
- int dst=group_->actor(j)->getPid()-1;
- if (dst != comm_->rank() && dst != MPI_UNDEFINED) {
+ int dst=group_->actor(j)->getPid();
+ if ((unsigned)dst != simgrid::s4u::Actor::self()->getPid() && dst != MPI_UNDEFINED) {
reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+5, MPI_COMM_WORLD);
i++;
}
MPI_Request* reqs = xbt_new0(MPI_Request, size);
while(j!=size){
- int src=group_->actor(j)->getPid()-1;
- if (src != comm_->rank() && src != MPI_UNDEFINED) {
+ int src=group_->actor(j)->getPid();
+ if ((unsigned)src != simgrid::s4u::Actor::self()->getPid() && src != MPI_UNDEFINED) {
reqs[i]=Request::irecv_init(nullptr, 0, MPI_CHAR, src,SMPI_RMA_TAG+5, MPI_COMM_WORLD);
i++;
}
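
The window synchronization loops above build one zero-byte request per member of the peer group, skipping the caller itself (now identified by PID) and MPI_UNDEFINED entries. A standalone sketch of that peer filter:

#include <cassert>
#include <vector>

// Hypothetical peer filter mirroring the loops above: one request per group member,
// except ourselves and entries that are not in the communicator (MPI_UNDEFINED stand-in).
static int count_peer_requests(const std::vector<int>& group_pids, int my_pid, int undefined)
{
  int n = 0;
  for (int pid : group_pids)
    if (pid != my_pid && pid != undefined)
      n++;
  return n;
}

int main()
{
  const int UNDEF = -32766; // stand-in for MPI_UNDEFINED
  std::vector<int> group = {1, 2, UNDEF, 4};
  assert(count_peer_requests(group, 2, UNDEF) == 2); // peers with PID 1 and 4
  return 0;
}
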