From: Christian Heinrich Date: Wed, 17 Jan 2018 17:30:35 +0000 (+0100) Subject: [SMPI] (FIRST PATCH) Remove 'getPid() - 1' arithmetic from SMPI. X-Git-Tag: v3.19~312^2~31 X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/d74e061a2eb65f5b3be26db160dad6c5f74d8aff [SMPI] (FIRST PATCH) Remove 'getPid() - 1' arithmetic from SMPI. THIS COMMIT WILL NOT BUILD PROPERLY OR HAS FAILING TESTS. I decided to split up a huge commit into several logical chunks in order to make things easier to read. The 'getPid() - 1' expressions were the (temporary) replacement for smpi_process()->index() but it's better to remove it completely: This is often confused for being an MPI rank although it is not (ranks depend on the communicator in use). Also, process id 0 is exclusively the maestro so we should not artificially pretend we start counting from 0 when we're clearly not. So now we don't have to say that 'process with id 1 has index 0'. This patch substitutes almost exclusively 'getPid() - 1' computations with just 'getPid()'. There are a few other replacements, especially in the smpi_win.cpp, where former commits introduced logically wrong usage of 'comm_->rank()'. The next few commits after this should also deal with a similar problem. --- diff --git a/src/smpi/bindings/smpi_pmpi.cpp b/src/smpi/bindings/smpi_pmpi.cpp index c8129b9195..5b79d95493 100644 --- a/src/smpi/bindings/smpi_pmpi.cpp +++ b/src/smpi/bindings/smpi_pmpi.cpp @@ -109,7 +109,7 @@ int PMPI_Is_thread_main(int *flag) if (flag == nullptr) { return MPI_ERR_ARG; } else { - *flag = smpi_process()->index() == 0; // FIXME: I don't think this is correct: This just returns true if the process ID is 1, + *flag = smpi_process()->index() == 1; // FIXME: I don't think this is correct: This just returns true if the process ID is 1, // regardless of whether this process called MPI_Thread_Init() or not. 
return MPI_SUCCESS; } diff --git a/src/smpi/bindings/smpi_pmpi_request.cpp b/src/smpi/bindings/smpi_pmpi_request.cpp index 02765b942e..b4b111072c 100644 --- a/src/smpi/bindings/smpi_pmpi_request.cpp +++ b/src/smpi/bindings/smpi_pmpi_request.cpp @@ -15,7 +15,7 @@ static int getPid(MPI_Comm, int); static int getPid(MPI_Comm comm, int id) { simgrid::s4u::ActorPtr actor = comm->group()->actor(id); - return (actor == nullptr) ? MPI_UNDEFINED : actor->getPid() - 1; + return (actor == nullptr) ? MPI_UNDEFINED : actor->getPid(); } /* PMPI User level calls */ @@ -164,8 +164,7 @@ int PMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MP retval = MPI_ERR_TAG; } else { - int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1; - int src_traced = getPid(comm, src); + int my_proc_id = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Irecv", src_traced, @@ -206,7 +205,7 @@ int PMPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MP } else if(tag<0 && tag != MPI_ANY_TAG){ retval = MPI_ERR_TAG; } else { - int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1; + int my_proc_id = simgrid::s4u::Actor::self()->getPid(); int trace_dst = getPid(comm, dst); TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Isend", trace_dst, @@ -248,7 +247,7 @@ int PMPI_Issend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, M } else if(tag<0 && tag != MPI_ANY_TAG){ retval = MPI_ERR_TAG; } else { - int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1; + int my_proc_id = simgrid::s4u::Actor::self()->getPid(); int trace_dst = getPid(comm, dst); TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::Pt2PtTIData("ISsend", trace_dst, @@ -288,7 +287,7 @@ int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI } else if(tag<0 && tag != MPI_ANY_TAG){ retval = MPI_ERR_TAG; } else { - int my_proc_id = 
simgrid::s4u::Actor::self()->getPid() - 1; + int my_proc_id = simgrid::s4u::Actor::self()->getPid(); int src_traced = getPid(comm, src); TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::Pt2PtTIData("recv", src_traced, @@ -331,7 +330,7 @@ int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI } else if(tag < 0 && tag != MPI_ANY_TAG){ retval = MPI_ERR_TAG; } else { - int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1; + int my_proc_id = simgrid::s4u::Actor::self()->getPid(); int dst_traced = getPid(comm, dst); TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::Pt2PtTIData("send", dst_traced, @@ -369,7 +368,7 @@ int PMPI_Ssend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MP } else if(tag<0 && tag != MPI_ANY_TAG){ retval = MPI_ERR_TAG; } else { - int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1; + int my_proc_id = simgrid::s4u::Actor::self()->getPid(); int dst_traced = getPid(comm, dst); TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Ssend", dst_traced, @@ -411,7 +410,7 @@ int PMPI_Sendrecv(void* sendbuf, int sendcount, MPI_Datatype sendtype, int dst, } else if((sendtag<0 && sendtag != MPI_ANY_TAG)||(recvtag<0 && recvtag != MPI_ANY_TAG)){ retval = MPI_ERR_TAG; } else { - int my_proc_id = simgrid::s4u::Actor::self()->getPid() - 1; + int my_proc_id = simgrid::s4u::Actor::self()->getPid(); int dst_traced = getPid(comm, dst); int src_traced = getPid(comm, src); @@ -587,7 +586,7 @@ int PMPI_Wait(MPI_Request * request, MPI_Status * status) retval = MPI_SUCCESS; } else { int my_proc_id = (*request)->comm() != MPI_COMM_NULL - ? simgrid::s4u::Actor::self()->getPid() - 1 + ? 
simgrid::s4u::Actor::self()->getPid() : -1; // TODO: cheinrich: Check if this correct or if it should be MPI_UNDEFINED TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("wait")); diff --git a/src/smpi/bindings/smpi_pmpi_win.cpp b/src/smpi/bindings/smpi_pmpi_win.cpp index 98664973be..f19c60db1d 100644 --- a/src/smpi/bindings/smpi_pmpi_win.cpp +++ b/src/smpi/bindings/smpi_pmpi_win.cpp @@ -166,7 +166,7 @@ int PMPI_Win_fence( int assert, MPI_Win win){ if (win == MPI_WIN_NULL) { retval = MPI_ERR_WIN; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_fence")); retval = win->fence(assert); TRACE_smpi_comm_out(rank); @@ -195,7 +195,7 @@ int PMPI_Get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, ((not origin_datatype->is_valid()) || (not target_datatype->is_valid()))) { retval = MPI_ERR_TYPE; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); MPI_Group group; win->get_group(&group); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Get", target_rank, @@ -234,7 +234,7 @@ int PMPI_Rget( void *origin_addr, int origin_count, MPI_Datatype origin_datatype } else if(request == nullptr){ retval = MPI_ERR_REQUEST; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); MPI_Group group; win->get_group(&group); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Rget", target_rank, @@ -270,10 +270,10 @@ int PMPI_Put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, ((not origin_datatype->is_valid()) || (not target_datatype->is_valid()))) { retval = MPI_ERR_TYPE; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); MPI_Group group; 
win->get_group(&group); - int dst_traced = group->actor(target_rank)->getPid()-1; + int dst_traced = group->actor(target_rank)->getPid(); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Put", dst_traced, origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(), encode_datatype(origin_datatype))); @@ -311,10 +311,10 @@ int PMPI_Rput( void *origin_addr, int origin_count, MPI_Datatype origin_datatype } else if(request == nullptr){ retval = MPI_ERR_REQUEST; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); MPI_Group group; win->get_group(&group); - int dst_traced = group->actor(target_rank)->getPid()-1; + int dst_traced = group->actor(target_rank)->getPid(); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Rput", dst_traced, origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(), encode_datatype(origin_datatype))); @@ -351,7 +351,7 @@ int PMPI_Accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_da } else if (op == MPI_OP_NULL) { retval = MPI_ERR_OP; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); MPI_Group group; win->get_group(&group); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Accumulate", target_rank, @@ -391,7 +391,7 @@ int PMPI_Raccumulate( void *origin_addr, int origin_count, MPI_Datatype origin_d } else if(request == nullptr){ retval = MPI_ERR_REQUEST; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); MPI_Group group; win->get_group(&group); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Raccumulate", target_rank, @@ -431,7 +431,7 @@ MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){ } else if (op == MPI_OP_NULL) { retval = MPI_ERR_OP; } else { - int rank = 
simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); MPI_Group group; win->get_group(&group); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Get_accumulate", target_rank, @@ -476,7 +476,7 @@ MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){ } else if(request == nullptr){ retval = MPI_ERR_REQUEST; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); MPI_Group group; win->get_group(&group); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Rget_accumulate", target_rank, @@ -516,7 +516,7 @@ int PMPI_Compare_and_swap(void* origin_addr, void* compare_addr, void* result_ad } else if ((datatype == MPI_DATATYPE_NULL) || (not datatype->is_valid())) { retval = MPI_ERR_TYPE; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); MPI_Group group; win->get_group(&group); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Compare_and_swap", target_rank, @@ -539,7 +539,7 @@ int PMPI_Win_post(MPI_Group group, int assert, MPI_Win win){ } else if (group==MPI_GROUP_NULL){ retval = MPI_ERR_GROUP; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_post")); retval = win->post(group,assert); TRACE_smpi_comm_out(rank); @@ -556,7 +556,7 @@ int PMPI_Win_start(MPI_Group group, int assert, MPI_Win win){ } else if (group==MPI_GROUP_NULL){ retval = MPI_ERR_GROUP; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_start")); retval = win->start(group,assert); TRACE_smpi_comm_out(rank); @@ -571,7 +571,7 @@ int PMPI_Win_complete(MPI_Win win){ if (win == 
MPI_WIN_NULL) { retval = MPI_ERR_WIN; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_complete")); retval = win->complete(); @@ -588,7 +588,7 @@ int PMPI_Win_wait(MPI_Win win){ if (win == MPI_WIN_NULL) { retval = MPI_ERR_WIN; } else { - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_wait")); retval = win->wait(); @@ -610,7 +610,7 @@ int PMPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win){ } else if (rank == MPI_PROC_NULL){ retval = MPI_SUCCESS; } else { - int myrank = simgrid::s4u::Actor::self()->getPid() - 1; + int myrank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(myrank, __func__, new simgrid::instr::NoOpTIData("Win_lock")); retval = win->lock(lock_type,rank,assert); TRACE_smpi_comm_out(myrank); @@ -627,7 +627,7 @@ int PMPI_Win_unlock(int rank, MPI_Win win){ } else if (rank == MPI_PROC_NULL){ retval = MPI_SUCCESS; } else { - int myrank = simgrid::s4u::Actor::self()->getPid() - 1; + int myrank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_unlock")); retval = win->unlock(rank); TRACE_smpi_comm_out(myrank); @@ -642,7 +642,7 @@ int PMPI_Win_lock_all(int assert, MPI_Win win){ if (win == MPI_WIN_NULL) { retval = MPI_ERR_WIN; } else { - int myrank = simgrid::s4u::Actor::self()->getPid() - 1; + int myrank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_lock_all")); retval = win->lock_all(assert); TRACE_smpi_comm_out(myrank); @@ -657,7 +657,7 @@ int PMPI_Win_unlock_all(MPI_Win win){ if (win == MPI_WIN_NULL) { retval = MPI_ERR_WIN; } else { - int myrank = simgrid::s4u::Actor::self()->getPid() - 1; + int myrank = 
simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_unlock_all")); retval = win->unlock_all(); TRACE_smpi_comm_out(myrank); @@ -674,7 +674,7 @@ int PMPI_Win_flush(int rank, MPI_Win win){ } else if (rank == MPI_PROC_NULL){ retval = MPI_SUCCESS; } else { - int myrank = simgrid::s4u::Actor::self()->getPid() - 1; + int myrank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush")); retval = win->flush(rank); TRACE_smpi_comm_out(myrank); @@ -691,7 +691,7 @@ int PMPI_Win_flush_local(int rank, MPI_Win win){ } else if (rank == MPI_PROC_NULL){ retval = MPI_SUCCESS; } else { - int myrank = simgrid::s4u::Actor::self()->getPid() - 1; + int myrank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_local")); retval = win->flush_local(rank); TRACE_smpi_comm_out(myrank); @@ -706,7 +706,7 @@ int PMPI_Win_flush_all(MPI_Win win){ if (win == MPI_WIN_NULL) { retval = MPI_ERR_WIN; } else { - int myrank = simgrid::s4u::Actor::self()->getPid() - 1; + int myrank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_all")); retval = win->flush_all(); TRACE_smpi_comm_out(myrank); @@ -721,7 +721,7 @@ int PMPI_Win_flush_local_all(MPI_Win win){ if (win == MPI_WIN_NULL) { retval = MPI_ERR_WIN; } else { - int myrank = simgrid::s4u::Actor::self()->getPid() - 1; + int myrank = simgrid::s4u::Actor::self()->getPid(); TRACE_smpi_comm_in(myrank, __FUNCTION__, new simgrid::instr::NoOpTIData("Win_flush_local_all")); retval = win->flush_local_all(); TRACE_smpi_comm_out(myrank); diff --git a/src/smpi/internals/smpi_process.cpp b/src/smpi/internals/smpi_process.cpp index dc764faaf9..1001023d98 100644 --- a/src/smpi/internals/smpi_process.cpp +++ b/src/smpi/internals/smpi_process.cpp @@ -39,7 +39,7 @@ Process::Process(ActorPtr actor, 
msg_bar_t finalization_barrier) { char name[MAILBOX_NAME_MAXLEN]; process_ = actor; - int index = actor->getPid() - 1; // TODO cheinrich: This needs to be removed! Just a quick hack to make the following 2 lines work + int index = actor->getPid(); // TODO cheinrich: This needs to be removed! Just a quick hack to make the following 2 lines work mailbox_ = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, index)); mailbox_small_ = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, index)); mailboxes_mutex_ = xbt_mutex_init(); @@ -278,8 +278,6 @@ void Process::init(int *argc, char ***argv){ simgrid::s4u::ActorPtr proc = simgrid::s4u::Actor::self(); proc->getImpl()->context->set_cleanup(&MSG_process_cleanup_from_SIMIX); - int my_proc_id = proc->getPid() - 1; // The maestro process has always ID 0 but we don't need that process here - char* instance_id = (*argv)[1]; try { int rank = std::stoi(std::string((*argv)[2])); @@ -291,6 +289,7 @@ void Process::init(int *argc, char ***argv){ // cheinrich: I'm not sure what the impact of the SMPI_switch_data_segment on this call is. I moved // this up here so that I can set the privatized region before the switch. 
Process* process = smpi_process_remote(proc); + int my_proc_id = proc->getPid(); if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ /* Now using the segment index of this process */ my_proc_id = proc->getImpl()->segment_index; diff --git a/src/smpi/internals/smpi_replay.cpp b/src/smpi/internals/smpi_replay.cpp index 0461c1c715..db2dfa6ee0 100644 --- a/src/smpi/internals/smpi_replay.cpp +++ b/src/smpi/internals/smpi_replay.cpp @@ -443,7 +443,7 @@ static void action_bcast(const char *const *action) int rank = smpi_process()->index(); TRACE_smpi_comm_in(rank, __FUNCTION__, - new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(root)->getPid()-1, -1.0, size, -1, + new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(root)->getPid(), -1.0, size, -1, encode_datatype(MPI_CURRENT_TYPE), "")); void *sendbuf = smpi_get_tmp_sendbuffer(size* MPI_CURRENT_TYPE->size()); @@ -471,7 +471,7 @@ static void action_reduce(const char *const *action) int rank = smpi_process()->index(); TRACE_smpi_comm_in(rank, __FUNCTION__, - new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(root)->getPid()-1, comp_size, + new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(root)->getPid(), comp_size, comm_size, -1, encode_datatype(MPI_CURRENT_TYPE), "")); void *recvbuf = smpi_get_tmp_sendbuffer(comm_size* MPI_CURRENT_TYPE->size()); diff --git a/src/smpi/mpi/smpi_comm.cpp b/src/smpi/mpi/smpi_comm.cpp index 267ebdafe7..187f09068a 100644 --- a/src/smpi/mpi/smpi_comm.cpp +++ b/src/smpi/mpi/smpi_comm.cpp @@ -308,7 +308,7 @@ void Comm::init_smp(){ int intra_comm_size = 0; int min_index = INT_MAX; // the minimum index will be the leader for (auto& actor : process_list) { - int index = actor.pid - 1; + int index = actor.pid; if (this->group()->rank(actor.iface()) != MPI_UNDEFINED) { // Is this process in the current group? 
intra_comm_size++; if (index < min_index) @@ -365,7 +365,7 @@ void Comm::init_smp(){ if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){ //create leader_communicator for (i=0; i< leader_group_size;i++) - leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]+1), i); + leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]), i); leader_comm = new Comm(leaders_group, nullptr); this->set_leaders_comm(leader_comm); this->set_intra_comm(comm_intra); @@ -373,7 +373,7 @@ void Comm::init_smp(){ // create intracommunicator }else{ for (i=0; i< leader_group_size;i++) - leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]+1), i); + leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]), i); if(this->get_leaders_comm()==MPI_COMM_NULL){ leader_comm = new Comm(leaders_group, nullptr); diff --git a/src/smpi/mpi/smpi_group.cpp b/src/smpi/mpi/smpi_group.cpp index b22a46ce16..f3715b1594 100644 --- a/src/smpi/mpi/smpi_group.cpp +++ b/src/smpi/mpi/smpi_group.cpp @@ -45,7 +45,7 @@ Group::Group(MPI_Group origin) void Group::set_mapping(simgrid::s4u::ActorPtr actor, int rank) { if (0 <= rank && rank < size_) { - int index = actor->getPid()-1; + int index = actor->getPid(); rank_to_index_map_[rank] = index; if (index != MPI_UNDEFINED) { if ((unsigned)index >= index_to_rank_map_.size()) diff --git a/src/smpi/mpi/smpi_request.cpp b/src/smpi/mpi/smpi_request.cpp index 8e9f37fd6c..a248921dc1 100644 --- a/src/smpi/mpi/smpi_request.cpp +++ b/src/smpi/mpi/smpi_request.cpp @@ -178,20 +178,20 @@ void Request::print_request(const char *message) MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { - return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1, - comm->group()->actor(dst)->getPid() - 1, tag, comm, PERSISTENT | SEND | PREPARED); + return new Request(buf == MPI_BOTTOM ? 
nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(), + comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SEND | PREPARED); } MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { - return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1, - comm->group()->actor(dst)->getPid() - 1, tag, comm, PERSISTENT | SSEND | SEND | PREPARED); + return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(), + comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SSEND | SEND | PREPARED); } MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { - return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1, - comm->group()->actor(dst)->getPid() - 1, tag, comm, PERSISTENT | ISEND | SEND | PREPARED); + return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(), + comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | ISEND | SEND | PREPARED); } @@ -213,8 +213,8 @@ MPI_Request Request::rma_send_init(void *buf, int count, MPI_Datatype datatype, MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm) { return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, - src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid() - 1, - simgrid::s4u::Actor::self()->getPid() - 1, tag, comm, PERSISTENT | RECV | PREPARED); + src == MPI_ANY_SOURCE ? 
MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(), + simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV | PREPARED); } MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, @@ -235,15 +235,15 @@ MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm) { return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, - src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid() - 1, - simgrid::s4u::Actor::self()->getPid() - 1, tag, comm, PERSISTENT | RECV | PREPARED); + src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(), + simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV | PREPARED); } MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ - request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1, - comm->group()->actor(dst)->getPid() - 1, tag, comm, NON_PERSISTENT | ISEND | SEND); + request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(), + comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SEND); request->start(); return request; } @@ -251,8 +251,8 @@ MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ - request = new Request(buf == MPI_BOTTOM ? 
nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1, - comm->group()->actor(dst)->getPid() - 1, tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND); + request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(), + comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND); request->start(); return request; } @@ -262,8 +262,8 @@ MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, - src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid() - 1, - simgrid::s4u::Actor::self()->getPid() - 1, tag, comm, NON_PERSISTENT | RECV); + src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(), + simgrid::s4u::Actor::self()->getPid(), tag, comm, NON_PERSISTENT | RECV); request->start(); return request; } @@ -279,8 +279,8 @@ void Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ - request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1, - comm->group()->actor(dst)->getPid() - 1, tag, comm, NON_PERSISTENT | SEND); + request = new Request(buf == MPI_BOTTOM ? 
nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(), + comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SEND); request->start(); wait(&request, MPI_STATUS_IGNORE); @@ -290,8 +290,8 @@ void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ - request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid() - 1, - comm->group()->actor(dst)->getPid() - 1, tag, comm, NON_PERSISTENT | SSEND | SEND); + request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(), + comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SSEND | SEND); request->start(); wait(&request,MPI_STATUS_IGNORE); @@ -304,8 +304,8 @@ void Request::sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,int d { MPI_Request requests[2]; MPI_Status stats[2]; - unsigned int myid = simgrid::s4u::Actor::self()->getPid() - 1; - if ((comm->group()->actor(dst)->getPid()-1 == myid) && (comm->group()->actor(src)->getPid()-1 == myid)){ + unsigned int myid = simgrid::s4u::Actor::self()->getPid(); + if ((comm->group()->actor(dst)->getPid() == myid) && (comm->group()->actor(src)->getPid() == myid)){ Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype); return; } @@ -333,7 +333,7 @@ void Request::start() if ((flags_ & RECV) != 0) { this->print_request("New recv"); - simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_+1)); + simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_)); int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh"); @@ -385,7 +385,7 @@ void Request::start() if (async_small_thresh != 0 || (flags_ & RMA) != 0 ) xbt_mutex_release(mut); } else { 
/* the RECV flag was not set, so this is a send */ - simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_+1)); + simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_)); int rank = src_; if (TRACE_smpi_view_internals()) { TRACE_smpi_send(rank, rank, dst_, tag_, size_); @@ -465,7 +465,7 @@ void Request::start() // we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later real_size_=size_; action_ = simcall_comm_isend( - simgrid::s4u::Actor::byPid(src_ + 1)->getImpl(), mailbox, size_, -1.0, buf, real_size_, &match_send, + simgrid::s4u::Actor::byPid(src_)->getImpl(), mailbox, size_, -1.0, buf, real_size_, &match_send, &xbt_free_f, // how to free the userdata if a detached send fails not process->replaying() ? smpi_comm_copy_data_callback : &smpi_comm_null_copy_buffer_callback, this, // detach if msg size < eager/rdv switch limit @@ -628,8 +628,8 @@ void Request::iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* double speed = simgrid::s4u::Actor::self()->getHost()->getSpeed(); double maxrate = xbt_cfg_get_double("smpi/iprobe-cpu-usage"); MPI_Request request = new Request( - nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->getPid() - 1, - simgrid::s4u::Actor::self()->getPid() - 1, tag, comm, PERSISTENT | RECV); + nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? 
MPI_ANY_SOURCE : comm->group()->actor(source)->getPid(), + simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV); if (smpi_iprobe_sleep > 0) { smx_activity_t iprobe_sleep = simcall_execution_start( "iprobe", /* flops to executek*/ nsleeps * smpi_iprobe_sleep * speed * maxrate, /* priority */ 1.0, @@ -700,7 +700,7 @@ void Request::finish_wait(MPI_Request* request, MPI_Status * status) static_cast(req->old_buf_) >= smpi_data_exe_start && static_cast(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) { XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment "); - smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid() - 1); + smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid()); } if(datatype->flags() & DT_FLAG_DERIVED){ @@ -719,7 +719,7 @@ void Request::finish_wait(MPI_Request* request, MPI_Status * status) } if (TRACE_smpi_view_internals() && ((req->flags_ & RECV) != 0)){ - int rank = simgrid::s4u::Actor::self()->getPid() - 1; + int rank = simgrid::s4u::Actor::self()->getPid(); int src_traced = (req->src_ == MPI_ANY_SOURCE ? 
req->real_src_ : req->src_); TRACE_smpi_recv(src_traced, rank,req->tag_); } diff --git a/src/smpi/mpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp index 58c9b96df1..771ad4027f 100644 --- a/src/smpi/mpi/smpi_win.cpp +++ b/src/smpi/mpi/smpi_win.cpp @@ -210,12 +210,12 @@ int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, if (target_rank != comm_->rank()) { //prepare send_request MPI_Request sreq = - Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(), - comm_->group()->actor(target_rank)->getPid() - 1, SMPI_RMA_TAG + 1, comm_, MPI_OP_NULL); + Request::rma_send_init(origin_addr, origin_count, origin_datatype, simgrid::s4u::Actor::self()->getPid(), + comm_->group()->actor(target_rank)->getPid(), SMPI_RMA_TAG + 1, comm_, MPI_OP_NULL); //prepare receiver request - MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, comm_->rank(), - comm_->group()->actor(target_rank)->getPid() - 1, SMPI_RMA_TAG + 1, + MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, simgrid::s4u::Actor::self()->getPid(), + comm_->group()->actor(target_rank)->getPid(), SMPI_RMA_TAG + 1, recv_win->comm_, MPI_OP_NULL); //start send @@ -269,12 +269,12 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, if(target_rank != comm_->rank()){ //prepare send_request MPI_Request sreq = Request::rma_send_init(send_addr, target_count, target_datatype, - comm_->group()->actor(target_rank)->getPid() - 1, comm_->rank(), + comm_->group()->actor(target_rank)->getPid(), simgrid::s4u::Actor::self()->getPid(), SMPI_RMA_TAG + 2, send_win->comm_, MPI_OP_NULL); //prepare receiver request MPI_Request rreq = Request::rma_recv_init(origin_addr, origin_count, origin_datatype, - comm_->group()->actor(target_rank)->getPid() - 1, comm_->rank(), + comm_->group()->actor(target_rank)->getPid(), simgrid::s4u::Actor::self()->getPid(), SMPI_RMA_TAG + 2, comm_, MPI_OP_NULL); //start the send, 
with another process than us as sender. @@ -332,12 +332,12 @@ int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_da //prepare send_request MPI_Request sreq = - Request::rma_send_init(origin_addr, origin_count, origin_datatype, comm_->rank(), - comm_->group()->actor(target_rank)->getPid() - 1, SMPI_RMA_TAG - 3 - count_, comm_, op); + Request::rma_send_init(origin_addr, origin_count, origin_datatype, simgrid::s4u::Actor::self()->getPid(), + comm_->group()->actor(target_rank)->getPid(), SMPI_RMA_TAG - 3 - count_, comm_, op); // prepare receiver request - MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, comm_->rank(), - comm_->group()->actor(target_rank)->getPid() - 1, SMPI_RMA_TAG - 3 - count_, + MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, simgrid::s4u::Actor::self()->getPid(), + comm_->group()->actor(target_rank)->getPid(), SMPI_RMA_TAG - 3 - count_, recv_win->comm_, op); count_++; @@ -452,8 +452,8 @@ int Win::start(MPI_Group group, int assert){ XBT_DEBUG("Entering MPI_Win_Start"); while (j != size) { - int src = group->actor(j)->getPid()-1; - if (src != comm_->rank() && src != MPI_UNDEFINED) { + int src = group->actor(j)->getPid(); + if ((unsigned)src != simgrid::s4u::Actor::self()->getPid() && src != MPI_UNDEFINED) { reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, MPI_COMM_WORLD); i++; } @@ -482,8 +482,8 @@ int Win::post(MPI_Group group, int assert){ XBT_DEBUG("Entering MPI_Win_Post"); while(j!=size){ - int dst=group->actor(j)->getPid()-1; - if (dst != comm_->rank() && dst != MPI_UNDEFINED) { + int dst=group->actor(j)->getPid(); + if ((unsigned)dst != simgrid::s4u::Actor::self()->getPid() && dst != MPI_UNDEFINED) { reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+4, MPI_COMM_WORLD); i++; } @@ -515,8 +515,8 @@ int Win::complete(){ MPI_Request* reqs = xbt_new0(MPI_Request, size); while(j!=size){ - int 
dst=group_->actor(j)->getPid()-1; - if (dst != comm_->rank() && dst != MPI_UNDEFINED) { + int dst=group_->actor(j)->getPid(); + if ((unsigned)dst != simgrid::s4u::Actor::self()->getPid() && dst != MPI_UNDEFINED) { reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+5, MPI_COMM_WORLD); i++; } @@ -549,8 +549,8 @@ int Win::wait(){ MPI_Request* reqs = xbt_new0(MPI_Request, size); while(j!=size){ - int src=group_->actor(j)->getPid()-1; - if (src != comm_->rank() && src != MPI_UNDEFINED) { + int src=group_->actor(j)->getPid(); + if ((unsigned)src != simgrid::s4u::Actor::self()->getPid() && src != MPI_UNDEFINED) { reqs[i]=Request::irecv_init(nullptr, 0, MPI_CHAR, src,SMPI_RMA_TAG+5, MPI_COMM_WORLD); i++; }