} else {
int rank = smpi_process()->index();
TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::CollTIData("bcast", comm->group()->index(root), -1.0,
+ new simgrid::instr::CollTIData("bcast", comm->group()->actor(root)->getPid()-1, -1.0,
datatype->is_replayable() ? count : count * datatype->size(), -1,
encode_datatype(datatype), ""));
if (comm->size() > 1)
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::CollTIData(
- "gather", comm->group()->index(root), -1.0,
+ "gather", comm->group()->actor(root)->getPid()-1, -1.0,
sendtmptype->is_replayable() ? sendtmpcount : sendtmpcount * sendtmptype->size(),
(comm->rank() != root || recvtype->is_replayable()) ? recvcount : recvcount * recvtype->size(),
encode_datatype(sendtmptype), encode_datatype(recvtype)));
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::VarCollTIData(
- "gatherV", comm->group()->index(root),
+ "gatherV", comm->group()->actor(root)->getPid()-1,
sendtmptype->is_replayable() ? sendtmpcount : sendtmpcount * sendtmptype->size(), nullptr,
dt_size_recv, trace_recvcounts, encode_datatype(sendtmptype), encode_datatype(recvtype)));
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::CollTIData(
- "scatter", comm->group()->index(root), -1.0,
+ "scatter", comm->group()->actor(root)->getPid()-1, -1.0,
(comm->rank() != root || sendtype->is_replayable()) ? sendcount : sendcount * sendtype->size(),
recvtype->is_replayable() ? recvcount : recvcount * recvtype->size(), encode_datatype(sendtype),
encode_datatype(recvtype)));
}
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::VarCollTIData(
- "scatterV", comm->group()->index(root), dt_size_send, trace_sendcounts,
+ "scatterV", comm->group()->actor(root)->getPid()-1, dt_size_send, trace_sendcounts,
recvtype->is_replayable() ? recvcount : recvcount * recvtype->size(), nullptr,
encode_datatype(sendtype), encode_datatype(recvtype)));
int rank = smpi_process()->index();
TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::CollTIData("reduce", comm->group()->index(root), 0,
+ new simgrid::instr::CollTIData("reduce", comm->group()->actor(root)->getPid()-1, 0,
datatype->is_replayable() ? count : count * datatype->size(), -1,
encode_datatype(datatype), ""));
if(ranks1[i]==MPI_PROC_NULL){
ranks2[i]=MPI_PROC_NULL;
}else{
- int index = group1->index(ranks1[i]);
- ranks2[i] = group2->rank(index);
+ simgrid::s4u::ActorPtr actor = group1->actor(ranks1[i]);
+ ranks2[i] = group2->rank(actor);
}
}
return MPI_SUCCESS;
int rank = smpi_process()->index();
TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::Pt2PtTIData("Irecv", comm->group()->index(src),
+ new simgrid::instr::Pt2PtTIData("Irecv", comm->group()->actor(src)->getPid()-1, // TODO cheinrich was before: index(src); -- make the "-1" go away
datatype->is_replayable() ? count : count * datatype->size(),
encode_datatype(datatype)));
retval = MPI_ERR_TAG;
} else {
int rank = smpi_process()->index();
- int trace_dst = comm->group()->index(dst);
+ int trace_dst = comm->group()->actor(dst)->getPid()-1; // TODO cheinrich
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("Isend", trace_dst,
datatype->is_replayable() ? count : count * datatype->size(),
retval = MPI_ERR_TAG;
} else {
int rank = smpi_process()->index();
- int trace_dst = comm->group()->index(dst);
+ int trace_dst = comm->group()->actor(dst)->getPid()-1; // TODO cheinrich
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("ISsend", trace_dst,
datatype->is_replayable() ? count : count * datatype->size(),
retval = MPI_ERR_TAG;
} else {
int rank = smpi_process()->index();
- int src_traced = comm->group()->index(src);
+ int src_traced = comm->group()->actor(src)->getPid()-1; // TODO cheinrich
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("recv", src_traced,
datatype->is_replayable() ? count : count * datatype->size(),
// the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
if (status != MPI_STATUS_IGNORE) {
- src_traced = comm->group()->index(status->MPI_SOURCE);
+ src_traced = comm->group()->actor(status->MPI_SOURCE)->getPid()-1;
if (not TRACE_smpi_view_internals()) {
TRACE_smpi_recv(src_traced, rank, tag);
}
retval = MPI_ERR_TAG;
} else {
int rank = smpi_process()->index();
- int dst_traced = comm->group()->index(dst);
+ int dst_traced = comm->group()->actor(dst)->getPid()-1; // TODO cheinrich
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("send", dst_traced,
datatype->is_replayable() ? count : count * datatype->size(),
retval = MPI_ERR_TAG;
} else {
int rank = smpi_process()->index();
- int dst_traced = comm->group()->index(dst);
+ int dst_traced = comm->group()->actor(dst)->getPid()-1;
TRACE_smpi_comm_in(rank, __FUNCTION__,
new simgrid::instr::Pt2PtTIData("Ssend", dst_traced,
datatype->is_replayable() ? count : count * datatype->size(),
retval = MPI_ERR_TAG;
} else {
int rank = smpi_process()->index();
- int dst_traced = comm->group()->index(dst);
- int src_traced = comm->group()->index(src);
+ int dst_traced = comm->group()->actor(dst)->getPid()-1;
+ int src_traced = comm->group()->actor(src)->getPid()-1;
// FIXME: Hack the way to trace this one
std::vector<int>* dst_hack = new std::vector<int>;
int rank = smpi_process()->index();
MPI_Group group;
win->get_group(&group);
- int dst_traced = group->index(target_rank);
+ int dst_traced = group->actor(target_rank)->getPid()-1;
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Put", dst_traced,
origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
encode_datatype(origin_datatype)));
int rank = smpi_process()->index();
MPI_Group group;
win->get_group(&group);
- int dst_traced = group->index(target_rank);
+ int dst_traced = group->actor(target_rank)->getPid()-1;
TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::Pt2PtTIData("Rput", dst_traced,
origin_datatype->is_replayable() ? origin_count : origin_count * origin_datatype->size(),
encode_datatype(origin_datatype)));
#include "smpi_f2c.hpp"
#include <smpi/smpi.h>
+#include <map>
#include <vector>
namespace simgrid{
* std::map here, but looking up a value there costs O(log(n)).
* For a vector, this costs O(1). We hence go with the vector.
*/
+ std::vector<simgrid::s4u::ActorPtr> rank_to_actor_map_;
+ std::map<simgrid::s4u::ActorPtr, int> actor_to_rank_map_;
std::vector<int> rank_to_index_map_;
std::vector<int> index_to_rank_map_;
+
int refcount_;
public:
explicit Group();
explicit Group(int size);
explicit Group(Group* origin);
- void set_mapping(int index, int rank);
- int index(int rank);
+ void set_mapping(simgrid::s4u::ActorPtr actor, int rank);
int rank(int index);
+ simgrid::s4u::ActorPtr actor(int rank);
+ int rank(const simgrid::s4u::ActorPtr process);
void ref();
static void unref(MPI_Group group);
int size();
smpi_instances.insert(std::pair<std::string, Instance>(name, instance));
}
// Register one MPI process of a named deployment instance: bump the
// instance's live-process counter and bind the process to `rank` in the
// instance's MPI_COMM_WORLD group. The mapping key changes from a raw
// integer index to the s4u actor itself (this hunk's -/+ lines).
-void smpi_deployment_register_process(const char* instance_id, int rank, int index)
+void smpi_deployment_register_process(const char* instance_id, int rank, simgrid::s4u::ActorPtr actor)
{
  if (smpi_instances.empty()) // no instance registered, we probably used smpirun.
    return;
  // NOTE(review): at() throws std::out_of_range for an unknown instance_id —
  // presumably registration always precedes this call; confirm with callers.
  Instance& instance = smpi_instances.at(instance_id);
  instance.present_processes++;
-  instance.comm_world->group()->set_mapping(index, rank);
+  instance.comm_world->group()->set_mapping(actor, rank);
}
MPI_Comm* smpi_deployment_comm_world(const char* instance_id)
if(comm_self_==MPI_COMM_NULL){
MPI_Group group = new Group(1);
comm_self_ = new Comm(group, nullptr);
- group->set_mapping(index_, 0);
+ group->set_mapping(process_, 0);
}
return comm_self_;
}
int rank = smpi_process()->index();
TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->index(root), -1.0, size, -1,
+ new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(root)->getPid()-1, -1.0, size, -1,
encode_datatype(MPI_CURRENT_TYPE), ""));
void *sendbuf = smpi_get_tmp_sendbuffer(size* MPI_CURRENT_TYPE->size());
int rank = smpi_process()->index();
TRACE_smpi_comm_in(rank, __FUNCTION__,
- new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->index(root), comp_size,
+ new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(root)->getPid()-1, comp_size,
comm_size, -1, encode_datatype(MPI_CURRENT_TYPE), ""));
void *recvbuf = smpi_get_tmp_sendbuffer(comm_size* MPI_CURRENT_TYPE->size());
simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;
+using simgrid::s4u::ActorPtr;
+
/* Support for cartesian topology was added, but there are 2 other types of topology: graph and dist graph. In order to
 * support them, we have to add a field SMPI_Topo_type, and replace the MPI_Topology field by a union. */
group_root = group_out; /* Save root's group */
}
for (unsigned j = 0; j < rankmap.size(); j++) {
- int index = group->index(rankmap[j].second);
- group_out->set_mapping(index, j);
+ ActorPtr actor = group->actor(rankmap[j].second);
+ group_out->set_mapping(actor, j);
}
MPI_Request* requests = xbt_new(MPI_Request, rankmap.size());
int reqs = 0;
int min_index = INT_MAX; // the minimum index will be the leader
for (auto const& actor : process_list) {
int index = actor.pid - 1;
- if (this->group()->rank(index) != MPI_UNDEFINED) {
+ // TODO cheinrich: actor is of type ActorImpl here and I'm unsure how to convert that without the lookup byPid() ...
+ if (this->group()->rank(simgrid::s4u::Actor::byPid(actor.pid)) != MPI_UNDEFINED) { // Is this process in the current group?
intra_comm_size++;
- // the process is in the comm
if (index < min_index)
min_index = index;
}
MPI_Group group_intra = new Group(intra_comm_size);
int i = 0;
for (auto const& actor : process_list) {
- int index = actor.pid - 1;
- if(this->group()->rank(index)!=MPI_UNDEFINED){
- group_intra->set_mapping(index, i);
+ // TODO cheinrich : We should not need the const_cast here and above.
+ if(this->group()->rank(simgrid::s4u::Actor::byPid(actor.pid))!=MPI_UNDEFINED){
+ group_intra->set_mapping(simgrid::s4u::Actor::byPid(actor.pid), i);
i++;
}
}
if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(leader_list[i], i);
+ leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]+1), i);
leader_comm = new Comm(leaders_group, nullptr);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
// create intracommunicator
}else{
for (i=0; i< leader_group_size;i++)
- leaders_group->set_mapping(leader_list[i], i);
+ leaders_group->set_mapping(simgrid::s4u::Actor::byPid(leader_list[i]+1), i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
leader_comm = new Comm(leaders_group, nullptr);
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
int is_blocked=1;
- int prev=this->group()->rank(comm_intra->group()->index(0));
+ int prev=this->group()->rank(comm_intra->group()->actor(0));
for (i = 1; i < my_local_size; i++) {
- int that = this->group()->rank(comm_intra->group()->index(i));
+ int that = this->group()->rank(comm_intra->group()->actor(i));
if (that != prev + 1) {
is_blocked = 0;
break;
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+#include "simgrid/s4u/Actor.hpp"
#include "smpi_group.hpp"
#include "smpi_comm.hpp"
#include <string>
namespace simgrid{
namespace smpi{
+using simgrid::s4u::ActorPtr;
+
// Default ctor: build the permanent empty group (size 0). refcount_ starts
// at 1 so that unref() can never drop it to zero and free this group.
Group::Group()
{
  size_ = 0; /* size */
  refcount_ = 1; /* refcount_: start > 0 so that this group never gets freed */
}
// Sized ctor: pre-allocate the per-rank maps filled with "unset" sentinels
// (nullptr actors, MPI_UNDEFINED indices/ranks); actual mappings are
// installed later through set_mapping().
-Group::Group(int n) : size_(n), rank_to_index_map_(size_, MPI_UNDEFINED)
+Group::Group(int n) : size_(n), rank_to_actor_map_(size_, nullptr), rank_to_index_map_(size_, MPI_UNDEFINED), index_to_rank_map_(size_, MPI_UNDEFINED)
{
  refcount_ = 1;
}
refcount_ = 1;
rank_to_index_map_ = origin->rank_to_index_map_;
index_to_rank_map_ = origin->index_to_rank_map_;
+ rank_to_actor_map_ = origin->rank_to_actor_map_;
+ actor_to_rank_map_ = origin->actor_to_rank_map_;
}
}
// Bind `rank` to an actor. The index is now derived from the actor's PID
// (pid - 1) instead of being passed explicitly; the legacy index maps are
// kept in sync so existing index-based lookups keep working.
// Out-of-range ranks are silently ignored (no-op).
-void Group::set_mapping(int index, int rank)
+void Group::set_mapping(simgrid::s4u::ActorPtr actor, int rank)
{
  if (0 <= rank && rank < size_) {
+    int index = actor->getPid()-1;
    rank_to_index_map_[rank] = index;
    if (index != MPI_UNDEFINED) {
      // Grow the reverse map on demand; holes stay MPI_UNDEFINED.
      if ((unsigned)index >= index_to_rank_map_.size())
        index_to_rank_map_.resize(index + 1, MPI_UNDEFINED);
      index_to_rank_map_[index] = rank;
    }
-  }
-}
// The old rank->index accessor is removed; callers migrate to actor(rank).
-int Group::index(int rank)
-{
-  int index;
-  if (0 <= rank && rank < size_)
-    index = rank_to_index_map_[rank];
-  else
-    index = MPI_UNDEFINED;
-  return index;
+    rank_to_actor_map_[rank] = actor;
+    // A nullptr actor marks a not-yet-mapped rank (see the sized ctor);
+    // only real actors get a reverse actor->rank entry.
+    if (actor != nullptr) {
+      actor_to_rank_map_.insert({actor, rank});
+    }
+  }
}
int Group::rank(int index)
rank = index_to_rank_map_[index];
else
rank = MPI_UNDEFINED;
+
return rank;
}
// Rank -> actor lookup (the replacement for the removed index(rank)).
// Returns nullptr for a rank outside [0, size_) or a rank that was never
// mapped — the ActorPtr analogue of the MPI_UNDEFINED sentinel.
+simgrid::s4u::ActorPtr Group::actor(int rank) {
+  if (0 <= rank && rank < size_)
+    return rank_to_actor_map_[rank];
+  else
+    return nullptr;
+}
+
// Actor -> rank reverse lookup; MPI_UNDEFINED when the actor is not a
// member of this group. Backed by a std::map, so this costs O(log n)
// versus O(1) for the vector-backed forward direction.
+int Group::rank(const simgrid::s4u::ActorPtr actor) {
+  auto iterator = actor_to_rank_map_.find(actor);
+  return (iterator == actor_to_rank_map_.end()) ? MPI_UNDEFINED : (*iterator).second;
+}
+
void Group::ref()
{
refcount_++;
result = MPI_UNEQUAL;
} else {
for (int i = 0; i < size_; i++) {
- int index = this->index(i);
- int rank = group2->rank(index);
+ ActorPtr actor = this->actor(i);
+ int rank = group2->rank(actor);
if (rank == MPI_UNDEFINED) {
result = MPI_UNEQUAL;
break;
int Group::incl(int n, int* ranks, MPI_Group* newgroup)
{
int i=0;
- int index=0;
if (n == 0) {
*newgroup = MPI_GROUP_EMPTY;
} else if (n == size_) {
} else {
*newgroup = new Group(n);
for (i = 0; i < n; i++) {
- index = this->index(ranks[i]);
- (*newgroup)->set_mapping(index, i);
+ ActorPtr actor = this->actor(ranks[i]);
+ (*newgroup)->set_mapping(actor, i);
}
}
return MPI_SUCCESS;
int size1 = size_;
int size2 = group2->size();
for (int i = 0; i < size2; i++) {
- int proc2 = group2->index(i);
- int proc1 = this->rank(proc2);
+ ActorPtr actor = group2->actor(i);
+ int proc1 = this->rank(actor);
if (proc1 == MPI_UNDEFINED) {
size1++;
}
*newgroup = new Group(size1);
size2 = this->size();
for (int i = 0; i < size2; i++) {
- int proc1 = this->index(i);
- (*newgroup)->set_mapping(proc1, i);
+ ActorPtr actor1 = this->actor(i);
+ (*newgroup)->set_mapping(actor1, i);
}
for (int i = size2; i < size1; i++) {
- int proc2 = group2->index(i - size2);
- (*newgroup)->set_mapping(proc2, i);
+ ActorPtr actor = group2->actor(i - size2);
+ (*newgroup)->set_mapping(actor, i);
}
}
return MPI_SUCCESS;
{
int size2 = group2->size();
for (int i = 0; i < size2; i++) {
- int proc2 = group2->index(i);
- int proc1 = this->rank(proc2);
+ ActorPtr actor = group2->actor(i);
+ int proc1 = this->rank(actor);
if (proc1 == MPI_UNDEFINED) {
size2--;
}
*newgroup = new Group(size2);
int j=0;
for (int i = 0; i < group2->size(); i++) {
- int proc2 = group2->index(i);
- int proc1 = this->rank(proc2);
+ ActorPtr actor = group2->actor(i);
+ int proc1 = this->rank(actor);
if (proc1 != MPI_UNDEFINED) {
- (*newgroup)->set_mapping(proc2, j);
+ (*newgroup)->set_mapping(actor, j);
j++;
}
}
int newsize = size_;
int size2 = size_;
for (int i = 0; i < size2; i++) {
- int proc1 = this->index(i);
- int proc2 = group2->rank(proc1);
+ ActorPtr actor = this->actor(i);
+ int proc2 = group2->rank(actor);
if (proc2 != MPI_UNDEFINED) {
newsize--;
}
} else {
*newgroup = new Group(newsize);
for (int i = 0; i < size2; i++) {
- int proc1 = this->index(i);
- int proc2 = group2->rank(proc1);
+ ActorPtr actor = this->actor(i);
+ int proc2 = group2->rank(actor);
if (proc2 == MPI_UNDEFINED) {
- (*newgroup)->set_mapping(proc1, i);
+ (*newgroup)->set_mapping(actor, i);
}
}
}
int j = 0;
for (int i = 0; i < oldsize; i++) {
if(to_exclude[i]==0){
- int index = this->index(i);
- (*newgroup)->set_mapping(index, j);
+ ActorPtr actor = this->actor(i);
+ (*newgroup)->set_mapping(actor, j);
j++;
}
}
for (int rank = ranges[i][0]; /* First */
rank >= 0 && rank < size_; /* Last */
) {
- int index = this->index(rank);
- (*newgroup)->set_mapping(index, j);
+ ActorPtr actor = this->actor(rank);
+ (*newgroup)->set_mapping(actor, j);
j++;
if(rank == ranges[i][1]){/*already last ?*/
break;
}
}
if(add==1){
- int index = this->index(oldrank);
- (*newgroup)->set_mapping(index, newrank);
+ ActorPtr actor = this->actor(oldrank);
+ (*newgroup)->set_mapping(actor, newrank);
newrank++;
}
oldrank++;
{
return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, PERSISTENT | SEND | PREPARED);
+ comm->group()->actor(dst)->getPid()-1, tag, comm, PERSISTENT | SEND | PREPARED);
}
MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
+ comm->group()->actor(dst)->getPid()-1, tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
}
MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
return new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag,comm, PERSISTENT | ISEND | SEND | PREPARED);
+ comm->group()->actor(dst)->getPid()-1, tag,comm, PERSISTENT | ISEND | SEND | PREPARED);
}
MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype,
- src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->index(src),
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid()-1,
smpi_process()->index(), tag, comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
- src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->index(src), smpi_process()->index(), tag,
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid()-1, smpi_process()->index(), tag,
comm, PERSISTENT | RECV | PREPARED);
}
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | ISEND | SEND);
+ comm->group()->actor(dst)->getPid()-1, tag, comm, NON_PERSISTENT | ISEND | SEND);
request->start();
return request;
}
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
+ comm->group()->actor(dst)->getPid()-1, tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
request->start();
return request;
}
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
- src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->index(src), smpi_process()->index(),
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid()-1, smpi_process()->index(),
tag, comm, NON_PERSISTENT | RECV);
request->start();
return request;
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | SEND);
+ comm->group()->actor(dst)->getPid()-1, tag, comm, NON_PERSISTENT | SEND);
request->start();
wait(&request, MPI_STATUS_IGNORE);
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | SSEND | SEND);
+ comm->group()->actor(dst)->getPid()-1, tag, comm, NON_PERSISTENT | SSEND | SEND);
request->start();
wait(&request,MPI_STATUS_IGNORE);
{
MPI_Request requests[2];
MPI_Status stats[2];
- int myid=smpi_process()->index();
- if ((comm->group()->index(dst) == myid) && (comm->group()->index(src) == myid)){
+ unsigned int myid=smpi_process()->index();
+ if ((comm->group()->actor(dst)->getPid()-1 == myid) && (comm->group()->actor(src)->getPid()-1 == myid)){
Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
return;
}
double speed = simgrid::s4u::Actor::self()->getHost()->getSpeed();
double maxrate = xbt_cfg_get_double("smpi/iprobe-cpu-usage");
MPI_Request request = new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
- comm->group()->index(source), comm->rank(), tag, comm, PERSISTENT | RECV);
+ comm->group()->actor(source)->getPid()-1, comm->rank(), tag, comm, PERSISTENT | RECV);
if (smpi_iprobe_sleep > 0) {
smx_activity_t iprobe_sleep = simcall_execution_start(
"iprobe", /* flops to executek*/ nsleeps * smpi_iprobe_sleep * speed * maxrate, /* priority */ 1.0,
oldGroup = comm_old->group();
newGroup = new Group(newSize);
for (int i = 0 ; i < newSize ; i++) {
- newGroup->set_mapping(oldGroup->index(i), i);
+ newGroup->set_mapping(oldGroup->actor(i), i);
}
nnodes_ = newSize;
if(target_rank != comm_->rank()){
//prepare send_request
MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype, smpi_process()->index(),
- comm_->group()->index(target_rank), SMPI_RMA_TAG+1, comm_, MPI_OP_NULL);
+ comm_->group()->actor(target_rank)->getPid()-1, SMPI_RMA_TAG+1, comm_, MPI_OP_NULL);
//prepare receiver request
MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, smpi_process()->index(),
- comm_->group()->index(target_rank), SMPI_RMA_TAG+1, recv_win->comm_, MPI_OP_NULL);
+ comm_->group()->actor(target_rank)->getPid()-1, SMPI_RMA_TAG+1, recv_win->comm_, MPI_OP_NULL);
//start send
sreq->start();
if(target_rank != comm_->rank()){
//prepare send_request
MPI_Request sreq = Request::rma_send_init(send_addr, target_count, target_datatype,
- comm_->group()->index(target_rank), smpi_process()->index(), SMPI_RMA_TAG+2, send_win->comm_,
+ comm_->group()->actor(target_rank)->getPid()-1, smpi_process()->index(), SMPI_RMA_TAG+2, send_win->comm_,
MPI_OP_NULL);
//prepare receiver request
MPI_Request rreq = Request::rma_recv_init(origin_addr, origin_count, origin_datatype,
- comm_->group()->index(target_rank), smpi_process()->index(), SMPI_RMA_TAG+2, comm_,
+ comm_->group()->actor(target_rank)->getPid()-1, smpi_process()->index(), SMPI_RMA_TAG+2, comm_,
MPI_OP_NULL);
//start the send, with another process than us as sender.
//prepare send_request
MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype,
- smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG-3-count_, comm_, op);
+ smpi_process()->index(), comm_->group()->actor(target_rank)->getPid()-1, SMPI_RMA_TAG-3-count_, comm_, op);
//prepare receiver request
MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype,
- smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG-3-count_, recv_win->comm_, op);
+ smpi_process()->index(), comm_->group()->actor(target_rank)->getPid()-1, SMPI_RMA_TAG-3-count_, recv_win->comm_, op);
count_++;
XBT_DEBUG("Entering MPI_Win_Start");
while (j != size) {
- int src = group->index(j);
+ int src = group->actor(j)->getPid()-1;
if (src != smpi_process()->index() && src != MPI_UNDEFINED) {
reqs[i] = Request::irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, MPI_COMM_WORLD);
i++;
XBT_DEBUG("Entering MPI_Win_Post");
while(j!=size){
- int dst=group->index(j);
+ int dst=group->actor(j)->getPid()-1;
if(dst!=smpi_process()->index() && dst!=MPI_UNDEFINED){
reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+4, MPI_COMM_WORLD);
i++;
MPI_Request* reqs = xbt_new0(MPI_Request, size);
while(j!=size){
- int dst=group_->index(j);
+ int dst=group_->actor(j)->getPid()-1;
if(dst!=smpi_process()->index() && dst!=MPI_UNDEFINED){
reqs[i]=Request::send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+5, MPI_COMM_WORLD);
i++;
MPI_Request* reqs = xbt_new0(MPI_Request, size);
while(j!=size){
- int src=group_->index(j);
+ int src=group_->actor(j)->getPid()-1;
if(src!=smpi_process()->index() && src!=MPI_UNDEFINED){
reqs[i]=Request::irecv_init(nullptr, 0, MPI_CHAR, src,SMPI_RMA_TAG+5, MPI_COMM_WORLD);
i++;