#include "smpi_request.hpp"
-#include "mc/mc.h"
-#include "src/kernel/activity/CommImpl.hpp"
-#include "src/mc/mc_replay.h"
#include "SmpiHost.hpp"
-#include "private.h"
+#include "mc/mc.h"
#include "private.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_op.hpp"
#include "smpi_process.hpp"
+#include "src/kernel/activity/CommImpl.hpp"
+#include "src/mc/mc_replay.hpp"
+#include "src/simix/ActorImpl.hpp"
#include <algorithm>
namespace simgrid{
namespace smpi{
-Request::Request(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags) : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags)
+Request::Request(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags)
+ : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags)
{
void *old_buf = nullptr;
// FIXME Handle the case of a partial shared malloc.
MPI_Request Request::send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, PERSISTENT | SEND | PREPARED);
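+ // peers are now identified by their actor PID instead of their index in the communicator group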
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SEND | PREPARED);
}
MPI_Request Request::ssend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | SSEND | SEND | PREPARED);
}
MPI_Request Request::isend_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag,comm, PERSISTENT | ISEND | SEND | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, PERSISTENT | ISEND | SEND | PREPARED);
}
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
if(op==MPI_OP_NULL){
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, src, dst, tag,
+ request = new Request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, comm->group()->actor(src)->getPid(),
+ comm->group()->actor(dst)->getPid(), tag,
comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
}else{
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag,
+ request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(),
+ comm->group()->actor(dst)->getPid(), tag,
comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
request->op_ = op;
}
MPI_Request Request::recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype,
- src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->index(src),
- smpi_process()->index(), tag, comm, PERSISTENT | RECV | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::rma_recv_init(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
MPI_Op op)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
if(op==MPI_OP_NULL){
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag,
- comm, RMA | NON_PERSISTENT | RECV | PREPARED);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(),
+ comm->group()->actor(dst)->getPid(), tag,
+ comm, RMA | NON_PERSISTENT | RECV | PREPARED);
}else{
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src, dst, tag,
- comm, RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, comm->group()->actor(src)->getPid(),
+ comm->group()->actor(dst)->getPid(), tag,
+ comm, RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
request->op_ = op;
}
return request;
MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
- return new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
- comm->group()->index(src), smpi_process()->index(), tag,
- comm, PERSISTENT | RECV | PREPARED);
+ return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV | PREPARED);
}
MPI_Request Request::isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | ISEND | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SEND);
request->start();
return request;
}
MPI_Request Request::issend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag,comm, NON_PERSISTENT | ISEND | SSEND | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
request->start();
return request;
}
MPI_Request Request::irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
- comm->group()->index(src), smpi_process()->index(), tag, comm,
- NON_PERSISTENT | RECV);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype,
+ src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(src)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, NON_PERSISTENT | RECV);
request->start();
return request;
}
void Request::send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SEND);
request->start();
wait(&request, MPI_STATUS_IGNORE);
void Request::ssend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
- request = new Request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process()->index(),
- comm->group()->index(dst), tag, comm, NON_PERSISTENT | SSEND | SEND);
+ request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::Actor::self()->getPid(),
+ comm->group()->actor(dst)->getPid(), tag, comm, NON_PERSISTENT | SSEND | SEND);
request->start();
wait(&request,MPI_STATUS_IGNORE);
{
MPI_Request requests[2];
MPI_Status stats[2];
- int myid=smpi_process()->index();
- if ((comm->group()->index(dst) == myid) && (comm->group()->index(src) == myid)){
+ unsigned int myid = simgrid::s4u::Actor::self()->getPid();
+ if ((comm->group()->actor(dst)->getPid() == myid) && (comm->group()->actor(src)->getPid() == myid)){
Datatype::copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
return;
}
if ((flags_ & RECV) != 0) {
this->print_request("New recv");
- simgrid::smpi::Process* process = smpi_process_remote(dst_);
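+ // smpi_process_remote() now takes an ActorPtr, resolved here from the stored PID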
+ simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));
int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
real_size_=size_;
action_ = simcall_comm_irecv(
- process->process(), mailbox, buf_, &real_size_, &match_recv,
+ process->process()->getImpl(), mailbox, buf_, &real_size_, &match_recv,
process->replaying() ? &smpi_comm_null_copy_buffer_callback : smpi_comm_copy_data_callback, this, -1.0);
XBT_DEBUG("recv simcall posted");
if (async_small_thresh != 0 || (flags_ & RMA) != 0 )
xbt_mutex_release(mut);
} else { /* the RECV flag was not set, so this is a send */
- simgrid::smpi::Process* process = smpi_process_remote(dst_);
+ simgrid::smpi::Process* process = smpi_process_remote(simgrid::s4u::Actor::byPid(dst_));
int rank = src_;
if (TRACE_smpi_view_internals()) {
TRACE_smpi_send(rank, rank, dst_, tag_, size_);
if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
oldbuf = buf_;
if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
- if((smpi_privatize_global_variables != 0)
- && (static_cast<char*>(buf_) >= smpi_start_data_exe)
- && (static_cast<char*>(buf_) < smpi_start_data_exe + smpi_size_data_exe )){
+ if ((smpi_privatize_global_variables != 0) && (static_cast<char*>(buf_) >= smpi_data_exe_start) &&
+ (static_cast<char*>(buf_) < smpi_data_exe_start + smpi_data_exe_size)) {
XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
smpi_switch_data_segment(src_);
}
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
real_size_=size_;
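+ // src_ now holds an actor PID, so the former "src_ + 1" index-to-PID conversion is gone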
action_ = simcall_comm_isend(
- SIMIX_process_from_PID(src_ + 1), mailbox, size_, -1.0, buf, real_size_, &match_send,
+ simgrid::s4u::Actor::byPid(src_)->getImpl(), mailbox, size_, -1.0, buf, real_size_, &match_send,
&xbt_free_f, // how to free the userdata if a detached send fails
not process->replaying() ? smpi_comm_copy_data_callback : &smpi_comm_null_copy_buffer_callback, this,
// detach if msg size < eager/rdv switch limit
if (flag) {
finish_wait(request,status);
nsleeps=1;//reset the number of sleeps we will do next time
- if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & PERSISTENT)==0)
- *request = MPI_REQUEST_NULL;
+ if (*request != MPI_REQUEST_NULL && ((*request)->flags_ & PERSISTENT) == 0)
+ *request = MPI_REQUEST_NULL;
} else if (xbt_cfg_get_boolean("smpi/grow-injected-times")){
nsleeps++;
}
static int nsleeps = 1;
double speed = simgrid::s4u::Actor::self()->getHost()->getSpeed();
double maxrate = xbt_cfg_get_double("smpi/iprobe-cpu-usage");
- MPI_Request request = new Request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE :
- comm->group()->index(source), comm->rank(), tag, comm, PERSISTENT | RECV);
+ MPI_Request request = new Request(
+ nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : comm->group()->actor(source)->getPid(),
+ simgrid::s4u::Actor::self()->getPid(), tag, comm, PERSISTENT | RECV);
if (smpi_iprobe_sleep > 0) {
- smx_activity_t iprobe_sleep = simcall_execution_start("iprobe", /* flops to executek*/nsleeps*smpi_iprobe_sleep*speed*maxrate, /* priority */1.0, /* performance bound */maxrate*speed);
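+ // simcall_execution_start() now also takes the host on which this fake computation runs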
+ smx_activity_t iprobe_sleep = simcall_execution_start(
+ "iprobe", /* flops to executek*/ nsleeps * smpi_iprobe_sleep * speed * maxrate, /* priority */ 1.0,
+ /* performance bound */ maxrate * speed, smpi_process()->process()->getImpl()->host);
simcall_execution_wait(iprobe_sleep);
}
// behave like a receive, but don't do it
if (((req->flags_ & ACCUMULATE) != 0) ||
(datatype->flags() & DT_FLAG_DERIVED)) { // && (not smpi_is_shared(req->old_buf_))){
- if (not smpi_process()->replaying()) {
- if( smpi_privatize_global_variables != 0 && (static_cast<char*>(req->old_buf_) >= smpi_start_data_exe)
- && ((char*)req->old_buf_ < smpi_start_data_exe + smpi_size_data_exe )){
- XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment ");
- smpi_switch_data_segment(smpi_process()->index());
- }
+ if (not smpi_process()->replaying() && smpi_privatize_global_variables != 0 &&
+ static_cast<char*>(req->old_buf_) >= smpi_data_exe_start &&
+ static_cast<char*>(req->old_buf_) < smpi_data_exe_start + smpi_data_exe_size) {
+ XBT_VERB("Privatization : We are unserializing to a zone in global memory Switch data segment ");
+ smpi_switch_data_segment(simgrid::s4u::Actor::self()->getPid());
}
if(datatype->flags() & DT_FLAG_DERIVED){
}
if (TRACE_smpi_view_internals() && ((req->flags_ & RECV) != 0)){
- int rank = smpi_process()->index();
+ int rank = simgrid::s4u::Actor::self()->getPid();
int src_traced = (req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_);
TRACE_smpi_recv(src_traced, rank,req->tag_);
}
if (not accumulates.empty()) {
std::sort(accumulates.begin(), accumulates.end(), sort_accumulates);
- for (auto req : accumulates) {
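+ // iterate by reference so that finish_wait() receives a pointer to the stored request, not to a copy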
+ for (auto& req : accumulates) {
finish_wait(&req, status);
}
}
status[index] = *pstat;
}
if (requests[index] != MPI_REQUEST_NULL && (requests[index]->flags_ & NON_PERSISTENT))
- requests[index]=MPI_REQUEST_NULL;
+ requests[index] = MPI_REQUEST_NULL;
}else{
return MPI_UNDEFINED;
}
char key[KEY_SIZE];
if(id==MPI_FORTRAN_REQUEST_NULL)
return static_cast<MPI_Request>(MPI_REQUEST_NULL);
- return static_cast<MPI_Request>(xbt_dict_get(F2C::f2c_lookup(), get_key_id(key, id)));
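+ // std::unordered_map::at() throws for an unknown id, much as xbt_dict_get() did for a missing key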
+ return static_cast<MPI_Request>(F2C::f2c_lookup()->at(get_key_id(key, id)));
}
-int Request::add_f() {
- if(F2C::f2c_lookup()==nullptr){
- F2C::set_f2c_lookup(xbt_dict_new_homogeneous(nullptr));
+int Request::add_f()
+{
+ if (F2C::f2c_lookup() == nullptr) {
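+ // the Fortran-to-C lookup table is now a std::unordered_map instead of an xbt_dict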
+ F2C::set_f2c_lookup(new std::unordered_map<std::string, F2C*>);
}
char key[KEY_SIZE];
- xbt_dict_set(F2C::f2c_lookup(), get_key_id(key, F2C::f2c_id()), this, nullptr);
+ (*(F2C::f2c_lookup()))[get_key_id(key, F2C::f2c_id())] = this;
F2C::f2c_id_increment();
return F2C::f2c_id()-1;
}
-void Request::free_f(int id) {
+void Request::free_f(int id)
+{
if (id != MPI_FORTRAN_REQUEST_NULL) {
char key[KEY_SIZE];
- xbt_dict_remove(F2C::f2c_lookup(), get_key_id(key, id));
+ F2C::f2c_lookup()->erase(get_key_id(key, id));
}
}
}
}
-
-
-