[SMPI] Replay: Remove old functions / datatype
[simgrid.git] / src / smpi / internals / smpi_replay.cpp
index d38d6f9..2cc7ee2 100644
 #include <memory>
 #include <numeric>
 #include <unordered_map>
+#include <sstream>
 #include <vector>
 
 using simgrid::s4u::Actor;
 
+#include <tuple>
+// From https://stackoverflow.com/questions/7110301/generic-hash-for-tuples-in-unordered-map-unordered-set
+// This is all just to make std::unordered_map work with std::tuple. If we need this in other places,
+// this could go into a header file.
+namespace hash_tuple{
+    template <typename TT>
+    struct hash
+    {
+        size_t
+        operator()(TT const& tt) const
+        {
+            return std::hash<TT>()(tt);
+        }
+    };
+
+    template <class T>
+    inline void hash_combine(std::size_t& seed, T const& v)
+    {
+        seed ^= hash_tuple::hash<T>()(v) + 0x9e3779b9 + (seed<<6) + (seed>>2);
+    }
+
+    // Recursive template code derived from Matthieu M.
+    template <class Tuple, size_t Index = std::tuple_size<Tuple>::value - 1>
+    struct HashValueImpl
+    {
+      static void apply(size_t& seed, Tuple const& tuple)
+      {
+        HashValueImpl<Tuple, Index-1>::apply(seed, tuple);
+        hash_combine(seed, std::get<Index>(tuple));
+      }
+    };
+
+    template <class Tuple>
+    struct HashValueImpl<Tuple,0>
+    {
+      static void apply(size_t& seed, Tuple const& tuple)
+      {
+        hash_combine(seed, std::get<0>(tuple));
+      }
+    };
+
+    template <typename ... TT>
+    struct hash<std::tuple<TT...>>
+    {
+        size_t
+        operator()(std::tuple<TT...> const& tt) const
+        {
+            size_t seed = 0;
+            HashValueImpl<std::tuple<TT...> >::apply(seed, tt);
+            return seed;
+        }
+    };
+}
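
For context, a minimal sketch (not part of the patch) of what the hash_tuple helper enables: keying a std::unordered_map directly on a tuple. The names Key, table and demo are illustrative only:

    #include <tuple>
    #include <unordered_map>

    using Key = std::tuple<int, int, int>;                 // e.g. (src, dst, tag)
    std::unordered_map<Key, int, hash_tuple::hash<Key>> table;

    void demo()
    {
      table.insert({std::make_tuple(0, 1, 42), 7});        // insert under the tuple key
      auto it = table.find(std::make_tuple(0, 1, 42));     // O(1) average lookup
    }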
+
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_replay,smpi,"Trace Replay with SMPI");
 
-static int active_processes  = 0;
-static std::unordered_map<int, std::vector<MPI_Request>*> reqq;
+typedef std::tuple</*sender*/ int, /*receiver*/ int, /*tag*/ int> req_key_t;
+typedef std::unordered_map<req_key_t, MPI_Request, hash_tuple::hash<req_key_t>> req_storage_t;
 
 static MPI_Datatype MPI_DEFAULT_TYPE;
 
 #define CHECK_ACTION_PARAMS(action, mandatory, optional)                                                               \
   {                                                                                                                    \
-    if (action.size() < static_cast<unsigned long>(mandatory + 2))                                                     \
+    if (action.size() < static_cast<unsigned long>(mandatory + 2)) {                                                   \
+      std::stringstream ss;                                                                                            \
+      for (const auto& elem : action) {                                                                                \
+        ss << elem << " ";                                                                                             \
+      }                                                                                                                \
       THROWF(arg_error, 0, "%s replay failed.\n"                                                                       \
                            "%zu items were given on the line. First two should be process_id and action.  "            \
                            "This action needs after them %lu mandatory arguments, and accepts %lu optional ones. \n"   \
+                           "The full line that was given is:\n   %s\n"                                                 \
                            "Please contact the Simgrid team if support is needed",                                     \
-             __func__, action.size(), static_cast<unsigned long>(mandatory), static_cast<unsigned long>(optional));    \
+             __func__, action.size(), static_cast<unsigned long>(mandatory), static_cast<unsigned long>(optional),     \
+             ss.str().c_str());                                                                                        \
+    }                                                                                                                  \
   }
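
For illustration, with the full-line printout added above, a hypothetical truncated line `0 send 1` reaching a parser that expects 3 mandatory and 1 optional argument would now fail with roughly:

    parse replay failed.
    3 items were given on the line. First two should be process_id and action. This action needs 3 mandatory arguments after them and accepts 1 optional ones.
    The full line that was given is:
       0 send 1
    Please contact the SimGrid team if support is needed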
 
 static void log_timed_action(simgrid::xbt::ReplayAction& action, double clock)
@@ -45,16 +107,6 @@ static void log_timed_action(simgrid::xbt::ReplayAction& action, double clock)
   }
 }
 
-static std::vector<MPI_Request>* get_reqq_self()
-{
-  return reqq.at(simgrid::s4u::this_actor::getPid());
-}
-
-static void set_reqq_self(std::vector<MPI_Request> *mpi_request)
-{
-  reqq.insert({simgrid::s4u::this_actor::getPid(), mpi_request});
-}
-
 /* Helper function */
 static double parse_double(std::string string)
 {
@@ -65,26 +117,98 @@ namespace simgrid {
 namespace smpi {
 
 namespace replay {
+
+class RequestStorage {
+private:
+    req_storage_t store;
+
+public:
+    RequestStorage() {}
+    int size()
+    {
+      return store.size();
+    }
+
+    req_storage_t& get_store()
+    {
+      return store;
+    }
+
+    void get_requests(std::vector<MPI_Request>& vec)
+    {
+      const auto my_proc_id = simgrid::s4u::this_actor::get_pid();
+      for (auto& pair : store) {
+        auto& req = pair.second;
+        if (req != MPI_REQUEST_NULL && (req->src() == my_proc_id || req->dst() == my_proc_id)) {
+          vec.push_back(req);
+          req->print_request("MM"); // debug output
+        }
+      }
+    }
+
+    MPI_Request find(int src, int dst, int tag)
+    {
+      req_storage_t::iterator it = store.find(req_key_t(src, dst, tag));
+      return (it == store.end()) ? MPI_REQUEST_NULL : it->second;
+    }
+
+    void remove(MPI_Request req)
+    {
+      if (req == MPI_REQUEST_NULL) return;
+
+      // req->src() and req->dst() are 1-based actor pids, while the map keys use
+      // the 0-based ranks from the trace, hence the -1 offsets.
+      store.erase(req_key_t(req->src()-1, req->dst()-1, req->tag()));
+    }
+
+    void add(MPI_Request req)
+    {
+      if (req != MPI_REQUEST_NULL) // Can and does happen in the case of TestAction
+        store.insert({req_key_t(req->src()-1, req->dst()-1, req->tag()), req});
+    }
+
+    /* Sometimes we need to re-insert MPI_REQUEST_NULL, but we still need src, dst and tag */
+    void addNullRequest(int src, int dst, int tag)
+    {
+      store.insert({req_key_t(src, dst, tag), MPI_REQUEST_NULL});
+    }
+};
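
A minimal usage sketch (names from this patch; the calls mirror how SendAction and WaitAction use the class further down):

    // Inside a replay action (cf. SendAction::kernel below):
    MPI_Request request = Request::isend(nullptr, size, datatype, partner, tag, MPI_COMM_WORLD);
    req_storage->add(request);                       // registered under (src-1, dst-1, tag)
    // ... later, in WaitAction::kernel:
    MPI_Request r = req_storage->find(args.src, args.dst, args.tag);
    req_storage->remove(r);                          // drop it once it completed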
+
 class ActionArgParser {
 public:
   virtual ~ActionArgParser() = default;
   virtual void parse(simgrid::xbt::ReplayAction& action, std::string name) { CHECK_ACTION_PARAMS(action, 0, 0) }
 };
 
+class WaitTestParser : public ActionArgParser {
+public:
+  int src;
+  int dst;
+  int tag;
+
+  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
+  {
+    CHECK_ACTION_PARAMS(action, 3, 0)
+    src = std::stoi(action[2]);
+    dst = std::stoi(action[3]);
+    tag = std::stoi(action[4]);
+  }
+};
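
Field layout this parser assumes (line contents hypothetical): action[0] is the acting process, action[1] the action name, then src, dst and tag, so a wait line would read:

    2 wait 1 2 42      (process 2 waits on the request with src=1, dst=2, tag=42)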
+
 class SendRecvParser : public ActionArgParser {
 public:
   /* communication partner; if we send, this is the receiver and vice versa */
   int partner;
   double size;
+  int tag;
   MPI_Datatype datatype1 = MPI_DEFAULT_TYPE;
 
   void parse(simgrid::xbt::ReplayAction& action, std::string name) override
   {
-    CHECK_ACTION_PARAMS(action, 2, 1)
+    CHECK_ACTION_PARAMS(action, 3, 1)
     partner = std::stoi(action[2]);
-    size    = parse_double(action[3]);
-    if (action.size() > 4)
-      datatype1 = simgrid::smpi::Datatype::decode(action[4]);
+    tag     = std::stoi(action[3]);
+    size    = parse_double(action[4]);
+    if (action.size() > 5)
+      datatype1 = simgrid::smpi::Datatype::decode(action[5]);
   }
 };
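
The shifted indices above make the tag a mandatory third field, placed before the size. Hypothetical trace lines contrasting the old and new layouts (the trailing datatype stays optional):

    old:  1 Isend 2 1000 0        <pid> <action> <partner> <size> [datatype]
    new:  1 Isend 2 42 1000 0     <pid> <action> <partner> <tag> <size> [datatype]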
 
@@ -394,11 +518,13 @@ public:
 template <class T> class ReplayAction {
 protected:
   const std::string name;
+  RequestStorage* req_storage; // Points to the right storage for this process, nullptr except for Send/Recv/Wait/Test actions.
   const int my_proc_id;
   T args;
 
 public:
-  explicit ReplayAction(std::string name) : name(name), my_proc_id(simgrid::s4u::this_actor::getPid()) {}
+  explicit ReplayAction(std::string name, RequestStorage& storage) : name(name), req_storage(&storage), my_proc_id(simgrid::s4u::this_actor::get_pid()) {}
+  explicit ReplayAction(std::string name) : name(name), req_storage(nullptr), my_proc_id(simgrid::s4u::this_actor::get_pid()) {}
   virtual ~ReplayAction() = default;
 
   virtual void execute(simgrid::xbt::ReplayAction& action)
@@ -424,17 +550,17 @@ public:
   }
 };
 
-class WaitAction : public ReplayAction<ActionArgParser> {
+class WaitAction : public ReplayAction<WaitTestParser> {
 public:
-  WaitAction() : ReplayAction("Wait") {}
+  WaitAction(RequestStorage& storage) : ReplayAction("Wait", storage) {}
   void kernel(simgrid::xbt::ReplayAction& action) override
   {
     std::string s = boost::algorithm::join(action, " ");
-    xbt_assert(get_reqq_self()->size(), "action wait not preceded by any irecv or isend: %s", s.c_str());
-    MPI_Request request = get_reqq_self()->back();
-    get_reqq_self()->pop_back();
+    xbt_assert(req_storage->size(), "action wait not preceded by any irecv or isend: %s", s.c_str());
+    MPI_Request request = req_storage->find(args.src, args.dst, args.tag);
+    req_storage->remove(request);
 
-    if (request == nullptr) {
+    if (request == MPI_REQUEST_NULL) {
       /* Assume that the trace is well formed, meaning the comm might have been caught by a MPI_test. Then just
        * return.*/
       return;
@@ -444,8 +570,6 @@ public:
 
     // Must be taken before Request::wait() since the request may be set to
     // MPI_REQUEST_NULL by Request::wait!
-    int src                  = request->comm()->group()->rank(request->src());
-    int dst                  = request->comm()->group()->rank(request->dst());
     bool is_wait_for_receive = (request->flags() & RECV);
     // TODO: Here we take the rank while we normally take the process id (look for my_proc_id)
     TRACE_smpi_comm_in(rank, __func__, new simgrid::instr::NoOpTIData("wait"));
@@ -455,28 +579,28 @@ public:
 
     TRACE_smpi_comm_out(rank);
     if (is_wait_for_receive)
-      TRACE_smpi_recv(src, dst, 0);
+      TRACE_smpi_recv(args.src, args.dst, args.tag);
   }
 };
 
 class SendAction : public ReplayAction<SendRecvParser> {
 public:
   SendAction() = delete;
-  explicit SendAction(std::string name) : ReplayAction(name) {}
+  explicit SendAction(std::string name, RequestStorage& storage) : ReplayAction(name, storage) {}
   void kernel(simgrid::xbt::ReplayAction& action) override
   {
-    int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner)->getPid();
+    int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid();
 
     TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size,
-                                                                             Datatype::encode(args.datatype1)));
+                                                                             args.tag, Datatype::encode(args.datatype1)));
     if (not TRACE_smpi_view_internals())
-      TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, 0, args.size * args.datatype1->size());
+      TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, args.tag, args.size * args.datatype1->size());
 
     if (name == "send") {
-      Request::send(nullptr, args.size, args.datatype1, args.partner, 0, MPI_COMM_WORLD);
+      Request::send(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
     } else if (name == "Isend") {
-      MPI_Request request = Request::isend(nullptr, args.size, args.datatype1, args.partner, 0, MPI_COMM_WORLD);
-      get_reqq_self()->push_back(request);
+      MPI_Request request = Request::isend(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
+      req_storage->add(request);
     } else {
       xbt_die("Don't know this action, %s", name.c_str());
     }
@@ -488,32 +612,32 @@ public:
 class RecvAction : public ReplayAction<SendRecvParser> {
 public:
   RecvAction() = delete;
-  explicit RecvAction(std::string name) : ReplayAction(name) {}
+  explicit RecvAction(std::string name, RequestStorage& storage) : ReplayAction(name, storage) {}
   void kernel(simgrid::xbt::ReplayAction& action) override
   {
-    int src_traced = MPI_COMM_WORLD->group()->actor(args.partner)->getPid();
+    int src_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid();
 
     TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size,
-                                                                             Datatype::encode(args.datatype1)));
+                                                                             args.tag, Datatype::encode(args.datatype1)));
 
     MPI_Status status;
     // unknown size from the receiver point of view
     if (args.size <= 0.0) {
-      Request::probe(args.partner, 0, MPI_COMM_WORLD, &status);
+      Request::probe(args.partner, args.tag, MPI_COMM_WORLD, &status);
       args.size = status.count;
     }
 
     if (name == "recv") {
-      Request::recv(nullptr, args.size, args.datatype1, args.partner, 0, MPI_COMM_WORLD, &status);
+      Request::recv(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD, &status);
     } else if (name == "Irecv") {
-      MPI_Request request = Request::irecv(nullptr, args.size, args.datatype1, args.partner, 0, MPI_COMM_WORLD);
-      get_reqq_self()->push_back(request);
+      MPI_Request request = Request::irecv(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
+      req_storage->add(request);
     }
 
     TRACE_smpi_comm_out(my_proc_id);
     // TODO: Check why this was only activated in the "recv" case and not in the "Irecv" case
     if (name == "recv" && not TRACE_smpi_view_internals()) {
-      TRACE_smpi_recv(src_traced, my_proc_id, 0);
+      TRACE_smpi_recv(src_traced, my_proc_id, args.tag);
     }
   }
 };
@@ -529,17 +653,17 @@ public:
   }
 };
 
-class TestAction : public ReplayAction<ActionArgParser> {
+class TestAction : public ReplayAction<WaitTestParser> {
 public:
-  TestAction() : ReplayAction("Test") {}
+  TestAction(RequestStorage& storage) : ReplayAction("Test", storage) {}
   void kernel(simgrid::xbt::ReplayAction& action) override
   {
-    MPI_Request request = get_reqq_self()->back();
-    get_reqq_self()->pop_back();
+    MPI_Request request = req_storage->find(args.src, args.dst, args.tag);
+    req_storage->remove(request);
     // if request is null here, this may mean that a previous test has succeeded
     // Different times in traced application and replayed version may lead to this
     // In this case, ignore the extra calls.
-    if (request != nullptr) {
+    if (request != MPI_REQUEST_NULL) {
       TRACE_smpi_testing_in(my_proc_id);
 
       MPI_Status status;
@@ -548,7 +672,10 @@ public:
       XBT_DEBUG("MPI_Test result: %d", flag);
       /* push back request in vector to be caught by a subsequent wait. if the test did succeed, the request is now
        * nullptr.*/
-      get_reqq_self()->push_back(request);
+      if (request == MPI_REQUEST_NULL)
+        req_storage->addNullRequest(args.src, args.dst, args.tag);
+      else
+        req_storage->add(request);
 
       TRACE_smpi_testing_out(my_proc_id);
     }
@@ -566,10 +693,6 @@ public:
 
     /* start a simulated timer */
     smpi_process()->simulated_start();
-    /*initialize the number of active processes */
-    active_processes = smpi_process_count();
-
-    set_reqq_self(new std::vector<MPI_Request>);
   }
 };
 
@@ -581,21 +704,23 @@ public:
 
 class WaitAllAction : public ReplayAction<ActionArgParser> {
 public:
-  WaitAllAction() : ReplayAction("waitAll") {}
+  WaitAllAction(RequestStorage& storage) : ReplayAction("waitAll", storage) {}
   void kernel(simgrid::xbt::ReplayAction& action) override
   {
-    const unsigned int count_requests = get_reqq_self()->size();
+    const unsigned int count_requests = req_storage->size();
 
     if (count_requests > 0) {
       TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData("waitAll", -1, count_requests, ""));
       std::vector<std::pair</*sender*/int,/*recv*/int>> sender_receiver;
-      for (const auto& req : (*get_reqq_self())) {
+      std::vector<MPI_Request> reqs;
+      req_storage->get_requests(reqs);
+      for (const auto& req : reqs) {
         if (req && (req->flags() & RECV)) {
           sender_receiver.push_back({req->src(), req->dst()});
         }
       }
       MPI_Status status[count_requests];
-      Request::waitall(count_requests, &(*get_reqq_self())[0], status);
+      Request::waitall(reqs.size(), reqs.data(), status);
 
       for (auto& pair : sender_receiver) {
         TRACE_smpi_recv(pair.first, pair.second, 0);
@@ -622,7 +747,7 @@ public:
   void kernel(simgrid::xbt::ReplayAction& action) override
   {
     TRACE_smpi_comm_in(my_proc_id, "action_bcast",
-                       new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->getPid(),
+                       new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
                                                       -1.0, args.size, -1, Datatype::encode(args.datatype1), ""));
 
     Colls::bcast(send_buffer(args.size * args.datatype1->size()), args.size, args.datatype1, args.root, MPI_COMM_WORLD);
@@ -637,8 +762,9 @@ public:
   void kernel(simgrid::xbt::ReplayAction& action) override
   {
     TRACE_smpi_comm_in(my_proc_id, "action_reduce",
-                       new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->getPid(), args.comp_size,
-                                                      args.comm_size, -1, Datatype::encode(args.datatype1), ""));
+                       new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
+                                                      args.comp_size, args.comm_size, -1,
+                                                      Datatype::encode(args.datatype1), ""));
 
     Colls::reduce(send_buffer(args.comm_size * args.datatype1->size()),
         recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL, args.root, MPI_COMM_WORLD);
@@ -674,9 +800,9 @@ public:
                                                     Datatype::encode(args.datatype1),
                                                     Datatype::encode(args.datatype2)));
 
-    Colls::alltoall(send_buffer(args.send_size*args.comm_size* args.datatype1->size()), 
-      args.send_size, args.datatype1, recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()),
-      args.recv_size, args.datatype2, MPI_COMM_WORLD);
+    Colls::alltoall(send_buffer(args.send_size * args.comm_size * args.datatype1->size()), args.send_size,
+                    args.datatype1, recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()),
+                    args.recv_size, args.datatype2, MPI_COMM_WORLD);
 
     TRACE_smpi_comm_out(my_proc_id);
   }
@@ -715,14 +841,14 @@ public:
                                                Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
 
     if (name == "gatherV") {
-      Colls::gatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1, 
-                     (rank == args.root) ? recv_buffer(args.recv_size_sum  * args.datatype2->size()) : nullptr, args.recvcounts->data(), args.disps.data(), args.datatype2, args.root,
-                     MPI_COMM_WORLD);
+      Colls::gatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
+                     (rank == args.root) ? recv_buffer(args.recv_size_sum * args.datatype2->size()) : nullptr,
+                     args.recvcounts->data(), args.disps.data(), args.datatype2, args.root, MPI_COMM_WORLD);
     }
     else {
-      Colls::allgatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1, 
-                        recv_buffer(args.recv_size_sum * args.datatype2->size()), args.recvcounts->data(), args.disps.data(), args.datatype2,
-                    MPI_COMM_WORLD);
+      Colls::allgatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
+                        recv_buffer(args.recv_size_sum * args.datatype2->size()), args.recvcounts->data(),
+                        args.disps.data(), args.datatype2, MPI_COMM_WORLD);
     }
 
     TRACE_smpi_comm_out(my_proc_id);
@@ -757,9 +883,10 @@ public:
           nullptr, Datatype::encode(args.datatype1),
           Datatype::encode(args.datatype2)));
 
-    Colls::scatterv((rank == args.root) ? send_buffer(args.send_size_sum * args.datatype1->size()) : nullptr, args.sendcounts->data(), args.disps.data(), 
-        args.datatype1, recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, args.root,
-        MPI_COMM_WORLD);
+    Colls::scatterv((rank == args.root) ? send_buffer(args.send_size_sum * args.datatype1->size()) : nullptr,
+                    args.sendcounts->data(), args.disps.data(), args.datatype1,
+                    recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, args.root,
+                    MPI_COMM_WORLD);
 
     TRACE_smpi_comm_out(my_proc_id);
   }
@@ -775,8 +902,9 @@ public:
                                                          std::to_string(args.comp_size), /* ugly hack to print comp_size */
                                                          Datatype::encode(args.datatype1)));
 
-    Colls::reduce_scatter(send_buffer(args.recv_size_sum * args.datatype1->size()), recv_buffer(args.recv_size_sum * args.datatype1->size()), 
-                          args.recvcounts->data(), args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
+    Colls::reduce_scatter(send_buffer(args.recv_size_sum * args.datatype1->size()),
+                          recv_buffer(args.recv_size_sum * args.datatype1->size()), args.recvcounts->data(),
+                          args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
 
     smpi_execute_flops(args.comp_size);
     TRACE_smpi_comm_out(my_proc_id);
@@ -802,6 +930,7 @@ public:
 } // Replay Namespace
 }} // namespace simgrid::smpi
 
+std::vector<simgrid::smpi::replay::RequestStorage> storage;
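
storage keeps one RequestStorage per simulated process; smpi_replay_init below sizes it once and every point-to-point action indexes it with get_pid() - 1, since actor pids are 1-based while the vector is 0-indexed. A minimal sketch of that mapping, assuming pids run 1..N:

    storage.resize(smpi_process_count());   // one slot per process, done once in smpi_replay_init
    simgrid::smpi::replay::RequestStorage& mine = storage[simgrid::s4u::this_actor::get_pid() - 1];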
 /** @brief Only initialize the replay, don't do it for real */
 void smpi_replay_init(int* argc, char*** argv)
 {
@@ -809,7 +938,9 @@ void smpi_replay_init(int* argc, char*** argv)
   smpi_process()->mark_as_initialized();
   smpi_process()->set_replaying(true);
 
-  int my_proc_id = simgrid::s4u::this_actor::getPid();
+  int my_proc_id = simgrid::s4u::this_actor::get_pid();
+  storage.resize(smpi_process_count());
+
   TRACE_smpi_init(my_proc_id);
   TRACE_smpi_computing_init(my_proc_id);
   TRACE_smpi_comm_in(my_proc_id, "smpi_replay_run_init", new simgrid::instr::NoOpTIData("init"));
@@ -820,13 +951,13 @@ void smpi_replay_init(int* argc, char*** argv)
   xbt_replay_action_register("comm_split",[](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
   xbt_replay_action_register("comm_dup",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
 
-  xbt_replay_action_register("send",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::SendAction("send").execute(action); });
-  xbt_replay_action_register("Isend", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::SendAction("Isend").execute(action); });
-  xbt_replay_action_register("recv",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::RecvAction("recv").execute(action); });
-  xbt_replay_action_register("Irecv", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::RecvAction("Irecv").execute(action); });
-  xbt_replay_action_register("test",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::TestAction().execute(action); });
-  xbt_replay_action_register("wait",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::WaitAction().execute(action); });
-  xbt_replay_action_register("waitAll", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::WaitAllAction().execute(action); });
+  xbt_replay_action_register("send",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::SendAction("send", storage[simgrid::s4u::this_actor::get_pid()-1]).execute(action); });
+  xbt_replay_action_register("Isend", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::SendAction("Isend", storage[simgrid::s4u::this_actor::get_pid()-1]).execute(action); });
+  xbt_replay_action_register("recv",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::RecvAction("recv", storage[simgrid::s4u::this_actor::get_pid()-1]).execute(action); });
+  xbt_replay_action_register("Irecv", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::RecvAction("Irecv", storage[simgrid::s4u::this_actor::get_pid()-1]).execute(action); });
+  xbt_replay_action_register("test",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::TestAction(storage[simgrid::s4u::this_actor::get_pid()-1]).execute(action); });
+  xbt_replay_action_register("wait",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::WaitAction(storage[simgrid::s4u::this_actor::get_pid()-1]).execute(action); });
+  xbt_replay_action_register("waitAll", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::WaitAllAction(storage[simgrid::s4u::this_actor::get_pid()-1]).execute(action); });
   xbt_replay_action_register("barrier", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::BarrierAction().execute(action); });
   xbt_replay_action_register("bcast",   [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::BcastAction().execute(action); });
   xbt_replay_action_register("reduce",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ReduceAction().execute(action); });
@@ -857,24 +988,25 @@ void smpi_replay_init(int* argc, char*** argv)
 /** @brief actually run the replay after initialization */
 void smpi_replay_main(int* argc, char*** argv)
 {
+  static int active_processes = 0;
+  active_processes++;
   simgrid::xbt::replay_runner(*argc, *argv);
 
   /* and now, finalize everything */
   /* One active process will stop. Decrease the counter*/
-  XBT_DEBUG("There are %zu elements in reqq[*]", get_reqq_self()->size());
-  if (not get_reqq_self()->empty()) {
-    unsigned int count_requests=get_reqq_self()->size();
+  unsigned int count_requests = storage[simgrid::s4u::this_actor::get_pid() - 1].size();
+  XBT_DEBUG("There are %u elements in the request storage of this process", count_requests);
+  if (count_requests > 0) {
     MPI_Request requests[count_requests];
     MPI_Status status[count_requests];
     unsigned int i=0;
 
-    for (auto const& req : *get_reqq_self()) {
-      requests[i] = req;
+    for (auto const& pair : storage[simgrid::s4u::this_actor::get_pid() - 1].get_store()) {
+      requests[i] = pair.second;
       i++;
     }
     simgrid::smpi::Request::waitall(count_requests, requests, status);
   }
-  delete get_reqq_self();
   active_processes--;
 
   if(active_processes==0){
@@ -883,13 +1015,13 @@ void smpi_replay_main(int* argc, char*** argv)
     smpi_free_replay_tmp_buffers();
   }
 
-  TRACE_smpi_comm_in(simgrid::s4u::this_actor::getPid(), "smpi_replay_run_finalize",
+  TRACE_smpi_comm_in(simgrid::s4u::this_actor::get_pid(), "smpi_replay_run_finalize",
                      new simgrid::instr::NoOpTIData("finalize"));
 
   smpi_process()->finalize();
 
-  TRACE_smpi_comm_out(simgrid::s4u::this_actor::getPid());
-  TRACE_smpi_finalize(simgrid::s4u::this_actor::getPid());
+  TRACE_smpi_comm_out(simgrid::s4u::this_actor::get_pid());
+  TRACE_smpi_finalize(simgrid::s4u::this_actor::get_pid());
 }
 
 /** @brief chain a replay initialization and a replay start */