#include <memory>
#include <numeric>
#include <unordered_map>
+#include <sstream>
#include <vector>
using simgrid::s4u::Actor;
+#include <tuple>
+// From https://stackoverflow.com/questions/7110301/generic-hash-for-tuples-in-unordered-map-unordered-set
+// This is all just to make std::unordered_map work with std::tuple. If we need this in other places,
+// this could go into a header file.
+namespace hash_tuple {
+ template <typename TT>
+ struct hash
+ {
+ size_t
+ operator()(TT const& tt) const
+ {
+ return std::hash<TT>()(tt);
+ }
+ };
+
+ template <class T>
+ inline void hash_combine(std::size_t& seed, T const& v)
+ {
+ seed ^= hash_tuple::hash<T>()(v) + 0x9e3779b9 + (seed<<6) + (seed>>2);
+ }
+
+ // Recursive template code derived from Matthieu M.
+ template <class Tuple, size_t Index = std::tuple_size<Tuple>::value - 1>
+ struct HashValueImpl
+ {
+ static void apply(size_t& seed, Tuple const& tuple)
+ {
+ HashValueImpl<Tuple, Index-1>::apply(seed, tuple);
+ hash_combine(seed, std::get<Index>(tuple));
+ }
+ };
+
+ template <class Tuple>
+ struct HashValueImpl<Tuple,0>
+ {
+ static void apply(size_t& seed, Tuple const& tuple)
+ {
+ hash_combine(seed, std::get<0>(tuple));
+ }
+ };
+
+ template <typename ... TT>
+ struct hash<std::tuple<TT...>>
+ {
+ size_t
+ operator()(std::tuple<TT...> const& tt) const
+ {
+ size_t seed = 0;
+ HashValueImpl<std::tuple<TT...>>::apply(seed, tt);
+ return seed;
+ }
+ };
+}
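+
+// Usage sketch (illustrative only): with hash_tuple::hash supplied as the hash
+// functor, std::unordered_map accepts tuple keys, which is exactly how
+// req_storage_t below uses it:
+//   std::unordered_map<std::tuple<int, int, int>, int,
+//                      hash_tuple::hash<std::tuple<int, int, int>>> example;
+//   example[std::make_tuple(1, 2, 3)] = 42; // hash_combine folds the three elements into one seed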
+
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_replay,smpi,"Trace Replay with SMPI");
-static int active_processes = 0;
static std::unordered_map<int, std::vector<MPI_Request>*> reqq;
+typedef std::tuple</* sender */ int, /* receiver */ int, /* tag */ int> req_key_t;
+typedef std::unordered_map<req_key_t, MPI_Request, hash_tuple::hash<req_key_t>> req_storage_t;
static MPI_Datatype MPI_DEFAULT_TYPE;
#define CHECK_ACTION_PARAMS(action, mandatory, optional) \
{ \
- if (action.size() < static_cast<unsigned long>(mandatory + 2)) \
+ if (action.size() < static_cast<unsigned long>(mandatory + 2)) { \
+ std::stringstream ss; \
+ for (const auto& elem : action) { \
+ ss << elem << " "; \
+ } \
THROWF(arg_error, 0, "%s replay failed.\n" \
"%zu items were given on the line. First two should be process_id and action. " \
"This action needs after them %lu mandatory arguments, and accepts %lu optional ones. \n" \
+ "The full line that was given is:\n %s\n" \
"Please contact the Simgrid team if support is needed", \
- __func__, action.size(), static_cast<unsigned long>(mandatory), static_cast<unsigned long>(optional)); \
+ __func__, action.size(), static_cast<unsigned long>(mandatory), static_cast<unsigned long>(optional), \
+ ss.str().c_str()); \
+ } \
}
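+// Example (hypothetical trace line): for "0 send 1 3 1e6", action holds
+// {"0", "send", "1", "3", "1e6"}, so CHECK_ACTION_PARAMS(action, 3, 1) passes:
+// action.size() == 5 >= mandatory + 2, with the optional datatype argument absent.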
static void log_timed_action(simgrid::xbt::ReplayAction& action, double clock)
static std::vector<MPI_Request>* get_reqq_self()
{
- return reqq.at(simgrid::s4u::this_actor::getPid());
+ return reqq.at(simgrid::s4u::this_actor::get_pid());
}
static void set_reqq_self(std::vector<MPI_Request> *mpi_request)
{
- reqq.insert({simgrid::s4u::this_actor::getPid(), mpi_request});
+ reqq.insert({simgrid::s4u::this_actor::get_pid(), mpi_request});
}
/* Helper function */
namespace smpi {
namespace replay {
+
+class RequestStorage {
+private:
+ req_storage_t store;
+
+public:
+ RequestStorage() = default;
+
+ size_t size() const
+ {
+ return store.size();
+ }
+
+ req_storage_t& get_store()
+ {
+ return store;
+ }
+
+ void get_requests(std::vector<MPI_Request>& vec)
+ {
+ const auto my_proc_id = simgrid::s4u::this_actor::get_pid();
+ for (auto& pair : store) {
+ auto& req = pair.second;
+ if (req != MPI_REQUEST_NULL && (req->src() == my_proc_id || req->dst() == my_proc_id)) {
+ vec.push_back(pair.second);
+ pair.second->print_request("MM");
+ }
+ }
+ }
+
+ MPI_Request find(int src, int dst, int tag)
+ {
+ req_storage_t::iterator it = store.find(req_key_t(src, dst, tag));
+ return (it == store.end()) ? MPI_REQUEST_NULL : it->second;
+ }
+
+ void remove(MPI_Request req)
+ {
+ if (req == MPI_REQUEST_NULL) return;
+
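+ // Assumption: req->src()/req->dst() carry 1-based actor ids, while the keys are
+ // built from the 0-based ids found in the trace, hence the -1 (add() applies the same shift).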
+ store.erase(req_key_t(req->src()-1, req->dst()-1, req->tag()));
+ }
+
+ void add(MPI_Request req)
+ {
+ if (req != MPI_REQUEST_NULL) // Can and does happen in the case of TestAction
+ store.insert({req_key_t(req->src()-1, req->dst()-1, req->tag()), req});
+ }
+
+ /* Sometimes we need to re-insert MPI_REQUEST_NULL, but we still need the src, dst and tag */
+ void addNullRequest(int src, int dst, int tag)
+ {
+ store.insert({req_key_t(src, dst, tag), MPI_REQUEST_NULL});
+ }
+};
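+
+// Sketch of intended use (illustrative, not taken from this patch):
+//   RequestStorage reqs;
+//   reqs.add(request);                        // remember a pending Isend/Irecv
+//   MPI_Request r = reqs.find(src, dst, tag); // look it up again at wait/test time
+//   reqs.remove(r);                           // drop it once it has completed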
+
class ActionArgParser {
public:
virtual ~ActionArgParser() = default;
virtual void parse(simgrid::xbt::ReplayAction& action, std::string name) { CHECK_ACTION_PARAMS(action, 0, 0) }
};
+class WaitTestParser : public ActionArgParser {
+public:
+ int src;
+ int dst;
+ int tag;
+
+ void parse(simgrid::xbt::ReplayAction& action, std::string name) override
+ {
+ CHECK_ACTION_PARAMS(action, 3, 0)
+ src = std::stoi(action[2]);
+ dst = std::stoi(action[3]);
+ tag = std::stoi(action[4]);
+ }
+};
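+// Example: a trace line "2 wait 0 1 3" parses to src = 0, dst = 1, tag = 3
+// (fields 0 and 1 are the process id and the action name).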
+
class SendRecvParser : public ActionArgParser {
public:
/* communication partner; if we send, this is the receiver and vice versa */
int partner;
double size;
+ int tag;
MPI_Datatype datatype1 = MPI_DEFAULT_TYPE;
void parse(simgrid::xbt::ReplayAction& action, std::string name) override
{
- CHECK_ACTION_PARAMS(action, 2, 1)
+ CHECK_ACTION_PARAMS(action, 3, 1)
partner = std::stoi(action[2]);
- size = parse_double(action[3]);
- if (action.size() > 4)
- datatype1 = simgrid::smpi::Datatype::decode(action[4]);
+ tag = std::stoi(action[3]);
+ size = parse_double(action[4]);
+ if (action.size() > 5)
+ datatype1 = simgrid::smpi::Datatype::decode(action[5]);
}
};
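+// Example: with the tag now part of the trace, "0 send 1 3 1e6" parses as
+// partner = 1, tag = 3, size = 1e6; an optional fifth field selects the datatype to decode.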
template <class T> class ReplayAction {
protected:
const std::string name;
+ const int my_proc_id;
T args;
- int my_proc_id;
-
public:
- explicit ReplayAction(std::string name) : name(name), my_proc_id(simgrid::s4u::this_actor::getPid()) {}
+ explicit ReplayAction(std::string name) : name(name), my_proc_id(simgrid::s4u::this_actor::get_pid()) {}
virtual ~ReplayAction() = default;
virtual void execute(simgrid::xbt::ReplayAction& action)
}
};
-class WaitAction : public ReplayAction<ActionArgParser> {
+class WaitAction : public ReplayAction<WaitTestParser> {
public:
WaitAction() : ReplayAction("Wait") {}
void kernel(simgrid::xbt::ReplayAction& action) override
MPI_Request request = get_reqq_self()->back();
get_reqq_self()->pop_back();
- if (request == nullptr) {
+ if (request == MPI_REQUEST_NULL) {
/* Assume that the trace is well formed, meaning the comm might have been caught by a MPI_test. Then just
* return.*/
return;
// Must be taken before Request::wait() since the request may be set to
// MPI_REQUEST_NULL by Request::wait!
- int src = request->comm()->group()->rank(request->src());
- int dst = request->comm()->group()->rank(request->dst());
bool is_wait_for_receive = (request->flags() & RECV);
// TODO: Here we take the rank while we normally take the process id (look for my_proc_id)
TRACE_smpi_comm_in(rank, __func__, new simgrid::instr::NoOpTIData("wait"));
TRACE_smpi_comm_out(rank);
if (is_wait_for_receive)
- TRACE_smpi_recv(src, dst, 0);
+ TRACE_smpi_recv(args.src, args.dst, args.tag);
}
};
explicit SendAction(std::string name) : ReplayAction(name) {}
void kernel(simgrid::xbt::ReplayAction& action) override
{
- int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner)->getPid();
+ int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid();
TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size,
- Datatype::encode(args.datatype1)));
+ args.tag, Datatype::encode(args.datatype1)));
if (not TRACE_smpi_view_internals())
- TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, 0, args.size * args.datatype1->size());
+ TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, args.tag, args.size * args.datatype1->size());
if (name == "send") {
- Request::send(nullptr, args.size, args.datatype1, args.partner, 0, MPI_COMM_WORLD);
+ Request::send(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
} else if (name == "Isend") {
- MPI_Request request = Request::isend(nullptr, args.size, args.datatype1, args.partner, 0, MPI_COMM_WORLD);
+ MPI_Request request = Request::isend(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
get_reqq_self()->push_back(request);
} else {
xbt_die("Don't know this action, %s", name.c_str());
explicit RecvAction(std::string name) : ReplayAction(name) {}
void kernel(simgrid::xbt::ReplayAction& action) override
{
- int src_traced = MPI_COMM_WORLD->group()->actor(args.partner)->getPid();
+ int src_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid();
TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size,
- Datatype::encode(args.datatype1)));
+ args.tag, Datatype::encode(args.datatype1)));
MPI_Status status;
// unknown size from the receiver point of view
if (args.size <= 0.0) {
- Request::probe(args.partner, 0, MPI_COMM_WORLD, &status);
+ Request::probe(args.partner, args.tag, MPI_COMM_WORLD, &status);
args.size = status.count;
}
if (name == "recv") {
- Request::recv(nullptr, args.size, args.datatype1, args.partner, 0, MPI_COMM_WORLD, &status);
+ Request::recv(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD, &status);
} else if (name == "Irecv") {
- MPI_Request request = Request::irecv(nullptr, args.size, args.datatype1, args.partner, 0, MPI_COMM_WORLD);
+ MPI_Request request = Request::irecv(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
get_reqq_self()->push_back(request);
}
TRACE_smpi_comm_out(my_proc_id);
// TODO: Check why this was only activated in the "recv" case and not in the "Irecv" case
if (name == "recv" && not TRACE_smpi_view_internals()) {
- TRACE_smpi_recv(src_traced, my_proc_id, 0);
+ TRACE_smpi_recv(src_traced, my_proc_id, args.tag);
}
}
};
}
};
-class TestAction : public ReplayAction<ActionArgParser> {
+class TestAction : public ReplayAction<WaitTestParser> {
public:
TestAction() : ReplayAction("Test") {}
void kernel(simgrid::xbt::ReplayAction& action) override
// if request is null here, this may mean that a previous test has succeeded
// Different times in traced application and replayed version may lead to this
// In this case, ignore the extra calls.
- if (request != nullptr) {
+ if (request != MPI_REQUEST_NULL) {
TRACE_smpi_testing_in(my_proc_id);
MPI_Status status;
/* start a simulated timer */
smpi_process()->simulated_start();
- /*initialize the number of active processes */
- active_processes = smpi_process_count();
-
set_reqq_self(new std::vector<MPI_Request>);
}
};
void kernel(simgrid::xbt::ReplayAction& action) override
{
TRACE_smpi_comm_in(my_proc_id, "action_bcast",
- new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->getPid(),
+ new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
-1.0, args.size, -1, Datatype::encode(args.datatype1), ""));
Colls::bcast(send_buffer(args.size * args.datatype1->size()), args.size, args.datatype1, args.root, MPI_COMM_WORLD);
void kernel(simgrid::xbt::ReplayAction& action) override
{
TRACE_smpi_comm_in(my_proc_id, "action_reduce",
- new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->getPid(), args.comp_size,
- args.comm_size, -1, Datatype::encode(args.datatype1), ""));
+ new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
+ args.comp_size, args.comm_size, -1,
+ Datatype::encode(args.datatype1), ""));
Colls::reduce(send_buffer(args.comm_size * args.datatype1->size()),
recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL, args.root, MPI_COMM_WORLD);
Datatype::encode(args.datatype1),
Datatype::encode(args.datatype2)));
- Colls::alltoall(send_buffer(args.send_size*args.comm_size* args.datatype1->size()),
- args.send_size, args.datatype1, recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()),
- args.recv_size, args.datatype2, MPI_COMM_WORLD);
+ Colls::alltoall(send_buffer(args.send_size * args.comm_size * args.datatype1->size()), args.send_size,
+ args.datatype1, recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()),
+ args.recv_size, args.datatype2, MPI_COMM_WORLD);
TRACE_smpi_comm_out(my_proc_id);
}
Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
if (name == "gatherV") {
- Colls::gatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
- (rank == args.root) ? recv_buffer(args.recv_size_sum * args.datatype2->size()) : nullptr, args.recvcounts->data(), args.disps.data(), args.datatype2, args.root,
- MPI_COMM_WORLD);
+ Colls::gatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
+ (rank == args.root) ? recv_buffer(args.recv_size_sum * args.datatype2->size()) : nullptr,
+ args.recvcounts->data(), args.disps.data(), args.datatype2, args.root, MPI_COMM_WORLD);
}
else {
- Colls::allgatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
- recv_buffer(args.recv_size_sum * args.datatype2->size()), args.recvcounts->data(), args.disps.data(), args.datatype2,
- MPI_COMM_WORLD);
+ Colls::allgatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
+ recv_buffer(args.recv_size_sum * args.datatype2->size()), args.recvcounts->data(),
+ args.disps.data(), args.datatype2, MPI_COMM_WORLD);
}
TRACE_smpi_comm_out(my_proc_id);
nullptr, Datatype::encode(args.datatype1),
Datatype::encode(args.datatype2)));
- Colls::scatterv((rank == args.root) ? send_buffer(args.send_size_sum * args.datatype1->size()) : nullptr, args.sendcounts->data(), args.disps.data(),
- args.datatype1, recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, args.root,
- MPI_COMM_WORLD);
+ Colls::scatterv((rank == args.root) ? send_buffer(args.send_size_sum * args.datatype1->size()) : nullptr,
+ args.sendcounts->data(), args.disps.data(), args.datatype1,
+ recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, args.root,
+ MPI_COMM_WORLD);
TRACE_smpi_comm_out(my_proc_id);
}
std::to_string(args.comp_size), /* ugly hack to print comp_size */
Datatype::encode(args.datatype1)));
- Colls::reduce_scatter(send_buffer(args.recv_size_sum * args.datatype1->size()), recv_buffer(args.recv_size_sum * args.datatype1->size()),
- args.recvcounts->data(), args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
+ Colls::reduce_scatter(send_buffer(args.recv_size_sum * args.datatype1->size()),
+ recv_buffer(args.recv_size_sum * args.datatype1->size()), args.recvcounts->data(),
+ args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
smpi_execute_flops(args.comp_size);
TRACE_smpi_comm_out(my_proc_id);
} // Replay Namespace
}} // namespace simgrid::smpi
+std::vector<simgrid::smpi::replay::RequestStorage> storage;
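+// Assumption: one RequestStorage per replaying process; only declared here, the
+// actions are hooked up to it elsewhere.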
/** @brief Only initialize the replay, don't do it for real */
void smpi_replay_init(int* argc, char*** argv)
{
smpi_process()->mark_as_initialized();
smpi_process()->set_replaying(true);
- int my_proc_id = simgrid::s4u::this_actor::getPid();
+ int my_proc_id = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_init(my_proc_id);
TRACE_smpi_computing_init(my_proc_id);
TRACE_smpi_comm_in(my_proc_id, "smpi_replay_run_init", new simgrid::instr::NoOpTIData("init"));
/** @brief actually run the replay after initialization */
void smpi_replay_main(int* argc, char*** argv)
{
+ static int active_processes = 0;
+ active_processes++;
simgrid::xbt::replay_runner(*argc, *argv);
/* and now, finalize everything */
smpi_free_replay_tmp_buffers();
}
- TRACE_smpi_comm_in(simgrid::s4u::this_actor::getPid(), "smpi_replay_run_finalize",
+ TRACE_smpi_comm_in(simgrid::s4u::this_actor::get_pid(), "smpi_replay_run_finalize",
new simgrid::instr::NoOpTIData("finalize"));
smpi_process()->finalize();
- TRACE_smpi_comm_out(simgrid::s4u::this_actor::getPid());
- TRACE_smpi_finalize(simgrid::s4u::this_actor::getPid());
+ TRACE_smpi_comm_out(simgrid::s4u::this_actor::get_pid());
+ TRACE_smpi_finalize(simgrid::s4u::this_actor::get_pid());
}
/** @brief chain a replay initialization and a replay start */