/* Copyright (c) 2009-2018. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "smpi_coll.hpp"
#include "smpi_comm.hpp"
#include "smpi_datatype.hpp"
#include "smpi_group.hpp"
#include "smpi_process.hpp"
#include "smpi_request.hpp"
#include "xbt/replay.hpp"

#include <boost/algorithm/string/join.hpp>
#include <unordered_map>

using simgrid::s4u::Actor;

// From https://stackoverflow.com/questions/7110301/generic-hash-for-tuples-in-unordered-map-unordered-set
// This is all just to make std::unordered_map work with std::tuple. If we need this in other places,
// this could go into a header file.
template <typename TT>
operator()(TT const& tt) const
  return std::hash<TT>()(tt);

inline void hash_combine(std::size_t& seed, T const& v)
  seed ^= hash_tuple::hash<T>()(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
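  // Mixing step in the style of boost::hash_combine: 0x9e3779b9 comes from the golden ratio
  // (2^32 / phi), and the shifts spread the bits of each element's hash across the seed.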
// Recursive template code derived from Matthieu M.
template <class Tuple, size_t Index = std::tuple_size<Tuple>::value - 1>
static void apply(size_t& seed, Tuple const& tuple)
  HashValueImpl<Tuple, Index - 1>::apply(seed, tuple);
  hash_combine(seed, std::get<Index>(tuple));

template <class Tuple>
struct HashValueImpl<Tuple, 0>
  static void apply(size_t& seed, Tuple const& tuple)
    hash_combine(seed, std::get<0>(tuple));

template <typename... TT>
struct hash<std::tuple<TT...>>
  operator()(std::tuple<TT...> const& tt) const
    HashValueImpl<std::tuple<TT...>>::apply(seed, tt);

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_replay, smpi, "Trace Replay with SMPI");

static std::unordered_map<int, std::vector<MPI_Request>*> reqq;
typedef std::tuple</*sender*/ int, /*receiver*/ int, /*tag*/ int> req_key_t;
typedef std::unordered_map<req_key_t, MPI_Request, hash_tuple::hash<std::tuple<int, int, int>>> req_storage_t;

static MPI_Datatype MPI_DEFAULT_TYPE;
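// MPI_DEFAULT_TYPE is chosen by the "init" action below: traces that pass an extra argument are
// assumed to follow MPE conventions (MPI_DOUBLE), the others TAU conventions (MPI_BYTE).
// reqq (above) keeps, for each actor pid, the pending requests that wait/test/waitAll actions consume.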
#define CHECK_ACTION_PARAMS(action, mandatory, optional)                                                     \
  if (action.size() < static_cast<unsigned long>(mandatory + 2)) {                                           \
    std::stringstream ss;                                                                                    \
    for (const auto& elem : action) {                                                                        \
    THROWF(arg_error, 0, "%s replay failed.\n"                                                               \
           "%zu items were given on the line. First two should be process_id and action. "                   \
           "After them, this action needs %lu mandatory arguments, and accepts %lu optional ones.\n"         \
           "The full line that was given is:\n %s\n"                                                         \
           "Please contact the SimGrid team if support is needed",                                           \
           __func__, action.size(), static_cast<unsigned long>(mandatory), static_cast<unsigned long>(optional), \
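// For illustration only (not a line from any particular trace): an action such as
//   0 send 1 1 1e6
// reaches the code below as action = {"0", "send", "1", "1", "1e6"}: action[0] is the acting process
// id, action[1] the action name, and the remaining fields are the arguments counted by this macro.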
static void log_timed_action(simgrid::xbt::ReplayAction& action, double clock)
  if (XBT_LOG_ISENABLED(smpi_replay, xbt_log_priority_verbose)) {
    std::string s = boost::algorithm::join(action, " ");
    XBT_VERB("%s %f", s.c_str(), smpi_process()->simulated_elapsed() - clock);

static std::vector<MPI_Request>* get_reqq_self()
  return reqq.at(simgrid::s4u::this_actor::get_pid());

static void set_reqq_self(std::vector<MPI_Request>* mpi_request)
  reqq.insert({simgrid::s4u::this_actor::get_pid(), mpi_request});

/* Helper function */
static double parse_double(std::string string)
  return xbt_str_parse_double(string.c_str(), "%s is not a double");

class RequestStorage {
  req_storage_t& get_store()

  void get_requests(std::vector<MPI_Request>& vec)
    for (auto& pair : store) {
      auto& req = pair.second;
      auto my_proc_id = simgrid::s4u::this_actor::get_pid();
      if (req != MPI_REQUEST_NULL && (req->src() == my_proc_id || req->dst() == my_proc_id)) {
        vec.push_back(pair.second);
        pair.second->print_request("MM");

  MPI_Request find(int src, int dst, int tag)
    req_storage_t::iterator it = store.find(req_key_t(src, dst, tag));
    return (it == store.end()) ? MPI_REQUEST_NULL : it->second;

  void remove(MPI_Request req)
    if (req == MPI_REQUEST_NULL)
      return;
    store.erase(req_key_t(req->src() - 1, req->dst() - 1, req->tag()));

  void add(MPI_Request req)
    if (req != MPI_REQUEST_NULL) // Can and does happen in the case of TestAction
      store.insert({req_key_t(req->src() - 1, req->dst() - 1, req->tag()), req});

  /* Sometimes we need to re-insert MPI_REQUEST_NULL but we still need src, dst and tag */
  void addNullRequest(int src, int dst, int tag)
    store.insert({req_key_t(src, dst, tag), MPI_REQUEST_NULL});
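  /* Requests are keyed by (src, dst, tag) so that later wait/test actions can look up the matching
   * pending request. add() and remove() shift src and dst by -1, apparently to map the 1-based actor
   * pids stored in the request back onto the 0-based ranks used by the trace. */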
class ActionArgParser {
  virtual ~ActionArgParser() = default;
  virtual void parse(simgrid::xbt::ReplayAction& action, std::string name) { CHECK_ACTION_PARAMS(action, 0, 0) }

class WaitTestParser : public ActionArgParser {
  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    CHECK_ACTION_PARAMS(action, 3, 0)
    src = std::stoi(action[2]);
    dst = std::stoi(action[3]);
    tag = std::stoi(action[4]);
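    // Illustrative only: a trace line like "0 wait 1 0 42" is parsed here as src=1, dst=0, tag=42.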
class SendRecvParser : public ActionArgParser {
  /* communication partner; if we send, this is the receiver and vice versa */
  MPI_Datatype datatype1 = MPI_DEFAULT_TYPE;

  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    CHECK_ACTION_PARAMS(action, 3, 1)
    partner = std::stoi(action[2]);
    tag     = std::stoi(action[3]);
    size    = parse_double(action[4]);
    if (action.size() > 5)
      datatype1 = simgrid::smpi::Datatype::decode(action[5]);
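    // Illustrative only: "0 Isend 1 0 1e6 1" is parsed here as partner=1, tag=0, size=1e6, and the
    // optional fifth argument selects the datatype via Datatype::decode("1").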
class ComputeParser : public ActionArgParser {
  /* amount of computation (in flops) to simulate */

  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    CHECK_ACTION_PARAMS(action, 1, 0)
    flops = parse_double(action[2]);
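    // Illustrative only: "0 compute 1e9" makes the acting process simulate 1e9 flops.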
class CollCommParser : public ActionArgParser {
  MPI_Datatype datatype1 = MPI_DEFAULT_TYPE;
  MPI_Datatype datatype2 = MPI_DEFAULT_TYPE;

class BcastArgParser : public CollCommParser {
  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    CHECK_ACTION_PARAMS(action, 1, 2)
    size = parse_double(action[2]);
    root = (action.size() > 3) ? std::stoi(action[3]) : 0;
    if (action.size() > 4)
      datatype1 = simgrid::smpi::Datatype::decode(action[4]);

class ReduceArgParser : public CollCommParser {
  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    CHECK_ACTION_PARAMS(action, 2, 2)
    comm_size = parse_double(action[2]);
    comp_size = parse_double(action[3]);
    root      = (action.size() > 4) ? std::stoi(action[4]) : 0;
    if (action.size() > 5)
      datatype1 = simgrid::smpi::Datatype::decode(action[5]);

class AllReduceArgParser : public CollCommParser {
  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    CHECK_ACTION_PARAMS(action, 2, 1)
    comm_size = parse_double(action[2]);
    comp_size = parse_double(action[3]);
    if (action.size() > 4)
      datatype1 = simgrid::smpi::Datatype::decode(action[4]);

class AllToAllArgParser : public CollCommParser {
  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    CHECK_ACTION_PARAMS(action, 2, 1)
    comm_size = MPI_COMM_WORLD->size();
    send_size = parse_double(action[2]);
    recv_size = parse_double(action[3]);
    if (action.size() > 4)
      datatype1 = simgrid::smpi::Datatype::decode(action[4]);
    if (action.size() > 5)
      datatype2 = simgrid::smpi::Datatype::decode(action[5]);

class GatherArgParser : public CollCommParser {
  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    /* The structure of the gather action for the rank 0 (total 4 processes) is the following:
         1) 68 is the sendcount
         2) 68 is the recvcount
         3) 0 is the root node
         4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode()
         5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode()
    */
    CHECK_ACTION_PARAMS(action, 2, 3)
    comm_size = MPI_COMM_WORLD->size();
    send_size = parse_double(action[2]);
    recv_size = parse_double(action[3]);

    if (name == "gather") {
      root = (action.size() > 4) ? std::stoi(action[4]) : 0;
      if (action.size() > 5)
        datatype1 = simgrid::smpi::Datatype::decode(action[5]);
      if (action.size() > 6)
        datatype2 = simgrid::smpi::Datatype::decode(action[6]);
    } else {
      if (action.size() > 4)
        datatype1 = simgrid::smpi::Datatype::decode(action[4]);
      if (action.size() > 5)
        datatype2 = simgrid::smpi::Datatype::decode(action[5]);

class GatherVArgParser : public CollCommParser {
  std::shared_ptr<std::vector<int>> recvcounts;
  std::vector<int> disps;

  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    /* The structure of the gatherv action for the rank 0 (total 4 processes) is the following:
         0 gatherV 68 68 10 10 10 0 0 0
         1) 68 is the sendcount
         2) 68 10 10 10 is the recvcounts
         3) 0 is the root node
         4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode()
         5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode()
    */
    comm_size = MPI_COMM_WORLD->size();
    CHECK_ACTION_PARAMS(action, comm_size + 1, 2)
    send_size  = parse_double(action[2]);
    disps      = std::vector<int>(comm_size, 0);
    recvcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));

    if (name == "gatherV") {
      root = (action.size() > 3 + comm_size) ? std::stoi(action[3 + comm_size]) : 0;
      if (action.size() > 4 + comm_size)
        datatype1 = simgrid::smpi::Datatype::decode(action[4 + comm_size]);
      if (action.size() > 5 + comm_size)
        datatype2 = simgrid::smpi::Datatype::decode(action[5 + comm_size]);
    } else {
      int datatype_index = 0;
      int disp_index     = 0;
      /* The 3 comes from "0 gather <sendcount>", which must always be present.
       * The + comm_size is the recvcounts array, which must also be present
       */
      if (action.size() > 3 + comm_size + comm_size) { /* datatype + disp are specified */
        datatype_index = 3 + comm_size;
        disp_index     = datatype_index + 1;
        datatype1      = simgrid::smpi::Datatype::decode(action[datatype_index]);
        datatype2      = simgrid::smpi::Datatype::decode(action[datatype_index]);
      } else if (action.size() > 3 + comm_size + 2) { /* disps specified; datatype is not specified; use the default one */
        disp_index = 3 + comm_size;
      } else if (action.size() > 3 + comm_size) { /* only datatype, no disp specified */
        datatype_index = 3 + comm_size;
        datatype1      = simgrid::smpi::Datatype::decode(action[datatype_index]);
        datatype2      = simgrid::smpi::Datatype::decode(action[datatype_index]);

      if (disp_index != 0) {
        for (unsigned int i = 0; i < comm_size; i++)
          disps[i] = std::stoi(action[disp_index + i]);

    for (unsigned int i = 0; i < comm_size; i++) {
      (*recvcounts)[i] = std::stoi(action[i + 3]);
    recv_size_sum = std::accumulate(recvcounts->begin(), recvcounts->end(), 0);

class ScatterArgParser : public CollCommParser {
  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    /* The structure of the scatter action for the rank 0 (total 4 processes) is the following:
         1) 68 is the sendcount
         2) 68 is the recvcount
         3) 0 is the root node
         4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode()
         5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode()
    */
    CHECK_ACTION_PARAMS(action, 2, 3)
    comm_size = MPI_COMM_WORLD->size();
    send_size = parse_double(action[2]);
    recv_size = parse_double(action[3]);
    root      = (action.size() > 4) ? std::stoi(action[4]) : 0;
    if (action.size() > 5)
      datatype1 = simgrid::smpi::Datatype::decode(action[5]);
    if (action.size() > 6)
      datatype2 = simgrid::smpi::Datatype::decode(action[6]);

class ScatterVArgParser : public CollCommParser {
  std::shared_ptr<std::vector<int>> sendcounts;
  std::vector<int> disps;

  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    /* The structure of the scatterv action for the rank 0 (total 4 processes) is the following:
         0 scatterV 68 10 10 10 68 0 0 0
         1) 68 10 10 10 is the sendcounts
         2) 68 is the recvcount
         3) 0 is the root node
         4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode()
         5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode()
    */
    comm_size  = MPI_COMM_WORLD->size();
    CHECK_ACTION_PARAMS(action, comm_size + 1, 2)
    recv_size  = parse_double(action[2 + comm_size]);
    disps      = std::vector<int>(comm_size, 0);
    sendcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));

    if (action.size() > 5 + comm_size)
      datatype1 = simgrid::smpi::Datatype::decode(action[4 + comm_size]);
    if (action.size() > 5 + comm_size)
      datatype2 = simgrid::smpi::Datatype::decode(action[5 + comm_size]);
    for (unsigned int i = 0; i < comm_size; i++) {
      (*sendcounts)[i] = std::stoi(action[i + 2]);
    send_size_sum = std::accumulate(sendcounts->begin(), sendcounts->end(), 0);
    root          = (action.size() > 3 + comm_size) ? std::stoi(action[3 + comm_size]) : 0;

class ReduceScatterArgParser : public CollCommParser {
  std::shared_ptr<std::vector<int>> recvcounts;
  std::vector<int> disps;

  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    /* The structure of the reducescatter action for the rank 0 (total 4 processes) is the following:
         0 reduceScatter 275427 275427 275427 204020 11346849 0
         1) The first four values after the name of the action declare the recvcounts array
         2) The value 11346849 is the amount of instructions
         3) The last value corresponds to the datatype, see simgrid::smpi::Datatype::decode().
    */
    comm_size = MPI_COMM_WORLD->size();
    CHECK_ACTION_PARAMS(action, comm_size + 1, 1)
    comp_size  = parse_double(action[2 + comm_size]);
    recvcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
    if (action.size() > 3 + comm_size)
      datatype1 = simgrid::smpi::Datatype::decode(action[3 + comm_size]);

    for (unsigned int i = 0; i < comm_size; i++) {
      (*recvcounts)[i] = std::stoi(action[i + 2]);
    recv_size_sum = std::accumulate(recvcounts->begin(), recvcounts->end(), 0);

class AllToAllVArgParser : public CollCommParser {
  std::shared_ptr<std::vector<int>> recvcounts;
  std::shared_ptr<std::vector<int>> sendcounts;
  std::vector<int> senddisps;
  std::vector<int> recvdisps;

  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
    /* The structure of the allToAllV action for the rank 0 (total 4 processes) is the following:
         0 allToAllV 100 1 7 10 12 100 1 70 10 5
         1) 100 is the size of the send buffer (in number of elements),
         2) 1 7 10 12 is the sendcounts array,
         3) 100 is the size of the receive buffer (in number of elements),
         4) 1 70 10 5 is the recvcounts array.
    */
    comm_size = MPI_COMM_WORLD->size();
    CHECK_ACTION_PARAMS(action, 2 * comm_size + 2, 2)
    sendcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
    recvcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
    senddisps  = std::vector<int>(comm_size, 0);
    recvdisps  = std::vector<int>(comm_size, 0);

    if (action.size() > 5 + 2 * comm_size)
      datatype1 = simgrid::smpi::Datatype::decode(action[4 + 2 * comm_size]);
    if (action.size() > 5 + 2 * comm_size)
      datatype2 = simgrid::smpi::Datatype::decode(action[5 + 2 * comm_size]);

    send_buf_size = parse_double(action[2]);
    recv_buf_size = parse_double(action[3 + comm_size]);
    for (unsigned int i = 0; i < comm_size; i++) {
      (*sendcounts)[i] = std::stoi(action[3 + i]);
      (*recvcounts)[i] = std::stoi(action[4 + comm_size + i]);
    send_size_sum = std::accumulate(sendcounts->begin(), sendcounts->end(), 0);
    recv_size_sum = std::accumulate(recvcounts->begin(), recvcounts->end(), 0);

template <class T> class ReplayAction {
  const std::string name;
  const int my_proc_id;

  explicit ReplayAction(std::string name) : name(name), my_proc_id(simgrid::s4u::this_actor::get_pid()) {}
  virtual ~ReplayAction() = default;

  virtual void execute(simgrid::xbt::ReplayAction& action)
    // Needs to be re-initialized for every action, hence here
    double start_time = smpi_process()->simulated_elapsed();
    args.parse(action, name);
    log_timed_action(action, start_time);

  virtual void kernel(simgrid::xbt::ReplayAction& action) = 0;

  void* send_buffer(int size)
    return smpi_get_tmp_sendbuffer(size);

  void* recv_buffer(int size)
    return smpi_get_tmp_recvbuffer(size);
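/* Each concrete action below pairs an argument parser T (held in args) with a kernel() that issues the
 * corresponding SMPI call: execute() records the simulated start time, parses the trace line, runs
 * kernel(), and finally logs the action together with its simulated duration. */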
class WaitAction : public ReplayAction<WaitTestParser> {
  WaitAction() : ReplayAction("Wait") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    std::string s = boost::algorithm::join(action, " ");
    xbt_assert(get_reqq_self()->size(), "action wait not preceded by any irecv or isend: %s", s.c_str());
    MPI_Request request = get_reqq_self()->back();
    get_reqq_self()->pop_back();

    if (request == MPI_REQUEST_NULL) {
      /* Assume that the trace is well formed, meaning the comm might have been caught by a MPI_test. Then just
       * return. */

    int rank = request->comm() != MPI_COMM_NULL ? request->comm()->rank() : -1;

    // Must be taken before Request::wait() since the request may be set to
    // MPI_REQUEST_NULL by Request::wait!
    bool is_wait_for_receive = (request->flags() & RECV);
    // TODO: Here we take the rank while we normally take the process id (look for my_proc_id)
    TRACE_smpi_comm_in(rank, __func__, new simgrid::instr::NoOpTIData("wait"));

    Request::wait(&request, &status);

    TRACE_smpi_comm_out(rank);
    if (is_wait_for_receive)
      TRACE_smpi_recv(args.src, args.dst, args.tag);

class SendAction : public ReplayAction<SendRecvParser> {
  SendAction() = delete;
  explicit SendAction(std::string name) : ReplayAction(name) {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid();

    TRACE_smpi_comm_in(my_proc_id, __func__,
                       new simgrid::instr::Pt2PtTIData(name, args.partner, args.size, args.tag,
                                                       Datatype::encode(args.datatype1)));
    if (not TRACE_smpi_view_internals())
      TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, args.tag, args.size * args.datatype1->size());

    if (name == "send") {
      Request::send(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
    } else if (name == "Isend") {
      MPI_Request request = Request::isend(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
      get_reqq_self()->push_back(request);
    } else {
      xbt_die("Don't know this action, %s", name.c_str());

    TRACE_smpi_comm_out(my_proc_id);

class RecvAction : public ReplayAction<SendRecvParser> {
  RecvAction() = delete;
  explicit RecvAction(std::string name) : ReplayAction(name) {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    int src_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid();

    TRACE_smpi_comm_in(my_proc_id, __func__,
                       new simgrid::instr::Pt2PtTIData(name, args.partner, args.size, args.tag,
                                                       Datatype::encode(args.datatype1)));

    // unknown size from the receiver's point of view
    if (args.size <= 0.0) {
      Request::probe(args.partner, args.tag, MPI_COMM_WORLD, &status);
      args.size = status.count;

    if (name == "recv") {
      Request::recv(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD, &status);
    } else if (name == "Irecv") {
      MPI_Request request = Request::irecv(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
      get_reqq_self()->push_back(request);

    TRACE_smpi_comm_out(my_proc_id);
    // TODO: Check why this was only activated in the "recv" case and not in the "Irecv" case
    if (name == "recv" && not TRACE_smpi_view_internals()) {
      TRACE_smpi_recv(src_traced, my_proc_id, args.tag);

class ComputeAction : public ReplayAction<ComputeParser> {
  ComputeAction() : ReplayAction("compute") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    TRACE_smpi_computing_in(my_proc_id, args.flops);
    smpi_execute_flops(args.flops);
    TRACE_smpi_computing_out(my_proc_id);

class TestAction : public ReplayAction<WaitTestParser> {
  TestAction() : ReplayAction("Test") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    MPI_Request request = get_reqq_self()->back();
    get_reqq_self()->pop_back();
    // if request is null here, this may mean that a previous test has succeeded
    // Different times in traced application and replayed version may lead to this
    // In this case, ignore the extra calls.
    if (request != MPI_REQUEST_NULL) {
      TRACE_smpi_testing_in(my_proc_id);

      int flag = Request::test(&request, &status);

      XBT_DEBUG("MPI_Test result: %d", flag);
      /* push back request in vector to be caught by a subsequent wait. if the test did succeed, the request is now
      get_reqq_self()->push_back(request);

      TRACE_smpi_testing_out(my_proc_id);

class InitAction : public ReplayAction<ActionArgParser> {
  InitAction() : ReplayAction("Init") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    CHECK_ACTION_PARAMS(action, 0, 1)
    MPI_DEFAULT_TYPE = (action.size() > 2) ? MPI_DOUBLE // default MPE datatype
                                           : MPI_BYTE;  // default TAU datatype

    /* start a simulated timer */
    smpi_process()->simulated_start();
    set_reqq_self(new std::vector<MPI_Request>);

class CommunicatorAction : public ReplayAction<ActionArgParser> {
  CommunicatorAction() : ReplayAction("Comm") {}
  void kernel(simgrid::xbt::ReplayAction& action) override { /* nothing to do */ }

class WaitAllAction : public ReplayAction<ActionArgParser> {
  WaitAllAction() : ReplayAction("waitAll") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    const unsigned int count_requests = get_reqq_self()->size();

    if (count_requests > 0) {
      TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData("waitAll", -1, count_requests, ""));
      std::vector<std::pair</*sender*/ int, /*recv*/ int>> sender_receiver;
      for (const auto& req : (*get_reqq_self())) {
        if (req && (req->flags() & RECV)) {
          sender_receiver.push_back({req->src(), req->dst()});

      MPI_Status status[count_requests];
      Request::waitall(count_requests, &(*get_reqq_self())[0], status);

      for (auto& pair : sender_receiver) {
        TRACE_smpi_recv(pair.first, pair.second, 0);
      TRACE_smpi_comm_out(my_proc_id);

class BarrierAction : public ReplayAction<ActionArgParser> {
  BarrierAction() : ReplayAction("barrier") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("barrier"));
    Colls::barrier(MPI_COMM_WORLD);
    TRACE_smpi_comm_out(my_proc_id);
class BcastAction : public ReplayAction<BcastArgParser> {
  BcastAction() : ReplayAction("bcast") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    TRACE_smpi_comm_in(my_proc_id, "action_bcast",
                       new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
                                                      -1.0, args.size, -1, Datatype::encode(args.datatype1), ""));

    Colls::bcast(send_buffer(args.size * args.datatype1->size()), args.size, args.datatype1, args.root, MPI_COMM_WORLD);

    TRACE_smpi_comm_out(my_proc_id);

class ReduceAction : public ReplayAction<ReduceArgParser> {
  ReduceAction() : ReplayAction("reduce") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    TRACE_smpi_comm_in(my_proc_id, "action_reduce",
                       new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
                                                      args.comp_size, args.comm_size, -1,
                                                      Datatype::encode(args.datatype1), ""));

    Colls::reduce(send_buffer(args.comm_size * args.datatype1->size()),
                  recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL,
                  args.root, MPI_COMM_WORLD);
    smpi_execute_flops(args.comp_size);

    TRACE_smpi_comm_out(my_proc_id);

class AllReduceAction : public ReplayAction<AllReduceArgParser> {
  AllReduceAction() : ReplayAction("allReduce") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    TRACE_smpi_comm_in(my_proc_id, "action_allReduce",
                       new simgrid::instr::CollTIData("allReduce", -1, args.comp_size, args.comm_size, -1,
                                                      Datatype::encode(args.datatype1), ""));

    Colls::allreduce(send_buffer(args.comm_size * args.datatype1->size()),
                     recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL,
                     MPI_COMM_WORLD);
    smpi_execute_flops(args.comp_size);

    TRACE_smpi_comm_out(my_proc_id);

class AllToAllAction : public ReplayAction<AllToAllArgParser> {
  AllToAllAction() : ReplayAction("allToAll") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    TRACE_smpi_comm_in(my_proc_id, "action_allToAll",
                       new simgrid::instr::CollTIData("allToAll", -1, -1.0, args.send_size, args.recv_size,
                                                      Datatype::encode(args.datatype1),
                                                      Datatype::encode(args.datatype2)));

    Colls::alltoall(send_buffer(args.send_size * args.comm_size * args.datatype1->size()), args.send_size,
                    args.datatype1, recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()),
                    args.recv_size, args.datatype2, MPI_COMM_WORLD);

    TRACE_smpi_comm_out(my_proc_id);

class GatherAction : public ReplayAction<GatherArgParser> {
  explicit GatherAction(std::string name) : ReplayAction(name) {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    TRACE_smpi_comm_in(my_proc_id, name.c_str(),
                       new simgrid::instr::CollTIData(name, (name == "gather") ? args.root : -1, -1.0, args.send_size,
                                                      args.recv_size, Datatype::encode(args.datatype1),
                                                      Datatype::encode(args.datatype2)));

    if (name == "gather") {
      int rank = MPI_COMM_WORLD->rank();
      Colls::gather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
                    (rank == args.root) ? recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()) : nullptr,
                    args.recv_size, args.datatype2, args.root, MPI_COMM_WORLD);
    } else {
      Colls::allgather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
                       recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2,
                       MPI_COMM_WORLD);

    TRACE_smpi_comm_out(my_proc_id);

class GatherVAction : public ReplayAction<GatherVArgParser> {
  explicit GatherVAction(std::string name) : ReplayAction(name) {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    int rank = MPI_COMM_WORLD->rank();

    TRACE_smpi_comm_in(my_proc_id, name.c_str(),
                       new simgrid::instr::VarCollTIData(name, (name == "gatherV") ? args.root : -1, args.send_size,
                                                         nullptr, -1, args.recvcounts,
                                                         Datatype::encode(args.datatype1),
                                                         Datatype::encode(args.datatype2)));

    if (name == "gatherV") {
      Colls::gatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
                     (rank == args.root) ? recv_buffer(args.recv_size_sum * args.datatype2->size()) : nullptr,
                     args.recvcounts->data(), args.disps.data(), args.datatype2, args.root, MPI_COMM_WORLD);
    } else {
      Colls::allgatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
                        recv_buffer(args.recv_size_sum * args.datatype2->size()), args.recvcounts->data(),
                        args.disps.data(), args.datatype2, MPI_COMM_WORLD);

    TRACE_smpi_comm_out(my_proc_id);
class ScatterAction : public ReplayAction<ScatterArgParser> {
  ScatterAction() : ReplayAction("scatter") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    int rank = MPI_COMM_WORLD->rank();
    TRACE_smpi_comm_in(my_proc_id, "action_scatter",
                       new simgrid::instr::CollTIData(name, args.root, -1.0, args.send_size, args.recv_size,
                                                      Datatype::encode(args.datatype1),
                                                      Datatype::encode(args.datatype2)));

    Colls::scatter(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
                   (rank == args.root) ? recv_buffer(args.recv_size * args.datatype2->size()) : nullptr,
                   args.recv_size, args.datatype2, args.root, MPI_COMM_WORLD);

    TRACE_smpi_comm_out(my_proc_id);

class ScatterVAction : public ReplayAction<ScatterVArgParser> {
  ScatterVAction() : ReplayAction("scatterV") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    int rank = MPI_COMM_WORLD->rank();
    TRACE_smpi_comm_in(my_proc_id, "action_scatterv",
                       new simgrid::instr::VarCollTIData(name, args.root, -1, args.sendcounts, args.recv_size,
                                                         nullptr, Datatype::encode(args.datatype1),
                                                         Datatype::encode(args.datatype2)));

    Colls::scatterv((rank == args.root) ? send_buffer(args.send_size_sum * args.datatype1->size()) : nullptr,
                    args.sendcounts->data(), args.disps.data(), args.datatype1,
                    recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, args.root,
                    MPI_COMM_WORLD);

    TRACE_smpi_comm_out(my_proc_id);

class ReduceScatterAction : public ReplayAction<ReduceScatterArgParser> {
  ReduceScatterAction() : ReplayAction("reduceScatter") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    TRACE_smpi_comm_in(my_proc_id, "action_reducescatter",
                       new simgrid::instr::VarCollTIData("reduceScatter", -1, 0, nullptr, -1, args.recvcounts,
                                                         std::to_string(args.comp_size), /* ugly hack to print comp_size */
                                                         Datatype::encode(args.datatype1)));

    Colls::reduce_scatter(send_buffer(args.recv_size_sum * args.datatype1->size()),
                          recv_buffer(args.recv_size_sum * args.datatype1->size()), args.recvcounts->data(),
                          args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);

    smpi_execute_flops(args.comp_size);
    TRACE_smpi_comm_out(my_proc_id);

class AllToAllVAction : public ReplayAction<AllToAllVArgParser> {
  AllToAllVAction() : ReplayAction("allToAllV") {}
  void kernel(simgrid::xbt::ReplayAction& action) override
    TRACE_smpi_comm_in(my_proc_id, __func__,
                       new simgrid::instr::VarCollTIData(
                           "allToAllV", -1, args.send_size_sum, args.sendcounts, args.recv_size_sum, args.recvcounts,
                           Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));

    Colls::alltoallv(send_buffer(args.send_buf_size * args.datatype1->size()), args.sendcounts->data(),
                     args.senddisps.data(), args.datatype1, recv_buffer(args.recv_buf_size * args.datatype2->size()),
                     args.recvcounts->data(), args.recvdisps.data(), args.datatype2, MPI_COMM_WORLD);

    TRACE_smpi_comm_out(my_proc_id);

} // Replay Namespace
}} // namespace simgrid::smpi

std::vector<simgrid::smpi::replay::RequestStorage> storage;
/** @brief Only initialize the replay, don't do it for real */
void smpi_replay_init(int* argc, char*** argv)
  simgrid::smpi::Process::init(argc, argv);
  smpi_process()->mark_as_initialized();
  smpi_process()->set_replaying(true);

  int my_proc_id = simgrid::s4u::this_actor::get_pid();
  TRACE_smpi_init(my_proc_id);
  TRACE_smpi_computing_init(my_proc_id);
  TRACE_smpi_comm_in(my_proc_id, "smpi_replay_run_init", new simgrid::instr::NoOpTIData("init"));
  TRACE_smpi_comm_out(my_proc_id);
  xbt_replay_action_register("init", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::InitAction().execute(action); });
  xbt_replay_action_register("finalize", [](simgrid::xbt::ReplayAction& action) { /* nothing to do */ });
  xbt_replay_action_register("comm_size", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
  xbt_replay_action_register("comm_split", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
  xbt_replay_action_register("comm_dup", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
  xbt_replay_action_register("send", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::SendAction("send").execute(action); });
  xbt_replay_action_register("Isend", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::SendAction("Isend").execute(action); });
  xbt_replay_action_register("recv", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::RecvAction("recv").execute(action); });
  xbt_replay_action_register("Irecv", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::RecvAction("Irecv").execute(action); });
  xbt_replay_action_register("test", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::TestAction().execute(action); });
  xbt_replay_action_register("wait", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::WaitAction().execute(action); });
  xbt_replay_action_register("waitAll", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::WaitAllAction().execute(action); });
  xbt_replay_action_register("barrier", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::BarrierAction().execute(action); });
  xbt_replay_action_register("bcast", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::BcastAction().execute(action); });
  xbt_replay_action_register("reduce", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ReduceAction().execute(action); });
  xbt_replay_action_register("allReduce", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::AllReduceAction().execute(action); });
  xbt_replay_action_register("allToAll", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::AllToAllAction().execute(action); });
  xbt_replay_action_register("allToAllV", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::AllToAllVAction().execute(action); });
  xbt_replay_action_register("gather", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::GatherAction("gather").execute(action); });
  xbt_replay_action_register("scatter", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ScatterAction().execute(action); });
  xbt_replay_action_register("gatherV", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::GatherVAction("gatherV").execute(action); });
  xbt_replay_action_register("scatterV", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ScatterVAction().execute(action); });
  xbt_replay_action_register("allGather", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::GatherAction("allGather").execute(action); });
  xbt_replay_action_register("allGatherV", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::GatherVAction("allGatherV").execute(action); });
  xbt_replay_action_register("reduceScatter", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ReduceScatterAction().execute(action); });
  xbt_replay_action_register("compute", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ComputeAction().execute(action); });
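  /* Illustrative only (not part of any shipped trace): with the actions registered above, a minimal
   * replay file for process 0 could look like
   *   0 init
   *   0 compute 1e6
   *   0 send 1 0 1e6
   *   0 finalize
   * where the first column is the acting process id and the second the registered action name. */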
  // if we have a delayed start, sleep here.
    double value = xbt_str_parse_double((*argv)[2], "%s is not a double");
    XBT_VERB("Delayed start for instance - Sleeping for %f flops ", value);
    smpi_execute_flops(value);
    // UGLY: force a context switch to be sure that all MSG_processes begin initialization
    XBT_DEBUG("Force context switch by smpi_execute_flops - Sleeping for 0.0 flops ");
    smpi_execute_flops(0.0);

/** @brief actually run the replay after initialization */
void smpi_replay_main(int* argc, char*** argv)
  static int active_processes = 0;
  simgrid::xbt::replay_runner(*argc, *argv);

  /* and now, finalize everything */
  /* One active process will stop. Decrease the counter. */
  XBT_DEBUG("There are %zu elements in reqq[*]", get_reqq_self()->size());
  if (not get_reqq_self()->empty()) {
    unsigned int count_requests = get_reqq_self()->size();
    MPI_Request requests[count_requests];
    MPI_Status status[count_requests];

    for (auto const& req : *get_reqq_self()) {
    simgrid::smpi::Request::waitall(count_requests, requests, status);
  delete get_reqq_self();

  if (active_processes == 0) {
    /* Last process alive speaking: end the simulated timer */
    XBT_INFO("Simulation time %f", smpi_process()->simulated_elapsed());
    smpi_free_replay_tmp_buffers();

  TRACE_smpi_comm_in(simgrid::s4u::this_actor::get_pid(), "smpi_replay_run_finalize",
                     new simgrid::instr::NoOpTIData("finalize"));

  smpi_process()->finalize();

  TRACE_smpi_comm_out(simgrid::s4u::this_actor::get_pid());
  TRACE_smpi_finalize(simgrid::s4u::this_actor::get_pid());

/** @brief chain a replay initialization and a replay start */
void smpi_replay_run(int* argc, char*** argv)
  smpi_replay_init(argc, argv);
  smpi_replay_main(argc, argv);