X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/e16e02338a20ee10478d01659df54ed60dea1101..39c935d6d5ee86d153f6f7e6a10d723ae7c57f6f:/src/smpi/internals/smpi_replay.cpp

diff --git a/src/smpi/internals/smpi_replay.cpp b/src/smpi/internals/smpi_replay.cpp
index b0c8bd973f..21951bad72 100644
--- a/src/smpi/internals/smpi_replay.cpp
+++ b/src/smpi/internals/smpi_replay.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2019. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2009-2021. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -61,10 +61,10 @@ public:
 };
 }
 
-typedef std::tuple<int, int, int> req_key_t;
-typedef std::unordered_map<req_key_t, MPI_Request, hash_tuple::hash<std::tuple<int, int, int>>> req_storage_t;
+using req_key_t     = std::tuple<int, int, int>;
+using req_storage_t = std::unordered_map<req_key_t, MPI_Request, hash_tuple::hash<std::tuple<int, int, int>>>;
 
-void log_timed_action(simgrid::xbt::ReplayAction& action, double clock)
+void log_timed_action(const simgrid::xbt::ReplayAction& action, double clock)
 {
   if (XBT_LOG_ISENABLED(smpi_replay, xbt_log_priority_verbose)){
     std::string s = boost::algorithm::join(action, " ");
@@ -89,36 +89,30 @@ private:
   req_storage_t store;
 
 public:
-  RequestStorage() {}
-  int size()
-  {
-    return store.size();
-  }
+  RequestStorage() = default;
+  int size() const { return store.size(); }
 
-  req_storage_t& get_store()
-  {
-    return store;
-  }
+  req_storage_t& get_store() { return store; }
 
-  void get_requests(std::vector<MPI_Request>& vec)
-  {
-    for (auto& pair : store) {
-      auto& req = pair.second;
-      auto my_proc_id = simgrid::s4u::this_actor::get_pid();
-      if (req != MPI_REQUEST_NULL && (req->src() == my_proc_id || req->dst() == my_proc_id)) {
-        vec.push_back(pair.second);
-        pair.second->print_request("MM");
-      }
+  void get_requests(std::vector<MPI_Request>& vec) const
+  {
+    for (auto const& pair : store) {
+      auto& req = pair.second;
+      auto my_proc_id = simgrid::s4u::this_actor::get_pid();
+      if (req != MPI_REQUEST_NULL && (req->src() == my_proc_id || req->dst() == my_proc_id)) {
+        vec.push_back(pair.second);
+        pair.second->print_request("MM");
       }
     }
+  }
 
   MPI_Request find(int src, int dst, int tag)
   {
-    req_storage_t::iterator it = store.find(req_key_t(src, dst, tag));
+    auto it = store.find(req_key_t(src, dst, tag));
     return (it == store.end()) ? MPI_REQUEST_NULL : it->second;
   }
 
-  void remove(MPI_Request req)
+  void remove(const Request* req)
   {
     if (req == MPI_REQUEST_NULL) return;
@@ -264,7 +258,7 @@ void GatherVArgParser::parse(simgrid::xbt::ReplayAction& action, const std::stri
   CHECK_ACTION_PARAMS(action, comm_size + 1, 2)
   send_size = parse_double(action[2]);
   disps = std::vector<int>(comm_size, 0);
-  recvcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
+  recvcounts = std::make_shared<std::vector<int>>(comm_size);
 
   if (name == "gatherv") {
     root = (action.size() > 3 + comm_size) ? std::stoi(action[3 + comm_size]) : 0;
@@ -339,7 +333,7 @@ void ScatterVArgParser::parse(simgrid::xbt::ReplayAction& action, const std::str
   CHECK_ACTION_PARAMS(action, comm_size + 1, 2)
   recv_size = parse_double(action[2 + comm_size]);
   disps = std::vector<int>(comm_size, 0);
-  sendcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
+  sendcounts = std::make_shared<std::vector<int>>(comm_size);
 
   if (action.size() > 5 + comm_size)
     datatype1 = simgrid::smpi::Datatype::decode(action[4 + comm_size]);
@@ -365,7 +359,7 @@ void ReduceScatterArgParser::parse(simgrid::xbt::ReplayAction& action, const std
   comm_size = MPI_COMM_WORLD->size();
   CHECK_ACTION_PARAMS(action, comm_size + 1, 1)
   comp_size = parse_double(action[2 + comm_size]);
-  recvcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
+  recvcounts = std::make_shared<std::vector<int>>(comm_size);
 
   if (action.size() > 3 + comm_size)
     datatype1 = simgrid::smpi::Datatype::decode(action[3 + comm_size]);
@@ -387,8 +381,8 @@ void AllToAllVArgParser::parse(simgrid::xbt::ReplayAction& action, const std::st
   */
   comm_size = MPI_COMM_WORLD->size();
   CHECK_ACTION_PARAMS(action, 2 * comm_size + 2, 2)
-  sendcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
-  recvcounts = std::shared_ptr<std::vector<int>>(new std::vector<int>(comm_size));
+  sendcounts = std::make_shared<std::vector<int>>(comm_size);
+  recvcounts = std::make_shared<std::vector<int>>(comm_size);
   senddisps = std::vector<int>(comm_size, 0);
   recvdisps = std::vector<int>(comm_size, 0);
@@ -411,11 +405,12 @@ void WaitAction::kernel(simgrid::xbt::ReplayAction& action)
 {
   std::string s = boost::algorithm::join(action, " ");
   xbt_assert(req_storage.size(), "action wait not preceded by any irecv or isend: %s", s.c_str());
+  const WaitTestParser& args = get_args();
   MPI_Request request = req_storage.find(args.src, args.dst, args.tag);
   req_storage.remove(request);
 
   if (request == MPI_REQUEST_NULL) {
-    /* Assume that the trace is well formed, meaning the comm might have been caught by a MPI_test. Then just
+    /* Assume that the trace is well formed, meaning the comm might have been caught by an MPI_test. Then just
      * return.*/
     return;
   }
@@ -425,7 +420,7 @@ void WaitAction::kernel(simgrid::xbt::ReplayAction& action)
   // Must be taken before Request::wait() since the request may be set to
   // MPI_REQUEST_NULL by Request::wait!
   bool is_wait_for_receive = (request->flags() & MPI_REQ_RECV);
-  // TODO: Here we take the rank while we normally take the process id (look for my_proc_id)
+  // TODO: Here we take the rank while we normally take the process id (look for get_pid())
   TRACE_smpi_comm_in(rank, __func__, new simgrid::instr::WaitTIData(args.src, args.dst, args.tag));
 
   MPI_Status status;
@@ -438,64 +433,71 @@ void SendAction::kernel(simgrid::xbt::ReplayAction&)
 {
+  const SendRecvParser& args = get_args();
   int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid();
 
-  TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size,
-                                                                           args.tag, Datatype::encode(args.datatype1)));
+  TRACE_smpi_comm_in(
+      get_pid(), __func__,
+      new simgrid::instr::Pt2PtTIData(get_name(), args.partner, args.size, args.tag, Datatype::encode(args.datatype1)));
   if (not TRACE_smpi_view_internals())
-    TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, args.tag, args.size * args.datatype1->size());
+    TRACE_smpi_send(get_pid(), get_pid(), dst_traced, args.tag, args.size * args.datatype1->size());
 
-  if (name == "send") {
+  if (get_name() == "send") {
     Request::send(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
-  } else if (name == "isend") {
+  } else if (get_name() == "isend") {
     MPI_Request request = Request::isend(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
     req_storage.add(request);
   } else {
-    xbt_die("Don't know this action, %s", name.c_str());
+    xbt_die("Don't know this action, %s", get_name().c_str());
   }
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void RecvAction::kernel(simgrid::xbt::ReplayAction&)
 {
-  TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size,
-                                                                           args.tag, Datatype::encode(args.datatype1)));
+  const SendRecvParser& args = get_args();
+  TRACE_smpi_comm_in(
+      get_pid(), __func__,
+      new simgrid::instr::Pt2PtTIData(get_name(), args.partner, args.size, args.tag, Datatype::encode(args.datatype1)));
 
   MPI_Status status;
   // unknown size from the receiver point of view
-  if (args.size <= 0.0) {
+  double arg_size = args.size;
+  if (arg_size <= 0.0) {
     Request::probe(args.partner, args.tag, MPI_COMM_WORLD, &status);
-    args.size = status.count;
+    arg_size = status.count;
   }
-  bool is_recv = false; // Help analyzers understanding that status is not used unintialized
-  if (name == "recv") {
+  bool is_recv = false; // Help analyzers understanding that status is not used uninitialized
+  if (get_name() == "recv") {
     is_recv = true;
-    Request::recv(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD, &status);
-  } else if (name == "irecv") {
-    MPI_Request request = Request::irecv(nullptr, args.size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
+    Request::recv(nullptr, arg_size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD, &status);
+  } else if (get_name() == "irecv") {
+    MPI_Request request = Request::irecv(nullptr, arg_size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD);
     req_storage.add(request);
   } else {
     THROW_IMPOSSIBLE;
   }
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
   if (is_recv && not TRACE_smpi_view_internals()) {
     int src_traced = MPI_COMM_WORLD->group()->actor(status.MPI_SOURCE)->get_pid();
-    TRACE_smpi_recv(src_traced, my_proc_id, args.tag);
+    TRACE_smpi_recv(src_traced, get_pid(), args.tag);
   }
 }
 
 void ComputeAction::kernel(simgrid::xbt::ReplayAction&)
 {
-  if (simgrid::config::get_value<bool>("smpi/simulate-computation")) {
+  const ComputeParser& args = get_args();
+  if (smpi_cfg_simulate_computation()) {
     smpi_execute_flops(args.flops/smpi_adjust_comp_speed());
   }
 }
 
 void SleepAction::kernel(simgrid::xbt::ReplayAction&)
 {
+  const SleepParser& args = get_args();
   XBT_DEBUG("Sleep for: %lf secs", args.time);
   int rank = simgrid::s4u::this_actor::get_pid();
   TRACE_smpi_sleeping_in(rank, args.time);
@@ -505,18 +507,20 @@ void SleepAction::kernel(simgrid::xbt::ReplayAction&)
 
 void LocationAction::kernel(simgrid::xbt::ReplayAction&)
 {
+  const LocationParser& args = get_args();
   smpi_trace_set_call_location(args.filename.c_str(), args.line);
 }
 
 void TestAction::kernel(simgrid::xbt::ReplayAction&)
 {
+  const WaitTestParser& args = get_args();
   MPI_Request request = req_storage.find(args.src, args.dst, args.tag);
   req_storage.remove(request);
   // if request is null here, this may mean that a previous test has succeeded
   // Different times in traced application and replayed version may lead to this
   // In this case, ignore the extra calls.
   if (request != MPI_REQUEST_NULL) {
-    TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("test"));
+    TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::NoOpTIData("test"));
 
     MPI_Status status;
     int flag = 0;
@@ -530,7 +534,7 @@ void TestAction::kernel(simgrid::xbt::ReplayAction&)
     else
       req_storage.add(request);
 
-    TRACE_smpi_comm_out(my_proc_id);
+    TRACE_smpi_comm_out(get_pid());
   }
 }
@@ -554,182 +558,196 @@ void WaitAllAction::kernel(simgrid::xbt::ReplayAction&)
   const unsigned int count_requests = req_storage.size();
 
   if (count_requests > 0) {
-    TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData("waitall", -1, count_requests, ""));
+    TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::Pt2PtTIData("waitall", -1, count_requests, ""));
     std::vector<std::pair<int, int>> sender_receiver;
     std::vector<MPI_Request> reqs;
     req_storage.get_requests(reqs);
-    for (const auto& req : reqs) {
+    for (auto const& req : reqs) {
       if (req && (req->flags() & MPI_REQ_RECV)) {
-        sender_receiver.push_back({req->src(), req->dst()});
+        sender_receiver.emplace_back(req->src(), req->dst());
      }
    }
    Request::waitall(count_requests, &(reqs.data())[0], MPI_STATUSES_IGNORE);
    req_storage.get_store().clear();
 
-    for (auto& pair : sender_receiver) {
+    for (auto const& pair : sender_receiver) {
      TRACE_smpi_recv(pair.first, pair.second, 0);
    }
-    TRACE_smpi_comm_out(my_proc_id);
+    TRACE_smpi_comm_out(get_pid());
  }
 }
 
 void BarrierAction::kernel(simgrid::xbt::ReplayAction&)
 {
-  TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("barrier"));
+  TRACE_smpi_comm_in(get_pid(), __func__, new simgrid::instr::NoOpTIData("barrier"));
   colls::barrier(MPI_COMM_WORLD);
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void BcastAction::kernel(simgrid::xbt::ReplayAction&)
 {
-  TRACE_smpi_comm_in(my_proc_id, "action_bcast",
-                     new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
-                                                    -1.0, args.size, -1, Datatype::encode(args.datatype1), ""));
+  const BcastArgParser& args = get_args();
+  TRACE_smpi_comm_in(get_pid(), "action_bcast",
+                     new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(), -1.0,
+                                                    args.size, -1, Datatype::encode(args.datatype1), ""));
 
   colls::bcast(send_buffer(args.size * args.datatype1->size()), args.size, args.datatype1, args.root, MPI_COMM_WORLD);
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void ReduceAction::kernel(simgrid::xbt::ReplayAction&)
 {
-  TRACE_smpi_comm_in(my_proc_id, "action_reduce",
-                     new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
-                                                    args.comp_size, args.comm_size, -1,
-                                                    Datatype::encode(args.datatype1), ""));
+  const ReduceArgParser& args = get_args();
+  TRACE_smpi_comm_in(get_pid(), "action_reduce",
+                     new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(),
+                                                    args.comp_size, args.comm_size, -1,
+                                                    Datatype::encode(args.datatype1), ""));
 
   colls::reduce(send_buffer(args.comm_size * args.datatype1->size()),
                 recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL,
                 args.root, MPI_COMM_WORLD);
   private_execute_flops(args.comp_size);
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void AllReduceAction::kernel(simgrid::xbt::ReplayAction&)
 {
-  TRACE_smpi_comm_in(my_proc_id, "action_allreduce", new simgrid::instr::CollTIData("allreduce", -1, args.comp_size, args.comm_size, -1,
-                                                                                    Datatype::encode(args.datatype1), ""));
+  const AllReduceArgParser& args = get_args();
+  TRACE_smpi_comm_in(get_pid(), "action_allreduce",
+                     new simgrid::instr::CollTIData("allreduce", -1, args.comp_size, args.comm_size, -1,
                                                    Datatype::encode(args.datatype1), ""));
 
   colls::allreduce(send_buffer(args.comm_size * args.datatype1->size()),
                    recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL,
                    MPI_COMM_WORLD);
   private_execute_flops(args.comp_size);
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void AllToAllAction::kernel(simgrid::xbt::ReplayAction&)
 {
-  TRACE_smpi_comm_in(my_proc_id, "action_alltoall",
-                     new simgrid::instr::CollTIData("alltoall", -1, -1.0, args.send_size, args.recv_size,
-                                                    Datatype::encode(args.datatype1),
-                                                    Datatype::encode(args.datatype2)));
+  const AllToAllArgParser& args = get_args();
+  TRACE_smpi_comm_in(get_pid(), "action_alltoall",
+                     new simgrid::instr::CollTIData("alltoall", -1, -1.0, args.send_size, args.recv_size,
+                                                    Datatype::encode(args.datatype1),
+                                                    Datatype::encode(args.datatype2)));
 
   colls::alltoall(send_buffer(args.send_size * args.comm_size * args.datatype1->size()), args.send_size,
                   args.datatype1, recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()),
                   args.recv_size, args.datatype2, MPI_COMM_WORLD);
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void GatherAction::kernel(simgrid::xbt::ReplayAction&)
 {
-  TRACE_smpi_comm_in(my_proc_id, name.c_str(), new simgrid::instr::CollTIData(name, (name == "gather") ? args.root : -1, -1.0, args.send_size, args.recv_size,
-                                                                              Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
+  const GatherArgParser& args = get_args();
+  TRACE_smpi_comm_in(get_pid(), get_name().c_str(),
+                     new simgrid::instr::CollTIData(get_name(), (get_name() == "gather") ? args.root : -1, -1.0,
+                                                    args.send_size, args.recv_size, Datatype::encode(args.datatype1),
+                                                    Datatype::encode(args.datatype2)));
 
-  if (name == "gather") {
+  if (get_name() == "gather") {
     int rank = MPI_COMM_WORLD->rank();
     colls::gather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
                   (rank == args.root) ? recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()) : nullptr,
                   args.recv_size, args.datatype2, args.root, MPI_COMM_WORLD);
-  }
-  else
+  } else
     colls::allgather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
                      recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2,
                      MPI_COMM_WORLD);
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void GatherVAction::kernel(simgrid::xbt::ReplayAction&)
 {
   int rank = MPI_COMM_WORLD->rank();
+  const GatherVArgParser& args = get_args();
+  TRACE_smpi_comm_in(get_pid(), get_name().c_str(),
+                     new simgrid::instr::VarCollTIData(
+                         get_name(), (get_name() == "gatherv") ? args.root : -1, args.send_size, nullptr, -1,
+                         args.recvcounts, Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
 
-  TRACE_smpi_comm_in(my_proc_id, name.c_str(), new simgrid::instr::VarCollTIData(
-                         name, (name == "gatherv") ? args.root : -1, args.send_size, nullptr, -1, args.recvcounts,
-                         Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
-
-  if (name == "gatherv") {
+  if (get_name() == "gatherv") {
     colls::gatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
                    (rank == args.root) ? recv_buffer(args.recv_size_sum * args.datatype2->size()) : nullptr,
                    args.recvcounts->data(), args.disps.data(), args.datatype2, args.root, MPI_COMM_WORLD);
-  }
-  else {
+  } else {
    colls::allgatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
                      recv_buffer(args.recv_size_sum * args.datatype2->size()), args.recvcounts->data(),
                      args.disps.data(), args.datatype2, MPI_COMM_WORLD);
  }
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void ScatterAction::kernel(simgrid::xbt::ReplayAction&)
 {
   int rank = MPI_COMM_WORLD->rank();
-  TRACE_smpi_comm_in(my_proc_id, "action_scatter", new simgrid::instr::CollTIData(name, args.root, -1.0, args.send_size, args.recv_size,
-                                                                                  Datatype::encode(args.datatype1),
-                                                                                  Datatype::encode(args.datatype2)));
+  const ScatterArgParser& args = get_args();
+  TRACE_smpi_comm_in(get_pid(), "action_scatter",
+                     new simgrid::instr::CollTIData(get_name(), args.root, -1.0, args.send_size, args.recv_size,
                                                    Datatype::encode(args.datatype1),
                                                    Datatype::encode(args.datatype2)));
 
   colls::scatter(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1,
                  (rank == args.root) ? recv_buffer(args.recv_size * args.datatype2->size()) : nullptr, args.recv_size,
                  args.datatype2, args.root, MPI_COMM_WORLD);
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void ScatterVAction::kernel(simgrid::xbt::ReplayAction&)
 {
   int rank = MPI_COMM_WORLD->rank();
-  TRACE_smpi_comm_in(my_proc_id, "action_scatterv", new simgrid::instr::VarCollTIData(name, args.root, -1, args.sendcounts, args.recv_size,
-                                                                                      nullptr, Datatype::encode(args.datatype1),
-                                                                                      Datatype::encode(args.datatype2)));
+  const ScatterVArgParser& args = get_args();
+  TRACE_smpi_comm_in(get_pid(), "action_scatterv",
+                     new simgrid::instr::VarCollTIData(get_name(), args.root, -1, args.sendcounts, args.recv_size,
                                                       nullptr, Datatype::encode(args.datatype1),
                                                       Datatype::encode(args.datatype2)));
 
   colls::scatterv((rank == args.root) ? send_buffer(args.send_size_sum * args.datatype1->size()) : nullptr,
                   args.sendcounts->data(), args.disps.data(), args.datatype1,
                   recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, args.root,
                   MPI_COMM_WORLD);
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void ReduceScatterAction::kernel(simgrid::xbt::ReplayAction&)
 {
-  TRACE_smpi_comm_in(my_proc_id, "action_reducescatter",
+  const ReduceScatterArgParser& args = get_args();
+  TRACE_smpi_comm_in(
+      get_pid(), "action_reducescatter",
       new simgrid::instr::VarCollTIData("reducescatter", -1, 0, nullptr, -1, args.recvcounts,
-                                        std::to_string(args.comp_size), /* ugly hack to print comp_size */
-                                        Datatype::encode(args.datatype1)));
+                                        std::to_string(args.comp_size), /* ugly hack to print comp_size */
+                                        Datatype::encode(args.datatype1)));
 
   colls::reduce_scatter(send_buffer(args.recv_size_sum * args.datatype1->size()),
                         recv_buffer(args.recv_size_sum * args.datatype1->size()), args.recvcounts->data(),
                         args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD);
 
   private_execute_flops(args.comp_size);
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 
 void AllToAllVAction::kernel(simgrid::xbt::ReplayAction&)
 {
-  TRACE_smpi_comm_in(my_proc_id, __func__,
-                     new simgrid::instr::VarCollTIData(
-                         "alltoallv", -1, args.send_size_sum, args.sendcounts, args.recv_size_sum, args.recvcounts,
-                         Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
+  const AllToAllVArgParser& args = get_args();
+  TRACE_smpi_comm_in(get_pid(), __func__,
+                     new simgrid::instr::VarCollTIData(
+                         "alltoallv", -1, args.send_size_sum, args.sendcounts, args.recv_size_sum, args.recvcounts,
+                         Datatype::encode(args.datatype1), Datatype::encode(args.datatype2)));
 
   colls::alltoallv(send_buffer(args.send_buf_size * args.datatype1->size()), args.sendcounts->data(),
                    args.senddisps.data(), args.datatype1, recv_buffer(args.recv_buf_size * args.datatype2->size()),
                    args.recvcounts->data(), args.recvdisps.data(), args.datatype2, MPI_COMM_WORLD);
 
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_comm_out(get_pid());
 }
 } // Replay Namespace
 }} // namespace simgrid::smpi
@@ -749,12 +767,9 @@ void smpi_replay_init(const char* instance_id, int rank, double start_delay_flop
   int my_proc_id = simgrid::s4u::this_actor::get_pid();
 
-  TRACE_smpi_init(my_proc_id);
-  TRACE_smpi_computing_init(my_proc_id);
-  TRACE_smpi_comm_in(my_proc_id, "smpi_replay_run_init", new simgrid::instr::NoOpTIData("init"));
-  TRACE_smpi_comm_out(my_proc_id);
+  TRACE_smpi_init(my_proc_id, "smpi_replay_run_init");
 
   xbt_replay_action_register("init", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::InitAction().execute(action); });
-  xbt_replay_action_register("finalize", [](simgrid::xbt::ReplayAction&) { /* nothing to do */ });
+  xbt_replay_action_register("finalize", [](simgrid::xbt::ReplayAction const&) { /* nothing to do */ });
   xbt_replay_action_register("comm_size", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
   xbt_replay_action_register("comm_split",[](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
   xbt_replay_action_register("comm_dup", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
@@ -806,15 +821,14 @@ void smpi_replay_main(int rank, const char* trace_filename)
   unsigned int count_requests = storage[simgrid::s4u::this_actor::get_pid()].size();
   XBT_DEBUG("There are %ud elements in reqq[*]", count_requests);
   if (count_requests > 0) {
-    MPI_Request* requests= new MPI_Request[count_requests];
+    std::vector<MPI_Request> requests(count_requests);
     unsigned int i=0;
 
     for (auto const& pair : storage[simgrid::s4u::this_actor::get_pid()].get_store()) {
       requests[i] = pair.second;
      i++;
    }
-    simgrid::smpi::Request::waitall(count_requests, requests, MPI_STATUSES_IGNORE);
-    delete[] requests;
+    simgrid::smpi::Request::waitall(count_requests, requests.data(), MPI_STATUSES_IGNORE);
   }
   active_processes--;