X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/c0794724502f4e5b214ae19bada5efd4eb0770f1..d7acbd4087c3b0a61d6e7e44b0a4db5b40772a71:/src/smpi/internals/smpi_replay.cpp

diff --git a/src/smpi/internals/smpi_replay.cpp b/src/smpi/internals/smpi_replay.cpp
index 2da2236c26..2d03b91272 100644
--- a/src/smpi/internals/smpi_replay.cpp
+++ b/src/smpi/internals/smpi_replay.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2009-2018. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -16,26 +16,31 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 using simgrid::s4u::Actor;
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_replay,smpi,"Trace Replay with SMPI");
 
-static int active_processes = 0;
 static std::unordered_map*> reqq;
 
 static MPI_Datatype MPI_DEFAULT_TYPE;
 
 #define CHECK_ACTION_PARAMS(action, mandatory, optional) \
 { \
-  if (action.size() < static_cast(mandatory + 2)) \
+  if (action.size() < static_cast(mandatory + 2)) { \
+    std::stringstream ss; \
+    for (const auto& elem : action) { \
+      ss << elem << " "; \
+    } \
     THROWF(arg_error, 0, "%s replay failed.\n" \
-           "%lu items were given on the line. First two should be process_id and action. " \
+           "%zu items were given on the line. First two should be process_id and action. " \
           "This action needs after them %lu mandatory arguments, and accepts %lu optional ones. \n" \
+           "The full line that was given is:\n %s\n" \
           "Please contact the Simgrid team if support is needed", \
-           __FUNCTION__, action.size(), static_cast(mandatory), \
-           static_cast(optional)); \
+           __func__, action.size(), static_cast(mandatory), static_cast(optional), ss.str().c_str()); \
+  }\
 }
 
 static void log_timed_action(simgrid::xbt::ReplayAction& action, double clock)
@@ -48,12 +53,12 @@ static void log_timed_action(simgrid::xbt::ReplayAction& action, double clock)
 
 static std::vector* get_reqq_self()
 {
-  return reqq.at(Actor::self()->getPid());
+  return reqq.at(simgrid::s4u::this_actor::getPid());
 }
 
 static void set_reqq_self(std::vector *mpi_request)
 {
-  reqq.insert({Actor::self()->getPid(), mpi_request});
+  reqq.insert({simgrid::s4u::this_actor::getPid(), mpi_request});
 }
 
 /* Helper function */
@@ -65,10 +70,11 @@ static double parse_double(std::string string)
 
 namespace simgrid {
 namespace smpi {
-namespace Replay {
+namespace replay {
 class ActionArgParser {
 public:
-  virtual void parse(simgrid::xbt::ReplayAction& action) { CHECK_ACTION_PARAMS(action, 0, 0) }
+  virtual ~ActionArgParser() = default;
+  virtual void parse(simgrid::xbt::ReplayAction& action, std::string name) { CHECK_ACTION_PARAMS(action, 0, 0) }
 };
 
 class SendRecvParser : public ActionArgParser {
@@ -78,7 +84,7 @@ public:
   double size;
   MPI_Datatype datatype1 = MPI_DEFAULT_TYPE;
 
-  void parse(simgrid::xbt::ReplayAction& action) override
+  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
   {
     CHECK_ACTION_PARAMS(action, 2, 1)
     partner = std::stoi(action[2]);
@@ -93,7 +99,7 @@ public:
   /* communication partner; if we send, this is the receiver and vice versa */
   double flops;
 
-  void parse(simgrid::xbt::ReplayAction& action) override
+  void parse(simgrid::xbt::ReplayAction& action, std::string name) override
   {
     CHECK_ACTION_PARAMS(action, 1, 0)
     flops = parse_double(action[2]);
@@ -105,6 +111,8 @@ public:
   double size;
   double comm_size;
   double comp_size;
+  int send_size;
+  int recv_size;
int root = 0; MPI_Datatype datatype1 = MPI_DEFAULT_TYPE; MPI_Datatype datatype2 = MPI_DEFAULT_TYPE; @@ -112,7 +120,7 @@ public: class BcastArgParser : public CollCommParser { public: - void parse(simgrid::xbt::ReplayAction& action) override + void parse(simgrid::xbt::ReplayAction& action, std::string name) override { CHECK_ACTION_PARAMS(action, 1, 2) size = parse_double(action[2]); @@ -124,7 +132,7 @@ public: class ReduceArgParser : public CollCommParser { public: - void parse(simgrid::xbt::ReplayAction& action) override + void parse(simgrid::xbt::ReplayAction& action, std::string name) override { CHECK_ACTION_PARAMS(action, 2, 2) comm_size = parse_double(action[2]); @@ -135,21 +143,275 @@ public: } }; +class AllReduceArgParser : public CollCommParser { +public: + void parse(simgrid::xbt::ReplayAction& action, std::string name) override + { + CHECK_ACTION_PARAMS(action, 2, 1) + comm_size = parse_double(action[2]); + comp_size = parse_double(action[3]); + if (action.size() > 4) + datatype1 = simgrid::smpi::Datatype::decode(action[4]); + } +}; + +class AllToAllArgParser : public CollCommParser { +public: + void parse(simgrid::xbt::ReplayAction& action, std::string name) override + { + CHECK_ACTION_PARAMS(action, 2, 1) + comm_size = MPI_COMM_WORLD->size(); + send_size = parse_double(action[2]); + recv_size = parse_double(action[3]); + + if (action.size() > 4) + datatype1 = simgrid::smpi::Datatype::decode(action[4]); + if (action.size() > 5) + datatype2 = simgrid::smpi::Datatype::decode(action[5]); + } +}; + +class GatherArgParser : public CollCommParser { +public: + void parse(simgrid::xbt::ReplayAction& action, std::string name) override + { + /* The structure of the gather action for the rank 0 (total 4 processes) is the following: + 0 gather 68 68 0 0 0 + where: + 1) 68 is the sendcounts + 2) 68 is the recvcounts + 3) 0 is the root node + 4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode() + 5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode() + */ + CHECK_ACTION_PARAMS(action, 2, 3) + comm_size = MPI_COMM_WORLD->size(); + send_size = parse_double(action[2]); + recv_size = parse_double(action[3]); + + if (name == "gather") { + root = (action.size() > 4) ? std::stoi(action[4]) : 0; + if (action.size() > 5) + datatype1 = simgrid::smpi::Datatype::decode(action[5]); + if (action.size() > 6) + datatype2 = simgrid::smpi::Datatype::decode(action[6]); + } + else { + if (action.size() > 4) + datatype1 = simgrid::smpi::Datatype::decode(action[4]); + if (action.size() > 5) + datatype2 = simgrid::smpi::Datatype::decode(action[5]); + } + } +}; + +class GatherVArgParser : public CollCommParser { +public: + int recv_size_sum; + std::shared_ptr> recvcounts; + std::vector disps; + void parse(simgrid::xbt::ReplayAction& action, std::string name) override + { + /* The structure of the gatherv action for the rank 0 (total 4 processes) is the following: + 0 gather 68 68 10 10 10 0 0 0 + where: + 1) 68 is the sendcount + 2) 68 10 10 10 is the recvcounts + 3) 0 is the root node + 4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode() + 5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode() + */ + comm_size = MPI_COMM_WORLD->size(); + CHECK_ACTION_PARAMS(action, comm_size+1, 2) + send_size = parse_double(action[2]); + disps = std::vector(comm_size, 0); + recvcounts = std::shared_ptr>(new std::vector(comm_size)); + + if (name == "gatherV") { + root = (action.size() > 3 + comm_size) ? 
std::stoi(action[3 + comm_size]) : 0; + if (action.size() > 4 + comm_size) + datatype1 = simgrid::smpi::Datatype::decode(action[4 + comm_size]); + if (action.size() > 5 + comm_size) + datatype2 = simgrid::smpi::Datatype::decode(action[5 + comm_size]); + } + else { + int datatype_index = 0; + int disp_index = 0; + /* The 3 comes from "0 gather ", which must always be present. + * The + comm_size is the recvcounts array, which must also be present + */ + if (action.size() > 3 + comm_size + comm_size) { /* datatype + disp are specified */ + datatype_index = 3 + comm_size; + disp_index = datatype_index + 1; + datatype1 = simgrid::smpi::Datatype::decode(action[datatype_index]); + datatype2 = simgrid::smpi::Datatype::decode(action[datatype_index]); + } else if (action.size() > 3 + comm_size + 2) { /* disps specified; datatype is not specified; use the default one */ + disp_index = 3 + comm_size; + } else if (action.size() > 3 + comm_size) { /* only datatype, no disp specified */ + datatype_index = 3 + comm_size; + datatype1 = simgrid::smpi::Datatype::decode(action[datatype_index]); + datatype2 = simgrid::smpi::Datatype::decode(action[datatype_index]); + } + + if (disp_index != 0) { + for (unsigned int i = 0; i < comm_size; i++) + disps[i] = std::stoi(action[disp_index + i]); + } + } + + for (unsigned int i = 0; i < comm_size; i++) { + (*recvcounts)[i] = std::stoi(action[i + 3]); + } + recv_size_sum = std::accumulate(recvcounts->begin(), recvcounts->end(), 0); + } +}; + +class ScatterArgParser : public CollCommParser { +public: + void parse(simgrid::xbt::ReplayAction& action, std::string name) override + { + /* The structure of the scatter action for the rank 0 (total 4 processes) is the following: + 0 gather 68 68 0 0 0 + where: + 1) 68 is the sendcounts + 2) 68 is the recvcounts + 3) 0 is the root node + 4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode() + 5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode() + */ + CHECK_ACTION_PARAMS(action, 2, 3) + comm_size = MPI_COMM_WORLD->size(); + send_size = parse_double(action[2]); + recv_size = parse_double(action[3]); + root = (action.size() > 4) ? 
std::stoi(action[4]) : 0; + if (action.size() > 5) + datatype1 = simgrid::smpi::Datatype::decode(action[5]); + if (action.size() > 6) + datatype2 = simgrid::smpi::Datatype::decode(action[6]); + } +}; + +class ScatterVArgParser : public CollCommParser { +public: + int recv_size_sum; + int send_size_sum; + std::shared_ptr> sendcounts; + std::vector disps; + void parse(simgrid::xbt::ReplayAction& action, std::string name) override + { + /* The structure of the scatterv action for the rank 0 (total 4 processes) is the following: + 0 gather 68 10 10 10 68 0 0 0 + where: + 1) 68 10 10 10 is the sendcounts + 2) 68 is the recvcount + 3) 0 is the root node + 4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode() + 5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode() + */ + CHECK_ACTION_PARAMS(action, comm_size + 1, 2) + recv_size = parse_double(action[2 + comm_size]); + disps = std::vector(comm_size, 0); + sendcounts = std::shared_ptr>(new std::vector(comm_size)); + + if (action.size() > 5 + comm_size) + datatype1 = simgrid::smpi::Datatype::decode(action[4 + comm_size]); + if (action.size() > 5 + comm_size) + datatype2 = simgrid::smpi::Datatype::decode(action[5]); + + for (unsigned int i = 0; i < comm_size; i++) { + (*sendcounts)[i] = std::stoi(action[i + 2]); + } + send_size_sum = std::accumulate(sendcounts->begin(), sendcounts->end(), 0); + root = (action.size() > 3 + comm_size) ? std::stoi(action[3 + comm_size]) : 0; + } +}; + +class ReduceScatterArgParser : public CollCommParser { +public: + int recv_size_sum; + std::shared_ptr> recvcounts; + std::vector disps; + void parse(simgrid::xbt::ReplayAction& action, std::string name) override + { + /* The structure of the reducescatter action for the rank 0 (total 4 processes) is the following: + 0 reduceScatter 275427 275427 275427 204020 11346849 0 + where: + 1) The first four values after the name of the action declare the recvcounts array + 2) The value 11346849 is the amount of instructions + 3) The last value corresponds to the datatype, see simgrid::smpi::Datatype::decode(). 
+ */ + comm_size = MPI_COMM_WORLD->size(); + CHECK_ACTION_PARAMS(action, comm_size+1, 1) + comp_size = parse_double(action[2+comm_size]); + recvcounts = std::shared_ptr>(new std::vector(comm_size)); + if (action.size() > 3 + comm_size) + datatype1 = simgrid::smpi::Datatype::decode(action[3 + comm_size]); + + for (unsigned int i = 0; i < comm_size; i++) { + recvcounts->push_back(std::stoi(action[i + 2])); + } + recv_size_sum = std::accumulate(recvcounts->begin(), recvcounts->end(), 0); + } +}; + +class AllToAllVArgParser : public CollCommParser { +public: + int recv_size_sum; + int send_size_sum; + std::shared_ptr> recvcounts; + std::shared_ptr> sendcounts; + std::vector senddisps; + std::vector recvdisps; + int send_buf_size; + int recv_buf_size; + void parse(simgrid::xbt::ReplayAction& action, std::string name) override + { + /* The structure of the allToAllV action for the rank 0 (total 4 processes) is the following: + 0 allToAllV 100 1 7 10 12 100 1 70 10 5 + where: + 1) 100 is the size of the send buffer *sizeof(int), + 2) 1 7 10 12 is the sendcounts array + 3) 100*sizeof(int) is the size of the receiver buffer + 4) 1 70 10 5 is the recvcounts array + */ + comm_size = MPI_COMM_WORLD->size(); + CHECK_ACTION_PARAMS(action, 2*comm_size+2, 2) + sendcounts = std::shared_ptr>(new std::vector(comm_size)); + recvcounts = std::shared_ptr>(new std::vector(comm_size)); + senddisps = std::vector(comm_size, 0); + recvdisps = std::vector(comm_size, 0); + + if (action.size() > 5 + 2 * comm_size) + datatype1 = simgrid::smpi::Datatype::decode(action[4 + 2 * comm_size]); + if (action.size() > 5 + 2 * comm_size) + datatype2 = simgrid::smpi::Datatype::decode(action[5 + 2 * comm_size]); + + send_buf_size=parse_double(action[2]); + recv_buf_size=parse_double(action[3+comm_size]); + for (unsigned int i = 0; i < comm_size; i++) { + (*sendcounts)[i] = std::stoi(action[3 + i]); + (*recvcounts)[i] = std::stoi(action[4 + comm_size + i]); + } + send_size_sum = std::accumulate(sendcounts->begin(), sendcounts->end(), 0); + recv_size_sum = std::accumulate(recvcounts->begin(), recvcounts->end(), 0); + } +}; + template class ReplayAction { protected: const std::string name; + const int my_proc_id; T args; - int my_proc_id; - public: - explicit ReplayAction(std::string name) : name(name), my_proc_id(simgrid::s4u::Actor::self()->getPid()) {} + explicit ReplayAction(std::string name) : name(name), my_proc_id(simgrid::s4u::this_actor::getPid()) {} + virtual ~ReplayAction() = default; virtual void execute(simgrid::xbt::ReplayAction& action) { // Needs to be re-initialized for every action, hence here double start_time = smpi_process()->simulated_elapsed(); - args.parse(action); + args.parse(action, name); kernel(action); if (name != "Init") log_timed_action(action, start_time); @@ -192,7 +454,7 @@ public: int dst = request->comm()->group()->rank(request->dst()); bool is_wait_for_receive = (request->flags() & RECV); // TODO: Here we take the rank while we normally take the process id (look for my_proc_id) - TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::NoOpTIData("wait")); + TRACE_smpi_comm_in(rank, __func__, new simgrid::instr::NoOpTIData("wait")); MPI_Status status; Request::wait(&request, &status); @@ -206,13 +468,13 @@ public: class SendAction : public ReplayAction { public: SendAction() = delete; - SendAction(std::string name) : ReplayAction(name) {} + explicit SendAction(std::string name) : ReplayAction(name) {} void kernel(simgrid::xbt::ReplayAction& action) override { - int dst_traced = 
MPI_COMM_WORLD->group()->actor(args.partner)->getPid(); + int dst_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid(); - TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size, - Datatype::encode(args.datatype1))); + TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size, + Datatype::encode(args.datatype1))); if (not TRACE_smpi_view_internals()) TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, 0, args.size * args.datatype1->size()); @@ -235,10 +497,10 @@ public: explicit RecvAction(std::string name) : ReplayAction(name) {} void kernel(simgrid::xbt::ReplayAction& action) override { - int src_traced = MPI_COMM_WORLD->group()->actor(args.partner)->getPid(); + int src_traced = MPI_COMM_WORLD->group()->actor(args.partner)->get_pid(); - TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size, - Datatype::encode(args.datatype1))); + TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData(name, args.partner, args.size, + Datatype::encode(args.datatype1))); MPI_Status status; // unknown size from the receiver point of view @@ -310,9 +572,6 @@ public: /* start a simulated timer */ smpi_process()->simulated_start(); - /*initialize the number of active processes */ - active_processes = smpi_process_count(); - set_reqq_self(new std::vector); } }; @@ -331,8 +590,7 @@ public: const unsigned int count_requests = get_reqq_self()->size(); if (count_requests > 0) { - TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, - new simgrid::instr::Pt2PtTIData("waitAll", -1, count_requests, "")); + TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::Pt2PtTIData("waitAll", -1, count_requests, "")); std::vector> sender_receiver; for (const auto& req : (*get_reqq_self())) { if (req && (req->flags() & RECV)) { @@ -355,7 +613,7 @@ public: BarrierAction() : ReplayAction("barrier") {} void kernel(simgrid::xbt::ReplayAction& action) override { - TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::NoOpTIData("barrier")); + TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("barrier")); Colls::barrier(MPI_COMM_WORLD); TRACE_smpi_comm_out(my_proc_id); } @@ -367,7 +625,7 @@ public: void kernel(simgrid::xbt::ReplayAction& action) override { TRACE_smpi_comm_in(my_proc_id, "action_bcast", - new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->getPid(), + new simgrid::instr::CollTIData("bcast", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(), -1.0, args.size, -1, Datatype::encode(args.datatype1), "")); Colls::bcast(send_buffer(args.size * args.datatype1->size()), args.size, args.datatype1, args.root, MPI_COMM_WORLD); @@ -382,8 +640,9 @@ public: void kernel(simgrid::xbt::ReplayAction& action) override { TRACE_smpi_comm_in(my_proc_id, "action_reduce", - new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->getPid(), args.comp_size, - args.comm_size, -1, Datatype::encode(args.datatype1), "")); + new simgrid::instr::CollTIData("reduce", MPI_COMM_WORLD->group()->actor(args.root)->get_pid(), + args.comp_size, args.comm_size, -1, + Datatype::encode(args.datatype1), "")); Colls::reduce(send_buffer(args.comm_size * args.datatype1->size()), recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL, args.root, MPI_COMM_WORLD); @@ -393,401 +652,160 @@ public: } }; -class AllReduceAction : public ReplayAction { +class 
AllReduceAction : public ReplayAction { public: - AllReduceAction() : ReplayAction("barrier") {} + AllReduceAction() : ReplayAction("allReduce") {} void kernel(simgrid::xbt::ReplayAction& action) override { - TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, new simgrid::instr::CollTIData("allReduce", -1, comp_size, comm_size, -1, - Datatype::encode(MPI_CURRENT_TYPE), "")); + TRACE_smpi_comm_in(my_proc_id, "action_allReduce", new simgrid::instr::CollTIData("allReduce", -1, args.comp_size, args.comm_size, -1, + Datatype::encode(args.datatype1), "")); - Colls::allreduce(send_buffer(args.comm_size * args.datatype1->size()), + Colls::allreduce(send_buffer(args.comm_size * args.datatype1->size()), recv_buffer(args.comm_size * args.datatype1->size()), args.comm_size, args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD); smpi_execute_flops(args.comp_size); TRACE_smpi_comm_out(my_proc_id); } }; -} // Replay Namespace -static void action_allToAll(simgrid::xbt::ReplayAction& action) -{ - CHECK_ACTION_PARAMS(action, 2, 2) //two mandatory (send and recv volumes) and two optional (corresponding datatypes) - double clock = smpi_process()->simulated_elapsed(); - unsigned long comm_size = MPI_COMM_WORLD->size(); - int send_size = parse_double(action[2]); - int recv_size = parse_double(action[3]); - MPI_Datatype MPI_CURRENT_TYPE{(action.size() > 5) ? simgrid::smpi::Datatype::decode(action[4]) : MPI_DEFAULT_TYPE}; - MPI_Datatype MPI_CURRENT_TYPE2{(action.size() > 5) ? simgrid::smpi::Datatype::decode(action[5]) : MPI_DEFAULT_TYPE}; - - void *send = smpi_get_tmp_sendbuffer(send_size*comm_size* MPI_CURRENT_TYPE->size()); - void *recv = smpi_get_tmp_recvbuffer(recv_size*comm_size* MPI_CURRENT_TYPE2->size()); - - int my_proc_id = Actor::self()->getPid(); - TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, - new simgrid::instr::CollTIData("allToAll", -1, -1.0, send_size, recv_size, - Datatype::encode(MPI_CURRENT_TYPE), - Datatype::encode(MPI_CURRENT_TYPE2))); +class AllToAllAction : public ReplayAction { +public: + AllToAllAction() : ReplayAction("allToAll") {} + void kernel(simgrid::xbt::ReplayAction& action) override + { + TRACE_smpi_comm_in(my_proc_id, "action_allToAll", + new simgrid::instr::CollTIData("allToAll", -1, -1.0, args.send_size, args.recv_size, + Datatype::encode(args.datatype1), + Datatype::encode(args.datatype2))); - Colls::alltoall(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, MPI_COMM_WORLD); + Colls::alltoall(send_buffer(args.send_size * args.comm_size * args.datatype1->size()), args.send_size, + args.datatype1, recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()), + args.recv_size, args.datatype2, MPI_COMM_WORLD); - TRACE_smpi_comm_out(my_proc_id); - log_timed_action (action, clock); -} + TRACE_smpi_comm_out(my_proc_id); + } +}; -static void action_gather(simgrid::xbt::ReplayAction& action) -{ - /* The structure of the gather action for the rank 0 (total 4 processes) is the following: - 0 gather 68 68 0 0 0 - where: - 1) 68 is the sendcounts - 2) 68 is the recvcounts - 3) 0 is the root node - 4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode() - 5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode() - */ - CHECK_ACTION_PARAMS(action, 2, 3) - double clock = smpi_process()->simulated_elapsed(); - unsigned long comm_size = MPI_COMM_WORLD->size(); - int send_size = parse_double(action[2]); - int recv_size = parse_double(action[3]); - MPI_Datatype MPI_CURRENT_TYPE{(action.size() > 6) ? 
simgrid::smpi::Datatype::decode(action[5]) : MPI_DEFAULT_TYPE}; - MPI_Datatype MPI_CURRENT_TYPE2{(action.size() > 6) ? simgrid::smpi::Datatype::decode(action[6]) : MPI_DEFAULT_TYPE}; - - void *send = smpi_get_tmp_sendbuffer(send_size* MPI_CURRENT_TYPE->size()); - void *recv = nullptr; - int root = (action.size() > 4) ? std::stoi(action[4]) : 0; - int rank = MPI_COMM_WORLD->rank(); - - if(rank==root) - recv = smpi_get_tmp_recvbuffer(recv_size*comm_size* MPI_CURRENT_TYPE2->size()); - - TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::CollTIData("gather", root, -1.0, send_size, recv_size, - Datatype::encode(MPI_CURRENT_TYPE), - Datatype::encode(MPI_CURRENT_TYPE2))); - - Colls::gather(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, root, MPI_COMM_WORLD); - - TRACE_smpi_comm_out(Actor::self()->getPid()); - log_timed_action (action, clock); -} +class GatherAction : public ReplayAction { +public: + explicit GatherAction(std::string name) : ReplayAction(name) {} + void kernel(simgrid::xbt::ReplayAction& action) override + { + TRACE_smpi_comm_in(my_proc_id, name.c_str(), new simgrid::instr::CollTIData(name, (name == "gather") ? args.root : -1, -1.0, args.send_size, args.recv_size, + Datatype::encode(args.datatype1), Datatype::encode(args.datatype2))); -static void action_scatter(simgrid::xbt::ReplayAction& action) -{ - /* The structure of the scatter action for the rank 0 (total 4 processes) is the following: - 0 gather 68 68 0 0 0 - where: - 1) 68 is the sendcounts - 2) 68 is the recvcounts - 3) 0 is the root node - 4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode() - 5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode() - */ - CHECK_ACTION_PARAMS(action, 2, 3) - double clock = smpi_process()->simulated_elapsed(); - unsigned long comm_size = MPI_COMM_WORLD->size(); - int send_size = parse_double(action[2]); - int recv_size = parse_double(action[3]); - MPI_Datatype MPI_CURRENT_TYPE{(action.size() > 6) ? simgrid::smpi::Datatype::decode(action[5]) : MPI_DEFAULT_TYPE}; - MPI_Datatype MPI_CURRENT_TYPE2{(action.size() > 6) ? simgrid::smpi::Datatype::decode(action[6]) : MPI_DEFAULT_TYPE}; - - void* send = smpi_get_tmp_sendbuffer(send_size * MPI_CURRENT_TYPE->size()); - void* recv = nullptr; - int root = (action.size() > 4) ? std::stoi(action[4]) : 0; - int rank = MPI_COMM_WORLD->rank(); - - if (rank == root) - recv = smpi_get_tmp_recvbuffer(recv_size * comm_size * MPI_CURRENT_TYPE2->size()); - - TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::CollTIData("gather", root, -1.0, send_size, recv_size, - Datatype::encode(MPI_CURRENT_TYPE), - Datatype::encode(MPI_CURRENT_TYPE2))); - - Colls::scatter(send, send_size, MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, root, MPI_COMM_WORLD); - - TRACE_smpi_comm_out(Actor::self()->getPid()); - log_timed_action(action, clock); -} + if (name == "gather") { + int rank = MPI_COMM_WORLD->rank(); + Colls::gather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1, + (rank == args.root) ? 
recv_buffer(args.recv_size * args.comm_size * args.datatype2->size()) : nullptr, args.recv_size, args.datatype2, args.root, MPI_COMM_WORLD); + } + else + Colls::allgather(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1, + recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, MPI_COMM_WORLD); -static void action_gatherv(simgrid::xbt::ReplayAction& action) -{ - /* The structure of the gatherv action for the rank 0 (total 4 processes) is the following: - 0 gather 68 68 10 10 10 0 0 0 - where: - 1) 68 is the sendcount - 2) 68 10 10 10 is the recvcounts - 3) 0 is the root node - 4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode() - 5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode() - */ - double clock = smpi_process()->simulated_elapsed(); - unsigned long comm_size = MPI_COMM_WORLD->size(); - CHECK_ACTION_PARAMS(action, comm_size+1, 2) - int send_size = parse_double(action[2]); - std::vector disps(comm_size, 0); - std::shared_ptr> recvcounts(new std::vector(comm_size)); - - MPI_Datatype MPI_CURRENT_TYPE = - (action.size() > 5 + comm_size) ? simgrid::smpi::Datatype::decode(action[4 + comm_size]) : MPI_DEFAULT_TYPE; - MPI_Datatype MPI_CURRENT_TYPE2{ - (action.size() > 5 + comm_size) ? simgrid::smpi::Datatype::decode(action[5 + comm_size]) : MPI_DEFAULT_TYPE}; - - void *send = smpi_get_tmp_sendbuffer(send_size* MPI_CURRENT_TYPE->size()); - void *recv = nullptr; - for (unsigned int i = 0; i < comm_size; i++) { - (*recvcounts)[i] = std::stoi(action[i + 3]); - } - int recv_sum = std::accumulate(recvcounts->begin(), recvcounts->end(), 0); - - int root = (action.size() > 3 + comm_size) ? std::stoi(action[3 + comm_size]) : 0; - int rank = MPI_COMM_WORLD->rank(); - - if(rank==root) - recv = smpi_get_tmp_recvbuffer(recv_sum* MPI_CURRENT_TYPE2->size()); - - TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::VarCollTIData( - "gatherV", root, send_size, nullptr, -1, recvcounts, - Datatype::encode(MPI_CURRENT_TYPE), Datatype::encode(MPI_CURRENT_TYPE2))); - - Colls::gatherv(send, send_size, MPI_CURRENT_TYPE, recv, recvcounts->data(), disps.data(), MPI_CURRENT_TYPE2, root, - MPI_COMM_WORLD); - - TRACE_smpi_comm_out(Actor::self()->getPid()); - log_timed_action (action, clock); -} + TRACE_smpi_comm_out(my_proc_id); + } +}; -static void action_scatterv(simgrid::xbt::ReplayAction& action) -{ - /* The structure of the scatterv action for the rank 0 (total 4 processes) is the following: - 0 gather 68 10 10 10 68 0 0 0 - where: - 1) 68 10 10 10 is the sendcounts - 2) 68 is the recvcount - 3) 0 is the root node - 4) 0 is the send datatype id, see simgrid::smpi::Datatype::decode() - 5) 0 is the recv datatype id, see simgrid::smpi::Datatype::decode() - */ - double clock = smpi_process()->simulated_elapsed(); - unsigned long comm_size = MPI_COMM_WORLD->size(); - CHECK_ACTION_PARAMS(action, comm_size + 1, 2) - int recv_size = parse_double(action[2 + comm_size]); - std::vector disps(comm_size, 0); - std::shared_ptr> sendcounts(new std::vector(comm_size)); - - MPI_Datatype MPI_CURRENT_TYPE = - (action.size() > 5 + comm_size) ? simgrid::smpi::Datatype::decode(action[4 + comm_size]) : MPI_DEFAULT_TYPE; - MPI_Datatype MPI_CURRENT_TYPE2{ - (action.size() > 5 + comm_size) ? 
simgrid::smpi::Datatype::decode(action[5 + comm_size]) : MPI_DEFAULT_TYPE}; - - void* send = nullptr; - void* recv = smpi_get_tmp_recvbuffer(recv_size * MPI_CURRENT_TYPE->size()); - for (unsigned int i = 0; i < comm_size; i++) { - (*sendcounts)[i] = std::stoi(action[i + 2]); - } - int send_sum = std::accumulate(sendcounts->begin(), sendcounts->end(), 0); - - int root = (action.size() > 3 + comm_size) ? std::stoi(action[3 + comm_size]) : 0; - int rank = MPI_COMM_WORLD->rank(); - - if (rank == root) - send = smpi_get_tmp_sendbuffer(send_sum * MPI_CURRENT_TYPE2->size()); - - TRACE_smpi_comm_in(rank, __FUNCTION__, new simgrid::instr::VarCollTIData("gatherV", root, -1, sendcounts, recv_size, - nullptr, Datatype::encode(MPI_CURRENT_TYPE), - Datatype::encode(MPI_CURRENT_TYPE2))); - - Colls::scatterv(send, sendcounts->data(), disps.data(), MPI_CURRENT_TYPE, recv, recv_size, MPI_CURRENT_TYPE2, root, - MPI_COMM_WORLD); - - TRACE_smpi_comm_out(Actor::self()->getPid()); - log_timed_action(action, clock); -} +class GatherVAction : public ReplayAction { +public: + explicit GatherVAction(std::string name) : ReplayAction(name) {} + void kernel(simgrid::xbt::ReplayAction& action) override + { + int rank = MPI_COMM_WORLD->rank(); -static void action_reducescatter(simgrid::xbt::ReplayAction& action) -{ - /* The structure of the reducescatter action for the rank 0 (total 4 processes) is the following: - 0 reduceScatter 275427 275427 275427 204020 11346849 0 - where: - 1) The first four values after the name of the action declare the recvcounts array - 2) The value 11346849 is the amount of instructions - 3) The last value corresponds to the datatype, see simgrid::smpi::Datatype::decode(). - */ - double clock = smpi_process()->simulated_elapsed(); - unsigned long comm_size = MPI_COMM_WORLD->size(); - CHECK_ACTION_PARAMS(action, comm_size+1, 1) - int comp_size = parse_double(action[2+comm_size]); - int my_proc_id = Actor::self()->getPid(); - std::shared_ptr> recvcounts(new std::vector); - MPI_Datatype MPI_CURRENT_TYPE = - (action.size() > 3 + comm_size) ? simgrid::smpi::Datatype::decode(action[3 + comm_size]) : MPI_DEFAULT_TYPE; - - for (unsigned int i = 0; i < comm_size; i++) { - recvcounts->push_back(std::stoi(action[i + 2])); - } - int size{std::accumulate(recvcounts->begin(), recvcounts->end(), 0)}; - - TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, - new simgrid::instr::VarCollTIData("reduceScatter", -1, 0, nullptr, -1, recvcounts, - std::to_string(comp_size), /* ugly hack to print comp_size */ - Datatype::encode(MPI_CURRENT_TYPE))); - - void *sendbuf = smpi_get_tmp_sendbuffer(size* MPI_CURRENT_TYPE->size()); - void *recvbuf = smpi_get_tmp_recvbuffer(size* MPI_CURRENT_TYPE->size()); - - Colls::reduce_scatter(sendbuf, recvbuf, recvcounts->data(), MPI_CURRENT_TYPE, MPI_OP_NULL, MPI_COMM_WORLD); - smpi_execute_flops(comp_size); + TRACE_smpi_comm_in(my_proc_id, name.c_str(), new simgrid::instr::VarCollTIData( + name, (name == "gatherV") ? args.root : -1, args.send_size, nullptr, -1, args.recvcounts, + Datatype::encode(args.datatype1), Datatype::encode(args.datatype2))); - TRACE_smpi_comm_out(my_proc_id); - log_timed_action (action, clock); -} + if (name == "gatherV") { + Colls::gatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1, + (rank == args.root) ? 
recv_buffer(args.recv_size_sum * args.datatype2->size()) : nullptr, + args.recvcounts->data(), args.disps.data(), args.datatype2, args.root, MPI_COMM_WORLD); + } + else { + Colls::allgatherv(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1, + recv_buffer(args.recv_size_sum * args.datatype2->size()), args.recvcounts->data(), + args.disps.data(), args.datatype2, MPI_COMM_WORLD); + } -static void action_allgather(simgrid::xbt::ReplayAction& action) -{ - /* The structure of the allgather action for the rank 0 (total 4 processes) is the following: - 0 allGather 275427 275427 - where: - 1) 275427 is the sendcount - 2) 275427 is the recvcount - 3) No more values mean that the datatype for sent and receive buffer is the default one, see - simgrid::smpi::Datatype::decode(). - */ - double clock = smpi_process()->simulated_elapsed(); + TRACE_smpi_comm_out(my_proc_id); + } +}; - CHECK_ACTION_PARAMS(action, 2, 2) - int sendcount = std::stoi(action[2]); - int recvcount = std::stoi(action[3]); +class ScatterAction : public ReplayAction { +public: + ScatterAction() : ReplayAction("scatter") {} + void kernel(simgrid::xbt::ReplayAction& action) override + { + int rank = MPI_COMM_WORLD->rank(); + TRACE_smpi_comm_in(my_proc_id, "action_scatter", new simgrid::instr::CollTIData(name, args.root, -1.0, args.send_size, args.recv_size, + Datatype::encode(args.datatype1), + Datatype::encode(args.datatype2))); - MPI_Datatype MPI_CURRENT_TYPE{(action.size() > 5) ? simgrid::smpi::Datatype::decode(action[4]) : MPI_DEFAULT_TYPE}; - MPI_Datatype MPI_CURRENT_TYPE2{(action.size() > 5) ? simgrid::smpi::Datatype::decode(action[5]) : MPI_DEFAULT_TYPE}; + Colls::scatter(send_buffer(args.send_size * args.datatype1->size()), args.send_size, args.datatype1, + (rank == args.root) ? recv_buffer(args.recv_size * args.datatype2->size()) : nullptr, args.recv_size, args.datatype2, args.root, MPI_COMM_WORLD); - void *sendbuf = smpi_get_tmp_sendbuffer(sendcount* MPI_CURRENT_TYPE->size()); - void *recvbuf = smpi_get_tmp_recvbuffer(recvcount* MPI_CURRENT_TYPE2->size()); + TRACE_smpi_comm_out(my_proc_id); + } +}; - int my_proc_id = Actor::self()->getPid(); - TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, - new simgrid::instr::CollTIData("allGather", -1, -1.0, sendcount, recvcount, - Datatype::encode(MPI_CURRENT_TYPE), - Datatype::encode(MPI_CURRENT_TYPE2))); +class ScatterVAction : public ReplayAction { +public: + ScatterVAction() : ReplayAction("scatterV") {} + void kernel(simgrid::xbt::ReplayAction& action) override + { + int rank = MPI_COMM_WORLD->rank(); + TRACE_smpi_comm_in(my_proc_id, "action_scatterv", new simgrid::instr::VarCollTIData(name, args.root, -1, args.sendcounts, args.recv_size, + nullptr, Datatype::encode(args.datatype1), + Datatype::encode(args.datatype2))); + + Colls::scatterv((rank == args.root) ? 
send_buffer(args.send_size_sum * args.datatype1->size()) : nullptr, + args.sendcounts->data(), args.disps.data(), args.datatype1, + recv_buffer(args.recv_size * args.datatype2->size()), args.recv_size, args.datatype2, args.root, + MPI_COMM_WORLD); - Colls::allgather(sendbuf, sendcount, MPI_CURRENT_TYPE, recvbuf, recvcount, MPI_CURRENT_TYPE2, MPI_COMM_WORLD); + TRACE_smpi_comm_out(my_proc_id); + } +}; - TRACE_smpi_comm_out(my_proc_id); - log_timed_action (action, clock); -} +class ReduceScatterAction : public ReplayAction { +public: + ReduceScatterAction() : ReplayAction("reduceScatter") {} + void kernel(simgrid::xbt::ReplayAction& action) override + { + TRACE_smpi_comm_in(my_proc_id, "action_reducescatter", + new simgrid::instr::VarCollTIData("reduceScatter", -1, 0, nullptr, -1, args.recvcounts, + std::to_string(args.comp_size), /* ugly hack to print comp_size */ + Datatype::encode(args.datatype1))); -static void action_allgatherv(simgrid::xbt::ReplayAction& action) -{ - /* The structure of the allgatherv action for the rank 0 (total 4 processes) is the following: - 0 allGatherV 275427 275427 275427 275427 204020 - where: - 1) 275427 is the sendcount - 2) The next four elements declare the recvcounts array - 3) No more values mean that the datatype for sent and receive buffer is the default one, see - simgrid::smpi::Datatype::decode(). - */ - double clock = smpi_process()->simulated_elapsed(); - - unsigned long comm_size = MPI_COMM_WORLD->size(); - CHECK_ACTION_PARAMS(action, comm_size+1, 2) - int sendcount = std::stoi(action[2]); - std::shared_ptr> recvcounts(new std::vector(comm_size)); - std::vector disps(comm_size, 0); - - int datatype_index = 0, disp_index = 0; - if (action.size() > 3 + 2 * comm_size) { /* datatype + disp are specified */ - datatype_index = 3 + comm_size; - disp_index = datatype_index + 1; - } else if (action.size() > 3 + 2 * comm_size) { /* disps specified; datatype is not specified; use the default one */ - datatype_index = -1; - disp_index = 3 + comm_size; - } else if (action.size() > 3 + comm_size) { /* only datatype, no disp specified */ - datatype_index = 3 + comm_size; - } - - if (disp_index != 0) { - for (unsigned int i = 0; i < comm_size; i++) - disps[i] = std::stoi(action[disp_index + i]); - } - - MPI_Datatype MPI_CURRENT_TYPE{(datatype_index > 0) ? simgrid::smpi::Datatype::decode(action[datatype_index]) - : MPI_DEFAULT_TYPE}; - MPI_Datatype MPI_CURRENT_TYPE2{(datatype_index > 0) ? 
simgrid::smpi::Datatype::decode(action[datatype_index]) - : MPI_DEFAULT_TYPE}; - - void *sendbuf = smpi_get_tmp_sendbuffer(sendcount* MPI_CURRENT_TYPE->size()); - - for (unsigned int i = 0; i < comm_size; i++) { - (*recvcounts)[i] = std::stoi(action[i + 3]); - } - int recv_sum = std::accumulate(recvcounts->begin(), recvcounts->end(), 0); - void *recvbuf = smpi_get_tmp_recvbuffer(recv_sum* MPI_CURRENT_TYPE2->size()); - - int my_proc_id = Actor::self()->getPid(); - - TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, - new simgrid::instr::VarCollTIData("allGatherV", -1, sendcount, nullptr, -1, recvcounts, - Datatype::encode(MPI_CURRENT_TYPE), - Datatype::encode(MPI_CURRENT_TYPE2))); - - Colls::allgatherv(sendbuf, sendcount, MPI_CURRENT_TYPE, recvbuf, recvcounts->data(), disps.data(), MPI_CURRENT_TYPE2, - MPI_COMM_WORLD); + Colls::reduce_scatter(send_buffer(args.recv_size_sum * args.datatype1->size()), + recv_buffer(args.recv_size_sum * args.datatype1->size()), args.recvcounts->data(), + args.datatype1, MPI_OP_NULL, MPI_COMM_WORLD); - TRACE_smpi_comm_out(my_proc_id); - log_timed_action (action, clock); -} + smpi_execute_flops(args.comp_size); + TRACE_smpi_comm_out(my_proc_id); + } +}; -static void action_allToAllv(simgrid::xbt::ReplayAction& action) -{ - /* The structure of the allToAllV action for the rank 0 (total 4 processes) is the following: - 0 allToAllV 100 1 7 10 12 100 1 70 10 5 - where: - 1) 100 is the size of the send buffer *sizeof(int), - 2) 1 7 10 12 is the sendcounts array - 3) 100*sizeof(int) is the size of the receiver buffer - 4) 1 70 10 5 is the recvcounts array - */ - double clock = smpi_process()->simulated_elapsed(); - - unsigned long comm_size = MPI_COMM_WORLD->size(); - CHECK_ACTION_PARAMS(action, 2*comm_size+2, 2) - std::shared_ptr> sendcounts(new std::vector(comm_size)); - std::shared_ptr> recvcounts(new std::vector(comm_size)); - std::vector senddisps(comm_size, 0); - std::vector recvdisps(comm_size, 0); - - MPI_Datatype MPI_CURRENT_TYPE = (action.size() > 5 + 2 * comm_size) - ? simgrid::smpi::Datatype::decode(action[4 + 2 * comm_size]) - : MPI_DEFAULT_TYPE; - MPI_Datatype MPI_CURRENT_TYPE2{(action.size() > 5 + 2 * comm_size) - ? 
simgrid::smpi::Datatype::decode(action[5 + 2 * comm_size]) - : MPI_DEFAULT_TYPE}; - - int send_buf_size=parse_double(action[2]); - int recv_buf_size=parse_double(action[3+comm_size]); - int my_proc_id = Actor::self()->getPid(); - void *sendbuf = smpi_get_tmp_sendbuffer(send_buf_size* MPI_CURRENT_TYPE->size()); - void *recvbuf = smpi_get_tmp_recvbuffer(recv_buf_size* MPI_CURRENT_TYPE2->size()); - - for (unsigned int i = 0; i < comm_size; i++) { - (*sendcounts)[i] = std::stoi(action[3 + i]); - (*recvcounts)[i] = std::stoi(action[4 + comm_size + i]); - } - int send_size = std::accumulate(sendcounts->begin(), sendcounts->end(), 0); - int recv_size = std::accumulate(recvcounts->begin(), recvcounts->end(), 0); - - TRACE_smpi_comm_in(my_proc_id, __FUNCTION__, - new simgrid::instr::VarCollTIData("allToAllV", -1, send_size, sendcounts, recv_size, recvcounts, - Datatype::encode(MPI_CURRENT_TYPE), - Datatype::encode(MPI_CURRENT_TYPE2))); - - Colls::alltoallv(sendbuf, sendcounts->data(), senddisps.data(), MPI_CURRENT_TYPE, recvbuf, recvcounts->data(), - recvdisps.data(), MPI_CURRENT_TYPE, MPI_COMM_WORLD); +class AllToAllVAction : public ReplayAction { +public: + AllToAllVAction() : ReplayAction("allToAllV") {} + void kernel(simgrid::xbt::ReplayAction& action) override + { + TRACE_smpi_comm_in(my_proc_id, __func__, + new simgrid::instr::VarCollTIData( + "allToAllV", -1, args.send_size_sum, args.sendcounts, args.recv_size_sum, args.recvcounts, + Datatype::encode(args.datatype1), Datatype::encode(args.datatype2))); - TRACE_smpi_comm_out(my_proc_id); - log_timed_action (action, clock); -} + Colls::alltoallv(send_buffer(args.send_buf_size * args.datatype1->size()), args.sendcounts->data(), args.senddisps.data(), args.datatype1, + recv_buffer(args.recv_buf_size * args.datatype2->size()), args.recvcounts->data(), args.recvdisps.data(), args.datatype2, MPI_COMM_WORLD); + TRACE_smpi_comm_out(my_proc_id); + } +}; +} // Replay Namespace }} // namespace simgrid::smpi /** @brief Only initialize the replay, don't do it for real */ @@ -797,38 +815,38 @@ void smpi_replay_init(int* argc, char*** argv) smpi_process()->mark_as_initialized(); smpi_process()->set_replaying(true); - int my_proc_id = Actor::self()->getPid(); + int my_proc_id = simgrid::s4u::this_actor::getPid(); TRACE_smpi_init(my_proc_id); TRACE_smpi_computing_init(my_proc_id); TRACE_smpi_comm_in(my_proc_id, "smpi_replay_run_init", new simgrid::instr::NoOpTIData("init")); TRACE_smpi_comm_out(my_proc_id); - xbt_replay_action_register("init", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::InitAction().execute(action); }); + xbt_replay_action_register("init", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::InitAction().execute(action); }); xbt_replay_action_register("finalize", [](simgrid::xbt::ReplayAction& action) { /* nothing to do */ }); - xbt_replay_action_register("comm_size", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::CommunicatorAction().execute(action); }); - xbt_replay_action_register("comm_split",[](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::CommunicatorAction().execute(action); }); - xbt_replay_action_register("comm_dup", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::CommunicatorAction().execute(action); }); - - xbt_replay_action_register("send", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::SendAction("send").execute(action); }); - xbt_replay_action_register("Isend", [](simgrid::xbt::ReplayAction& action) { 
simgrid::smpi::Replay::SendAction("Isend").execute(action); });
-  xbt_replay_action_register("recv",      [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::RecvAction("recv").execute(action); });
-  xbt_replay_action_register("Irecv",     [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::RecvAction("Irecv").execute(action); });
-  xbt_replay_action_register("test",      [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::TestAction().execute(action); });
-  xbt_replay_action_register("wait",      [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::WaitAction().execute(action); });
-  xbt_replay_action_register("waitAll",   [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::WaitAllAction().execute(action); });
-  xbt_replay_action_register("barrier",   [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::BarrierAction().execute(action); });
-  xbt_replay_action_register("bcast",     [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::BcastAction().execute(action); });
-  xbt_replay_action_register("reduce",    [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::ReduceAction().execute(action); });
-  xbt_replay_action_register("allReduce", simgrid::smpi::action_allReduce);
-  xbt_replay_action_register("allToAll", simgrid::smpi::action_allToAll);
-  xbt_replay_action_register("allToAllV", simgrid::smpi::action_allToAllv);
-  xbt_replay_action_register("gather", simgrid::smpi::action_gather);
-  xbt_replay_action_register("scatter", simgrid::smpi::action_scatter);
-  xbt_replay_action_register("gatherV", simgrid::smpi::action_gatherv);
-  xbt_replay_action_register("scatterV", simgrid::smpi::action_scatterv);
-  xbt_replay_action_register("allGather", simgrid::smpi::action_allgather);
-  xbt_replay_action_register("allGatherV", simgrid::smpi::action_allgatherv);
-  xbt_replay_action_register("reduceScatter", simgrid::smpi::action_reducescatter);
-  xbt_replay_action_register("compute", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::Replay::ComputeAction().execute(action); });
+  xbt_replay_action_register("comm_size", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
+  xbt_replay_action_register("comm_split",[](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
+  xbt_replay_action_register("comm_dup",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::CommunicatorAction().execute(action); });
+
+  xbt_replay_action_register("send",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::SendAction("send").execute(action); });
+  xbt_replay_action_register("Isend", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::SendAction("Isend").execute(action); });
+  xbt_replay_action_register("recv",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::RecvAction("recv").execute(action); });
+  xbt_replay_action_register("Irecv", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::RecvAction("Irecv").execute(action); });
+  xbt_replay_action_register("test",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::TestAction().execute(action); });
+  xbt_replay_action_register("wait",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::WaitAction().execute(action); });
+  xbt_replay_action_register("waitAll", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::WaitAllAction().execute(action); });
+  xbt_replay_action_register("barrier", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::BarrierAction().execute(action); });
+  xbt_replay_action_register("bcast",   [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::BcastAction().execute(action); });
+  xbt_replay_action_register("reduce",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ReduceAction().execute(action); });
+  xbt_replay_action_register("allReduce", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::AllReduceAction().execute(action); });
+  xbt_replay_action_register("allToAll", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::AllToAllAction().execute(action); });
+  xbt_replay_action_register("allToAllV", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::AllToAllVAction().execute(action); });
+  xbt_replay_action_register("gather",   [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::GatherAction("gather").execute(action); });
+  xbt_replay_action_register("scatter",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ScatterAction().execute(action); });
+  xbt_replay_action_register("gatherV",  [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::GatherVAction("gatherV").execute(action); });
+  xbt_replay_action_register("scatterV", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ScatterVAction().execute(action); });
+  xbt_replay_action_register("allGather", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::GatherAction("allGather").execute(action); });
+  xbt_replay_action_register("allGatherV", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::GatherVAction("allGatherV").execute(action); });
+  xbt_replay_action_register("reduceScatter", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ReduceScatterAction().execute(action); });
+  xbt_replay_action_register("compute", [](simgrid::xbt::ReplayAction& action) { simgrid::smpi::replay::ComputeAction().execute(action); });
 
   //if we have a delayed start, sleep here.
   if(*argc>2){
@@ -845,6 +863,8 @@ void smpi_replay_init(int* argc, char*** argv)
 /** @brief actually run the replay after initialization */
 void smpi_replay_main(int* argc, char*** argv)
 {
+  static int active_processes = 0;
+  active_processes++;
   simgrid::xbt::replay_runner(*argc, *argv);
 
   /* and now, finalize everything */
@@ -871,12 +891,13 @@ void smpi_replay_main(int* argc, char*** argv)
     smpi_free_replay_tmp_buffers();
   }
 
-  TRACE_smpi_comm_in(Actor::self()->getPid(), "smpi_replay_run_finalize", new simgrid::instr::NoOpTIData("finalize"));
+  TRACE_smpi_comm_in(simgrid::s4u::this_actor::getPid(), "smpi_replay_run_finalize",
+                     new simgrid::instr::NoOpTIData("finalize"));
 
   smpi_process()->finalize();
 
-  TRACE_smpi_comm_out(Actor::self()->getPid());
-  TRACE_smpi_finalize(Actor::self()->getPid());
+  TRACE_smpi_comm_out(simgrid::s4u::this_actor::getPid());
+  TRACE_smpi_finalize(simgrid::s4u::this_actor::getPid());
 }
 
 /** @brief chain a replay initialization and a replay start */