From 101c8b10d9965e115c9ff5c28954750a065cf155 Mon Sep 17 00:00:00 2001 From: Frederic Suter Date: Mon, 9 Dec 2019 10:39:05 +0100 Subject: [PATCH] another bunch of codefactor style issues --- .readthedocs.yml | 2 +- src/bindings/python/simgrid_python.cpp | 4 --- src/mc/inspect/mc_dwarf.cpp | 9 ++----- src/msg/msg_task.cpp | 4 +-- src/plugins/host_energy.cpp | 36 ++++++++++++------------- src/smpi/bindings/smpi_pmpi_request.cpp | 3 --- src/smpi/mpi/smpi_datatype.cpp | 28 +++++++++---------- src/smpi/mpi/smpi_win.cpp | 24 +++++++---------- 8 files changed, 45 insertions(+), 65 deletions(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 93ac3bbc34..7f4e5c21c3 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,5 +1,5 @@ # Path to the pip requirements file requirements_file: docs/requirements.txt - + # Don't build any extra formats formats: [] \ No newline at end of file diff --git a/src/bindings/python/simgrid_python.cpp b/src/bindings/python/simgrid_python.cpp index b8301ada04..de65d4a83d 100644 --- a/src/bindings/python/simgrid_python.cpp +++ b/src/bindings/python/simgrid_python.cpp @@ -63,7 +63,6 @@ static std::string simgrid_version = get_simgrid_version(); PYBIND11_MODULE(simgrid, m) { - m.doc() = "SimGrid userspace API"; m.attr("simgrid_version") = simgrid_version; @@ -234,12 +233,9 @@ PYBIND11_MODULE(simgrid, m) py::class_(m, "Actor", "An actor is an independent stream of execution in your distributed " "application, see :ref:`class s4u::Actor `") - .def("create", [](py::str name, py::object host, py::object fun, py::args args) { - return simgrid::s4u::Actor::create(name, host.cast(), [fun, args]() { - try { fun(*args); } catch (py::error_already_set& ex) { diff --git a/src/mc/inspect/mc_dwarf.cpp b/src/mc/inspect/mc_dwarf.cpp index 93fc9c70dc..e3a6d25e48 100644 --- a/src/mc/inspect/mc_dwarf.cpp +++ b/src/mc/inspect/mc_dwarf.cpp @@ -494,7 +494,6 @@ static void MC_dwarf_add_members(simgrid::mc::ObjectInformation* /*info*/, Dwarf for (res = dwarf_child(die, &child); res == 0; res = dwarf_siblingof(&child, &child)) { int tag = dwarf_tag(&child); if (tag == DW_TAG_member || tag == DW_TAG_inheritance) { - // Skip declarations: if (MC_dwarf_attr_flag(&child, DW_AT_declaration, false)) continue; @@ -802,8 +801,7 @@ static void MC_dwarf_handle_scope_die(simgrid::mc::ObjectInformation* info, Dwar Dwarf_Addr high_pc; switch (simgrid::dwarf::classify_form(dwarf_whatform(&attr))) { - - // DW_AT_high_pc if an offset from the low_pc: + // DW_AT_high_pc if an offset from the low_pc: case simgrid::dwarf::FormClass::Constant: xbt_assert(dwarf_formsdata(&attr, &offset) == 0, "Could not read constant"); @@ -867,8 +865,7 @@ static void MC_dwarf_handle_die(simgrid::mc::ObjectInformation* info, Dwarf_Die* int tag = dwarf_tag(die); simgrid::dwarf::TagClass klass = simgrid::dwarf::classify_tag(tag); switch (klass) { - - // Type: + // Type: case simgrid::dwarf::TagClass::Type: MC_dwarf_handle_type_die(info, die, unit, frame, ns); break; @@ -1139,7 +1136,6 @@ static void MC_post_process_variables(simgrid::mc::ObjectInformation* info) static void mc_post_process_scope(simgrid::mc::ObjectInformation* info, simgrid::mc::Frame* scope) { - if (scope->tag == DW_TAG_inlined_subroutine) { // Attach correct namespaced name in inlined subroutine: auto i = info->subprograms.find(scope->abstract_origin_id); @@ -1216,7 +1212,6 @@ std::shared_ptr createObjectInformation(std::vec void postProcessObjectInformation(simgrid::mc::RemoteClient* process, simgrid::mc::ObjectInformation* info) { for (auto& t : 
info->types) { - simgrid::mc::Type* type = &(t.second); simgrid::mc::Type* subtype = type; while (subtype->type == DW_TAG_typedef || subtype->type == DW_TAG_volatile_type || diff --git a/src/msg/msg_task.cpp b/src/msg/msg_task.cpp index f87e1d8561..9343108e67 100644 --- a/src/msg/msg_task.cpp +++ b/src/msg/msg_task.cpp @@ -683,8 +683,8 @@ msg_error_t MSG_task_cancel(msg_task_t task) * * It works for either parallel or sequential tasks. */ -double MSG_task_get_remaining_work_ratio(msg_task_t task) { - +double MSG_task_get_remaining_work_ratio(msg_task_t task) +{ xbt_assert((task != nullptr), "Cannot get information from a nullptr task"); if (task->compute) { // Task in progress diff --git a/src/plugins/host_energy.cpp b/src/plugins/host_energy.cpp index 175c455bc8..c7ea34026a 100644 --- a/src/plugins/host_energy.cpp +++ b/src/plugins/host_energy.cpp @@ -125,7 +125,24 @@ public: }; class HostEnergy { + simgrid::s4u::Host* host_ = nullptr; + /*< List of (idle_power, epsilon_power, max_power) tuple corresponding to each cpu pstate */ + std::vector power_range_watts_list_; + + /* We need to keep track of what pstate has been used, as we will sometimes be notified only *after* a pstate has been + * used (but we need to update the energy consumption with the old pstate!) + */ + int pstate_ = 0; + const int pstate_off_ = -1; + + /* Only used to split total energy into unused/used hosts. + * If you want to get this info for something else, rather use the host_load plugin + */ + bool host_was_used_ = false; + + void init_watts_range_list(); friend void ::on_simulation_end(); // For access to host_was_used_ + public: static simgrid::xbt::Extension EXTENSION_ID; @@ -141,23 +158,6 @@ public: double get_power_range_slope_at(int pstate); void update(); -private: - void init_watts_range_list(); - simgrid::s4u::Host* host_ = nullptr; - /*< List of (idle_power, epsilon_power, max_power) tuple corresponding to each cpu pstate */ - std::vector power_range_watts_list_; - - /* We need to keep track of what pstate has been used, as we will sometimes be notified only *after* a pstate has been - * used (but we need to update the energy consumption with the old pstate!) - */ - int pstate_ = 0; - const int pstate_off_ = -1; - - /* Only used to split total energy into unused/used hosts. 
- * If you want to get this info for something else, rather use the host_load plugin - */ - bool host_was_used_ = false; -public: double watts_off_ = 0.0; /*< Consumption when the machine is turned off (shutdown) */ double total_energy_ = 0.0; /*< Total energy consumed by the host */ double last_updated_; /*< Timestamp of the last energy update event*/ @@ -472,7 +472,6 @@ static void on_action_state_change(simgrid::kernel::resource::CpuAction const& a for (simgrid::kernel::resource::Cpu* const& cpu : action.cpus()) { simgrid::s4u::Host* host = cpu->get_host(); if (host != nullptr) { - // If it's a VM, take the corresponding PM simgrid::s4u::VirtualMachine* vm = dynamic_cast(host); if (vm) // If it's a VM, take the corresponding PM @@ -516,7 +515,6 @@ static void on_simulation_end() double used_hosts_energy = 0.0; // Energy consumed by hosts that computed something for (size_t i = 0; i < hosts.size(); i++) { if (dynamic_cast(hosts[i]) == nullptr) { // Ignore virtual machines - double energy = hosts[i]->extension()->get_consumed_energy(); total_energy += energy; if (hosts[i]->extension()->host_was_used_) diff --git a/src/smpi/bindings/smpi_pmpi_request.cpp b/src/smpi/bindings/smpi_pmpi_request.cpp index f64dbabda6..62e4822d57 100644 --- a/src/smpi/bindings/smpi_pmpi_request.cpp +++ b/src/smpi/bindings/smpi_pmpi_request.cpp @@ -203,7 +203,6 @@ int PMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MP } else if(tag<0 && tag != MPI_ANY_TAG){ retval = MPI_ERR_TAG; } else { - int my_proc_id = simgrid::s4u::this_actor::get_pid(); TRACE_smpi_comm_in(my_proc_id, __func__, @@ -500,7 +499,6 @@ int PMPI_Ibsend(const void* buf, int count, MPI_Datatype datatype, int dst, int int PMPI_Bsend_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request) { - int retval = 0; smpi_bench_end(); @@ -641,7 +639,6 @@ int PMPI_Sendrecv_replace(void* buf, int count, MPI_Datatype datatype, int dst, simgrid::smpi::Datatype::copy(recvbuf, count, datatype, buf, count, datatype); } xbt_free(recvbuf); - } return retval; } diff --git a/src/smpi/mpi/smpi_datatype.cpp b/src/smpi/mpi/smpi_datatype.cpp index eabb154634..3829b155f9 100644 --- a/src/smpi/mpi/smpi_datatype.cpp +++ b/src/smpi/mpi/smpi_datatype.cpp @@ -160,7 +160,8 @@ Datatype::Datatype(Datatype *datatype, int* ret) : name_(nullptr), size_(datatyp } } -Datatype::~Datatype(){ +Datatype::~Datatype() +{ xbt_assert(refcount_ >= 0); if(flags_ & DT_FLAG_PREDEFINED) @@ -177,8 +178,8 @@ Datatype::~Datatype(){ xbt_free(name_); } -void Datatype::ref(){ - +void Datatype::ref() +{ refcount_++; #if SIMGRID_HAVE_MC @@ -270,10 +271,10 @@ int Datatype::unpack(const void* inbuf, int insize, int* position, void* outbuf, return MPI_SUCCESS; } -int Datatype::copy(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, int recvcount, MPI_Datatype recvtype){ - -// FIXME Handle the case of a partial shared malloc. +int Datatype::copy(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, + MPI_Datatype recvtype) +{ + // FIXME Handle the case of a partial shared malloc. 
if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) { smpi_switch_data_segment(simgrid::s4u::Actor::self()); @@ -306,8 +307,7 @@ int Datatype::copy(const void *sendbuf, int sendcount, MPI_Datatype sendtype, recvtype->unserialize(sendbuf, recvbuf, count / recvtype->size(), MPI_REPLACE); } else if (not(recvtype->flags() & DT_FLAG_DERIVED)) { sendtype->serialize(sendbuf, recvbuf, count / sendtype->size()); - }else{ - + } else { void * buf_tmp = xbt_malloc(count); sendtype->serialize( sendbuf, buf_tmp,count/sendtype->size()); @@ -583,11 +583,9 @@ int Datatype::create_resized(MPI_Datatype oldtype,MPI_Aint lb, MPI_Aint extent, return MPI_SUCCESS; } -Datatype* Datatype::f2c(int id){ +Datatype* Datatype::f2c(int id) +{ return static_cast(F2C::f2c(id)); } - - -} -} - +} // namespace smpi +} // namespace simgrid diff --git a/src/smpi/mpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp index 3685c7a6c9..9efd47911d 100644 --- a/src/smpi/mpi/smpi_win.cpp +++ b/src/smpi/mpi/smpi_win.cpp @@ -239,8 +239,7 @@ int Win::put(const void *origin_addr, int origin_count, MPI_Datatype origin_data recv_win->requests_->push_back(rreq); rreq->start(); recv_win->mut_->unlock(); - - }else{ + } else { XBT_DEBUG("Entering MPI_Put from myself to myself, rank %d", target_rank); Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype); if(request!=nullptr) @@ -300,17 +299,14 @@ int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, requests_->push_back(rreq); mut_->unlock(); } - - }else{ + } else { Datatype::copy(send_addr, target_count, target_datatype, origin_addr, origin_count, origin_datatype); if(request!=nullptr) *request=MPI_REQUEST_NULL; } - return MPI_SUCCESS; } - int Win::accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request) { @@ -402,7 +398,6 @@ int Win::get_accumulate(const void* origin_addr, int origin_count, MPI_Datatype Request::wait(&req, MPI_STATUS_IGNORE); send_win->atomic_mut_->unlock(); return MPI_SUCCESS; - } int Win::compare_and_swap(const void *origin_addr, void *compare_addr, @@ -739,15 +734,16 @@ int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr) return MPI_SUCCESS; } -MPI_Errhandler Win::errhandler(){ +MPI_Errhandler Win::errhandler() +{ return errhandler_; } -void Win::set_errhandler(MPI_Errhandler errhandler){ - errhandler_=errhandler; - if(errhandler_!= MPI_ERRHANDLER_NULL) +void Win::set_errhandler(MPI_Errhandler errhandler) +{ + errhandler_ = errhandler; + if (errhandler_ != MPI_ERRHANDLER_NULL) errhandler->ref(); } - -} -} +} // namespace smpi +} // namespace simgrid -- 2.20.1
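
For reference, here is a minimal standalone sketch (not part of the patch) of the style these hunks converge on: the opening brace of a function definition on its own line, no blank line right after an opening brace, and commented namespace closers. The namespace and function names below are invented purely for illustration.

#include <cstdio>

namespace simgrid {
namespace style_example {

// Function-definition braces go on their own line, and no blank line is
// left directly after an opening brace.
int add(int a, int b)
{
  return a + b;
}

} // namespace style_example
} // namespace simgrid

int main()
{
  std::printf("%d\n", simgrid::style_example::add(2, 3));
  return 0;
}

The HostEnergy hunk applies the same spirit to class layout: the private data members and the friend declaration for ::on_simulation_end() are grouped ahead of a single public: section instead of being scattered across alternating public/private blocks.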