/* Copyright (c) 2006-2021. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "simgrid/Exception.hpp"
#include "simgrid/host.h"
#include "simgrid/kernel/routing/NetPoint.hpp"
#include "simgrid/s4u/Actor.hpp"
#include "simgrid/s4u/Comm.hpp"
#include "simgrid/s4u/Engine.hpp"
#include "simgrid/s4u/Exec.hpp"
#include "simgrid/s4u/VirtualMachine.hpp"
#include "src/plugins/vm/VirtualMachineImpl.hpp"
#include "src/surf/HostImpl.hpp"
#include "xbt/parse_units.hpp"

#include <algorithm>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>
// Signals fired when a host is turned on/off, and when its computing speed changes.
xbt::signal<void(Host const&)> Host::on_state_change;
xbt::signal<void(Host const&)> Host::on_speed_change;
-Host::Host(const std::string& name) : name_(name)
+Host* Host::set_netpoint(kernel::routing::NetPoint* netpoint)
{
- xbt_assert(Host::by_name_or_null(name_) == nullptr, "Refusing to create a second host named '%s'.", get_cname());
- Engine::get_instance()->host_register(name_, this);
- new surf::HostImpl(this);
+ pimpl_netpoint_ = netpoint;
+ return this;
}
Host::~Host()
{
  if (pimpl_netpoint_ != nullptr) // not removed yet by a children class
    Engine::get_instance()->netpoint_unregister(pimpl_netpoint_);
  delete pimpl_cpu;
}
/** @brief Fire the required callbacks and destroy the object
 *
 * Don't delete directly a host, call h->destroy() instead.
 *
 * This is cumbersome but this is the simplest solution to ensure that the on_destruction() callback receives a valid
 * object (because of the destructor order in a class hierarchy).
 */
void Host::destroy()
{
  // The actual teardown must run in kernel mode: delegate to the pimpl through a simcall.
  kernel::actor::simcall([this] { this->pimpl_->destroy(); });
}
Host* Host::by_name(const std::string& name)
/** @brief Return the host on which the calling actor is running.
 *
 * Aborts (via xbt_assert) when invoked from the maestro context, where no actor exists. */
Host* Host::current()
{
  kernel::actor::ActorImpl* self = kernel::actor::ActorImpl::self();
  xbt_assert(self != nullptr, "Cannot call Host::current() from the maestro context");
  return self->get_host();
}
+xbt::string const& Host::get_name() const
+{
+ return this->pimpl_->get_name();
+}
+
+const char* Host::get_cname() const
+{
+ return this->pimpl_->get_cname();
+}
+
void Host::turn_on()
{
if (not is_on()) {
void Host::turn_off()
{
if (is_on()) {
- kernel::actor::ActorImpl* self = kernel::actor::ActorImpl::self();
+ const kernel::actor::ActorImpl* self = kernel::actor::ActorImpl::self();
kernel::actor::simcall([this, self] {
for (VirtualMachine* const& vm : vm::VirtualMachineImpl::allVms_)
if (vm->get_pm() == this) {
{
std::vector<kernel::resource::LinkImpl*> linkImpls;
this->route_to(dest, linkImpls, latency);
- for (kernel::resource::LinkImpl* const& l : linkImpls)
+ for (auto* l : linkImpls)
links.push_back(l->get_iface());
}
if (XBT_LOG_ISENABLED(surf_route, xbt_log_priority_debug)) {
XBT_CDEBUG(surf_route, "Route from '%s' to '%s' (latency: %f):", get_cname(), dest->get_cname(),
(latency == nullptr ? -1 : *latency));
- for (auto const& link : links)
- XBT_CDEBUG(surf_route, "Link %s", link->get_cname());
+ for (auto const* link : links)
+ XBT_CDEBUG(surf_route, " Link '%s'", link->get_cname());
}
}
return pimpl_netpoint_->get_englobing_zone()->get_iface();
}
-void Host::send_to(Host* dest, double byte_amount)
+void Host::sendto(Host* dest, double byte_amount) // deprecated 331
+{
+ Comm::sendto_async(this, dest, byte_amount)->wait();
+}
+
+CommPtr Host::sendto_async(Host* dest, double byte_amount) // deprecated 331
{
- std::vector<Host*> m_host_list = {this, dest};
- std::vector<double> flops_amount = {0, 0};
- std::vector<double> bytes_amount = {0, byte_amount, 0, 0};
- this_actor::parallel_execute(m_host_list, flops_amount, bytes_amount);
+ return Comm::sendto_async(this, dest, byte_amount);
+}
+
+void Host::send_to(Host* dest, double byte_amount) // deprecated 330
+{
+ Comm::sendto(this, dest, byte_amount);
}
/** Get the properties assigned to a host */
return this->pimpl_->get_property(key);
}
-void Host::set_property(const std::string& key, const std::string& value)
+Host* Host::set_property(const std::string& key, const std::string& value)
{
kernel::actor::simcall([this, &key, &value] { this->pimpl_->set_property(key, value); });
+ return this;
}
-void Host::set_properties(const std::map<std::string, std::string>& properties)
+Host* Host::set_properties(const std::unordered_map<std::string, std::string>& properties)
{
kernel::actor::simcall([this, &properties] { this->pimpl_->set_properties(properties); });
+ return this;
}
-/** Specify a profile turning the host on and off according to a exhaustive list or a stochastic law.
+/** Specify a profile turning the host on and off according to an exhaustive list or a stochastic law.
* The profile must contain boolean values. */
-void Host::set_state_profile(kernel::profile::Profile* p)
+Host* Host::set_state_profile(kernel::profile::Profile* p)
{
- return kernel::actor::simcall([this, p] { pimpl_cpu->set_state_profile(p); });
+ kernel::actor::simcall([this, p] { pimpl_cpu->set_state_profile(p); });
+ return this;
}
-/** Specify a profile modeling the external load according to a exhaustive list or a stochastic law.
+/** Specify a profile modeling the external load according to an exhaustive list or a stochastic law.
*
* Each event of the profile represent a peak speed change that is due to external load. The values are given as a rate
* of the initial value. This means that the actual value is obtained by multiplying the initial value (the peek speed
* at this pstate level) by the rate coming from the profile.
*/
-void Host::set_speed_profile(kernel::profile::Profile* p)
+Host* Host::set_speed_profile(kernel::profile::Profile* p)
{
- return kernel::actor::simcall([this, p] { pimpl_cpu->set_speed_profile(p); });
+ kernel::actor::simcall([this, p] { pimpl_cpu->set_speed_profile(p); });
+ return this;
}
/** @brief Get the peak processor speed (in flops/s), at the specified pstate */
return this->pimpl_cpu->get_pstate_peak_speed(pstate_index);
}
-/** @brief Get the peak computing speed in flops/s at the current pstate, NOT taking the external load into account.
- *
- * The amount of flops per second available for computing depends on several things:
- * - The current pstate determines the maximal peak computing speed (use @ref get_pstate_speed() to retrieve the
- * computing speed you would get at another pstate)
- * - If you declared an external load (with @ref simgrid::surf::Cpu::set_speed_profile()), you must multiply the
- * result of get_speed() by get_available_speed() to retrieve what a new computation would get.
- *
- * The remaining speed is then shared between the executions located on this host.
- * You can retrieve the amount of tasks currently running on this host with @ref get_load().
- *
- * The host may have multiple cores, and your executions may be able to use more than a single core.
- *
- * Finally, executions of priority 2 get twice the amount of flops than executions of priority 1.
- */
double Host::get_speed() const
{
return this->pimpl_cpu->get_speed(1.0);
}
-/** @brief Returns the current computation load (in flops per second)
- *
- * The external load (coming from an availability trace) is not taken in account.
- * You may also be interested in the load plugin.
- */
double Host::get_load() const
{
return this->pimpl_cpu->get_load();
}
-/** @brief Get the available speed ratio, between 0 and 1.
- *
- * This accounts for external load (see @ref simgrid::surf::Cpu::set_speed_profile()).
- */
double Host::get_available_speed() const
{
return this->pimpl_cpu->get_speed_ratio();
}
-/** @brief Returns the number of core of the processor. */
int Host::get_core_count() const
{
return this->pimpl_cpu->get_core_count();
}
+Host* Host::set_core_count(int core_count)
+{
+ kernel::actor::simcall([this, core_count] { this->pimpl_cpu->set_core_count(core_count); });
+ return this;
+}
+
+Host* Host::set_pstate_speed(const std::vector<double>& speed_per_state)
+{
+ kernel::actor::simcall([this, &speed_per_state] { pimpl_cpu->set_pstate_speed(speed_per_state); });
+ return this;
+}
+
+std::vector<double> Host::convert_pstate_speed_vector(const std::vector<std::string>& speed_per_state)
+{
+ std::vector<double> speed_list;
+ speed_list.reserve(speed_per_state.size());
+ for (const auto& speed_str : speed_per_state) {
+ try {
+ double speed = xbt_parse_get_speed("", 0, speed_str.c_str(), nullptr, "");
+ speed_list.push_back(speed);
+ } catch (const simgrid::ParseError&) {
+ throw std::invalid_argument(std::string("Invalid speed value: ") + speed_str);
+ }
+ }
+ return speed_list;
+}
+
+Host* Host::set_pstate_speed(const std::vector<std::string>& speed_per_state)
+{
+ set_pstate_speed(Host::convert_pstate_speed_vector(speed_per_state));
+ return this;
+}
+
/** @brief Set the pstate at which the host should run */
-void Host::set_pstate(int pstate_index)
+Host* Host::set_pstate(int pstate_index)
{
kernel::actor::simcall([this, pstate_index] { this->pimpl_cpu->set_pstate(pstate_index); });
+ return this;
}
+
/** @brief Retrieve the pstate at which the host is currently running */
int Host::get_pstate() const
{
std::vector<Disk*> Host::get_disks() const
{
- return kernel::actor::simcall([this] { return this->pimpl_->get_disks(); });
+ return this->pimpl_->get_disks();
}
-void Host::add_disk(Disk* disk)
+Disk* Host::create_disk(const std::string& name, double read_bandwidth, double write_bandwidth)
+{
+ return kernel::actor::simcall([this, &name, read_bandwidth, write_bandwidth] {
+ return this->pimpl_->create_disk(name, read_bandwidth, write_bandwidth);
+ });
+}
+
+Disk* Host::create_disk(const std::string& name, const std::string& read_bandwidth, const std::string& write_bandwidth)
+{
+ double d_read, d_write;
+ try {
+ d_read = xbt_parse_get_bandwidth("", 0, read_bandwidth.c_str(), nullptr, "");
+ } catch (const simgrid::ParseError&) {
+ throw std::invalid_argument(std::string("Impossible to create disk: ") + name +
+ std::string(". Invalid read bandwidth: ") + read_bandwidth);
+ }
+ try {
+ d_write = xbt_parse_get_bandwidth("", 0, write_bandwidth.c_str(), nullptr, "");
+ } catch (const simgrid::ParseError&) {
+ throw std::invalid_argument(std::string("Impossible to create disk: ") + name +
+ std::string(". Invalid write bandwidth: ") + write_bandwidth);
+ }
+ return create_disk(name, d_read, d_write);
+}
+
+void Host::add_disk(const Disk* disk)
{
kernel::actor::simcall([this, disk] { this->pimpl_->add_disk(disk); });
}
{
kernel::actor::simcall([this, disk_name] { this->pimpl_->remove_disk(disk_name); });
}
-/**
- * @ingroup simix_storage_management
- * @brief Returns the list of storages attached to a host.
- * @return a vector containing all storages attached to the host
- */
-std::vector<const char*> Host::get_attached_storages() const
+
+ExecPtr Host::exec_init(double flops) const
{
- return kernel::actor::simcall([this] { return this->pimpl_->get_attached_storages(); });
+ return this_actor::exec_init(flops);
}
-std::unordered_map<std::string, Storage*> const& Host::get_mounted_storages()
+ExecPtr Host::exec_async(double flops) const
{
- if (mounts_ == nullptr)
- mounts_ = kernel::actor::simcall([this] { return this->pimpl_->get_mounted_storages(); });
-
- return *mounts_;
+ return this_actor::exec_async(flops);
}
-ExecPtr Host::exec_async(double flops)
+void Host::execute(double flops) const
{
- return this_actor::exec_init(flops);
+ execute(flops, 1.0 /* priority */);
}
-void Host::execute(double flops)
+void Host::execute(double flops, double priority) const
{
- execute(flops, 1.0 /* priority */);
+ this_actor::exec_init(flops)->set_priority(1 / priority)->vetoable_start()->wait();
}
-void Host::execute(double flops, double priority)
+Host* Host::seal()
{
- this_actor::exec_init(flops)->set_priority(1 / priority)->start()->wait();
+ kernel::actor::simcall([this]() { this->pimpl_->seal(); });
+ simgrid::s4u::Host::on_creation(*this); // notify the signal
+ return this;
}
} // namespace s4u
{
return simgrid::s4u::Engine::get_instance()->get_host_count();
}
-/** @brief Returns the host list
- *
- * Uses sg_host_count() to know the array size.
- *
- * @return an array of @ref sg_host_t containing all the hosts in the platform.
- * @remark The host order in this array is generally different from the
- * creation/declaration order in the XML platform (we use a hash table
- * internally).
- * @see sg_host_count()
- */
sg_host_t* sg_host_list()
{
- xbt_assert(sg_host_count() > 0, "There is no host!");
- std::vector<simgrid::s4u::Host*> hosts = simgrid::s4u::Engine::get_instance()->get_all_hosts();
+ const simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance();
+ size_t host_count = e->get_host_count();
+
+ xbt_assert(host_count > 0, "There is no host!");
+ std::vector<simgrid::s4u::Host*> hosts = e->get_all_hosts();
sg_host_t* res = xbt_new(sg_host_t, hosts.size());
- memcpy(res, hosts.data(), sizeof(sg_host_t) * hosts.size());
+ std::copy(begin(hosts), end(hosts), res);
return res;
}
return simgrid::s4u::Host::by_name_or_null(name);
}
-xbt_dynar_t sg_hosts_as_dynar()
+xbt_dynar_t sg_hosts_as_dynar() // XBT_ATTRIB_DEPRECATED_v330
{
std::vector<simgrid::s4u::Host*> list = simgrid::s4u::Engine::get_instance()->get_all_hosts();
// ========= Layering madness ==============*
// ========== User data Layer ==========
-void* sg_host_data(const_sg_host_t host)
-{
- return host->get_data();
-}
-void sg_host_data_set(sg_host_t host, void* userdata)
-{
- host->set_data(userdata);
-}
-void* sg_host_user(sg_host_t host) // deprecated
+void* sg_host_get_data(const_sg_host_t host)
{
return host->get_data();
}
-void sg_host_user_set(sg_host_t host, void* userdata) // deprecated
+void sg_host_set_data(sg_host_t host, void* userdata)
{
host->set_data(userdata);
}
-void sg_host_user_destroy(sg_host_t host) // deprecated
+void* sg_host_data(const_sg_host_t host) // XBT_ATTRIB_DEPRECATED_v330
{
- host->set_data(nullptr);
+ return sg_host_get_data(host);
}
-
-// ========= storage related functions ============
-xbt_dict_t sg_host_get_mounted_storage_list(sg_host_t host)
+void sg_host_data_set(sg_host_t host, void* userdata) // XBT_ATTRIB_DEPRECATED_v330
{
- xbt_assert((host != nullptr), "Invalid parameters");
- xbt_dict_t res = xbt_dict_new_homogeneous(nullptr);
- for (auto const& elm : host->get_mounted_storages()) {
- const char* mount_name = elm.first.c_str();
- const simgrid::s4u::Storage* storage = elm.second;
- xbt_dict_set(res, mount_name, (void*)storage->get_cname());
- }
-
- return res;
+ sg_host_set_data(host, userdata);
}
-xbt_dynar_t sg_host_get_attached_storage_list(const_sg_host_t host)
+// ========= Disk related functions ============
+void sg_host_get_disks(const_sg_host_t host, unsigned int* disk_count, sg_disk_t** disks)
{
- xbt_dynar_t storage_dynar = xbt_dynar_new(sizeof(const char*), nullptr);
- std::vector<const char*> storage_vector = host->get_attached_storages();
- for (auto const& name : storage_vector)
- xbt_dynar_push(storage_dynar, &name);
- return storage_dynar;
+ std::vector<sg_disk_t> list = host->get_disks();
+ *disk_count = list.size();
+ *disks = xbt_new(sg_disk_t, list.size());
+ std::copy(begin(list), end(list), *disks);
}
// =========== user-level functions ===============
// ================================================
/** @brief Returns the total speed of a host */
-double sg_host_speed(const_sg_host_t host)
+double sg_host_get_speed(const_sg_host_t host)
{
return host->get_speed();
}
+double sg_host_speed(const_sg_host_t host) // XBT_ATTRIB_DEPRECATED_v330
+{
+ return sg_host_get_speed(host);
+}
+
/** @brief Return the speed of the processor (in flop/s) at a given pstate. See also @ref plugin_energy.
*
* @param host host to test
/** @brief Get the properties of a host */
xbt_dict_t sg_host_get_properties(const_sg_host_t host)
{
- xbt_dict_t as_dict = xbt_dict_new_homogeneous(xbt_free_f);
const std::unordered_map<std::string, std::string>* props = host->get_properties();
+ xbt_dict_t as_dict = xbt_dict_new_homogeneous(xbt_free_f);
+
if (props == nullptr)
return nullptr;
for (auto const& elm : *props) {
* @param to where to
* @param links [OUT] where to store the list of links (must exist, cannot be nullptr).
*/
-void sg_host_route(const_sg_host_t from, const_sg_host_t to, xbt_dynar_t links)
+void sg_host_get_route(const_sg_host_t from, const_sg_host_t to, xbt_dynar_t links)
{
std::vector<simgrid::s4u::Link*> vlinks;
from->route_to(to, vlinks, nullptr);
* @param from where from
* @param to where to
*/
-double sg_host_route_latency(const_sg_host_t from, const_sg_host_t to)
+double sg_host_get_route_latency(const_sg_host_t from, const_sg_host_t to)
{
std::vector<simgrid::s4u::Link*> vlinks;
double res = 0;
* @param from where from
* @param to where to
*/
-double sg_host_route_bandwidth(const_sg_host_t from, const_sg_host_t to)
+double sg_host_get_route_bandwidth(const_sg_host_t from, const_sg_host_t to)
{
double min_bandwidth = -1.0;
return min_bandwidth;
}
-void sg_host_send_to(sg_host_t from, sg_host_t to, double byte_amount)
+void sg_host_route(const_sg_host_t from, const_sg_host_t to, xbt_dynar_t links) // XBT_ATTRIB_DEPRECATED_v330
{
- from->send_to(to, byte_amount);
+ sg_host_get_route(from, to, links);
+}
+
+double sg_host_route_latency(const_sg_host_t from, const_sg_host_t to) // XBT_ATTRIB_DEPRECATED_v330
+{
+ return sg_host_get_route_latency(from, to);
+}
+
+double sg_host_route_bandwidth(const_sg_host_t from, const_sg_host_t to) // XBT_ATTRIB_DEPRECATED_v330
+{
+ return sg_host_get_route_bandwidth(from, to);
+}
+
+void sg_host_sendto(sg_host_t from, sg_host_t to, double byte_amount)
+{
+ simgrid::s4u::Comm::sendto(from, to, byte_amount);
}
/** @brief Displays debugging information about a host */
return res;
}
-double sg_host_load(const_sg_host_t host)
+double sg_host_get_load(const_sg_host_t host)
{
return host->get_load();
}
+
+double sg_host_load(const_sg_host_t host) // XBT_ATTRIB_DEPRECATED_v330
+{
+ return sg_host_get_load(host);
+}