1 /* Copyright (c) 2013-2023. The SimGrid Team. All rights reserved. */
3 /* This program is free software; you can redistribute it and/or modify it
4 * under the terms of the license (GNU LGPL) which comes with this package. */
6 #include <simgrid/Exception.hpp>
7 #include <simgrid/kernel/routing/NetPoint.hpp>
8 #include <simgrid/kernel/routing/NetZoneImpl.hpp>
9 #include <simgrid/s4u/Exec.hpp>
11 #include "simgrid/sg_config.hpp"
12 #include "src/kernel/EngineImpl.hpp"
13 #include "src/kernel/activity/ExecImpl.hpp"
14 #include "src/kernel/resource/VirtualMachineImpl.hpp"
15 #include "src/surf/cpu_cas01.hpp"
16 #include "src/surf/cpu_ti.hpp"
20 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(res_vm, ker_resource, "Virtual Machines, containing actors and mobile across hosts");
22 void surf_vm_model_init_HL13(simgrid::kernel::resource::CpuModel* cpu_pm_model)
24 auto vm_model = std::make_shared<simgrid::kernel::resource::VMModel>("VM_HL13");
25 auto* engine = simgrid::kernel::EngineImpl::get_instance();
27 engine->add_model(vm_model, {cpu_pm_model});
28 std::shared_ptr<simgrid::kernel::resource::CpuModel> cpu_model_vm;
30 if (simgrid::config::get_value<std::string>("cpu/optim") == "TI") {
31 cpu_model_vm = std::make_shared<simgrid::kernel::resource::CpuTiModel>("VmCpu_TI");
33 cpu_model_vm = std::make_shared<simgrid::kernel::resource::CpuCas01Model>("VmCpu_Cas01");
35 engine->add_model(cpu_model_vm, {cpu_pm_model, vm_model.get()});
36 engine->get_netzone_root()->set_cpu_vm_model(cpu_model_vm);
40 template class xbt::Extendable<kernel::resource::VirtualMachineImpl>;
42 namespace kernel::resource {
48 std::deque<s4u::VirtualMachine*> VirtualMachineImpl::allVms_;
/* In the real world, processes on the guest operating system will be somewhat degraded due to virtualization overhead.
 * The total CPU share these processes get is smaller than that of the VM process gets on a host operating system.
 * FIXME: add a configuration flag for this */
const double virt_overhead = 1; // 0.95
56 static void host_state_change(s4u::Host const& host)
58 if (not host.is_on()) { // just turned off.
59 std::vector<s4u::VirtualMachine*> trash;
60 /* Find all VMs living on that host */
61 for (s4u::VirtualMachine* const& vm : VirtualMachineImpl::allVms_)
62 if (vm->get_pm() == &host)
64 for (s4u::VirtualMachine* vm : trash)
69 static void add_active_exec(s4u::Exec const& task)
71 const s4u::VirtualMachine* vm = dynamic_cast<s4u::VirtualMachine*>(task.get_host());
73 VirtualMachineImpl* vm_impl = vm->get_vm_impl();
74 for (int i = 1; i <= task.get_thread_count(); i++)
75 vm_impl->add_active_exec();
76 vm_impl->update_action_weight();
80 static void remove_active_exec(s4u::Activity const& task)
82 const auto* exec = dynamic_cast<s4u::Exec const*>(&task);
85 if (not exec->is_assigned())
87 const s4u::VirtualMachine* vm = dynamic_cast<s4u::VirtualMachine*>(exec->get_host());
89 VirtualMachineImpl* vm_impl = vm->get_vm_impl();
90 for (int i = 1; i <= exec->get_thread_count(); i++)
91 vm_impl->remove_active_exec();
92 vm_impl->update_action_weight();
96 static s4u::VirtualMachine* get_vm_from_activity(s4u::Activity const& act)
98 auto* exec = dynamic_cast<kernel::activity::ExecImpl const*>(act.get_impl());
99 return exec != nullptr ? dynamic_cast<s4u::VirtualMachine*>(exec->get_host()) : nullptr;
102 static void add_active_activity(s4u::Activity const& act)
104 const s4u::VirtualMachine* vm = get_vm_from_activity(act);
106 VirtualMachineImpl* vm_impl = vm->get_vm_impl();
107 vm_impl->add_active_exec();
108 vm_impl->update_action_weight();
112 static void remove_active_activity(s4u::Activity const& act)
114 const s4u::VirtualMachine* vm = get_vm_from_activity(act);
116 VirtualMachineImpl* vm_impl = vm->get_vm_impl();
117 vm_impl->remove_active_exec();
118 vm_impl->update_action_weight();
122 VMModel::VMModel(const std::string& name) : HostModel(name)
124 s4u::Host::on_state_change_cb(host_state_change);
125 s4u::Exec::on_start_cb(add_active_exec);
126 s4u::Activity::on_completion_cb(remove_active_exec);
127 s4u::Activity::on_resumed_cb(add_active_activity);
128 s4u::Activity::on_suspended_cb(remove_active_activity);
131 double VMModel::next_occurring_event(double now)
133 /* TODO: update action's cost with the total cost of processes on the VM. */
135 /* 1. Now we know how many resource should be assigned to each virtual
136 * machine. We update constraints of the virtual machine layer.
138 * If we have two virtual machine (VM1 and VM2) on a physical machine (PM1).
139 * X1 + X2 = C (Equation 1)
141 * the resource share of VM1: X1
142 * the resource share of VM2: X2
143 * the capacity of PM1: C
145 * Then, if we have two process (P1 and P2) on VM1.
146 * X1_1 + X1_2 = X1 (Equation 2)
148 * the resource share of P1: X1_1
149 * the resource share of P2: X1_2
150 * the capacity of VM1: X1
152 * Equation 1 was solved in the physical machine layer.
153 * Equation 2 is solved in the virtual machine layer (here).
154 * X1 must be passed to the virtual machine layer as a constraint value.
157 /* iterate for all virtual machines */
158 for (s4u::VirtualMachine* const& ws_vm : VirtualMachineImpl::allVms_) {
159 if (ws_vm->get_state() == s4u::VirtualMachine::State::SUSPENDED) // Ignore suspended VMs
162 const kernel::resource::CpuImpl* cpu = ws_vm->get_cpu();
164 // solved_value below is X1 in comment above: what this VM got in the sharing on the PM
165 double solved_value = ws_vm->get_vm_impl()->get_action()->get_rate();
166 XBT_DEBUG("assign %f to vm %s @ pm %s", solved_value, ws_vm->get_cname(), ws_vm->get_pm()->get_cname());
168 lmm::System* vcpu_system = cpu->get_model()->get_maxmin_system();
169 vcpu_system->update_constraint_bound(cpu->get_constraint(), virt_overhead * solved_value);
171 /* actual next occurring event is determined by VM CPU model at EngineImpl::solve */
175 Action* VMModel::execute_thread(const s4u::Host* host, double flops_amount, int thread_count)
177 auto cpu = host->get_cpu();
178 return cpu->execution_start(thread_count * flops_amount, thread_count, -1);
185 VirtualMachineImpl::VirtualMachineImpl(const std::string& name, s4u::VirtualMachine* piface,
186 simgrid::s4u::Host* host_PM, int core_amount, size_t ramsize)
187 : VirtualMachineImpl(name, host_PM, core_amount, ramsize)
192 VirtualMachineImpl::VirtualMachineImpl(const std::string& name, simgrid::s4u::Host* host_PM, int core_amount,
194 : HostImpl(name), physical_host_(host_PM), core_amount_(core_amount), ramsize_(ramsize)
196 /* We create cpu_action corresponding to a VM process on the host operating system. */
197 /* TODO: we have to periodically input GUESTOS_NOISE to the system? how ?
198 * The value for GUESTOS_NOISE corresponds to the cost of the global action associated to the VM. It corresponds to
199 * the cost of a VM running no tasks.
201 action_ = physical_host_->get_cpu()->execution_start(0, core_amount_, 0);
203 // It's empty for now, so it should not request resources in the PM
204 update_action_weight();
205 XBT_VERB("Create VM(%s)@PM(%s)", name.c_str(), physical_host_->get_cname());
208 void VirtualMachineImpl::set_piface(s4u::VirtualMachine* piface)
210 xbt_assert(not piface_, "Pointer to interface already configured for this VM (%s)", get_cname());
212 /* Register this VM to the list of all VMs */
213 allVms_.push_back(piface);
216 /** @brief A physical host does not disappear in the current SimGrid code, but a VM may disappear during a simulation */
217 void VirtualMachineImpl::vm_destroy()
219 /* I was already removed from the allVms set if the VM was destroyed cleanly */
220 if (auto iter = find(allVms_.begin(), allVms_.end(), piface_); iter != allVms_.end())
223 /* Free the cpu_action of the VM. */
224 XBT_ATTRIB_UNUSED bool ret = action_->unref();
225 xbt_assert(ret, "Bug: some resource still remains");
227 // VM uses the host's netpoint, clean but don't destroy it
228 get_iface()->set_netpoint(nullptr);
229 // Take a temporary copy to delete iface safely after impl is destroy'ed
230 const auto* iface = get_iface();
231 // calls the HostImpl() destroy, it'll delete the impl object
237 void VirtualMachineImpl::start()
239 s4u::VirtualMachine::on_start(*get_iface());
240 s4u::VmHostExt::ensureVmExtInstalled();
242 if (physical_host_->extension<s4u::VmHostExt>() == nullptr)
243 physical_host_->extension_set(new s4u::VmHostExt());
245 if (size_t pm_ramsize = physical_host_->extension<s4u::VmHostExt>()->ramsize;
247 not physical_host_->extension<s4u::VmHostExt>()->overcommit) { /* Need to verify that we don't overcommit */
248 /* Retrieve the memory occupied by the VMs on that host. Yep, we have to traverse all VMs of all hosts for that */
249 size_t total_ramsize_of_vms = 0;
250 for (auto* const& ws_vm : allVms_)
251 if (physical_host_ == ws_vm->get_pm())
252 total_ramsize_of_vms += ws_vm->get_ramsize();
254 if (total_ramsize_of_vms + get_ramsize() > pm_ramsize) {
255 XBT_WARN("cannot start %s@%s due to memory shortage: get_ramsize() %zu, free %zu, pm_ramsize %zu (bytes).",
256 get_cname(), physical_host_->get_cname(), get_ramsize(), pm_ramsize - total_ramsize_of_vms, pm_ramsize);
257 throw VmFailureException(XBT_THROW_POINT,
258 xbt::string_printf("Memory shortage on host '%s', VM '%s' cannot be started",
259 physical_host_->get_cname(), get_cname()));
262 vm_state_ = s4u::VirtualMachine::State::RUNNING;
264 s4u::VirtualMachine::on_started(*get_iface());
267 void VirtualMachineImpl::suspend(const actor::ActorImpl* issuer)
269 s4u::VirtualMachine::on_suspend(*get_iface());
271 if (vm_state_ != s4u::VirtualMachine::State::RUNNING)
272 throw VmFailureException(XBT_THROW_POINT,
273 xbt::string_printf("Cannot suspend VM %s: it is not running.", piface_->get_cname()));
274 if (issuer->get_host() == piface_)
275 throw VmFailureException(XBT_THROW_POINT, xbt::string_printf("Actor %s cannot suspend the VM %s in which it runs",
276 issuer->get_cname(), piface_->get_cname()));
278 XBT_DEBUG("suspend VM(%s), where %zu actors exist", piface_->get_cname(), get_actor_count());
282 foreach_actor([](auto& actor) {
283 XBT_DEBUG("suspend %s", actor.get_cname());
287 XBT_DEBUG("suspend all actors on the VM done done");
289 vm_state_ = s4u::VirtualMachine::State::SUSPENDED;
292 void VirtualMachineImpl::resume()
294 if (vm_state_ != s4u::VirtualMachine::State::SUSPENDED)
295 throw VmFailureException(XBT_THROW_POINT,
296 xbt::string_printf("Cannot resume VM %s: it was not suspended", piface_->get_cname()));
298 XBT_DEBUG("Resume VM %s, containing %zu actors.", piface_->get_cname(), get_actor_count());
302 foreach_actor([](auto& actor) {
303 XBT_DEBUG("resume %s", actor.get_cname());
307 vm_state_ = s4u::VirtualMachine::State::RUNNING;
308 s4u::VirtualMachine::on_resume(*get_iface());
311 /** @brief Power off a VM.
313 * All hosted processes will be killed, but the VM state is preserved on memory.
314 * It can later be restarted.
316 * @param issuer the actor requesting the shutdown
318 void VirtualMachineImpl::shutdown(actor::ActorImpl* issuer)
320 if (vm_state_ != s4u::VirtualMachine::State::RUNNING)
321 XBT_VERB("Shutting down the VM %s even if it's not running but in state %s", piface_->get_cname(),
322 s4u::VirtualMachine::to_c_str(get_state()));
324 XBT_DEBUG("shutdown VM %s, that contains %zu actors", piface_->get_cname(), get_actor_count());
326 foreach_actor([issuer](auto& actor) {
327 XBT_DEBUG("kill %s@%s on behalf of %s which shutdown that VM.", actor.get_cname(), actor.get_host()->get_cname(),
328 issuer->get_cname());
329 issuer->kill(&actor);
332 set_state(s4u::VirtualMachine::State::DESTROYED);
334 s4u::VirtualMachine::on_shutdown(*get_iface());
335 /* FIXME: we may have to do something at the surf layer, e.g., vcpu action */
338 /** @brief Change the physical host on which the given VM is running
340 * This is an instantaneous migration.
342 void VirtualMachineImpl::set_physical_host(s4u::Host* destination)
344 std::string vm_name = piface_->get_name();
345 std::string pm_name_src = physical_host_->get_name();
346 std::string pm_name_dst = destination->get_name();
348 /* update net_elm with that of the destination physical host */
349 piface_->set_netpoint(destination->get_netpoint());
350 physical_host_->get_impl()->move_vm(this, destination->get_impl());
352 /* Adapt the speed, pstate and other physical characteristics to the one of our new physical CPU */
353 piface_->get_cpu()->reset_vcpu(destination->get_cpu());
355 physical_host_ = destination;
357 /* Update vcpu's action for the new pm */
358 /* create a cpu action bound to the pm model at the destination. */
359 CpuAction* new_cpu_action = destination->get_cpu()->execution_start(0, this->core_amount_);
361 if (action_->get_remains_no_update() > 0)
362 XBT_CRITICAL("FIXME: need copy the state(?), %f", action_->get_remains_no_update());
364 /* keep the bound value of the cpu action of the VM. */
365 if (double old_bound = action_->get_bound(); old_bound > 0) {
366 XBT_DEBUG("migrate VM(%s): set bound (%f) at %s", vm_name.c_str(), old_bound, pm_name_dst.c_str());
367 new_cpu_action->set_bound(old_bound);
370 XBT_ATTRIB_UNUSED bool ret = action_->unref();
371 xbt_assert(ret, "Bug: some resource still remains");
373 action_ = new_cpu_action;
375 XBT_DEBUG("migrate VM(%s): change PM (%s to %s)", vm_name.c_str(), pm_name_src.c_str(), pm_name_dst.c_str());
378 void VirtualMachineImpl::set_bound(double bound)
381 action_->set_user_bound(user_bound_);
382 update_action_weight();
385 void VirtualMachineImpl::update_action_weight()
387 /* The impact of the VM over its PM is the min between its vCPU amount and the amount of tasks it contains */
388 int impact = std::min(active_execs_, get_core_amount());
390 XBT_DEBUG("set the weight of the dummy CPU action of VM%p on PM to %d (#tasks: %u)", this, impact, active_execs_);
393 action_->set_sharing_penalty(1. / impact);
395 action_->set_sharing_penalty(0.);
397 action_->set_bound(std::min(impact * physical_host_->get_speed(), user_bound_));
400 void VirtualMachineImpl::start_migration()
402 is_migrating_ = true;
403 s4u::VirtualMachine::on_migration_start(*get_iface());
406 void VirtualMachineImpl::end_migration()
408 is_migrating_ = false;
409 s4u::VirtualMachine::on_migration_end(*get_iface());
412 void VirtualMachineImpl::seal()
415 s4u::VirtualMachine::on_vm_creation(*get_iface());
418 } // namespace kernel::resource
419 } // namespace simgrid