-/* Copyright (c) 2012-2015. The SimGrid Team.
+/* Copyright (c) 2012-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include <xbt/ex.hpp>
-#include "src/instr/instr_private.h"
-#include "src/msg/msg_private.h"
+#include "src/instr/instr_private.hpp"
+#include "src/msg/msg_private.hpp"
#include "src/plugins/vm/VirtualMachineImpl.hpp"
#include "src/plugins/vm/VmHostExt.hpp"
#include "simgrid/host.h"
#include "simgrid/simix.hpp"
+#include "xbt/string.hpp"
-SG_BEGIN_DECL()
+extern "C" {
-struct dirty_page {
- double prev_clock;
- double prev_remaining;
- msg_task_t task;
+struct s_dirty_page {
+ double prev_clock = 0.0;
+ double prev_remaining = 0.0;
+ msg_task_t task = nullptr;
};
-typedef struct dirty_page s_dirty_page;
-typedef struct dirty_page* dirty_page_t;
+typedef s_dirty_page* dirty_page_t;
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(msg_vm, msg, "Cloud-oriented parts of the MSG API");
vm->getParameters(params);
}
+void MSG_vm_set_ramsize(msg_vm_t vm, size_t size)
+{
+ vm->setRamsize(size);
+}
+size_t MSG_vm_get_ramsize(msg_vm_t vm)
+{
+ return vm->getRamsize();
+}
+
/* **** Check state of a VM **** */
static inline int __MSG_vm_is_state(msg_vm_t vm, e_surf_vm_state_t state)
{
/* For the moment, intensity_rate is the percentage against the migration bandwidth */
- msg_vm_t vm = new simgrid::s4u::VirtualMachine(name, pm, coreAmount);
+ msg_vm_t vm = new simgrid::s4u::VirtualMachine(name, pm, coreAmount, static_cast<sg_size_t>(ramsize) * 1024 * 1024);
s_vm_params_t params;
- memset(¶ms, 0, sizeof(params));
- params.ramsize = static_cast<sg_size_t>(ramsize) * 1024 * 1024;
- params.devsize = 0;
- params.skip_stage2 = 0;
+ params.devsize = 0;
+ params.skip_stage1 = 0;
+ params.skip_stage2 = 0;
params.max_downtime = 0.03;
- params.mig_speed = static_cast<double>(mig_netspeed) * 1024 * 1024; // mig_speed
+ params.mig_speed = static_cast<double>(mig_netspeed) * 1024 * 1024; // mig_speed
params.dp_intensity = static_cast<double>(dp_intensity) / 100;
- params.dp_cap = params.ramsize * 0.9; // assume working set memory is 90% of ramsize
+ params.dp_cap = vm->getRamsize() * 0.9; // assume working set memory is 90% of ramsize
XBT_DEBUG("migspeed : %f intensity mem : %d", params.mig_speed, dp_intensity);
vm->setParameters(¶ms);
xbt_assert(sg_host_by_name(name) == nullptr,
"Cannot create a VM named %s: this name is already used by an host or a VM", name);
- return new simgrid::s4u::VirtualMachine(name, pm, 1);
+ msg_vm_t vm = new simgrid::s4u::VirtualMachine(name, pm, 1);
+ s_vm_params_t params;
+ memset(¶ms, 0, sizeof(params));
+ vm->setParameters(¶ms);
+ return vm;
}
/** @brief Create a new VM object with the default parameters, but with a specified amount of cores
* @ingroup msg_VMs*
xbt_assert(sg_host_by_name(name) == nullptr,
"Cannot create a VM named %s: this name is already used by an host or a VM", name);
- return new simgrid::s4u::VirtualMachine(name, pm, coreAmount);
+ msg_vm_t vm = new simgrid::s4u::VirtualMachine(name, pm, coreAmount);
+ s_vm_params_t params;
+ memset(¶ms, 0, sizeof(params));
+ vm->setParameters(¶ms);
+ return vm;
}
/** @brief Destroy a VM. Destroy the VM object from the simulation.
simgrid::simix::kernelImmediate([vm]() { vm->destroy(); });
if (TRACE_msg_vm_is_enabled()) {
- container_t container = PJ_container_get(vm->getCname());
- PJ_container_remove_from_parent(container);
- PJ_container_free(container);
+ container_t container = simgrid::instr::Container::byName(vm->getName());
+ container->removeFromParent();
+ delete container;
}
}
*/
void MSG_vm_start(msg_vm_t vm)
{
  // All admission control (RAM overcommit check) and the state transition are
  // now delegated to the s4u layer.
  vm->start();

  if (TRACE_msg_vm_is_enabled()) {
    container_t vm_container             = simgrid::instr::Container::byName(vm->getName());
    simgrid::instr::StateType* vm_state  = vm_container->getState("MSG_VM_STATE");
    vm_state->addEntityValue("start", "0 0 1"); // "start" is displayed in blue
    vm_state->pushEvent("start");
  }
}
*/
void MSG_vm_shutdown(msg_vm_t vm)
{
  smx_actor_t requester = SIMIX_process_self();
  simgrid::simix::kernelImmediate([vm, requester]() { vm->pimpl_vm_->shutdown(requester); });

  // Yield so that the processes of the VM are actually killed within this very
  // scheduling round, before anything else (such as a VM destroy) is processed.
  MSG_process_sleep(0.);
}
-static inline char *get_mig_process_tx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
+static std::string get_mig_process_tx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
{
- return bprintf("__pr_mig_tx:%s(%s-%s)", vm->getCname(), src_pm->getCname(), dst_pm->getCname());
+ return std::string("__pr_mig_tx:") + vm->getCname() + "(" + src_pm->getCname() + "-" + dst_pm->getCname() + ")";
}
-static inline char *get_mig_process_rx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
+static std::string get_mig_process_rx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
{
- return bprintf("__pr_mig_rx:%s(%s-%s)", vm->getCname(), src_pm->getCname(), dst_pm->getCname());
+ return std::string("__pr_mig_rx:") + vm->getCname() + "(" + src_pm->getCname() + "-" + dst_pm->getCname() + ")";
}
-static inline char *get_mig_task_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm, int stage)
+static std::string get_mig_task_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm, int stage)
{
- return bprintf("__task_mig_stage%d:%s(%s-%s)", stage, vm->getCname(), src_pm->getCname(), dst_pm->getCname());
+ return std::string("__task_mig_stage") + std::to_string(stage) + ":" + vm->getCname() + "(" + src_pm->getCname() +
+ "-" + dst_pm->getCname() + ")";
}
struct migration_session {
/* The migration_rx process uses mbox_ctl to let the caller of do_migration()
* know the completion of the migration. */
- char *mbox_ctl;
+ std::string mbox_ctl;
/* The migration_rx and migration_tx processes use mbox to transfer migration data. */
- char *mbox;
+ std::string mbox;
};
static int migration_rx_fun(int argc, char *argv[])
XBT_DEBUG("mig: rx_start");
// The structure has been created in the do_migration function and should only be freed in the same place ;)
- struct migration_session* ms = static_cast<migration_session*>(MSG_process_get_data(MSG_process_self()));
+ migration_session* ms = static_cast<migration_session*>(MSG_process_get_data(MSG_process_self()));
bool received_finalize = false;
- char *finalize_task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 3);
+ std::string finalize_task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 3);
while (not received_finalize) {
msg_task_t task = nullptr;
- int ret = MSG_task_recv(&task, ms->mbox);
+ int ret = MSG_task_recv(&task, ms->mbox.c_str());
if (ret != MSG_OK) {
// An error occurred, clean the code and return
// The owner did not change, hence the task should be only destroyed on the other side
- xbt_free(finalize_task_name);
return 0;
}
- if (strcmp(task->name, finalize_task_name) == 0)
+ if (finalize_task_name == task->name)
received_finalize = 1;
MSG_task_destroy(task);
}
- xbt_free(finalize_task_name);
// Here Stage 1, 2 and 3 have been performed.
// Hence complete the migration
if (TRACE_msg_vm_is_enabled()) {
static long long int counter = 0;
- char key[INSTR_DEFAULT_STR_SIZE];
- snprintf(key, INSTR_DEFAULT_STR_SIZE, "%lld", counter);
+ std::string key = std::to_string(counter);
counter++;
// start link
- container_t msg = PJ_container_get(vm->getCname());
- type_t type = PJ_type_get("MSG_VM_LINK", PJ_type_get_root());
- new StartLinkEvent(MSG_get_clock(), PJ_container_get_root(), type, msg, "M", key);
+ container_t msg = simgrid::instr::Container::byName(vm->getName());
+ simgrid::instr::Container::getRoot()->getLink("MSG_VM_LINK")->startEvent(msg, "M", key);
// destroy existing container of this vm
- container_t existing_container = PJ_container_get(vm->getCname());
- PJ_container_remove_from_parent(existing_container);
- PJ_container_free(existing_container);
+ container_t existing_container = simgrid::instr::Container::byName(vm->getName());
+ existing_container->removeFromParent();
+ delete existing_container;
// create new container on the new_host location
- PJ_container_new(vm->getCname(), INSTR_MSG_VM, PJ_container_get(ms->dst_pm->getCname()));
+ new simgrid::instr::Container(vm->getCname(), "MSG_VM", simgrid::instr::Container::byName(ms->dst_pm->getName()));
// end link
- msg = PJ_container_get(vm->getCname());
- type = PJ_type_get("MSG_VM_LINK", PJ_type_get_root());
- new EndLinkEvent(MSG_get_clock(), PJ_container_get_root(), type, msg, "M", key);
+ msg = simgrid::instr::Container::byName(vm->getName());
+ simgrid::instr::Container::getRoot()->getLink("MSG_VM_LINK")->endEvent(msg, "M", key);
}
// Inform the SRC that the migration has been correctly performed
- char *task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 4);
- msg_task_t task = MSG_task_create(task_name, 0, 0, nullptr);
- msg_error_t ret = MSG_task_send(task, ms->mbox_ctl);
- // xbt_assert(ret == MSG_OK);
+ std::string task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 4);
+ msg_task_t task = MSG_task_create(task_name.c_str(), 0, 0, nullptr);
+ msg_error_t ret = MSG_task_send(task, ms->mbox_ctl.c_str());
if(ret == MSG_HOST_FAILURE){
// The DST has crashed, this is a problem for the VM since we are not sure whether SRC is considering that the VM
// has been correctly migrated on the DST node
// The SRC has crashed, this is not a problem as the VM has been correctly migrated on the DST node
MSG_task_destroy(task);
}
- xbt_free(task_name);
XBT_DEBUG("mig: rx_done");
return 0;
if (vm->pimpl_vm_->dp_objs.empty())
return;
- for (auto elm : vm->pimpl_vm_->dp_objs) {
+ for (auto const& elm : vm->pimpl_vm_->dp_objs) {
dirty_page_t dp = elm.second;
- double remaining = MSG_task_get_flops_amount(dp->task);
+ double remaining = MSG_task_get_remaining_work_ratio(dp->task);
dp->prev_clock = MSG_get_clock();
dp->prev_remaining = remaining;
XBT_DEBUG("%s@%s remaining %f", elm.first.c_str(), vm->getCname(), remaining);
vm->pimpl_vm_->dp_enabled = 0;
}
-static double get_computed(const char* key, msg_vm_t vm, dirty_page_t dp, double remaining, double clock)
+static double get_computed(const std::string& key, msg_vm_t vm, dirty_page_t dp, double remaining, double clock)
{
double computed = dp->prev_remaining - remaining;
double duration = clock - dp->prev_clock;
- XBT_DEBUG("%s@%s: computed %f ops (remaining %f -> %f) in %f secs (%f -> %f)", key, vm->getCname(), computed,
+ XBT_DEBUG("%s@%s: computed %f ops (remaining %f -> %f) in %f secs (%f -> %f)", key.c_str(), vm->getCname(), computed,
dp->prev_remaining, remaining, duration, dp->prev_clock, clock);
return computed;
{
double total = 0;
- for (auto elm : vm->pimpl_vm_->dp_objs) {
- const char* key = elm.first.c_str();
+ for (auto const& elm : vm->pimpl_vm_->dp_objs) {
+ const std::string& key = elm.first;
dirty_page_t dp = elm.second;
- double remaining = MSG_task_get_flops_amount(dp->task);
+ double remaining = MSG_task_get_remaining_work_ratio(dp->task);
double clock = MSG_get_clock();
- // total += calc_updated_pages(key, vm, dp, remaining, clock);
total += get_computed(key, vm, dp, remaining, clock);
dp->prev_remaining = remaining;
if (vm == nullptr)
return;
- double remaining = MSG_task_get_flops_amount(task);
- char *key = bprintf("%s-%p", task->name, task);
+ double remaining = MSG_task_get_initial_flops_amount(task);
+ std::string key = simgrid::xbt::string_printf("%s-%p", task->name, task);
- dirty_page_t dp = xbt_new0(s_dirty_page, 1);
+ dirty_page_t dp = new s_dirty_page;
dp->task = task;
if (vm->pimpl_vm_->dp_enabled) {
dp->prev_clock = MSG_get_clock();
dp->prev_remaining = remaining;
}
vm->pimpl_vm_->dp_objs.insert({key, dp});
- XBT_DEBUG("add %s on %s (remaining %f, dp_enabled %d)", key, host->getCname(), remaining, vm->pimpl_vm_->dp_enabled);
-
- xbt_free(key);
+ XBT_DEBUG("add %s on %s (remaining %f, dp_enabled %d)", key.c_str(), host->getCname(), remaining,
+ vm->pimpl_vm_->dp_enabled);
}
void MSG_host_del_task(msg_host_t host, msg_task_t task)
if (vm == nullptr)
return;
- char *key = bprintf("%s-%p", task->name, task);
+ std::string key = simgrid::xbt::string_printf("%s-%p", task->name, task);
dirty_page_t dp = nullptr;
- if (vm->pimpl_vm_->dp_objs.find(key) != vm->pimpl_vm_->dp_objs.end())
- dp = vm->pimpl_vm_->dp_objs.at(key);
+ auto dp_obj = vm->pimpl_vm_->dp_objs.find(key);
+ if (dp_obj != vm->pimpl_vm_->dp_objs.end())
+ dp = dp_obj->second;
xbt_assert(dp && dp->task == task);
/* If we are in the middle of dirty page tracking, we record how much computation has been done until now, and keep
* the information for the lookup_() function that will called soon. */
if (vm->pimpl_vm_->dp_enabled) {
- double remaining = MSG_task_get_flops_amount(task);
+ double remaining = MSG_task_get_remaining_work_ratio(task);
double clock = MSG_get_clock();
- // double updated = calc_updated_pages(key, host, dp, remaining, clock);
double updated = get_computed(key, vm, dp, remaining, clock); // was host instead of vm
vm->pimpl_vm_->dp_updated_by_deleted_tasks += updated;
}
vm->pimpl_vm_->dp_objs.erase(key);
- xbt_free(dp);
+ delete dp;
- XBT_DEBUG("del %s on %s", key, host->getCname());
- xbt_free(key);
+ XBT_DEBUG("del %s on %s", key.c_str(), host->getCname());
}
-static sg_size_t send_migration_data(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm, sg_size_t size, char* mbox,
- int stage, int stage2_round, double mig_speed, double timeout)
+static sg_size_t send_migration_data(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm, sg_size_t size,
+ const std::string& mbox, int stage, int stage2_round, double mig_speed,
+ double timeout)
{
sg_size_t sent = 0;
- char *task_name = get_mig_task_name(vm, src_pm, dst_pm, stage);
- msg_task_t task = MSG_task_create(task_name, 0, static_cast<double>(size), nullptr);
+ std::string task_name = get_mig_task_name(vm, src_pm, dst_pm, stage);
+ msg_task_t task = MSG_task_create(task_name.c_str(), 0, static_cast<double>(size), nullptr);
double clock_sta = MSG_get_clock();
msg_error_t ret;
if (mig_speed > 0)
- ret = MSG_task_send_with_timeout_bounded(task, mbox, timeout, mig_speed);
+ ret = MSG_task_send_with_timeout_bounded(task, mbox.c_str(), timeout, mig_speed);
else
- ret = MSG_task_send(task, mbox);
-
- xbt_free(task_name);
+ ret = MSG_task_send(task, mbox.c_str());
if (ret == MSG_OK) {
sent = size;
double updated_size = computed * dp_rate;
XBT_DEBUG("updated_size %f dp_rate %f", updated_size, dp_rate);
if (updated_size > dp_cap) {
- // XBT_INFO("mig-stage2.%d: %f bytes updated, but cap it with the working set size %f", stage2_round, updated_size,
- // dp_cap);
updated_size = dp_cap;
}
XBT_DEBUG("mig: tx_start");
// Note that the ms structure has been allocated in do_migration and hence should be freed in the same function ;)
- migration_session *ms = static_cast<migration_session *>(MSG_process_get_data(MSG_process_self()));
+ migration_session* ms = static_cast<migration_session*>(MSG_process_get_data(MSG_process_self()));
double host_speed = ms->vm->pimpl_vm_->getPm()->getSpeed();
s_vm_params_t params;
ms->vm->getParameters(¶ms);
- const sg_size_t ramsize = params.ramsize;
+ const sg_size_t ramsize = ms->vm->getRamsize();
const sg_size_t devsize = params.devsize;
const int skip_stage1 = params.skip_stage1;
int skip_stage2 = params.skip_stage2;
vm->pimpl_vm_->isMigrating = true;
- struct migration_session *ms = xbt_new(struct migration_session, 1);
- ms->vm = vm;
- ms->src_pm = src_pm;
- ms->dst_pm = dst_pm;
+ migration_session ms;
+ ms.vm = vm;
+ ms.src_pm = src_pm;
+ ms.dst_pm = dst_pm;
/* We have two mailboxes. mbox is used to transfer migration data between source and destination PMs. mbox_ctl is used
* to detect the completion of a migration. The names of these mailboxes must not conflict with others. */
- ms->mbox_ctl = bprintf("__mbox_mig_ctl:%s(%s-%s)", vm->getCname(), src_pm->getCname(), dst_pm->getCname());
- ms->mbox = bprintf("__mbox_mig_src_dst:%s(%s-%s)", vm->getCname(), src_pm->getCname(), dst_pm->getCname());
+ ms.mbox_ctl =
+ simgrid::xbt::string_printf("__mbox_mig_ctl:%s(%s-%s)", vm->getCname(), src_pm->getCname(), dst_pm->getCname());
+ ms.mbox = simgrid::xbt::string_printf("__mbox_mig_src_dst:%s(%s-%s)", vm->getCname(), src_pm->getCname(),
+ dst_pm->getCname());
- char *pr_rx_name = get_mig_process_rx_name(vm, src_pm, dst_pm);
- char *pr_tx_name = get_mig_process_tx_name(vm, src_pm, dst_pm);
+ std::string pr_rx_name = get_mig_process_rx_name(vm, src_pm, dst_pm);
+ std::string pr_tx_name = get_mig_process_tx_name(vm, src_pm, dst_pm);
- char** argv = xbt_new(char*, 2);
- argv[0] = pr_rx_name;
- argv[1] = nullptr;
- MSG_process_create_with_arguments(pr_rx_name, migration_rx_fun, ms, dst_pm, 1, argv);
+ MSG_process_create(pr_rx_name.c_str(), migration_rx_fun, &ms, dst_pm);
- argv = xbt_new(char*, 2);
- argv[0] = pr_tx_name;
- argv[1] = nullptr;
- MSG_process_create_with_arguments(pr_tx_name, migration_tx_fun, ms, src_pm, 1, argv);
+ MSG_process_create(pr_tx_name.c_str(), migration_tx_fun, &ms, src_pm);
/* wait until the migration have finished or on error has occurred */
XBT_DEBUG("wait for reception of the final ACK (i.e. migration has been correctly performed");
msg_task_t task = nullptr;
- msg_error_t ret = MSG_task_receive(&task, ms->mbox_ctl);
+ msg_error_t ret = MSG_task_receive(&task, ms.mbox_ctl.c_str());
vm->pimpl_vm_->isMigrating = false;
- xbt_free(ms->mbox_ctl);
- xbt_free(ms->mbox);
- xbt_free(ms);
-
if (ret == MSG_HOST_FAILURE) {
// Note that since the communication failed, the owner did not change and the task should be destroyed on the
// other side. Hence, just throw the exception
vm->getCname());
}
- char* expected_task_name = get_mig_task_name(vm, src_pm, dst_pm, 4);
- xbt_assert(strcmp(task->name, expected_task_name) == 0);
- xbt_free(expected_task_name);
+ xbt_assert(get_mig_task_name(vm, src_pm, dst_pm, 4) == task->name);
MSG_task_destroy(task);
}
XBT_DEBUG("vm_suspend done");
if (TRACE_msg_vm_is_enabled()) {
- container_t vm_container = PJ_container_get(vm->getCname());
- type_t type = PJ_type_get("MSG_VM_STATE", vm_container->type);
- val_t value = s_val::PJ_value_get_or_new("suspend", "1 0 0", type); // suspend is red
- new PushStateEvent(MSG_get_clock(), vm_container, type, value);
+ simgrid::instr::StateType* state = simgrid::instr::Container::byName(vm->getName())->getState("MSG_VM_STATE");
+ state->addEntityValue("suspend", "1 0 0"); // suspend is red
+ state->pushEvent("suspend");
}
}
{
vm->pimpl_vm_->resume();
- if (TRACE_msg_vm_is_enabled()) {
- container_t vm_container = PJ_container_get(vm->getCname());
- type_t type = PJ_type_get("MSG_VM_STATE", vm_container->type);
- new PopStateEvent(MSG_get_clock(), vm_container, type);
- }
+ if (TRACE_msg_vm_is_enabled())
+ simgrid::instr::Container::byName(vm->getName())->getState("MSG_VM_STATE")->popEvent();
}
/** @brief Get the physical host of a given VM.
{
simgrid::simix::kernelImmediate([vm, bound]() { vm->pimpl_vm_->setBound(bound); });
}
-
-SG_END_DECL()
+}