void MSG_vm_destroy(msg_vm_t vm)
{
if (MSG_vm_is_migrating(vm))
- THROWF(vm_error, 0, "VM(%s) is migrating", vm->name().c_str());
+ THROWF(vm_error, 0, "Cannot destroy VM '%s', which is migrating.", vm->cname());
/* First, terminate all processes on the VM if necessary */
if (MSG_vm_is_running(vm))
void MSG_vm_shutdown(msg_vm_t vm)
{
  simcall_vm_shutdown(vm);

  /* Sleep for 0 seconds to yield back to the simulation kernel: this ensures that the
   * processes running inside the VM are actually killed in this very scheduling round,
   * before the caller goes on (e.g. with a subsequent VM destroy). */
  MSG_process_sleep(0.);
}
static inline char *get_mig_process_tx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
{
  /* Name of the process sending the migration data for this (VM, source PM, destination PM)
   * triple. The string is allocated with bprintf(); the caller must free it. */
  const char* vm_name  = vm->cname();
  const char* src_name = src_pm->cname();
  const char* dst_name = dst_pm->cname();
  return bprintf("__pr_mig_tx:%s(%s-%s)", vm_name, src_name, dst_name);
}
static inline char *get_mig_process_rx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
{
  /* Name of the process receiving the migration data for this (VM, source PM, destination PM)
   * triple. The string is allocated with bprintf(); the caller must free it. */
  const char* vm_name  = vm->cname();
  const char* src_name = src_pm->cname();
  const char* dst_name = dst_pm->cname();
  return bprintf("__pr_mig_rx:%s(%s-%s)", vm_name, src_name, dst_name);
}
static inline char *get_mig_task_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm, int stage)
{
  /* Name of the migration task exchanged for the given migration stage.
   * The string is allocated with bprintf(); the caller must free it. */
  const char* vm_name  = vm->cname();
  const char* src_name = src_pm->cname();
  const char* dst_name = dst_pm->cname();
  return bprintf("__task_mig_stage%d:%s(%s-%s)", stage, vm_name, src_name, dst_name);
}
struct migration_session {
// The structure has been created in the do_migration function and should only be freed in the same place ;)
struct migration_session *ms = (migration_session *) MSG_process_get_data(MSG_process_self());
- s_vm_params_t params;
- static_cast<simgrid::s4u::VirtualMachine*>(ms->vm)->parameters(¶ms);
-
- int need_exit = 0;
+ bool received_finalize = false;
char *finalize_task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 3);
-
- int ret = 0;
- for (;;) {
+ while (!received_finalize) {
msg_task_t task = nullptr;
- ret = MSG_task_recv(&task, ms->mbox);
- {
- if (ret != MSG_OK) {
- // An error occurred, clean the code and return
- // The owner did not change, hence the task should be only destroyed on the other side
- xbt_free(finalize_task_name);
- return 0;
- }
+ int ret = MSG_task_recv(&task, ms->mbox);
+
+ if (ret != MSG_OK) {
+ // An error occurred, clean the code and return
+ // The owner did not change, hence the task should be only destroyed on the other side
+ xbt_free(finalize_task_name);
+ return 0;
}
if (strcmp(task->name, finalize_task_name) == 0)
- need_exit = 1;
+ received_finalize = 1;
MSG_task_destroy(task);
-
- if (need_exit)
- break;
}
+ xbt_free(finalize_task_name);
// Here Stage 1, 2 and 3 have been performed.
// Hence complete the migration
{
// Now the VM is running on the new host (the migration is completed) (even if the SRC crash)
vm->pimpl_vm_->isMigrating = false;
- XBT_DEBUG("VM(%s) moved from PM(%s) to PM(%s)", sg_host_get_name(ms->vm), sg_host_get_name(ms->src_pm),
- sg_host_get_name(ms->dst_pm));
+ XBT_DEBUG("VM(%s) moved from PM(%s) to PM(%s)", ms->vm->cname(), ms->src_pm->cname(), ms->dst_pm->cname());
if (TRACE_msg_vm_is_enabled()) {
static long long int counter = 0;
PJ_container_free(existing_container);
// create new container on the new_host location
- PJ_container_new(vm->name().c_str(), INSTR_MSG_VM, PJ_container_get(sg_host_get_name(ms->dst_pm)));
+ PJ_container_new(vm->cname(), INSTR_MSG_VM, PJ_container_get(sg_host_get_name(ms->dst_pm)));
// end link
msg = PJ_container_get(vm->name().c_str());
xbt_free(task_name);
}
- xbt_free(finalize_task_name);
-
XBT_DEBUG("mig: rx_done");
return 0;
}
-static void reset_dirty_pages(msg_vm_t vm)
+static void start_dirty_page_tracking(msg_vm_t vm)
{
simgrid::surf::VirtualMachineImpl* pimpl = static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_;
+ pimpl->dp_enabled = 1;
+ if (!pimpl->dp_objs)
+ return;
+
char *key = nullptr;
xbt_dict_cursor_t cursor = nullptr;
dirty_page_t dp = nullptr;
- if (!pimpl->dp_objs)
- return;
xbt_dict_foreach (pimpl->dp_objs, cursor, key, dp) {
double remaining = MSG_task_get_flops_amount(dp->task);
dp->prev_clock = MSG_get_clock();
}
}
-static void start_dirty_page_tracking(msg_vm_t vm)
-{
- static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_->dp_enabled = 1;
-
- reset_dirty_pages(vm);
-}
-
static void stop_dirty_page_tracking(msg_vm_t vm)
{
static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_->dp_enabled = 0;
ms->vm = vm;
ms->src_pm = src_pm;
ms->dst_pm = dst_pm;
- ms->mbox_ctl = get_mig_mbox_ctl(vm, src_pm, dst_pm);
- ms->mbox = get_mig_mbox_src_dst(vm, src_pm, dst_pm);
+
+ /* We have two mailboxes. mbox is used to transfer migration data between source and destination PMs. mbox_ctl is used
+ * to detect the completion of a migration. The names of these mailboxes must not conflict with others. */
+ ms->mbox_ctl = bprintf("__mbox_mig_ctl:%s(%s-%s)", vm->cname(), src_pm->cname(), dst_pm->cname());
+ ms->mbox = bprintf("__mbox_mig_src_dst:%s(%s-%s)", vm->cname(), src_pm->cname(), dst_pm->cname());
char *pr_rx_name = get_mig_process_rx_name(vm, src_pm, dst_pm);
char *pr_tx_name = get_mig_process_tx_name(vm, src_pm, dst_pm);
argv[1] = nullptr;
MSG_process_create_with_arguments(pr_rx_name, migration_rx_fun, ms, dst_pm, 1, argv);
- char** argv = xbt_new(char*, 2);
+ argv = xbt_new(char*, 2);
argv[0] = pr_tx_name;
argv[1] = nullptr;
MSG_process_create_with_arguments(pr_tx_name, migration_tx_fun, ms, src_pm, 1, argv);
xbt_free(ms->mbox);
xbt_free(ms);
- // xbt_assert(ret == MSG_OK);
if (ret == MSG_HOST_FAILURE) {
// Note that since the communication failed, the owner did not change and the task should be destroyed on the
    // other side. Hence, just throw the exception
* The second one would be easier.
*/
- simgrid::surf::VirtualMachineImpl* pimpl = static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_;
+ simgrid::s4u::VirtualMachine* typedVm = static_cast<simgrid::s4u::VirtualMachine*>(vm);
+ simgrid::surf::VirtualMachineImpl* pimpl = typedVm->pimpl_vm_;
msg_host_t old_pm = pimpl->getPm();
- if(MSG_host_is_off(old_pm))
- THROWF(vm_error, 0, "SRC host(%s) seems off, cannot start a migration", sg_host_get_name(old_pm));
+ if (old_pm->isOff())
+ THROWF(vm_error, 0, "Cannot migrate VM '%s' from host '%s', which is offline.", vm->cname(), old_pm->cname());
- if(MSG_host_is_off(new_pm))
- THROWF(vm_error, 0, "DST host(%s) seems off, cannot start a migration", sg_host_get_name(new_pm));
+ if (new_pm->isOff())
+ THROWF(vm_error, 0, "Cannot migrate VM '%s' to host '%s', which is offline.", vm->cname(), new_pm->cname());
if (!MSG_vm_is_running(vm))
- THROWF(vm_error, 0, "VM(%s) is not running", sg_host_get_name(vm));
+ THROWF(vm_error, 0, "Cannot migrate VM '%s' that is not running yet.", vm->cname());
- if (MSG_vm_is_migrating(vm))
- THROWF(vm_error, 0, "VM(%s) is already migrating", sg_host_get_name(vm));
+ if (typedVm->isMigrating())
+ THROWF(vm_error, 0, "Cannot migrate VM '%s' that is already migrating.", vm->cname());
- pimpl->isMigrating = 1;
+ pimpl->isMigrating = true;
int ret = do_migration(vm, old_pm, new_pm);
if (ret == -1) {
- pimpl->isMigrating = 0;
- THROWF(host_error, 0, "SRC host failed during migration");
+ pimpl->isMigrating = false;
+ THROWF(host_error, 0, "Source host '%s' failed during the migration of VM '%s'.", old_pm->cname(), vm->cname());
} else if (ret == -2) {
- pimpl->isMigrating = 0;
- THROWF(host_error, 0, "DST host failed during migration");
+ pimpl->isMigrating = false;
+ THROWF(host_error, 0, "Destination host '%s' failed during the migration of VM '%s'.", new_pm->cname(),
+ vm->cname());
}
}
void MSG_vm_suspend(msg_vm_t vm)
{
if (MSG_vm_is_migrating(vm))
- THROWF(vm_error, 0, "VM(%s) is migrating", sg_host_get_name(vm));
+ THROWF(vm_error, 0, "Cannot suspend VM '%s', which is migrating", vm->cname());
simcall_vm_suspend(vm);
XBT_DEBUG("vm_suspend done");
if (TRACE_msg_vm_is_enabled()) {
- container_t vm_container = PJ_container_get(vm->name().c_str());
+ container_t vm_container = PJ_container_get(vm->cname());
type_t type = PJ_type_get("MSG_VM_STATE", vm_container->type);
val_t value = PJ_value_get_or_new("suspend", "1 0 0", type); // suspend is red
new_pajePushState(MSG_get_clock(), vm_container, type, value);
simcall_vm_resume(vm);
if (TRACE_msg_vm_is_enabled()) {
- container_t vm_container = PJ_container_get(vm->name().c_str());
+ container_t vm_container = PJ_container_get(vm->cname());
type_t type = PJ_type_get("MSG_VM_STATE", vm_container->type);
new_pajePopState(MSG_get_clock(), vm_container, type);
}
void MSG_vm_save(msg_vm_t vm)
{
if (MSG_vm_is_migrating(vm))
- THROWF(vm_error, 0, "VM(%s) is migrating", sg_host_get_name(vm));
+ THROWF(vm_error, 0, "Cannot save VM '%s', which is migrating.", vm->cname());
simcall_vm_save(vm);
if (TRACE_msg_vm_is_enabled()) {
- container_t vm_container = PJ_container_get(vm->name().c_str());
+ container_t vm_container = PJ_container_get(vm->cname());
type_t type = PJ_type_get("MSG_VM_STATE", vm_container->type);
val_t value = PJ_value_get_or_new("save", "0 1 0", type); // save is green
new_pajePushState(MSG_get_clock(), vm_container, type, value);
simcall_vm_restore(vm);
if (TRACE_msg_vm_is_enabled()) {
- container_t vm_container = PJ_container_get(vm->name().c_str());
+ container_t vm_container = PJ_container_get(vm->cname());
type_t type = PJ_type_get("MSG_VM_STATE", vm_container->type);
new_pajePopState(MSG_get_clock(), vm_container, type);
}