X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/94f980dcb3d25cdf2f85dd9d811de906d2f49eb8..4636ae2abfffeed6d3c21c565f9fdd6908826402:/src/msg/msg_vm.c

diff --git a/src/msg/msg_vm.c b/src/msg/msg_vm.c
index 0bc20d94f7..4539a93497 100644
--- a/src/msg/msg_vm.c
+++ b/src/msg/msg_vm.c
@@ -1,26 +1,21 @@
-/* Copyright (c) 2012-2013. The SimGrid Team.
+/* Copyright (c) 2012-2014. The SimGrid Team.
  * All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
-// QUESTIONS:
-// 1./ check how and where a new VM is added to the list of the hosts
-// 2./ Diff between SIMIX_Actions and SURF_Actions
-// => SIMIX_actions : point synchro entre processus de niveau (theoretically speaking I do not have to create such SIMIX_ACTION
-// => Surf_Actions
+/* TODO:
+ * 1. add the support of trace
+ * 2. use parallel tasks to simulate CPU overhead and remove the very
+ *    experimental code generating micro computation tasks
+ */
+
-// TODO
-// MSG_TRACE can be revisited in order to use the host
-// To implement a mixed model between workstation and vm_workstation,
-// please give a look at surf_model_private_t model_private at SURF Level and to the share resource functions
-// double (*share_resources) (double now);
-// For the action into the vm workstation model, we should be able to leverage the usual one (and if needed, look at
-// the workstation model.
 
 #include "msg_private.h"
 #include "xbt/sysdep.h"
 #include "xbt/log.h"
+#include "simgrid/platf.h"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(msg_vm, msg, "Cloud-oriented parts of the MSG API");
@@ -57,7 +52,7 @@ xbt_dict_t MSG_vm_get_properties(msg_vm_t vm)
 /** \ingroup m_host_management
  * \brief Change the value of a given host property
  *
- * \param host a host
+ * \param vm a vm
  * \param name a property name
  * \param value what to change the property to
 * \param free_ctn the freeing function to use to kill the value on need
@@ -102,7 +97,7 @@ static inline int __MSG_vm_is_state(msg_vm_t vm, e_surf_vm_state_t state)
   return simcall_vm_get_state(vm) == state;
 }
 
-/** @brief Returns whether the given VM has just reated, not running.
+/** @brief Returns whether the given VM has just created, not running.
  * @ingroup msg_VMs
  */
 int MSG_vm_is_created(msg_vm_t vm)
@@ -123,7 +118,8 @@ int MSG_vm_is_running(msg_vm_t vm)
  */
 int MSG_vm_is_migrating(msg_vm_t vm)
 {
-  return __MSG_vm_is_state(vm, SURF_VM_STATE_MIGRATING);
+  msg_host_priv_t priv = msg_host_resource_priv(vm);
+  return priv->is_migrating;
 }
 
 /** @brief Returns whether the given VM is currently suspended, not running.
@@ -170,30 +166,32 @@ int MSG_vm_is_restoring(msg_vm_t vm)
  * All parameters are in MBytes
  *
  */
-msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name, int ncpus, int ramsize,
-                       int net_cap, char *disk_path, int disksize,
-                       int mig_netspeed, int dp_intensity)
+msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name,
+                       int ncpus, int ramsize,
+                       int net_cap, char *disk_path, int disksize,
+                       int mig_netspeed, int dp_intensity)
 {
-  /* For the moment, intensity_rate is the percentage against the migration bandwidth */
-  double host_speed = MSG_get_host_speed(ind_pm);
-  double update_speed = ((double)dp_intensity/100) * mig_netspeed;
-
-  msg_vm_t vm = MSG_vm_create_core(ind_pm, name);
-  s_ws_params_t params;
-  memset(&params, 0, sizeof(params));
-  params.ramsize = 1L * 1024 * 1024 * ramsize;
-  //params.overcommit = 0;
-  params.devsize = 0;
-  params.skip_stage2 = 0;
-  params.max_downtime = 0.03;
-  params.dp_rate = (update_speed * 1L * 1024 * 1024 ) / host_speed;
-  params.dp_cap = params.ramsize / 0.9; // working set memory is 90%
-  params.mig_speed = 1L * 1024 * 1024 * mig_netspeed; // mig_speed
-
-  //XBT_INFO("dp rate %f migspeed : %f intensity mem : %d, updatespeed %f, hostspeed %f",params.dp_rate, params.mig_speed, dp_intensity, update_speed, host_speed);
-  simcall_host_set_params(vm, &params);
-
-  return vm;
+  /* For the moment, intensity_rate is the percentage against the migration
+   * bandwidth */
+  double host_speed = MSG_get_host_speed(ind_pm);
+  double update_speed = ((double)dp_intensity/100) * mig_netspeed;
+
+  msg_vm_t vm = MSG_vm_create_core(ind_pm, name);
+  s_ws_params_t params;
+  memset(&params, 0, sizeof(params));
+  params.ramsize = (sg_size_t)ramsize * 1024 * 1024;
+  //params.overcommit = 0;
+  params.devsize = 0;
+  params.skip_stage2 = 0;
+  params.max_downtime = 0.03;
+  params.dp_rate = (update_speed * 1024 * 1024) / host_speed;
+  params.dp_cap = params.ramsize * 0.9; // assume working set memory is 90% of ramsize
+  params.mig_speed = (double)mig_netspeed * 1024 * 1024; // mig_speed
+
+  //XBT_INFO("dp rate %f migspeed : %f intensity mem : %d, updatespeed %f, hostspeed %f",params.dp_rate, params.mig_speed, dp_intensity, update_speed, host_speed);
+  simcall_host_set_params(vm, &params);
+
+  return vm;
 }
 
@@ -235,6 +233,9 @@ msg_vm_t MSG_vm_create_core(msg_host_t ind_pm, const char *name)
  */
 void MSG_vm_destroy(msg_vm_t vm)
 {
+  if (MSG_vm_is_migrating(vm))
+    THROWF(vm_error, 0, "VM(%s) is migrating", sg_host_name(vm));
+
   /* First, terminate all processes on the VM if necessary */
   if (MSG_vm_is_running(vm))
       simcall_vm_shutdown(vm);
@@ -291,67 +292,93 @@ void MSG_vm_shutdown(msg_vm_t vm)
 
 /* We have two mailboxes. mbox is used to transfer migration data between
- * source and destiantion PMs. mbox_ctl is used to detect the completion of a
+ * source and destination PMs. mbox_ctl is used to detect the completion of a
  * migration. The names of these mailboxes must not conflict with others.
  */
-static inline char *get_mig_mbox_src_dst(const char *vm_name, const char *src_pm_name, const char *dst_pm_name)
+static inline char *get_mig_mbox_src_dst(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
 {
+  char *vm_name = sg_host_name(vm);
+  char *src_pm_name = sg_host_name(src_pm);
+  char *dst_pm_name = sg_host_name(dst_pm);
+
   return bprintf("__mbox_mig_src_dst:%s(%s-%s)", vm_name, src_pm_name, dst_pm_name);
 }
 
-static inline char *get_mig_mbox_ctl(const char *vm_name, const char *src_pm_name, const char *dst_pm_name)
+static inline char *get_mig_mbox_ctl(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
 {
+  char *vm_name = sg_host_name(vm);
+  char *src_pm_name = sg_host_name(src_pm);
+  char *dst_pm_name = sg_host_name(dst_pm);
+
   return bprintf("__mbox_mig_ctl:%s(%s-%s)", vm_name, src_pm_name, dst_pm_name);
 }
 
-static inline char *get_mig_process_tx_name(const char *vm_name, const char *src_pm_name, const char *dst_pm_name)
+static inline char *get_mig_process_tx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
 {
+  char *vm_name = sg_host_name(vm);
+  char *src_pm_name = sg_host_name(src_pm);
+  char *dst_pm_name = sg_host_name(dst_pm);
+
   return bprintf("__pr_mig_tx:%s(%s-%s)", vm_name, src_pm_name, dst_pm_name);
 }
 
-static inline char *get_mig_process_rx_name(const char *vm_name, const char *src_pm_name, const char *dst_pm_name)
+static inline char *get_mig_process_rx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
 {
+  char *vm_name = sg_host_name(vm);
+  char *src_pm_name = sg_host_name(src_pm);
+  char *dst_pm_name = sg_host_name(dst_pm);
+
   return bprintf("__pr_mig_rx:%s(%s-%s)", vm_name, src_pm_name, dst_pm_name);
 }
 
-static inline char *get_mig_task_name(const char *vm_name, const char *src_pm_name, const char *dst_pm_name, int stage)
+static inline char *get_mig_task_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm, int stage)
 {
+  char *vm_name = sg_host_name(vm);
+  char *src_pm_name = sg_host_name(src_pm);
+  char *dst_pm_name = sg_host_name(dst_pm);
+
   return bprintf("__task_mig_stage%d:%s(%s-%s)", stage, vm_name, src_pm_name, dst_pm_name);
 }
 
 static void launch_deferred_exec_process(msg_host_t host, double computation, double prio);
 
+
+struct migration_session {
+  msg_vm_t vm;
+  msg_host_t src_pm;
+  msg_host_t dst_pm;
+
+  /* The miration_rx process uses mbox_ctl to let the caller of do_migration()
+   * know the completion of the migration. */
+  char *mbox_ctl;
+  /* The migration_rx and migration_tx processes use mbox to transfer migration
+   * data. */
+  char *mbox;
+};
+
+
 static int migration_rx_fun(int argc, char *argv[])
 {
   XBT_DEBUG("mig: rx_start");
 
-  xbt_assert(argc == 4);
-  const char *vm_name = argv[1];
-  const char *src_pm_name = argv[2];
-  const char *dst_pm_name = argv[3];
-  msg_vm_t vm = MSG_get_host_by_name(vm_name);
-  msg_host_t src_pm = MSG_get_host_by_name(src_pm_name);
-  msg_host_t dst_pm = MSG_get_host_by_name(dst_pm_name);
+  struct migration_session *ms = MSG_process_get_data(MSG_process_self());
 
   s_ws_params_t params;
-  simcall_host_get_params(vm, &params);
+  simcall_host_get_params(ms->vm, &params);
   const double xfer_cpu_overhead = params.xfer_cpu_overhead;
 
-  int need_exit = 0;
-
-  char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name);
-  char *mbox_ctl = get_mig_mbox_ctl(vm_name, src_pm_name, dst_pm_name);
-  char *finalize_task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, 3);
+  char *finalize_task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 3);
 
   for (;;) {
     msg_task_t task = NULL;
-    MSG_task_recv(&task, mbox);
+    MSG_task_recv(&task, ms->mbox);
     {
       double received = MSG_task_get_data_size(task);
       /* TODO: clean up */
       // const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024);
-      launch_deferred_exec_process(vm, received * xfer_cpu_overhead, 1);
+      launch_deferred_exec_process(ms->vm, received * xfer_cpu_overhead, 1);
     }
 
     if (strcmp(task->name, finalize_task_name) == 0)
@@ -365,33 +392,31 @@ static int migration_rx_fun(int argc, char *argv[])
 
   /* deinstall the current affinity setting */
-  simcall_vm_set_affinity(vm, src_pm, 0);
+  simcall_vm_set_affinity(ms->vm, ms->src_pm, 0);
 
-  simcall_vm_migrate(vm, dst_pm);
-  simcall_vm_resume(vm);
+  simcall_vm_migrate(ms->vm, ms->dst_pm);
+  simcall_vm_resume(ms->vm);
 
   /* install the affinity setting of the VM on the destination pm */
   {
-    msg_host_priv_t priv = msg_host_resource_priv(vm);
+    msg_host_priv_t priv = msg_host_resource_priv(ms->vm);
 
-    unsigned long affinity_mask = (unsigned long) xbt_dict_get_or_null_ext(priv->affinity_mask_db, (char *) dst_pm, sizeof(msg_host_t));
-    simcall_vm_set_affinity(vm, dst_pm, affinity_mask);
-    XBT_INFO("set affinity(0x%04lx@%s) for %s", affinity_mask, MSG_host_get_name(dst_pm), MSG_host_get_name(vm));
+    unsigned long affinity_mask = (unsigned long) xbt_dict_get_or_null_ext(priv->affinity_mask_db, (char *) ms->dst_pm, sizeof(msg_host_t));
+    simcall_vm_set_affinity(ms->vm, ms->dst_pm, affinity_mask);
+    XBT_INFO("set affinity(0x%04lx@%s) for %s", affinity_mask, MSG_host_get_name(ms->dst_pm), MSG_host_get_name(ms->vm));
   }
 
   {
-    char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, 4);
+    char *task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 4);
     msg_task_t task = MSG_task_create(task_name, 0, 0, NULL);
-    msg_error_t ret = MSG_task_send(task, mbox_ctl);
+    msg_error_t ret = MSG_task_send(task, ms->mbox_ctl);
     xbt_assert(ret == MSG_OK);
 
     xbt_free(task_name);
   }
 
-  xbt_free(mbox);
-  xbt_free(mbox_ctl);
   xbt_free(finalize_task_name);
 
   XBT_DEBUG("mig: rx_done");
@@ -453,7 +478,7 @@ static double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remai
   double computed = dp->prev_remaining - remaining;
   double duration = clock - dp->prev_clock;
 
-  XBT_DEBUG("%s@%s: computated %f ops (remaining %f -> %f) in %f secs (%f -> %f)",
+  XBT_DEBUG("%s@%s: computed %f ops (remaining %f -> %f) in %f secs (%f -> %f)",
       key, sg_host_name(vm), computed, dp->prev_remaining, remaining, duration, dp->prev_clock, clock);
 
   return computed;
@@ -469,7 +494,7 @@ static double lookup_computed_flop_counts(msg_vm_t vm, int stage_for_fancy_debug
   dirty_page_t dp = NULL;
   xbt_dict_foreach(priv->dp_objs, cursor, key, dp) {
     double remaining = MSG_task_get_remaining_computation(dp->task);
-
+
     double clock = MSG_get_clock();
 
     // total += calc_updated_pages(key, vm, dp, remaining, clock);
@@ -500,7 +525,7 @@ void MSG_host_add_task(msg_host_t host, msg_task_t task)
 {
   msg_host_priv_t priv = msg_host_resource_priv(host);
   double remaining = MSG_task_get_remaining_computation(task);
-  char *key = bprintf("%s-%lld", task->name, task->counter);
+  char *key = bprintf("%s-%p", task->name, task);
 
   dirty_page_t dp = xbt_new0(s_dirty_page, 1);
   dp->task = task;
@@ -522,13 +547,13 @@ void MSG_host_del_task(msg_host_t host, msg_task_t task)
 {
   msg_host_priv_t priv = msg_host_resource_priv(host);
 
-  char *key = bprintf("%s-%lld", task->name, task->counter);
+  char *key = bprintf("%s-%p", task->name, task);
 
   dirty_page_t dp = xbt_dict_get_or_null(priv->dp_objs, key);
   xbt_assert(dp->task == task);
 
   /* If we are in the middle of dirty page tracking, we record how much
-   * computaion has been done until now, and keep the information for the
+   * computation has been done until now, and keep the information for the
    * lookup_() function that will called soon. */
   if (priv->dp_enabled) {
     double remaining = MSG_task_get_remaining_computation(task);
@@ -557,7 +582,7 @@ static int deferred_exec_fun(int argc, char *argv[])
   double prio = atof(prio_str);
 
   msg_task_t task = MSG_task_create("__task_deferred", computaion, 0, NULL);
-  // XBT_INFO("exec deferred %f", computaion);
+  // XBT_INFO("exec deferred %f", computation);
 
   /* dpt is the results of the VM activity */
   MSG_task_set_priority(task, prio);
@@ -576,14 +601,12 @@ static void launch_deferred_exec_process(msg_host_t host, double computation, do
   int nargvs = 4;
   char **argv = xbt_new(char *, nargvs);
-  argv[0] = xbt_strdup(pr_name);
-  argv[1] = bprintf("%lf", computation);
-  argv[2] = bprintf("%lf", prio);
+  argv[0] = pr_name;
+  argv[1] = bprintf("%f", computation);
+  argv[2] = bprintf("%f", prio);
   argv[3] = NULL;
 
   MSG_process_create_with_arguments(pr_name, deferred_exec_fun, NULL, host, nargvs - 1, argv);
-
-  xbt_free(pr_name);
 }
 
@@ -626,15 +649,12 @@ static void start_overhead_process(msg_task_t comm_task)
   int nargvs = 3;
   char **argv = xbt_new(char *, nargvs);
-  argv[0] = xbt_strdup(pr_name);
-  argv[1] = xbt_strdup(mbox);
+  argv[0] = pr_name;
+  argv[1] = mbox;
   argv[2] = NULL;
 
   // XBT_INFO("micro start: mbox %s", mbox);
   MSG_process_create_with_arguments(pr_name, task_tx_overhead_fun, NULL, MSG_host_self(), nargvs - 1, argv);
-
-  xbt_free(pr_name);
-  xbt_free(mbox);
 }
 
 static void shutdown_overhead_process(msg_task_t comm_task)
@@ -797,10 +817,10 @@ const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024);
 #endif
 
-static void send_migration_data(const char *vm_name, const char *src_pm_name, const char *dst_pm_name,
-    double size, char *mbox, int stage, int stage2_round, double mig_speed, double xfer_cpu_overhead)
+static void send_migration_data(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm,
+    sg_size_t size, char *mbox, int stage, int stage2_round, double mig_speed, double xfer_cpu_overhead)
 {
-  char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, stage);
+  char *task_name = get_mig_task_name(vm, src_pm, dst_pm, stage);
   msg_task_t task = MSG_task_create(task_name, 0, size, NULL);
 
   /* TODO: clean up */
@@ -833,9 +853,9 @@ static void send_migration_data(const char *vm_name, const char *src_pm_name, co
 
   if (stage == 2){
-    XBT_DEBUG("mig-stage%d.%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, stage2_round, size, duration, actual_speed, mig_speed, cpu_utilization);}
+    XBT_DEBUG("mig-stage%d.%d: sent %llu duration %f actual_speed %f (target %f) cpu %f", stage, stage2_round, size, duration, actual_speed, mig_speed, cpu_utilization);}
   else{
-    XBT_DEBUG("mig-stage%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, size, duration, actual_speed, mig_speed, cpu_utilization);
+    XBT_DEBUG("mig-stage%d: sent %llu duration %f actual_speed %f (target %f) cpu %f", stage, size, duration, actual_speed, mig_speed, cpu_utilization);
   }
 
   xbt_free(task_name);
@@ -870,27 +890,24 @@ static double get_updated_size(double computed, double dp_rate, double dp_cap)
   return updated_size;
 }
 
-static double send_stage1(msg_host_t vm, const char *src_pm_name, const char *dst_pm_name,
-    long ramsize, double mig_speed, double xfer_cpu_overhead, double dp_rate, double dp_cap, double dpt_cpu_overhead)
+static double send_stage1(struct migration_session *ms,
+    sg_size_t ramsize, double mig_speed, double xfer_cpu_overhead, double dp_rate, double dp_cap, double dpt_cpu_overhead)
 {
-  const char *vm_name = MSG_host_get_name(vm);
-  char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name);
-
-  // const long chunksize = 1024 * 1024 * 100;
-  const unsigned long chunksize = 1024u * 1024u * 100000;
-  long remaining = ramsize;
+  // const long chunksize = (sg_size_t)1024 * 1024 * 100;
+  const sg_size_t chunksize = (sg_size_t)1024 * 1024 * 100000;
+  sg_size_t remaining = ramsize;
 
   double computed_total = 0;
 
   while (remaining > 0) {
-    long datasize = chunksize;
+    sg_size_t datasize = chunksize;
     if (remaining < chunksize)
       datasize = remaining;
 
     remaining -= datasize;
-
-    send_migration_data(vm_name, src_pm_name, dst_pm_name, datasize, mbox, 1, 0, mig_speed, xfer_cpu_overhead);
-
-    double computed = lookup_computed_flop_counts(vm, 1, 0);
+    send_migration_data(ms->vm, ms->src_pm, ms->dst_pm, datasize, ms->mbox, 1, 0, mig_speed, xfer_cpu_overhead);
+    double computed = lookup_computed_flop_counts(ms->vm, 1, 0);
     computed_total += computed;
 
     // {
@@ -919,17 +936,12 @@ static int migration_tx_fun(int argc, char *argv[])
 {
   XBT_DEBUG("mig: tx_start");
 
-  xbt_assert(argc == 4);
-  const char *vm_name = argv[1];
-  const char *src_pm_name = argv[2];
-  const char *dst_pm_name = argv[3];
-  msg_vm_t vm = MSG_get_host_by_name(vm_name);
-
+  struct migration_session *ms = MSG_process_get_data(MSG_process_self());
 
   s_ws_params_t params;
-  simcall_host_get_params(vm, &params);
-  const long ramsize = params.ramsize;
-  const long devsize = params.devsize;
+  simcall_host_get_params(ms->vm, &params);
+  const sg_size_t ramsize = params.ramsize;
+  const sg_size_t devsize = params.devsize;
   const int skip_stage1 = params.skip_stage1;
   const int skip_stage2 = params.skip_stage2;
   const double dp_rate = params.dp_rate;
@@ -954,12 +966,11 @@ static int migration_tx_fun(int argc, char *argv[])
   if (ramsize == 0)
     XBT_WARN("migrate a VM, but ramsize is zero");
 
-  char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name);
 
   XBT_INFO("mig-stage1: remaining_size %f", remaining_size);
 
   /* Stage1: send all memory pages to the destination. */
-  start_dirty_page_tracking(vm);
+  start_dirty_page_tracking(ms->vm);
 
   double computed_during_stage1 = 0;
   if (!skip_stage1) {
@@ -968,13 +979,13 @@ static int migration_tx_fun(int argc, char *argv[])
 
     /* send ramsize, but split it */
     double clock_prev_send = MSG_get_clock();
-    computed_during_stage1 = send_stage1(vm, src_pm_name, dst_pm_name, ramsize, mig_speed, xfer_cpu_overhead, dp_rate, dp_cap, dpt_cpu_overhead);
+    computed_during_stage1 = send_stage1(ms, ramsize, mig_speed, xfer_cpu_overhead, dp_rate, dp_cap, dpt_cpu_overhead);
     remaining_size -= ramsize;
     double clock_post_send = MSG_get_clock();
 
     double bandwidth = ramsize / (clock_post_send - clock_prev_send);
     threshold = get_threshold_value(bandwidth, max_downtime);
-    XBT_INFO("actual banwdidth %f, threshold %f", bandwidth / 1024 / 1024, threshold);
+    XBT_INFO("actual bandwidth %f (MB/s), threshold %f", bandwidth / 1024 / 1024, threshold);
   }
 
@@ -996,7 +1007,7 @@ static int migration_tx_fun(int argc, char *argv[])
       /* just after stage1, nothing has been updated. But, we have to send the data updated during stage1 */
       updated_size = get_updated_size(computed_during_stage1, dp_rate, dp_cap);
     } else {
-      double computed = lookup_computed_flop_counts(vm, 2, stage2_round);
+      double computed = lookup_computed_flop_counts(ms->vm, 2, stage2_round);
       updated_size = get_updated_size(computed, dp_rate, dp_cap);
     }
 
@@ -1024,18 +1035,13 @@ static int migration_tx_fun(int argc, char *argv[])
 
     double clock_prev_send = MSG_get_clock();
 
-    send_migration_data(vm_name, src_pm_name, dst_pm_name, updated_size, mbox, 2, stage2_round, mig_speed, xfer_cpu_overhead);
+    send_migration_data(ms->vm, ms->src_pm, ms->dst_pm, updated_size, ms->mbox, 2, stage2_round, mig_speed, xfer_cpu_overhead);
 
     double clock_post_send = MSG_get_clock();
 
     double bandwidth = updated_size / (clock_post_send - clock_prev_send);
     threshold = get_threshold_value(bandwidth, max_downtime);
-    XBT_INFO("actual banwdidth %f, threshold %f", bandwidth / 1024 / 1024, threshold);
-
-
-
-
-
+    XBT_INFO("actual bandwidth %f, threshold %f", bandwidth / 1024 / 1024, threshold);
 
     remaining_size -= updated_size;
 
@@ -1046,12 +1052,11 @@ static int migration_tx_fun(int argc, char *argv[])
 stage3:
   /* Stage3: stop the VM and copy the rest of states. */
   XBT_INFO("mig-stage3: remaining_size %f", remaining_size);
-  simcall_vm_suspend(vm);
-  stop_dirty_page_tracking(vm);
+  simcall_vm_suspend(ms->vm);
+  stop_dirty_page_tracking(ms->vm);
 
-  send_migration_data(vm_name, src_pm_name, dst_pm_name, remaining_size, mbox, 3, 0, mig_speed, xfer_cpu_overhead);
+  send_migration_data(ms->vm, ms->src_pm, ms->dst_pm, remaining_size, ms->mbox, 3, 0, mig_speed, xfer_cpu_overhead);
 
-  xbt_free(mbox);
 
   XBT_DEBUG("mig: tx_done");
@@ -1062,49 +1067,52 @@ stage3:
 
 static void do_migration(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
 {
-  char *mbox_ctl = get_mig_mbox_ctl(sg_host_name(vm), sg_host_name(src_pm), sg_host_name(dst_pm));
+  struct migration_session *ms = xbt_new(struct migration_session, 1);
+  ms->vm = vm;
+  ms->src_pm = src_pm;
+  ms->dst_pm = dst_pm;
+  ms->mbox_ctl = get_mig_mbox_ctl(vm, src_pm, dst_pm);
+  ms->mbox = get_mig_mbox_src_dst(vm, src_pm, dst_pm);
+
+  char *pr_rx_name = get_mig_process_rx_name(vm, src_pm, dst_pm);
+  char *pr_tx_name = get_mig_process_tx_name(vm, src_pm, dst_pm);
+
+// MSG_process_create(pr_rx_name, migration_rx_fun, ms, dst_pm);
+// MSG_process_create(pr_tx_name, migration_tx_fun, ms, src_pm);
+#if 1
+  {
+    char **argv = xbt_new(char *, 2);
+    argv[0] = pr_rx_name;
+    argv[1] = NULL;
+    MSG_process_create_with_arguments(pr_rx_name, migration_rx_fun, ms, dst_pm, 1, argv);
+  }
+  {
+    char **argv = xbt_new(char *, 2);
+    argv[0] = pr_tx_name;
+    argv[1] = NULL;
+    MSG_process_create_with_arguments(pr_tx_name, migration_tx_fun, ms, src_pm, 1, argv);
+  }
+#endif
+
-  {
-    char *pr_name = get_mig_process_rx_name(sg_host_name(vm), sg_host_name(src_pm), sg_host_name(dst_pm));
-    int nargvs = 5;
-    char **argv = xbt_new(char *, nargvs);
-    argv[0] = xbt_strdup(pr_name);
-    argv[1] = xbt_strdup(sg_host_name(vm));
-    argv[2] = xbt_strdup(sg_host_name(src_pm));
-    argv[3] = xbt_strdup(sg_host_name(dst_pm));
-    argv[4] = NULL;
-
-    MSG_process_create_with_arguments(pr_name, migration_rx_fun, NULL, dst_pm, nargvs - 1, argv);
-
-    xbt_free(pr_name);
-  }
-  {
-    char *pr_name = get_mig_process_tx_name(sg_host_name(vm), sg_host_name(src_pm), sg_host_name(dst_pm));
-    int nargvs = 5;
-    char **argv = xbt_new(char *, nargvs);
-    argv[0] = xbt_strdup(pr_name);
-    argv[1] = xbt_strdup(sg_host_name(vm));
-    argv[2] = xbt_strdup(sg_host_name(src_pm));
-    argv[3] = xbt_strdup(sg_host_name(dst_pm));
-    argv[4] = NULL;
-    MSG_process_create_with_arguments(pr_name, migration_tx_fun, NULL, src_pm, nargvs - 1, argv);
-
-    xbt_free(pr_name);
-  }
 
   /* wait until the migration have finished */
   {
     msg_task_t task = NULL;
-    msg_error_t ret = MSG_task_recv(&task, mbox_ctl);
+    msg_error_t ret = MSG_task_recv(&task, ms->mbox_ctl);
+
     xbt_assert(ret == MSG_OK);
 
-    char *expected_task_name = get_mig_task_name(sg_host_name(vm), sg_host_name(src_pm), sg_host_name(dst_pm), 4);
+    char *expected_task_name = get_mig_task_name(vm, src_pm, dst_pm, 4);
     xbt_assert(strcmp(task->name, expected_task_name) == 0);
     xbt_free(expected_task_name);
+    MSG_task_destroy(task);
   }
 
-  xbt_free(mbox_ctl);
+  xbt_free(ms->mbox_ctl);
+  xbt_free(ms->mbox);
+  xbt_free(ms);
 }
 
@@ -1140,12 +1148,18 @@ void MSG_vm_migrate(msg_vm_t vm, msg_host_t new_pm)
 
   msg_host_t old_pm = simcall_vm_get_pm(vm);
 
-  if (simcall_vm_get_state(vm) != SURF_VM_STATE_RUNNING)
+  if (!MSG_vm_is_running(vm))
     THROWF(vm_error, 0, "VM(%s) is not running", sg_host_name(vm));
 
-  do_migration(vm, old_pm, new_pm);
+  if (MSG_vm_is_migrating(vm))
+    THROWF(vm_error, 0, "VM(%s) is already migrating", sg_host_name(vm));
+
+  msg_host_priv_t priv = msg_host_resource_priv(vm);
+  priv->is_migrating = 1;
+  do_migration(vm, old_pm, new_pm);
+  priv->is_migrating = 0;
 
   XBT_DEBUG("VM(%s) moved from PM(%s) to PM(%s)", vm->key, old_pm->key, new_pm->key);
 
@@ -1158,13 +1172,16 @@ void MSG_vm_migrate(msg_vm_t vm, msg_host_t new_pm)
 /** @brief Immediately suspend the execution of all processes within the given VM.
  * @ingroup msg_VMs
  *
- * This function stops the exection of the VM. All the processes on this VM
- * will pause. The state of the VM is perserved. We can later resume it again.
+ * This function stops the execution of the VM. All the processes on this VM
+ * will pause. The state of the VM is preserved. We can later resume it again.
  *
  * No suspension cost occurs.
  */
 void MSG_vm_suspend(msg_vm_t vm)
 {
+  if (MSG_vm_is_migrating(vm))
+    THROWF(vm_error, 0, "VM(%s) is migrating", sg_host_name(vm));
+
   simcall_vm_suspend(vm);
 
   XBT_DEBUG("vm_suspend done");
@@ -1193,8 +1210,8 @@ void MSG_vm_resume(msg_vm_t vm)
 /** @brief Immediately save the execution of all processes within the given VM.
  * @ingroup msg_VMs
  *
- * This function stops the exection of the VM. All the processes on this VM
- * will pause. The state of the VM is perserved. We can later resume it again.
+ * This function stops the execution of the VM. All the processes on this VM
+ * will pause. The state of the VM is preserved. We can later resume it again.
  *
  * FIXME: No suspension cost occurs. If you want to simulate this too, you want to
 * use a \ref MSG_file_write() before or after, depending on the exact semantic
@@ -1202,6 +1219,9 @@ void MSG_vm_resume(msg_vm_t vm)
  */
 void MSG_vm_save(msg_vm_t vm)
 {
+  if (MSG_vm_is_migrating(vm))
+    THROWF(vm_error, 0, "VM(%s) is migrating", sg_host_name(vm));
+
   simcall_vm_save(vm);
 #ifdef HAVE_TRACING
   TRACE_msg_vm_save(vm);
@@ -1243,11 +1263,11 @@ msg_host_t MSG_vm_get_pm(msg_vm_t vm)
  * For example,
  * On PM0, there are Task1 and VM0.
  * On VM0, there is Task2.
- * Now we bound 75% to Task1@PM0 and bound 25% to Task2@VM0.
+ * Now we bound 75% to Task1\@PM0 and bound 25% to Task2\@VM0.
  * Then,
- *   Task1@PM0 gets 50%.
- *   Task2@VM0 gets 25%.
- * This is NOT 75% for Task1@PM0 and 25% for Task2@VM0, respectively.
+ *   Task1\@PM0 gets 50%.
+ *   Task2\@VM0 gets 25%.
+ * This is NOT 75% for Task1\@PM0 and 25% for Task2\@VM0, respectively.
  *
  * This is because a VM has the dummy CPU action in the PM layer. Putting a
  * task on the VM does not affect the bound of the dummy CPU action. The bound
@@ -1255,7 +1275,7 @@ msg_host_t MSG_vm_get_pm(msg_vm_t vm)
 * There are some solutions for this problem. One option is to update the bound
 * of the dummy CPU action automatically. It should be the sum of all tasks on
- * the VM. But, this solution might be costy, because we have to scan all tasks
+ * the VM. But, this solution might be costly, because we have to scan all tasks
 * on the VM in share_resource() or we have to trap both the start and end of
 * task execution.
 *
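
Usage sketch (illustrative only, not part of the patch): with the signatures as they stand after this change, a driver process could create, migrate and tear down a VM roughly as follows. The header name, the host names "PM0"/"PM1", the use of MSG_vm_start(), and all sizing values (in MBytes, as documented in MSG_vm_create()) are assumptions made up for the example, not something taken from this commit.

#include "msg/msg.h"   /* assumed MSG header for this SimGrid generation */

static int migration_driver(int argc, char *argv[])
{
  msg_host_t pm0 = MSG_get_host_by_name("PM0");   /* hypothetical host names */
  msg_host_t pm1 = MSG_get_host_by_name("PM1");

  /* 1 vCPU, 1024 MB of RAM, 125 MB/s net cap, no disk image, 10240 MB disk,
   * 125 MB/s migration bandwidth, 90% dirty-page intensity -- all invented. */
  msg_vm_t vm = MSG_vm_create(pm0, "VM0", 1, 1024, 125, NULL, 10240, 125, 90);
  MSG_vm_start(vm);                     /* assuming the usual MSG_vm_start() */

  MSG_vm_migrate(vm, pm1);              /* returns once the stage-4 control task is received */
  xbt_assert(!MSG_vm_is_migrating(vm)); /* MSG_vm_migrate() resets priv->is_migrating */

  MSG_vm_shutdown(vm);
  MSG_vm_destroy(vm);                   /* would THROWF if a migration were still in flight */
  return 0;
}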