X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/7fd384a4f660defe96fc76a1ddaa3e56c675a9ef..dd22867177c86db2e9d0b76852cb09e044b1c0c8:/src/msg/msg_vm.c

diff --git a/src/msg/msg_vm.c b/src/msg/msg_vm.c
index ee2b580a54..113b52167f 100644
--- a/src/msg/msg_vm.c
+++ b/src/msg/msg_vm.c
@@ -117,7 +117,6 @@ int MSG_vm_is_running(msg_vm_t vm)
   return __MSG_vm_is_state(vm, SURF_VM_STATE_RUNNING);
 }
 
-#if 0
 /** @brief Returns whether the given VM is currently migrating
  * @ingroup msg_VMs
  */
@@ -125,7 +124,6 @@ int MSG_vm_is_migrating(msg_vm_t vm)
 {
   return __MSG_vm_is_state(vm, SURF_VM_STATE_MIGRATING);
 }
-#endif
 
 /** @brief Returns whether the given VM is currently suspended, not running.
  * @ingroup msg_VMs
@@ -171,7 +169,7 @@ int MSG_vm_is_restoring(msg_vm_t vm)
  *
  */
 msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name,
-                       int ncpus, int ramsize, int net_cap, char *disk_path, int disksize)
+                       int ncpus, long ramsize, long net_cap, char *disk_path, long disksize)
 {
   msg_vm_t vm = MSG_vm_create_core(ind_pm, name);
 
@@ -179,11 +177,11 @@ msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name,
     s_ws_params_t params;
     memset(&params, 0, sizeof(params));
     params.ramsize = ramsize;
-    params.overcommit = 0;
+    //params.overcommit = 0;
     simcall_host_set_params(vm, &params);
   }
 
-  /* TODO: We will revisit the disk support later. */
+  /* TODO: Limit net capability, take into account disk considerations. */
 
   return vm;
 }
@@ -222,6 +220,30 @@ msg_vm_t MSG_vm_create_core(msg_host_t ind_pm, const char *name)
 
   return ind_vm;
 }
 
+/** @brief Destroy a VM. Destroy the VM object from the simulation.
+ * @ingroup msg_VMs
+ */
+void MSG_vm_destroy(msg_vm_t vm)
+{
+  /* First, terminate all processes on the VM if necessary */
+  if (MSG_vm_is_running(vm))
+    simcall_vm_shutdown(vm);
+
+  if (!MSG_vm_is_created(vm)) {
+    XBT_CRITICAL("shutdown the given VM before destroying it");
+    DIE_IMPOSSIBLE;
+  }
+
+  /* Then, destroy the VM object */
+  simcall_vm_destroy(vm);
+
+  __MSG_host_destroy(vm);
+
+  #ifdef HAVE_TRACING
+  TRACE_msg_vm_end(vm);
+  #endif
+}
+
 /** @brief Start a vm (i.e., boot the guest operating system)
  * @ingroup msg_VMs
@@ -286,11 +308,10 @@ static inline char *get_mig_task_name(const char *vm_name, const char *src_pm_na
   return bprintf("__task_mig_stage%d:%s(%s-%s)", stage, vm_name, src_pm_name, dst_pm_name);
 }
 
+static void launch_deferred_exec_process(msg_host_t host, double computation, double prio);
+
 static int migration_rx_fun(int argc, char *argv[])
 {
-  const char *pr_name = MSG_process_get_name(MSG_process_self());
-  const char *host_name = MSG_host_get_name(MSG_host_self());
-
   XBT_DEBUG("mig: rx_start");
 
   xbt_assert(argc == 4);
@@ -300,6 +321,12 @@ static int migration_rx_fun(int argc, char *argv[])
   msg_vm_t vm = MSG_get_host_by_name(vm_name);
   msg_vm_t dst_pm = MSG_get_host_by_name(dst_pm_name);
 
+
+  s_ws_params_t params;
+  simcall_host_get_params(vm, &params);
+  const double xfer_cpu_overhead = params.xfer_cpu_overhead;
+
+
   int need_exit = 0;
 
   char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name);
@@ -309,6 +336,12 @@ static int migration_rx_fun(int argc, char *argv[])
   for (;;) {
     msg_task_t task = NULL;
     MSG_task_recv(&task, mbox);
+    {
+      double received = MSG_task_get_data_size(task);
+      /* TODO: clean up */
+      // const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024);
+      launch_deferred_exec_process(vm, received * xfer_cpu_overhead, 1);
+    }
 
     if (strcmp(task->name, finalize_task_name) == 0)
       need_exit = 1;
@@ -343,14 +376,6 @@ static int migration_rx_fun(int argc, char *argv[])
   return 0;
 }
 
-
-typedef struct dirty_page {
-  double prev_clock;
-  double prev_remaining;
-  msg_task_t task;
-} s_dirty_page, *dirty_page_t;
-
-
 static void reset_dirty_pages(msg_vm_t vm)
 {
   msg_host_priv_t priv = msg_host_resource_priv(vm);
@@ -411,7 +436,7 @@ double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remaining, d
   return computed;
 }
 
-static double lookup_computed_flop_counts(msg_vm_t vm, int stage2_round_for_fancy_debug)
+static double lookup_computed_flop_counts(msg_vm_t vm, int stage_for_fancy_debug, int stage2_round_for_fancy_debug)
 {
   msg_host_priv_t priv = msg_host_resource_priv(vm);
   double total = 0;
@@ -432,7 +457,8 @@ static double lookup_computed_flop_counts(msg_vm_t vm, int stage2_round_for_fanc
 
   total += priv->dp_updated_by_deleted_tasks;
 
-  XBT_INFO("mig-stage2.%d: computed %f flop_counts (including %f by deleted tasks)",
+  XBT_INFO("mig-stage%d.%d: computed %f flop_counts (including %f by deleted tasks)",
+      stage_for_fancy_debug,
       stage2_round_for_fancy_debug, total, priv->dp_updated_by_deleted_tasks);
 
 
@@ -444,6 +470,8 @@ static double lookup_computed_flop_counts(msg_vm_t vm, int stage2_round_for_fanc
   return total;
 }
 
+// TODO Is this code redundant with the information provided by
+// msg_process_t MSG_process_create(const char *name, xbt_main_func_t code, void *data, msg_host_t host)
 void MSG_host_add_task(msg_host_t host, msg_task_t task)
 {
   msg_host_priv_t priv = msg_host_resource_priv(host);
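The flop counts gathered by get_computed() and lookup_computed_flop_counts() above are what the sender later turns into an estimate of re-dirtied RAM: further down in this patch, get_updated_size() multiplies them by dp_rate and caps the result at dp_cap (the working-set size). A minimal standalone illustration of that estimate, with assumed dp_rate and dp_cap values (they are per-VM parameters supplied by the user, not SimGrid defaults):

#include <stdio.h>

int main(void)
{
  double computed = 2.0e9;                /* flops executed by the VM in one round (assumed) */
  double dp_rate  = 0.5;                  /* assumed: bytes dirtied per flop */
  double dp_cap   = 400.0 * 1024 * 1024;  /* assumed working-set size in bytes */

  double updated = computed * dp_rate;    /* 1.0e9 bytes estimated dirty */
  if (updated > dp_cap)                   /* never more than the working set */
    updated = dp_cap;                     /* capped to ~4.19e8 bytes here */

  printf("estimated bytes to resend: %.0f\n", updated);
  return 0;
}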
@@ -496,28 +524,365 @@ void MSG_host_del_task(msg_host_t host, msg_task_t task)
 }
 
+
+static int deferred_exec_fun(int argc, char *argv[])
+{
+  xbt_assert(argc == 3);
+  const char *comp_str = argv[1];
+  double computation = atof(comp_str);
+  const char *prio_str = argv[2];
+  double prio = atof(prio_str);
+
+  msg_task_t task = MSG_task_create("__task_deferred", computation, 0, NULL);
+  // XBT_INFO("exec deferred %f", computation);
+
+  /* dpt is the result of the VM activity */
+  MSG_task_set_priority(task, prio);
+  MSG_task_execute(task);
+
+
+
+  MSG_task_destroy(task);
+
+  return 0;
+}
+
+static void launch_deferred_exec_process(msg_host_t host, double computation, double prio)
+{
+  char *pr_name = bprintf("__pr_deferred_exec_%s", MSG_host_get_name(host));
+
+  int nargvs = 4;
+  char **argv = xbt_new(char *, nargvs);
+  argv[0] = xbt_strdup(pr_name);
+  argv[1] = bprintf("%lf", computation);
+  argv[2] = bprintf("%lf", prio);
+  argv[3] = NULL;
+
+  msg_process_t pr = MSG_process_create_with_arguments(pr_name, deferred_exec_fun, NULL, host, nargvs - 1, argv);
+
+  xbt_free(pr_name);
+}
+
+
+static int task_tx_overhead_fun(int argc, char *argv[])
+{
+  xbt_assert(argc == 2);
+  const char *mbox = argv[1];
+
+  int need_exit = 0;
+
+  // XBT_INFO("start %s", mbox);
+
+  for (;;) {
+    msg_task_t task = NULL;
+    MSG_task_recv(&task, mbox);
+
+    // XBT_INFO("task->name %s", task->name);
+
+    if (strcmp(task->name, "finalize_making_overhead") == 0)
+      need_exit = 1;
+
+    // XBT_INFO("exec");
+    // MSG_task_set_priority(task, 1000000);
+    MSG_task_execute(task);
+    MSG_task_destroy(task);
+
+    if (need_exit)
+      break;
+  }
+
+  // XBT_INFO("bye");
+
+  return 0;
+}
+
+static void start_overhead_process(msg_task_t comm_task)
+{
+  char *pr_name = bprintf("__pr_task_tx_overhead_%s", MSG_task_get_name(comm_task));
+  char *mbox = bprintf("__mb_task_tx_overhead_%s", MSG_task_get_name(comm_task));
+
+  int nargvs = 3;
+  char **argv = xbt_new(char *, nargvs);
+  argv[0] = xbt_strdup(pr_name);
+  argv[1] = xbt_strdup(mbox);
+  argv[2] = NULL;
+
+  // XBT_INFO("micro start: mbox %s", mbox);
+  msg_process_t pr = MSG_process_create_with_arguments(pr_name, task_tx_overhead_fun, NULL, MSG_host_self(), nargvs - 1, argv);
+
+  xbt_free(pr_name);
+  xbt_free(mbox);
+}
+
+static void shutdown_overhead_process(msg_task_t comm_task)
+{
+  char *mbox = bprintf("__mb_task_tx_overhead_%s", MSG_task_get_name(comm_task));
+
+  msg_task_t task = MSG_task_create("finalize_making_overhead", 0, 0, NULL);
+
+  // XBT_INFO("micro shutdown: mbox %s", mbox);
+  msg_error_t ret = MSG_task_send(task, mbox);
+  xbt_assert(ret == MSG_OK);
+
+  xbt_free(mbox);
+  // XBT_INFO("shutdown done");
+}
+
+static void request_overhead(msg_task_t comm_task, double computation)
+{
+  char *mbox = bprintf("__mb_task_tx_overhead_%s", MSG_task_get_name(comm_task));
+
+  msg_task_t task = MSG_task_create("micro", computation, 0, NULL);
+
+  // XBT_INFO("req overhead");
+  msg_error_t ret = MSG_task_send(task, mbox);
+  xbt_assert(ret == MSG_OK);
+
+  xbt_free(mbox);
+}
+
+/* alpha is (floating_operations / bytes).
+ *
+ * When actual migration traffic was 32 mbytes/s, we observed the CPU
+ * utilization of the main thread of the Qemu process was 10 %.
+ *    alpha = 0.1 * C / (32 * 1024 * 1024)
+ * where the CPU capacity of the PM is C flops/s.
+ *
+ * */
+static void task_send_bounded_with_cpu_overhead(msg_task_t comm_task, char *mbox, double mig_speed, double alpha)
+{
+  const double chunk_size = 1024 * 1024 * 10;
+  double remaining = MSG_task_get_data_size(comm_task);
+
+  start_overhead_process(comm_task);
+
+
+  while (remaining > 0) {
+    double data_size = chunk_size;
+    if (remaining < chunk_size)
+      data_size = remaining;
+
+    remaining -= data_size;
+
+    // XBT_INFO("remaining %f bytes", remaining);
+
+
+    double clock_sta = MSG_get_clock();
+
+    /* create a micro task */
+    {
+      char *mtask_name = bprintf("__micro_%s", MSG_task_get_name(comm_task));
+      msg_task_t mtask = MSG_task_create(mtask_name, 0, data_size, NULL);
+
+      request_overhead(comm_task, data_size * alpha);
+
+      msg_error_t ret = MSG_task_send(mtask, mbox);
+      xbt_assert(ret == MSG_OK);
+
+      xbt_free(mtask_name);
+    }
+
+#if 0
+    {
+      /* In the real world, sending data involves small CPU computation. */
+      char *mtask_name = bprintf("__micro_%s", MSG_task_get_name(comm_task));
+      msg_task_t mtask = MSG_task_create(mtask_name, data_size * alpha, data_size, NULL);
+      MSG_task_execute(mtask);
+      MSG_task_destroy(mtask);
+      xbt_free(mtask_name);
+    }
+#endif
+
+    /* TODO */
+
+    double clock_end = MSG_get_clock();
+
+
+    if (mig_speed > 0) {
+      /*
+       * (max bandwidth) > data_size / ((elapsed time) + time_to_sleep)
+       *
+       * Thus, we get
+       *    time_to_sleep > data_size / (max bandwidth) - (elapsed time)
+       *
+       * If time_to_sleep is smaller than zero, the elapsed time was too big. We
+       * do not need a micro sleep.
+       **/
+      double time_to_sleep = data_size / mig_speed - (clock_end - clock_sta);
+      if (time_to_sleep > 0)
+        MSG_process_sleep(time_to_sleep);
+
+
+      //XBT_INFO("duration %f", clock_end - clock_sta);
+      //XBT_INFO("time_to_sleep %f", time_to_sleep);
+    }
+  }
+
+  // XBT_INFO("%s", MSG_task_get_name(comm_task));
+  shutdown_overhead_process(comm_task);
+
+}
+
+
+#if 0
+static void make_cpu_overhead_of_data_transfer(msg_task_t comm_task, double init_comm_size)
+{
+  double prev_remaining = init_comm_size;
+
+  for (;;) {
+    double remaining = MSG_task_get_remaining_communication(comm_task);
+    if (remaining == 0)
+      need_exit = 1;
+
+    double sent = prev_remaining - remaining;
+    double comp_size = sent * overhead;
+
+
+    char *comp_task_name = bprintf("__sender_overhead%s", MSG_task_get_name(comm_task));
+    msg_task_t comp_task = MSG_task_create(comp_task_name, comp_size, 0, NULL);
+    MSG_task_execute(comp_task);
+    MSG_task_destroy(comp_task);
+
+    if (need_exit)
+      break;
+
+    prev_remaining = remaining;
+
+  }
+
+  xbt_free(comp_task_name);
+}
+#endif
+
+#define USE_MICRO_TASK 1
+
+#if 0
+// const double alpha = 0.1L * 1.0E8 / (32L * 1024 * 1024);
+// const double alpha = 0.25L * 1.0E8 / (85L * 1024 * 1024);
+// const double alpha = 0.20L * 1.0E8 / (85L * 1024 * 1024);
+// const double alpha = 0.25L * 1.0E8 / (85L * 1024 * 1024);
+// const double alpha = 0.32L * 1.0E8 / (24L * 1024 * 1024);   // makes super good values for 32 mbytes/s
+//const double alpha = 0.32L * 1.0E8 / (32L * 1024 * 1024);
+// const double alpha = 0.56L * 1.0E8 / (80L * 1024 * 1024);
+////const double alpha = 0.20L * 1.0E8 / (80L * 1024 * 1024);
+// const double alpha = 0.56L * 1.0E8 / (90L * 1024 * 1024);
+// const double alpha = 0.66L * 1.0E8 / (90L * 1024 * 1024);
+// const double alpha = 0.20L * 1.0E8 / (80L * 1024 * 1024);
+
+/* CPU 22% when 80Mbyte/s */
+const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024);
+#endif
+
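All of the commented-out constants above instantiate the alpha formula documented before task_send_bounded_with_cpu_overhead(): alpha = (observed CPU share) * C / (observed throughput), in flops per byte. A quick sanity check of that arithmetic for the "CPU 22% when 80Mbyte/s" measurement, assuming a PM capacity of C = 1.0E8 flops/s as in those constants:

#include <stdio.h>

int main(void)
{
  double C     = 1.0e8;                           /* assumed PM capacity, flops/s */
  double alpha = 0.22 * C / (80.0 * 1024 * 1024); /* ~0.26 flops per byte sent */

  double chunk = 10.0 * 1024 * 1024;              /* one 10 MiB micro task */
  printf("alpha = %.3f flops/byte, overhead per chunk = %.0f flops\n",
         alpha, chunk * alpha);
  return 0;
}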
 static void send_migration_data(const char *vm_name, const char *src_pm_name, const char *dst_pm_name,
-    double size, char *mbox, int stage, int stage2_round)
+    double size, char *mbox, int stage, int stage2_round, double mig_speed, double xfer_cpu_overhead)
 {
   char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, stage);
   msg_task_t task = MSG_task_create(task_name, 0, size, NULL);
-  msg_error_t ret = MSG_task_send(task, mbox);
+
+  /* TODO: clean up */
+
+  double clock_sta = MSG_get_clock();
+
+#ifdef USE_MICRO_TASK
+
+  task_send_bounded_with_cpu_overhead(task, mbox, mig_speed, xfer_cpu_overhead);
+
+#else
+  msg_error_t ret;
+  if (mig_speed > 0)
+    ret = MSG_task_send_bounded(task, mbox, mig_speed);
+  else
+    ret = MSG_task_send(task, mbox);
   xbt_assert(ret == MSG_OK);
+#endif
+
+  double clock_end = MSG_get_clock();
+  double duration = clock_end - clock_sta;
+  double actual_speed = size / duration;
+#ifdef USE_MICRO_TASK
+  double cpu_utilization = size * xfer_cpu_overhead / duration / 1.0E8;
+#else
+  double cpu_utilization = 0;
+#endif
+
+
+
   if (stage == 2)
-    XBT_INFO("mig-stage%d.%d: sent %f", stage, stage2_round, size);
+    XBT_INFO("mig-stage%d.%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, stage2_round, size, duration, actual_speed, mig_speed, cpu_utilization);
   else
-    XBT_INFO("mig-stage%d: sent %f", stage, size);
+    XBT_INFO("mig-stage%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, size, duration, actual_speed, mig_speed, cpu_utilization);
 
   xbt_free(task_name);
+
+
+
+#ifdef USE_MICRO_TASK
+  /* The name of a micro task starts with __micro, which does not match the
+   * special name that finalizes the receiver loop. Thus, we send the special task.
+   **/
+  {
+    if (stage == 3) {
+      char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, stage);
+      msg_task_t task = MSG_task_create(task_name, 0, 0, NULL);
+      msg_error_t ret = MSG_task_send(task, mbox);
+      xbt_assert(ret == MSG_OK);
+      xbt_free(task_name);
+    }
+  }
+#endif
 }
 
+double get_updated_size(double computed, double dp_rate, double dp_cap)
+{
+  double updated_size = computed * dp_rate;
+  XBT_INFO("updated_size %f dp_rate %f", updated_size, dp_rate);
+  if (updated_size > dp_cap) {
+    // XBT_INFO("mig-stage2.%d: %f bytes updated, but cap it with the working set size %f", stage2_round, updated_size, dp_cap);
+    updated_size = dp_cap;
+  }
-static int migration_tx_fun(int argc, char *argv[])
+  return updated_size;
+}
+
+static double send_stage1(msg_host_t vm, const char *src_pm_name, const char *dst_pm_name,
+    long ramsize, double mig_speed, double xfer_cpu_overhead, double dp_rate, double dp_cap, double dpt_cpu_overhead)
 {
-  const char *pr_name = MSG_process_get_name(MSG_process_self());
-  const char *host_name = MSG_host_get_name(MSG_host_self());
+  const char *vm_name = MSG_host_get_name(vm);
+  char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name);
+
+  const long chunksize = 1024 * 1024 * 100;
+  long remaining = ramsize;
+  double computed_total = 0;
+
+  while (remaining > 0) {
+    long datasize = chunksize;
+    if (remaining < chunksize)
+      datasize = remaining;
+
+    remaining -= datasize;
+
+    send_migration_data(vm_name, src_pm_name, dst_pm_name, datasize, mbox, 1, 0, mig_speed, xfer_cpu_overhead);
+
+    double computed = lookup_computed_flop_counts(vm, 1, 0);
+    computed_total += computed;
+
+    {
+      double updated_size = get_updated_size(computed, dp_rate, dp_cap);
+
+      double overhead = dpt_cpu_overhead * updated_size;
+      launch_deferred_exec_process(vm, overhead, 10000);
+    }
+  }
+
+  return computed_total;
+}
+
+
+
+static int migration_tx_fun(int argc, char *argv[])
+{
   XBT_DEBUG("mig: tx_start");
 
   xbt_assert(argc == 4);
@@ -531,12 +896,26 @@ static int migration_tx_fun(int argc, char *argv[])
   simcall_host_get_params(vm, &params);
   const long ramsize = params.ramsize;
   const long devsize = params.devsize;
+  const int skip_stage1 = params.skip_stage1;
   const int skip_stage2 = params.skip_stage2;
-  const double max_downtime = params.max_downtime;
   const double dp_rate = params.dp_rate;
   const double dp_cap = params.dp_cap;
+  const double mig_speed = params.mig_speed;
+  const double xfer_cpu_overhead = params.xfer_cpu_overhead;
+  const double dpt_cpu_overhead = params.dpt_cpu_overhead;
+
   double remaining_size = ramsize + devsize;
-  double threshold = max_downtime * 125 * 1000 * 1000;
+
+  double max_downtime = params.max_downtime;
+  if (max_downtime == 0) {
+    XBT_WARN("use the default max_downtime value 30ms");
+    max_downtime = 0.03;
+  }
+
+  /* This value assumes the network link is 1Gbps. */
+  double threshold = max_downtime * 125 * 1024 * 1024;
+
+  /* setting up parameters is done */
 
   if (ramsize == 0)
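With the 30 ms default adopted above and the assumed 1 Gbps link, the stage-2 exit threshold works out to roughly 3.9 MB: the loop in the next hunk keeps iterating until the data still to be sent could be flushed within the allowed downtime. A worked instance of that computation:

#include <stdio.h>

int main(void)
{
  double max_downtime = 0.03;                       /* seconds, the default above */
  double link_speed   = 125.0 * 1024 * 1024;        /* bytes/s, as in the patch */
  double threshold    = max_downtime * link_speed;  /* ~3.93e6 bytes */

  printf("stage 2 stops once the remaining data is below %.0f bytes\n", threshold);
  return 0;
}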
"<" : ">", threshold); + + if (remaining_size < threshold) + break; + } + + + send_migration_data(vm_name, src_pm_name, dst_pm_name, updated_size, mbox, 2, stage2_round, mig_speed, xfer_cpu_overhead); remaining_size -= updated_size; stage2_round += 1; @@ -596,7 +996,7 @@ stage3: simcall_vm_suspend(vm); stop_dirty_page_tracking(vm); - send_migration_data(vm_name, src_pm_name, dst_pm_name, remaining_size, mbox, 3, 0); + send_migration_data(vm_name, src_pm_name, dst_pm_name, remaining_size, mbox, 3, 0, mig_speed, xfer_cpu_overhead); xbt_free(mbox); @@ -621,7 +1021,7 @@ static void do_migration(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm) argv[3] = xbt_strdup(sg_host_name(dst_pm)); argv[4] = NULL; - msg_process_t pr = MSG_process_create_with_arguments(pr_name, migration_rx_fun, NULL, dst_pm, nargvs - 1, argv); + MSG_process_create_with_arguments(pr_name, migration_rx_fun, NULL, dst_pm, nargvs - 1, argv); xbt_free(pr_name); } @@ -635,8 +1035,7 @@ static void do_migration(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm) argv[2] = xbt_strdup(sg_host_name(src_pm)); argv[3] = xbt_strdup(sg_host_name(dst_pm)); argv[4] = NULL; - - msg_process_t pr = MSG_process_create_with_arguments(pr_name, migration_tx_fun, NULL, src_pm, nargvs - 1, argv); + MSG_process_create_with_arguments(pr_name, migration_tx_fun, NULL, src_pm, nargvs - 1, argv); xbt_free(pr_name); } @@ -773,32 +1172,9 @@ void MSG_vm_restore(msg_vm_t vm) } -/** @brief Destroy a VM. Destroy the VM object from the simulation. - * @ingroup msg_VMs - */ -void MSG_vm_destroy(msg_vm_t vm) -{ - /* First, terminate all processes on the VM if necessary */ - if (MSG_vm_is_running(vm)) - simcall_vm_shutdown(vm); - - if (!MSG_vm_is_created(vm)) { - XBT_CRITICAL("shutdown the given VM before destroying it"); - DIE_IMPOSSIBLE; - } - - /* Then, destroy the VM object */ - simcall_vm_destroy(vm); - - __MSG_host_destroy(vm); - - #ifdef HAVE_TRACING - TRACE_msg_vm_end(vm); - #endif -} -/** @brief Get the physical host of a givne VM. +/** @brief Get the physical host of a given VM. * @ingroup msg_VMs */ msg_host_t MSG_vm_get_pm(msg_vm_t vm)