X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/7d6123fb0ebff1aec98f31fe1962dc22e49b8571..9bbf0601aa9af6089fdcfc0fd09bfdf64382705c:/src/msg/msg_vm.c

diff --git a/src/msg/msg_vm.c b/src/msg/msg_vm.c
index 3b04f74402..4c474e6ead 100644
--- a/src/msg/msg_vm.c
+++ b/src/msg/msg_vm.c
@@ -166,24 +166,33 @@ int MSG_vm_is_restoring(msg_vm_t vm)
 /** @brief Create a new VM with specified parameters.
  *  @ingroup msg_VMs*
+ *  All parameters are in MBytes
+ *
  */
-msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name,
-                       int ncpus, long ramsize, long net_cap, char *disk_path, long disksize)
+msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name, int ncpus, int ramsize,
+                       int net_cap, char *disk_path, int disksize,
+                       int mig_netspeed, int dp_intensity)
 {
-  msg_vm_t vm = MSG_vm_create_core(ind_pm, name);
-
-  {
-    s_ws_params_t params;
-    memset(&params, 0, sizeof(params));
-    params.ramsize = ramsize;
-    //params.overcommit = 0;
-    simcall_host_set_params(vm, &params);
-  }
-
-  /* TODO: Limit net capability, take into account disk considerations. */
-
-  return vm;
+  /* For the moment, intensity_rate is expressed as a percentage of the migration bandwidth */
+  double host_speed = MSG_get_host_speed(ind_pm);
+  double update_speed = ((double)dp_intensity/100) * mig_netspeed;
+
+  msg_vm_t vm = MSG_vm_create_core(ind_pm, name);
+  s_ws_params_t params;
+  memset(&params, 0, sizeof(params));
+  params.ramsize = 1L * 1024 * 1024 * ramsize;
+  //params.overcommit = 0;
+  params.devsize = 0;
+  params.skip_stage2 = 0;
+  params.max_downtime = 0.03;
+  params.dp_rate = (update_speed * 1L * 1024 * 1024 ) / host_speed;
+  params.dp_cap = params.ramsize / 0.9; // working set memory is 90%
+  params.mig_speed = 1L * 1024 * 1024 * mig_netspeed; // mig_speed
+
+  //XBT_INFO("dp rate %f migspeed : %f intensity mem : %d, updatespeed %f, hostspeed %f",params.dp_rate, params.mig_speed, dp_intensity, update_speed, host_speed);
+  simcall_host_set_params(vm, &params);
+
+  return vm;
 }
 
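As a reading aid for the hunk above: the constructor now takes all sizes in MBytes and derives the dirty-page parameters from the physical host's speed. Here is a minimal, hypothetical usage sketch; the host, the numeric values, and the assumed host speed of 1E8 flop/s are illustrative only.

    #include "msg/msg.h"

    /* Hypothetical example: a VM with 1 GiB of RAM that migrates at
     * 125 MBytes/s and dirties memory at 50% of that bandwidth. */
    static msg_vm_t example_create_vm(msg_host_t pm)
    {
      msg_vm_t vm = MSG_vm_create(pm, "vm0",
                                  1,    /* ncpus */
                                  1024, /* ramsize (MBytes) */
                                  125,  /* net_cap (MBytes) */
                                  NULL, /* disk_path */
                                  0,    /* disksize (MBytes) */
                                  125,  /* mig_netspeed (MBytes/s) */
                                  50);  /* dp_intensity (percent) */

      /* With the assumed host_speed of 1E8 flop/s, the code above yields:
       *   update_speed = 50/100 * 125             = 62.5 MBytes/s
       *   dp_rate      = 62.5 * 1024 * 1024 / 1E8 ~ 0.66 bytes/flop
       *   dp_cap       = ramsize in bytes / 0.9   ~ 1.19e9 bytes     */
      return vm;
    }
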
@@ -308,13 +317,10 @@ static inline char *get_mig_task_name(const char *vm_name, const char *src_pm_na
   return bprintf("__task_mig_stage%d:%s(%s-%s)", stage, vm_name, src_pm_name, dst_pm_name);
 }
 
-static void launch_deferred_exec_process(msg_host_t host, double computation);
+static void launch_deferred_exec_process(msg_host_t host, double computation, double prio);
 
 static int migration_rx_fun(int argc, char *argv[])
 {
-  const char *pr_name = MSG_process_get_name(MSG_process_self());
-  const char *host_name = MSG_host_get_name(MSG_host_self());
-
   XBT_DEBUG("mig: rx_start");
 
   xbt_assert(argc == 4);
@@ -324,6 +330,12 @@ static int migration_rx_fun(int argc, char *argv[])
   msg_vm_t vm = MSG_get_host_by_name(vm_name);
   msg_vm_t dst_pm = MSG_get_host_by_name(dst_pm_name);
 
+
+  s_ws_params_t params;
+  simcall_host_get_params(vm, &params);
+  const double xfer_cpu_overhead = params.xfer_cpu_overhead;
+
+
   int need_exit = 0;
 
   char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name);
@@ -335,9 +347,9 @@ static int migration_rx_fun(int argc, char *argv[])
     MSG_task_recv(&task, mbox);
     {
       double received = MSG_task_get_data_size(task);
-      /* TODO */
-      const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024);
-      launch_deferred_exec_process(vm, received * alpha);
+      /* TODO: clean up */
+      // const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024);
+      launch_deferred_exec_process(vm, received * xfer_cpu_overhead, 1);
     }
 
     if (strcmp(task->name, finalize_task_name) == 0)
@@ -373,14 +385,6 @@ static int migration_rx_fun(int argc, char *argv[])
   return 0;
 }
 
-
-typedef struct dirty_page {
-  double prev_clock;
-  double prev_remaining;
-  msg_task_t task;
-} s_dirty_page, *dirty_page_t;
-
-
 static void reset_dirty_pages(msg_vm_t vm)
 {
   msg_host_priv_t priv = msg_host_resource_priv(vm);
@@ -430,7 +434,7 @@ double calc_updated_pages(char *key, msg_vm_t vm, dirty_page_t dp, double remain
 }
 #endif
 
-double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remaining, double clock)
+static double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remaining, double clock)
 {
   double computed = dp->prev_remaining - remaining;
   double duration = clock - dp->prev_clock;
@@ -441,7 +445,7 @@ double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remaining, d
   return computed;
 }
 
-static double lookup_computed_flop_counts(msg_vm_t vm, int stage2_round_for_fancy_debug)
+static double lookup_computed_flop_counts(msg_vm_t vm, int stage_for_fancy_debug, int stage2_round_for_fancy_debug)
 {
   msg_host_priv_t priv = msg_host_resource_priv(vm);
   double total = 0;
@@ -462,7 +466,8 @@ static double lookup_computed_flop_counts(msg_vm_t vm, int stage2_round_for_fanc
 
   total += priv->dp_updated_by_deleted_tasks;
 
-  XBT_INFO("mig-stage2.%d: computed %f flop_counts (including %f by deleted tasks)",
+  XBT_INFO("mig-stage%d.%d: computed %f flop_counts (including %f by deleted tasks)",
+      stage_for_fancy_debug,
       stage2_round_for_fancy_debug,
       total, priv->dp_updated_by_deleted_tasks);
 
@@ -530,15 +535,17 @@ void MSG_host_del_task(msg_host_t host, msg_task_t task)
 
 static int deferred_exec_fun(int argc, char *argv[])
 {
-  xbt_assert(argc == 2);
+  xbt_assert(argc == 3);
   const char *comp_str = argv[1];
   double computaion = atof(comp_str);
+  const char *prio_str = argv[2];
+  double prio = atof(prio_str);
 
   msg_task_t task = MSG_task_create("__task_deferred", computaion, 0, NULL);
   // XBT_INFO("exec deferred %f", computaion);
 
   /* dpt is the results of the VM activity */
-  MSG_task_set_priority(task, 1000000);
+  MSG_task_set_priority(task, prio);
   MSG_task_execute(task);
 
@@ -548,17 +555,18 @@ static int deferred_exec_fun(int argc, char *argv[])
   return 0;
 }
 
-static void launch_deferred_exec_process(msg_host_t host, double computation)
+static void launch_deferred_exec_process(msg_host_t host, double computation, double prio)
 {
   char *pr_name = bprintf("__pr_deferred_exec_%s", MSG_host_get_name(host));
-  int nargvs = 3;
+  int nargvs = 4;
   char **argv = xbt_new(char *, nargvs);
   argv[0] = xbt_strdup(pr_name);
   argv[1] = bprintf("%lf", computation);
-  argv[2] = NULL;
+  argv[2] = bprintf("%lf", prio);
+  argv[3] = NULL;
 
-  msg_process_t pr = MSG_process_create_with_arguments(pr_name, deferred_exec_fun, NULL, host, nargvs - 1, argv);
+  MSG_process_create_with_arguments(pr_name, deferred_exec_fun, NULL, host, nargvs - 1, argv);
 
   xbt_free(pr_name);
 }
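The new prio argument is marshalled as a string because MSG process arguments travel through argv. Stripped of the process plumbing, what the spawned process ends up doing is the following synchronous sketch (illustrative only; the real code runs it in a separate process so the caller does not block):

    /* Sketch: execute `computation` flops on the current host at the given
     * priority. A large priority (e.g. 10000) lets the overhead task crowd
     * out the VM's own load; priority 1 competes evenly with it. */
    static void example_exec_with_prio(double computation, double prio)
    {
      msg_task_t task = MSG_task_create("__task_deferred", computation, 0, NULL);
      MSG_task_set_priority(task, prio);
      MSG_task_execute(task);
      MSG_task_destroy(task);
    }
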
@@ -571,7 +579,7 @@ static int task_tx_overhead_fun(int argc, char *argv[])
 
   int need_exit = 0;
 
-  XBT_INFO("start %s", mbox);
+  // XBT_INFO("start %s", mbox);
 
   for (;;) {
     msg_task_t task = NULL;
@@ -583,6 +591,7 @@ static int task_tx_overhead_fun(int argc, char *argv[])
       need_exit = 1;
 
     // XBT_INFO("exec");
+    // MSG_task_set_priority(task, 1000000);
     MSG_task_execute(task);
     MSG_task_destroy(task);
 
@@ -590,7 +599,7 @@ static int task_tx_overhead_fun(int argc, char *argv[])
       break;
   }
 
-  XBT_INFO("bye");
+  // XBT_INFO("bye");
 
   return 0;
 }
@@ -606,8 +615,8 @@ static void start_overhead_process(msg_task_t comm_task)
   argv[1] = xbt_strdup(mbox);
   argv[2] = NULL;
 
-  XBT_INFO("micro start: mbox %s", mbox);
-  msg_process_t pr = MSG_process_create_with_arguments(pr_name, task_tx_overhead_fun, NULL, MSG_host_self(), nargvs - 1, argv);
+  // XBT_INFO("micro start: mbox %s", mbox);
+  MSG_process_create_with_arguments(pr_name, task_tx_overhead_fun, NULL, MSG_host_self(), nargvs - 1, argv);
 
   xbt_free(pr_name);
   xbt_free(mbox);
@@ -619,12 +628,12 @@ static void shutdown_overhead_process(msg_task_t comm_task)
 
   msg_task_t task = MSG_task_create("finalize_making_overhead", 0, 0, NULL);
 
-  XBT_INFO("micro shutdown: mbox %s", mbox);
+  // XBT_INFO("micro shutdown: mbox %s", mbox);
   msg_error_t ret = MSG_task_send(task, mbox);
   xbt_assert(ret == MSG_OK);
 
   xbt_free(mbox);
-  XBT_INFO("shutdown done");
+  // XBT_INFO("shutdown done");
 }
 
 static void request_overhead(msg_task_t comm_task, double computation)
@@ -633,7 +642,7 @@ static void request_overhead(msg_task_t comm_task, double computation)
 
   msg_task_t task = MSG_task_create("micro", computation, 0, NULL);
 
-  XBT_INFO("req overhead");
+  // XBT_INFO("req overhead");
   msg_error_t ret = MSG_task_send(task, mbox);
   xbt_assert(ret == MSG_OK);
 
@@ -650,7 +659,7 @@ static void request_overhead(msg_task_t comm_task, double computation)
  * */
 static void task_send_bounded_with_cpu_overhead(msg_task_t comm_task, char *mbox, double mig_speed, double alpha)
 {
-  const double chunk_size = 1024 * 1024;
+  const double chunk_size = 1024 * 1024 * 10;
   double remaining = MSG_task_get_data_size(comm_task);
 
   start_overhead_process(comm_task);
@@ -663,7 +672,7 @@ static void task_send_bounded_with_cpu_overhead(msg_task_t comm_task, char *mbox
 
     remaining -= data_size;
 
-    XBT_INFO("remaining %f bytes", remaining);
+    // XBT_INFO("remaining %f bytes", remaining);
 
     double clock_sta = MSG_get_clock();
 
@@ -712,12 +721,12 @@ static void task_send_bounded_with_cpu_overhead(msg_task_t comm_task, char *mbox
 
       MSG_process_sleep(time_to_sleep);
 
-      XBT_INFO("duration %f", clock_end - clock_sta);
-      XBT_INFO("time_to_sleep %f", time_to_sleep);
+      //XBT_INFO("duration %f", clock_end - clock_sta);
+      //XBT_INFO("time_to_sleep %f", time_to_sleep);
     }
   }
 
-  XBT_INFO("%s", MSG_task_get_name(comm_task));
+  // XBT_INFO("%s", MSG_task_get_name(comm_task));
   shutdown_overhead_process(comm_task);
 }
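task_send_bounded_with_cpu_overhead() now interleaves 10-MByte sends with overhead requests to the helper process started above. A stripped-down sketch of that loop, omitting the sleep-based timing compensation (identifiers are illustrative, and the per-byte overhead alpha is whatever the caller passes in):

    /* Sketch: send `size` bytes over `mbox` in fixed chunks, charging
     * alpha flops of CPU overhead per transferred byte. */
    static void example_chunked_send(char *mbox, double size, double mig_speed, double alpha)
    {
      const double chunk_size = 1024 * 1024 * 10;
      double remaining = size;

      while (remaining > 0) {
        double data_size = (remaining < chunk_size) ? remaining : chunk_size;
        remaining -= data_size;

        msg_task_t chunk = MSG_task_create("mig_chunk", 0, data_size, NULL);
        msg_error_t ret = MSG_task_send_bounded(chunk, mbox, mig_speed);
        xbt_assert(ret == MSG_OK);

        /* the real code now asks the helper process, via request_overhead(),
         * to burn data_size * alpha flops on this host */
      }
    }
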
@@ -755,31 +764,37 @@ static void make_cpu_overhead_of_data_transfer(msg_task_t comm_task, double init
 
 #define USE_MICRO_TASK 1
 
+#if 0
+// const double alpha = 0.1L * 1.0E8 / (32L * 1024 * 1024);
+// const double alpha = 0.25L * 1.0E8 / (85L * 1024 * 1024);
+// const double alpha = 0.20L * 1.0E8 / (85L * 1024 * 1024);
+// const double alpha = 0.25L * 1.0E8 / (85L * 1024 * 1024);
+// const double alpha = 0.32L * 1.0E8 / (24L * 1024 * 1024);   // makes super good values for 32 mbytes/s
+//const double alpha = 0.32L * 1.0E8 / (32L * 1024 * 1024);
+// const double alpha = 0.56L * 1.0E8 / (80L * 1024 * 1024);
+////const double alpha = 0.20L * 1.0E8 / (80L * 1024 * 1024);
+// const double alpha = 0.56L * 1.0E8 / (90L * 1024 * 1024);
+// const double alpha = 0.66L * 1.0E8 / (90L * 1024 * 1024);
+// const double alpha = 0.20L * 1.0E8 / (80L * 1024 * 1024);
+
+/* CPU 22% when 80Mbyte/s */
+const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024);
+#endif
+
+
 static void send_migration_data(const char *vm_name, const char *src_pm_name, const char *dst_pm_name,
-    double size, char *mbox, int stage, int stage2_round, double mig_speed)
+    double size, char *mbox, int stage, int stage2_round, double mig_speed, double xfer_cpu_overhead)
 {
   char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, stage);
   msg_task_t task = MSG_task_create(task_name, 0, size, NULL);
 
+  /* TODO: clean up */
   double clock_sta = MSG_get_clock();
 
 #ifdef USE_MICRO_TASK
-  // const double alpha = 0.1L * 1.0E8 / (32L * 1024 * 1024);
-  // const double alpha = 0.25L * 1.0E8 / (85L * 1024 * 1024);
-  // const double alpha = 0.20L * 1.0E8 / (85L * 1024 * 1024);
-  // const double alpha = 0.25L * 1.0E8 / (85L * 1024 * 1024);
-  // const double alpha = 0.32L * 1.0E8 / (24L * 1024 * 1024);   // makes super good values for 32 mbytes/s
-  //const double alpha = 0.32L * 1.0E8 / (32L * 1024 * 1024);
-  // const double alpha = 0.56L * 1.0E8 / (80L * 1024 * 1024);
-  ////const double alpha = 0.20L * 1.0E8 / (80L * 1024 * 1024);
-  // const double alpha = 0.56L * 1.0E8 / (90L * 1024 * 1024);
-  // const double alpha = 0.66L * 1.0E8 / (90L * 1024 * 1024);
-
-  // const double alpha = 0.20L * 1.0E8 / (80L * 1024 * 1024);
-  const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024);
-
-  task_send_bounded_with_cpu_overhead(task, mbox, mig_speed, alpha);
+
+  task_send_bounded_with_cpu_overhead(task, mbox, mig_speed, xfer_cpu_overhead);
 #else
   msg_error_t ret;
@@ -794,7 +809,7 @@ static void send_migration_data(const char *vm_name, const char *src_pm_name, co
   double duration = clock_end - clock_sta;
   double actual_speed = size / duration;
 #ifdef USE_MICRO_TASK
-  double cpu_utilization = size * alpha / duration / 1.0E8;
+  double cpu_utilization = size * xfer_cpu_overhead / duration / 1.0E8;
 #else
   double cpu_utilization = 0;
 #endif
@@ -827,12 +842,56 @@ static void send_migration_data(const char *vm_name, const char *src_pm_name, co
 #endif
 }
 
+static double get_updated_size(double computed, double dp_rate, double dp_cap)
+{
+  double updated_size = computed * dp_rate;
+  XBT_INFO("updated_size %f dp_rate %f", updated_size, dp_rate);
+  if (updated_size > dp_cap) {
+    // XBT_INFO("mig-stage2.%d: %f bytes updated, but cap it with the working set size %f", stage2_round, updated_size, dp_cap);
+    updated_size = dp_cap;
+  }
 
-static int migration_tx_fun(int argc, char *argv[])
+  return updated_size;
+}
+
+static double send_stage1(msg_host_t vm, const char *src_pm_name, const char *dst_pm_name,
+    long ramsize, double mig_speed, double xfer_cpu_overhead, double dp_rate, double dp_cap, double dpt_cpu_overhead)
 {
-  const char *pr_name = MSG_process_get_name(MSG_process_self());
-  const char *host_name = MSG_host_get_name(MSG_host_self());
+  const char *vm_name = MSG_host_get_name(vm);
+  char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name);
+
+  const long chunksize = 1024 * 1024 * 100;
+  long remaining = ramsize;
+  double computed_total = 0;
+
+  while (remaining > 0) {
+    long datasize = chunksize;
+    if (remaining < chunksize)
+      datasize = remaining;
+
+    remaining -= datasize;
+
+    send_migration_data(vm_name, src_pm_name, dst_pm_name, datasize, mbox, 1, 0, mig_speed, xfer_cpu_overhead);
+
+    double computed = lookup_computed_flop_counts(vm, 1, 0);
+    computed_total += computed;
+
+    {
+      double updated_size = get_updated_size(computed, dp_rate, dp_cap);
+      double overhead = dpt_cpu_overhead * updated_size;
+      launch_deferred_exec_process(vm, overhead, 10000);
+    }
+  }
+
+  return computed_total;
+}
+
+
+
+
+static int migration_tx_fun(int argc, char *argv[])
+{
   XBT_DEBUG("mig: tx_start");
 
   xbt_assert(argc == 4);
@@ -851,6 +910,9 @@ static int migration_tx_fun(int argc, char *argv[])
   const double dp_rate = params.dp_rate;
   const double dp_cap = params.dp_cap;
   const double mig_speed = params.mig_speed;
+  const double xfer_cpu_overhead = params.xfer_cpu_overhead;
+  const double dpt_cpu_overhead = params.dpt_cpu_overhead;
+
   double remaining_size = ramsize + devsize;
   double max_downtime = params.max_downtime;
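get_updated_size() converts the flops the guest computed into dirtied bytes and clamps the result at the working-set size. Plugging in the illustrative numbers from the MSG_vm_create sketch earlier (all values assumed, not taken from the patch):

    /* Worked example: dp_rate ~ 0.66 bytes/flop, dp_cap ~ 1.19e9 bytes,
     * mig_speed 125 MBytes/s. One 100-MByte stage-1 chunk takes ~0.8 s;
     * a fully loaded 1E8-flop/s guest computes ~0.8e8 flops meanwhile, so
     * about 0.8e8 * 0.66 ~ 5.3e7 bytes were dirtied -- far below dp_cap,
     * hence no clamping. */
    double updated = get_updated_size(0.8e8, 0.66, 1.19e9);  /* ~5.3e7 */
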
@@ -875,13 +937,16 @@
 
   /* Stage1: send all memory pages to the destination. */
   start_dirty_page_tracking(vm);
 
+  double computed_during_stage1 = 0;
   if (!skip_stage1) {
-    send_migration_data(vm_name, src_pm_name, dst_pm_name, ramsize, mbox, 1, 0, mig_speed);
+    // send_migration_data(vm_name, src_pm_name, dst_pm_name, ramsize, mbox, 1, 0, mig_speed, xfer_cpu_overhead);
+
+    /* send ramsize, but split it */
+    computed_during_stage1 = send_stage1(vm, src_pm_name, dst_pm_name, ramsize, mig_speed, xfer_cpu_overhead, dp_rate, dp_cap, dpt_cpu_overhead);
     remaining_size -= ramsize;
   }
 
-
   /* Stage2: send update pages iteratively until the size of remaining states
    * becomes smaller than the threshold value. */
   if (skip_stage2)
@@ -894,33 +959,40 @@
 
   int stage2_round = 0;
   for (;;) {
-    // long updated_size = lookup_dirty_pages(vm);
-    double updated_size = lookup_computed_flop_counts(vm, stage2_round) * dp_rate;
-    if (updated_size > dp_cap) {
-      XBT_INFO("mig-stage2.%d: %f bytes updated, but cap it with the working set size %f",
-          stage2_round, updated_size, dp_cap);
-      updated_size = dp_cap;
+
+    double updated_size = 0;
+    if (stage2_round == 0) {
+      /* just after stage1, nothing has been updated. But, we have to send the data updated during stage1 */
+      updated_size = get_updated_size(computed_during_stage1, dp_rate, dp_cap);
+    } else {
+      double computed = lookup_computed_flop_counts(vm, 2, stage2_round);
+      updated_size = get_updated_size(computed, dp_rate, dp_cap);
     }
 
+    XBT_INFO("%d updated_size %f computed_during_stage1 %f dp_rate %f dp_cap %f",
+        stage2_round, updated_size, computed_during_stage1, dp_rate, dp_cap);
 
-    // double dpt_overhead_parameter = 1.0L * 1E8 / 0.5 / 40 / 1024 / 1024 * 1000 * 1000 * 1000 * 1000 * 1000; // super cool, but 520 for 0 32 8g 75%
-    // double dpt_overhead_parameter = 1.0L * 1E8 / 0.5 / 40 / 1024 / 1024 * 1000 * 1000 * 1000 * 1000 * 1000 * 1000 * 1000;
-    // double dpt_overhead_parameter = 1.0L * 1E8 / 0.5 / 40 / 1024 / 1024 * 1000 * 1000 * 1000 * 1000;
-    double dpt_overhead_parameter = 1.0L * 1E8 / 0.5 / 40 / 1024 / 1024 * 1000 * 1000 * 1000 * 1000 * 1000 * 1000;
-    double overhead = dpt_overhead_parameter * updated_size;
-    XBT_INFO("updated %f overhead %f", updated_size, overhead);
-    launch_deferred_exec_process(vm, overhead);
+    if (stage2_round != 0) {
+      /* during stage1, we have already created overhead tasks */
+      double overhead = dpt_cpu_overhead * updated_size;
+      XBT_INFO("updated %f overhead %f", updated_size, overhead);
+      launch_deferred_exec_process(vm, overhead, 10000);
+    }
 
-    remaining_size += updated_size;
-    XBT_INFO("mig-stage2.%d: remaining_size %f (%s threshold %f)", stage2_round,
-        remaining_size, (remaining_size < threshold) ? "<" : ">", threshold);
+    {
+      remaining_size += updated_size;
 
-    if (remaining_size < threshold)
-      break;
+      XBT_INFO("mig-stage2.%d: remaining_size %f (%s threshold %f)", stage2_round,
+          remaining_size, (remaining_size < threshold) ? "<" : ">", threshold);
 
-    send_migration_data(vm_name, src_pm_name, dst_pm_name, updated_size, mbox, 2, stage2_round, mig_speed);
+      if (remaining_size < threshold)
+        break;
+    }
+
+
+    send_migration_data(vm_name, src_pm_name, dst_pm_name, updated_size, mbox, 2, stage2_round, mig_speed, xfer_cpu_overhead);
 
     remaining_size -= updated_size;
     stage2_round += 1;
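The restructured loop treats round 0 specially: it resends what was dirtied during stage 1 (whose tracking overhead send_stage1() already charged), while later rounds query the dirty-page tracker and charge the overhead themselves. Under the illustrative parameters of the earlier sketches, and with an assumed threshold (its computation lies outside this hunk), convergence looks roughly like this:

    /* Hypothetical stage-2 trace, all values illustrative:
     *   mig_speed 125 MBytes/s, dirtying rate 62.5 MBytes/s,
     *   threshold ~ 3.9e6 bytes (assumed: 0.03 s * 125 MBytes/s).
     *   round 0: resend the ~5.4e8 bytes dirtied during the ~8.2 s of stage 1
     *   round 1: that 4.1 s transfer dirtied ~2.7e8 bytes -> resend
     *   round 2: ~1.35e8 bytes -> resend ... (roughly halving per round)
     *   round 8: updated size ~2.1e6 bytes < threshold -> break, and
     *            stage 3 suspends the VM and sends the remainder. */
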
"<" : ">", threshold); - send_migration_data(vm_name, src_pm_name, dst_pm_name, updated_size, mbox, 2, stage2_round, mig_speed); + if (remaining_size < threshold) + break; + } + + + send_migration_data(vm_name, src_pm_name, dst_pm_name, updated_size, mbox, 2, stage2_round, mig_speed, xfer_cpu_overhead); remaining_size -= updated_size; stage2_round += 1; @@ -933,7 +1005,7 @@ stage3: simcall_vm_suspend(vm); stop_dirty_page_tracking(vm); - send_migration_data(vm_name, src_pm_name, dst_pm_name, remaining_size, mbox, 3, 0, mig_speed); + send_migration_data(vm_name, src_pm_name, dst_pm_name, remaining_size, mbox, 3, 0, mig_speed, xfer_cpu_overhead); xbt_free(mbox); @@ -958,7 +1030,7 @@ static void do_migration(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm) argv[3] = xbt_strdup(sg_host_name(dst_pm)); argv[4] = NULL; - msg_process_t pr = MSG_process_create_with_arguments(pr_name, migration_rx_fun, NULL, dst_pm, nargvs - 1, argv); + MSG_process_create_with_arguments(pr_name, migration_rx_fun, NULL, dst_pm, nargvs - 1, argv); xbt_free(pr_name); } @@ -972,7 +1044,7 @@ static void do_migration(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm) argv[2] = xbt_strdup(sg_host_name(src_pm)); argv[3] = xbt_strdup(sg_host_name(dst_pm)); argv[4] = NULL; - msg_process_t pr = MSG_process_create_with_arguments(pr_name, migration_tx_fun, NULL, src_pm, nargvs - 1, argv); + MSG_process_create_with_arguments(pr_name, migration_tx_fun, NULL, src_pm, nargvs - 1, argv); xbt_free(pr_name); } @@ -1109,8 +1181,6 @@ void MSG_vm_restore(msg_vm_t vm) } - - /** @brief Get the physical host of a given VM. * @ingroup msg_VMs */ @@ -1118,3 +1188,41 @@ msg_host_t MSG_vm_get_pm(msg_vm_t vm) { return simcall_vm_get_pm(vm); } + + +/** @brief Set a CPU bound for a given VM. + * @ingroup msg_VMs + * + * 1. + * Note that in some cases MSG_task_set_bound() may not intuitively work for VMs. + * + * For example, + * On PM0, there are Task1 and VM0. + * On VM0, there is Task2. + * Now we bound 75% to Task1@PM0 and bound 25% to Task2@VM0. + * Then, + * Task1@PM0 gets 50%. + * Task2@VM0 gets 25%. + * This is NOT 75% for Task1@PM0 and 25% for Task2@VM0, respectively. + * + * This is because a VM has the dummy CPU action in the PM layer. Putting a + * task on the VM does not affect the bound of the dummy CPU action. The bound + * of the dummy CPU action is unlimited. + * + * There are some solutions for this problem. One option is to update the bound + * of the dummy CPU action automatically. It should be the sum of all tasks on + * the VM. But, this solution might be costy, because we have to scan all tasks + * on the VM in share_resource() or we have to trap both the start and end of + * task execution. + * + * The current solution is to use MSG_vm_set_bound(), which allows us to + * directly set the bound of the dummy CPU action. + * + * + * 2. + * Note that bound == 0 means no bound (i.e., unlimited). + */ +void MSG_vm_set_bound(msg_vm_t vm, double bound) +{ + return simcall_vm_set_bound(vm, bound); +}