From: Takahiro Hirofuchi Date: Wed, 5 Jun 2013 21:45:03 +0000 (+0200) Subject: Merge branch 'hypervisor' of scm.gforge.inria.fr:/gitroot/simgrid/simgrid into hypervisor X-Git-Tag: v3_11_beta~297^2^2~34 X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/dd22867177c86db2e9d0b76852cb09e044b1c0c8?hp=220a0c6c5a2edccc979b3c7f8ebc1409ff77ae0f Merge branch 'hypervisor' of scm.gforge.inria.fr:/gitroot/simgrid/simgrid into hypervisor --- diff --git a/examples/msg/cloud/CMakeLists.txt b/examples/msg/cloud/CMakeLists.txt index a19ba8bd20..29c9f3c96e 100644 --- a/examples/msg/cloud/CMakeLists.txt +++ b/examples/msg/cloud/CMakeLists.txt @@ -5,11 +5,13 @@ set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}") add_executable(masterslave_virtual_machines "masterslave_virtual_machines.c") add_executable(simple_vm "simple_vm.c") add_executable(migrate_vm "migrate_vm.c") +add_executable(bound "bound.c") ### Add definitions for compile target_link_libraries(masterslave_virtual_machines simgrid) target_link_libraries(simple_vm simgrid) target_link_libraries(migrate_vm simgrid) +target_link_libraries(bound simgrid) set(tesh_files ${tesh_files} @@ -27,6 +29,7 @@ set(examples_src ${CMAKE_CURRENT_SOURCE_DIR}/masterslave_virtual_machines.c ${CMAKE_CURRENT_SOURCE_DIR}/simple_vm.c ${CMAKE_CURRENT_SOURCE_DIR}/migrate_vm.c + ${CMAKE_CURRENT_SOURCE_DIR}/bound.c PARENT_SCOPE ) set(bin_files diff --git a/examples/msg/cloud/simple_vm.c b/examples/msg/cloud/simple_vm.c index fce5e13dd3..e61c01c93c 100644 --- a/examples/msg/cloud/simple_vm.c +++ b/examples/msg/cloud/simple_vm.c @@ -282,6 +282,7 @@ int main(int argc, char *argv[]) MSG_init(&argc, argv); /* load the platform file */ + xbt_assert(argc == 2); MSG_create_environment(argv[1]); xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar(); diff --git a/include/msg/msg.h b/include/msg/msg.h index cf912376c5..e042ab7b3e 100644 --- a/include/msg/msg.h +++ b/include/msg/msg.h @@ -205,6 +205,7 @@ XBT_PUBLIC(msg_error_t) MSG_task_receive_from_host_bounded(msg_task_t * task, co XBT_PUBLIC(msg_error_t) MSG_task_execute(msg_task_t task); XBT_PUBLIC(msg_error_t) MSG_parallel_task_execute(msg_task_t task); XBT_PUBLIC(void) MSG_task_set_priority(msg_task_t task, double priority); +XBT_PUBLIC(void) MSG_task_set_bound(msg_task_t task, double bound); XBT_PUBLIC(msg_error_t) MSG_process_sleep(double nb_sec); diff --git a/include/simgrid/platf.h b/include/simgrid/platf.h index 723c659185..27af1c3825 100644 --- a/include/simgrid/platf.h +++ b/include/simgrid/platf.h @@ -67,11 +67,15 @@ typedef struct ws_params { /* The size of other states than memory pages, which is out-of-scope of dirty * page tracking. 
*/ long devsize; + int skip_stage1; int skip_stage2; double max_downtime; double dp_rate; - double dp_cap; + double dp_cap; /* bytes per 1 flop execution */ + + double xfer_cpu_overhead; + double dpt_cpu_overhead; /* set migration speed */ double mig_speed; diff --git a/include/simgrid/simix.h b/include/simgrid/simix.h index 36bce8d4b5..171e4a996c 100644 --- a/include/simgrid/simix.h +++ b/include/simgrid/simix.h @@ -307,7 +307,7 @@ XBT_PUBLIC(void) simcall_host_set_data(smx_host_t host, void *data); XBT_PUBLIC(smx_action_t) simcall_host_execute(const char *name, smx_host_t host, double computation_amount, - double priority); + double priority, double bound); XBT_PUBLIC(smx_action_t) simcall_host_parallel_execute(const char *name, int host_nb, smx_host_t *host_list, @@ -320,6 +320,7 @@ XBT_PUBLIC(void) simcall_host_execution_cancel(smx_action_t execution); XBT_PUBLIC(double) simcall_host_execution_get_remains(smx_action_t execution); XBT_PUBLIC(e_smx_state_t) simcall_host_execution_get_state(smx_action_t execution); XBT_PUBLIC(void) simcall_host_execution_set_priority(smx_action_t execution, double priority); +XBT_PUBLIC(void) simcall_host_execution_set_bound(smx_action_t execution, double bound); XBT_PUBLIC(e_smx_state_t) simcall_host_execution_wait(smx_action_t execution); XBT_PUBLIC(void) simcall_host_get_params(smx_host_t vm, ws_params_t param); XBT_PUBLIC(void) simcall_host_set_params(smx_host_t vm, ws_params_t param); diff --git a/src/include/surf/surf.h b/src/include/surf/surf.h index c5b183ccec..07b15b329b 100644 --- a/src/include/surf/surf.h +++ b/src/include/surf/surf.h @@ -100,6 +100,7 @@ typedef struct surf_action { xbt_swag_t state_set; double cost; /**< cost */ double priority; /**< priority (1.0 by default) */ + double bound; /**< the capping of the CPU use */ double max_duration; /**< max_duration (may fluctuate until the task is completed) */ double remains; /**< How much of that cost remains to @@ -370,6 +371,7 @@ typedef struct surf_model { int (*is_suspended) (surf_action_t action); /**< Return whether an action is suspended */ void (*set_max_duration) (surf_action_t action, double duration); /**< Set the max duration of an action*/ void (*set_priority) (surf_action_t action, double priority); /**< Set the priority of an action */ + void (*set_bound) (surf_action_t action, double bound); /**< Set the bound (the maximum CPU utilization) of an action */ #ifdef HAVE_TRACING void (*set_category) (surf_action_t action, const char *category); /**< Set the category of an action */ #endif diff --git a/src/msg/msg_gos.c b/src/msg/msg_gos.c index e6bd45af50..e8112ed93a 100644 --- a/src/msg/msg_gos.c +++ b/src/msg/msg_gos.c @@ -85,7 +85,8 @@ msg_error_t MSG_parallel_task_execute(msg_task_t task) simdata->compute = simcall_host_execute(task->name, p_simdata->m_host, simdata->computation_amount, - simdata->priority); + simdata->priority, + simdata->bound); } #ifdef HAVE_TRACING diff --git a/src/msg/msg_private.h b/src/msg/msg_private.h index da670e193b..884ab057bd 100644 --- a/src/msg/msg_private.h +++ b/src/msg/msg_private.h @@ -31,7 +31,8 @@ typedef struct simdata_task { msg_process_t receiver; msg_host_t source; double priority; - double rate; + double bound; /* Capping for CPU resource */ + double rate; /* Capping for network resource */ int isused; /* Indicates whether the task is used in SIMIX currently */ int host_nb; /* ==0 if sequential task; parallel task if not */ /******* Parallel Tasks Only !!!! 
*******/ diff --git a/src/msg/msg_task.c b/src/msg/msg_task.c index a32e0d81a5..03f58104ce 100644 --- a/src/msg/msg_task.c +++ b/src/msg/msg_task.c @@ -62,6 +62,7 @@ msg_task_t MSG_task_create(const char *name, double compute_duration, simdata->receiver = NULL; simdata->source = NULL; simdata->priority = 1.0; + simdata->bound = 0; simdata->rate = -1.0; simdata->isused = 0; @@ -438,3 +439,23 @@ void MSG_task_set_priority(msg_task_t task, double priority) simcall_host_execution_set_priority(task->simdata->compute, task->simdata->priority); } + + +/** \ingroup m_task_management + * \brief Changes the maximum CPU utilization of a computation task. + * Unit is flops/s. + * + */ +void MSG_task_set_bound(msg_task_t task, double bound) +{ + xbt_assert(task, "Invalid parameter"); + xbt_assert(task->simdata, "Invalid parameter"); + + if (bound == 0) + XBT_INFO("bound == 0 means no capping"); + + task->simdata->bound = bound; + if (task->simdata->compute) + simcall_host_execution_set_bound(task->simdata->compute, + task->simdata->bound); +} diff --git a/src/msg/msg_vm.c b/src/msg/msg_vm.c index 102e8222fe..113b52167f 100644 --- a/src/msg/msg_vm.c +++ b/src/msg/msg_vm.c @@ -308,6 +308,8 @@ static inline char *get_mig_task_name(const char *vm_name, const char *src_pm_na return bprintf("__task_mig_stage%d:%s(%s-%s)", stage, vm_name, src_pm_name, dst_pm_name); } +static void launch_deferred_exec_process(msg_host_t host, double computation, double prio); + static int migration_rx_fun(int argc, char *argv[]) { XBT_DEBUG("mig: rx_start"); @@ -319,6 +321,12 @@ static int migration_rx_fun(int argc, char *argv[]) msg_vm_t vm = MSG_get_host_by_name(vm_name); msg_vm_t dst_pm = MSG_get_host_by_name(dst_pm_name); + + s_ws_params_t params; + simcall_host_get_params(vm, ¶ms); + const double xfer_cpu_overhead = params.xfer_cpu_overhead; + + int need_exit = 0; char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name); @@ -328,6 +336,12 @@ static int migration_rx_fun(int argc, char *argv[]) for (;;) { msg_task_t task = NULL; MSG_task_recv(&task, mbox); + { + double received = MSG_task_get_data_size(task); + /* TODO: clean up */ + // const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024); + launch_deferred_exec_process(vm, received * xfer_cpu_overhead, 1); + } if (strcmp(task->name, finalize_task_name) == 0) need_exit = 1; @@ -422,7 +436,7 @@ double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remaining, d return computed; } -static double lookup_computed_flop_counts(msg_vm_t vm, int stage2_round_for_fancy_debug) +static double lookup_computed_flop_counts(msg_vm_t vm, int stage_for_fancy_debug, int stage2_round_for_fancy_debug) { msg_host_priv_t priv = msg_host_resource_priv(vm); double total = 0; @@ -443,7 +457,8 @@ static double lookup_computed_flop_counts(msg_vm_t vm, int stage2_round_for_fanc total += priv->dp_updated_by_deleted_tasks; - XBT_INFO("mig-stage2.%d: computed %f flop_counts (including %f by deleted tasks)", + XBT_INFO("mig-stage%d.%d: computed %f flop_counts (including %f by deleted tasks)", + stage_for_fancy_debug, stage2_round_for_fancy_debug, total, priv->dp_updated_by_deleted_tasks); @@ -509,27 +524,362 @@ void MSG_host_del_task(msg_host_t host, msg_task_t task) } +static int deferred_exec_fun(int argc, char *argv[]) +{ + xbt_assert(argc == 3); + const char *comp_str = argv[1]; + double computaion = atof(comp_str); + const char *prio_str = argv[2]; + double prio = atof(prio_str); + + msg_task_t task = MSG_task_create("__task_deferred", computaion, 0, NULL); + // 
XBT_INFO("exec deferred %f", computaion); + + /* dpt is the results of the VM activity */ + MSG_task_set_priority(task, prio); + MSG_task_execute(task); + + + + MSG_task_destroy(task); + + return 0; +} + +static void launch_deferred_exec_process(msg_host_t host, double computation, double prio) +{ + char *pr_name = bprintf("__pr_deferred_exec_%s", MSG_host_get_name(host)); + + int nargvs = 4; + char **argv = xbt_new(char *, nargvs); + argv[0] = xbt_strdup(pr_name); + argv[1] = bprintf("%lf", computation); + argv[2] = bprintf("%lf", prio); + argv[3] = NULL; + + msg_process_t pr = MSG_process_create_with_arguments(pr_name, deferred_exec_fun, NULL, host, nargvs - 1, argv); + + xbt_free(pr_name); +} + + +static int task_tx_overhead_fun(int argc, char *argv[]) +{ + xbt_assert(argc == 2); + const char *mbox = argv[1]; + + int need_exit = 0; + + // XBT_INFO("start %s", mbox); + + for (;;) { + msg_task_t task = NULL; + MSG_task_recv(&task, mbox); + + // XBT_INFO("task->name %s", task->name); + + if (strcmp(task->name, "finalize_making_overhead") == 0) + need_exit = 1; + + // XBT_INFO("exec"); + // MSG_task_set_priority(task, 1000000); + MSG_task_execute(task); + MSG_task_destroy(task); + + if (need_exit) + break; + } + + // XBT_INFO("bye"); + + return 0; +} + +static void start_overhead_process(msg_task_t comm_task) +{ + char *pr_name = bprintf("__pr_task_tx_overhead_%s", MSG_task_get_name(comm_task)); + char *mbox = bprintf("__mb_task_tx_overhead_%s", MSG_task_get_name(comm_task)); + + int nargvs = 3; + char **argv = xbt_new(char *, nargvs); + argv[0] = xbt_strdup(pr_name); + argv[1] = xbt_strdup(mbox); + argv[2] = NULL; + + // XBT_INFO("micro start: mbox %s", mbox); + msg_process_t pr = MSG_process_create_with_arguments(pr_name, task_tx_overhead_fun, NULL, MSG_host_self(), nargvs - 1, argv); + + xbt_free(pr_name); + xbt_free(mbox); +} + +static void shutdown_overhead_process(msg_task_t comm_task) +{ + char *mbox = bprintf("__mb_task_tx_overhead_%s", MSG_task_get_name(comm_task)); + + msg_task_t task = MSG_task_create("finalize_making_overhead", 0, 0, NULL); + + // XBT_INFO("micro shutdown: mbox %s", mbox); + msg_error_t ret = MSG_task_send(task, mbox); + xbt_assert(ret == MSG_OK); + + xbt_free(mbox); + // XBT_INFO("shutdown done"); +} + +static void request_overhead(msg_task_t comm_task, double computation) +{ + char *mbox = bprintf("__mb_task_tx_overhead_%s", MSG_task_get_name(comm_task)); + + msg_task_t task = MSG_task_create("micro", computation, 0, NULL); + + // XBT_INFO("req overhead"); + msg_error_t ret = MSG_task_send(task, mbox); + xbt_assert(ret == MSG_OK); + + xbt_free(mbox); +} + +/* alpha is (floating_operations / bytes). + * + * When actual migration traffic was 32 mbytes/s, we observed the CPU + * utilization of the main thread of the Qemu process was 10 %. + * alpha = 0.1 * C / (32 * 1024 * 1024) + * where the CPU capacity of the PM is C flops/s. 
+ * + * */ +static void task_send_bounded_with_cpu_overhead(msg_task_t comm_task, char *mbox, double mig_speed, double alpha) +{ + const double chunk_size = 1024 * 1024 * 10; + double remaining = MSG_task_get_data_size(comm_task); + + start_overhead_process(comm_task); + + + while (remaining > 0) { + double data_size = chunk_size; + if (remaining < chunk_size) + data_size = remaining; + + remaining -= data_size; + + // XBT_INFO("remaining %f bytes", remaining); + + + double clock_sta = MSG_get_clock(); + + /* create a micro task */ + { + char *mtask_name = bprintf("__micro_%s", MSG_task_get_name(comm_task)); + msg_task_t mtask = MSG_task_create(mtask_name, 0, data_size, NULL); + + request_overhead(comm_task, data_size * alpha); + + msg_error_t ret = MSG_task_send(mtask, mbox); + xbt_assert(ret == MSG_OK); + + xbt_free(mtask_name); + } + +#if 0 + { + /* In the real world, sending data involves small CPU computation. */ + char *mtask_name = bprintf("__micro_%s", MSG_task_get_name(comm_task)); + msg_task_t mtask = MSG_task_create(mtask_name, data_size * alpha, data_size, NULL); + MSG_task_execute(mtask); + MSG_task_destroy(mtask); + xbt_free(mtask_name); + } +#endif + + /* TODO */ + + double clock_end = MSG_get_clock(); + + + if (mig_speed > 0) { + /* + * (max bandwidth) > data_size / ((elapsed time) + time_to_sleep) + * + * Thus, we get + * time_to_sleep > data_size / (max bandwidth) - (elapsed time) + * + * If time_to_sleep is smaller than zero, the elapsed time was too big. We + * do not need a micro sleep. + **/ + double time_to_sleep = data_size / mig_speed - (clock_end - clock_sta); + if (time_to_sleep > 0) + MSG_process_sleep(time_to_sleep); + + + //XBT_INFO("duration %f", clock_end - clock_sta); + //XBT_INFO("time_to_sleep %f", time_to_sleep); + } + } + + // XBT_INFO("%s", MSG_task_get_name(comm_task)); + shutdown_overhead_process(comm_task); + +} + + +#if 0 +static void make_cpu_overhead_of_data_transfer(msg_task_t comm_task, double init_comm_size) +{ + double prev_remaining = init_comm_size; + + for (;;) { + double remaining = MSG_task_get_remaining_communication(comm_task); + if (remaining == 0) + need_exit = 1; + + double sent = prev_remaining - remaining; + double comp_size = sent * overhead; + + + char *comp_task_name = bprintf("__sender_overhead%s", MSG_task_get_name(comm_task)); + msg_task_t comp_task = MSG_task_create(comp_task_name, comp_size, 0, NULL); + MSG_task_execute(comp_task); + MSG_task_destroy(comp_task); + + if (need_exit) + break; + + prev_remaining = remaining; + + } + + xbt_free(comp_task_name); +} +#endif + +#define USE_MICRO_TASK 1 + +#if 0 +// const double alpha = 0.1L * 1.0E8 / (32L * 1024 * 1024); +// const double alpha = 0.25L * 1.0E8 / (85L * 1024 * 1024); +// const double alpha = 0.20L * 1.0E8 / (85L * 1024 * 1024); +// const double alpha = 0.25L * 1.0E8 / (85L * 1024 * 1024); +// const double alpha = 0.32L * 1.0E8 / (24L * 1024 * 1024); // makes super good values for 32 mbytes/s +//const double alpha = 0.32L * 1.0E8 / (32L * 1024 * 1024); +// const double alpha = 0.56L * 1.0E8 / (80L * 1024 * 1024); +////const double alpha = 0.20L * 1.0E8 / (80L * 1024 * 1024); +// const double alpha = 0.56L * 1.0E8 / (90L * 1024 * 1024); +// const double alpha = 0.66L * 1.0E8 / (90L * 1024 * 1024); +// const double alpha = 0.20L * 1.0E8 / (80L * 1024 * 1024); + +/* CPU 22% when 80Mbyte/s */ +const double alpha = 0.22L * 1.0E8 / (80L * 1024 * 1024); +#endif + + static void send_migration_data(const char *vm_name, const char *src_pm_name, const char *dst_pm_name, - 
double size, char *mbox, int stage, int stage2_round, double mig_speed) + double size, char *mbox, int stage, int stage2_round, double mig_speed, double xfer_cpu_overhead) { char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, stage); msg_task_t task = MSG_task_create(task_name, 0, size, NULL); + /* TODO: clean up */ + + double clock_sta = MSG_get_clock(); + +#ifdef USE_MICRO_TASK + + task_send_bounded_with_cpu_overhead(task, mbox, mig_speed, xfer_cpu_overhead); + +#else msg_error_t ret; if (mig_speed > 0) ret = MSG_task_send_bounded(task, mbox, mig_speed); else ret = MSG_task_send(task, mbox); xbt_assert(ret == MSG_OK); +#endif + + double clock_end = MSG_get_clock(); + double duration = clock_end - clock_sta; + double actual_speed = size / duration; +#ifdef USE_MICRO_TASK + double cpu_utilization = size * xfer_cpu_overhead / duration / 1.0E8; +#else + double cpu_utilization = 0; +#endif + + + if (stage == 2) - XBT_INFO("mig-stage%d.%d: sent %f", stage, stage2_round, size); + XBT_INFO("mig-stage%d.%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, stage2_round, size, duration, actual_speed, mig_speed, cpu_utilization); else - XBT_INFO("mig-stage%d: sent %f", stage, size); + XBT_INFO("mig-stage%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, size, duration, actual_speed, mig_speed, cpu_utilization); xbt_free(task_name); + + + +#ifdef USE_MICRO_TASK + /* The name of a micro task starts with __micro, which does not match the + * special name that finalizes the receiver loop. Thus, we send the special task. + **/ + { + if (stage == 3) { + char *task_name = get_mig_task_name(vm_name, src_pm_name, dst_pm_name, stage); + msg_task_t task = MSG_task_create(task_name, 0, 0, NULL); + msg_error_t ret = MSG_task_send(task, mbox); + xbt_assert(ret == MSG_OK); + xbt_free(task_name); + } + } +#endif +} + +double get_updated_size(double computed, double dp_rate, double dp_cap) +{ + double updated_size = computed * dp_rate; + XBT_INFO("updated_size %f dp_rate %f", updated_size, dp_rate); + if (updated_size > dp_cap) { + // XBT_INFO("mig-stage2.%d: %f bytes updated, but cap it with the working set size %f", stage2_round, updated_size, dp_cap); + updated_size = dp_cap; + } + + return updated_size; } +static double send_stage1(msg_host_t vm, const char *src_pm_name, const char *dst_pm_name, + long ramsize, double mig_speed, double xfer_cpu_overhead, double dp_rate, double dp_cap, double dpt_cpu_overhead) +{ + const char *vm_name = MSG_host_get_name(vm); + char *mbox = get_mig_mbox_src_dst(vm_name, src_pm_name, dst_pm_name); + + const long chunksize = 1024 * 1024 * 100; + long remaining = ramsize; + double computed_total = 0; + + while (remaining > 0) { + long datasize = chunksize; + if (remaining < chunksize) + datasize = remaining; + + remaining -= datasize; + + send_migration_data(vm_name, src_pm_name, dst_pm_name, datasize, mbox, 1, 0, mig_speed, xfer_cpu_overhead); + + double computed = lookup_computed_flop_counts(vm, 1, 0); + computed_total += computed; + + { + double updated_size = get_updated_size(computed, dp_rate, dp_cap); + + double overhead = dpt_cpu_overhead * updated_size; + launch_deferred_exec_process(vm, overhead, 10000); + } + } + + return computed_total; +} + + + static int migration_tx_fun(int argc, char *argv[]) { @@ -546,10 +896,14 @@ static int migration_tx_fun(int argc, char *argv[]) simcall_host_get_params(vm, ¶ms); const long ramsize = params.ramsize; const long devsize = params.devsize; + const int skip_stage1 = params.skip_stage1; 
const int skip_stage2 = params.skip_stage2; const double dp_rate = params.dp_rate; const double dp_cap = params.dp_cap; const double mig_speed = params.mig_speed; + const double xfer_cpu_overhead = params.xfer_cpu_overhead; + const double dpt_cpu_overhead = params.dpt_cpu_overhead; + double remaining_size = ramsize + devsize; double max_downtime = params.max_downtime; @@ -558,6 +912,7 @@ static int migration_tx_fun(int argc, char *argv[]) max_downtime = 0.03; } + /* This value assumes the network link is 1Gbps. */ double threshold = max_downtime * 125 * 1024 * 1024; /* setting up parameters has done */ @@ -573,10 +928,14 @@ static int migration_tx_fun(int argc, char *argv[]) /* Stage1: send all memory pages to the destination. */ start_dirty_page_tracking(vm); - send_migration_data(vm_name, src_pm_name, dst_pm_name, ramsize, mbox, 1, 0, mig_speed); - - remaining_size -= ramsize; + double computed_during_stage1 = 0; + if (!skip_stage1) { + // send_migration_data(vm_name, src_pm_name, dst_pm_name, ramsize, mbox, 1, 0, mig_speed, xfer_cpu_overhead); + /* send ramsize, but split it */ + computed_during_stage1 = send_stage1(vm, src_pm_name, dst_pm_name, ramsize, mig_speed, xfer_cpu_overhead, dp_rate, dp_cap, dpt_cpu_overhead); + remaining_size -= ramsize; + } /* Stage2: send update pages iteratively until the size of remaining states @@ -591,23 +950,40 @@ static int migration_tx_fun(int argc, char *argv[]) int stage2_round = 0; for (;;) { - // long updated_size = lookup_dirty_pages(vm); - double updated_size = lookup_computed_flop_counts(vm, stage2_round) * dp_rate; - if (updated_size > dp_cap) { - XBT_INFO("mig-stage2.%d: %f bytes updated, but cap it with the working set size %f", - stage2_round, updated_size, dp_cap); - updated_size = dp_cap; + + double updated_size = 0; + if (stage2_round == 0) { + /* just after stage1, nothing has been updated. But, we have to send the data updated during stage1 */ + updated_size = get_updated_size(computed_during_stage1, dp_rate, dp_cap); + } else { + double computed = lookup_computed_flop_counts(vm, 2, stage2_round); + updated_size = get_updated_size(computed, dp_rate, dp_cap); } - remaining_size += updated_size; + XBT_INFO("%d updated_size %f computed_during_stage1 %f dp_rate %f dp_cap %f", + stage2_round, updated_size, computed_during_stage1, dp_rate, dp_cap); - XBT_INFO("mig-stage2.%d: remaining_size %f (%s threshold %f)", stage2_round, - remaining_size, (remaining_size < threshold) ? "<" : ">", threshold); - if (remaining_size < threshold) - break; + if (stage2_round != 0) { + /* during stage1, we have already created overhead tasks */ + double overhead = dpt_cpu_overhead * updated_size; + XBT_INFO("updated %f overhead %f", updated_size, overhead); + launch_deferred_exec_process(vm, overhead, 10000); + } + + + { + remaining_size += updated_size; + + XBT_INFO("mig-stage2.%d: remaining_size %f (%s threshold %f)", stage2_round, + remaining_size, (remaining_size < threshold) ? 
"<" : ">", threshold); + + if (remaining_size < threshold) + break; + } + - send_migration_data(vm_name, src_pm_name, dst_pm_name, updated_size, mbox, 2, stage2_round, mig_speed); + send_migration_data(vm_name, src_pm_name, dst_pm_name, updated_size, mbox, 2, stage2_round, mig_speed, xfer_cpu_overhead); remaining_size -= updated_size; stage2_round += 1; @@ -620,7 +996,7 @@ stage3: simcall_vm_suspend(vm); stop_dirty_page_tracking(vm); - send_migration_data(vm_name, src_pm_name, dst_pm_name, remaining_size, mbox, 3, 0, mig_speed); + send_migration_data(vm_name, src_pm_name, dst_pm_name, remaining_size, mbox, 3, 0, mig_speed, xfer_cpu_overhead); xbt_free(mbox); diff --git a/src/simix/smx_host.c b/src/simix/smx_host.c index 9624e63804..86eef31829 100644 --- a/src/simix/smx_host.c +++ b/src/simix/smx_host.c @@ -305,11 +305,11 @@ void SIMIX_host_set_data(smx_host_t host, void *data){ } smx_action_t SIMIX_pre_host_execute(smx_simcall_t simcall,const char *name, - smx_host_t host, double computation_amount, double priority){ - return SIMIX_host_execute(name, host, computation_amount, priority); + smx_host_t host, double computation_amount, double priority, double bound){ + return SIMIX_host_execute(name, host, computation_amount, priority, bound); } smx_action_t SIMIX_host_execute(const char *name, - smx_host_t host, double computation_amount, double priority){ + smx_host_t host, double computation_amount, double priority, double bound){ /* alloc structures and initialize */ smx_action_t action = xbt_mallocator_get(simix_global->action_mallocator); @@ -328,6 +328,7 @@ smx_action_t SIMIX_host_execute(const char *name, action->execution.surf_exec = ws_model->extension.workstation.execute(host, computation_amount); ws_model->action_data_set(action->execution.surf_exec, action); ws_model->set_priority(action->execution.surf_exec, priority); + ws_model->set_bound(action->execution.surf_exec, bound); } XBT_DEBUG("Create execute action %p", action); @@ -454,7 +455,7 @@ e_smx_state_t SIMIX_host_execution_get_state(smx_action_t action){ void SIMIX_pre_host_execution_set_priority(smx_simcall_t simcall, smx_action_t action, double priority){ - return SIMIX_host_execution_set_priority(action, priority); + SIMIX_host_execution_set_priority(action, priority); } void SIMIX_host_execution_set_priority(smx_action_t action, double priority){ surf_model_t ws_model = get_ws_model_from_action(action); @@ -463,6 +464,17 @@ void SIMIX_host_execution_set_priority(smx_action_t action, double priority){ ws_model->set_priority(action->execution.surf_exec, priority); } +void SIMIX_pre_host_execution_set_bound(smx_simcall_t simcall, smx_action_t action, + double bound){ + SIMIX_host_execution_set_bound(action, bound); +} +void SIMIX_host_execution_set_bound(smx_action_t action, double bound){ + surf_model_t ws_model = get_ws_model_from_action(action); + + if(action->execution.surf_exec) + ws_model->set_bound(action->execution.surf_exec, bound); +} + void SIMIX_pre_host_execution_wait(smx_simcall_t simcall, smx_action_t action){ XBT_DEBUG("Wait for execution of action %p, state %d", action, (int)action->state); diff --git a/src/simix/smx_host_private.h b/src/simix/smx_host_private.h index b91a91e55a..11255b8d82 100644 --- a/src/simix/smx_host_private.h +++ b/src/simix/smx_host_private.h @@ -42,7 +42,7 @@ double SIMIX_host_get_speed(smx_host_t host); double SIMIX_host_get_available_speed(smx_host_t host); int SIMIX_host_get_state(smx_host_t host); smx_action_t SIMIX_host_execute(const char *name, - smx_host_t host, double 
computation_amount, double priority); + smx_host_t host, double computation_amount, double priority, double bound); smx_action_t SIMIX_host_parallel_execute(const char *name, int host_nb, smx_host_t *host_list, double *computation_amount, double *communication_amount, @@ -52,6 +52,7 @@ void SIMIX_host_execution_cancel(smx_action_t action); double SIMIX_host_execution_get_remains(smx_action_t action); e_smx_state_t SIMIX_host_execution_get_state(smx_action_t action); void SIMIX_host_execution_set_priority(smx_action_t action, double priority); +void SIMIX_host_execution_set_bound(smx_action_t action, double bound); void SIMIX_pre_host_execution_wait(smx_simcall_t simcall, smx_action_t action); // pre prototypes @@ -65,7 +66,7 @@ int SIMIX_pre_host_get_state(smx_simcall_t, smx_host_t); void* SIMIX_pre_host_self_get_data(smx_simcall_t); void* SIMIX_pre_host_get_data(smx_simcall_t, smx_host_t); void SIMIX_pre_host_set_data(smx_simcall_t, smx_host_t, void*); -smx_action_t SIMIX_pre_host_execute(smx_simcall_t, const char*, smx_host_t, double, double); +smx_action_t SIMIX_pre_host_execute(smx_simcall_t, const char*, smx_host_t, double, double, double); smx_action_t SIMIX_pre_host_parallel_execute(smx_simcall_t, const char*, int, smx_host_t*, double*, double*, double, double); void SIMIX_pre_host_execution_destroy(smx_simcall_t, smx_action_t); diff --git a/src/simix/smx_process.c b/src/simix/smx_process.c index 0ef4b53622..aa7434acc8 100644 --- a/src/simix/smx_process.c +++ b/src/simix/smx_process.c @@ -487,7 +487,8 @@ smx_action_t SIMIX_process_suspend(smx_process_t process, smx_process_t issuer) return NULL; } } else { - return SIMIX_host_execute("suspend", process->smx_host, 0.0, 1.0); + /* FIXME: computation size is zero. Is it okay that bound is zero ? 
*/ + return SIMIX_host_execute("suspend", process->smx_host, 0.0, 1.0, 0.0); } } diff --git a/src/simix/smx_smurf_private.h b/src/simix/smx_smurf_private.h index 3c6b698d03..8740e10916 100644 --- a/src/simix/smx_smurf_private.h +++ b/src/simix/smx_smurf_private.h @@ -267,13 +267,14 @@ ACTION(SIMCALL_HOST_GET_AVAILABLE_SPEED, host_get_available_speed, WITH_ANSWER, ACTION(SIMCALL_HOST_GET_STATE, host_get_state, WITH_ANSWER, TINT(result), TSPEC(host, smx_host_t)) sep \ ACTION(SIMCALL_HOST_GET_DATA, host_get_data, WITH_ANSWER, TPTR(result), TSPEC(host, smx_host_t)) sep \ ACTION(SIMCALL_HOST_SET_DATA, host_set_data, WITH_ANSWER, TVOID(result), TSPEC(host, smx_host_t), TPTR(data)) sep \ -ACTION(SIMCALL_HOST_EXECUTE, host_execute, WITH_ANSWER, TSPEC(result, smx_action_t), TSTRING(name), TSPEC(host, smx_host_t), TDOUBLE(computation_amount), TDOUBLE(priority)) sep \ +ACTION(SIMCALL_HOST_EXECUTE, host_execute, WITH_ANSWER, TSPEC(result, smx_action_t), TSTRING(name), TSPEC(host, smx_host_t), TDOUBLE(computation_amount), TDOUBLE(priority), TDOUBLE(bound)) sep \ ACTION(SIMCALL_HOST_PARALLEL_EXECUTE, host_parallel_execute, WITH_ANSWER, TSPEC(result, smx_action_t), TSTRING(name), TINT(host_nb), TSPEC(host_list, smx_host_t*), TSPEC(computation_amount, double*), TSPEC(communication_amount, double*), TDOUBLE(amount), TDOUBLE(rate)) sep \ ACTION(SIMCALL_HOST_EXECUTION_DESTROY, host_execution_destroy, WITH_ANSWER, TVOID(result), TSPEC(execution, smx_action_t)) sep \ ACTION(SIMCALL_HOST_EXECUTION_CANCEL, host_execution_cancel, WITH_ANSWER, TVOID(result), TSPEC(execution, smx_action_t)) sep \ ACTION(SIMCALL_HOST_EXECUTION_GET_REMAINS, host_execution_get_remains, WITH_ANSWER, TDOUBLE(result), TSPEC(execution, smx_action_t)) sep \ ACTION(SIMCALL_HOST_EXECUTION_GET_STATE, host_execution_get_state, WITH_ANSWER, TINT(result), TSPEC(execution, smx_action_t)) sep \ ACTION(SIMCALL_HOST_EXECUTION_SET_PRIORITY, host_execution_set_priority, WITH_ANSWER, TVOID(result), TSPEC(execution, smx_action_t), TDOUBLE(priority)) sep \ +ACTION(SIMCALL_HOST_EXECUTION_SET_BOUND, host_execution_set_bound, WITHOUT_ANSWER, TVOID(result), TSPEC(execution, smx_action_t), TDOUBLE(bound)) sep \ ACTION(SIMCALL_HOST_EXECUTION_WAIT, host_execution_wait, WITHOUT_ANSWER, TINT(result), TSPEC(execution, smx_action_t)) sep \ ACTION(SIMCALL_HOST_GET_PARAMS, host_get_params, WITHOUT_ANSWER, TVOID(result), TSPEC(ind_vm, smx_host_t), TSPEC(params, ws_params_t)) sep \ ACTION(SIMCALL_HOST_SET_PARAMS, host_set_params, WITHOUT_ANSWER, TVOID(result), TSPEC(ind_vm, smx_host_t), TSPEC(params, ws_params_t)) sep \ diff --git a/src/simix/smx_user.c b/src/simix/smx_user.c index 06c52c9cb8..303a378209 100644 --- a/src/simix/smx_user.c +++ b/src/simix/smx_user.c @@ -147,13 +147,13 @@ void simcall_host_set_data(smx_host_t host, void *data) smx_action_t simcall_host_execute(const char *name, smx_host_t host, double computation_amount, - double priority) + double priority, double bound) { /* checking for infinite values */ xbt_assert(isfinite(computation_amount), "computation_amount is not finite!"); xbt_assert(isfinite(priority), "priority is not finite!"); - return simcall_BODY_host_execute(name, host, computation_amount, priority); + return simcall_BODY_host_execute(name, host, computation_amount, priority, bound); } /** @@ -263,6 +263,19 @@ void simcall_host_execution_set_priority(smx_action_t execution, double priority simcall_BODY_host_execution_set_priority(execution, priority); } +/** + * \ingroup simix_host_management + * \brief Changes the capping (the maximum 
CPU utilization) of an execution action. + * + * This functions changes the capping only. It calls a surf function. + * \param execution The execution action + * \param bound The new bound + */ +void simcall_host_execution_set_bound(smx_action_t execution, double bound) +{ + simcall_BODY_host_execution_set_bound(execution, bound); +} + /** * \ingroup simix_host_management * \brief Waits for the completion of an execution action and destroy it. diff --git a/src/surf/cpu_cas01.c b/src/surf/cpu_cas01.c index 9b7d5c4c17..d85dc7374a 100644 --- a/src/surf/cpu_cas01.c +++ b/src/surf/cpu_cas01.c @@ -381,6 +381,7 @@ static surf_model_t surf_cpu_model_init_cas01(void) cpu_model->is_suspended = surf_action_is_suspended; cpu_model->set_max_duration = surf_action_set_max_duration; cpu_model->set_priority = surf_action_set_priority; + cpu_model->set_bound = surf_action_set_bound; #ifdef HAVE_TRACING cpu_model->set_category = surf_action_set_category; #endif diff --git a/src/surf/surf_action.c b/src/surf/surf_action.c index 0e582d2164..bb3006f229 100644 --- a/src/surf/surf_action.c +++ b/src/surf/surf_action.c @@ -278,6 +278,18 @@ void surf_action_set_priority(surf_action_t action, double priority) XBT_OUT(); } +void surf_action_set_bound(surf_action_t action, double bound) +{ + surf_model_t model = action->model_obj; + XBT_IN("(%p,%g)", action, bound); + action->bound = bound; + lmm_update_variable_bound(model->model_private->maxmin_system, ((surf_action_lmm_t) action)->variable, bound); + + if (model->model_private->update_mechanism == UM_LAZY) + surf_action_lmm_heap_remove(model->model_private->action_heap, (surf_action_lmm_t) action); + XBT_OUT(); +} + #ifdef HAVE_TRACING void surf_action_set_category(surf_action_t action, const char *category) diff --git a/src/surf/surf_private.h b/src/surf/surf_private.h index bb40853982..076a05c426 100644 --- a/src/surf/surf_private.h +++ b/src/surf/surf_private.h @@ -75,6 +75,7 @@ void surf_action_resume(surf_action_t action); int surf_action_is_suspended(surf_action_t action); void surf_action_set_max_duration(surf_action_t action, double duration); void surf_action_set_priority(surf_action_t action, double priority); +void surf_action_set_bound(surf_action_t action, double bound); #ifdef HAVE_TRACING void surf_action_set_category(surf_action_t action, const char *category); diff --git a/src/surf/vm_workstation.c b/src/surf/vm_workstation.c index a3c5730f3f..1ad14a76ec 100644 --- a/src/surf/vm_workstation.c +++ b/src/surf/vm_workstation.c @@ -472,6 +472,7 @@ static void surf_vm_workstation_model_init_internal(void) // model->is_suspended = ws_action_is_suspended; // model->set_max_duration = ws_action_set_max_duration; model->set_priority = ws_action_set_priority; + model->set_bound = ws_action_set_bound; // #ifdef HAVE_TRACING // model->set_category = ws_action_set_category; // #endif @@ -492,7 +493,7 @@ static void surf_vm_workstation_model_init_internal(void) model->extension.workstation.execute = vm_ws_execute; model->extension.workstation.sleep = ws_action_sleep; model->extension.workstation.get_state = ws_get_state; - // model->extension.workstation.get_speed = ws_get_speed; + model->extension.workstation.get_speed = ws_get_speed; // model->extension.workstation.get_available_speed = ws_get_available_speed; // model->extension.workstation.communicate = ws_communicate; diff --git a/src/surf/workstation.c b/src/surf/workstation.c index 33bfedf7fc..22e7953481 100644 --- a/src/surf/workstation.c +++ b/src/surf/workstation.c @@ -268,6 +268,17 @@ void 
ws_action_set_priority(surf_action_t action, double priority) DIE_IMPOSSIBLE; } +void ws_action_set_bound(surf_action_t action, double bound) +{ + /* FIXME: only for CPU model object? */ + if (action->model_obj->type == SURF_MODEL_TYPE_NETWORK) + surf_network_model->set_bound(action, bound); + else if (action->model_obj->type == SURF_MODEL_TYPE_CPU) + action->model_obj->set_bound(action, bound); + else + DIE_IMPOSSIBLE; +} + #ifdef HAVE_TRACING static void ws_action_set_category(surf_action_t action, const char *category) { @@ -317,7 +328,7 @@ e_surf_resource_state_t ws_get_state(void *workstation) return cpu->model->extension.cpu.get_state(workstation); } -static double ws_get_speed(void *workstation, double load) +double ws_get_speed(void *workstation, double load) { surf_resource_t cpu = ((surf_resource_t) surf_cpu_resource_priv(workstation)); return cpu->model->extension.cpu.get_speed(workstation, load); @@ -532,6 +543,7 @@ static void surf_workstation_model_init_internal(void) model->is_suspended = ws_action_is_suspended; model->set_max_duration = ws_action_set_max_duration; model->set_priority = ws_action_set_priority; + model->set_bound = ws_action_set_bound; #ifdef HAVE_TRACING model->set_category = ws_action_set_category; #endif diff --git a/src/surf/workstation_private.h b/src/surf/workstation_private.h index f786f4f0d5..171d9b7cc0 100644 --- a/src/surf/workstation_private.h +++ b/src/surf/workstation_private.h @@ -25,6 +25,7 @@ void ws_update_resource_state(void *id, tmgr_trace_event_t event_type, double va void ws_finalize(surf_model_t workstation_model); void ws_action_set_priority(surf_action_t action, double priority); +void ws_action_set_bound(surf_action_t action, double bound); surf_action_t ws_execute(void *workstation, double size); surf_action_t ws_action_sleep(void *workstation, double duration); @@ -32,6 +33,7 @@ void ws_action_suspend(surf_action_t action); void ws_action_resume(surf_action_t action); void ws_action_cancel(surf_action_t action); e_surf_resource_state_t ws_get_state(void *workstation); +double ws_get_speed(void *workstation, double load); double ws_action_get_remains(surf_action_t action); void ws_get_params(void *ws, ws_params_t params);
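
The commit introduces MSG_task_set_bound(), which caps the CPU consumption of a computation task in flops/s (a bound of 0 means "no capping", as the XBT_INFO message in msg_task.c states). A minimal usage sketch, assuming a hypothetical platform file passed on the command line and an illustrative cap of 1e8 flops/s; this is not the bound.c example added by the commit, only the API calls visible in the diff:

#include "msg/msg.h"
#include "xbt/asserts.h"
#include "xbt/dynar.h"

static int worker(int argc, char *argv[])
{
  /* 4e9 flops capped at 1e8 flops/s: roughly 40 simulated seconds on any
   * PM that is at least that fast. Both amounts are illustrative. */
  msg_task_t task = MSG_task_create("capped_task", 4e9, 0, NULL);
  MSG_task_set_bound(task, 1e8);   /* flops/s; 0 would mean "no capping" */
  MSG_task_execute(task);
  MSG_task_destroy(task);
  return 0;
}

int main(int argc, char *argv[])
{
  MSG_init(&argc, argv);
  xbt_assert(argc == 2, "Usage: %s platform_file.xml", argv[0]);
  MSG_create_environment(argv[1]);

  /* Run the worker on the first host of the platform. */
  xbt_dynar_t hosts = MSG_hosts_as_dynar();
  MSG_process_create("worker", worker, NULL,
                     xbt_dynar_get_as(hosts, 0, msg_host_t));

  return MSG_main() == MSG_OK ? 0 : 1;
}

Without the MSG_task_set_bound() call, the same task finishes as fast as the PM allows; with it, the capping is enforced through the new set_bound hook down in surf, independently of the task priority.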
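
The xfer_cpu_overhead parameter encodes the calibration described in the comment block above task_send_bounded_with_cpu_overhead(): the overhead is expressed in flops per transferred byte, alpha = utilization * C / bandwidth, with C the CPU capacity of the PM in flops/s. A worked sketch of that arithmetic, assuming C = 1e8 flops/s (the constant hard-coded in the cpu_utilization computation of send_migration_data()) and the "CPU 22% when 80Mbyte/s" data point kept in the commented-out constants:

#include <stdio.h>

int main(void)
{
  /* Calibration: 22 % CPU observed while migrating at 80 MiB/s on a PM of
   * capacity C = 1e8 flops/s (values taken from the constants left in
   * comments in msg_vm.c). */
  const double C         = 1.0E8;                 /* flops/s         */
  const double bandwidth = 80.0 * 1024 * 1024;    /* bytes/s         */
  const double alpha     = 0.22 * C / bandwidth;  /* flops per byte  */

  /* Per 10-MiB chunk (chunk_size in task_send_bounded_with_cpu_overhead),
   * request_overhead() asks the helper process to execute chunk * alpha
   * flops while the chunk itself is being sent. */
  const double chunk    = 10.0 * 1024 * 1024;     /* bytes           */
  const double overhead = chunk * alpha;          /* flops           */

  /* Sanity check: at full bandwidth the overhead is bandwidth * alpha
   * flops per second, i.e. 22 % of C again. */
  printf("alpha = %f flops/byte\n", alpha);            /* ~0.26       */
  printf("overhead per chunk = %g flops\n", overhead); /* ~2.75e6     */
  printf("utilization = %f\n", bandwidth * alpha / C); /* 0.22        */
  return 0;
}

On the receive side the same alpha is applied to each received chunk through launch_deferred_exec_process(), so both endpoints pay a CPU cost proportional to the bytes they move.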
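
The stage-2 loop stops once the estimated remaining state fits within the downtime budget: the default max_downtime of 0.03 s times 125 * 1024 * 1024 bytes/s (the 1 Gbps assumption noted in the code) gives a threshold of about 3.9 MB, and each round estimates the freshly dirtied data as computed_flops * dp_rate, capped by dp_cap. A small sketch of one round of that test; the dp_rate, dp_cap and flop count below are made-up values:

#include <stdio.h>

/* Mirrors get_updated_size() in msg_vm.c: dirtied bytes are estimated as
 * (flops computed inside the VM) * dp_rate, capped by the working-set size. */
static double updated_size(double computed, double dp_rate, double dp_cap)
{
  double size = computed * dp_rate;
  return (size > dp_cap) ? dp_cap : size;
}

int main(void)
{
  const double max_downtime = 0.03;                          /* s, default */
  const double threshold = max_downtime * 125 * 1024 * 1024; /* ~3.9 MB    */

  /* Illustrative inputs: 5e8 flops computed during the last round,
   * dp_rate = 0.01 bytes/flop, dp_cap = 256 MiB working set. */
  double remaining = updated_size(5e8, 0.01, 256.0 * 1024 * 1024);

  printf("threshold = %g bytes\n", threshold);
  printf("remaining = %g bytes -> %s\n", remaining,
         remaining < threshold ? "stop and switch to stage 3"
                               : "send another stage-2 round");
  return 0;
}

When the test fails, migration_tx_fun() sends updated_size more bytes and re-enters stage 2; when it succeeds, the VM is suspended and the remainder is transferred in stage 3.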