/** @brief Create a new VM with specified parameters.
* @ingroup msg_VMs
+ * All parameters are in MBytes
*
*/
-msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name,
- int ncpus, long ramsize, long net_cap, char *disk_path, long disksize)
+msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name, int ncpus, int ramsize,
+ int net_cap, char *disk_path, int disksize,
+ int mig_netspeed, int dp_intensity)
{
- msg_vm_t vm = MSG_vm_create_core(ind_pm, name);
-
- {
- s_ws_params_t params;
- memset(&params, 0, sizeof(params));
- params.ramsize = ramsize;
- //params.overcommit = 0;
- simcall_host_set_params(vm, &params);
- }
-
- /* TODO: Limit net capability, take into account disk considerations. */
-
- return vm;
+ /* For the moment, intensity_rate is the percentage against the migration bandwidth */
+ double host_speed = MSG_get_host_speed(ind_pm);
+ double update_speed = ((double)dp_intensity/100) * mig_netspeed;
+
+ msg_vm_t vm = MSG_vm_create_core(ind_pm, name);
+ s_ws_params_t params;
+ memset(&params, 0, sizeof(params));
+ params.ramsize = 1L * 1024 * 1024 * ramsize;
+ //params.overcommit = 0;
+ params.devsize = 0;
+ params.skip_stage2 = 0;
+ params.max_downtime = 0.03;
+ params.dp_rate = (update_speed * 1L * 1024 * 1024 ) / host_speed;
+ params.dp_cap = params.ramsize / 0.9; // working set memory is 90%
+ params.mig_speed = 1L * 1024 * 1024 * mig_netspeed; // mig_speed
+
+ //XBT_INFO("dp rate %f migspeed : %f intensity mem : %d, updatespeed %f, hostspeed %f",params.dp_rate, params.mig_speed, dp_intensity, update_speed, host_speed);
+ simcall_host_set_params(vm, &params);
+
+ return vm;
}
static int migration_rx_fun(int argc, char *argv[])
{
- const char *pr_name = MSG_process_get_name(MSG_process_self());
- const char *host_name = MSG_host_get_name(MSG_host_self());
-
XBT_DEBUG("mig: rx_start");
xbt_assert(argc == 4);
return 0;
}
-
-typedef struct dirty_page {
- double prev_clock;
- double prev_remaining;
- msg_task_t task;
-} s_dirty_page, *dirty_page_t;
-
-
static void reset_dirty_pages(msg_vm_t vm)
{
msg_host_priv_t priv = msg_host_resource_priv(vm);
}
#endif
-double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remaining, double clock)
+static double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remaining, double clock)
{
double computed = dp->prev_remaining - remaining;
double duration = clock - dp->prev_clock;
total += priv->dp_updated_by_deleted_tasks;
- XBT_INFO("mig-stage%d.%d: computed %f flop_counts (including %f by deleted tasks)",
+ XBT_DEBUG("mig-stage%d.%d: computed %f flop_counts (including %f by deleted tasks)",
stage_for_fancy_debug,
stage2_round_for_fancy_debug,
total, priv->dp_updated_by_deleted_tasks);
argv[2] = bprintf("%lf", prio);
argv[3] = NULL;
- msg_process_t pr = MSG_process_create_with_arguments(pr_name, deferred_exec_fun, NULL, host, nargvs - 1, argv);
+ MSG_process_create_with_arguments(pr_name, deferred_exec_fun, NULL, host, nargvs - 1, argv);
xbt_free(pr_name);
}
argv[2] = NULL;
// XBT_INFO("micro start: mbox %s", mbox);
- msg_process_t pr = MSG_process_create_with_arguments(pr_name, task_tx_overhead_fun, NULL, MSG_host_self(), nargvs - 1, argv);
+ MSG_process_create_with_arguments(pr_name, task_tx_overhead_fun, NULL, MSG_host_self(), nargvs - 1, argv);
xbt_free(pr_name);
xbt_free(mbox);
- if (stage == 2)
- XBT_INFO("mig-stage%d.%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, stage2_round, size, duration, actual_speed, mig_speed, cpu_utilization);
- else
- XBT_INFO("mig-stage%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, size, duration, actual_speed, mig_speed, cpu_utilization);
+ if (stage == 2){
+ XBT_DEBUG("mig-stage%d.%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, stage2_round, size, duration, actual_speed, mig_speed, cpu_utilization);}
+ else{
+ XBT_DEBUG("mig-stage%d: sent %f duration %f actual_speed %f (target %f) cpu %f", stage, size, duration, actual_speed, mig_speed, cpu_utilization);
+ }
xbt_free(task_name);
#endif
}
-double get_updated_size(double computed, double dp_rate, double dp_cap)
+static double get_updated_size(double computed, double dp_rate, double dp_cap)
{
double updated_size = computed * dp_rate;
- XBT_INFO("updated_size %f dp_rate %f", updated_size, dp_rate);
+ XBT_DEBUG("updated_size %f dp_rate %f", updated_size, dp_rate);
if (updated_size > dp_cap) {
// XBT_INFO("mig-stage2.%d: %f bytes updated, but cap it with the working set size %f", stage2_round, updated_size, dp_cap);
updated_size = dp_cap;
static int migration_tx_fun(int argc, char *argv[])
{
- const char *pr_name = MSG_process_get_name(MSG_process_self());
- const char *host_name = MSG_host_get_name(MSG_host_self());
-
XBT_DEBUG("mig: tx_start");
xbt_assert(argc == 4);
updated_size = get_updated_size(computed, dp_rate, dp_cap);
}
- XBT_INFO("%d updated_size %f computed_during_stage1 %f dp_rate %f dp_cap %f",
+ XBT_INFO("mig-stage 2:%d updated_size %f computed_during_stage1 %f dp_rate %f dp_cap %f",
stage2_round, updated_size, computed_during_stage1, dp_rate, dp_cap);
if (stage2_round != 0) {
/* during stage1, we have already created overhead tasks */
double overhead = dpt_cpu_overhead * updated_size;
- XBT_INFO("updated %f overhead %f", updated_size, overhead);
+ XBT_DEBUG("updated %f overhead %f", updated_size, overhead);
launch_deferred_exec_process(vm, overhead, 10000);
}
{
remaining_size += updated_size;
- XBT_INFO("mig-stage2.%d: remaining_size %f (%s threshold %f)", stage2_round,
+ XBT_DEBUG("mig-stage2.%d: remaining_size %f (%s threshold %f)", stage2_round,
remaining_size, (remaining_size < threshold) ? "<" : ">", threshold);
if (remaining_size < threshold)
argv[3] = xbt_strdup(sg_host_name(dst_pm));
argv[4] = NULL;
- msg_process_t pr = MSG_process_create_with_arguments(pr_name, migration_rx_fun, NULL, dst_pm, nargvs - 1, argv);
+ MSG_process_create_with_arguments(pr_name, migration_rx_fun, NULL, dst_pm, nargvs - 1, argv);
xbt_free(pr_name);
}
argv[2] = xbt_strdup(sg_host_name(src_pm));
argv[3] = xbt_strdup(sg_host_name(dst_pm));
argv[4] = NULL;
- msg_process_t pr = MSG_process_create_with_arguments(pr_name, migration_tx_fun, NULL, src_pm, nargvs - 1, argv);
+ MSG_process_create_with_arguments(pr_name, migration_tx_fun, NULL, src_pm, nargvs - 1, argv);
xbt_free(pr_name);
}
}
-
-
/** @brief Get the physical host of a given VM.
* @ingroup msg_VMs
*/
{
return simcall_vm_get_pm(vm);
}
+
+
+/** @brief Set a CPU bound for a given VM.
+ * @ingroup msg_VMs
+ *
+ * 1.
+ * Note that in some cases MSG_task_set_bound() may not intuitively work for VMs.
+ *
+ * For example,
+ * On PM0, there are Task1 and VM0.
+ * On VM0, there is Task2.
+ * Now we bound 75% to Task1@PM0 and bound 25% to Task2@VM0.
+ * Then,
+ * Task1@PM0 gets 50%.
+ * Task2@VM0 gets 25%.
+ * This is NOT 75% for Task1@PM0 and 25% for Task2@VM0, respectively.
+ *
+ * This is because a VM has the dummy CPU action in the PM layer. Putting a
+ * task on the VM does not affect the bound of the dummy CPU action. The bound
+ * of the dummy CPU action is unlimited.
+ *
+ * There are some solutions for this problem. One option is to update the bound
+ * of the dummy CPU action automatically. It should be the sum of all tasks on
+ * the VM. But, this solution might be costly, because we have to scan all tasks
+ * on the VM in share_resource() or we have to trap both the start and end of
+ * task execution.
+ *
+ * The current solution is to use MSG_vm_set_bound(), which allows us to
+ * directly set the bound of the dummy CPU action.
+ *
+ *
+ * 2.
+ * Note that bound == 0 means no bound (i.e., unlimited).
+ */
+void MSG_vm_set_bound(msg_vm_t vm, double bound)
+{
+ return simcall_vm_set_bound(vm, bound);
+}