static int migration_rx_fun(int argc, char *argv[])
{
- const char *pr_name = MSG_process_get_name(MSG_process_self());
- const char *host_name = MSG_host_get_name(MSG_host_self());
-
XBT_DEBUG("mig: rx_start");
xbt_assert(argc == 4);
return 0;
}
-
-typedef struct dirty_page {
- double prev_clock;
- double prev_remaining;
- msg_task_t task;
-} s_dirty_page, *dirty_page_t;
-
-
static void reset_dirty_pages(msg_vm_t vm)
{
msg_host_priv_t priv = msg_host_resource_priv(vm);
}
#endif
-double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remaining, double clock)
+static double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remaining, double clock)
{
double computed = dp->prev_remaining - remaining;
double duration = clock - dp->prev_clock;
argv[2] = bprintf("%lf", prio);
argv[3] = NULL;
- msg_process_t pr = MSG_process_create_with_arguments(pr_name, deferred_exec_fun, NULL, host, nargvs - 1, argv);
+ MSG_process_create_with_arguments(pr_name, deferred_exec_fun, NULL, host, nargvs - 1, argv);
xbt_free(pr_name);
}
argv[2] = NULL;
// XBT_INFO("micro start: mbox %s", mbox);
- msg_process_t pr = MSG_process_create_with_arguments(pr_name, task_tx_overhead_fun, NULL, MSG_host_self(), nargvs - 1, argv);
+ MSG_process_create_with_arguments(pr_name, task_tx_overhead_fun, NULL, MSG_host_self(), nargvs - 1, argv);
xbt_free(pr_name);
xbt_free(mbox);
#endif
}
-double get_updated_size(double computed, double dp_rate, double dp_cap)
+static double get_updated_size(double computed, double dp_rate, double dp_cap)
{
double updated_size = computed * dp_rate;
XBT_INFO("updated_size %f dp_rate %f", updated_size, dp_rate);
static int migration_tx_fun(int argc, char *argv[])
{
- const char *pr_name = MSG_process_get_name(MSG_process_self());
- const char *host_name = MSG_host_get_name(MSG_host_self());
-
XBT_DEBUG("mig: tx_start");
xbt_assert(argc == 4);
argv[3] = xbt_strdup(sg_host_name(dst_pm));
argv[4] = NULL;
- msg_process_t pr = MSG_process_create_with_arguments(pr_name, migration_rx_fun, NULL, dst_pm, nargvs - 1, argv);
+ MSG_process_create_with_arguments(pr_name, migration_rx_fun, NULL, dst_pm, nargvs - 1, argv);
xbt_free(pr_name);
}
argv[2] = xbt_strdup(sg_host_name(src_pm));
argv[3] = xbt_strdup(sg_host_name(dst_pm));
argv[4] = NULL;
- msg_process_t pr = MSG_process_create_with_arguments(pr_name, migration_tx_fun, NULL, src_pm, nargvs - 1, argv);
+ MSG_process_create_with_arguments(pr_name, migration_tx_fun, NULL, src_pm, nargvs - 1, argv);
xbt_free(pr_name);
}
}
-
-
/** @brief Get the physical host of a given VM.
* @ingroup msg_VMs
*/
{
return simcall_vm_get_pm(vm);
}
+
+
+/** @brief Set a CPU bound for a given VM.
+ * @ingroup msg_VMs
+ *
+ * 1.
+ * Note that in some cases MSG_task_set_bound() may not intuitively work for VMs.
+ *
+ * For example,
+ * On PM0, there are Task1 and VM0.
+ * On VM0, there is Task2.
+ * Now we bound 75% to Task1@PM0 and bound 25% to Task2@VM0.
+ * Then,
+ * Task1@PM0 gets 50%.
+ * Task2@VM0 gets 25%.
+ * This is NOT 75% for Task1@PM0 and 25% for Task2@VM0, respectively.
+ *
+ * This is because a VM has the dummy CPU action in the PM layer. Putting a
+ * task on the VM does not affect the bound of the dummy CPU action. The bound
+ * of the dummy CPU action is unlimited.
+ *
+ * There are some solutions for this problem. One option is to update the bound
+ * of the dummy CPU action automatically. It should be the sum of all tasks on
+ * the VM. But, this solution might be costly, because we have to scan all tasks
+ * on the VM in share_resource() or we have to trap both the start and end of
+ * task execution.
+ *
+ * The current solution is to use MSG_vm_set_bound(), which allows us to
+ * directly set the bound of the dummy CPU action.
+ *
+ *
+ * 2.
+ * Note that bound == 0 means no bound (i.e., unlimited).
+ */
+void MSG_vm_set_bound(msg_vm_t vm, double bound)
+{
+  /* The function is void: do not 'return' the (void) simcall result,
+   * which is a C constraint violation (C11 6.8.6.4p1). */
+  simcall_vm_set_bound(vm, bound);
+}