/** @brief Create a new VM with specified parameters.
* @ingroup msg_VMs
+ * All parameters are in MBytes
*
*/
-msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name,
- int ncpus, long ramsize, long net_cap, char *disk_path, long disksize)
+msg_vm_t MSG_vm_create(msg_host_t ind_pm, const char *name, int ncpus, int ramsize,
+ int net_cap, char *disk_path, int disksize,
+ int mig_netspeed, int dp_intensity)
{
- msg_vm_t vm = MSG_vm_create_core(ind_pm, name);
-
- {
- s_ws_params_t params;
- memset(&params, 0, sizeof(params));
- params.ramsize = ramsize;
- //params.overcommit = 0;
- simcall_host_set_params(vm, &params);
- }
-
- /* TODO: Limit net capability, take into account disk considerations. */
-
- return vm;
+ /* For the moment, intensity_rate is the percentage against the migration bandwidth */
+ double host_speed = MSG_get_host_speed(ind_pm);
+ double update_speed = ((double)dp_intensity/100) * mig_netspeed;
+
+ msg_vm_t vm = MSG_vm_create_core(ind_pm, name);
+ s_ws_params_t params;
+ memset(&params, 0, sizeof(params));
+ params.ramsize = 1L * 1024 * 1024 * ramsize;
+ //params.overcommit = 0;
+ params.devsize = 0;
+ params.skip_stage2 = 0;
+ params.max_downtime = 0.03;
+ params.dp_rate = (update_speed * 1L * 1024 * 1024 ) / host_speed;
+ params.dp_cap = params.ramsize / 0.9; // working set memory is 90%
+ params.mig_speed = 1L * 1024 * 1024 * mig_netspeed; // mig_speed
+
+ //XBT_INFO("dp rate %f migspeed : %f intensity mem : %d, updatespeed %f, hostspeed %f",params.dp_rate, params.mig_speed, dp_intensity, update_speed, host_speed);
+ simcall_host_set_params(vm, &params);
+
+ return vm;
}
}
-
-
/** @brief Get the physical host of a given VM.
* @ingroup msg_VMs
*/
{
return simcall_vm_get_pm(vm);
}
+
+
+/** @brief Set a CPU bound for a given VM.
+ * @ingroup msg_VMs
+ *
+ * 1.
+ * Note that in some cases MSG_task_set_bound() may not intuitively work for VMs.
+ *
+ * For example,
+ * On PM0, there are Task1 and VM0.
+ * On VM0, there is Task2.
+ * Now we bound 75% to Task1@PM0 and bound 25% to Task2@VM0.
+ * Then,
+ * Task1@PM0 gets 50%.
+ * Task2@VM0 gets 25%.
+ * This is NOT 75% for Task1@PM0 and 25% for Task2@VM0, respectively.
+ *
+ * This is because a VM has the dummy CPU action in the PM layer. Putting a
+ * task on the VM does not affect the bound of the dummy CPU action. The bound
+ * of the dummy CPU action is unlimited.
+ *
+ * There are some solutions for this problem. One option is to update the bound
+ * of the dummy CPU action automatically. It should be the sum of all tasks on
+ * the VM. But, this solution might be costy, because we have to scan all tasks
+ * on the VM in share_resource() or we have to trap both the start and end of
+ * task execution.
+ *
+ * The current solution is to use MSG_vm_set_bound(), which allows us to
+ * directly set the bound of the dummy CPU action.
+ *
+ *
+ * 2.
+ * Note that bound == 0 means no bound (i.e., unlimited).
+ */
+void MSG_vm_set_bound(msg_vm_t vm, double bound)
+{
+ return simcall_vm_set_bound(vm, bound);
+}