- {
- {
- /* 1-core PM */
- XBT_INFO("# 4. Put 2 VMs on a 1-core PM.");
- msg_host_t vm0 = MSG_vm_create_core(pm0, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm0, "vm1");
-
- test_one_task(vm0, computation_amount);
- test_one_task(vm1, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-
- {
- /* 2-core PM */
- XBT_INFO("# 5. Put 2 VMs on a 2-core PM.");
- msg_host_t vm0 = MSG_vm_create_core(pm1, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
-
- test_one_task(vm0, computation_amount);
- test_one_task(vm1, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-
- {
- /* 2-core PM */
- XBT_INFO("# 6. Put 2 VMs on a 2-core PM and 1 task on the PM.");
- msg_host_t vm0 = MSG_vm_create_core(pm1, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
-
- test_one_task(vm0, computation_amount);
- test_one_task(vm1, computation_amount);
- test_one_task(pm1, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-
- {
- /* 2-core PM */
- XBT_INFO("# 7. Put 2 VMs and 2 tasks on a 2-core PM. Put two tasks on one of the VMs.");
- msg_host_t vm0 = MSG_vm_create_core(pm1, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
- test_one_task(pm1, computation_amount);
- test_one_task(pm1, computation_amount);
-
-    /* Halve computation_amount so that all tasks finish at the same time; this keeps the results simple. */
- test_one_task(vm0, computation_amount / 2);
- test_one_task(vm0, computation_amount / 2);
- test_one_task(vm1, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-
- {
- /* 2-core PM */
-    XBT_INFO("# 8. Put 2 VMs and a task on a 2-core PM. Cap the load of VM0 at 50%%.");
-    /* This is a tricky case. The process scheduler of the host OS may not work as expected. */
-
-    /* VM0 gets 50%. VM1 and the task on the PM get 75% each. */
- msg_host_t vm0 = MSG_vm_create_core(pm1, "vm0");
- MSG_vm_set_bound(vm0, cpu_speed / 2);
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
- test_one_task(pm1, computation_amount);
-
- test_one_task(vm0, computation_amount);
- test_one_task(vm1, computation_amount);
-
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-
-
-  /* In all the above cases, the tasks finish at the same time.
-   * TODO: add more complex cases.
-   */
-
-#if 0
- {
- /* 2-core PM */
-    XBT_INFO("# 9. Put 2 VMs and a task on a 2-core PM. Put two tasks on one of the VMs.");
- msg_host_t vm0 = MSG_vm_create_core(pm1, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
-
- test_one_task(vm0, computation_amount);
- test_one_task(vm0, computation_amount);
- test_one_task(vm1, computation_amount);
- test_one_task(pm1, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-#endif
- }
+static void test_vm_pin(void)
+{
+ xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
+  msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t); // 1 core
+ msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t); // 2 cores
+ msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); // 4 cores
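+  /* The platform file (not shown here) is assumed to define at least three
+   * hosts with 1, 2 and 4 cores, in that order. */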
+
+
+ /* set up VMs on PM2 (4 cores) */
+ msg_vm_t vm0 = MSG_vm_create_core(pm2, "VM0");
+ msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
+ msg_vm_t vm2 = MSG_vm_create_core(pm2, "VM2");
+ msg_vm_t vm3 = MSG_vm_create_core(pm2, "VM3");
+
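+  /* Give each VM a tiny RAM size and skip the two pre-copy stages of the live
+   * migration model, so the migrations below should complete almost
+   * instantly; this test looks at CPU affinity, not migration cost. */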
+ s_vm_params_t params;
+  memset(&params, 0, sizeof(params));
+ params.ramsize = 1L * 1024 * 1024;
+ params.skip_stage1 = 1;
+ params.skip_stage2 = 1;
+ //params.mig_speed = 1L * 1024 * 1024;
+  MSG_host_set_params(vm0, &params);
+  MSG_host_set_params(vm1, &params);
+  MSG_host_set_params(vm2, &params);
+  MSG_host_set_params(vm3, &params);
+
+ MSG_vm_start(vm0);
+ MSG_vm_start(vm1);
+ MSG_vm_start(vm2);
+ MSG_vm_start(vm3);
+
+
+ /* set up tasks and processes */
+ struct task_data t0;
+ struct task_data t1;
+ struct task_data t2;
+ struct task_data t3;
+
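+  /* Each task gets a huge amount of work (1e16 flops), large enough that it
+   * should never finish during the experiment; we only sample how fast it
+   * progresses. */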
+ t0.task = MSG_task_create("Task0", 1e16, 0, NULL);
+ t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
+ t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
+ t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
+
+ MSG_process_create("worker0", worker_main, t0.task, vm0);
+ MSG_process_create("worker1", worker_main, t1.task, vm1);
+ MSG_process_create("worker2", worker_main, t2.task, vm2);
+ MSG_process_create("worker3", worker_main, t3.task, vm3);
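+  /* worker_main() and the task_data helpers are defined elsewhere in this
+   * file. A minimal sketch of what worker_main is assumed to do:
+   *
+   *   static int worker_main(int argc, char *argv[])
+   *   {
+   *     msg_task_t task = MSG_process_get_data(MSG_process_self());
+   *     MSG_task_execute(task);   // runs until the task is cancelled
+   *     return 0;
+   *   }
+   *
+   * task_data_get_clock() is assumed to log how much computation each task
+   * achieved since the previous call. */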
+
+
+ /* start experiments */
+ XBT_INFO("## 1. start 4 VMs on PM2 (4 cores)");
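+  /* Expectation: with 4 single-core VMs on a 4-core PM, each task should run
+   * at full speed. */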
+ task_data_init_clock(&t0);
+ task_data_init_clock(&t1);
+ task_data_init_clock(&t2);
+ task_data_init_clock(&t3);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 2. pin all VMs to CPU0 of PM2");
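+  /* Expectation: all 4 VMs now compete for CPU0, so each task should run at
+   * about 25% of full speed. */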
+ MSG_vm_set_affinity(vm0, pm2, 0x01);
+ MSG_vm_set_affinity(vm1, pm2, 0x01);
+ MSG_vm_set_affinity(vm2, pm2, 0x01);
+ MSG_vm_set_affinity(vm3, pm2, 0x01);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+  XBT_INFO("## 3. pin all VMs to CPU0 of PM1 (no effect for now)");
+  /* The VMs are still on PM2, so these calls do not affect the computation
+   * yet. The masks are recorded and take effect once the VMs are migrated to
+   * PM1 (see step 6). */
+ MSG_vm_set_affinity(vm0, pm1, 0x01);
+ MSG_vm_set_affinity(vm1, pm1, 0x01);
+ MSG_vm_set_affinity(vm2, pm1, 0x01);
+ MSG_vm_set_affinity(vm3, pm1, 0x01);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 4. unpin VM0, and pin VM2 and VM3 to CPU1 of PM2");
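+  /* Expectation: VM1 remains alone on CPU0 (full speed), VM2 and VM3 share
+   * CPU1 (about 50% each), and VM0 (mask 0 = no affinity) can use any
+   * remaining core at full speed. */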
+ MSG_vm_set_affinity(vm0, pm2, 0x00);
+ MSG_vm_set_affinity(vm2, pm2, 0x02);
+ MSG_vm_set_affinity(vm3, pm2, 0x02);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 5. migrate all VMs to PM0 (only 1 CPU core)");
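+  /* Expectation: the 4 VMs now share the single core of PM0, so each task
+   * should run at about 25% of full speed; the masks set for PM2 no longer
+   * matter. */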
+ MSG_vm_migrate(vm0, pm0);
+ MSG_vm_migrate(vm1, pm0);
+ MSG_vm_migrate(vm2, pm0);
+ MSG_vm_migrate(vm3, pm0);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 6. migrate all VMs to PM1 (2 CPU cores, with affinity settings)");
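+  /* Expectation: the affinity set in step 3 now takes effect: all 4 VMs are
+   * pinned to CPU0 of PM1, so each task should run at about 25% of full
+   * speed even though PM1 has 2 cores. */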
+ MSG_vm_migrate(vm0, pm1);
+ MSG_vm_migrate(vm1, pm1);
+ MSG_vm_migrate(vm2, pm1);
+ MSG_vm_migrate(vm3, pm1);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 7. clear affinity settings on PM1");
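+  /* Expectation: with no affinity, the 4 VMs spread over the 2 cores of PM1,
+   * so each task should run at about 50% of full speed. */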
+ MSG_vm_set_affinity(vm0, pm1, 0);
+ MSG_vm_set_affinity(vm1, pm1, 0);
+ MSG_vm_set_affinity(vm2, pm1, 0);
+ MSG_vm_set_affinity(vm3, pm1, 0);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ /* clean up everything */
+ MSG_task_cancel(t0.task);
+ MSG_task_cancel(t1.task);
+ MSG_task_cancel(t2.task);
+ MSG_task_cancel(t3.task);
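+  /* Let the workers notice the cancellation before the tasks are destroyed. */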
+ MSG_process_sleep(10);
+ MSG_task_destroy(t0.task);
+ MSG_task_destroy(t1.task);
+ MSG_task_destroy(t2.task);
+ MSG_task_destroy(t3.task);
+
+ MSG_vm_destroy(vm0);
+ MSG_vm_destroy(vm1);
+ MSG_vm_destroy(vm2);
+ MSG_vm_destroy(vm3);