-/* Copyright (c) 2007-2013. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2015. The SimGrid Team.
+ * All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <stdio.h>
-#include "msg/msg.h"
+#include "simgrid/msg.h"
#include "xbt/sysdep.h" /* calloc, printf */
/* Create a log channel to have nice outputs. */
XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
-struct worker_data {
- double computation_amount;
-};
+
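+/* The worker simply executes the task given as process data; MSG_task_execute() returns once the task completes or is canceled by the master. */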
static int worker_main(int argc, char *argv[])
{
- struct worker_data *params = MSG_process_get_data(MSG_process_self());
- double computation_amount = params->computation_amount;
-
- {
- double clock_sta = MSG_get_clock();
-
- msg_task_t task = MSG_task_create("Task", computation_amount, 0, NULL);
- MSG_task_execute(task);
- MSG_task_destroy(task);
-
- double clock_end = MSG_get_clock();
-
- double duration = clock_end - clock_sta;
- double flops_per_sec = computation_amount / duration;
-
- XBT_INFO("%s: amount %f duration %f (%f flops/s)",
- MSG_host_get_name(MSG_host_self()), computation_amount, duration, flops_per_sec);
- }
+ msg_task_t task = MSG_process_get_data(MSG_process_self());
+ MSG_task_execute(task);
-
-
- xbt_free(params);
+ XBT_INFO("task %p bye", task);
return 0;
}
+struct task_data {
+ msg_task_t task;
+ double prev_computation_amount;
+ double prev_clock;
+};
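+/* Helpers to sample a task's progress: task_data_init_clock() records the remaining flops and the current clock, and task_data_get_clock() reports the flops/s achieved since the previous record. */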
-static void test_one_task(msg_host_t hostA, double computation)
+static void task_data_init_clock(struct task_data *t)
{
-
- struct worker_data *params = xbt_new(struct worker_data, 1);
- params->computation_amount = computation;
-
- MSG_process_create("worker", worker_main, params, hostA);
-
- //xbt_free(params);
+ t->prev_computation_amount = MSG_task_get_flops_amount(t->task);
+ t->prev_clock = MSG_get_clock();
}
-#if 0
-static void test_two_tasks(msg_host_t hostA, msg_host_t hostB)
-{
- const double cpu_speed = MSG_get_host_speed(hostA);
- xbt_assert(cpu_speed == MSG_get_host_speed(hostB));
- const double computation_amount = cpu_speed * 10;
- const char *hostA_name = MSG_host_get_name(hostA);
- const char *hostB_name = MSG_host_get_name(hostB);
-
- {
- XBT_INFO("### Test: no bound for Task1@%s, no bound for Task2@%s", hostA_name, hostB_name);
- launch_worker(hostA, "worker0", computation_amount, 0, 0);
- launch_worker(hostB, "worker1", computation_amount, 0, 0);
- }
-
- MSG_process_sleep(1000);
-
- {
- XBT_INFO("### Test: 0 for Task1@%s, 0 for Task2@%s (i.e., unlimited)", hostA_name, hostB_name);
- launch_worker(hostA, "worker0", computation_amount, 1, 0);
- launch_worker(hostB, "worker1", computation_amount, 1, 0);
- }
-
- MSG_process_sleep(1000);
-
- {
- XBT_INFO("### Test: 50%% for Task1@%s, 50%% for Task2@%s", hostA_name, hostB_name);
- launch_worker(hostA, "worker0", computation_amount, 1, cpu_speed / 2);
- launch_worker(hostB, "worker1", computation_amount, 1, cpu_speed / 2);
- }
-
- MSG_process_sleep(1000);
-
- {
- XBT_INFO("### Test: 25%% for Task1@%s, 25%% for Task2@%s", hostA_name, hostB_name);
- launch_worker(hostA, "worker0", computation_amount, 1, cpu_speed / 4);
- launch_worker(hostB, "worker1", computation_amount, 1, cpu_speed / 4);
- }
-
- MSG_process_sleep(1000);
-
- {
- XBT_INFO("### Test: 75%% for Task1@%s, 100%% for Task2@%s", hostA_name, hostB_name);
- launch_worker(hostA, "worker0", computation_amount, 1, cpu_speed * 0.75);
- launch_worker(hostB, "worker1", computation_amount, 1, cpu_speed);
- }
-
- MSG_process_sleep(1000);
- {
- XBT_INFO("### Test: no bound for Task1@%s, 25%% for Task2@%s", hostA_name, hostB_name);
- launch_worker(hostA, "worker0", computation_amount, 0, 0);
- launch_worker(hostB, "worker1", computation_amount, 1, cpu_speed / 4);
- }
+static void task_data_get_clock(struct task_data *t)
+{
+ double now_computation_amount = MSG_task_get_flops_amount(t->task);
+ double now_clock = MSG_get_clock();
- MSG_process_sleep(1000);
+ double done = t->prev_computation_amount - now_computation_amount;
+ double duration = now_clock - t->prev_clock;
- {
- XBT_INFO("### Test: 75%% for Task1@%s, 25%% for Task2@%s", hostA_name, hostB_name);
- launch_worker(hostA, "worker0", computation_amount, 1, cpu_speed * 0.75);
- launch_worker(hostB, "worker1", computation_amount, 1, cpu_speed / 4);
- }
+ XBT_INFO("%s: %f fops/s", MSG_task_get_name(t->task), done / duration);
- MSG_process_sleep(1000);
+ t->prev_computation_amount = now_computation_amount;
+ t->prev_clock = now_clock;
}
-#endif
-static void test_pm(void)
+
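+/* Run four tasks on the two-core PM1 and change their CPU affinity step by step, observing how each task's computation rate evolves. */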
+static void test_pm_pin(void)
{
xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
- const double cpu_speed = MSG_get_host_speed(pm0);
- const double computation_amount = cpu_speed * 10;
- {
- XBT_INFO("# 1. Put a single task on each PM. ");
- test_one_task(pm0, computation_amount);
- MSG_process_sleep(100);
- test_one_task(pm1, computation_amount);
- MSG_process_sleep(100);
- test_one_task(pm2, computation_amount);
- }
+ struct task_data t1;
+ struct task_data t2;
+ struct task_data t3;
+ struct task_data t4;
- MSG_process_sleep(100);
+ t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
+ t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
+ t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
+ t4.task = MSG_task_create("Task4", 1e16, 0, NULL);
- {
- XBT_INFO("# 2. Put 2 tasks on each PM. ");
- test_one_task(pm0, computation_amount);
- test_one_task(pm0, computation_amount);
- MSG_process_sleep(100);
+ MSG_process_create("worker1", worker_main, t1.task, pm1);
+ MSG_process_create("worker2", worker_main, t2.task, pm1);
+ MSG_process_create("worker3", worker_main, t3.task, pm1);
+ MSG_process_create("worker4", worker_main, t4.task, pm1);
- test_one_task(pm1, computation_amount);
- test_one_task(pm1, computation_amount);
- MSG_process_sleep(100);
- test_one_task(pm2, computation_amount);
- test_one_task(pm2, computation_amount);
- }
+ XBT_INFO("## 1. start 4 tasks on PM1 (2 cores)");
+ task_data_init_clock(&t1);
+ task_data_init_clock(&t2);
+ task_data_init_clock(&t3);
+ task_data_init_clock(&t4);
- MSG_process_sleep(100);
-
- {
- XBT_INFO("# 3. Put 4 tasks on each PM. ");
- test_one_task(pm0, computation_amount);
- test_one_task(pm0, computation_amount);
- test_one_task(pm0, computation_amount);
- test_one_task(pm0, computation_amount);
- MSG_process_sleep(100);
-
- test_one_task(pm1, computation_amount);
- test_one_task(pm1, computation_amount);
- test_one_task(pm1, computation_amount);
- test_one_task(pm1, computation_amount);
- MSG_process_sleep(100);
-
- test_one_task(pm2, computation_amount);
- test_one_task(pm2, computation_amount);
- test_one_task(pm2, computation_amount);
- test_one_task(pm2, computation_amount);
- }
+ MSG_process_sleep(10);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+ task_data_get_clock(&t4);
- MSG_process_sleep(100);
-}
+ XBT_INFO("## 2. pin all tasks to CPU0");
+ MSG_task_set_affinity(t1.task, pm1, 0x01);
+ MSG_task_set_affinity(t2.task, pm1, 0x01);
+ MSG_task_set_affinity(t3.task, pm1, 0x01);
+ MSG_task_set_affinity(t4.task, pm1, 0x01);
-static void test_vm(void)
-{
- xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
- msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
- msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
- msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
+ MSG_process_sleep(10);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+ task_data_get_clock(&t4);
- const double cpu_speed = MSG_get_host_speed(pm0);
- const double computation_amount = cpu_speed * 10;
+ XBT_INFO("## 3. clear the affinity of task4");
+ MSG_task_set_affinity(t4.task, pm1, 0);
+ MSG_process_sleep(10);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+ task_data_get_clock(&t4);
- {
- msg_host_t vm0 = MSG_vm_create_core(pm0, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
- msg_host_t vm2 = MSG_vm_create_core(pm2, "vm2");
- XBT_INFO("# 1. Put a single task on each VM.");
- test_one_task(vm0, computation_amount);
- MSG_process_sleep(100);
+ XBT_INFO("## 4. clear the affinity of task3");
+ MSG_task_set_affinity(t3.task, pm1, 0);
- test_one_task(vm1, computation_amount);
- MSG_process_sleep(100);
+ MSG_process_sleep(10);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+ task_data_get_clock(&t4);
- test_one_task(vm2, computation_amount);
- MSG_process_sleep(100);
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- MSG_vm_destroy(vm2);
- }
+ XBT_INFO("## 5. clear the affinity of task2");
+ MSG_task_set_affinity(t2.task, pm1, 0);
+ MSG_process_sleep(10);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+ task_data_get_clock(&t4);
- {
- msg_host_t vm0 = MSG_vm_create_core(pm0, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
- msg_host_t vm2 = MSG_vm_create_core(pm2, "vm2");
- XBT_INFO("# 2. Put 2 tasks on each VM.");
- test_one_task(vm0, computation_amount);
- test_one_task(vm0, computation_amount);
- MSG_process_sleep(100);
+ XBT_INFO("## 6. pin all tasks to CPU0 of another PM (no effect now)");
+ MSG_task_set_affinity(t1.task, pm0, 0x01);
+ MSG_task_set_affinity(t2.task, pm0, 0x01);
+ MSG_task_set_affinity(t3.task, pm2, 0x01);
+ MSG_task_set_affinity(t4.task, pm2, 0x01);
- test_one_task(vm1, computation_amount);
- test_one_task(vm1, computation_amount);
- MSG_process_sleep(100);
+ MSG_process_sleep(10);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+ task_data_get_clock(&t4);
- test_one_task(vm2, computation_amount);
- test_one_task(vm2, computation_amount);
- MSG_process_sleep(100);
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- MSG_vm_destroy(vm2);
- }
-
- {
- msg_host_t vm0 = MSG_vm_create_core(pm0, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
- msg_host_t vm2 = MSG_vm_create_core(pm2, "vm2");
-
- XBT_INFO("# 3. Put a task on each VM, and put a task on its PM.");
- test_one_task(vm0, computation_amount);
- test_one_task(pm0, computation_amount);
- MSG_process_sleep(100);
-
- test_one_task(vm1, computation_amount);
- test_one_task(pm1, computation_amount);
- MSG_process_sleep(100);
-
- test_one_task(vm2, computation_amount);
- test_one_task(pm2, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- MSG_vm_destroy(vm2);
- }
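+ /* Cancel the still-running tasks; the short sleep lets the workers return from MSG_task_execute() before the tasks are destroyed. */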
+ MSG_task_cancel(t1.task);
+ MSG_task_cancel(t2.task);
+ MSG_task_cancel(t3.task);
+ MSG_task_cancel(t4.task);
+ MSG_process_sleep(10);
+ MSG_task_destroy(t1.task);
+ MSG_task_destroy(t2.task);
+ MSG_task_destroy(t3.task);
+ MSG_task_destroy(t4.task);
+}
- {
- {
- /* 1-core PM */
- XBT_INFO("# 4. Put 2 VMs on a 1-core PM.");
- msg_host_t vm0 = MSG_vm_create_core(pm0, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm0, "vm1");
-
- test_one_task(vm0, computation_amount);
- test_one_task(vm1, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-
- {
- /* 2-core PM */
- XBT_INFO("# 5. Put 2 VMs on a 2-core PM.");
- msg_host_t vm0 = MSG_vm_create_core(pm1, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
-
- test_one_task(vm0, computation_amount);
- test_one_task(vm1, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-
- {
- /* 2-core PM */
- XBT_INFO("# 6. Put 2 VMs on a 2-core PM and 1 task on the PM.");
- msg_host_t vm0 = MSG_vm_create_core(pm1, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
-
- test_one_task(vm0, computation_amount);
- test_one_task(vm1, computation_amount);
- test_one_task(pm1, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-
- {
- /* 2-core PM */
- XBT_INFO("# 7. Put 2 VMs and 2 tasks on a 2-core PM. Put two tasks on one of the VMs.");
- msg_host_t vm0 = MSG_vm_create_core(pm1, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
- test_one_task(pm1, computation_amount);
- test_one_task(pm1, computation_amount);
-
- /* Reduce computation_amount to make all tasks finish at the same time. Simplify results. */
- test_one_task(vm0, computation_amount / 2);
- test_one_task(vm0, computation_amount / 2);
- test_one_task(vm1, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-
- {
- /* 2-core PM */
- XBT_INFO("# 8. Put 2 VMs and a task on a 2-core PM. Cap the load of VM1 at 50%%.");
- /* This is a tricky case. The process schedular of the host OS may not work as expected. */
-
- /* VM0 gets 50%. VM1 and VM2 get 75%, respectively. */
- msg_host_t vm0 = MSG_vm_create_core(pm1, "vm0");
- MSG_vm_set_bound(vm0, cpu_speed / 2);
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
- test_one_task(pm1, computation_amount);
-
- test_one_task(vm0, computation_amount);
- test_one_task(vm1, computation_amount);
-
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-
-
- /* In all the above cases, tasks finish at the same time.
- * TODO: more complex cases must be done.
- **/
-
-#if 0
- {
- /* 2-core PM */
- XBT_INFO("# 8. Put 2 VMs and a task on a 2-core PM. Put two tasks on one of the VMs.");
- msg_host_t vm0 = MSG_vm_create_core(pm1, "vm0");
- msg_host_t vm1 = MSG_vm_create_core(pm1, "vm1");
-
- test_one_task(vm0, computation_amount);
- test_one_task(vm0, computation_amount);
- test_one_task(vm1, computation_amount);
- test_one_task(pm1, computation_amount);
- MSG_process_sleep(100);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- }
-#endif
- }
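+/* Same experiment with VMs: run one task per single-core VM on the four-core PM2, change the VMs' affinity, then migrate them across PMs and watch how the rates evolve. */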
+static void test_vm_pin(void)
+{
+ xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
+ msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t); // 1 core
+ msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t); // 2 cores
+ msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); // 4 cores
+
+
+ /* set up VMs on PM2 (4 cores) */
+ msg_vm_t vm0 = MSG_vm_create_core(pm2, "VM0");
+ msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
+ msg_vm_t vm2 = MSG_vm_create_core(pm2, "VM2");
+ msg_vm_t vm3 = MSG_vm_create_core(pm2, "VM3");
+
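+ /* Give each VM a tiny RAM size and skip the migration stages so that the live migrations later in this test finish almost immediately. */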
+ s_vm_params_t params;
+ memset(&params, 0, sizeof(params));
+ params.ramsize = 1L * 1024 * 1024;
+ params.skip_stage1 = 1;
+ params.skip_stage2 = 1;
+ //params.mig_speed = 1L * 1024 * 1024;
+ MSG_host_set_params(vm0, &params);
+ MSG_host_set_params(vm1, &params);
+ MSG_host_set_params(vm2, &params);
+ MSG_host_set_params(vm3, &params);
+
+ MSG_vm_start(vm0);
+ MSG_vm_start(vm1);
+ MSG_vm_start(vm2);
+ MSG_vm_start(vm3);
+
+
+ /* set up tasks and processes */
+ struct task_data t0;
+ struct task_data t1;
+ struct task_data t2;
+ struct task_data t3;
+
+ t0.task = MSG_task_create("Task0", 1e16, 0, NULL);
+ t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
+ t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
+ t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
+
+ MSG_process_create("worker0", worker_main, t0.task, vm0);
+ MSG_process_create("worker1", worker_main, t1.task, vm1);
+ MSG_process_create("worker2", worker_main, t2.task, vm2);
+ MSG_process_create("worker3", worker_main, t3.task, vm3);
+
+
+ /* start experiments */
+ XBT_INFO("## 1. start 4 VMs on PM2 (4 cores)");
+ task_data_init_clock(&t0);
+ task_data_init_clock(&t1);
+ task_data_init_clock(&t2);
+ task_data_init_clock(&t3);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 2. pin all VMs to CPU0 of PM2");
+ MSG_vm_set_affinity(vm0, pm2, 0x01);
+ MSG_vm_set_affinity(vm1, pm2, 0x01);
+ MSG_vm_set_affinity(vm2, pm2, 0x01);
+ MSG_vm_set_affinity(vm3, pm2, 0x01);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 3. pin all VMs to CPU0 of PM1 (no effect at now)");
+ /* Because VMs are on PM2, the below operations do not effect computation now. */
+ MSG_vm_set_affinity(vm0, pm1, 0x01);
+ MSG_vm_set_affinity(vm1, pm1, 0x01);
+ MSG_vm_set_affinity(vm2, pm1, 0x01);
+ MSG_vm_set_affinity(vm3, pm1, 0x01);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 4. unpin VM0, and pin VM2 and VM3 to CPU1 of PM2");
+ MSG_vm_set_affinity(vm0, pm2, 0x00);
+ MSG_vm_set_affinity(vm2, pm2, 0x02);
+ MSG_vm_set_affinity(vm3, pm2, 0x02);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 5. migrate all VMs to PM0 (only 1 CPU core)");
+ MSG_vm_migrate(vm0, pm0);
+ MSG_vm_migrate(vm1, pm0);
+ MSG_vm_migrate(vm2, pm0);
+ MSG_vm_migrate(vm3, pm0);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 6. migrate all VMs to PM1 (2 CPU cores, with affinity settings)");
+ MSG_vm_migrate(vm0, pm1);
+ MSG_vm_migrate(vm1, pm1);
+ MSG_vm_migrate(vm2, pm1);
+ MSG_vm_migrate(vm3, pm1);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ XBT_INFO("## 7. clear affinity settings on PM1");
+ MSG_vm_set_affinity(vm0, pm1, 0);
+ MSG_vm_set_affinity(vm1, pm1, 0);
+ MSG_vm_set_affinity(vm2, pm1, 0);
+ MSG_vm_set_affinity(vm3, pm1, 0);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+ MSG_process_sleep(10);
+ task_data_get_clock(&t0);
+ task_data_get_clock(&t1);
+ task_data_get_clock(&t2);
+ task_data_get_clock(&t3);
+
+
+ /* clean up everything */
+ MSG_task_cancel(t0.task);
+ MSG_task_cancel(t1.task);
+ MSG_task_cancel(t2.task);
+ MSG_task_cancel(t3.task);
+ MSG_process_sleep(10);
+ MSG_task_destroy(t0.task);
+ MSG_task_destroy(t1.task);
+ MSG_task_destroy(t2.task);
+ MSG_task_destroy(t3.task);
+
+ MSG_vm_destroy(vm0);
+ MSG_vm_destroy(vm1);
+ MSG_vm_destroy(vm2);
+ MSG_vm_destroy(vm3);
}
-
static int master_main(int argc, char *argv[])
{
- XBT_INFO("=== Test PM ===");
- test_pm();
+ XBT_INFO("=== Test PM (set affinity) ===");
+ test_pm_pin();
- XBT_INFO(" ");
- XBT_INFO(" ");
- XBT_INFO("=== Test VM ===");
- test_vm();
+ XBT_INFO("=== Test VM (set affinity) ===");
+ test_vm_pin();
return 0;
}
-
-
-
int main(int argc, char *argv[])
{
/* Get the arguments */