/* Copyright (c) 2007-2013. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "msg/msg.h"
#include "xbt/sysdep.h" /* calloc, printf */

/* Create a log channel to have nice outputs. */
#include "xbt/log.h"
#include "xbt/asserts.h"
13 XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
19 static int worker_main(int argc, char *argv[])
21 msg_task_t task = MSG_process_get_data(MSG_process_self());
22 MSG_task_execute(task);
24 XBT_INFO("task %p bye", task);
32 double prev_computation_amount;
37 static void task_data_init_clock(struct task_data *t)
39 t->prev_computation_amount = MSG_task_get_remaining_computation(t->task);
40 t->prev_clock = MSG_get_clock();
44 static void task_data_get_clock(struct task_data *t)
46 double now_computation_amount = MSG_task_get_remaining_computation(t->task);
47 double now_clock = MSG_get_clock();
49 double done = t->prev_computation_amount - now_computation_amount;
50 double duration = now_clock - t->prev_clock;
52 XBT_INFO("%s: %f fops/s", MSG_task_get_name(t->task), done / duration);
54 t->prev_computation_amount = now_computation_amount;
55 t->prev_clock = now_clock;
59 static void test_pm_pin(void)
61 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
62 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
63 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
64 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
72 t1.task = MSG_task_create("Task1", 10000000000000000UL, 0, NULL);
73 t2.task = MSG_task_create("Task2", 10000000000000000UL, 0, NULL);
74 t3.task = MSG_task_create("Task3", 10000000000000000UL, 0, NULL);
75 t4.task = MSG_task_create("Task4", 10000000000000000UL, 0, NULL);
77 MSG_process_create("worker1", worker_main, t1.task, pm1);
78 MSG_process_create("worker2", worker_main, t2.task, pm1);
79 MSG_process_create("worker3", worker_main, t3.task, pm1);
80 MSG_process_create("worker4", worker_main, t4.task, pm1);
83 XBT_INFO("## 1. start 4 tasks on PM1 (2 cores)");
84 task_data_init_clock(&t1);
85 task_data_init_clock(&t2);
86 task_data_init_clock(&t3);
87 task_data_init_clock(&t4);
89 MSG_process_sleep(10);
90 task_data_get_clock(&t1);
91 task_data_get_clock(&t2);
92 task_data_get_clock(&t3);
93 task_data_get_clock(&t4);
96 XBT_INFO("## 2. pin all tasks to CPU0");
97 MSG_task_set_affinity(t1.task, pm1, 0x01);
98 MSG_task_set_affinity(t2.task, pm1, 0x01);
99 MSG_task_set_affinity(t3.task, pm1, 0x01);
100 MSG_task_set_affinity(t4.task, pm1, 0x01);
102 MSG_process_sleep(10);
103 task_data_get_clock(&t1);
104 task_data_get_clock(&t2);
105 task_data_get_clock(&t3);
106 task_data_get_clock(&t4);
109 XBT_INFO("## 3. clear the affinity of task4");
110 MSG_task_set_affinity(t4.task, pm1, 0);
112 MSG_process_sleep(10);
113 task_data_get_clock(&t1);
114 task_data_get_clock(&t2);
115 task_data_get_clock(&t3);
116 task_data_get_clock(&t4);
119 XBT_INFO("## 4. clear the affinity of task3");
120 MSG_task_set_affinity(t3.task, pm1, 0);
122 MSG_process_sleep(10);
123 task_data_get_clock(&t1);
124 task_data_get_clock(&t2);
125 task_data_get_clock(&t3);
126 task_data_get_clock(&t4);
129 XBT_INFO("## 5. clear the affinity of task2");
130 MSG_task_set_affinity(t2.task, pm1, 0);
132 MSG_process_sleep(10);
133 task_data_get_clock(&t1);
134 task_data_get_clock(&t2);
135 task_data_get_clock(&t3);
136 task_data_get_clock(&t4);
139 XBT_INFO("## 6. pin all tasks to CPU0 of another PM (no effect now)");
140 MSG_task_set_affinity(t1.task, pm0, 0);
141 MSG_task_set_affinity(t2.task, pm0, 0);
142 MSG_task_set_affinity(t3.task, pm2, 0);
143 MSG_task_set_affinity(t4.task, pm2, 0);
145 MSG_process_sleep(10);
146 task_data_get_clock(&t1);
147 task_data_get_clock(&t2);
148 task_data_get_clock(&t3);
149 task_data_get_clock(&t4);
153 MSG_task_cancel(t1.task);
154 MSG_task_cancel(t2.task);
155 MSG_task_cancel(t3.task);
156 MSG_task_cancel(t4.task);
157 MSG_process_sleep(10);
158 MSG_task_destroy(t1.task);
159 MSG_task_destroy(t2.task);
160 MSG_task_destroy(t3.task);
161 MSG_task_destroy(t4.task);
165 static void test_vm_pin(void)
167 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
168 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t); // 1 cores
169 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t); // 2 cores
170 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); // 4 cores
173 /* set up VMs on PM2 (4 cores) */
174 msg_vm_t vm0 = MSG_vm_create_core(pm2, "VM0");
175 msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
176 msg_vm_t vm2 = MSG_vm_create_core(pm2, "VM2");
177 msg_vm_t vm3 = MSG_vm_create_core(pm2, "VM3");
179 s_ws_params_t params;
180 memset(¶ms, 0, sizeof(params));
181 params.ramsize = 1L * 1024 * 1024;
182 params.skip_stage1 = 1;
183 params.skip_stage2 = 1;
184 //params.mig_speed = 1L * 1024 * 1024;
185 MSG_host_set_params(vm0, ¶ms);
186 MSG_host_set_params(vm1, ¶ms);
187 MSG_host_set_params(vm2, ¶ms);
188 MSG_host_set_params(vm3, ¶ms);
196 /* set up tasks and processes */
202 t0.task = MSG_task_create("Task0", 10000000000000000UL, 0, NULL);
203 t1.task = MSG_task_create("Task1", 10000000000000000UL, 0, NULL);
204 t2.task = MSG_task_create("Task2", 10000000000000000UL, 0, NULL);
205 t3.task = MSG_task_create("Task3", 10000000000000000UL, 0, NULL);
207 MSG_process_create("worker0", worker_main, t0.task, vm0);
208 MSG_process_create("worker1", worker_main, t1.task, vm1);
209 MSG_process_create("worker2", worker_main, t2.task, vm2);
210 MSG_process_create("worker3", worker_main, t3.task, vm3);
213 /* start experiments */
214 XBT_INFO("## 1. start 4 VMs on PM2 (4 cores)");
215 task_data_init_clock(&t0);
216 task_data_init_clock(&t1);
217 task_data_init_clock(&t2);
218 task_data_init_clock(&t3);
220 MSG_process_sleep(10);
221 task_data_get_clock(&t0);
222 task_data_get_clock(&t1);
223 task_data_get_clock(&t2);
224 task_data_get_clock(&t3);
227 XBT_INFO("## 2. pin all VMs to CPU0 of PM2");
228 MSG_vm_set_affinity(vm0, pm2, 0x01);
229 MSG_vm_set_affinity(vm1, pm2, 0x01);
230 MSG_vm_set_affinity(vm2, pm2, 0x01);
231 MSG_vm_set_affinity(vm3, pm2, 0x01);
233 MSG_process_sleep(10);
234 task_data_get_clock(&t0);
235 task_data_get_clock(&t1);
236 task_data_get_clock(&t2);
237 task_data_get_clock(&t3);
240 XBT_INFO("## 3. pin all VMs to CPU0 of PM1 (no effect at now)");
241 /* Because VMs are on PM2, the below operations do not effect computation now. */
242 MSG_vm_set_affinity(vm0, pm1, 0x01);
243 MSG_vm_set_affinity(vm1, pm1, 0x01);
244 MSG_vm_set_affinity(vm2, pm1, 0x01);
245 MSG_vm_set_affinity(vm3, pm1, 0x01);
247 MSG_process_sleep(10);
248 task_data_get_clock(&t0);
249 task_data_get_clock(&t1);
250 task_data_get_clock(&t2);
251 task_data_get_clock(&t3);
254 XBT_INFO("## 4. unpin VM0, and pin VM2 and VM3 to CPU1 of PM2");
255 MSG_vm_set_affinity(vm0, pm2, 0x00);
256 MSG_vm_set_affinity(vm2, pm2, 0x02);
257 MSG_vm_set_affinity(vm3, pm2, 0x02);
259 MSG_process_sleep(10);
260 task_data_get_clock(&t0);
261 task_data_get_clock(&t1);
262 task_data_get_clock(&t2);
263 task_data_get_clock(&t3);
266 XBT_INFO("## 5. migrate all VMs to PM0 (only 1 CPU core)");
267 MSG_vm_migrate(vm0, pm0);
268 MSG_vm_migrate(vm1, pm0);
269 MSG_vm_migrate(vm2, pm0);
270 MSG_vm_migrate(vm3, pm0);
272 MSG_process_sleep(10);
273 task_data_get_clock(&t0);
274 task_data_get_clock(&t1);
275 task_data_get_clock(&t2);
276 task_data_get_clock(&t3);
278 MSG_process_sleep(10);
279 task_data_get_clock(&t0);
280 task_data_get_clock(&t1);
281 task_data_get_clock(&t2);
282 task_data_get_clock(&t3);
285 XBT_INFO("## 6. migrate all VMs to PM1 (2 CPU cores, with affinity settings)");
286 MSG_vm_migrate(vm0, pm1);
287 MSG_vm_migrate(vm1, pm1);
288 MSG_vm_migrate(vm2, pm1);
289 MSG_vm_migrate(vm3, pm1);
291 MSG_process_sleep(10);
292 task_data_get_clock(&t0);
293 task_data_get_clock(&t1);
294 task_data_get_clock(&t2);
295 task_data_get_clock(&t3);
297 MSG_process_sleep(10);
298 task_data_get_clock(&t0);
299 task_data_get_clock(&t1);
300 task_data_get_clock(&t2);
301 task_data_get_clock(&t3);
304 XBT_INFO("## 7. clear affinity settings on PM1");
305 MSG_vm_set_affinity(vm0, pm1, 0);
306 MSG_vm_set_affinity(vm1, pm1, 0);
307 MSG_vm_set_affinity(vm2, pm1, 0);
308 MSG_vm_set_affinity(vm3, pm1, 0);
310 MSG_process_sleep(10);
311 task_data_get_clock(&t0);
312 task_data_get_clock(&t1);
313 task_data_get_clock(&t2);
314 task_data_get_clock(&t3);
316 MSG_process_sleep(10);
317 task_data_get_clock(&t0);
318 task_data_get_clock(&t1);
319 task_data_get_clock(&t2);
320 task_data_get_clock(&t3);
323 /* clean up everything */
324 MSG_task_cancel(t0.task);
325 MSG_task_cancel(t1.task);
326 MSG_task_cancel(t2.task);
327 MSG_task_cancel(t3.task);
328 MSG_process_sleep(10);
329 MSG_task_destroy(t0.task);
330 MSG_task_destroy(t1.task);
331 MSG_task_destroy(t2.task);
332 MSG_task_destroy(t3.task);
/**
 * Master process: run both affinity experiments in sequence.
 *
 * NOTE(review): the calls to test_pm_pin()/test_vm_pin() were lost from this
 * chunk; restored to match the banners logged here.
 *
 * @param argc unused
 * @param argv unused
 * @return 0 (MSG process functions must return int)
 */
static int master_main(int argc, char *argv[])
{
  XBT_INFO("=== Test PM (set affinity) ===");
  test_pm_pin();

  XBT_INFO("=== Test VM (set affinity) ===");
  test_vm_pin();

  return 0;
}
353 int main(int argc, char *argv[])
355 /* Get the arguments */
356 MSG_init(&argc, argv);
358 /* load the platform file */
360 printf("Usage: %s examples/msg/cloud/multicore_plat.xml\n", argv[0]);
364 MSG_create_environment(argv[1]);
366 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
367 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
368 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
369 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
372 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm0), MSG_host_get_core_number(pm0), MSG_get_host_speed(pm0));
373 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm1), MSG_host_get_core_number(pm1), MSG_get_host_speed(pm1));
374 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm2), MSG_host_get_core_number(pm2), MSG_get_host_speed(pm2));
378 MSG_process_create("master", master_main, NULL, pm0);
383 int res = MSG_main();
384 XBT_INFO("Bye (simulation time %g)", MSG_get_clock());
387 return !(res == MSG_OK);