1 /* Copyright (c) 2007-2015. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
8 #include "simgrid/msg.h"
9 #include "xbt/sysdep.h" /* calloc, printf */
11 /* Create a log channel to have nice outputs. */
13 #include "xbt/asserts.h"
14 XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
20 static int worker_main(int argc, char *argv[])
22 msg_task_t task = MSG_process_get_data(MSG_process_self());
23 MSG_task_execute(task);
25 XBT_INFO("task %p bye", task);
33 double prev_computation_amount;
38 static void task_data_init_clock(struct task_data *t)
40 t->prev_computation_amount = MSG_task_get_flops_amount(t->task);
41 t->prev_clock = MSG_get_clock();
45 static void task_data_get_clock(struct task_data *t)
47 double now_computation_amount = MSG_task_get_flops_amount(t->task);
48 double now_clock = MSG_get_clock();
50 double done = t->prev_computation_amount - now_computation_amount;
51 double duration = now_clock - t->prev_clock;
53 XBT_INFO("%s: %f fops/s", MSG_task_get_name(t->task), done / duration);
55 t->prev_computation_amount = now_computation_amount;
56 t->prev_clock = now_clock;
60 static void test_pm_pin(void)
62 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
63 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
64 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
65 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
73 t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
74 t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
75 t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
76 t4.task = MSG_task_create("Task4", 1e16, 0, NULL);
78 MSG_process_create("worker1", worker_main, t1.task, pm1);
79 MSG_process_create("worker2", worker_main, t2.task, pm1);
80 MSG_process_create("worker3", worker_main, t3.task, pm1);
81 MSG_process_create("worker4", worker_main, t4.task, pm1);
84 XBT_INFO("## 1. start 4 tasks on PM1 (2 cores)");
85 task_data_init_clock(&t1);
86 task_data_init_clock(&t2);
87 task_data_init_clock(&t3);
88 task_data_init_clock(&t4);
90 MSG_process_sleep(10);
91 task_data_get_clock(&t1);
92 task_data_get_clock(&t2);
93 task_data_get_clock(&t3);
94 task_data_get_clock(&t4);
97 XBT_INFO("## 2. pin all tasks to CPU0");
98 MSG_task_set_affinity(t1.task, pm1, 0x01);
99 MSG_task_set_affinity(t2.task, pm1, 0x01);
100 MSG_task_set_affinity(t3.task, pm1, 0x01);
101 MSG_task_set_affinity(t4.task, pm1, 0x01);
103 MSG_process_sleep(10);
104 task_data_get_clock(&t1);
105 task_data_get_clock(&t2);
106 task_data_get_clock(&t3);
107 task_data_get_clock(&t4);
110 XBT_INFO("## 3. clear the affinity of task4");
111 MSG_task_set_affinity(t4.task, pm1, 0);
113 MSG_process_sleep(10);
114 task_data_get_clock(&t1);
115 task_data_get_clock(&t2);
116 task_data_get_clock(&t3);
117 task_data_get_clock(&t4);
120 XBT_INFO("## 4. clear the affinity of task3");
121 MSG_task_set_affinity(t3.task, pm1, 0);
123 MSG_process_sleep(10);
124 task_data_get_clock(&t1);
125 task_data_get_clock(&t2);
126 task_data_get_clock(&t3);
127 task_data_get_clock(&t4);
130 XBT_INFO("## 5. clear the affinity of task2");
131 MSG_task_set_affinity(t2.task, pm1, 0);
133 MSG_process_sleep(10);
134 task_data_get_clock(&t1);
135 task_data_get_clock(&t2);
136 task_data_get_clock(&t3);
137 task_data_get_clock(&t4);
140 XBT_INFO("## 6. pin all tasks to CPU0 of another PM (no effect now)");
141 MSG_task_set_affinity(t1.task, pm0, 0);
142 MSG_task_set_affinity(t2.task, pm0, 0);
143 MSG_task_set_affinity(t3.task, pm2, 0);
144 MSG_task_set_affinity(t4.task, pm2, 0);
146 MSG_process_sleep(10);
147 task_data_get_clock(&t1);
148 task_data_get_clock(&t2);
149 task_data_get_clock(&t3);
150 task_data_get_clock(&t4);
154 MSG_task_cancel(t1.task);
155 MSG_task_cancel(t2.task);
156 MSG_task_cancel(t3.task);
157 MSG_task_cancel(t4.task);
158 MSG_process_sleep(10);
159 MSG_task_destroy(t1.task);
160 MSG_task_destroy(t2.task);
161 MSG_task_destroy(t3.task);
162 MSG_task_destroy(t4.task);
166 static void test_vm_pin(void)
168 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
169 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t); // 1 cores
170 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t); // 2 cores
171 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); // 4 cores
174 /* set up VMs on PM2 (4 cores) */
175 msg_vm_t vm0 = MSG_vm_create_core(pm2, "VM0");
176 msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
177 msg_vm_t vm2 = MSG_vm_create_core(pm2, "VM2");
178 msg_vm_t vm3 = MSG_vm_create_core(pm2, "VM3");
180 s_vm_params_t params;
181 memset(¶ms, 0, sizeof(params));
182 params.ramsize = 1L * 1024 * 1024;
183 params.skip_stage1 = 1;
184 params.skip_stage2 = 1;
185 //params.mig_speed = 1L * 1024 * 1024;
186 MSG_host_set_params(vm0, ¶ms);
187 MSG_host_set_params(vm1, ¶ms);
188 MSG_host_set_params(vm2, ¶ms);
189 MSG_host_set_params(vm3, ¶ms);
197 /* set up tasks and processes */
203 t0.task = MSG_task_create("Task0", 1e16, 0, NULL);
204 t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
205 t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
206 t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
208 MSG_process_create("worker0", worker_main, t0.task, vm0);
209 MSG_process_create("worker1", worker_main, t1.task, vm1);
210 MSG_process_create("worker2", worker_main, t2.task, vm2);
211 MSG_process_create("worker3", worker_main, t3.task, vm3);
214 /* start experiments */
215 XBT_INFO("## 1. start 4 VMs on PM2 (4 cores)");
216 task_data_init_clock(&t0);
217 task_data_init_clock(&t1);
218 task_data_init_clock(&t2);
219 task_data_init_clock(&t3);
221 MSG_process_sleep(10);
222 task_data_get_clock(&t0);
223 task_data_get_clock(&t1);
224 task_data_get_clock(&t2);
225 task_data_get_clock(&t3);
228 XBT_INFO("## 2. pin all VMs to CPU0 of PM2");
229 MSG_vm_set_affinity(vm0, pm2, 0x01);
230 MSG_vm_set_affinity(vm1, pm2, 0x01);
231 MSG_vm_set_affinity(vm2, pm2, 0x01);
232 MSG_vm_set_affinity(vm3, pm2, 0x01);
234 MSG_process_sleep(10);
235 task_data_get_clock(&t0);
236 task_data_get_clock(&t1);
237 task_data_get_clock(&t2);
238 task_data_get_clock(&t3);
241 XBT_INFO("## 3. pin all VMs to CPU0 of PM1 (no effect at now)");
242 /* Because VMs are on PM2, the below operations do not effect computation now. */
243 MSG_vm_set_affinity(vm0, pm1, 0x01);
244 MSG_vm_set_affinity(vm1, pm1, 0x01);
245 MSG_vm_set_affinity(vm2, pm1, 0x01);
246 MSG_vm_set_affinity(vm3, pm1, 0x01);
248 MSG_process_sleep(10);
249 task_data_get_clock(&t0);
250 task_data_get_clock(&t1);
251 task_data_get_clock(&t2);
252 task_data_get_clock(&t3);
255 XBT_INFO("## 4. unpin VM0, and pin VM2 and VM3 to CPU1 of PM2");
256 MSG_vm_set_affinity(vm0, pm2, 0x00);
257 MSG_vm_set_affinity(vm2, pm2, 0x02);
258 MSG_vm_set_affinity(vm3, pm2, 0x02);
260 MSG_process_sleep(10);
261 task_data_get_clock(&t0);
262 task_data_get_clock(&t1);
263 task_data_get_clock(&t2);
264 task_data_get_clock(&t3);
267 XBT_INFO("## 5. migrate all VMs to PM0 (only 1 CPU core)");
268 MSG_vm_migrate(vm0, pm0);
269 MSG_vm_migrate(vm1, pm0);
270 MSG_vm_migrate(vm2, pm0);
271 MSG_vm_migrate(vm3, pm0);
273 MSG_process_sleep(10);
274 task_data_get_clock(&t0);
275 task_data_get_clock(&t1);
276 task_data_get_clock(&t2);
277 task_data_get_clock(&t3);
279 MSG_process_sleep(10);
280 task_data_get_clock(&t0);
281 task_data_get_clock(&t1);
282 task_data_get_clock(&t2);
283 task_data_get_clock(&t3);
286 XBT_INFO("## 6. migrate all VMs to PM1 (2 CPU cores, with affinity settings)");
287 MSG_vm_migrate(vm0, pm1);
288 MSG_vm_migrate(vm1, pm1);
289 MSG_vm_migrate(vm2, pm1);
290 MSG_vm_migrate(vm3, pm1);
292 MSG_process_sleep(10);
293 task_data_get_clock(&t0);
294 task_data_get_clock(&t1);
295 task_data_get_clock(&t2);
296 task_data_get_clock(&t3);
298 MSG_process_sleep(10);
299 task_data_get_clock(&t0);
300 task_data_get_clock(&t1);
301 task_data_get_clock(&t2);
302 task_data_get_clock(&t3);
305 XBT_INFO("## 7. clear affinity settings on PM1");
306 MSG_vm_set_affinity(vm0, pm1, 0);
307 MSG_vm_set_affinity(vm1, pm1, 0);
308 MSG_vm_set_affinity(vm2, pm1, 0);
309 MSG_vm_set_affinity(vm3, pm1, 0);
311 MSG_process_sleep(10);
312 task_data_get_clock(&t0);
313 task_data_get_clock(&t1);
314 task_data_get_clock(&t2);
315 task_data_get_clock(&t3);
317 MSG_process_sleep(10);
318 task_data_get_clock(&t0);
319 task_data_get_clock(&t1);
320 task_data_get_clock(&t2);
321 task_data_get_clock(&t3);
324 /* clean up everything */
325 MSG_task_cancel(t0.task);
326 MSG_task_cancel(t1.task);
327 MSG_task_cancel(t2.task);
328 MSG_task_cancel(t3.task);
329 MSG_process_sleep(10);
330 MSG_task_destroy(t0.task);
331 MSG_task_destroy(t1.task);
332 MSG_task_destroy(t2.task);
333 MSG_task_destroy(t3.task);
/* Driver process: run the PM affinity experiment, then the VM one.
 * (The calls between the two banner logs were reconstructed; they match the
 * two test functions defined above.) */
static int master_main(int argc, char *argv[])
{
  XBT_INFO("=== Test PM (set affinity) ===");
  test_pm_pin();

  XBT_INFO("=== Test VM (set affinity) ===");
  test_vm_pin();

  return 0;
}
354 int main(int argc, char *argv[])
356 /* Get the arguments */
357 MSG_init(&argc, argv);
359 /* load the platform file */
361 printf("Usage: %s examples/msg/cloud/multicore_plat.xml\n", argv[0]);
365 MSG_create_environment(argv[1]);
367 xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
368 msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
369 msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
370 msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
373 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm0), MSG_host_get_core_number(pm0), MSG_get_host_speed(pm0));
374 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm1), MSG_host_get_core_number(pm1), MSG_get_host_speed(pm1));
375 XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm2), MSG_host_get_core_number(pm2), MSG_get_host_speed(pm2));
379 MSG_process_create("master", master_main, NULL, pm0);
384 int res = MSG_main();
385 XBT_INFO("Bye (simulation time %g)", MSG_get_clock());
388 return !(res == MSG_OK);