doc/msg-tuto-src/masterworker2
doc/msg-tuto-src/masterworker3
doc/msg-tuto-src/masterworker4
-examples/msg/cloud-capping/cloud-capping
examples/msg/cloud-masterworker/cloud-masterworker
examples/msg/cloud-migration/cloud-migration
examples/msg/dht-kademlia/dht-kademlia
examples/s4u/async-wait/s4u-async-wait
examples/s4u/async-waitall/s4u-async-waitall
examples/s4u/async-waitany/s4u-async-waitany
+examples/s4u/cloud-capping/s4u-cloud-capping
examples/s4u/cloud-simple/s4u-cloud-simple
examples/s4u/dht-chord/s4u-dht-chord
examples/s4u/energy-exec/s4u-energy-exec
teshsuite/msg/async-wait/async-wait
teshsuite/msg/async-waitall/async-waitall
teshsuite/msg/async-waitany/async-waitany
+teshsuite/msg/cloud-capping/cloud-capping
teshsuite/msg/cloud-sharing/cloud-sharing
teshsuite/msg/cloud-simple/cloud-simple
teshsuite/msg/cloud-two-tasks/cloud-two-tasks
# C examples
foreach(x app-masterworker
- cloud-capping cloud-masterworker cloud-migration
+ cloud-masterworker cloud-migration
dht-pastry energy-vm platform-failures
process-create
synchro-semaphore trace-categories
${CMAKE_CURRENT_SOURCE_DIR}/network-ns3/two_clusters_d.xml PARENT_SCOPE)
foreach(x app-masterworker
- cloud-capping cloud-masterworker cloud-migration
+ cloud-masterworker cloud-migration
dht-pastry dht-kademlia platform-failures
energy-vm
process-create
foreach (example actor-create actor-daemon actor-join actor-kill actor-lifetime actor-migration actor-suspend actor-yield
app-chainsend app-masterworker app-pingpong app-token-ring
async-wait async-waitany async-waitall
- cloud-simple
+ cloud-capping cloud-simple
energy-exec energy-boot energy-link energy-vm
exec-async exec-basic exec-dvfs exec-monitor exec-ptask exec-remote
io-file-system io-file-remote io-storage-raw
foreach(example actor-create actor-daemon actor-join actor-kill actor-lifetime actor-migration actor-suspend actor-yield
app-bittorrent app-chainsend app-masterworker app-pingpong app-token-ring
async-wait async-waitall async-waitany
- cloud-simple
+ cloud-capping cloud-simple
dht-chord
energy-exec energy-boot energy-link energy-vm
exec-async exec-basic exec-dvfs exec-monitor exec-ptask exec-remote
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+#include "simgrid/plugins/live_migration.h"
+#include "simgrid/s4u/VirtualMachine.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this s4u example");
+
+/* Run 'computation_amount' flops on the current host and log the achieved rate.
+ *
+ * When 'use_bound' is true the execution is capped at 'bound' flops/s via
+ * Exec::setBound() before it starts; a bound of 0 means "no capping" in the
+ * underlying model (hence the dedicated INFO message).  The observed
+ * throughput is derived from the simulated clock so the log can be checked
+ * against the expected capped value in the tesh output. */
+static void worker(double computation_amount, bool use_bound, double bound)
+{
+  double clock_start = simgrid::s4u::Engine::getClock();
+
+  simgrid::s4u::ExecPtr exec = simgrid::s4u::this_actor::exec_init(computation_amount);
+
+  if (use_bound) {
+    if (bound < 1e-12) /* close enough to 0 without any floating precision surprise */
+      XBT_INFO("bound == 0 means no capping (i.e., unlimited).");
+    exec->setBound(bound);
+  }
+  exec->start();
+  exec->wait();
+  double clock_end = simgrid::s4u::Engine::getClock();
+  double duration = clock_end - clock_start;
+  double flops_per_sec = computation_amount / duration;
+
+  if (use_bound)
+    XBT_INFO("bound to %f => duration %f (%f flops/s)", bound, duration, flops_per_sec);
+  else
+    XBT_INFO("not bound => duration %f (%f flops/s)", duration, flops_per_sec);
+}
+
+/* A "nearly infinite" amount of work, so the exec below never completes
+ * during the 10 measurement rounds. */
+#define DOUBLE_MAX 1e11
+/* Run a huge background execution and sample its progress ten times.
+ *
+ * Each round: if 'speed' > 0, raise the bound of the enclosing VM in 10%
+ * increments of 'speed' (0%, 10%, ... 90%), sleep 100s, then log how many
+ * flops were consumed during that window (remaining-work delta / 100s).
+ * NOTE(review): the static_cast assumes this actor runs inside a
+ * VirtualMachine (true for test_dynamic_change's VMs) — confirm if reused. */
+static void worker_busy_loop(const char* name, double speed)
+{
+  simgrid::s4u::ExecPtr exec = simgrid::s4u::this_actor::exec_async(DOUBLE_MAX);
+  double exec_remain_prev = DOUBLE_MAX;
+  for (int i = 0; i < 10; i++) {
+    if (speed > 0) {
+      double new_bound = (speed / 10) * i;
+      XBT_INFO("set bound of VM1 to %f", new_bound);
+      static_cast<simgrid::s4u::VirtualMachine*>(simgrid::s4u::this_actor::getHost())->setBound(new_bound);
+    }
+    simgrid::s4u::this_actor::sleep_for(100);
+    double exec_remain_now = exec->getRemains();
+    double flops_per_sec = exec_remain_prev - exec_remain_now; /* flops consumed over the 100s window */
+    XBT_INFO("%s@%s: %.0f flops/s", name, simgrid::s4u::this_actor::getHost()->getCname(), flops_per_sec / 100);
+    exec_remain_prev = exec_remain_now;
+    simgrid::s4u::this_actor::sleep_for(1); /* offset so the bound change is visible at a distinct timestamp */
+  }
+  exec->wait();
+}
+
+/* Scenario 11: change a VM bound while a task is running.
+ *
+ * Two single-core VMs share PM "Fafard".  worker0 (on VM0) runs unbounded
+ * (speed -1, so the busy loop never touches the bound); worker1 (on VM1)
+ * ramps VM1's bound from 0% to 90% of the PM speed, demonstrating how the
+ * remaining capacity flows to VM0. */
+static void test_dynamic_change()
+{
+  simgrid::s4u::Host* pm0 = simgrid::s4u::Host::by_name("Fafard");
+
+  simgrid::s4u::VirtualMachine* vm0 = new simgrid::s4u::VirtualMachine("VM0", pm0, 1);
+  simgrid::s4u::VirtualMachine* vm1 = new simgrid::s4u::VirtualMachine("VM1", pm0, 1);
+  vm0->start();
+  vm1->start();
+
+  simgrid::s4u::Actor::createActor("worker0", vm0, worker_busy_loop, "Task0", -1);
+  simgrid::s4u::Actor::createActor("worker1", vm1, worker_busy_loop, "Task1", pm0->getSpeed());
+
+  simgrid::s4u::this_actor::sleep_for(3000); // let the tasks end
+  vm0->destroy();
+  vm1->destroy();
+}
+
+/* Run a single 10-seconds-worth-of-flops task on 'host' under five bound
+ * settings: none, 50%, 33%, 0 (i.e. unlimited) and 200% (meaningless, since
+ * it exceeds the host speed).  Each sub-test is given 1000s to complete so
+ * the log timestamps stay aligned with the reference output. */
+static void test_one_task(simgrid::s4u::Host* host)
+{
+  const double cpu_speed = host->getSpeed();
+  const double computation_amount = cpu_speed * 10; /* 10s of work at full speed */
+
+  XBT_INFO("### Test: with/without MSG_task_set_bound");
+
+  XBT_INFO("### Test: no bound for Task1@%s", host->getCname());
+  simgrid::s4u::Actor::createActor("worker0", host, worker, computation_amount, false, 0);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+
+  XBT_INFO("### Test: 50%% for Task1@%s", host->getCname());
+  simgrid::s4u::Actor::createActor("worker0", host, worker, computation_amount, true, cpu_speed / 2);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+
+  XBT_INFO("### Test: 33%% for Task1@%s", host->getCname());
+  simgrid::s4u::Actor::createActor("worker0", host, worker, computation_amount, true, cpu_speed / 3);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+
+  XBT_INFO("### Test: zero for Task1@%s (i.e., unlimited)", host->getCname());
+  simgrid::s4u::Actor::createActor("worker0", host, worker, computation_amount, true, 0);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+
+  XBT_INFO("### Test: 200%% for Task1@%s (i.e., meaningless)", host->getCname());
+  simgrid::s4u::Actor::createActor("worker0", host, worker, computation_amount, true, cpu_speed * 2);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+}
+
+/* Run two concurrent tasks, one on 'hostA' and one on 'hostB', under seven
+ * bound combinations (none/none, 0/0, 50/50, 25/25, 75/100, none/25, 75/25).
+ * The two hosts may be the same object (two tasks sharing one PM or VM).
+ * Both hosts must advertise the same speed, since all bounds are expressed
+ * as fractions of a single 'cpu_speed'. */
+static void test_two_tasks(simgrid::s4u::Host* hostA, simgrid::s4u::Host* hostB)
+{
+  const double cpu_speed = hostA->getSpeed();
+  xbt_assert(cpu_speed == hostB->getSpeed());
+  const double computation_amount = cpu_speed * 10;
+  const char* hostA_name = hostA->getCname();
+  const char* hostB_name = hostB->getCname();
+
+  XBT_INFO("### Test: no bound for Task1@%s, no bound for Task2@%s", hostA_name, hostB_name);
+  simgrid::s4u::Actor::createActor("worker0", hostA, worker, computation_amount, false, 0);
+  simgrid::s4u::Actor::createActor("worker1", hostB, worker, computation_amount, false, 0);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+
+  XBT_INFO("### Test: 0 for Task1@%s, 0 for Task2@%s (i.e., unlimited)", hostA_name, hostB_name);
+  simgrid::s4u::Actor::createActor("worker0", hostA, worker, computation_amount, true, 0);
+  simgrid::s4u::Actor::createActor("worker1", hostB, worker, computation_amount, true, 0);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+
+  XBT_INFO("### Test: 50%% for Task1@%s, 50%% for Task2@%s", hostA_name, hostB_name);
+  simgrid::s4u::Actor::createActor("worker0", hostA, worker, computation_amount, true, cpu_speed / 2);
+  simgrid::s4u::Actor::createActor("worker1", hostB, worker, computation_amount, true, cpu_speed / 2);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+
+  XBT_INFO("### Test: 25%% for Task1@%s, 25%% for Task2@%s", hostA_name, hostB_name);
+  simgrid::s4u::Actor::createActor("worker0", hostA, worker, computation_amount, true, cpu_speed / 4);
+  simgrid::s4u::Actor::createActor("worker1", hostB, worker, computation_amount, true, cpu_speed / 4);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+
+  XBT_INFO("### Test: 75%% for Task1@%s, 100%% for Task2@%s", hostA_name, hostB_name);
+  simgrid::s4u::Actor::createActor("worker0", hostA, worker, computation_amount, true, cpu_speed * 0.75);
+  simgrid::s4u::Actor::createActor("worker1", hostB, worker, computation_amount, true, cpu_speed);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+
+  XBT_INFO("### Test: no bound for Task1@%s, 25%% for Task2@%s", hostA_name, hostB_name);
+  simgrid::s4u::Actor::createActor("worker0", hostA, worker, computation_amount, false, 0);
+  simgrid::s4u::Actor::createActor("worker1", hostB, worker, computation_amount, true, cpu_speed / 4);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+
+  XBT_INFO("### Test: 75%% for Task1@%s, 25%% for Task2@%s", hostA_name, hostB_name);
+  simgrid::s4u::Actor::createActor("worker0", hostA, worker, computation_amount, true, cpu_speed * 0.75);
+  simgrid::s4u::Actor::createActor("worker1", hostB, worker, computation_amount, true, cpu_speed / 4);
+
+  simgrid::s4u::this_actor::sleep_for(1000);
+}
+
+/* Driver actor: runs the whole capping campaign on PM "Fafard".
+ *
+ * Scenarios 1-9 exercise per-task bounds on a PM, on a VM, and on a VM
+ * itself capped at 10% of the PM speed.  Scenario 10 checks bounds around
+ * a live migration, and scenario 11 changes a VM bound dynamically.
+ * (There is no scenario # 5; the numbering matches the reference tesh
+ * output.) */
+static void master_main()
+{
+  simgrid::s4u::Host* pm0 = simgrid::s4u::Host::by_name("Fafard");
+
+  XBT_INFO("# 1. Put a single task on a PM. ");
+  test_one_task(pm0);
+  XBT_INFO(" ");
+
+  XBT_INFO("# 2. Put two tasks on a PM.");
+  test_two_tasks(pm0, pm0);
+  XBT_INFO(" ");
+
+  simgrid::s4u::VirtualMachine* vm0 = new simgrid::s4u::VirtualMachine("VM0", pm0, 1);
+  vm0->start();
+
+  XBT_INFO("# 3. Put a single task on a VM. ");
+  test_one_task(vm0);
+  XBT_INFO(" ");
+
+  XBT_INFO("# 4. Put two tasks on a VM.");
+  test_two_tasks(vm0, vm0);
+  XBT_INFO(" ");
+
+  vm0->destroy();
+
+  /* Recreate the VM: the previous instance was destroyed above. */
+  vm0 = new simgrid::s4u::VirtualMachine("VM0", pm0, 1);
+  vm0->start();
+
+  XBT_INFO("# 6. Put a task on a PM and a task on a VM.");
+  test_two_tasks(pm0, vm0);
+  XBT_INFO(" ");
+
+  vm0->destroy();
+
+  vm0 = new simgrid::s4u::VirtualMachine("VM0", pm0, 1);
+  vm0->setBound(pm0->getSpeed() / 10); /* cap the whole VM at 10% of the PM speed */
+  vm0->start();
+
+  XBT_INFO("# 7. Put a single task on the VM capped by 10%%.");
+  test_one_task(vm0);
+  XBT_INFO(" ");
+
+  XBT_INFO("# 8. Put two tasks on the VM capped by 10%%.");
+  test_two_tasks(vm0, vm0);
+  XBT_INFO(" ");
+
+  XBT_INFO("# 9. Put a task on a PM and a task on the VM capped by 10%%.");
+  test_two_tasks(pm0, vm0);
+  XBT_INFO(" ");
+
+  vm0->destroy();
+
+  vm0 = new simgrid::s4u::VirtualMachine("VM0", pm0, 1);
+  s_vm_params_t params;
+  memset(&params, 0, sizeof(params)); /* fixed: was mis-encoded as "¶ms" */
+  vm0->setParameters(&params);
+  vm0->setRamsize(1e9); // 1GB
+  vm0->start();
+
+  double cpu_speed = pm0->getSpeed();
+
+  XBT_INFO("# 10. Test migration");
+  const double computation_amount = cpu_speed * 10;
+
+  XBT_INFO("# 10. (a) Put a task on a VM without any bound.");
+  simgrid::s4u::Actor::createActor("worker0", vm0, worker, computation_amount, false, 0);
+  simgrid::s4u::this_actor::sleep_for(1000);
+  XBT_INFO(" ");
+
+  XBT_INFO("# 10. (b) set 10%% bound to the VM, and then put a task on the VM.");
+  vm0->setBound(cpu_speed / 10);
+  simgrid::s4u::Actor::createActor("worker0", vm0, worker, computation_amount, false, 0);
+  simgrid::s4u::this_actor::sleep_for(1000);
+  XBT_INFO(" ");
+
+  XBT_INFO("# 10. (c) migrate");
+  /* NOTE(review): "migrates" to the same PM (Fafard -> Fafard), which only
+   * exercises the migration machinery; also mixes the MSG migration API into
+   * an S4U example — presumably sg_vm_migrate() from the live_migration
+   * plugin is intended. Confirm against the plugin API. */
+  simgrid::s4u::Host* pm1 = simgrid::s4u::Host::by_name("Fafard");
+  MSG_vm_migrate(vm0, pm1);
+  XBT_INFO(" ");
+
+  XBT_INFO("# 10. (d) Put a task again on the VM.");
+  simgrid::s4u::Actor::createActor("worker0", vm0, worker, computation_amount, false, 0);
+  simgrid::s4u::this_actor::sleep_for(1000);
+  XBT_INFO(" ");
+
+  vm0->destroy();
+
+  XBT_INFO("# 11. Change a bound dynamically.");
+  test_dynamic_change();
+}
+
+/* Entry point: initialize the engine and the live-migration plugin, load the
+ * platform given on the command line, and run the whole campaign from a
+ * single "master_" actor placed on host "Fafard". */
+int main(int argc, char* argv[])
+{
+  simgrid::s4u::Engine e(&argc, argv);
+  sg_vm_live_migration_plugin_init(); /* required by the migration test (# 10) */
+  /* load the platform file */
+  xbt_assert(argc == 2, "Usage: %s platform_file\n\tExample: %s ../platforms/small_platform.xml\n", argv[0], argv[0]);
+
+  e.loadPlatform(argv[1]);
+
+  simgrid::s4u::Actor::createActor("master_", simgrid::s4u::Host::by_name("Fafard"), master_main);
+
+  e.run();
+  XBT_INFO("Bye (simulation time %g)", simgrid::s4u::Engine::getClock());
+
+  return 0;
+}
--- /dev/null
+! output sort
+$ $SG_TEST_EXENV ${bindir:=.}/s4u-cloud-capping ${platfdir}/small_platform.xml --log=no_loc "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (1:master_@Fafard) # 1. Put a single task on a PM.
+> [ 0.000000] (1:master_@Fafard) ### Test: with/without MSG_task_set_bound
+> [ 0.000000] (1:master_@Fafard) ### Test: no bound for Task1@Fafard
+> [ 10.000000] (2:worker0@Fafard) not bound => duration 10.000000 (76296000.000000 flops/s)
+> [1000.000000] (1:master_@Fafard) ### Test: 50% for Task1@Fafard
+> [1020.000000] (3:worker0@Fafard) bound to 38148000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [2000.000000] (1:master_@Fafard) ### Test: 33% for Task1@Fafard
+> [2030.000000] (4:worker0@Fafard) bound to 25432000.000000 => duration 30.000000 (25432000.000000 flops/s)
+> [3000.000000] (1:master_@Fafard) ### Test: zero for Task1@Fafard (i.e., unlimited)
+> [3000.000000] (5:worker0@Fafard) bound == 0 means no capping (i.e., unlimited).
+> [3010.000000] (5:worker0@Fafard) bound to 0.000000 => duration 10.000000 (76296000.000000 flops/s)
+> [4000.000000] (1:master_@Fafard) ### Test: 200% for Task1@Fafard (i.e., meaningless)
+> [4010.000000] (6:worker0@Fafard) bound to 152592000.000000 => duration 10.000000 (76296000.000000 flops/s)
+> [5000.000000] (1:master_@Fafard)
+> [5000.000000] (1:master_@Fafard) # 2. Put two tasks on a PM.
+> [5000.000000] (1:master_@Fafard) ### Test: no bound for Task1@Fafard, no bound for Task2@Fafard
+> [5020.000000] (8:worker1@Fafard) not bound => duration 20.000000 (38148000.000000 flops/s)
+> [5020.000000] (7:worker0@Fafard) not bound => duration 20.000000 (38148000.000000 flops/s)
+> [6000.000000] (1:master_@Fafard) ### Test: 0 for Task1@Fafard, 0 for Task2@Fafard (i.e., unlimited)
+> [6000.000000] (9:worker0@Fafard) bound == 0 means no capping (i.e., unlimited).
+> [6000.000000] (10:worker1@Fafard) bound == 0 means no capping (i.e., unlimited).
+> [6020.000000] (10:worker1@Fafard) bound to 0.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [6020.000000] (9:worker0@Fafard) bound to 0.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [7000.000000] (1:master_@Fafard) ### Test: 50% for Task1@Fafard, 50% for Task2@Fafard
+> [7020.000000] (12:worker1@Fafard) bound to 38148000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [7020.000000] (11:worker0@Fafard) bound to 38148000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [8000.000000] (1:master_@Fafard) ### Test: 25% for Task1@Fafard, 25% for Task2@Fafard
+> [8040.000000] (14:worker1@Fafard) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [8040.000000] (13:worker0@Fafard) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [9000.000000] (1:master_@Fafard) ### Test: 75% for Task1@Fafard, 100% for Task2@Fafard
+> [9020.000000] (16:worker1@Fafard) bound to 76296000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [9020.000000] (15:worker0@Fafard) bound to 57222000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [10000.000000] (1:master_@Fafard) ### Test: no bound for Task1@Fafard, 25% for Task2@Fafard
+> [10013.333333] (17:worker0@Fafard) not bound => duration 13.333333 (57221999.999997 flops/s)
+> [10040.000000] (18:worker1@Fafard) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [11000.000000] (1:master_@Fafard) ### Test: 75% for Task1@Fafard, 25% for Task2@Fafard
+> [11013.333333] (19:worker0@Fafard) bound to 57222000.000000 => duration 13.333333 (57221999.999997 flops/s)
+> [11040.000000] (20:worker1@Fafard) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [12000.000000] (1:master_@Fafard)
+> [12000.000000] (1:master_@Fafard) # 3. Put a single task on a VM.
+> [12000.000000] (1:master_@Fafard) ### Test: with/without MSG_task_set_bound
+> [12000.000000] (1:master_@Fafard) ### Test: no bound for Task1@VM0
+> [12010.000000] (21:worker0@VM0) not bound => duration 10.000000 (76296000.000000 flops/s)
+> [13000.000000] (1:master_@Fafard) ### Test: 50% for Task1@VM0
+> [13020.000000] (22:worker0@VM0) bound to 38148000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [14000.000000] (1:master_@Fafard) ### Test: 33% for Task1@VM0
+> [14030.000000] (23:worker0@VM0) bound to 25432000.000000 => duration 30.000000 (25432000.000000 flops/s)
+> [15000.000000] (1:master_@Fafard) ### Test: zero for Task1@VM0 (i.e., unlimited)
+> [15000.000000] (24:worker0@VM0) bound == 0 means no capping (i.e., unlimited).
+> [15010.000000] (24:worker0@VM0) bound to 0.000000 => duration 10.000000 (76296000.000000 flops/s)
+> [16000.000000] (1:master_@Fafard) ### Test: 200% for Task1@VM0 (i.e., meaningless)
+> [16010.000000] (25:worker0@VM0) bound to 152592000.000000 => duration 10.000000 (76296000.000000 flops/s)
+> [17000.000000] (1:master_@Fafard)
+> [17000.000000] (1:master_@Fafard) # 4. Put two tasks on a VM.
+> [17000.000000] (1:master_@Fafard) ### Test: no bound for Task1@VM0, no bound for Task2@VM0
+> [17020.000000] (27:worker1@VM0) not bound => duration 20.000000 (38148000.000000 flops/s)
+> [17020.000000] (26:worker0@VM0) not bound => duration 20.000000 (38148000.000000 flops/s)
+> [18000.000000] (1:master_@Fafard) ### Test: 0 for Task1@VM0, 0 for Task2@VM0 (i.e., unlimited)
+> [18000.000000] (28:worker0@VM0) bound == 0 means no capping (i.e., unlimited).
+> [18000.000000] (29:worker1@VM0) bound == 0 means no capping (i.e., unlimited).
+> [18020.000000] (29:worker1@VM0) bound to 0.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [18020.000000] (28:worker0@VM0) bound to 0.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [19000.000000] (1:master_@Fafard) ### Test: 50% for Task1@VM0, 50% for Task2@VM0
+> [19020.000000] (31:worker1@VM0) bound to 38148000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [19020.000000] (30:worker0@VM0) bound to 38148000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [20000.000000] (1:master_@Fafard) ### Test: 25% for Task1@VM0, 25% for Task2@VM0
+> [20040.000000] (33:worker1@VM0) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [20040.000000] (32:worker0@VM0) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [21000.000000] (1:master_@Fafard) ### Test: 75% for Task1@VM0, 100% for Task2@VM0
+> [21020.000000] (35:worker1@VM0) bound to 76296000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [21020.000000] (34:worker0@VM0) bound to 57222000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [22000.000000] (1:master_@Fafard) ### Test: no bound for Task1@VM0, 25% for Task2@VM0
+> [22013.333333] (36:worker0@VM0) not bound => duration 13.333333 (57222000.000005 flops/s)
+> [22040.000000] (37:worker1@VM0) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [23000.000000] (1:master_@Fafard) ### Test: 75% for Task1@VM0, 25% for Task2@VM0
+> [23013.333333] (38:worker0@VM0) bound to 57222000.000000 => duration 13.333333 (57222000.000005 flops/s)
+> [23040.000000] (39:worker1@VM0) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [24000.000000] (1:master_@Fafard)
+> [24000.000000] (1:master_@Fafard) # 6. Put a task on a PM and a task on a VM.
+> [24000.000000] (1:master_@Fafard) ### Test: no bound for Task1@Fafard, no bound for Task2@VM0
+> [24020.000000] (40:worker0@Fafard) not bound => duration 20.000000 (38148000.000000 flops/s)
+> [24020.000000] (41:worker1@VM0) not bound => duration 20.000000 (38148000.000000 flops/s)
+> [25000.000000] (1:master_@Fafard) ### Test: 0 for Task1@Fafard, 0 for Task2@VM0 (i.e., unlimited)
+> [25000.000000] (42:worker0@Fafard) bound == 0 means no capping (i.e., unlimited).
+> [25000.000000] (43:worker1@VM0) bound == 0 means no capping (i.e., unlimited).
+> [25020.000000] (42:worker0@Fafard) bound to 0.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [25020.000000] (43:worker1@VM0) bound to 0.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [26000.000000] (1:master_@Fafard) ### Test: 50% for Task1@Fafard, 50% for Task2@VM0
+> [26020.000000] (44:worker0@Fafard) bound to 38148000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [26020.000000] (45:worker1@VM0) bound to 38148000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [27000.000000] (1:master_@Fafard) ### Test: 25% for Task1@Fafard, 25% for Task2@VM0
+> [27040.000000] (46:worker0@Fafard) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [27040.000000] (47:worker1@VM0) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [28000.000000] (1:master_@Fafard) ### Test: 75% for Task1@Fafard, 100% for Task2@VM0
+> [28020.000000] (48:worker0@Fafard) bound to 57222000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [28020.000000] (49:worker1@VM0) bound to 76296000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [29000.000000] (1:master_@Fafard) ### Test: no bound for Task1@Fafard, 25% for Task2@VM0
+> [29020.000000] (50:worker0@Fafard) not bound => duration 20.000000 (38148000.000000 flops/s)
+> [29040.000000] (51:worker1@VM0) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [30000.000000] (1:master_@Fafard) ### Test: 75% for Task1@Fafard, 25% for Task2@VM0
+> [30020.000000] (52:worker0@Fafard) bound to 57222000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [30040.000000] (53:worker1@VM0) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [31000.000000] (1:master_@Fafard)
+> [31000.000000] (1:master_@Fafard) # 7. Put a single task on the VM capped by 10%.
+> [31000.000000] (1:master_@Fafard) ### Test: with/without MSG_task_set_bound
+> [31000.000000] (1:master_@Fafard) ### Test: no bound for Task1@VM0
+> [31100.000000] (54:worker0@VM0) not bound => duration 100.000000 (7629600.000000 flops/s)
+> [32000.000000] (1:master_@Fafard) ### Test: 50% for Task1@VM0
+> [32100.000000] (55:worker0@VM0) bound to 38148000.000000 => duration 100.000000 (7629600.000000 flops/s)
+> [33000.000000] (1:master_@Fafard) ### Test: 33% for Task1@VM0
+> [33100.000000] (56:worker0@VM0) bound to 25432000.000000 => duration 100.000000 (7629600.000000 flops/s)
+> [34000.000000] (1:master_@Fafard) ### Test: zero for Task1@VM0 (i.e., unlimited)
+> [34000.000000] (57:worker0@VM0) bound == 0 means no capping (i.e., unlimited).
+> [34100.000000] (57:worker0@VM0) bound to 0.000000 => duration 100.000000 (7629600.000000 flops/s)
+> [35000.000000] (1:master_@Fafard) ### Test: 200% for Task1@VM0 (i.e., meaningless)
+> [35100.000000] (58:worker0@VM0) bound to 152592000.000000 => duration 100.000000 (7629600.000000 flops/s)
+> [36000.000000] (1:master_@Fafard)
+> [36000.000000] (1:master_@Fafard) # 8. Put two tasks on the VM capped by 10%.
+> [36000.000000] (1:master_@Fafard) ### Test: no bound for Task1@VM0, no bound for Task2@VM0
+> [36200.000000] (60:worker1@VM0) not bound => duration 200.000000 (3814800.000000 flops/s)
+> [36200.000000] (59:worker0@VM0) not bound => duration 200.000000 (3814800.000000 flops/s)
+> [37000.000000] (1:master_@Fafard) ### Test: 0 for Task1@VM0, 0 for Task2@VM0 (i.e., unlimited)
+> [37000.000000] (61:worker0@VM0) bound == 0 means no capping (i.e., unlimited).
+> [37000.000000] (62:worker1@VM0) bound == 0 means no capping (i.e., unlimited).
+> [37200.000000] (62:worker1@VM0) bound to 0.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [37200.000000] (61:worker0@VM0) bound to 0.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [38000.000000] (1:master_@Fafard) ### Test: 50% for Task1@VM0, 50% for Task2@VM0
+> [38200.000000] (64:worker1@VM0) bound to 38148000.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [38200.000000] (63:worker0@VM0) bound to 38148000.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [39000.000000] (1:master_@Fafard) ### Test: 25% for Task1@VM0, 25% for Task2@VM0
+> [39200.000000] (66:worker1@VM0) bound to 19074000.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [39200.000000] (65:worker0@VM0) bound to 19074000.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [40000.000000] (1:master_@Fafard) ### Test: 75% for Task1@VM0, 100% for Task2@VM0
+> [40200.000000] (68:worker1@VM0) bound to 76296000.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [40200.000000] (67:worker0@VM0) bound to 57222000.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [41000.000000] (1:master_@Fafard) ### Test: no bound for Task1@VM0, 25% for Task2@VM0
+> [41200.000000] (70:worker1@VM0) bound to 19074000.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [41200.000000] (69:worker0@VM0) not bound => duration 200.000000 (3814800.000000 flops/s)
+> [42000.000000] (1:master_@Fafard) ### Test: 75% for Task1@VM0, 25% for Task2@VM0
+> [42200.000000] (72:worker1@VM0) bound to 19074000.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [42200.000000] (71:worker0@VM0) bound to 57222000.000000 => duration 200.000000 (3814800.000000 flops/s)
+> [43000.000000] (1:master_@Fafard)
+> [43000.000000] (1:master_@Fafard) # 9. Put a task on a PM and a task on the VM capped by 10%.
+> [43000.000000] (1:master_@Fafard) ### Test: no bound for Task1@Fafard, no bound for Task2@VM0
+> [43011.111111] (73:worker0@Fafard) not bound => duration 11.111111 (68666400.000010 flops/s)
+> [43100.000000] (74:worker1@VM0) not bound => duration 100.000000 (7629600.000000 flops/s)
+> [44000.000000] (1:master_@Fafard) ### Test: 0 for Task1@Fafard, 0 for Task2@VM0 (i.e., unlimited)
+> [44000.000000] (75:worker0@Fafard) bound == 0 means no capping (i.e., unlimited).
+> [44000.000000] (76:worker1@VM0) bound == 0 means no capping (i.e., unlimited).
+> [44011.111111] (75:worker0@Fafard) bound to 0.000000 => duration 11.111111 (68666400.000010 flops/s)
+> [44100.000000] (76:worker1@VM0) bound to 0.000000 => duration 100.000000 (7629600.000000 flops/s)
+> [45000.000000] (1:master_@Fafard) ### Test: 50% for Task1@Fafard, 50% for Task2@VM0
+> [45020.000000] (77:worker0@Fafard) bound to 38148000.000000 => duration 20.000000 (38148000.000000 flops/s)
+> [45100.000000] (78:worker1@VM0) bound to 38148000.000000 => duration 100.000000 (7629600.000000 flops/s)
+> [46000.000000] (1:master_@Fafard) ### Test: 25% for Task1@Fafard, 25% for Task2@VM0
+> [46040.000000] (79:worker0@Fafard) bound to 19074000.000000 => duration 40.000000 (19074000.000000 flops/s)
+> [46100.000000] (80:worker1@VM0) bound to 19074000.000000 => duration 100.000000 (7629600.000000 flops/s)
+> [47000.000000] (1:master_@Fafard) ### Test: 75% for Task1@Fafard, 100% for Task2@VM0
+> [47013.333333] (81:worker0@Fafard) bound to 57222000.000000 => duration 13.333333 (57221999.999990 flops/s)
+> [47100.000000] (82:worker1@VM0) bound to 76296000.000000 => duration 100.000000 (7629600.000000 flops/s)
+> [48000.000000] (1:master_@Fafard) ### Test: no bound for Task1@Fafard, 25% for Task2@VM0
+> [48011.111111] (83:worker0@Fafard) not bound => duration 11.111111 (68666400.000010 flops/s)
+> [48100.000000] (84:worker1@VM0) bound to 19074000.000000 => duration 100.000000 (7629600.000000 flops/s)
+> [49000.000000] (1:master_@Fafard) ### Test: 75% for Task1@Fafard, 25% for Task2@VM0
+> [49013.333333] (85:worker0@Fafard) bound to 57222000.000000 => duration 13.333333 (57221999.999990 flops/s)
+> [49100.000000] (86:worker1@VM0) bound to 19074000.000000 => duration 100.000000 (7629600.000000 flops/s)
+> [50000.000000] (1:master_@Fafard)
+> [50000.000000] (1:master_@Fafard) # 10. Test migration
+> [50000.000000] (1:master_@Fafard) # 10. (a) Put a task on a VM without any bound.
+> [50010.000000] (87:worker0@VM0) not bound => duration 10.000000 (76296000.000000 flops/s)
+> [51000.000000] (1:master_@Fafard)
+> [51000.000000] (1:master_@Fafard) # 10. (b) set 10% bound to the VM, and then put a task on the VM.
+> [51100.000000] (88:worker0@VM0) not bound => duration 100.000000 (7629600.000000 flops/s)
+> [52000.000000] (1:master_@Fafard)
+> [52000.000000] (1:master_@Fafard) # 10. (c) migrate
+> [52000.000000] (90:__pr_mig_tx:VM0(Fafard-Fafard)@Fafard) use the default max_downtime value 30ms
+> [52002.070722] (1:master_@Fafard)
+> [52002.070722] (1:master_@Fafard) # 10. (d) Put a task again on the VM.
+> [52102.070722] (91:worker0@VM0) not bound => duration 100.000000 (7629600.000000 flops/s)
+> [53002.070722] (1:master_@Fafard)
+> [53002.070722] (1:master_@Fafard) # 11. Change a bound dynamically.
+> [53002.070722] (93:worker1@VM1) set bound of VM1 to 0.000000
+> [53102.070722] (93:worker1@VM1) Task1@VM1: 38148000 flops/s
+> [53102.070722] (92:worker0@VM0) Task0@VM0: 38148000 flops/s
+> [53103.070722] (93:worker1@VM1) set bound of VM1 to 7629600.000000
+> [53203.070722] (93:worker1@VM1) Task1@VM1: 8011080 flops/s
+> [53203.070722] (92:worker0@VM0) Task0@VM0: 69047880 flops/s
+> [53204.070722] (93:worker1@VM1) set bound of VM1 to 15259200.000000
+> [53304.070722] (93:worker1@VM1) Task1@VM1: 15335496 flops/s
+> [53304.070722] (92:worker0@VM0) Task0@VM0: 61723464 flops/s
+> [53305.070722] (93:worker1@VM1) set bound of VM1 to 22888800.000000
+> [53405.070722] (93:worker1@VM1) Task1@VM1: 23041392 flops/s
+> [53405.070722] (92:worker0@VM0) Task0@VM0: 54017568 flops/s
+> [53406.070722] (93:worker1@VM1) set bound of VM1 to 30518400.000000
+> [53506.070722] (93:worker1@VM1) Task1@VM1: 30747288 flops/s
+> [53506.070722] (92:worker0@VM0) Task0@VM0: 46311672 flops/s
+> [53507.070722] (93:worker1@VM1) set bound of VM1 to 38148000.000000
+> [53607.070722] (93:worker1@VM1) Task1@VM1: 38453184 flops/s
+> [53607.070722] (92:worker0@VM0) Task0@VM0: 38605776 flops/s
+> [53608.070722] (93:worker1@VM1) set bound of VM1 to 45777600.000000
+> [53708.070722] (93:worker1@VM1) Task1@VM1: 38529480 flops/s
+> [53708.070722] (92:worker0@VM0) Task0@VM0: 38529480 flops/s
+> [53709.070722] (93:worker1@VM1) set bound of VM1 to 53407200.000000
+> [53809.070722] (93:worker1@VM1) Task1@VM1: 38529480 flops/s
+> [53809.070722] (92:worker0@VM0) Task0@VM0: 38529480 flops/s
+> [53810.070722] (93:worker1@VM1) set bound of VM1 to 61036800.000000
+> [53910.070722] (93:worker1@VM1) Task1@VM1: 38529480 flops/s
+> [53910.070722] (92:worker0@VM0) Task0@VM0: 38529480 flops/s
+> [53911.070722] (93:worker1@VM1) set bound of VM1 to 68666400.000000
+> [54011.070722] (93:worker1@VM1) Task1@VM1: 38529480 flops/s
+> [54011.070722] (92:worker0@VM0) Task0@VM0: 38529480 flops/s
+> [56002.070722] (0:maestro@) Bye (simulation time 56002.1)
bool test();
ExecPtr setPriority(double priority);
+ ExecPtr setBound(double bound);
ExecPtr setHost(Host * host);
Host* getHost() { return host_; }
Host* host_ = nullptr;
double flops_amount_ = 0.0;
double priority_ = 1.0;
-
+ double bound_ = 0.0;
std::atomic_int_fast32_t refcount_{0};
}; // class
}
#include "src/surf/surf_interface.hpp"
#include "src/surf/cpu_interface.hpp"
-
#include "simgrid/s4u/Host.hpp"
XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(simix_process);
timeoutDetector->unref();
XBT_DEBUG("Destroy exec %p", this);
}
+
void simgrid::kernel::activity::ExecImpl::suspend()
{
XBT_VERB("This exec is suspended (remain: %f)", surfAction_->getRemains());
return surfAction_ ? surfAction_->getRemains() : 0;
}
+
double simgrid::kernel::activity::ExecImpl::remainingRatio()
{
if (host_ == nullptr) // parallel task: their remain is already between 0 and 1 (see comment in ExecImpl::remains())
return surfAction_->getRemains() / surfAction_->getCost();
}
+/* Forward the flops/s cap to the underlying surf action.
+ * Silently a no-op when no surf action exists (the check guards against a
+ * null surfAction_; presumably an exec not yet started or already finished
+ * — confirm against the ExecImpl lifecycle). */
+void simgrid::kernel::activity::ExecImpl::setBound(double bound)
+{
+  if (surfAction_)
+    surfAction_->setBound(bound);
+}
+
void simgrid::kernel::activity::ExecImpl::post()
{
if (host_ && host_->isOff()) { /* FIXME: handle resource failure for parallel tasks too */
return this;
}
-
/*************
* Callbacks *
*************/
void post() override;
double remains();
double remainingRatio();
+ void setBound(double bound);
virtual ActivityImpl* migrate(s4u::Host* to);
- /* The host where the execution takes place. If nullptr, then this is a parallel exec (and only surf
- knows the hosts) */
+ /* The host where the execution takes place. nullptr means this is a parallel exec (and only surf knows the hosts) */
sg_host_t host_ = nullptr;
surf_action_t surfAction_ = nullptr; /* The Surf execution action encapsulated */
surf::Action* timeoutDetector = nullptr;
Activity* Exec::start()
{
pimpl_ = simcall_execution_start(nullptr, flops_amount_, 1 / priority_, 0., host_);
+ boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(pimpl_)->setBound(bound_);
state_ = started;
return this;
}
priority_ = priority;
return this;
}
+
+/* Cap the speed at which this execution will progress.
+ * Only legal before start(): the value is stored in bound_ and handed to the
+ * kernel activity when start() creates it; changing it afterwards is rejected
+ * by the assertion below. Returns this, so calls can be chained. */
+ExecPtr Exec::setBound(double bound)
+{
+  xbt_assert(state_ == inited, "Cannot change the bound of an exec after its start");
+  bound_ = bound;
+  return this;
+}
+
ExecPtr Exec::setHost(Host* host)
{
xbt_assert(state_ == inited || state_ == started, "Cannot change the host of an exec once it's done (state: %d)", state_);
foreach(x actions-comm actions-storage
app-pingpong app-token-ring
async-wait async-waitall async-waitany
- cloud-sharing cloud-two-tasks cloud-simple
- get_sender host_on_off host_on_off_recv host_on_off_processes
+ cloud-capping cloud-sharing cloud-two-tasks cloud-simple
+ get_sender host_on_off host_on_off_recv host_on_off_processes
process-daemon process-kill process-join process-lifetime process-migration process-suspend process-yield
energy-consumption energy-ptask energy-pstate platform-properties
io-file io-raw-storage io-file-remote
- task-priority
- plugin-hostload
+ task-priority
+ plugin-hostload
trace_integration)
add_executable (${x} ${x}/${x}.c)
target_link_libraries(${x} simgrid)
actions-comm actions-storage
async-wait async-waitall async-waitany
app-bittorrent app-chainsend app-pingpong app-token-ring
- cloud-two-tasks cloud-simple
+ cloud-capping cloud-two-tasks cloud-simple
energy-pstate
host_on_off host_on_off_processes host_on_off_recv
get_sender
* - <b>cloud/bound.c</b>: Demonstrates the use of @ref MSG_task_set_bound
*/
-static int worker_main(int argc, char *argv[])
+static int worker_main(int argc, char* argv[])
{
double computation_amount = xbt_str_parse_double(argv[1], "Invalid computation amount: %s");
- int use_bound = xbt_str_parse_int(argv[2], "Second parameter (use_bound) should be 0 or 1 but is: %s");
- double bound = xbt_str_parse_double(argv[3], "Invalid bound: %s");
+ int use_bound = xbt_str_parse_int(argv[2], "Second parameter (use_bound) should be 0 or 1 but is: %s");
+ double bound = xbt_str_parse_double(argv[3], "Invalid bound: %s");
double clock_sta = MSG_get_clock();
msg_task_t task = MSG_task_create("Task", computation_amount, 0, NULL);
if (use_bound)
- MSG_task_set_bound(task, bound);
+ MSG_task_set_bound(task, bound);
MSG_task_execute(task);
MSG_task_destroy(task);
- double clock_end = MSG_get_clock();
- double duration = clock_end - clock_sta;
+ double clock_end = MSG_get_clock();
+ double duration = clock_end - clock_sta;
double flops_per_sec = computation_amount / duration;
if (use_bound)
return 0;
}
-static void launch_worker(msg_host_t host, const char *pr_name, double computation_amount, int use_bound, double bound)
+static void launch_worker(msg_host_t host, const char* pr_name, double computation_amount, int use_bound, double bound)
{
- char **argv = xbt_new(char *, 5);
- argv[0] = xbt_strdup(pr_name);
- argv[1] = bprintf("%f", computation_amount);
- argv[2] = bprintf("%d", use_bound);
- argv[3] = bprintf("%f", bound);
- argv[4] = NULL;
+ char** argv = xbt_new(char*, 5);
+ argv[0] = xbt_strdup(pr_name);
+ argv[1] = bprintf("%f", computation_amount);
+ argv[2] = bprintf("%d", use_bound);
+ argv[3] = bprintf("%f", bound);
+ argv[4] = NULL;
MSG_process_create_with_arguments(pr_name, worker_main, NULL, host, 4, argv);
}
-static int worker_busy_loop_main(int argc, char *argv[])
+static int worker_busy_loop_main(int argc, char* argv[])
{
- msg_task_t *task = MSG_process_get_data(MSG_process_self());
+ msg_task_t* task = MSG_process_get_data(MSG_process_self());
MSG_task_execute(*task);
MSG_task_destroy(*task);
static void test_one_task(msg_host_t hostA)
{
- const double cpu_speed = MSG_host_get_speed(hostA);
+ const double cpu_speed = MSG_host_get_speed(hostA);
const double computation_amount = cpu_speed * 10;
- const char *hostA_name = MSG_host_get_name(hostA);
+ const char* hostA_name = MSG_host_get_name(hostA);
XBT_INFO("### Test: with/without MSG_task_set_bound");
const double cpu_speed = MSG_host_get_speed(hostA);
xbt_assert(cpu_speed == MSG_host_get_speed(hostB));
const double computation_amount = cpu_speed * 10;
- const char *hostA_name = MSG_host_get_name(hostA);
- const char *hostB_name = MSG_host_get_name(hostB);
+ const char* hostA_name = MSG_host_get_name(hostA);
+ const char* hostB_name = MSG_host_get_name(hostB);
XBT_INFO("### Test: no bound for Task1@%s, no bound for Task2@%s", hostA_name, hostB_name);
launch_worker(hostA, "worker0", computation_amount, 0, 0);
MSG_process_sleep(1000);
}
-static int master_main(int argc, char *argv[])
+static int master_main(int argc, char* argv[])
{
msg_host_t pm0 = MSG_host_by_name("Fafard");
msg_host_t pm1 = MSG_host_by_name("Fafard");
MSG_vm_destroy(vm0);
- vm0 = MSG_vm_create_core(pm0, "VM0");
+ vm0 = MSG_vm_create_core(pm0, "VM0");
double cpu_speed = MSG_host_get_speed(pm0);
MSG_vm_set_bound(vm0, cpu_speed / 10);
MSG_vm_start(vm0);
static void launch_master(msg_host_t host)
{
- const char *pr_name = "master_";
- char **argv = xbt_new(char *, 2);
- argv[0] = xbt_strdup(pr_name);
- argv[1] = NULL;
+ const char* pr_name = "master_";
+ char** argv = xbt_new(char*, 2);
+ argv[0] = xbt_strdup(pr_name);
+ argv[1] = NULL;
MSG_process_create_with_arguments(pr_name, master_main, NULL, host, 1, argv);
}
-int main(int argc, char *argv[])
+int main(int argc, char* argv[])
{
/* Get the arguments */
MSG_init(&argc, argv);