node-1.1core.org --[l1]-- router -- (outer world)
... /
node-7.1core.org --[l9]--
-
So the route from node-0 to node-1 is {l0.UP, l1.DOWN}
-->
- <cluster id="simple" prefix="node-" radical="0-7" suffix=".1core.org" speed="1Gf" bw="125MBps" lat="50us" />
+ <cluster id="simple" prefix="node-" radical="0-7" suffix=".1core.org" speed="1Gf" bw="125MBps" lat="50us">
+ <prop id="watt_per_state" value="0.0:1.0" />
+ <prop id="watt_off" value="0.0" />
+ </cluster>
<!-- This second cluster has a backbone link, connecting all private links:
-
node-0.2cores.org --[l0]-------+
|
node-1.2cores.org --[l1]--[backbone]-- router -- (outer world)
... |
node-7.2cores.org --[l7]-------+
-
-
     The route from node-0 to node-1 is: l0.UP ; backbone ; l1.DOWN
     The route from node-0 to the outer world begins with: l0.UP ; backbone
-->
<cluster id="backboned" prefix="node-" radical="0-7" suffix=".2cores.org"
speed="1Gf" core="2"
bw="125MBps" lat="50us"
- bb_bw="2.25GBps" bb_lat="500us"/>
+ bb_bw="2.25GBps" bb_lat="500us">
+ <prop id="watt_per_state" value="0.0:1.0:2.0" />
+ <prop id="watt_off" value="0.0" />
+ </cluster>
<!-- This cluster has a backbone link, but no links are splitduplex.
-->
<cluster id="halfduplex" prefix="node-" radical="0-7" suffix=".4cores.org" speed="1Gf" core="4"
bw="125MBps" lat="50us" sharing_policy="SHARED"
- bb_bw="2.25GBps" bb_lat="500us" bb_sharing_policy="SHARED" />
-
+ bb_bw="2.25GBps" bb_lat="500us" bb_sharing_policy="SHARED">
+ <prop id="watt_per_state" value="0.0:1.0:4.0" />
+ <prop id="watt_off" value="0.0" />
+ </cluster>
<!-- And now, we create the routes between the clusters, ie inter-zone routes -->
XBT_VERB("This exec is suspended (remain: %f)", surf_action_->get_remains());
if (surf_action_ != nullptr)
surf_action_->suspend();
+ on_suspended(this);
}
void simgrid::kernel::activity::ExecImpl::resume()
XBT_VERB("This exec is resumed (remain: %f)", surf_action_->get_remains());
if (surf_action_ != nullptr)
surf_action_->resume();
+ on_resumed(this);
}
void simgrid::kernel::activity::ExecImpl::cancel()
{
*************/
simgrid::xbt::signal<void(simgrid::kernel::activity::ExecImplPtr)> simgrid::kernel::activity::ExecImpl::on_creation;
simgrid::xbt::signal<void(simgrid::kernel::activity::ExecImplPtr)> simgrid::kernel::activity::ExecImpl::on_completion;
+simgrid::xbt::signal<void(simgrid::kernel::activity::ExecImplPtr)> simgrid::kernel::activity::ExecImpl::on_resumed;
+simgrid::xbt::signal<void(simgrid::kernel::activity::ExecImplPtr)> simgrid::kernel::activity::ExecImpl::on_suspended;
simgrid::xbt::signal<void(simgrid::kernel::activity::ExecImplPtr, simgrid::s4u::Host*)>
simgrid::kernel::activity::ExecImpl::on_migration;
public:
static simgrid::xbt::signal<void(kernel::activity::ExecImplPtr)> on_creation;
static simgrid::xbt::signal<void(kernel::activity::ExecImplPtr)> on_completion;
+ static simgrid::xbt::signal<void(kernel::activity::ExecImplPtr)> on_suspended;
+ static simgrid::xbt::signal<void(kernel::activity::ExecImplPtr)> on_resumed;
static simgrid::xbt::signal<void(simgrid::kernel::activity::ExecImplPtr, simgrid::s4u::Host*)> on_migration;
};
}
#include "src/include/surf/surf.hpp"
#include "src/simix/ActorImpl.hpp"
#include "src/simix/smx_host_private.hpp"
+#include "src/kernel/activity/ExecImpl.hpp"
#include "xbt/asserts.h" // xbt_log_no_loc
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_vm, surf, "Logging specific to the SURF VM module");
}
}
+static void addActiveTask(kernel::activity::ExecImplPtr exec)
+{
+ s4u::VirtualMachine *vm = dynamic_cast<s4u::VirtualMachine*>(exec->host_);
+ if (vm != nullptr) {
+ VirtualMachineImpl *vm_impl = vm->get_impl();
+ vm_impl->active_tasks_ = vm_impl->active_tasks_ + 1;
+ vm_impl->update_action_weight();
+ }
+}
+
+static void removeActiveTask(kernel::activity::ExecImplPtr exec)
+{
+ s4u::VirtualMachine *vm = dynamic_cast<s4u::VirtualMachine*>(exec->host_);
+ if (vm != nullptr) {
+ VirtualMachineImpl *vm_impl = vm->get_impl();
+ vm_impl->active_tasks_ = vm_impl->active_tasks_ - 1;
+ vm_impl->update_action_weight();
+ }
+}
+
VMModel::VMModel()
{
all_existing_models.push_back(this);
s4u::Host::on_state_change.connect(hostStateChange);
+ kernel::activity::ExecImpl::on_creation.connect(addActiveTask);
+ kernel::activity::ExecImpl::on_completion.connect(removeActiveTask);
+ kernel::activity::ExecImpl::on_resumed.connect(addActiveTask);
+ kernel::activity::ExecImpl::on_suspended.connect(removeActiveTask);
}
double VMModel::next_occuring_event(double now)
vcpu_system->update_constraint_bound(cpu->get_constraint(), virt_overhead * solved_value);
}
- /* 2. Calculate resource share at the virtual machine layer. */
- ignore_empty_vm_in_pm_LMM();
-
- /* 3. Ready. Get the next occurring event */
+ /* 2. Ready. Get the next occurring event */
return surf_cpu_model_vm->next_occuring_event(now);
}
VirtualMachineImpl::VirtualMachineImpl(simgrid::s4u::VirtualMachine* piface, simgrid::s4u::Host* host_PM,
int core_amount, size_t ramsize)
- : HostImpl(piface), physical_host_(host_PM), core_amount_(core_amount), ramsize_(ramsize)
+ : HostImpl(piface), physical_host_(host_PM), core_amount_(core_amount), user_bound_(std::numeric_limits<double>::max()), ramsize_(ramsize)
{
/* Register this VM to the list of all VMs */
allVms_.push_back(piface);
/* TODO: we have to periodically input GUESTOS_NOISE to the system? how ? */
action_ = host_PM->pimpl_cpu->execution_start(0, core_amount);
+  // The VM starts with no active task, so its dummy CPU action must not claim any share of the PM yet
+ update_action_weight();
+
XBT_VERB("Create VM(%s)@PM(%s)", piface->get_cname(), physical_host_->get_cname());
on_creation(this);
}
void VirtualMachineImpl::set_bound(double bound)
{
- action_->set_bound(bound);
+ user_bound_ = bound;
+ update_action_weight();
+}
+
+void VirtualMachineImpl::update_action_weight(){
+ /* The impact of the VM over its PM is the min between its vCPU amount and the amount of tasks it contains */
+ int impact = std::min(active_tasks_, get_core_amount());
+
+ XBT_DEBUG("set the weight of the dummy CPU action of VM%p on PM to %d (#tasks: %d)", this, impact, active_tasks_);
+
+ if (impact > 0)
+ action_->set_priority(1. / impact);
+ else
+ action_->set_priority(0.);
+
+ action_->set_bound(std::min(impact * physical_host_->get_speed(), user_bound_));
}
}
kernel::resource::Action* action_ = nullptr;
static std::deque<s4u::VirtualMachine*> allVms_;
bool is_migrating_ = false;
+ int active_tasks_ = 0;
+
+ void update_action_weight();
private:
s4u::Host* physical_host_;
int core_amount_;
+ double user_bound_;
size_t ramsize_ = 0;
s4u::VirtualMachine::state vm_state_ = s4u::VirtualMachine::state::CREATED;
};
class XBT_PRIVATE VMModel : public surf::HostModel {
public:
VMModel();
- void ignore_empty_vm_in_pm_LMM() override{};
double next_occuring_event(double now) override;
void update_actions_state(double /*now*/, double /*delta*/) override{};
* Model *
*********/
-/* Each VM has a dummy CPU action on the PM layer. This CPU action works as the constraint (capacity) of the VM in the
- * PM layer. If the VM does not have any active task, the dummy CPU action must be deactivated, so that the VM does not
- * get any CPU share in the PM layer. */
-void HostModel::ignore_empty_vm_in_pm_LMM()
-{
- /* iterate for all virtual machines */
- for (s4u::VirtualMachine* const& ws_vm : vm::VirtualMachineImpl::allVms_) {
- Cpu* cpu = ws_vm->pimpl_cpu;
- int active_tasks = cpu->get_constraint()->get_variable_amount();
-
- /* The impact of the VM over its PM is the min between its vCPU amount and the amount of tasks it contains */
- int impact = std::min(active_tasks, ws_vm->get_impl()->get_core_amount());
-
- XBT_DEBUG("set the weight of the dummy CPU action of VM%p on PM to %d (#tasks: %d)", ws_vm, impact, active_tasks);
- if (impact > 0)
- ws_vm->get_impl()->action_->set_priority(1. / impact);
- else
- ws_vm->get_impl()->action_->set_priority(0.);
- }
-}
-
/* Helper function for executeParallelTask */
static inline double has_cost(double* array, int pos)
{
public:
HostModel() : Model(Model::UpdateAlgo::FULL) {}
- virtual void ignore_empty_vm_in_pm_LMM();
virtual kernel::resource::Action* execute_parallel(int host_nb, sg_host_t* host_list, double* flops_amount,
double* bytes_amount, double rate);
};
}
double HostCLM03Model::next_occuring_event(double now)
{
- ignore_empty_vm_in_pm_LMM();
-
double min_by_cpu = surf_cpu_model_pm->next_occuring_event(now);
double min_by_net =
surf_network_model->next_occuring_event_is_idempotent() ? surf_network_model->next_occuring_event(now) : -1;
# C examples
foreach(x app-pingpong app-token-ring
async-wait async-waitall async-waitany
- cloud-capping cloud-migration cloud-sharing cloud-two-tasks cloud-simple
+ cloud-capping cloud-migration cloud-two-tasks cloud-simple
get_sender host_on_off host_on_off_recv
process-daemon process-kill process-join process-lifetime process-migration process-suspend process-yield
energy-consumption energy-ptask energy-pstate platform-properties
${CMAKE_HOME_DIRECTORY}/teshsuite/msg/${x}/${x}.tesh)
endforeach()
-foreach(x cloud-sharing)
- ADD_TESH(tesh-msg-${x} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/msg/${x} --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_BINARY_DIR}/teshsuite/msg/${x} ${CMAKE_HOME_DIRECTORY}/teshsuite/msg/${x}/${x}.tesh)
-endforeach()
if(HAVE_SANITIZER_THREAD)
ADD_TESH_FACTORIES(tesh-app-bittorrent-parallel "thread" --cfg contexts/nthreads:4 ${CONTEXTS_SYNCHRO} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/msg/app-bittorrent --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/msg/app-bittorrent app-bittorrent.tesh)
+++ /dev/null
-/* Copyright (c) 2007-2018. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/msg.h"
-XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
-
-const int FAIL_ON_ERROR = 0;
-const int flop_amount = 100000000;
-int failed_test = 0;
-
-static int computation_fun(int argc, char* argv[])
-{
- int *size = MSG_process_get_data(MSG_process_self());
- msg_task_t task = MSG_task_create("Task", *size, 0, NULL);
-
- double begin = MSG_get_clock();
- MSG_task_execute(task);
- MSG_task_destroy(task);
- double end = MSG_get_clock();
-
- if (0.1 - (end - begin) > 0.001) {
- xbt_assert(! FAIL_ON_ERROR, "%s with %.4g load (%dflops) took %.4fs instead of 0.1s",
- MSG_process_get_name(MSG_process_self()), ((double)*size/flop_amount),*size, (end-begin));
- XBT_INFO("FAILED TEST: %s with %.4g load (%dflops) took %.4fs instead of 0.1s",
- MSG_process_get_name(MSG_process_self()),((double)*size/flop_amount), *size, (end-begin));
- failed_test ++;
- } else {
- XBT_INFO("Passed: %s with %.4g load (%dflops) took 0.1s as expected",
- MSG_process_get_name(MSG_process_self()), ((double)*size/flop_amount), *size);
- }
-
- free(size);
-
- return 0;
-}
-
-static void run_test_process(const char* name, msg_host_t location, int size)
-{
- int* data = xbt_new(int, 1);
- *data = size;
- MSG_process_create(name, computation_fun, data, location);
-}
-
-static void run_test(const char* chooser)
-{
- msg_host_t pm0 = MSG_host_by_name("node-0.1core.org");
- msg_host_t pm1 = MSG_host_by_name("node-1.1core.org");
- msg_host_t pm2 = MSG_host_by_name("node-0.2cores.org"); // 2 cores
- msg_host_t pm4 = MSG_host_by_name("node-0.4cores.org");
-
- msg_vm_t vm0;
- xbt_assert(pm0, "Host node-0.1core.org does not seem to exist");
- xbt_assert(pm2, "Host node-0.2cores.org does not seem to exist");
- xbt_assert(pm4, "Host node-0.4cores.org does not seem to exist");
-
- // syntax of the process name:
- // "( )1" means PM with one core; "( )2" means PM with 2 cores
- // "( [ ]2 )4" means a VM with 2 cores, on a PM with 4 cores.
- // "o" means another process is there
- // "X" means the process which holds this name
-
- if (!strcmp(chooser, "(o)1")) {
- XBT_INFO("### Test '%s'. A task on a regular PM", chooser);
- run_test_process("(X)1", pm0, flop_amount);
- MSG_process_sleep(2);
-
- } else if (!strcmp(chooser, "(oo)1")) {
- XBT_INFO("### Test '%s'. 2 tasks on a regular PM", chooser);
- run_test_process("(Xo)1", pm0, flop_amount / 2);
- run_test_process("(oX)1", pm0, flop_amount / 2);
- MSG_process_sleep(2);
-
- } else if (!strcmp(chooser, "(o)1 (o)1")) {
- XBT_INFO("### Test '%s'. 2 regular PMs, with a task each.", chooser);
- run_test_process("(X)1 (o)1", pm0, flop_amount);
- run_test_process("(o)1 (X)1", pm1, flop_amount);
- MSG_process_sleep(2);
-
- } else if (!strcmp(chooser, "( [o]1 )1")) {
- XBT_INFO("### Test '%s'. A task in a VM on a PM.", chooser);
- vm0 = MSG_vm_create_core(pm0, "VM0");
- MSG_vm_start(vm0);
- run_test_process("( [X]1 )1", (msg_host_t)vm0, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [oo]1 )1")) {
- XBT_INFO("### Test '%s'. 2 tasks co-located in a VM on a PM.", chooser);
- vm0 = MSG_vm_create_core(pm0, "VM0");
- MSG_vm_start(vm0);
- run_test_process("( [Xo]1 )1", (msg_host_t)vm0, flop_amount / 2);
- run_test_process("( [oX]1 )1", (msg_host_t)vm0, flop_amount / 2);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ ]1 o )1")) {
- XBT_INFO("### Test '%s'. 1 task collocated with an empty VM", chooser);
- vm0 = MSG_vm_create_core(pm0, "VM0");
- MSG_vm_start(vm0);
- run_test_process("( [ ]1 X )1", pm0, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [o]1 o )1")) {
- XBT_INFO("### Test '%s'. A task in a VM, plus a task", chooser);
- vm0 = MSG_vm_create_core(pm0, "VM0");
- MSG_vm_start(vm0);
- run_test_process("( [X]1 o )1", (msg_host_t)vm0, flop_amount / 2);
- run_test_process("( [o]1 X )1", pm0, flop_amount / 2);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [oo]1 o )1")) {
- XBT_INFO("### Test '%s'. 2 tasks in a VM, plus a task", chooser);
- vm0 = MSG_vm_create_core(pm0, "VM0");
- MSG_vm_start(vm0);
- run_test_process("( [Xo]1 o )1", (msg_host_t)vm0, flop_amount / 4);
- run_test_process("( [oX]1 o )1", (msg_host_t)vm0, flop_amount / 4);
- run_test_process("( [oo]1 X )1", pm0, flop_amount / 2);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( o )2")) {
- XBT_INFO("### Test '%s'. A task on bicore PM", chooser);
- run_test_process("(X)2", pm2, flop_amount);
- MSG_process_sleep(2);
-
- } else if (!strcmp(chooser, "( oo )2")) {
- XBT_INFO("### Test '%s'. 2 tasks on a bicore PM", chooser);
- run_test_process("(Xx)2", pm2, flop_amount);
- run_test_process("(xX)2", pm2, flop_amount);
- MSG_process_sleep(2);
-
- } else if (!strcmp(chooser, "( ooo )2")) {
- XBT_INFO("### Test '%s'. 3 tasks on a bicore PM", chooser);
- run_test_process("(Xxx)2", pm2, flop_amount * 2 / 3);
- run_test_process("(xXx)2", pm2, flop_amount * 2 / 3);
- run_test_process("(xxX)2", pm2, flop_amount * 2 / 3);
- MSG_process_sleep(2);
-
- } else if (!strcmp(chooser, "( [o]1 )2")) {
- XBT_INFO("### Test '%s'. A task in a VM on a bicore PM", chooser);
- vm0 = MSG_vm_create_core(pm2, "VM0");
- MSG_vm_start(vm0);
- run_test_process("( [X]1 )2", (msg_host_t)vm0, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [oo]1 )2")) {
- XBT_INFO("### Test '%s'. 2 tasks in a VM on a bicore PM", chooser);
- vm0 = MSG_vm_create_core(pm2, "VM0");
- MSG_vm_start(vm0);
- run_test_process("( [Xx]1 )2", (msg_host_t)vm0, flop_amount / 2);
- run_test_process("( [xX]1 )2", (msg_host_t)vm0, flop_amount / 2);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ ]1 o )2")) {
- XBT_INFO("### Put a VM on a PM, and put a task to the PM");
- vm0 = MSG_vm_create_core(pm2, "VM0");
- MSG_vm_start(vm0);
- run_test_process("( [ ]1 X )2", pm2, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [o]1 o )2")) {
- XBT_INFO("### Put a VM on a PM, put a task to the PM and a task to the VM");
- vm0 = MSG_vm_create_core(pm2, "VM0");
- MSG_vm_start(vm0);
- run_test_process("( [X]1 x )2", (msg_host_t)vm0, flop_amount);
- run_test_process("( [x]1 X )2", pm2, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [o]1 [ ]1 )2")) {
- XBT_INFO("### Put two VMs on a PM, and put a task to one VM");
- vm0 = MSG_vm_create_core(pm2, "VM0");
- msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
- MSG_vm_start(vm0);
- MSG_vm_start(vm1);
- run_test_process("( [X]1 [ ]1 )2", (msg_host_t)vm0, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
-
- } else if (!strcmp(chooser, "( [o]1 [o]1 )2")) {
- XBT_INFO("### Put two VMs on a PM, and put a task to each VM");
- vm0 = MSG_vm_create_core(pm2, "VM0");
- msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
- MSG_vm_start(vm0);
- MSG_vm_start(vm1);
- run_test_process("( [X]1 [x]1 )2", (msg_host_t)vm0, flop_amount);
- run_test_process("( [x]1 [X]1 )2", (msg_host_t)vm1, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
-
- } else if (!strcmp(chooser, "( [o]1 [o]1 [ ]1 )2")) {
- XBT_INFO("### Put three VMs on a PM, and put a task to two VMs");
- vm0 = MSG_vm_create_core(pm2, "VM0");
- msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
- msg_vm_t vm2 = MSG_vm_create_core(pm2, "VM2");
- MSG_vm_start(vm0);
- MSG_vm_start(vm1);
- MSG_vm_start(vm2);
- run_test_process("( [X]1 [x]1 [ ]1 )2", (msg_host_t)vm0, flop_amount);
- run_test_process("( [x]1 [X]1 [ ]1 )2", (msg_host_t)vm1, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- MSG_vm_destroy(vm2);
-
- } else if (!strcmp(chooser, "( [o]1 [o]1 [o]1 )2")) {
- XBT_INFO("### Put three VMs on a PM, and put a task to each VM");
- vm0 = MSG_vm_create_core(pm2, "VM0");
- msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
- msg_vm_t vm2 = MSG_vm_create_core(pm2, "VM2");
- MSG_vm_start(vm0);
- MSG_vm_start(vm1);
- MSG_vm_start(vm2);
- run_test_process("( [X]1 [o]1 [o]1 )2", (msg_host_t)vm0, flop_amount * 2 / 3);
- run_test_process("( [o]1 [X]1 [o]1 )2", (msg_host_t)vm1, flop_amount * 2 / 3);
- run_test_process("( [o]1 [o]1 [X]1 )2", (msg_host_t)vm2, flop_amount * 2 / 3);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- MSG_vm_destroy(vm2);
-
- } else if (!strcmp(chooser, "( [o]2 )2")) {
- XBT_INFO("### Put a VM on a PM, and put a task to the VM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [X]2 )2", (msg_host_t)vm0, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [oo]2 )2")) {
- XBT_INFO("### Put a VM on a PM, and put two tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [Xo]2 )2", (msg_host_t)vm0, flop_amount);
- run_test_process("( [oX]2 )2", (msg_host_t)vm0, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ooo]2 )2")) {
- XBT_INFO("### Put a VM on a PM, and put three tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [Xoo]2 )2", (msg_host_t)vm0, flop_amount * 2 / 3);
- run_test_process("( [oXo]2 )2", (msg_host_t)vm0, flop_amount * 2 / 3);
- run_test_process("( [ooX]2 )2", (msg_host_t)vm0, flop_amount * 2 / 3);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ ]2 o )2")) {
- XBT_INFO("### Put a VM on a PM, and put a task to the PM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [ ]2 X )2", pm2, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [o]2 o )2")) {
- XBT_INFO("### Put a VM on a PM, put one task to the PM and one task to the VM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [o]2 X )2", pm2, flop_amount);
- run_test_process("( [X]2 o )2", (msg_host_t)vm0, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [oo]2 o )2")) {
- XBT_INFO("### Put a VM on a PM, put one task to the PM and two tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [oo]2 X )2", pm2, flop_amount * 2 / 3);
- run_test_process("( [Xo]2 o )2", (msg_host_t)vm0, flop_amount * 2 / 3);
- run_test_process("( [oX]2 o )2", (msg_host_t)vm0, flop_amount * 2 / 3);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ooo]2 o )2")) {
- XBT_INFO("### Put a VM on a PM, put one task to the PM and three tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [ooo]2 X )2", pm2, flop_amount * 2 / 3);
- run_test_process("( [Xoo]2 o )2", (msg_host_t)vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
- run_test_process("( [oXo]2 o )2", (msg_host_t)vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
- run_test_process("( [ooX]2 o )2", (msg_host_t)vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ ]2 oo )2")) {
- XBT_INFO("### Put a VM on a PM, and put two tasks to the PM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [ ]2 Xo )2", pm2, flop_amount);
- run_test_process("( [ ]2 oX )2", pm2, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [o]2 oo )2")) {
- XBT_INFO("### Put a VM on a PM, put one task to the PM and one task to the VM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [o]2 Xo )2", pm2, flop_amount * 2 / 3);
- run_test_process("( [o]2 oX )2", pm2, flop_amount * 2 / 3);
- run_test_process("( [X]2 oo )2", (msg_host_t)vm0, flop_amount * 2 / 3);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [oo]2 oo )2")) {
- XBT_INFO("### Put a VM on a PM, put one task to the PM and two tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [oo]2 Xo )2", pm2, flop_amount / 2);
- run_test_process("( [oo]2 oX )2", pm2, flop_amount / 2);
- run_test_process("( [Xo]2 oo )2", (msg_host_t)vm0, flop_amount / 2);
- run_test_process("( [oX]2 oo )2", (msg_host_t)vm0, flop_amount / 2);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ooo]2 oo )2")) {
- XBT_INFO("### Put a VM on a PM, put one task to the PM and three tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [ooo]2 Xo )2", pm2, flop_amount * 2 / 4);
- run_test_process("( [ooo]2 oX )2", pm2, flop_amount * 2 / 4);
- run_test_process("( [Xoo]2 oo )2", (msg_host_t)vm0, flop_amount / 3);
- run_test_process("( [oXo]2 oo )2", (msg_host_t)vm0, flop_amount / 3);
- run_test_process("( [ooX]2 oo )2", (msg_host_t)vm0, flop_amount / 3);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [o]2 )4")) {
- XBT_INFO("### Put a VM on a PM, and put a task to the VM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [X]2 )4", (msg_host_t)vm0, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [oo]2 )4")) {
- XBT_INFO("### Put a VM on a PM, and put two tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [Xo]2 )4", (msg_host_t)vm0, flop_amount);
- run_test_process("( [oX]2 )4", (msg_host_t)vm0, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ooo]2 )4")) {
- XBT_INFO("### ( [ooo]2 )4: Put a VM on a PM, and put three tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [Xoo]2 )4", (msg_host_t)vm0, flop_amount * 2 / 3);
- run_test_process("( [oXo]2 )4", (msg_host_t)vm0, flop_amount * 2 / 3);
- run_test_process("( [ooX]2 )4", (msg_host_t)vm0, flop_amount * 2 / 3);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ ]2 o )4")) {
- XBT_INFO("### Put a VM on a PM, and put a task to the PM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [ ]2 X )4", pm4, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ ]2 oo )4")) {
- XBT_INFO("### Put a VM on a PM, and put two tasks to the PM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [ ]2 Xo )4", pm4, flop_amount);
- run_test_process("( [ ]2 oX )4", pm4, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ ]2 ooo )4")) {
- XBT_INFO("### Put a VM on a PM, and put three tasks to the PM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [ ]2 Xoo )4", pm4, flop_amount);
- run_test_process("( [ ]2 oXo )4", pm4, flop_amount);
- run_test_process("( [ ]2 ooX )4", pm4, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ ]2 oooo )4")) {
- XBT_INFO("### Put a VM on a PM, and put four tasks to the PM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [ ]2 Xooo )4", pm4, flop_amount);
- run_test_process("( [ ]2 oXoo )4", pm4, flop_amount);
- run_test_process("( [ ]2 ooXo )4", pm4, flop_amount);
- run_test_process("( [ ]2 oooX )4", pm4, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [o]2 o )4")) {
- XBT_INFO("### Put a VM on a PM, and put one task to the PM and one task to the VM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [X]2 o )4", (msg_host_t)vm0, flop_amount);
- run_test_process("( [o]2 X )4", pm4, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [o]2 oo )4")) {
- XBT_INFO("### Put a VM on a PM, and put two tasks to the PM and one task to the VM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [X]2 oo )4", (msg_host_t)vm0, flop_amount);
- run_test_process("( [o]2 Xo )4", pm4, flop_amount);
- run_test_process("( [o]2 oX )4", pm4, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [oo]2 oo )4")) {
- XBT_INFO("### Put a VM on a PM, and put two tasks to the PM and two tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [Xo]2 oo )4", (msg_host_t)vm0, flop_amount);
- run_test_process("( [oX]2 oo )4", (msg_host_t)vm0, flop_amount);
- run_test_process("( [oo]2 Xo )4", pm4, flop_amount);
- run_test_process("( [oo]2 oX )4", pm4, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [o]2 ooo )4")) {
- XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and one tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [X]2 ooo )4", (msg_host_t)vm0, flop_amount);
- run_test_process("( [o]2 Xoo )4", pm4, flop_amount);
- run_test_process("( [o]2 oXo )4", pm4, flop_amount);
- run_test_process("( [o]2 ooX )4", pm4, flop_amount);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [oo]2 ooo )4")) {
- XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and two tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [Xo]2 ooo )4", (msg_host_t)vm0, flop_amount * 4 / 5);
- run_test_process("( [oX]2 ooo )4", (msg_host_t)vm0, flop_amount * 4 / 5);
- run_test_process("( [oo]2 Xoo )4", pm4, flop_amount * 4 / 5);
- run_test_process("( [oo]2 oXo )4", pm4, flop_amount * 4 / 5);
- run_test_process("( [oo]2 ooX )4", pm4, flop_amount * 4 / 5);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else if (!strcmp(chooser, "( [ooo]2 ooo )4")) {
- XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and three tasks to the VM");
- vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
- MSG_vm_start(vm0);
- run_test_process("( [Xoo]2 ooo )4", (msg_host_t)vm0, flop_amount * (8. / 5) * 1 / 3); // The VM has 8/5 of the PM
- run_test_process("( [oXo]2 ooo )4", (msg_host_t)vm0, flop_amount * (8. / 5) * 1 / 3);
- run_test_process("( [ooX]2 ooo )4", (msg_host_t)vm0, flop_amount * (8. / 5) * 1 / 3);
-
- run_test_process("( [ooo]2 Xoo )4", pm4, flop_amount * 4 / 5);
- run_test_process("( [ooo]2 oXo )4", pm4, flop_amount * 4 / 5);
- run_test_process("( [ooo]2 ooX )4", pm4, flop_amount * 4 / 5);
- MSG_process_sleep(2);
- MSG_vm_destroy(vm0);
-
- } else {
- xbt_die("Unknown chooser: %s", chooser);
- }
-}
-static int master_main(int argc, char* argv[])
-{
- XBT_INFO("# TEST ON SINGLE-CORE PMs");
- XBT_INFO("## Check computation on regular PMs");
- run_test("(o)1");
- run_test("(oo)1");
- run_test("(o)1 (o)1");
- XBT_INFO("# TEST ON SINGLE-CORE PMs AND SINGLE-CORE VMs");
-
- XBT_INFO("## Check the impact of running tasks inside a VM (no degradation for the moment)");
- run_test("( [o]1 )1");
- run_test("( [oo]1 )1");
-
- XBT_INFO("## Check impact of running tasks collocated with VMs (no VM noise for the moment)");
- run_test("( [ ]1 o )1");
- run_test("( [o]1 o )1");
- run_test("( [oo]1 o )1");
-
- XBT_INFO("# TEST ON TWO-CORE PMs");
- XBT_INFO("## Check computation on 2 cores PMs");
- run_test("( o )2");
- run_test("( oo )2");
- run_test("( ooo )2");
-
- XBT_INFO("# TEST ON TWO-CORE PMs AND SINGLE-CORE VMs");
- XBT_INFO("## Check impact of a single VM (no degradation for the moment)");
- run_test("( [o]1 )2");
- run_test("( [oo]1 )2");
- run_test("( [ ]1 o )2");
- run_test("( [o]1 o )2");
-
- XBT_INFO("## Check impact of a several VMs (there is no degradation for the moment)");
- run_test("( [o]1 [ ]1 )2");
- run_test("( [o]1 [o]1 )2");
- run_test("( [o]1 [o]1 [ ]1 )2");
- run_test("( [o]1 [o]1 [o]1 )2");
-
- XBT_INFO("# TEST ON TWO-CORE PMs AND TWO-CORE VMs");
-
- XBT_INFO("## Check impact of a single VM (there is no degradation for the moment)");
- run_test("( [o]2 )2");
- run_test("( [oo]2 )2");
- run_test("( [ooo]2 )2");
-
- XBT_INFO("## Check impact of a single VM collocated with a task (there is no degradation for the moment)");
- run_test("( [ ]2 o )2");
- run_test("( [o]2 o )2");
- run_test("( [oo]2 o )2");
- run_test("( [ooo]2 o )2");
- run_test("( [ ]2 oo )2");
- run_test("( [o]2 oo )2");
- run_test("( [oo]2 oo )2");
- run_test("( [ooo]2 oo )2");
-
- XBT_INFO("# TEST ON FOUR-CORE PMs AND TWO-CORE VMs");
- XBT_INFO("## Check impact of a single VM");
- run_test("( [o]2 )4");
- run_test("( [oo]2 )4");
- run_test("( [ooo]2 )4");
-
- XBT_INFO("## Check impact of a single empty VM collocated with tasks");
- run_test("( [ ]2 o )4");
- run_test("( [ ]2 oo )4");
- run_test("( [ ]2 ooo )4");
- run_test("( [ ]2 oooo )4");
-
- XBT_INFO("## Check impact of a single working VM collocated with tasks");
- run_test("( [o]2 o )4");
- run_test("( [o]2 oo )4");
- run_test("( [oo]2 oo )4");
- run_test("( [o]2 ooo )4");
- run_test("( [oo]2 ooo )4");
- run_test("( [ooo]2 ooo )4");
-
- XBT_INFO(" ");
- XBT_INFO(" ");
- XBT_INFO("## %d test failed", failed_test);
- XBT_INFO(" ");
- return 0;
-}
-int main(int argc, char* argv[])
-{
- /* Get the arguments */
- MSG_init(&argc, argv);
-
- /* load the platform file */
- const char* platform = "../../../platforms/cloud-sharing.xml";
- if (argc == 2)
- platform = argv[1];
- MSG_create_environment(platform);
-
- msg_host_t pm0 = MSG_host_by_name("node-0.1core.org");
- xbt_assert(pm0, "Host 'node-0.1core.org' not found");
- MSG_process_create("master", master_main, NULL, pm0);
-
- return MSG_main() != MSG_OK || failed_test;
-}
+++ /dev/null
-#!/usr/bin/env tesh
-
-$ $SG_TEST_EXENV ${bindir:=.}/cloud-sharing$EXEEXT --log=root.fmt:%m%n ${platfdir}/cluster_multi.xml
-> # TEST ON SINGLE-CORE PMs
-> ## Check computation on regular PMs
-> ### Test '(o)1'. A task on a regular PM
-> Passed: (X)1 with 1 load (100000000flops) took 0.1s as expected
-> ### Test '(oo)1'. 2 tasks on a regular PM
-> Passed: (oX)1 with 0.5 load (50000000flops) took 0.1s as expected
-> Passed: (Xo)1 with 0.5 load (50000000flops) took 0.1s as expected
-> ### Test '(o)1 (o)1'. 2 regular PMs, with a task each.
-> Passed: (X)1 (o)1 with 1 load (100000000flops) took 0.1s as expected
-> Passed: (o)1 (X)1 with 1 load (100000000flops) took 0.1s as expected
-> # TEST ON SINGLE-CORE PMs AND SINGLE-CORE VMs
-> ## Check the impact of running tasks inside a VM (no degradation for the moment)
-> ### Test '( [o]1 )1'. A task in a VM on a PM.
-> Passed: ( [X]1 )1 with 1 load (100000000flops) took 0.1s as expected
-> ### Test '( [oo]1 )1'. 2 tasks co-located in a VM on a PM.
-> Passed: ( [oX]1 )1 with 0.5 load (50000000flops) took 0.1s as expected
-> Passed: ( [Xo]1 )1 with 0.5 load (50000000flops) took 0.1s as expected
-> ## Check impact of running tasks collocated with VMs (no VM noise for the moment)
-> ### Test '( [ ]1 o )1'. 1 task collocated with an empty VM
-> Passed: ( [ ]1 X )1 with 1 load (100000000flops) took 0.1s as expected
-> ### Test '( [o]1 o )1'. A task in a VM, plus a task
-> Passed: ( [o]1 X )1 with 0.5 load (50000000flops) took 0.1s as expected
-> Passed: ( [X]1 o )1 with 0.5 load (50000000flops) took 0.1s as expected
-> ### Test '( [oo]1 o )1'. 2 tasks in a VM, plus a task
-> Passed: ( [oo]1 X )1 with 0.5 load (50000000flops) took 0.1s as expected
-> Passed: ( [oX]1 o )1 with 0.25 load (25000000flops) took 0.1s as expected
-> Passed: ( [Xo]1 o )1 with 0.25 load (25000000flops) took 0.1s as expected
-> # TEST ON TWO-CORE PMs
-> ## Check computation on 2 cores PMs
-> ### Test '( o )2'. A task on bicore PM
-> Passed: (X)2 with 1 load (100000000flops) took 0.1s as expected
-> ### Test '( oo )2'. 2 tasks on a bicore PM
-> Passed: (xX)2 with 1 load (100000000flops) took 0.1s as expected
-> Passed: (Xx)2 with 1 load (100000000flops) took 0.1s as expected
-> ### Test '( ooo )2'. 3 tasks on a bicore PM
-> Passed: (xxX)2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: (xXx)2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: (Xxx)2 with 0.6667 load (66666666flops) took 0.1s as expected
-> # TEST ON TWO-CORE PMs AND SINGLE-CORE VMs
-> ## Check impact of a single VM (no degradation for the moment)
-> ### Test '( [o]1 )2'. A task in a VM on a bicore PM
-> Passed: ( [X]1 )2 with 1 load (100000000flops) took 0.1s as expected
-> ### Test '( [oo]1 )2'. 2 tasks in a VM on a bicore PM
-> Passed: ( [xX]1 )2 with 0.5 load (50000000flops) took 0.1s as expected
-> Passed: ( [Xx]1 )2 with 0.5 load (50000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put a task to the PM
-> Passed: ( [ ]1 X )2 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, put a task to the PM and a task to the VM
-> Passed: ( [x]1 X )2 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [X]1 x )2 with 1 load (100000000flops) took 0.1s as expected
-> ## Check impact of a several VMs (there is no degradation for the moment)
-> ### Put two VMs on a PM, and put a task to one VM
-> Passed: ( [X]1 [ ]1 )2 with 1 load (100000000flops) took 0.1s as expected
-> ### Put two VMs on a PM, and put a task to each VM
-> Passed: ( [X]1 [x]1 )2 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [x]1 [X]1 )2 with 1 load (100000000flops) took 0.1s as expected
-> ### Put three VMs on a PM, and put a task to two VMs
-> Passed: ( [X]1 [x]1 [ ]1 )2 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [x]1 [X]1 [ ]1 )2 with 1 load (100000000flops) took 0.1s as expected
-> ### Put three VMs on a PM, and put a task to each VM
-> Passed: ( [X]1 [o]1 [o]1 )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [o]1 [X]1 [o]1 )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [o]1 [o]1 [X]1 )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> # TEST ON TWO-CORE PMs AND TWO-CORE VMs
-> ## Check impact of a single VM (there is no degradation for the moment)
-> ### Put a VM on a PM, and put a task to the VM
-> Passed: ( [X]2 )2 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put two tasks to the VM
-> Passed: ( [oX]2 )2 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [Xo]2 )2 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put three tasks to the VM
-> Passed: ( [ooX]2 )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [oXo]2 )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [Xoo]2 )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> ## Check impact of a single VM collocated with a task (there is no degradation for the moment)
-> ### Put a VM on a PM, and put a task to the PM
-> Passed: ( [ ]2 X )2 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, put one task to the PM and one task to the VM
-> Passed: ( [o]2 X )2 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [X]2 o )2 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, put one task to the PM and two tasks to the VM
-> Passed: ( [oo]2 X )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [oX]2 o )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [Xo]2 o )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> ### Put a VM on a PM, put one task to the PM and three tasks to the VM
-> Passed: ( [ooo]2 X )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [ooX]2 o )2 with 0.4444 load (44444444flops) took 0.1s as expected
-> Passed: ( [oXo]2 o )2 with 0.4444 load (44444444flops) took 0.1s as expected
-> Passed: ( [Xoo]2 o )2 with 0.4444 load (44444444flops) took 0.1s as expected
-> ### Put a VM on a PM, and put two tasks to the PM
-> Passed: ( [ ]2 oX )2 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [ ]2 Xo )2 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, put one task to the PM and one task to the VM
-> Passed: ( [o]2 oX )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [o]2 Xo )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [X]2 oo )2 with 0.6667 load (66666666flops) took 0.1s as expected
-> ### Put a VM on a PM, put one task to the PM and two tasks to the VM
-> Passed: ( [oo]2 oX )2 with 0.5 load (50000000flops) took 0.1s as expected
-> Passed: ( [oo]2 Xo )2 with 0.5 load (50000000flops) took 0.1s as expected
-> Passed: ( [oX]2 oo )2 with 0.5 load (50000000flops) took 0.1s as expected
-> Passed: ( [Xo]2 oo )2 with 0.5 load (50000000flops) took 0.1s as expected
-> ### Put a VM on a PM, put one task to the PM and three tasks to the VM
-> Passed: ( [ooo]2 oX )2 with 0.5 load (50000000flops) took 0.1s as expected
-> Passed: ( [ooo]2 Xo )2 with 0.5 load (50000000flops) took 0.1s as expected
-> Passed: ( [ooX]2 oo )2 with 0.3333 load (33333333flops) took 0.1s as expected
-> Passed: ( [oXo]2 oo )2 with 0.3333 load (33333333flops) took 0.1s as expected
-> Passed: ( [Xoo]2 oo )2 with 0.3333 load (33333333flops) took 0.1s as expected
-> # TEST ON FOUR-CORE PMs AND TWO-CORE VMs
-> ## Check impact of a single VM
-> ### Put a VM on a PM, and put a task to the VM
-> Passed: ( [X]2 )4 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put two tasks to the VM
-> Passed: ( [oX]2 )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [Xo]2 )4 with 1 load (100000000flops) took 0.1s as expected
-> ### ( [ooo]2 )4: Put a VM on a PM, and put three tasks to the VM
-> Passed: ( [ooX]2 )4 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [oXo]2 )4 with 0.6667 load (66666666flops) took 0.1s as expected
-> Passed: ( [Xoo]2 )4 with 0.6667 load (66666666flops) took 0.1s as expected
-> ## Check impact of a single empty VM collocated with tasks
-> ### Put a VM on a PM, and put a task to the PM
-> Passed: ( [ ]2 X )4 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put two tasks to the PM
-> Passed: ( [ ]2 oX )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [ ]2 Xo )4 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put three tasks to the PM
-> Passed: ( [ ]2 ooX )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [ ]2 oXo )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [ ]2 Xoo )4 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put four tasks to the PM
-> Passed: ( [ ]2 oooX )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [ ]2 ooXo )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [ ]2 oXoo )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [ ]2 Xooo )4 with 1 load (100000000flops) took 0.1s as expected
-> ## Check impact of a single working VM collocated with tasks
-> ### Put a VM on a PM, and put one task to the PM and one task to the VM
-> Passed: ( [o]2 X )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [X]2 o )4 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put two tasks to the PM and one task to the VM
-> Passed: ( [o]2 oX )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [o]2 Xo )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [X]2 oo )4 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put two tasks to the PM and two tasks to the VM
-> Passed: ( [oo]2 oX )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [oo]2 Xo )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [oX]2 oo )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [Xo]2 oo )4 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put three tasks to the PM and one tasks to the VM
-> Passed: ( [o]2 ooX )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [o]2 oXo )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [o]2 Xoo )4 with 1 load (100000000flops) took 0.1s as expected
-> Passed: ( [X]2 ooo )4 with 1 load (100000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put three tasks to the PM and two tasks to the VM
-> Passed: ( [oo]2 ooX )4 with 0.8 load (80000000flops) took 0.1s as expected
-> Passed: ( [oo]2 oXo )4 with 0.8 load (80000000flops) took 0.1s as expected
-> Passed: ( [oo]2 Xoo )4 with 0.8 load (80000000flops) took 0.1s as expected
-> Passed: ( [oX]2 ooo )4 with 0.8 load (80000000flops) took 0.1s as expected
-> Passed: ( [Xo]2 ooo )4 with 0.8 load (80000000flops) took 0.1s as expected
-> ### Put a VM on a PM, and put three tasks to the PM and three tasks to the VM
-> Passed: ( [ooo]2 ooX )4 with 0.8 load (80000000flops) took 0.1s as expected
-> Passed: ( [ooo]2 oXo )4 with 0.8 load (80000000flops) took 0.1s as expected
-> Passed: ( [ooo]2 Xoo )4 with 0.8 load (80000000flops) took 0.1s as expected
-> Passed: ( [ooX]2 ooo )4 with 0.5333 load (53333333flops) took 0.1s as expected
-> Passed: ( [oXo]2 ooo )4 with 0.5333 load (53333333flops) took 0.1s as expected
-> Passed: ( [Xoo]2 ooo )4 with 0.5333 load (53333333flops) took 0.1s as expected
->
->
-> ## 0 test failed
->
\ No newline at end of file
foreach(x actor actor-autorestart actor-migration
activity-lifecycle
comm-pt2pt
- cloud-interrupt-migration
+ cloud-interrupt-migration cloud-sharing
concurrent_rw storage_client_server listen_async pid )
add_executable (${x} ${x}/${x}.cpp)
target_link_libraries(${x} simgrid)
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/actor-autorestart/actor-autorestart.tesh)
-foreach(x listen_async pid storage_client_server)
+foreach(x listen_async pid storage_client_server cloud-sharing)
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
ADD_TESH(tesh-s4u-${x} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x} --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_BINARY_DIR}/teshsuite/s4u/${x} ${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x}/${x}.tesh)
endforeach()
--- /dev/null
+/* Copyright (c) 2007-2018. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+#include "simgrid/plugins/energy.h"
+#include "simgrid/s4u/VirtualMachine.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this msg example");
+
+const int FAIL_ON_ERROR = 0;
+const int flop_amount = 100000000;
+int failed_test = 0;
+
+double energy = 0;
+
+static int computation_fun(std::vector<std::string> argv)
+{
+ int size = std::stoi(argv[0]);
+
+ double begin = simgrid::s4u::Engine::get_clock();
+ simgrid::s4u::this_actor::execute(size);
+ double end = simgrid::s4u::Engine::get_clock();
+
+ if (0.1 - (end - begin) > 0.001) {
+ xbt_assert(! FAIL_ON_ERROR, "%s with %.4g load (%dflops) took %.4fs instead of 0.1s",
+ simgrid::s4u::this_actor::get_name().c_str(), ((double)size/flop_amount),size, (end-begin));
+ XBT_INFO("FAILED TEST: %s with %.4g load (%dflops) took %.4fs instead of 0.1s",
+ simgrid::s4u::this_actor::get_name().c_str(),((double)size/flop_amount), size, (end-begin));
+ failed_test ++;
+ } else {
+ XBT_INFO("Passed: %s with %.4g load (%dflops) took 0.1s as expected",
+ simgrid::s4u::this_actor::get_name().c_str(), ((double)size/flop_amount), size);
+ }
+
+ return 0;
+}
+
+static void run_test_process(std::string name, simgrid::s4u::Host *location, int size)
+{
+ std::vector<std::string> arg = {std::to_string(size)};
+ simgrid::s4u::Actor::create(name, location, computation_fun, arg);
+}
+
+static void test_energy_consumption(std::string name, int nb_cores)
+{
+ double new_energy = 0;
+
+ for (simgrid::s4u::Host *pm : simgrid::s4u::Engine::get_instance()->get_all_hosts() ){
+ if(!dynamic_cast<simgrid::s4u::VirtualMachine *>(pm))
+ new_energy += sg_host_get_consumed_energy(pm);
+ }
+
+ double expected_consumption = 0.1 * nb_cores;
+ double actual_consumption = new_energy - energy;
+
+ energy = new_energy;
+
+ if (std::abs(expected_consumption - actual_consumption) > 0.001) {
+ XBT_INFO("FAILED TEST: %s consumed %f instead of %f J (i.e. %i cores should have been used)", name.c_str(), actual_consumption, expected_consumption, nb_cores );
+ failed_test++;
+ } else {
+ XBT_INFO("Passed: %s consumed %f J (i.e. %i cores used) ", name.c_str(), actual_consumption, nb_cores);
+ }
+}
+
/* Run one sharing scenario identified by `chooser` and check two things:
 *  - each task's completion time (self-checked by computation_fun: the flop
 *    amounts below are hand-computed so every task finishes in exactly 0.1s
 *    when CPU sharing across PMs and VMs is modeled correctly), and
 *  - the total energy drawn by the PMs (test_energy_consumption, which
 *    expects 0.1 J per busy core).
 *
 * Scenario / process-name syntax:
 *   "( )1"      a PM with one core; "( )2" a PM with 2 cores
 *   "( [ ]2 )4" a VM with 2 cores on a PM with 4 cores
 *   "o"         some other task is there
 *   "X"         the task which holds this name
 *
 * Each branch sleeps 2 simulated seconds so all 0.1s tasks are guaranteed
 * to be over before the energy accounting is read and the VMs destroyed. */
static void run_test(std::string chooser)
{
  simgrid::s4u::Host *pm0 = simgrid::s4u::Host::by_name("node-0.1core.org");
  simgrid::s4u::Host *pm1 = simgrid::s4u::Host::by_name("node-1.1core.org");
  simgrid::s4u::Host *pm2 = simgrid::s4u::Host::by_name("node-0.2cores.org"); // 2 cores
  simgrid::s4u::Host *pm4 = simgrid::s4u::Host::by_name("node-0.4cores.org");

  simgrid::s4u::VirtualMachine *vm0;
  xbt_assert(pm0, "Host node-0.1core.org does not seem to exist");
  xbt_assert(pm2, "Host node-0.2cores.org does not seem to exist");
  xbt_assert(pm4, "Host node-0.4cores.org does not seem to exist");

  // syntax of the process name:
  // "( )1" means PM with one core; "( )2" means PM with 2 cores
  // "( [ ]2 )4" means a VM with 2 cores, on a PM with 4 cores.
  // "o" means another process is there
  // "X" means the process which holds this name

  if (chooser == "(o)1") {
    XBT_INFO("### Test '%s'. A task on a regular PM", chooser.c_str());
    run_test_process("(X)1", pm0, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);

  } else if (chooser == "(oo)1") {
    XBT_INFO("### Test '%s'. 2 tasks on a regular PM", chooser.c_str());
    // Two tasks share the single core: each gets half the flops.
    run_test_process("(Xo)1", pm0, flop_amount / 2);
    run_test_process("(oX)1", pm0, flop_amount / 2);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);

  } else if (chooser == "(o)1 (o)1") {
    XBT_INFO("### Test '%s'. 2 regular PMs, with a task each.", chooser.c_str());
    run_test_process("(X)1 (o)1", pm0, flop_amount);
    run_test_process("(o)1 (X)1", pm1, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);

  } else if (chooser == "( [o]1 )1") {
    XBT_INFO("### Test '%s'. A task in a VM on a PM.", chooser.c_str());
    vm0 = new simgrid::s4u::VirtualMachine("VM0",pm0,1);
    run_test_process("( [X]1 )1", vm0, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [oo]1 )1") {
    XBT_INFO("### Test '%s'. 2 tasks co-located in a VM on a PM.", chooser.c_str());
    vm0 = new simgrid::s4u::VirtualMachine("VM0",pm0,1);
    run_test_process("( [Xo]1 )1", vm0, flop_amount / 2);
    run_test_process("( [oX]1 )1", vm0, flop_amount / 2);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [ ]1 o )1") {
    XBT_INFO("### Test '%s'. 1 task collocated with an empty VM", chooser.c_str());
    // An idle VM must not steal any CPU share from the PM-level task.
    vm0 = new simgrid::s4u::VirtualMachine("VM0",pm0,1);
    run_test_process("( [ ]1 X )1", pm0, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [o]1 o )1") {
    XBT_INFO("### Test '%s'. A task in a VM, plus a task", chooser.c_str());
    // The VM and the PM-level task each get half of the single core.
    vm0 = new simgrid::s4u::VirtualMachine("VM0",pm0,1);
    run_test_process("( [X]1 o )1", vm0, flop_amount / 2);
    run_test_process("( [o]1 X )1", pm0, flop_amount / 2);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [oo]1 o )1") {
    XBT_INFO("### Test '%s'. 2 tasks in a VM, plus a task", chooser.c_str());
    // The VM gets half the core, and its two tasks split that half again.
    vm0 = new simgrid::s4u::VirtualMachine("VM0",pm0,1);
    run_test_process("( [Xo]1 o )1", vm0, flop_amount / 4);
    run_test_process("( [oX]1 o )1", vm0, flop_amount / 4);
    run_test_process("( [oo]1 X )1", pm0, flop_amount / 2);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( o )2") {
    XBT_INFO("### Test '%s'. A task on bicore PM", chooser.c_str());
    run_test_process("(X)2", pm2, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);

  } else if (chooser == "( oo )2") {
    XBT_INFO("### Test '%s'. 2 tasks on a bicore PM", chooser.c_str());
    run_test_process("(Xx)2", pm2, flop_amount);
    run_test_process("(xX)2", pm2, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);

  } else if (chooser == "( ooo )2") {
    XBT_INFO("### Test '%s'. 3 tasks on a bicore PM", chooser.c_str());
    // 3 tasks over 2 cores: each gets 2/3 of a core.
    run_test_process("(Xxx)2", pm2, flop_amount * 2 / 3);
    run_test_process("(xXx)2", pm2, flop_amount * 2 / 3);
    run_test_process("(xxX)2", pm2, flop_amount * 2 / 3);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);

  } else if (chooser == "( [o]1 )2") {
    XBT_INFO("### Test '%s'. A task in a VM on a bicore PM", chooser.c_str());
    vm0 = new simgrid::s4u::VirtualMachine("VM0",pm2,1);
    run_test_process("( [X]1 )2", vm0, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [oo]1 )2") {
    XBT_INFO("### Test '%s'. 2 tasks in a VM on a bicore PM", chooser.c_str());
    // The 1-core VM caps both tasks to one PM core, split in two.
    vm0 = new simgrid::s4u::VirtualMachine("VM0",pm2,1);
    run_test_process("( [Xx]1 )2", vm0, flop_amount / 2);
    run_test_process("( [xX]1 )2", vm0, flop_amount / 2);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [ ]1 o )2") {
    XBT_INFO("### Put a VM on a PM, and put a task to the PM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2,1);
    run_test_process("( [ ]1 X )2", pm2, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [o]1 o )2") {
    XBT_INFO("### Put a VM on a PM, put a task to the PM and a task to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2,1);
    run_test_process("( [X]1 x )2", vm0, flop_amount);
    run_test_process("( [x]1 X )2", pm2, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [o]1 [ ]1 )2") {
    XBT_INFO("### Put two VMs on a PM, and put a task to one VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2,1);
    simgrid::s4u::VirtualMachine *vm1 = new simgrid::s4u::VirtualMachine("VM1", pm2,1);
    run_test_process("( [X]1 [ ]1 )2", vm0, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();
    vm1->destroy();

  } else if (chooser == "( [o]1 [o]1 )2") {
    XBT_INFO("### Put two VMs on a PM, and put a task to each VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2,1);
    simgrid::s4u::VirtualMachine *vm1 = new simgrid::s4u::VirtualMachine("VM1", pm2,1);
    run_test_process("( [X]1 [x]1 )2", vm0, flop_amount);
    run_test_process("( [x]1 [X]1 )2", vm1, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();
    vm1->destroy();

  } else if (chooser == "( [o]1 [o]1 [ ]1 )2") {
    XBT_INFO("### Put three VMs on a PM, and put a task to two VMs");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2,1);
    simgrid::s4u::VirtualMachine *vm1 = new simgrid::s4u::VirtualMachine("VM1", pm2,1);
    simgrid::s4u::VirtualMachine *vm2 = new simgrid::s4u::VirtualMachine("VM2", pm2,1);
    run_test_process("( [X]1 [x]1 [ ]1 )2", vm0, flop_amount);
    run_test_process("( [x]1 [X]1 [ ]1 )2", vm1, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();
    vm1->destroy();
    vm2->destroy();

  } else if (chooser == "( [o]1 [o]1 [o]1 )2") {
    XBT_INFO("### Put three VMs on a PM, and put a task to each VM");
    // 3 single-core VMs over 2 PM cores: each VM gets 2/3 of a core.
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2,1);
    simgrid::s4u::VirtualMachine *vm1 = new simgrid::s4u::VirtualMachine("VM1", pm2,1);
    simgrid::s4u::VirtualMachine *vm2 = new simgrid::s4u::VirtualMachine("VM2", pm2,1);
    run_test_process("( [X]1 [o]1 [o]1 )2", vm0, flop_amount * 2 / 3);
    run_test_process("( [o]1 [X]1 [o]1 )2", vm1, flop_amount * 2 / 3);
    run_test_process("( [o]1 [o]1 [X]1 )2", vm2, flop_amount * 2 / 3);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();
    vm1->destroy();
    vm2->destroy();

  } else if (chooser == "( [o]2 )2") {
    XBT_INFO("### Put a VM on a PM, and put a task to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [X]2 )2", vm0, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [oo]2 )2") {
    XBT_INFO("### Put a VM on a PM, and put two tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [Xo]2 )2", vm0, flop_amount);
    run_test_process("( [oX]2 )2", vm0, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [ooo]2 )2") {
    XBT_INFO("### Put a VM on a PM, and put three tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [Xoo]2 )2", vm0, flop_amount * 2 / 3);
    run_test_process("( [oXo]2 )2", vm0, flop_amount * 2 / 3);
    run_test_process("( [ooX]2 )2", vm0, flop_amount * 2 / 3);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [ ]2 o )2") {
    XBT_INFO("### Put a VM on a PM, and put a task to the PM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [ ]2 X )2", pm2, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [o]2 o )2") {
    XBT_INFO("### Put a VM on a PM, put one task to the PM and one task to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [o]2 X )2", pm2, flop_amount);
    run_test_process("( [X]2 o )2", vm0, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [oo]2 o )2") {
    XBT_INFO("### Put a VM on a PM, put one task to the PM and two tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [oo]2 X )2", pm2, flop_amount * 2 / 3);
    run_test_process("( [Xo]2 o )2", vm0, flop_amount * 2 / 3);
    run_test_process("( [oX]2 o )2", vm0, flop_amount * 2 / 3);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [ooo]2 o )2") {
    XBT_INFO("### Put a VM on a PM, put one task to the PM and three tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [ooo]2 X )2", pm2, flop_amount * 2 / 3);
    run_test_process("( [Xoo]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
    run_test_process("( [oXo]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
    run_test_process("( [ooX]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [ ]2 oo )2") {
    XBT_INFO("### Put a VM on a PM, and put two tasks to the PM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [ ]2 Xo )2", pm2, flop_amount);
    run_test_process("( [ ]2 oX )2", pm2, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [o]2 oo )2") {
    XBT_INFO("### Put a VM on a PM, put one task to the PM and one task to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [o]2 Xo )2", pm2, flop_amount * 2 / 3);
    run_test_process("( [o]2 oX )2", pm2, flop_amount * 2 / 3);
    run_test_process("( [X]2 oo )2", vm0, flop_amount * 2 / 3);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [oo]2 oo )2") {
    XBT_INFO("### Put a VM on a PM, put one task to the PM and two tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [oo]2 Xo )2", pm2, flop_amount / 2);
    run_test_process("( [oo]2 oX )2", pm2, flop_amount / 2);
    run_test_process("( [Xo]2 oo )2", vm0, flop_amount / 2);
    run_test_process("( [oX]2 oo )2", vm0, flop_amount / 2);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [ooo]2 oo )2") {
    XBT_INFO("### Put a VM on a PM, put one task to the PM and three tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
    run_test_process("( [ooo]2 Xo )2", pm2, flop_amount * 2 / 4);
    run_test_process("( [ooo]2 oX )2", pm2, flop_amount * 2 / 4);
    run_test_process("( [Xoo]2 oo )2", vm0, flop_amount / 3);
    run_test_process("( [oXo]2 oo )2", vm0, flop_amount / 3);
    run_test_process("( [ooX]2 oo )2", vm0, flop_amount / 3);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [o]2 )4") {
    XBT_INFO("### Put a VM on a PM, and put a task to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [X]2 )4", vm0, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [oo]2 )4") {
    XBT_INFO("### Put a VM on a PM, and put two tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [Xo]2 )4", vm0, flop_amount);
    run_test_process("( [oX]2 )4", vm0, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [ooo]2 )4") {
    XBT_INFO("### ( [ooo]2 )4: Put a VM on a PM, and put three tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [Xoo]2 )4", vm0, flop_amount * 2 / 3);
    run_test_process("( [oXo]2 )4", vm0, flop_amount * 2 / 3);
    run_test_process("( [ooX]2 )4", vm0, flop_amount * 2 / 3);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [ ]2 o )4") {
    XBT_INFO("### Put a VM on a PM, and put a task to the PM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [ ]2 X )4", pm4, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,1);
    vm0->destroy();

  } else if (chooser == "( [ ]2 oo )4") {
    XBT_INFO("### Put a VM on a PM, and put two tasks to the PM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [ ]2 Xo )4", pm4, flop_amount);
    run_test_process("( [ ]2 oX )4", pm4, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [ ]2 ooo )4") {
    XBT_INFO("### Put a VM on a PM, and put three tasks to the PM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [ ]2 Xoo )4", pm4, flop_amount);
    run_test_process("( [ ]2 oXo )4", pm4, flop_amount);
    run_test_process("( [ ]2 ooX )4", pm4, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,3);
    vm0->destroy();

  } else if (chooser == "( [ ]2 oooo )4") {
    XBT_INFO("### Put a VM on a PM, and put four tasks to the PM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [ ]2 Xooo )4", pm4, flop_amount);
    run_test_process("( [ ]2 oXoo )4", pm4, flop_amount);
    run_test_process("( [ ]2 ooXo )4", pm4, flop_amount);
    run_test_process("( [ ]2 oooX )4", pm4, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,4);
    vm0->destroy();

  } else if (chooser == "( [o]2 o )4") {
    XBT_INFO("### Put a VM on a PM, and put one task to the PM and one task to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [X]2 o )4", vm0, flop_amount);
    run_test_process("( [o]2 X )4", pm4, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,2);
    vm0->destroy();

  } else if (chooser == "( [o]2 oo )4") {
    XBT_INFO("### Put a VM on a PM, and put two tasks to the PM and one task to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [X]2 oo )4", vm0, flop_amount);
    run_test_process("( [o]2 Xo )4", pm4, flop_amount);
    run_test_process("( [o]2 oX )4", pm4, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,3);
    vm0->destroy();

  } else if (chooser == "( [oo]2 oo )4") {
    XBT_INFO("### Put a VM on a PM, and put two tasks to the PM and two tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [Xo]2 oo )4", vm0, flop_amount);
    run_test_process("( [oX]2 oo )4", vm0, flop_amount);
    run_test_process("( [oo]2 Xo )4", pm4, flop_amount);
    run_test_process("( [oo]2 oX )4", pm4, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,4);
    vm0->destroy();

  } else if (chooser == "( [o]2 ooo )4") {
    XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and one tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [X]2 ooo )4", vm0, flop_amount);
    run_test_process("( [o]2 Xoo )4", pm4, flop_amount);
    run_test_process("( [o]2 oXo )4", pm4, flop_amount);
    run_test_process("( [o]2 ooX )4", pm4, flop_amount);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,4);
    vm0->destroy();

  } else if (chooser == "( [oo]2 ooo )4") {
    XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and two tasks to the VM");
    // 5 runnable tasks over 4 cores: each gets 4/5 of a core.
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [Xo]2 ooo )4", vm0, flop_amount * 4 / 5);
    run_test_process("( [oX]2 ooo )4", vm0, flop_amount * 4 / 5);
    run_test_process("( [oo]2 Xoo )4", pm4, flop_amount * 4 / 5);
    run_test_process("( [oo]2 oXo )4", pm4, flop_amount * 4 / 5);
    run_test_process("( [oo]2 ooX )4", pm4, flop_amount * 4 / 5);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,4);
    vm0->destroy();

  } else if (chooser == "( [ooo]2 ooo )4") {
    XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and three tasks to the VM");
    vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
    run_test_process("( [Xoo]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3); // The VM has 8/5 of the PM
    run_test_process("( [oXo]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3);
    run_test_process("( [ooX]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3);

    run_test_process("( [ooo]2 Xoo )4", pm4, flop_amount * 4 / 5);
    run_test_process("( [ooo]2 oXo )4", pm4, flop_amount * 4 / 5);
    run_test_process("( [ooo]2 ooX )4", pm4, flop_amount * 4 / 5);
    simgrid::s4u::this_actor::sleep_for(2);
    test_energy_consumption(chooser,4);
    vm0->destroy();

  } else {
    xbt_die("Unknown chooser: %s", chooser.c_str());
  }
}
+/* Master actor: drives every CPU-sharing scenario in sequence.
+ * Each run_test() argument encodes a scenario as a string:
+ *   "( [tasks-inside-VM]vm_core_count  tasks-on-PM )pm_core_count"
+ * where each 'o' is one task (the per-task checks replace one 'o' by 'X').
+ * Failures are recorded by the checks inside run_test(); this function
+ * only reports the tally and always returns 0. */
+static int master_main()
+{
+ XBT_INFO("# TEST ON SINGLE-CORE PMs");
+ XBT_INFO("## Check computation on regular PMs");
+ run_test("(o)1");
+ run_test("(oo)1");
+ run_test("(o)1 (o)1");
+ XBT_INFO("# TEST ON SINGLE-CORE PMs AND SINGLE-CORE VMs");
+
+ XBT_INFO("## Check the impact of running tasks inside a VM (no degradation for the moment)");
+ run_test("( [o]1 )1");
+ run_test("( [oo]1 )1");
+
+ XBT_INFO("## Check impact of running tasks collocated with VMs (no VM noise for the moment)");
+ run_test("( [ ]1 o )1");
+ run_test("( [o]1 o )1");
+ run_test("( [oo]1 o )1");
+
+ XBT_INFO("# TEST ON TWO-CORE PMs");
+ XBT_INFO("## Check computation on 2 cores PMs");
+ run_test("( o )2");
+ run_test("( oo )2");
+ run_test("( ooo )2");
+
+ XBT_INFO("# TEST ON TWO-CORE PMs AND SINGLE-CORE VMs");
+ XBT_INFO("## Check impact of a single VM (no degradation for the moment)");
+ run_test("( [o]1 )2");
+ run_test("( [oo]1 )2");
+ run_test("( [ ]1 o )2");
+ run_test("( [o]1 o )2");
+
+ XBT_INFO("## Check impact of a several VMs (there is no degradation for the moment)");
+ run_test("( [o]1 [ ]1 )2");
+ run_test("( [o]1 [o]1 )2");
+ run_test("( [o]1 [o]1 [ ]1 )2");
+ run_test("( [o]1 [o]1 [o]1 )2");
+
+ XBT_INFO("# TEST ON TWO-CORE PMs AND TWO-CORE VMs");
+
+ XBT_INFO("## Check impact of a single VM (there is no degradation for the moment)");
+ run_test("( [o]2 )2");
+ run_test("( [oo]2 )2");
+ run_test("( [ooo]2 )2");
+
+ XBT_INFO("## Check impact of a single VM collocated with a task (there is no degradation for the moment)");
+ run_test("( [ ]2 o )2");
+ run_test("( [o]2 o )2");
+ run_test("( [oo]2 o )2");
+ run_test("( [ooo]2 o )2");
+ run_test("( [ ]2 oo )2");
+ run_test("( [o]2 oo )2");
+ run_test("( [oo]2 oo )2");
+ run_test("( [ooo]2 oo )2");
+
+ XBT_INFO("# TEST ON FOUR-CORE PMs AND TWO-CORE VMs");
+
+ XBT_INFO("## Check impact of a single VM");
+ run_test("( [o]2 )4");
+ run_test("( [oo]2 )4");
+ run_test("( [ooo]2 )4");
+
+ XBT_INFO("## Check impact of a single empty VM collocated with tasks");
+ run_test("( [ ]2 o )4");
+ run_test("( [ ]2 oo )4");
+ run_test("( [ ]2 ooo )4");
+ run_test("( [ ]2 oooo )4");
+
+ XBT_INFO("## Check impact of a single working VM collocated with tasks");
+ run_test("( [o]2 o )4");
+ run_test("( [o]2 oo )4");
+ run_test("( [oo]2 oo )4");
+ run_test("( [o]2 ooo )4");
+ run_test("( [oo]2 ooo )4");
+ run_test("( [ooo]2 ooo )4");
+
+ XBT_INFO(" ");
+ XBT_INFO(" ");
+ /* failed_test is a file-level counter incremented by the checks above;
+  * this exact message (including its wording) is matched by the tesh
+  * expected output, so it must not be reworded. */
+ XBT_INFO("## %d test failed", failed_test);
+ XBT_INFO(" ");
+ return 0;
+}
+/* Entry point: boots the simulation engine with the host-energy plugin,
+ * loads the platform (default path below, overridable as argv[1]),
+ * spawns the master actor on node-0.1core.org, and runs the simulation.
+ * The process exit status is the number of failed tests (0 on success). */
+int main(int argc, char* argv[])
+{
+ /* Get the arguments */
+ simgrid::s4u::Engine e(&argc, argv);
+ sg_host_energy_plugin_init(); // must be called before load_platform() so hosts get energy accounting
+
+ /* load the platform file */
+ const char* platform = "../../../platforms/cloud-sharing.xml";
+ if (argc == 2)
+ platform = argv[1];
+ e.load_platform(platform);
+
+ simgrid::s4u::Host *pm0 = simgrid::s4u::Host::by_name("node-0.1core.org");
+ xbt_assert(pm0, "Host 'node-0.1core.org' not found");
+ simgrid::s4u::Actor::create("master", pm0, master_main);
+
+ e.run();
+
+ // Non-zero exit when any scenario check failed (counter filled by master_main's run_test calls)
+ return failed_test;
+}
--- /dev/null
+$ ./cloud-sharing ${platfdir}/cluster_multi.xml
+>[node-0.1core.org:master:(1) 0.000000] [s4u_test/INFO] # TEST ON SINGLE-CORE PMs
+>[node-0.1core.org:master:(1) 0.000000] [s4u_test/INFO] ## Check computation on regular PMs
+>[node-0.1core.org:master:(1) 0.000000] [s4u_test/INFO] ### Test '(o)1'. A task on a regular PM
+>[node-0.1core.org:(X)1:(2) 0.100000] [s4u_test/INFO] Passed: (X)1 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 2.000000] [s4u_test/INFO] Passed: (o)1 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 2.000000] [s4u_test/INFO] ### Test '(oo)1'. 2 tasks on a regular PM
+>[node-0.1core.org:(oX)1:(4) 2.100000] [s4u_test/INFO] Passed: (oX)1 with 0.5 load (50000000flops) took 0.1s as expected
+>[node-0.1core.org:(Xo)1:(3) 2.100000] [s4u_test/INFO] Passed: (Xo)1 with 0.5 load (50000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 4.000000] [s4u_test/INFO] Passed: (oo)1 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 4.000000] [s4u_test/INFO] ### Test '(o)1 (o)1'. 2 regular PMs, with a task each.
+>[node-0.1core.org:(X)1 (o)1:(5) 4.100000] [s4u_test/INFO] Passed: (X)1 (o)1 with 1 load (100000000flops) took 0.1s as expected
+>[node-1.1core.org:(o)1 (X)1:(6) 4.100000] [s4u_test/INFO] Passed: (o)1 (X)1 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 6.000000] [s4u_test/INFO] Passed: (o)1 (o)1 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 6.000000] [s4u_test/INFO] # TEST ON SINGLE-CORE PMs AND SINGLE-CORE VMs
+>[node-0.1core.org:master:(1) 6.000000] [s4u_test/INFO] ## Check the impact of running tasks inside a VM (no degradation for the moment)
+>[node-0.1core.org:master:(1) 6.000000] [s4u_test/INFO] ### Test '( [o]1 )1'. A task in a VM on a PM.
+>[VM0:( [X]1 )1:(7) 6.100000] [s4u_test/INFO] Passed: ( [X]1 )1 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 8.000000] [s4u_test/INFO] Passed: ( [o]1 )1 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 8.000000] [s4u_test/INFO] ### Test '( [oo]1 )1'. 2 tasks co-located in a VM on a PM.
+>[VM0:( [oX]1 )1:(9) 8.100000] [s4u_test/INFO] Passed: ( [oX]1 )1 with 0.5 load (50000000flops) took 0.1s as expected
+>[VM0:( [Xo]1 )1:(8) 8.100000] [s4u_test/INFO] Passed: ( [Xo]1 )1 with 0.5 load (50000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 10.000000] [s4u_test/INFO] Passed: ( [oo]1 )1 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 10.000000] [s4u_test/INFO] ## Check impact of running tasks collocated with VMs (no VM noise for the moment)
+>[node-0.1core.org:master:(1) 10.000000] [s4u_test/INFO] ### Test '( [ ]1 o )1'. 1 task collocated with an empty VM
+>[node-0.1core.org:( [ ]1 X )1:(10) 10.100000] [s4u_test/INFO] Passed: ( [ ]1 X )1 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 12.000000] [s4u_test/INFO] Passed: ( [ ]1 o )1 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 12.000000] [s4u_test/INFO] ### Test '( [o]1 o )1'. A task in a VM, plus a task
+>[node-0.1core.org:( [o]1 X )1:(12) 12.100000] [s4u_test/INFO] Passed: ( [o]1 X )1 with 0.5 load (50000000flops) took 0.1s as expected
+>[VM0:( [X]1 o )1:(11) 12.100000] [s4u_test/INFO] Passed: ( [X]1 o )1 with 0.5 load (50000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 14.000000] [s4u_test/INFO] Passed: ( [o]1 o )1 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 14.000000] [s4u_test/INFO] ### Test '( [oo]1 o )1'. 2 tasks in a VM, plus a task
+>[node-0.1core.org:( [oo]1 X )1:(15) 14.100000] [s4u_test/INFO] Passed: ( [oo]1 X )1 with 0.5 load (50000000flops) took 0.1s as expected
+>[VM0:( [oX]1 o )1:(14) 14.100000] [s4u_test/INFO] Passed: ( [oX]1 o )1 with 0.25 load (25000000flops) took 0.1s as expected
+>[VM0:( [Xo]1 o )1:(13) 14.100000] [s4u_test/INFO] Passed: ( [Xo]1 o )1 with 0.25 load (25000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 16.000000] [s4u_test/INFO] Passed: ( [oo]1 o )1 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 16.000000] [s4u_test/INFO] # TEST ON TWO-CORE PMs
+>[node-0.1core.org:master:(1) 16.000000] [s4u_test/INFO] ## Check computation on 2 cores PMs
+>[node-0.1core.org:master:(1) 16.000000] [s4u_test/INFO] ### Test '( o )2'. A task on bicore PM
+>[node-0.2cores.org:(X)2:(16) 16.100000] [s4u_test/INFO] Passed: (X)2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 18.000000] [s4u_test/INFO] Passed: ( o )2 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 18.000000] [s4u_test/INFO] ### Test '( oo )2'. 2 tasks on a bicore PM
+>[node-0.2cores.org:(xX)2:(18) 18.100000] [s4u_test/INFO] Passed: (xX)2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.2cores.org:(Xx)2:(17) 18.100000] [s4u_test/INFO] Passed: (Xx)2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 20.000000] [s4u_test/INFO] Passed: ( oo )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 20.000000] [s4u_test/INFO] ### Test '( ooo )2'. 3 tasks on a bicore PM
+>[node-0.2cores.org:(xxX)2:(21) 20.100000] [s4u_test/INFO] Passed: (xxX)2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[node-0.2cores.org:(xXx)2:(20) 20.100000] [s4u_test/INFO] Passed: (xXx)2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[node-0.2cores.org:(Xxx)2:(19) 20.100000] [s4u_test/INFO] Passed: (Xxx)2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 22.000000] [s4u_test/INFO] Passed: ( ooo )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 22.000000] [s4u_test/INFO] # TEST ON TWO-CORE PMs AND SINGLE-CORE VMs
+>[node-0.1core.org:master:(1) 22.000000] [s4u_test/INFO] ## Check impact of a single VM (no degradation for the moment)
+>[node-0.1core.org:master:(1) 22.000000] [s4u_test/INFO] ### Test '( [o]1 )2'. A task in a VM on a bicore PM
+>[VM0:( [X]1 )2:(22) 22.100000] [s4u_test/INFO] Passed: ( [X]1 )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 24.000000] [s4u_test/INFO] Passed: ( [o]1 )2 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 24.000000] [s4u_test/INFO] ### Test '( [oo]1 )2'. 2 tasks in a VM on a bicore PM
+>[VM0:( [xX]1 )2:(24) 24.100000] [s4u_test/INFO] Passed: ( [xX]1 )2 with 0.5 load (50000000flops) took 0.1s as expected
+>[VM0:( [Xx]1 )2:(23) 24.100000] [s4u_test/INFO] Passed: ( [Xx]1 )2 with 0.5 load (50000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 26.000000] [s4u_test/INFO] Passed: ( [oo]1 )2 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 26.000000] [s4u_test/INFO] ### Put a VM on a PM, and put a task to the PM
+>[node-0.2cores.org:( [ ]1 X )2:(25) 26.100000] [s4u_test/INFO] Passed: ( [ ]1 X )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 28.000000] [s4u_test/INFO] Passed: ( [ ]1 o )2 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 28.000000] [s4u_test/INFO] ### Put a VM on a PM, put a task to the PM and a task to the VM
+>[node-0.2cores.org:( [x]1 X )2:(27) 28.100000] [s4u_test/INFO] Passed: ( [x]1 X )2 with 1 load (100000000flops) took 0.1s as expected
+>[VM0:( [X]1 x )2:(26) 28.100000] [s4u_test/INFO] Passed: ( [X]1 x )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 30.000000] [s4u_test/INFO] Passed: ( [o]1 o )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 30.000000] [s4u_test/INFO] ## Check impact of a several VMs (there is no degradation for the moment)
+>[node-0.1core.org:master:(1) 30.000000] [s4u_test/INFO] ### Put two VMs on a PM, and put a task to one VM
+>[VM0:( [X]1 [ ]1 )2:(28) 30.100000] [s4u_test/INFO] Passed: ( [X]1 [ ]1 )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 32.000000] [s4u_test/INFO] Passed: ( [o]1 [ ]1 )2 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 32.000000] [s4u_test/INFO] ### Put two VMs on a PM, and put a task to each VM
+>[VM0:( [X]1 [x]1 )2:(29) 32.100000] [s4u_test/INFO] Passed: ( [X]1 [x]1 )2 with 1 load (100000000flops) took 0.1s as expected
+>[VM1:( [x]1 [X]1 )2:(30) 32.100000] [s4u_test/INFO] Passed: ( [x]1 [X]1 )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 34.000000] [s4u_test/INFO] Passed: ( [o]1 [o]1 )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 34.000000] [s4u_test/INFO] ### Put three VMs on a PM, and put a task to two VMs
+>[VM0:( [X]1 [x]1 [ ]1 )2:(31) 34.100000] [s4u_test/INFO] Passed: ( [X]1 [x]1 [ ]1 )2 with 1 load (100000000flops) took 0.1s as expected
+>[VM1:( [x]1 [X]1 [ ]1 )2:(32) 34.100000] [s4u_test/INFO] Passed: ( [x]1 [X]1 [ ]1 )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 36.000000] [s4u_test/INFO] Passed: ( [o]1 [o]1 [ ]1 )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 36.000000] [s4u_test/INFO] ### Put three VMs on a PM, and put a task to each VM
+>[VM0:( [X]1 [o]1 [o]1 )2:(33) 36.100000] [s4u_test/INFO] Passed: ( [X]1 [o]1 [o]1 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[VM1:( [o]1 [X]1 [o]1 )2:(34) 36.100000] [s4u_test/INFO] Passed: ( [o]1 [X]1 [o]1 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[VM2:( [o]1 [o]1 [X]1 )2:(35) 36.100000] [s4u_test/INFO] Passed: ( [o]1 [o]1 [X]1 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 38.000000] [s4u_test/INFO] Passed: ( [o]1 [o]1 [o]1 )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 38.000000] [s4u_test/INFO] # TEST ON TWO-CORE PMs AND TWO-CORE VMs
+>[node-0.1core.org:master:(1) 38.000000] [s4u_test/INFO] ## Check impact of a single VM (there is no degradation for the moment)
+>[node-0.1core.org:master:(1) 38.000000] [s4u_test/INFO] ### Put a VM on a PM, and put a task to the VM
+>[VM0:( [X]2 )2:(36) 38.100000] [s4u_test/INFO] Passed: ( [X]2 )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 40.000000] [s4u_test/INFO] Passed: ( [o]2 )2 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 40.000000] [s4u_test/INFO] ### Put a VM on a PM, and put two tasks to the VM
+>[VM0:( [oX]2 )2:(38) 40.100000] [s4u_test/INFO] Passed: ( [oX]2 )2 with 1 load (100000000flops) took 0.1s as expected
+>[VM0:( [Xo]2 )2:(37) 40.100000] [s4u_test/INFO] Passed: ( [Xo]2 )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 42.000000] [s4u_test/INFO] Passed: ( [oo]2 )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 42.000000] [s4u_test/INFO] ### Put a VM on a PM, and put three tasks to the VM
+>[VM0:( [ooX]2 )2:(41) 42.100000] [s4u_test/INFO] Passed: ( [ooX]2 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[VM0:( [oXo]2 )2:(40) 42.100000] [s4u_test/INFO] Passed: ( [oXo]2 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[VM0:( [Xoo]2 )2:(39) 42.100000] [s4u_test/INFO] Passed: ( [Xoo]2 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 44.000000] [s4u_test/INFO] Passed: ( [ooo]2 )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 44.000000] [s4u_test/INFO] ## Check impact of a single VM collocated with a task (there is no degradation for the moment)
+>[node-0.1core.org:master:(1) 44.000000] [s4u_test/INFO] ### Put a VM on a PM, and put a task to the PM
+>[node-0.2cores.org:( [ ]2 X )2:(42) 44.100000] [s4u_test/INFO] Passed: ( [ ]2 X )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 46.000000] [s4u_test/INFO] Passed: ( [ ]2 o )2 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 46.000000] [s4u_test/INFO] ### Put a VM on a PM, put one task to the PM and one task to the VM
+>[node-0.2cores.org:( [o]2 X )2:(43) 46.100000] [s4u_test/INFO] Passed: ( [o]2 X )2 with 1 load (100000000flops) took 0.1s as expected
+>[VM0:( [X]2 o )2:(44) 46.100000] [s4u_test/INFO] Passed: ( [X]2 o )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 48.000000] [s4u_test/INFO] Passed: ( [o]2 o )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 48.000000] [s4u_test/INFO] ### Put a VM on a PM, put one task to the PM and two tasks to the VM
+>[node-0.2cores.org:( [oo]2 X )2:(45) 48.100000] [s4u_test/INFO] Passed: ( [oo]2 X )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[VM0:( [oX]2 o )2:(47) 48.100000] [s4u_test/INFO] Passed: ( [oX]2 o )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[VM0:( [Xo]2 o )2:(46) 48.100000] [s4u_test/INFO] Passed: ( [Xo]2 o )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 50.000000] [s4u_test/INFO] Passed: ( [oo]2 o )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 50.000000] [s4u_test/INFO] ### Put a VM on a PM, put one task to the PM and three tasks to the VM
+>[node-0.2cores.org:( [ooo]2 X )2:(48) 50.100000] [s4u_test/INFO] Passed: ( [ooo]2 X )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[VM0:( [ooX]2 o )2:(51) 50.100000] [s4u_test/INFO] Passed: ( [ooX]2 o )2 with 0.4444 load (44444444flops) took 0.1s as expected
+>[VM0:( [oXo]2 o )2:(50) 50.100000] [s4u_test/INFO] Passed: ( [oXo]2 o )2 with 0.4444 load (44444444flops) took 0.1s as expected
+>[VM0:( [Xoo]2 o )2:(49) 50.100000] [s4u_test/INFO] Passed: ( [Xoo]2 o )2 with 0.4444 load (44444444flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 52.000000] [s4u_test/INFO] Passed: ( [ooo]2 o )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 52.000000] [s4u_test/INFO] ### Put a VM on a PM, and put two tasks to the PM
+>[node-0.2cores.org:( [ ]2 oX )2:(53) 52.100000] [s4u_test/INFO] Passed: ( [ ]2 oX )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.2cores.org:( [ ]2 Xo )2:(52) 52.100000] [s4u_test/INFO] Passed: ( [ ]2 Xo )2 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 54.000000] [s4u_test/INFO] Passed: ( [ ]2 oo )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 54.000000] [s4u_test/INFO] ### Put a VM on a PM, put one task to the PM and one task to the VM
+>[node-0.2cores.org:( [o]2 oX )2:(55) 54.100000] [s4u_test/INFO] Passed: ( [o]2 oX )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[node-0.2cores.org:( [o]2 Xo )2:(54) 54.100000] [s4u_test/INFO] Passed: ( [o]2 Xo )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[VM0:( [X]2 oo )2:(56) 54.100000] [s4u_test/INFO] Passed: ( [X]2 oo )2 with 0.6667 load (66666666flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 56.000000] [s4u_test/INFO] Passed: ( [o]2 oo )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 56.000000] [s4u_test/INFO] ### Put a VM on a PM, put one task to the PM and two tasks to the VM
+>[node-0.2cores.org:( [oo]2 oX )2:(58) 56.100000] [s4u_test/INFO] Passed: ( [oo]2 oX )2 with 0.5 load (50000000flops) took 0.1s as expected
+>[node-0.2cores.org:( [oo]2 Xo )2:(57) 56.100000] [s4u_test/INFO] Passed: ( [oo]2 Xo )2 with 0.5 load (50000000flops) took 0.1s as expected
+>[VM0:( [oX]2 oo )2:(60) 56.100000] [s4u_test/INFO] Passed: ( [oX]2 oo )2 with 0.5 load (50000000flops) took 0.1s as expected
+>[VM0:( [Xo]2 oo )2:(59) 56.100000] [s4u_test/INFO] Passed: ( [Xo]2 oo )2 with 0.5 load (50000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 58.000000] [s4u_test/INFO] Passed: ( [oo]2 oo )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 58.000000] [s4u_test/INFO] ### Put a VM on a PM, put one task to the PM and three tasks to the VM
+>[node-0.2cores.org:( [ooo]2 oX )2:(62) 58.100000] [s4u_test/INFO] Passed: ( [ooo]2 oX )2 with 0.5 load (50000000flops) took 0.1s as expected
+>[node-0.2cores.org:( [ooo]2 Xo )2:(61) 58.100000] [s4u_test/INFO] Passed: ( [ooo]2 Xo )2 with 0.5 load (50000000flops) took 0.1s as expected
+>[VM0:( [ooX]2 oo )2:(65) 58.100000] [s4u_test/INFO] Passed: ( [ooX]2 oo )2 with 0.3333 load (33333333flops) took 0.1s as expected
+>[VM0:( [oXo]2 oo )2:(64) 58.100000] [s4u_test/INFO] Passed: ( [oXo]2 oo )2 with 0.3333 load (33333333flops) took 0.1s as expected
+>[VM0:( [Xoo]2 oo )2:(63) 58.100000] [s4u_test/INFO] Passed: ( [Xoo]2 oo )2 with 0.3333 load (33333333flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 60.000000] [s4u_test/INFO] Passed: ( [ooo]2 oo )2 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 60.000000] [s4u_test/INFO] # TEST ON FOUR-CORE PMs AND TWO-CORE VMs
+>[node-0.1core.org:master:(1) 60.000000] [s4u_test/INFO] ## Check impact of a single VM
+>[node-0.1core.org:master:(1) 60.000000] [s4u_test/INFO] ### Put a VM on a PM, and put a task to the VM
+>[VM0:( [X]2 )4:(66) 60.100000] [s4u_test/INFO] Passed: ( [X]2 )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 62.000000] [s4u_test/INFO] Passed: ( [o]2 )4 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 62.000000] [s4u_test/INFO] ### Put a VM on a PM, and put two tasks to the VM
+>[VM0:( [oX]2 )4:(68) 62.100000] [s4u_test/INFO] Passed: ( [oX]2 )4 with 1 load (100000000flops) took 0.1s as expected
+>[VM0:( [Xo]2 )4:(67) 62.100000] [s4u_test/INFO] Passed: ( [Xo]2 )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 64.000000] [s4u_test/INFO] Passed: ( [oo]2 )4 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 64.000000] [s4u_test/INFO] ### ( [ooo]2 )4: Put a VM on a PM, and put three tasks to the VM
+>[VM0:( [ooX]2 )4:(71) 64.100000] [s4u_test/INFO] Passed: ( [ooX]2 )4 with 0.6667 load (66666666flops) took 0.1s as expected
+>[VM0:( [oXo]2 )4:(70) 64.100000] [s4u_test/INFO] Passed: ( [oXo]2 )4 with 0.6667 load (66666666flops) took 0.1s as expected
+>[VM0:( [Xoo]2 )4:(69) 64.100000] [s4u_test/INFO] Passed: ( [Xoo]2 )4 with 0.6667 load (66666666flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 66.000000] [s4u_test/INFO] Passed: ( [ooo]2 )4 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 66.000000] [s4u_test/INFO] ## Check impact of a single empty VM collocated with tasks
+>[node-0.1core.org:master:(1) 66.000000] [s4u_test/INFO] ### Put a VM on a PM, and put a task to the PM
+>[node-0.4cores.org:( [ ]2 X )4:(72) 66.100000] [s4u_test/INFO] Passed: ( [ ]2 X )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 68.000000] [s4u_test/INFO] Passed: ( [ ]2 o )4 consumed 0.100000 J (i.e. 1 cores used)
+>[node-0.1core.org:master:(1) 68.000000] [s4u_test/INFO] ### Put a VM on a PM, and put two tasks to the PM
+>[node-0.4cores.org:( [ ]2 oX )4:(74) 68.100000] [s4u_test/INFO] Passed: ( [ ]2 oX )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [ ]2 Xo )4:(73) 68.100000] [s4u_test/INFO] Passed: ( [ ]2 Xo )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 70.000000] [s4u_test/INFO] Passed: ( [ ]2 oo )4 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 70.000000] [s4u_test/INFO] ### Put a VM on a PM, and put three tasks to the PM
+>[node-0.4cores.org:( [ ]2 ooX )4:(77) 70.100000] [s4u_test/INFO] Passed: ( [ ]2 ooX )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [ ]2 oXo )4:(76) 70.100000] [s4u_test/INFO] Passed: ( [ ]2 oXo )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [ ]2 Xoo )4:(75) 70.100000] [s4u_test/INFO] Passed: ( [ ]2 Xoo )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 72.000000] [s4u_test/INFO] Passed: ( [ ]2 ooo )4 consumed 0.300000 J (i.e. 3 cores used)
+>[node-0.1core.org:master:(1) 72.000000] [s4u_test/INFO] ### Put a VM on a PM, and put four tasks to the PM
+>[node-0.4cores.org:( [ ]2 oooX )4:(81) 72.100000] [s4u_test/INFO] Passed: ( [ ]2 oooX )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [ ]2 ooXo )4:(80) 72.100000] [s4u_test/INFO] Passed: ( [ ]2 ooXo )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [ ]2 oXoo )4:(79) 72.100000] [s4u_test/INFO] Passed: ( [ ]2 oXoo )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [ ]2 Xooo )4:(78) 72.100000] [s4u_test/INFO] Passed: ( [ ]2 Xooo )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 74.000000] [s4u_test/INFO] Passed: ( [ ]2 oooo )4 consumed 0.400000 J (i.e. 4 cores used)
+>[node-0.1core.org:master:(1) 74.000000] [s4u_test/INFO] ## Check impact of a single working VM collocated with tasks
+>[node-0.1core.org:master:(1) 74.000000] [s4u_test/INFO] ### Put a VM on a PM, and put one task to the PM and one task to the VM
+>[node-0.4cores.org:( [o]2 X )4:(83) 74.100000] [s4u_test/INFO] Passed: ( [o]2 X )4 with 1 load (100000000flops) took 0.1s as expected
+>[VM0:( [X]2 o )4:(82) 74.100000] [s4u_test/INFO] Passed: ( [X]2 o )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 76.000000] [s4u_test/INFO] Passed: ( [o]2 o )4 consumed 0.200000 J (i.e. 2 cores used)
+>[node-0.1core.org:master:(1) 76.000000] [s4u_test/INFO] ### Put a VM on a PM, and put two tasks to the PM and one task to the VM
+>[node-0.4cores.org:( [o]2 oX )4:(86) 76.100000] [s4u_test/INFO] Passed: ( [o]2 oX )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [o]2 Xo )4:(85) 76.100000] [s4u_test/INFO] Passed: ( [o]2 Xo )4 with 1 load (100000000flops) took 0.1s as expected
+>[VM0:( [X]2 oo )4:(84) 76.100000] [s4u_test/INFO] Passed: ( [X]2 oo )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 78.000000] [s4u_test/INFO] Passed: ( [o]2 oo )4 consumed 0.300000 J (i.e. 3 cores used)
+>[node-0.1core.org:master:(1) 78.000000] [s4u_test/INFO] ### Put a VM on a PM, and put two tasks to the PM and two tasks to the VM
+>[node-0.4cores.org:( [oo]2 oX )4:(90) 78.100000] [s4u_test/INFO] Passed: ( [oo]2 oX )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [oo]2 Xo )4:(89) 78.100000] [s4u_test/INFO] Passed: ( [oo]2 Xo )4 with 1 load (100000000flops) took 0.1s as expected
+>[VM0:( [oX]2 oo )4:(88) 78.100000] [s4u_test/INFO] Passed: ( [oX]2 oo )4 with 1 load (100000000flops) took 0.1s as expected
+>[VM0:( [Xo]2 oo )4:(87) 78.100000] [s4u_test/INFO] Passed: ( [Xo]2 oo )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 80.000000] [s4u_test/INFO] Passed: ( [oo]2 oo )4 consumed 0.400000 J (i.e. 4 cores used)
+>[node-0.1core.org:master:(1) 80.000000] [s4u_test/INFO] ### Put a VM on a PM, and put three tasks to the PM and one tasks to the VM
+>[node-0.4cores.org:( [o]2 ooX )4:(94) 80.100000] [s4u_test/INFO] Passed: ( [o]2 ooX )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [o]2 oXo )4:(93) 80.100000] [s4u_test/INFO] Passed: ( [o]2 oXo )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [o]2 Xoo )4:(92) 80.100000] [s4u_test/INFO] Passed: ( [o]2 Xoo )4 with 1 load (100000000flops) took 0.1s as expected
+>[VM0:( [X]2 ooo )4:(91) 80.100000] [s4u_test/INFO] Passed: ( [X]2 ooo )4 with 1 load (100000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 82.000000] [s4u_test/INFO] Passed: ( [o]2 ooo )4 consumed 0.400000 J (i.e. 4 cores used)
+>[node-0.1core.org:master:(1) 82.000000] [s4u_test/INFO] ### Put a VM on a PM, and put three tasks to the PM and two tasks to the VM
+>[node-0.4cores.org:( [oo]2 ooX )4:(99) 82.100000] [s4u_test/INFO] Passed: ( [oo]2 ooX )4 with 0.8 load (80000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [oo]2 oXo )4:(98) 82.100000] [s4u_test/INFO] Passed: ( [oo]2 oXo )4 with 0.8 load (80000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [oo]2 Xoo )4:(97) 82.100000] [s4u_test/INFO] Passed: ( [oo]2 Xoo )4 with 0.8 load (80000000flops) took 0.1s as expected
+>[VM0:( [oX]2 ooo )4:(96) 82.100000] [s4u_test/INFO] Passed: ( [oX]2 ooo )4 with 0.8 load (80000000flops) took 0.1s as expected
+>[VM0:( [Xo]2 ooo )4:(95) 82.100000] [s4u_test/INFO] Passed: ( [Xo]2 ooo )4 with 0.8 load (80000000flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 84.000000] [s4u_test/INFO] Passed: ( [oo]2 ooo )4 consumed 0.400000 J (i.e. 4 cores used)
+>[node-0.1core.org:master:(1) 84.000000] [s4u_test/INFO] ### Put a VM on a PM, and put three tasks to the PM and three tasks to the VM
+>[node-0.4cores.org:( [ooo]2 ooX )4:(105) 84.100000] [s4u_test/INFO] Passed: ( [ooo]2 ooX )4 with 0.8 load (80000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [ooo]2 oXo )4:(104) 84.100000] [s4u_test/INFO] Passed: ( [ooo]2 oXo )4 with 0.8 load (80000000flops) took 0.1s as expected
+>[node-0.4cores.org:( [ooo]2 Xoo )4:(103) 84.100000] [s4u_test/INFO] Passed: ( [ooo]2 Xoo )4 with 0.8 load (80000000flops) took 0.1s as expected
+>[VM0:( [ooX]2 ooo )4:(102) 84.100000] [s4u_test/INFO] Passed: ( [ooX]2 ooo )4 with 0.5333 load (53333333flops) took 0.1s as expected
+>[VM0:( [oXo]2 ooo )4:(101) 84.100000] [s4u_test/INFO] Passed: ( [oXo]2 ooo )4 with 0.5333 load (53333333flops) took 0.1s as expected
+>[VM0:( [Xoo]2 ooo )4:(100) 84.100000] [s4u_test/INFO] Passed: ( [Xoo]2 ooo )4 with 0.5333 load (53333333flops) took 0.1s as expected
+>[node-0.1core.org:master:(1) 86.000000] [s4u_test/INFO] Passed: ( [ooo]2 ooo )4 consumed 0.400000 J (i.e. 4 cores used)
+>[node-0.1core.org:master:(1) 86.000000] [s4u_test/INFO]
+>[node-0.1core.org:master:(1) 86.000000] [s4u_test/INFO]
+>[node-0.1core.org:master:(1) 86.000000] [s4u_test/INFO] ## 0 test failed
+>[node-0.1core.org:master:(1) 86.000000] [s4u_test/INFO]
+>[86.000000] [surf_energy/INFO] Total energy consumption: 8.200000 Joules (used hosts: 8.200000 Joules; unused/idle hosts: 0.000000)
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-0.1core.org: 0.800000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-0.2cores.org: 3.700000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-0.4cores.org: 3.600000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-1.1core.org: 0.100000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-1.2cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-1.4cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-2.1core.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-2.2cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-2.4cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-3.1core.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-3.2cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-3.4cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-4.1core.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-4.2cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-4.4cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-5.1core.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-5.2cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-5.4cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-6.1core.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-6.2cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-6.4cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-7.1core.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-7.2cores.org: 0.000000 Joules
+>[86.000000] [surf_energy/INFO] Energy consumption of host node-7.4cores.org: 0.000000 Joules