Host host3 = Host.getByName("MyHost3");
Msg.info("Creating and starting two VMs");
- VM vmHost1 = new VM(host1, "vmHost1", 2048, 10, 50);
+ VM vmHost1 = new VM(host1, "vmHost1");
vmHost1.start();
- VM vmHost2 = new VM(host2, "vmHost3", 2048, 10, 50);
+ VM vmHost2 = new VM(host2, "vmHost3");
vmHost2.start();
Msg.info("Create two tasks on Host1: one inside a VM, the other directly on the host");
new DummyProcess (vmHost1, "p12").start();
Msg.info("Create two tasks on Host2: both directly on the host");
- new DummyProcess (vmHost2, "p21").start();
- new DummyProcess (host2, "p22").start();
+ new DummyProcess (vmHost2, "p21").start();
+ new DummyProcess (host2, "p22").start();
Msg.info("Create two tasks on Host3: both inside a VM");
- new DummyProcess (host3, "p31").start();
- new DummyProcess (host3, "p312").start();
+ new DummyProcess (host3, "p31").start();
+ new DummyProcess (host3, "p312").start();
Msg.info("Wait 5 seconds. The tasks are still running (they run for 3 seconds, but 2 tasks are co-located, "
+ "so they run for 6 seconds)");
- waitFor(5);
+ waitFor(5);
Msg.info("Wait another 5 seconds. The tasks stop at some point in between");
- waitFor(5);
+ waitFor(5);
- vmHost1.destroy();
- vmHost2.destroy();
+ vmHost1.destroy();
+ vmHost2.destroy();
}
}
> [VM00:WRK02:(4) 10.280841] [msg_test/INFO] WRK02 executed task(Task02)
> [VM01:WRK03:(5) 10.361121] [msg_test/INFO] WRK03 received task(Task03) from mailbox(MBOX:WRK03)
> [node-0.acme.org:master:(1) 10.361121] [msg_test/INFO] # Migrate all VMs to PM(node-1.acme.org)
-> [node-1.acme.org:__pr_mig_tx:VM00(node-1.acme.org-node-1.acme.org):(7) 10.361121] [vm_live_migration/WARNING] use the default max_downtime value 30ms
> [VM01:WRK03:(5) 10.371121] [msg_test/INFO] WRK03 executed task(Task03)
-> [node-10.acme.org:__pr_mig_tx:VM01(node-10.acme.org-node-1.acme.org):(9) 19.682922] [vm_live_migration/WARNING] use the default max_downtime value 30ms
> [node-0.acme.org:master:(1) 28.561942] [msg_test/INFO] # Migrate all VMs to PM(node-10.acme.org)
-> [node-1.acme.org:__pr_mig_tx:VM00(node-1.acme.org-node-10.acme.org):(11) 28.561942] [vm_live_migration/WARNING] use the default max_downtime value 30ms
-> [node-1.acme.org:__pr_mig_tx:VM01(node-1.acme.org-node-10.acme.org):(13) 37.440963] [vm_live_migration/WARNING] use the default max_downtime value 30ms
> [node-0.acme.org:master:(1) 46.319984] [msg_test/INFO] # Shutdown the half of worker processes gracefully. The remaining half will be forcibly killed.
> [VM00:WRK00:(2) 46.327790] [msg_test/INFO] WRK00 received task(finalize) from mailbox(MBOX:WRK00)
> [VM01:WRK01:(3) 46.335596] [msg_test/INFO] WRK01 received task(finalize) from mailbox(MBOX:WRK01)
$ $SG_TEST_EXENV ${bindir:=.}/cloud-migration ${platfdir}/small_platform.xml --log=no_loc "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:master_@Fafard) Test: Migrate a VM with 1000 Mbytes RAM
-> [ 0.000000] (3:__pr_mig_tx:VM0(Fafard-Tremblay)@Fafard) use the default max_downtime value 30ms
> [132.765801] (1:master_@Fafard) VM0 migrated: Fafard->Tremblay in 132.766 s
> [132.765801] (1:master_@Fafard) Test: Migrate a VM with 100 Mbytes RAM
-> [132.765801] (5:__pr_mig_tx:VM0(Fafard-Tremblay)@Fafard) use the default max_downtime value 30ms
> [146.111793] (1:master_@Fafard) VM0 migrated: Fafard->Tremblay in 13.346 s
> [146.111793] (1:master_@Fafard) Test: Migrate two VMs at once from PM0 to PM1
-> [146.111793] (8:__pr_mig_tx:VM0(Fafard-Tremblay)@Fafard) use the default max_downtime value 30ms
-> [146.111793] (11:__pr_mig_tx:VM1(Fafard-Tremblay)@Fafard) use the default max_downtime value 30ms
> [411.566271] (9:mig_wrk@Fafard) VM1 migrated: Fafard->Tremblay in 265.454 s
> [411.566271] (6:mig_wrk@Fafard) VM0 migrated: Fafard->Tremblay in 265.454 s
> [10146.111793] (1:master_@Fafard) Test: Migrate two VMs at once to different PMs
-> [10146.111793] (14:__pr_mig_tx:VM0(Fafard-Tremblay)@Fafard) use the default max_downtime value 30ms
-> [10146.111793] (17:__pr_mig_tx:VM1(Fafard-Bourassa)@Fafard) use the default max_downtime value 30ms
> [10362.620589] (15:mig_wrk@Fafard) VM1 migrated: Fafard->Bourassa in 216.509 s
> [10411.547334] (12:mig_wrk@Fafard) VM0 migrated: Fafard->Tremblay in 265.436 s
> [20146.111793] (0:maestro@) Bye (simulation time 20146.1)
/* Host 1 */
XBT_INFO("Creating and starting two VMs");
- msg_vm_t vm_host1 = MSG_vm_create(host1, "vm_host1", 1, 2048, 10, 50);
+ msg_vm_t vm_host1 = MSG_vm_create_core(host1, "vm_host1");
MSG_vm_start(vm_host1);
- msg_vm_t vm_host2 = MSG_vm_create(host2, "vm_host2", 1, 2048, 10, 50);
+ msg_vm_t vm_host2 = MSG_vm_create_core(host2, "vm_host2");
MSG_vm_start(vm_host2);
XBT_INFO("Create two tasks on Host1: both inside a VM");
vm0->destroy();
vm0 = new simgrid::s4u::VirtualMachine("VM0", pm0, 1);
- s_vm_params_t params;
- memset(¶ms, 0, sizeof(params));
- vm0->setParameters(¶ms);
vm0->setRamsize(1e9); // 1GB
vm0->start();
> [51100.000000] (88:worker0@VM0) not bound => duration 100.000000 (7629600.000000 flops/s)
> [52000.000000] (1:master_@Fafard)
> [52000.000000] (1:master_@Fafard) # 10. (c) migrate
-> [52000.000000] (90:__pr_mig_tx:VM0(Fafard-Fafard)@Fafard) use the default max_downtime value 30ms
> [52002.070722] (1:master_@Fafard)
> [52002.070722] (1:master_@Fafard) # 10. (d) Put a task again on the VM.
> [52102.070722] (91:worker0@VM0) not bound => duration 100.000000 (7629600.000000 flops/s)
" network one");
XBT_INFO("### Relocate VM0 between PM0 and PM1");
vm0 = new simgrid::s4u::VirtualMachine("VM0", pm0, 1);
- s_vm_params_t params;
- memset(¶ms, 0, sizeof(params));
- vm0->setParameters(¶ms);
vm0->setRamsize(1L * 1024 * 1024 * 1024); // 1GiB
vm0->start();
> [Fafard:master_:(1) 49.000000] [s4u_test/INFO] ## Test 5 (ended)
> [Fafard:master_:(1) 49.000000] [s4u_test/INFO] ## Test 6 (started): Check migration impact (not yet implemented neither on the CPU resource nor on the network one
> [Fafard:master_:(1) 49.000000] [s4u_test/INFO] ### Relocate VM0 between PM0 and PM1
-> [Fafard:__pr_mig_tx:VM0(Fafard-Tremblay):(38) 49.010000] [vm_live_migration/WARNING] use the default max_downtime value 30ms
> [Bourassa:comm_rx:(36) 49.204993] [s4u_test/INFO] VM0:comm_tx to Bourassa:comm_rx => 0.204993 sec
-> [Tremblay:__pr_mig_tx:VM0(Tremblay-Fafard):(40) 191.674258] [vm_live_migration/WARNING] use the default max_downtime value 30ms
> [Fafard:master_:(1) 339.199251] [s4u_test/INFO] ## Test 6 (ended)
> [339.199251] [s4u_test/INFO] Simulation time 339.199
+++ /dev/null
-/* Copyright (c) 2013-2015. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#ifndef SIMGRID_DATATYPES_H_
-#define SIMGRID_DATATYPES_H_
-
-#include "simgrid/forward.h"
-
-struct vm_params {
- double max_downtime;
- double dp_intensity; // Percentage of pages that get dirty compared to netspeed [0;1] bytes per 1 flop execution
- double dp_cap;
- double mig_speed; // Set migration speed
-};
-
-typedef struct vm_params s_vm_params_t;
-typedef struct vm_params* vm_params_t;
-
-#endif /* SIMGRID_DATATYPES_H_ */
XBT_PUBLIC(int) MSG_vm_is_suspended(msg_vm_t vm);
XBT_PUBLIC(const char*) MSG_vm_get_name(msg_vm_t vm);
-XBT_PUBLIC(void) MSG_vm_get_params(msg_vm_t vm, vm_params_t params);
-XBT_PUBLIC(void) MSG_vm_set_params(msg_vm_t vm, vm_params_t params);
XBT_PUBLIC(void) MSG_vm_set_ramsize(msg_vm_t vm, size_t size);
XBT_PUBLIC(size_t) MSG_vm_get_ramsize(msg_vm_t vm);
XBT_PUBLIC(void) sg_vm_stop_dirty_page_tracking(sg_vm_t vm);
XBT_PUBLIC(double) sg_vm_lookup_computed_flops(sg_vm_t vm);
XBT_PUBLIC(void) sg_vm_migrate(sg_vm_t vm, sg_host_t dst_pm);
+XBT_PUBLIC(int) sg_vm_is_migratable(sg_vm_t vm);
+XBT_PUBLIC(void) sg_vm_set_dirty_page_intensity(sg_vm_t vm, double intensity);
+XBT_PUBLIC(double) sg_vm_get_dirty_page_intensity(sg_vm_t vm);
+XBT_PUBLIC(void) sg_vm_set_working_set_memory(sg_vm_t vm, sg_size_t size);
+XBT_PUBLIC(sg_size_t) sg_vm_get_working_set_memory(sg_vm_t vm);
+XBT_PUBLIC(void) sg_vm_set_migration_speed(sg_vm_t vm, double speed);
+XBT_PUBLIC(double) sg_vm_get_migration_speed(sg_vm_t vm);
+XBT_PUBLIC(double) sg_vm_get_max_downtime(sg_vm_t vm);
#define MSG_vm_live_migration_plugin_init() sg_vm_live_migration_plugin_init()
#define MSG_vm_migrate(vm, dst_pm) sg_vm_migrate(vm, dst_pm)
bool isMigrating();
- void getParameters(vm_params_t params);
- void setParameters(vm_params_t params);
simgrid::s4u::Host* getPm();
void setPm(simgrid::s4u::Host * pm);
size_t getRamsize();
return vm->getPm();
}
-/** \ingroup m_vm_management
- * \brief Set the parameters of a given host
- *
- * \param vm a vm
- * \param params a parameter object
- */
-void MSG_vm_set_params(msg_vm_t vm, vm_params_t params)
-{
- vm->setParameters(params);
-}
-
-/** \ingroup m_vm_management
- * \brief Get the parameters of a given host
- *
- * \param vm the vm you are interested into
- * \param params a prameter object
- */
-void MSG_vm_get_params(msg_vm_t vm, vm_params_t params)
-{
- vm->getParameters(params);
-}
-
void MSG_vm_set_ramsize(msg_vm_t vm, size_t size)
{
vm->setRamsize(size);
/* For the moment, intensity_rate is the percentage against the migration bandwidth */
msg_vm_t vm = new simgrid::s4u::VirtualMachine(name, pm, coreAmount, static_cast<sg_size_t>(ramsize) * 1024 * 1024);
- s_vm_params_t params;
- params.max_downtime = 0.03;
- params.mig_speed = static_cast<double>(mig_netspeed) * 1024 * 1024; // mig_speed
- params.dp_intensity = static_cast<double>(dp_intensity) / 100;
- params.dp_cap = vm->getRamsize() * 0.9; // assume working set memory is 90% of ramsize
-
- XBT_DEBUG("migspeed : %f intensity mem : %d", params.mig_speed, dp_intensity);
- vm->setParameters(¶ms);
+ if (not sg_vm_is_migratable(vm)) {
+ if (mig_netspeed != 0 || dp_intensity != 0)
+ XBT_WARN("The live migration is not enabled. dp_intensity and mig_netspeed can't be used");
+ } else {
+ sg_vm_set_dirty_page_intensity(vm, dp_intensity / 100.0);
+ sg_vm_set_working_set_memory(vm, vm->getRamsize() * 0.9); // assume working set memory is 90% of ramsize
+ sg_vm_set_migration_speed(vm, mig_netspeed * 1024 * 1024.0);
+
+ XBT_DEBUG("migspeed : %f intensity mem : %d", mig_netspeed * 1024 * 1024.0, dp_intensity);
+ }
return vm;
}
"Cannot create a VM named %s: this name is already used by an host or a VM", name);
msg_vm_t vm = new simgrid::s4u::VirtualMachine(name, pm, 1);
- s_vm_params_t params;
- memset(¶ms, 0, sizeof(params));
- vm->setParameters(¶ms);
return vm;
}
/** @brief Create a new VM object with the default parameters, but with a specified amount of cores
"Cannot create a VM named %s: this name is already used by an host or a VM", name);
msg_vm_t vm = new simgrid::s4u::VirtualMachine(name, pm, coreAmount);
- s_vm_params_t params;
- memset(¶ms, 0, sizeof(params));
- vm->setParameters(¶ms);
return vm;
}
action_->setBound(bound);
}
-void VirtualMachineImpl::getParams(vm_params_t params)
-{
- *params = params_;
-}
-
-void VirtualMachineImpl::setParams(vm_params_t params)
-{
- /* may check something here. */
- params_ = *params;
-}
}
}
virtual void setBound(double bound);
- void getParams(vm_params_t params);
- void setParams(vm_params_t params);
-
/* The vm object of the lower layer */
surf::Action* action_ = nullptr;
private:
simgrid::s4u::Host* hostPM_;
- s_vm_params_t params_;
int coreAmount_;
size_t ramsize_ = 0;
e_surf_vm_state_t vmState_ = SURF_VM_STATE_CREATED;
XBT_DEBUG("mig: rx_done");
}
-static sg_size_t get_updated_size(double computed, double dp_rate, double dp_cap)
+static sg_size_t get_updated_size(double computed, double dp_rate, sg_size_t dp_cap)
{
- double updated_size = computed * dp_rate;
- XBT_DEBUG("updated_size %f dp_rate %f", updated_size, dp_rate);
+ sg_size_t updated_size = static_cast<sg_size_t>(computed * dp_rate);
+ XBT_DEBUG("updated_size %llu dp_rate %f", updated_size, dp_rate);
if (updated_size > dp_cap) {
updated_size = dp_cap;
}
- return static_cast<sg_size_t>(updated_size);
+ return updated_size;
}
sg_size_t MigrationTx::sendMigrationData(sg_size_t size, int stage, int stage2_round, double mig_speed, double timeout)
XBT_DEBUG("mig: tx_start");
double host_speed = vm_->getPm()->getSpeed();
- s_vm_params_t params;
- vm_->getParameters(¶ms);
const sg_size_t ramsize = vm_->getRamsize();
- const double dp_rate = host_speed ? (params.mig_speed * params.dp_intensity) / host_speed : 1;
- const double dp_cap = params.dp_cap;
- const double mig_speed = params.mig_speed;
- double max_downtime = params.max_downtime;
+ const double dp_rate =
+ host_speed ? (sg_vm_get_migration_speed(vm_) * sg_vm_get_dirty_page_intensity(vm_)) / host_speed : 1;
+ const sg_size_t dp_cap = sg_vm_get_working_set_memory(vm_);
+ const double mig_speed = sg_vm_get_migration_speed(vm_);
+ double max_downtime = sg_vm_get_max_downtime(vm_);
double mig_timeout = 10000000.0;
bool skip_stage2 = false;
updated_size = get_updated_size(computed, dp_rate, dp_cap);
}
- XBT_DEBUG("mig-stage 2:%d updated_size %llu computed_during_stage1 %f dp_rate %f dp_cap %f", stage2_round,
+ XBT_DEBUG("mig-stage 2:%d updated_size %llu computed_during_stage1 %f dp_rate %f dp_cap %llu", stage2_round,
updated_size, computed_during_stage1, dp_rate, dp_cap);
/* Check whether the remaining size is below the threshold value. If so, move to stage 3. */
{
simgrid::simix::kernelImmediate([this, bound]() { pimpl_vm_->setBound(bound); });
}
-/** @brief Retrieve a copy of the parameters of that VM/PM
- * @details The ramsize and overcommit fields are used on the PM too */
-void VirtualMachine::getParameters(vm_params_t params)
-{
- pimpl_vm_->getParams(params);
-}
-/** @brief Sets the params of that VM/PM */
-void VirtualMachine::setParameters(vm_params_t params)
-{
- simgrid::simix::kernelImmediate([this, params] { pimpl_vm_->setParams(params); });
-}
} // namespace simgrid
} // namespace s4u
class VmDirtyPageTrackingExt {
bool dp_tracking = false;
std::map<kernel::activity::ExecImplPtr, double> dp_objs;
- double dp_updated_by_deleted_tasks = 0;
+ double dp_updated_by_deleted_tasks = 0.0;
+ // Percentage of pages that get dirty compared to netspeed [0;1] bytes per 1 flop execution
+ double dp_intensity = 0.0;
+  sg_size_t working_set_memory = 0;
+ double max_downtime = 0.03;
+ double mig_speed = 0.0;
public:
void startTracking();
double getStoredRemains(kernel::activity::ExecImplPtr exec) { return dp_objs.at(exec); }
void updateDirtyPageCount(double delta) { dp_updated_by_deleted_tasks += delta; }
double computedFlopsLookup();
+ double getIntensity() { return dp_intensity; }
+ void setIntensity(double intensity) { dp_intensity = intensity; }
+  sg_size_t getWorkingSetMemory() { return working_set_memory; }
+ void setWorkingSetMemory(sg_size_t size) { working_set_memory = size; }
+ void setMigrationSpeed(double speed) { mig_speed = speed; }
+ double getMigrationSpeed() { return mig_speed; }
+ double getMaxDowntime() { return max_downtime; }
static simgrid::xbt::Extension<VirtualMachineImpl, VmDirtyPageTrackingExt> EXTENSION_ID;
virtual ~VmDirtyPageTrackingExt() = default;
}
}
+int sg_vm_is_migratable(sg_vm_t vm)
+{
+ return simgrid::vm::VmDirtyPageTrackingExt::EXTENSION_ID.valid();
+}
+
void sg_vm_start_dirty_page_tracking(sg_vm_t vm)
{
vm->pimpl_vm_->extension<simgrid::vm::VmDirtyPageTrackingExt>()->startTracking();
{
return vm->pimpl_vm_->extension<simgrid::vm::VmDirtyPageTrackingExt>()->computedFlopsLookup();
}
+
+void sg_vm_set_dirty_page_intensity(sg_vm_t vm, double intensity)
+{
+ vm->pimpl_vm_->extension<simgrid::vm::VmDirtyPageTrackingExt>()->setIntensity(intensity);
+}
+
+double sg_vm_get_dirty_page_intensity(sg_vm_t vm)
+{
+ return vm->pimpl_vm_->extension<simgrid::vm::VmDirtyPageTrackingExt>()->getIntensity();
+}
+
+void sg_vm_set_working_set_memory(sg_vm_t vm, sg_size_t size)
+{
+ vm->pimpl_vm_->extension<simgrid::vm::VmDirtyPageTrackingExt>()->setWorkingSetMemory(size);
+}
+
+sg_size_t sg_vm_get_working_set_memory(sg_vm_t vm)
+{
+ return vm->pimpl_vm_->extension<simgrid::vm::VmDirtyPageTrackingExt>()->getWorkingSetMemory();
+}
+
+void sg_vm_set_migration_speed(sg_vm_t vm, double speed)
+{
+ vm->pimpl_vm_->extension<simgrid::vm::VmDirtyPageTrackingExt>()->setMigrationSpeed(speed);
+}
+
+double sg_vm_get_migration_speed(sg_vm_t vm)
+{
+ return vm->pimpl_vm_->extension<simgrid::vm::VmDirtyPageTrackingExt>()->getMigrationSpeed();
+}
+
+double sg_vm_get_max_downtime(sg_vm_t vm)
+{
+ return vm->pimpl_vm_->extension<simgrid::vm::VmDirtyPageTrackingExt>()->getMaxDowntime();
+}
+
SG_END_DECL()
> [51100.000000] (88:worker0@VM0) not bound => duration 100.000000 (7629600.000000 flops/s)
> [52000.000000] (1:master_@Fafard)
> [52000.000000] (1:master_@Fafard) # 10. (c) migrate
-> [52000.000000] (90:__pr_mig_tx:VM0(Fafard-Fafard)@Fafard) use the default max_downtime value 30ms
> [52002.070722] (1:master_@Fafard)
> [52002.070722] (1:master_@Fafard) # 10. (d) Put a task again on the VM.
> [52102.070722] (91:worker0@VM0) not bound => duration 100.000000 (7629600.000000 flops/s)
> [Fafard:master_:(1) 49.000000] [msg_test/INFO] ## Test 5 (ended)
> [Fafard:master_:(1) 49.000000] [msg_test/INFO] ## Test 6 (started): Check migration impact (not yet implemented neither on the CPU resource nor on the network one
> [Fafard:master_:(1) 49.000000] [msg_test/INFO] ### Relocate VM0 between PM0 and PM1
-> [Fafard:__pr_mig_tx:VM0(Fafard-Tremblay):(38) 49.010000] [vm_live_migration/WARNING] use the default max_downtime value 30ms
> [Bourassa:comm_rx:(36) 49.204993] [msg_test/INFO] VM0:comm_tx to Bourassa:comm_rx => 0.204993 sec
-> [Tremblay:__pr_mig_tx:VM0(Tremblay-Fafard):(40) 191.674258] [vm_live_migration/WARNING] use the default max_downtime value 30ms
> [Fafard:master_:(1) 339.199251] [msg_test/INFO] ## Test 6 (ended)
> [339.199251] [msg_test/INFO] Bye (simulation time 339.199)
XBT_INFO("Test 6: Turn on Jupiter, assign a VM on Jupiter, launch a process inside the VM, and turn off the node");
// Create VM0
- int dpRate = 70;
- msg_vm_t vm0;
- msg_process_t daemon;
-
- vm0 = MSG_vm_create(jupiter, "vm0", 1, 2048, 125, dpRate);
+ msg_vm_t vm0 = MSG_vm_create_core(jupiter, "vm0");
MSG_vm_start(vm0);
argvF = xbt_new(char*, 2);
argvF[0] = xbt_strdup("process_daemon");
- daemon = MSG_process_create_with_arguments("process_daemon", process_daemon, NULL, (msg_host_t)vm0, 1, argvF);
+ msg_process_t daemon =
+ MSG_process_create_with_arguments("process_daemon", process_daemon, NULL, (msg_host_t)vm0, 1, argvF);
argvF = xbt_new(char*, 2);
argvF[0] = xbt_strdup("process_daemonJUPI");
include/simgrid/instr.h
include/simgrid/msg.h
include/simgrid/simdag.h
- include/simgrid/datatypes.h
include/simgrid/modelchecker.h
include/simgrid/forward.h
include/simgrid/simix.h