X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/6aa1cd51bc162ae5288aa155446fd39dc2e1b772..172a73b13fe909117c7fbf3d69d4ce5e87efdbc6:/src/msg/msg_vm.cpp

diff --git a/src/msg/msg_vm.cpp b/src/msg/msg_vm.cpp
index 2eb403d578..00ea1e87ec 100644
--- a/src/msg/msg_vm.cpp
+++ b/src/msg/msg_vm.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015. The SimGrid Team.
+/* Copyright (c) 2012-2017. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
@@ -11,23 +11,22 @@
 #include
 
-#include "src/instr/instr_private.h"
-#include "src/msg/msg_private.h"
+#include "src/instr/instr_private.hpp"
+#include "src/msg/msg_private.hpp"
 #include "src/plugins/vm/VirtualMachineImpl.hpp"
 #include "src/plugins/vm/VmHostExt.hpp"
 
 #include "simgrid/host.h"
 #include "simgrid/simix.hpp"
 
-SG_BEGIN_DECL()
+extern "C" {
 
-struct dirty_page {
+struct s_dirty_page {
   double prev_clock;
   double prev_remaining;
   msg_task_t task;
 };
-typedef struct dirty_page s_dirty_page;
-typedef struct dirty_page* dirty_page_t;
+typedef s_dirty_page* dirty_page_t;
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(msg_vm, msg, "Cloud-oriented parts of the MSG API");
@@ -171,9 +170,9 @@ void MSG_vm_destroy(msg_vm_t vm)
   simgrid::simix::kernelImmediate([vm]() { vm->destroy(); });
 
   if (TRACE_msg_vm_is_enabled()) {
-    container_t container = PJ_container_get(vm->getCname());
-    PJ_container_remove_from_parent(container);
-    PJ_container_free(container);
+    container_t container = simgrid::instr::Container::byName(vm->getName());
+    container->removeFromParent();
+    delete container;
   }
 }
@@ -184,39 +183,12 @@
  */
 void MSG_vm_start(msg_vm_t vm)
 {
-  simgrid::simix::kernelImmediate([vm]() {
-    simgrid::vm::VmHostExt::ensureVmExtInstalled();
-
-    simgrid::s4u::Host* pm = vm->pimpl_vm_->getPm();
-    if (pm->extension<simgrid::vm::VmHostExt>() == nullptr)
-      pm->extension_set(new simgrid::vm::VmHostExt());
-
-    long pm_ramsize = pm->extension<simgrid::vm::VmHostExt>()->ramsize;
-    int pm_overcommit = pm->extension<simgrid::vm::VmHostExt>()->overcommit;
-    long vm_ramsize = vm->getRamsize();
-
-    if (pm_ramsize && not pm_overcommit) { /* Only verify that we don't overcommit on need */
-      /* Retrieve the memory occupied by the VMs on that host. Yep, we have to traverse all VMs of all hosts for that */
-      long total_ramsize_of_vms = 0;
-      for (simgrid::s4u::VirtualMachine* ws_vm : simgrid::vm::VirtualMachineImpl::allVms_)
-        if (pm == ws_vm->pimpl_vm_->getPm())
-          total_ramsize_of_vms += ws_vm->pimpl_vm_->getRamsize();
-
-      if (vm_ramsize > pm_ramsize - total_ramsize_of_vms) {
-        XBT_WARN("cannnot start %s@%s due to memory shortage: vm_ramsize %ld, free %ld, pm_ramsize %ld (bytes).",
-                 vm->getCname(), pm->getCname(), vm_ramsize, pm_ramsize - total_ramsize_of_vms, pm_ramsize);
-        THROWF(vm_error, 0, "Memory shortage on host '%s', VM '%s' cannot be started", pm->getCname(), vm->getCname());
-      }
-    }
-
-    vm->pimpl_vm_->setState(SURF_VM_STATE_RUNNING);
-  });
-
+  vm->start();
 
   if (TRACE_msg_vm_is_enabled()) {
-    container_t vm_container = PJ_container_get(vm->getCname());
-    type_t type = PJ_type_get("MSG_VM_STATE", vm_container->type);
-    val_t value = PJ_value_get_or_new("start", "0 0 1", type); // start is blue
-    new PushStateEvent(MSG_get_clock(), vm_container, type, value);
+    container_t vm_container = simgrid::instr::Container::byName(vm->getName());
+    simgrid::instr::Type* state = vm_container->type_->byName("MSG_VM_STATE");
+    state->addEntityValue("start", "0 0 1"); // start is blue
+    new simgrid::instr::PushStateEvent(MSG_get_clock(), vm_container, state, state->getEntityValue("start"));
   }
 }
@@ -229,27 +201,27 @@
  */
 void MSG_vm_shutdown(msg_vm_t vm)
 {
-  smx_actor_t issuer=SIMIX_process_self();
+  smx_actor_t issuer = SIMIX_process_self();
   simgrid::simix::kernelImmediate([vm, issuer]() { vm->pimpl_vm_->shutdown(issuer); });
 
-  // Make sure that the processes in the VM are killed in this scheduling round before processing
-  // (eg with the VM destroy)
+  // Make sure that processes in the VM are killed in this scheduling round before processing (eg with the VM destroy)
   MSG_process_sleep(0.);
 }
 
-static inline char *get_mig_process_tx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
+static std::string get_mig_process_tx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
 {
-  return bprintf("__pr_mig_tx:%s(%s-%s)", vm->getCname(), src_pm->getCname(), dst_pm->getCname());
+  return std::string("__pr_mig_tx:") + vm->getCname() + "(" + src_pm->getCname() + "-" + dst_pm->getCname() + ")";
 }
 
-static inline char *get_mig_process_rx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
+static std::string get_mig_process_rx_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm)
 {
-  return bprintf("__pr_mig_rx:%s(%s-%s)", vm->getCname(), src_pm->getCname(), dst_pm->getCname());
+  return std::string("__pr_mig_rx:") + vm->getCname() + "(" + src_pm->getCname() + "-" + dst_pm->getCname() + ")";
 }
 
-static inline char *get_mig_task_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm, int stage)
+static std::string get_mig_task_name(msg_vm_t vm, msg_host_t src_pm, msg_host_t dst_pm, int stage)
 {
-  return bprintf("__task_mig_stage%d:%s(%s-%s)", stage, vm->getCname(), src_pm->getCname(), dst_pm->getCname());
+  return std::string("__task_mig_stage") + std::to_string(stage) + ":" + vm->getCname() + "(" + src_pm->getCname() +
+         "-" + dst_pm->getCname() + ")";
 }
 
 struct migration_session {
@@ -269,11 +241,11 @@ static int migration_rx_fun(int argc, char *argv[])
   XBT_DEBUG("mig: rx_start");
 
   // The structure has been created in the do_migration function and should only be freed in the same place ;)
-  struct migration_session* ms = static_cast<migration_session*>(MSG_process_get_data(MSG_process_self()));
+  migration_session* ms = static_cast<migration_session*>(MSG_process_get_data(MSG_process_self()));
 
   bool received_finalize = false;
 
-  char *finalize_task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 3);
+  std::string finalize_task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 3);
   while (not received_finalize) {
     msg_task_t task = nullptr;
     int ret = MSG_task_recv(&task, ms->mbox);
@@ -281,16 +253,14 @@
     if (ret != MSG_OK) {
       // An error occurred, clean the code and return
      // The owner did not change, hence the task should be only destroyed on the other side
-      xbt_free(finalize_task_name);
       return 0;
     }
 
-    if (strcmp(task->name, finalize_task_name) == 0)
+    if (finalize_task_name == task->name)
      received_finalize = 1;
 
     MSG_task_destroy(task);
   }
-  xbt_free(finalize_task_name);
 
   // Here Stage 1, 2 and 3 have been performed.
   // Hence complete the migration
@@ -319,34 +289,32 @@
 
   if (TRACE_msg_vm_is_enabled()) {
     static long long int counter = 0;
-    char key[INSTR_DEFAULT_STR_SIZE];
-    snprintf(key, INSTR_DEFAULT_STR_SIZE, "%lld", counter);
+    std::string key = std::to_string(counter);
     counter++;
 
     // start link
-    container_t msg = PJ_container_get(vm->getCname());
-    type_t type = PJ_type_get("MSG_VM_LINK", PJ_type_get_root());
-    new StartLinkEvent(MSG_get_clock(), PJ_container_get_root(), type, msg, "M", key);
+    container_t msg = simgrid::instr::Container::byName(vm->getName());
+    simgrid::instr::Type* type = simgrid::instr::Type::getRootType()->byName("MSG_VM_LINK");
+    new simgrid::instr::StartLinkEvent(MSG_get_clock(), PJ_container_get_root(), type, msg, "M", key);
 
     // destroy existing container of this vm
-    container_t existing_container = PJ_container_get(vm->getCname());
-    PJ_container_remove_from_parent(existing_container);
-    PJ_container_free(existing_container);
+    container_t existing_container = simgrid::instr::Container::byName(vm->getName());
+    existing_container->removeFromParent();
+    delete existing_container;
 
     // create new container on the new_host location
-    PJ_container_new(vm->getCname(), INSTR_MSG_VM, PJ_container_get(ms->dst_pm->getCname()));
+    new simgrid::instr::Container(vm->getCname(), "MSG_VM", simgrid::instr::Container::byName(ms->dst_pm->getName()));
 
     // end link
-    msg = PJ_container_get(vm->getCname());
-    type = PJ_type_get("MSG_VM_LINK", PJ_type_get_root());
-    new EndLinkEvent(MSG_get_clock(), PJ_container_get_root(), type, msg, "M", key);
+    msg = simgrid::instr::Container::byName(vm->getName());
+    type = simgrid::instr::Type::getRootType()->byName("MSG_VM_LINK");
+    new simgrid::instr::EndLinkEvent(MSG_get_clock(), PJ_container_get_root(), type, msg, "M", key);
   }
 
   // Inform the SRC that the migration has been correctly performed
-  char *task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 4);
-  msg_task_t task = MSG_task_create(task_name, 0, 0, nullptr);
+  std::string task_name = get_mig_task_name(ms->vm, ms->src_pm, ms->dst_pm, 4);
+  msg_task_t task = MSG_task_create(task_name.c_str(), 0, 0, nullptr);
 
   msg_error_t ret = MSG_task_send(task, ms->mbox_ctl);
-  // xbt_assert(ret == MSG_OK);
   if(ret == MSG_HOST_FAILURE){
     // The DST has crashed, this is a problem has the VM since we are not sure whether SRC is considering that the VM
     // has been correctly migrated on the DST node
@@ -356,7 +324,6 @@
     // The SRC has crashed, this is not a problem has the VM has been correctly migrated on the DST node
     MSG_task_destroy(task);
   }
-  xbt_free(task_name);
 
   XBT_DEBUG("mig: rx_done");
   return 0;
@@ -368,7 +335,7 @@ static void start_dirty_page_tracking(msg_vm_t vm)
   if (vm->pimpl_vm_->dp_objs.empty())
     return;
 
-  for (auto elm : vm->pimpl_vm_->dp_objs) {
+  for (auto const& elm : vm->pimpl_vm_->dp_objs) {
     dirty_page_t dp = elm.second;
     double remaining = MSG_task_get_flops_amount(dp->task);
     dp->prev_clock = MSG_get_clock();
@@ -397,14 +364,13 @@ static double lookup_computed_flop_counts(msg_vm_t vm, int stage_for_fancy_debug
 {
   double total = 0;
 
-  for (auto elm : vm->pimpl_vm_->dp_objs) {
+  for (auto const& elm : vm->pimpl_vm_->dp_objs) {
     const char* key = elm.first.c_str();
     dirty_page_t dp = elm.second;
     double remaining = MSG_task_get_flops_amount(dp->task);
 
     double clock = MSG_get_clock();
 
-    // total += calc_updated_pages(key, vm, dp, remaining, clock);
     total += get_computed(key, vm, dp, remaining, clock);
 
     dp->prev_remaining = remaining;
@@ -462,7 +428,6 @@ void MSG_host_del_task(msg_host_t host, msg_task_t task)
   if (vm->pimpl_vm_->dp_enabled) {
     double remaining = MSG_task_get_flops_amount(task);
     double clock = MSG_get_clock();
-    // double updated = calc_updated_pages(key, host, dp, remaining, clock);
     double updated = get_computed(key, vm, dp, remaining, clock); // was host instead of vm
 
     vm->pimpl_vm_->dp_updated_by_deleted_tasks += updated;
@@ -479,8 +444,8 @@ static sg_size_t send_migration_data(msg_vm_t vm, msg_host_t src_pm, msg_host_t
                                      int stage, int stage2_round, double mig_speed, double timeout)
 {
   sg_size_t sent = 0;
-  char *task_name = get_mig_task_name(vm, src_pm, dst_pm, stage);
-  msg_task_t task = MSG_task_create(task_name, 0, static_cast<double>(size), nullptr);
+  std::string task_name = get_mig_task_name(vm, src_pm, dst_pm, stage);
+  msg_task_t task = MSG_task_create(task_name.c_str(), 0, static_cast<double>(size), nullptr);
 
   double clock_sta = MSG_get_clock();
@@ -490,8 +455,6 @@ static sg_size_t send_migration_data(msg_vm_t vm, msg_host_t src_pm, msg_host_t
   else
     ret = MSG_task_send(task, mbox);
 
-  xbt_free(task_name);
-
   if (ret == MSG_OK) {
     sent = size;
   } else if (ret == MSG_TIMEOUT) {
@@ -530,8 +493,6 @@ static sg_size_t get_updated_size(double computed, double dp_rate, double dp_cap
   double updated_size = computed * dp_rate;
   XBT_DEBUG("updated_size %f dp_rate %f", updated_size, dp_rate);
   if (updated_size > dp_cap) {
-    // XBT_INFO("mig-stage2.%d: %f bytes updated, but cap it with the working set size %f", stage2_round, updated_size,
-    //          dp_cap);
     updated_size = dp_cap;
   }
@@ -543,7 +504,7 @@ static int migration_tx_fun(int argc, char *argv[])
   XBT_DEBUG("mig: tx_start");
 
   // Note that the ms structure has been allocated in do_migration and hence should be freed in the same function ;)
-  migration_session *ms = static_cast<migration_session*>(MSG_process_get_data(MSG_process_self()));
+  migration_session* ms = static_cast<migration_session*>(MSG_process_get_data(MSG_process_self()));
 
   double host_speed = ms->vm->pimpl_vm_->getPm()->getSpeed();
   s_vm_params_t params;
@@ -742,7 +703,7 @@ void MSG_vm_migrate(msg_vm_t vm, msg_host_t dst_pm)
 
   vm->pimpl_vm_->isMigrating = true;
 
-  struct migration_session *ms = xbt_new(struct migration_session, 1);
+  migration_session* ms = xbt_new(migration_session, 1);
   ms->vm = vm;
   ms->src_pm = src_pm;
   ms->dst_pm = dst_pm;
@@ -752,18 +713,12 @@
   ms->mbox_ctl = bprintf("__mbox_mig_ctl:%s(%s-%s)", vm->getCname(), src_pm->getCname(), dst_pm->getCname());
   ms->mbox = bprintf("__mbox_mig_src_dst:%s(%s-%s)", vm->getCname(), src_pm->getCname(), dst_pm->getCname());
 
-  char *pr_rx_name = get_mig_process_rx_name(vm, src_pm, dst_pm);
-  char *pr_tx_name = get_mig_process_tx_name(vm, src_pm, dst_pm);
+  std::string pr_rx_name = get_mig_process_rx_name(vm, src_pm, dst_pm);
+  std::string pr_tx_name = get_mig_process_tx_name(vm, src_pm, dst_pm);
 
-  char** argv = xbt_new(char*, 2);
-  argv[0] = pr_rx_name;
-  argv[1] = nullptr;
-  MSG_process_create_with_arguments(pr_rx_name, migration_rx_fun, ms, dst_pm, 1, argv);
+  MSG_process_create(pr_rx_name.c_str(), migration_rx_fun, ms, dst_pm);
 
-  argv = xbt_new(char*, 2);
-  argv[0] = pr_tx_name;
-  argv[1] = nullptr;
-  MSG_process_create_with_arguments(pr_tx_name, migration_tx_fun, ms, src_pm, 1, argv);
+  MSG_process_create(pr_tx_name.c_str(), migration_tx_fun, ms, src_pm);
 
   /* wait until the migration have finished or on error has occurred */
   XBT_DEBUG("wait for reception of the final ACK (i.e. migration has been correctly performed");
@@ -791,9 +746,7 @@ void MSG_vm_migrate(msg_vm_t vm, msg_host_t dst_pm)
               vm->getCname());
   }
 
-  char* expected_task_name = get_mig_task_name(vm, src_pm, dst_pm, 4);
-  xbt_assert(strcmp(task->name, expected_task_name) == 0);
-  xbt_free(expected_task_name);
+  xbt_assert(get_mig_task_name(vm, src_pm, dst_pm, 4) == task->name);
   MSG_task_destroy(task);
 }
@@ -813,10 +766,10 @@ void MSG_vm_suspend(msg_vm_t vm)
   XBT_DEBUG("vm_suspend done");
 
   if (TRACE_msg_vm_is_enabled()) {
-    container_t vm_container = PJ_container_get(vm->getCname());
-    type_t type = PJ_type_get("MSG_VM_STATE", vm_container->type);
-    val_t value = PJ_value_get_or_new("suspend", "1 0 0", type); // suspend is red
-    new PushStateEvent(MSG_get_clock(), vm_container, type, value);
+    container_t vm_container = simgrid::instr::Container::byName(vm->getName());
+    simgrid::instr::Type* state = vm_container->type_->byName("MSG_VM_STATE");
+    state->addEntityValue("suspend", "1 0 0"); // suspend is red
+    new simgrid::instr::PushStateEvent(MSG_get_clock(), vm_container, state, state->getEntityValue("suspend"));
   }
 }
@@ -830,9 +783,9 @@ void MSG_vm_resume(msg_vm_t vm)
   vm->pimpl_vm_->resume();
 
   if (TRACE_msg_vm_is_enabled()) {
-    container_t vm_container = PJ_container_get(vm->getCname());
-    type_t type = PJ_type_get("MSG_VM_STATE", vm_container->type);
-    new PopStateEvent(MSG_get_clock(), vm_container, type);
+    container_t vm_container = simgrid::instr::Container::byName(vm->getName());
+    simgrid::instr::Type* type = vm_container->type_->byName("MSG_VM_STATE");
+    new simgrid::instr::PopStateEvent(MSG_get_clock(), vm_container, type);
   }
 }
@@ -874,5 +827,4 @@ void MSG_vm_set_bound(msg_vm_t vm, double bound)
 {
   simgrid::simix::kernelImmediate([vm, bound]() { vm->pimpl_vm_->setBound(bound); });
 }
-
-SG_END_DECL()
+}