/* Copyright (c) 2013-2018. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/plugins/vm/VirtualMachineImpl.hpp"
-#include "src/surf/FileImpl.hpp"
#include <string>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_host, surf, "Logging specific to the SURF host module");
/*********
 * Model *
 *********/
-/* Each VM has a dummy CPU action on the PM layer. This CPU action works as the
- * constraint (capacity) of the VM in the PM layer. If the VM does not have any
- * active task, the dummy CPU action must be deactivated, so that the VM does
- * not get any CPU share in the PM layer. */
-void HostModel::ignoreEmptyVmInPmLMM()
+/* Each VM has a dummy CPU action on the PM layer. This CPU action works as the constraint (capacity) of the VM in the
+ * PM layer. If the VM does not have any active task, the dummy CPU action must be deactivated, so that the VM does not
+ * get any CPU share in the PM layer. */
+void HostModel::ignore_empty_vm_in_pm_LMM()
{
/* iterate for all virtual machines */
for (s4u::VirtualMachine* const& ws_vm : vm::VirtualMachineImpl::allVms_) {
Cpu* cpu = ws_vm->pimpl_cpu;
- int active_tasks = lmm_constraint_get_variable_amount(cpu->constraint());
+ int active_tasks = cpu->get_constraint()->get_variable_amount();
/* The impact of the VM over its PM is the min between its vCPU amount and the amount of tasks it contains */
- int impact = std::min(active_tasks, ws_vm->pimpl_vm_->coreAmount());
+ int impact = std::min(active_tasks, ws_vm->getImpl()->coreAmount());
XBT_DEBUG("set the weight of the dummy CPU action of VM%p on PM to %d (#tasks: %d)", ws_vm, impact, active_tasks);
if (impact > 0)
- ws_vm->pimpl_vm_->action_->setSharingWeight(1. / impact);
+ ws_vm->getImpl()->action_->set_priority(1. / impact);
else
- ws_vm->pimpl_vm_->action_->setSharingWeight(0.);
+ ws_vm->getImpl()->action_->set_priority(0.);
}
}
return -1.0;
}
// NOTE(review): this span is an unresolved unified-diff hunk — lines still carry
// '-'/'+' change markers and old/new versions of the signature are interleaved.
// The brace structure below (a '}' immediately followed by '} else {') shows that
// at least one interior context hunk (presumably the two-host communication case)
// was elided; reconcile against the post-patch upstream file before compiling.
-Action* HostModel::executeParallelTask(int host_nb, simgrid::s4u::Host** host_list, double* flops_amount,
- double* bytes_amount, double rate)
+kernel::resource::Action* HostModel::execute_parallel(int host_nb, s4u::Host** host_list, double* flops_amount,
+ double* bytes_amount, double rate)
{
- Action* action = nullptr;
+ kernel::resource::Action* action = nullptr;
// Case 1: single host, no communication cost -> plain local execution
 if ((host_nb == 1) && (has_cost(bytes_amount, 0) <= 0)) {
 action = host_list[0]->pimpl_cpu->execution_start(flops_amount[0]);
// Case 2: single host, no computation cost (self-communication)
 } else if ((host_nb == 1) && (has_cost(flops_amount, 0) <= 0)) {
 xbt_die("Cannot have a communication that is not a simple point-to-point in this model. You should consider "
 "using the ptask model");
 }
// NOTE(review): unmatched closing brace above — evidence of the elided hunk.
- } else
+ } else {
 xbt_die(
 "This model only accepts one of the following. You should consider using the ptask model for the other cases.\n"
 " - execution with one host only and no communication\n"
 " - Self-comms with one host only\n"
 " - Communications with two hosts and no computation");
- xbt_free(host_list);
+ }
// The '+' lines switch cleanup from xbt_free to delete[], implying these arrays
// are now allocated with new[] at the call site — TODO confirm against callers.
+ delete[] host_list;
+ delete[] flops_amount;
+ delete[] bytes_amount;
 return action;
}
piface_->pimpl_ = this;
}
-void HostImpl::getAttachedStorageList(std::vector<const char*>* storages)
+std::vector<const char*> HostImpl::get_attached_storages()
{
+ std::vector<const char*> storages;
for (auto const& s : storage_)
- if (s.second->getHost() == piface_->getCname())
- storages->push_back(s.second->piface_.getName());
+ if (s.second->getHost() == piface_->get_cname())
+ storages.push_back(s.second->piface_.get_cname());
+ return storages;
}
}