{
msg_host_t pm0 = MSG_host_by_name("Fafard");
- msg_host_t vm0 = MSG_vm_create_core(pm0, "VM0");
- msg_host_t vm1 = MSG_vm_create_core(pm0, "VM1");
+ msg_vm_t vm0 = MSG_vm_create_core(pm0, "VM0");
+ msg_vm_t vm1 = MSG_vm_create_core(pm0, "VM1");
MSG_vm_start(vm0);
MSG_vm_start(vm1);
msg_task_t task0 = MSG_task_create("Task0", DOUBLE_MAX, 0, NULL);
msg_task_t task1 = MSG_task_create("Task1", DOUBLE_MAX, 0, NULL);
- MSG_process_create("worker0", worker_busy_loop_main, &task0, vm0);
- MSG_process_create("worker1", worker_busy_loop_main, &task1, vm1);
+ MSG_process_create("worker0", worker_busy_loop_main, &task0, (msg_host_t)vm0);
+ MSG_process_create("worker1", worker_busy_loop_main, &task1, (msg_host_t)vm1);
double task0_remain_prev = MSG_task_get_flops_amount(task0);
double task1_remain_prev = MSG_task_get_flops_amount(task1);
test_two_tasks(pm0, pm0);
XBT_INFO(" ");
- msg_host_t vm0 = MSG_vm_create_core(pm0, "VM0");
+ msg_vm_t vm0 = MSG_vm_create_core(pm0, "VM0");
MSG_vm_start(vm0);
XBT_INFO("# 3. Put a single task on a VM. ");
- test_one_task(vm0);
+ test_one_task((msg_host_t)vm0);
XBT_INFO(" ");
XBT_INFO("# 4. Put two tasks on a VM.");
- test_two_tasks(vm0, vm0);
+ test_two_tasks((msg_host_t)vm0, (msg_host_t)vm0);
XBT_INFO(" ");
MSG_vm_destroy(vm0);
MSG_vm_start(vm0);
XBT_INFO("# 6. Put a task on a PM and a task on a VM.");
- test_two_tasks(pm0, vm0);
+ test_two_tasks(pm0, (msg_host_t)vm0);
XBT_INFO(" ");
MSG_vm_destroy(vm0);
MSG_vm_start(vm0);
XBT_INFO("# 7. Put a single task on the VM capped by 10%%.");
- test_one_task(vm0);
+ test_one_task((msg_host_t)vm0);
XBT_INFO(" ");
XBT_INFO("# 8. Put two tasks on the VM capped by 10%%.");
- test_two_tasks(vm0, vm0);
+ test_two_tasks((msg_host_t)vm0, (msg_host_t)vm0);
XBT_INFO(" ");
XBT_INFO("# 9. Put a task on a PM and a task on the VM capped by 10%%.");
- test_two_tasks(pm0, vm0);
+ test_two_tasks(pm0, (msg_host_t)vm0);
XBT_INFO(" ");
MSG_vm_destroy(vm0);
const double computation_amount = cpu_speed * 10;
XBT_INFO("# 10. (a) Put a task on a VM without any bound.");
- launch_worker(vm0, "worker0", computation_amount, 0, 0);
+ launch_worker((msg_host_t)vm0, "worker0", computation_amount, 0, 0);
MSG_process_sleep(1000);
XBT_INFO(" ");
XBT_INFO("# 10. (b) set 10%% bound to the VM, and then put a task on the VM.");
MSG_vm_set_bound(vm0, cpu_speed / 10);
- launch_worker(vm0, "worker0", computation_amount, 0, 0);
+ launch_worker((msg_host_t)vm0, "worker0", computation_amount, 0, 0);
MSG_process_sleep(1000);
XBT_INFO(" ");
XBT_INFO(" ");
XBT_INFO("# 10. (d) Put a task again on the VM.");
- launch_worker(vm0, "worker0", computation_amount, 0, 0);
+ launch_worker((msg_host_t)vm0, "worker0", computation_amount, 0, 0);
MSG_process_sleep(1000);
XBT_INFO(" ");
xbt_dynar_push(vms, &vm);
XBT_INFO("put a process (%s) on %s", pr_name, vm_name);
- MSG_process_create(pr_name, worker_fun, NULL, vm);
+ MSG_process_create(pr_name, worker_fun, NULL, (msg_host_t)vm);
xbt_free(vm_name);
xbt_free(pr_name);
XBT_INFO("# Suspend all VMs");
xbt_dynar_foreach(vms, i, vm) {
- XBT_INFO("suspend %s", MSG_host_get_name(vm));
+ XBT_INFO("suspend %s", MSG_vm_get_name(vm));
MSG_vm_suspend(vm);
}
char *pr_name = bprintf("WRK%02d", index);
XBT_INFO("put a process (%s) on %s", pr_name, vm_name);
- MSG_process_create(pr_name, worker_fun, NULL, vm);
+ MSG_process_create(pr_name, worker_fun, NULL, (msg_host_t)vm);
xbt_free(vm_name);
xbt_free(pr_name);
XBT_INFO("# Shutdown and destroy all the VMs. The remaining worker processes will be forcibly killed.");
xbt_dynar_foreach(vms, i, vm) {
- XBT_INFO("shutdown %s", MSG_host_get_name(vm));
+ XBT_INFO("shutdown %s", MSG_vm_get_name(vm));
MSG_vm_shutdown(vm);
- XBT_INFO("destroy %s", MSG_host_get_name(vm));
+ XBT_INFO("destroy %s", MSG_vm_get_name(vm));
MSG_vm_destroy(vm);
}
char *vm_name = argv[1];
char *dst_pm_name = argv[2];
- msg_vm_t vm = MSG_host_by_name(vm_name);
+ msg_vm_t vm = (msg_vm_t)MSG_host_by_name(vm_name);
msg_host_t dst_pm = MSG_host_by_name(dst_pm_name);
vm_migrate(vm, dst_pm);
XBT_INFO("### Put a VM on a PM, and put a task to the VM");
msg_vm_t vm0 = MSG_vm_create_core(pm0, "VM0");
MSG_vm_start(vm0);
- launch_computation_worker(vm0);
+ launch_computation_worker((msg_host_t)vm0);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
msg_vm_t vm1 = MSG_vm_create_core(pm0, "VM1");
MSG_vm_start(vm0);
MSG_vm_start(vm1);
- launch_computation_worker(vm0);
- launch_computation_worker(vm1);
+ launch_computation_worker((msg_host_t)vm0);
+ launch_computation_worker((msg_host_t)vm1);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_destroy(vm1);
vm1 = MSG_vm_create_core(pm1, "VM1");
MSG_vm_start(vm0);
MSG_vm_start(vm1);
- launch_computation_worker(vm0);
- launch_computation_worker(vm1);
+ launch_computation_worker((msg_host_t)vm0);
+ launch_computation_worker((msg_host_t)vm1);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_destroy(vm1);
XBT_INFO("### Make a connection between PM0 and VM0@PM0");
vm0 = MSG_vm_create_core(pm0, "VM0");
MSG_vm_start(vm0);
- launch_communication_worker(pm0, vm0);
+ launch_communication_worker(pm0, (msg_host_t)vm0);
MSG_process_sleep(5);
MSG_vm_destroy(vm0);
XBT_INFO("### Make a connection between PM0 and VM0@PM1");
vm0 = MSG_vm_create_core(pm1, "VM0");
MSG_vm_start(vm0);
- launch_communication_worker(pm0, vm0);
+ launch_communication_worker(pm0, (msg_host_t)vm0);
MSG_process_sleep(5);
MSG_vm_destroy(vm0);
XBT_INFO("### Make two connections between PM0 and VM0@PM1");
vm0 = MSG_vm_create_core(pm1, "VM0");
MSG_vm_start(vm0);
- launch_communication_worker(pm0, vm0);
- launch_communication_worker(pm0, vm0);
+ launch_communication_worker(pm0, (msg_host_t)vm0);
+ launch_communication_worker(pm0, (msg_host_t)vm0);
MSG_process_sleep(5);
MSG_vm_destroy(vm0);
XBT_INFO("### Make a connection between PM0 and VM0@PM1, and also make a connection between PM0 and PM1");
vm0 = MSG_vm_create_core(pm1, "VM0");
MSG_vm_start(vm0);
- launch_communication_worker(pm0, vm0);
+ launch_communication_worker(pm0, (msg_host_t)vm0);
launch_communication_worker(pm0, pm1);
MSG_process_sleep(5);
MSG_vm_destroy(vm0);
vm1 = MSG_vm_create_core(pm1, "VM1");
MSG_vm_start(vm0);
MSG_vm_start(vm1);
- launch_communication_worker(vm0, vm1);
- launch_communication_worker(vm0, vm1);
+ launch_communication_worker((msg_host_t)vm0, (msg_host_t)vm1);
+ launch_communication_worker((msg_host_t)vm0, (msg_host_t)vm1);
MSG_process_sleep(5);
MSG_vm_destroy(vm0);
MSG_vm_destroy(vm1);
MSG_vm_set_params(vm0, &params);
MSG_vm_start(vm0);
- launch_communication_worker(vm0, pm2);
+ launch_communication_worker((msg_host_t)vm0, pm2);
MSG_process_sleep(0.01);
MSG_vm_migrate(vm0, pm1);
MSG_process_sleep(0.01);
msg_vm_t vm0 = MSG_vm_create_core(pm0, "VM0");
MSG_vm_start(vm0);
- launch_computation_worker(vm0);
+ launch_computation_worker((msg_host_t)vm0);
while(MSG_get_clock()<100) {
if (atask != NULL)
MSG_vm_start(vm_host2);
XBT_INFO("Create two tasks on Host1: one inside a VM, the other directly on the host");
- MSG_process_create("p11", worker_func, NULL, vm_host1);
- MSG_process_create("p12", worker_func, NULL, vm_host1);
+ MSG_process_create("p11", worker_func, NULL, (msg_host_t)vm_host1);
+ MSG_process_create("p12", worker_func, NULL, (msg_host_t)vm_host1);
XBT_INFO("Create two tasks on Host2: both directly on the host");
- MSG_process_create("p21", worker_func, NULL, vm_host2);
+ MSG_process_create("p21", worker_func, NULL, (msg_host_t)vm_host2);
MSG_process_create("p22", worker_func, NULL, host2);
XBT_INFO("Create two tasks on Host3: both inside a VM");
typedef simgrid::s4u::File s4u_File;
typedef simgrid::s4u::Storage s4u_Storage;
typedef simgrid::s4u::NetZone s4u_NetZone;
+typedef simgrid::s4u::VirtualMachine s4u_VM;
typedef boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> smx_activity_t;
typedef simgrid::kernel::routing::NetPoint routing_NetPoint;
typedef simgrid::surf::Resource surf_Resource;
typedef struct s4u_File s4u_File;
typedef struct s4u_Storage s4u_Storage;
typedef struct s4u_NetZone s4u_NetZone;
+typedef struct s4u_VM s4u_VM;
typedef struct kernel_Activity* smx_activity_t;
typedef struct routing_NetPoint routing_NetPoint;
typedef struct surf_Resource surf_Resource;
typedef s4u_Link* sg_link_t;
typedef s4u_Storage* sg_storage_t;
typedef s4u_File* sg_file_t;
+typedef s4u_VM* sg_vm_t;
typedef routing_NetPoint* sg_netpoint_t;
typedef surf_Resource *sg_resource_t;
typedef struct msg_task *msg_task_t;
/* ******************************** VM ************************************* */
-typedef msg_host_t msg_vm_t;
+typedef sg_vm_t msg_vm_t;
/* ******************************** File ************************************ */
typedef sg_file_t msg_file_t;
XBT_PUBLIC(int) MSG_vm_is_migrating(msg_vm_t vm);
XBT_PUBLIC(int) MSG_vm_is_suspended(msg_vm_t vm);
-#define MSG_vm_get_name(vm) MSG_host_get_name(vm)
-
+XBT_PUBLIC(const char*) MSG_vm_get_name(msg_vm_t vm);
XBT_PUBLIC(void) MSG_vm_get_params(msg_vm_t vm, vm_params_t params);
XBT_PUBLIC(void) MSG_vm_set_params(msg_vm_t vm, vm_params_t params);
/** Retrieves the path to the file */
const char* getPath() { return path_; }
- /** Simulates a read action. Returns the size of data actually read
- *
- * FIXME: reading from a remotely mounted disk is not implemented yet.
- * Any storage is considered as local, and no network communication ever occur.
- */
+ /** Simulates a local read action. Returns the size of data actually read */
sg_size_t read(sg_size_t size);
/** Simulates a write action. Returns the size of data actually written. */
/** Remove a file from disk */
int unlink();
- int unlink(sg_host_t host);
- /* FIXME: add these to the S4U API:
- XBT_PUBLIC(const char *) MSG_file_get_name(msg_file_t file);
- XBT_PUBLIC(msg_error_t) MSG_file_rcopy(msg_file_t fd, msg_host_t host, const char* fullpath);
- XBT_PUBLIC(msg_error_t) MSG_file_rmove(msg_file_t fd, msg_host_t host, const char* fullpath);
- */
const char* storage_type;
const char* storageId;
std::string mount_point;
surf_file_t pimpl_ = nullptr;
const char* path_ = nullptr;
void* userdata_ = nullptr;
- sg_host_t host_ = nullptr;
};
}
} // namespace simgrid::s4u
const char* getName();
const char* getType();
Host* getHost();
+ sg_size_t getSize(); /** Retrieve the total amount of space of this storage element */
sg_size_t getSizeFree();
sg_size_t getSizeUsed();
- /** Retrieve the total amount of space of this storage element */
- sg_size_t getSize();
xbt_dict_t getProperties();
const char* getProperty(const char* key);
void setUserdata(void* data) { userdata_ = data; }
void* getUserdata() { return userdata_; }
+ surf::StorageImpl* getImpl() { return pimpl_; }
+
/* The signals */
/** @brief Callback signal fired when a new Link is created */
static simgrid::xbt::signal<void(s4u::Storage&)> onCreation;
static simgrid::xbt::signal<void(s4u::Storage&)> onDestruction;
Host* attached_to_ = nullptr;
- surf::StorageImpl* const pimpl_ = nullptr;
private:
+ surf::StorageImpl* const pimpl_ = nullptr;
std::string name_;
void* userdata_ = nullptr;
};
using MailboxPtr = boost::intrusive_ptr<Mailbox>;
class Mutex;
class NetZone;
+class VirtualMachine;
class File;
class Storage;
/***************************** File **********************************/
XBT_PUBLIC(sg_size_t) simcall_file_read(surf_file_t fd, sg_size_t size, sg_host_t host);
XBT_PUBLIC(sg_size_t) simcall_file_write(surf_file_t fd, sg_size_t size, sg_host_t host);
-XBT_PUBLIC(surf_file_t) simcall_file_open(const char* mount, const char* path, sg_storage_t st);
-XBT_PUBLIC(int) simcall_file_close(surf_file_t fd, sg_host_t host);
/************************** MC simcalls **********************************/
XBT_PUBLIC(int) simcall_mc_random(int min, int max);
typedef simgrid::surf::Cpu surf_Cpu;
typedef simgrid::surf::HostModel surf_HostModel;
typedef simgrid::surf::NetworkModel surf_NetworkModel;
-typedef simgrid::surf::StorageImpl surf_Storage;
typedef simgrid::surf::StorageModel surf_StorageModel;
typedef simgrid::surf::Resource surf_Resource;
typedef simgrid::surf::Action surf_Action;
typedef struct surf_Cpu surf_Cpu;
typedef struct surf_HostModel surf_HostModel;
typedef struct surf_NetworkModel surf_NetworkModel;
-typedef struct surf_StorageImpl surf_Storage;
typedef struct surf_StorageModel surf_StorageModel;
typedef struct surf_Resource surf_Resource;
typedef struct surf_Host surf_Host;
*/
XBT_PUBLIC(int) surf_model_running_action_set_size(surf_model_t model);
-/** @brief Create a file closing action on the given host */
-XBT_PUBLIC(surf_action_t) surf_host_close(sg_host_t host, surf_file_t fd);
-
-/** @brief Create a file reading action on the given host */
-XBT_PUBLIC(surf_action_t) surf_host_read(sg_host_t host, surf_file_t fd, sg_size_t size);
-
-/** @brief Create a file writing action on the given host */
-XBT_PUBLIC(surf_action_t) surf_host_write(sg_host_t host, surf_file_t fd, sg_size_t size);
-
-/**
- * @brief Move a file to another location on the *same mount point*.
- * @details [long description]
- *
- * @param host The surf host
- * @param fd The file descriptor
- * @param fullpath The new full path
- *
- * @return MSG_OK if successful, otherwise MSG_TASK_CANCELED
- */
-XBT_PUBLIC(int) surf_host_file_move(sg_host_t host, surf_file_t fd, const char* fullpath);
-
/**
* @brief [brief description]
* @details [long description]
*/
XBT_PUBLIC(double) surf_network_action_get_latency_limited(surf_action_t action);
-/**
- * @brief Get the file associated to a storage action
- *
- * @param action The surf storage action
- * @return The file associated to a storage action
- */
-XBT_PUBLIC(surf_file_t) surf_storage_action_get_file(surf_action_t action);
-
/** @} */
/**************************************/
#include "src/kernel/activity/SynchroIo.hpp"
#include "src/simix/smx_private.h"
-#include "src/surf/FileImpl.hpp"
#include "src/surf/surf_interface.hpp"
void simgrid::kernel::activity::IoImpl::suspend()
{
for (smx_simcall_t simcall : simcalls) {
switch (simcall->call) {
- case SIMCALL_FILE_OPEN: {
- surf_file_t tmp = surf_storage_action_get_file(surf_io);
- simcall_file_open__set__result(simcall, tmp);
- break;
- }
- case SIMCALL_FILE_CLOSE:
- delete simcall_file_close__get__fd(simcall);
- simcall_file_close__set__result(simcall, 0);
- break;
case SIMCALL_FILE_WRITE:
simcall_file_write__set__result(simcall, surf_io->getCost());
break;
-
case SIMCALL_FILE_READ:
simcall_file_read__set__result(simcall, surf_io->getCost());
break;
-
default:
break;
}
}
switch (surf_io->getState()) {
-
case simgrid::surf::Action::State::failed:
state = SIMIX_FAILED;
break;
-
case simgrid::surf::Action::State::done:
state = SIMIX_DONE;
break;
-
default:
THROW_IMPOSSIBLE;
break;
void resume() override;
void post() override;
- sg_host_t host = nullptr;
surf_action_t surf_io = nullptr;
};
*/
msg_error_t MSG_file_unlink(msg_file_t fd)
{
- /* Find the host where the file is physically located (remote or local)*/
- msg_storage_t storage_src = simgrid::s4u::Storage::byName(fd->storageId);
- msg_host_t attached_host = storage_src->getHost();
- fd->unlink(attached_host);
+ fd->unlink(); /* the host-aware File::unlink(sg_host_t) overload was removed; plain unlink() acts on the file's own (local) storage */
delete fd;
return MSG_OK;
}
XBT_PUBLIC_DATA(MSG_Global_t) msg_global;
/*************************************************************/
-
-XBT_PRIVATE msg_host_t __MSG_host_create(sg_host_t host);
-XBT_PRIVATE void __MSG_file_destroy(msg_file_t file);
-
XBT_PRIVATE void MSG_process_cleanup_from_SIMIX(smx_actor_t smx_proc);
XBT_PRIVATE smx_actor_t MSG_process_create_from_SIMIX(const char* name, std::function<void()> code, void* data,
sg_host_t host, xbt_dict_t properties,
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(msg_vm, msg, "Cloud-oriented parts of the MSG API");
/* **** ******** GENERAL ********* **** */
+/** @brief Returns the name of the given VM.
+ *
+ * Replaces the former `#define MSG_vm_get_name(vm) MSG_host_get_name(vm)`:
+ * msg_vm_t is now a distinct s4u::VirtualMachine pointer, so we delegate to getCname(). */
+const char* MSG_vm_get_name(msg_vm_t vm)
+{
+ return vm->getCname();
+}
/** \ingroup m_vm_management
* \brief Set the parameters of a given host
*/
void MSG_vm_set_params(msg_vm_t vm, vm_params_t params)
{
- static_cast<simgrid::s4u::VirtualMachine*>(vm)->setParameters(params);
+ vm->setParameters(params); /* no cast needed now that msg_vm_t is sg_vm_t (a real VirtualMachine*) */
}
/** \ingroup m_vm_management
*/
void MSG_vm_get_params(msg_vm_t vm, vm_params_t params)
{
- static_cast<simgrid::s4u::VirtualMachine*>(vm)->getParameters(params);
+ vm->getParameters(params); /* direct call: msg_vm_t is a VirtualMachine* since the typedef change */
}
/* **** Check state of a VM **** */
+/* Helper for the MSG_vm_is_*() predicates: true iff the VM's implementation object
+ * exists and is in the requested surf state. */
static inline int __MSG_vm_is_state(msg_vm_t vm, e_surf_vm_state_t state)
{
- simgrid::s4u::VirtualMachine* castedVm = static_cast<simgrid::s4u::VirtualMachine*>(vm);
- return castedVm->pimpl_vm_ != nullptr && castedVm->pimpl_vm_->getState() == state;
+ return vm->pimpl_vm_ != nullptr && vm->pimpl_vm_->getState() == state; /* pimpl_vm_ can be null, so test it first */
}
/** @brief Returns whether the given VM has just created, not running.
*/
int MSG_vm_is_migrating(msg_vm_t vm)
{
- return static_cast<simgrid::s4u::VirtualMachine*>(vm)->isMigrating();
+ return vm->isMigrating(); /* boolean returned as int, per the MSG C API convention */
}
/** @brief Returns whether the given VM is currently suspended, not running.
/* For the moment, intensity_rate is the percentage against the migration bandwidth */
- msg_vm_t vm = MSG_vm_create_multicore(pm, name, coreAmount);
+ msg_vm_t vm = new simgrid::s4u::VirtualMachine(name, pm, coreAmount);
s_vm_params_t params;
memset(&params, 0, sizeof(params));
params.ramsize = static_cast<sg_size_t>(ramsize) * 1024 * 1024;
params.dp_cap = params.ramsize * 0.9; // assume working set memory is 90% of ramsize
XBT_DEBUG("migspeed : %f intensity mem : %d", params.mig_speed, dp_intensity);
- static_cast<simgrid::s4u::VirtualMachine*>(vm)->setParameters(&params);
+ vm->setParameters(&params);
return vm;
}
*/
void MSG_vm_destroy(msg_vm_t vm)
{
- if (MSG_vm_is_migrating(vm))
+ if (vm->isMigrating())
THROWF(vm_error, 0, "Cannot destroy VM '%s', which is migrating.", vm->getCname());
/* First, terminate all processes on the VM if necessary */
MSG_vm_shutdown(vm);
/* Then, destroy the VM object */
- simgrid::simix::kernelImmediate([vm]() {
- vm->destroy();
- });
+ simgrid::simix::kernelImmediate([vm]() { vm->destroy(); });
if (TRACE_msg_vm_is_enabled()) {
container_t container = PJ_container_get(vm->getCname());
simgrid::simix::kernelImmediate([vm]() {
simgrid::vm::VmHostExt::ensureVmExtInstalled();
- simgrid::s4u::VirtualMachine* typedVM = static_cast<simgrid::s4u::VirtualMachine*>(vm);
- simgrid::s4u::Host* pm = typedVM->pimpl_vm_->getPm();
+ simgrid::s4u::Host* pm = vm->pimpl_vm_->getPm();
if (pm->extension<simgrid::vm::VmHostExt>() == nullptr)
pm->extension_set(new simgrid::vm::VmHostExt());
long pm_ramsize = pm->extension<simgrid::vm::VmHostExt>()->ramsize;
int pm_overcommit = pm->extension<simgrid::vm::VmHostExt>()->overcommit;
- long vm_ramsize = typedVM->getRamsize();
+ long vm_ramsize = vm->getRamsize();
if (pm_ramsize && not pm_overcommit) { /* Only verify that we don't overcommit on need */
/* Retrieve the memory occupied by the VMs on that host. Yep, we have to traverse all VMs of all hosts for that */
}
}
- typedVM->pimpl_vm_->setState(SURF_VM_STATE_RUNNING);
+ vm->pimpl_vm_->setState(SURF_VM_STATE_RUNNING);
});
if (TRACE_msg_vm_is_enabled()) {
void MSG_vm_shutdown(msg_vm_t vm)
{
smx_actor_t issuer=SIMIX_process_self();
- simgrid::simix::kernelImmediate([vm,issuer]() {
- static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_->shutdown(issuer);
- });
+ simgrid::simix::kernelImmediate([vm, issuer]() { vm->pimpl_vm_->shutdown(issuer); });
// Make sure that the processes in the VM are killed in this scheduling round before processing
// (eg with the VM destroy)
// Copy the reference to the vm (if SRC crashes now, do_migration will free ms)
// This is clearly ugly but I (Adrien) need more time to do something cleaner (actually we should copy the whole ms
// structure at the beginning and free it at the end of each function)
- simgrid::s4u::VirtualMachine* vm = static_cast<simgrid::s4u::VirtualMachine*>(ms->vm);
+ simgrid::s4u::VirtualMachine* vm = ms->vm;
msg_host_t dst_pm = ms->dst_pm;
// Make sure that we cannot get interrupted between the migrate and the resume to not end in an inconsistent state
static void start_dirty_page_tracking(msg_vm_t vm)
{
- simgrid::vm::VirtualMachineImpl* pimpl = static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_;
-
- pimpl->dp_enabled = 1;
- if (not pimpl->dp_objs)
+ vm->pimpl_vm_->dp_enabled = 1;
+ if (not vm->pimpl_vm_->dp_objs)
return;
char *key = nullptr;
xbt_dict_cursor_t cursor = nullptr;
dirty_page_t dp = nullptr;
- xbt_dict_foreach (pimpl->dp_objs, cursor, key, dp) {
+ xbt_dict_foreach (vm->pimpl_vm_->dp_objs, cursor, key, dp) {
double remaining = MSG_task_get_flops_amount(dp->task);
dp->prev_clock = MSG_get_clock();
dp->prev_remaining = remaining;
static void stop_dirty_page_tracking(msg_vm_t vm)
{
- static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_->dp_enabled = 0;
+ vm->pimpl_vm_->dp_enabled = 0; /* only clears the flag; the dp_objs table itself is left in place */
}
static double get_computed(char *key, msg_vm_t vm, dirty_page_t dp, double remaining, double clock)
+/* Walk the dirty-page table of this VM and total the flops computed by its tracked tasks,
+ * plus the contribution already recorded for deleted tasks (dp_updated_by_deleted_tasks,
+ * which is zeroed afterwards). Each entry's prev_clock is refreshed along the way. */
static double lookup_computed_flop_counts(msg_vm_t vm, int stage_for_fancy_debug, int stage2_round_for_fancy_debug)
{
- simgrid::vm::VirtualMachineImpl* pimpl = static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_;
double total = 0;
char *key = nullptr;
xbt_dict_cursor_t cursor = nullptr;
dirty_page_t dp = nullptr;
- xbt_dict_foreach (pimpl->dp_objs, cursor, key, dp) {
+ xbt_dict_foreach (vm->pimpl_vm_->dp_objs, cursor, key, dp) {
double remaining = MSG_task_get_flops_amount(dp->task);
double clock = MSG_get_clock();
dp->prev_clock = clock;
}
- total += pimpl->dp_updated_by_deleted_tasks;
+ total += vm->pimpl_vm_->dp_updated_by_deleted_tasks;
XBT_DEBUG("mig-stage%d.%d: computed %f flop_counts (including %f by deleted tasks)", stage_for_fancy_debug,
- stage2_round_for_fancy_debug, total, pimpl->dp_updated_by_deleted_tasks);
+ stage2_round_for_fancy_debug, total, vm->pimpl_vm_->dp_updated_by_deleted_tasks);
- pimpl->dp_updated_by_deleted_tasks = 0;
+ vm->pimpl_vm_->dp_updated_by_deleted_tasks = 0;
return total;
}
simgrid::s4u::VirtualMachine* vm = dynamic_cast<simgrid::s4u::VirtualMachine*>(host);
if (vm == nullptr)
return;
- simgrid::vm::VirtualMachineImpl* pimpl = static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_;
double remaining = MSG_task_get_flops_amount(task);
char *key = bprintf("%s-%p", task->name, task);
dirty_page_t dp = xbt_new0(s_dirty_page, 1);
dp->task = task;
- if (pimpl->dp_enabled) {
+ if (vm->pimpl_vm_->dp_enabled) {
dp->prev_clock = MSG_get_clock();
dp->prev_remaining = remaining;
}
- if (not pimpl->dp_objs)
- pimpl->dp_objs = xbt_dict_new_homogeneous(nullptr);
- xbt_assert(xbt_dict_get_or_null(pimpl->dp_objs, key) == nullptr);
- xbt_dict_set(pimpl->dp_objs, key, dp, nullptr);
- XBT_DEBUG("add %s on %s (remaining %f, dp_enabled %d)", key, host->getCname(), remaining, pimpl->dp_enabled);
+ if (not vm->pimpl_vm_->dp_objs)
+ vm->pimpl_vm_->dp_objs = xbt_dict_new_homogeneous(nullptr);
+ xbt_assert(xbt_dict_get_or_null(vm->pimpl_vm_->dp_objs, key) == nullptr);
+ xbt_dict_set(vm->pimpl_vm_->dp_objs, key, dp, nullptr);
+ XBT_DEBUG("add %s on %s (remaining %f, dp_enabled %d)", key, host->getCname(), remaining, vm->pimpl_vm_->dp_enabled);
xbt_free(key);
}
simgrid::s4u::VirtualMachine* vm = dynamic_cast<simgrid::s4u::VirtualMachine*>(host);
if (vm == nullptr)
return;
- simgrid::vm::VirtualMachineImpl* pimpl = static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_;
char *key = bprintf("%s-%p", task->name, task);
- dirty_page_t dp = (dirty_page_t)(pimpl->dp_objs ? xbt_dict_get_or_null(pimpl->dp_objs, key) : NULL);
+ dirty_page_t dp = (dirty_page_t)(vm->pimpl_vm_->dp_objs ? xbt_dict_get_or_null(vm->pimpl_vm_->dp_objs, key) : NULL);
xbt_assert(dp->task == task);
/* If we are in the middle of dirty page tracking, we record how much computation has been done until now, and keep
* the information for the lookup_() function that will called soon. */
- if (pimpl->dp_enabled) {
+ if (vm->pimpl_vm_->dp_enabled) {
double remaining = MSG_task_get_flops_amount(task);
double clock = MSG_get_clock();
// double updated = calc_updated_pages(key, host, dp, remaining, clock);
- double updated = get_computed(key, host, dp, remaining, clock);
+ double updated = get_computed(key, vm, dp, remaining, clock); // was host instead of vm
- pimpl->dp_updated_by_deleted_tasks += updated;
+ vm->pimpl_vm_->dp_updated_by_deleted_tasks += updated;
}
- if (pimpl->dp_objs)
- xbt_dict_remove(pimpl->dp_objs, key);
+ if (vm->pimpl_vm_->dp_objs)
+ xbt_dict_remove(vm->pimpl_vm_->dp_objs, key);
xbt_free(dp);
XBT_DEBUG("del %s on %s", key, host->getCname());
// Note that the ms structure has been allocated in do_migration and hence should be freed in the same function ;)
migration_session *ms = static_cast<migration_session *>(MSG_process_get_data(MSG_process_self()));
- double host_speed = MSG_host_get_speed(MSG_vm_get_pm(ms->vm));
+ double host_speed = ms->vm->pimpl_vm_->getPm()->getSpeed();
s_vm_params_t params;
- static_cast<simgrid::s4u::VirtualMachine*>(ms->vm)->getParameters(&params);
+ ms->vm->getParameters(&params);
const sg_size_t ramsize = params.ramsize;
const sg_size_t devsize = params.devsize;
const int skip_stage1 = params.skip_stage1;
/* Stage3: stop the VM and copy the rest of states. */
XBT_DEBUG("mig-stage3: remaining_size %f", remaining_size);
- simgrid::vm::VirtualMachineImpl* pimpl = static_cast<simgrid::s4u::VirtualMachine*>(ms->vm)->pimpl_vm_;
+ simgrid::vm::VirtualMachineImpl* pimpl = ms->vm->pimpl_vm_;
pimpl->setState(SURF_VM_STATE_RUNNING); // FIXME: this bypass of the checks in suspend() is not nice
pimpl->isMigrating = false; // FIXME: this bypass of the checks in suspend() is not nice
pimpl->suspend(SIMIX_process_self());
catch(xbt_ex& e) {
//hostfailure (if you want to know whether this is the SRC or the DST check directly in send_migration_data code)
// Stop the dirty page tracking an return (there is no memory space to release)
- static_cast<simgrid::s4u::VirtualMachine*>(ms->vm)->pimpl_vm_->resume();
+ ms->vm->pimpl_vm_->resume();
return 0;
}
* The second one would be easier.
*/
- simgrid::s4u::VirtualMachine* typedVm = static_cast<simgrid::s4u::VirtualMachine*>(vm);
- simgrid::vm::VirtualMachineImpl* pimpl = typedVm->pimpl_vm_;
- msg_host_t src_pm = pimpl->getPm();
+ msg_host_t src_pm = vm->pimpl_vm_->getPm();
if (src_pm->isOff())
THROWF(vm_error, 0, "Cannot migrate VM '%s' from host '%s', which is offline.", vm->getCname(), src_pm->getCname());
THROWF(vm_error, 0, "Cannot migrate VM '%s' to host '%s', which is offline.", vm->getCname(), dst_pm->getCname());
if (not MSG_vm_is_running(vm))
THROWF(vm_error, 0, "Cannot migrate VM '%s' that is not running yet.", vm->getCname());
- if (typedVm->isMigrating())
+ if (vm->isMigrating())
THROWF(vm_error, 0, "Cannot migrate VM '%s' that is already migrating.", vm->getCname());
- pimpl->isMigrating = true;
+ vm->pimpl_vm_->isMigrating = true;
struct migration_session *ms = xbt_new(struct migration_session, 1);
ms->vm = vm;
msg_task_t task = nullptr;
msg_error_t ret = MSG_task_receive(&task, ms->mbox_ctl);
- pimpl->isMigrating = false;
+ vm->pimpl_vm_->isMigrating = false;
xbt_free(ms->mbox_ctl);
xbt_free(ms->mbox);
void MSG_vm_suspend(msg_vm_t vm)
{
smx_actor_t issuer = SIMIX_process_self();
- simgrid::simix::kernelImmediate([vm,issuer]() {
- static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_->suspend(issuer);
- });
+ simgrid::simix::kernelImmediate([vm, issuer]() { vm->pimpl_vm_->suspend(issuer); });
XBT_DEBUG("vm_suspend done");
*/
void MSG_vm_resume(msg_vm_t vm)
{
- static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_->resume();
+ vm->pimpl_vm_->resume();
if (TRACE_msg_vm_is_enabled()) {
container_t vm_container = PJ_container_get(vm->getCname());
*/
msg_host_t MSG_vm_get_pm(msg_vm_t vm)
{
- return static_cast<simgrid::s4u::VirtualMachine*>(vm)->getPm();
+ return vm->getPm(); /* the physical machine currently hosting this VM */
}
/** @brief Set a CPU bound for a given VM.
*/
void MSG_vm_set_bound(msg_vm_t vm, double bound)
{
- simgrid::simix::kernelImmediate(
- [vm, bound]() { static_cast<simgrid::s4u::VirtualMachine*>(vm)->pimpl_vm_->setBound(bound); });
+ simgrid::simix::kernelImmediate([vm, bound]() { vm->pimpl_vm_->setBound(bound); }); /* executed in kernel context, like the other VM state changes */
}
SG_END_DECL()
File::File(const char* fullpath, void* userdata) : File(fullpath, Host::current(), userdata){};
-File::File(const char* fullpath, sg_host_t host, void* userdata) : path_(fullpath), userdata_(userdata), host_(host)
+File::File(const char* fullpath, sg_host_t host, void* userdata) : path_(fullpath), userdata_(userdata)
{
// this cannot fail because we get a xbt_die if the mountpoint does not exist
Storage* st = nullptr;
} else
xbt_die("Can't find mount point for '%s' on '%s'", fullpath, host->getCname());
- pimpl_ = simcall_file_open(mount_point.c_str(), path.c_str(), st);
+ pimpl_ =
+ simgrid::simix::kernelImmediate([this, st, path] { return new simgrid::surf::FileImpl(st, path, mount_point); });
storage_type = st->getType();
storageId = st->getName();
}
File::~File()
{
- simcall_file_close(pimpl_, host_);
+ simgrid::simix::kernelImmediate([this] { delete pimpl_; }); /* the file_close simcall is gone; free the surf FileImpl directly in kernel context */
}
sg_size_t File::read(sg_size_t size)
void File::move(const char* fullpath)
{
- sg_host_t host = Host::current();
- simgrid::simix::kernelImmediate([this, host, fullpath] { pimpl_->move(host, fullpath); });
+ simgrid::simix::kernelImmediate([this, fullpath] { pimpl_->move(fullpath); }); /* FileImpl::move() lost its host parameter: moves stay on the same (local) mount point */
}
int File::unlink()
{
- return unlink(Host::current());
-}
-
-int File::unlink(sg_host_t host)
-{
- return simgrid::simix::kernelImmediate([this, host] { return pimpl_->unlink(host); });
+ return simgrid::simix::kernelImmediate([this] { return pimpl_->unlink(); }); /* host-aware overload removed: storage access is now always local */
}
}} // namespace simgrid::s4u
sg_size_t Storage::getSize()
{
- return pimpl_->size_;
+ return pimpl_->getSize(); /* go through the StorageImpl accessor instead of poking the raw size_ field */
}
xbt_dict_t Storage::getProperties()
return simcall_BODY_file_write(fd, size, host);
}
-/**
- * \ingroup simix_file_management
- * \brief
- */
-surf_file_t simcall_file_open(const char* mount, const char* path, sg_storage_t st)
-{
- return simcall_BODY_file_open(mount, path, st);
-}
-
-/**
- * \ingroup simix_file_management
- *
- */
-int simcall_file_close(surf_file_t fd, sg_host_t host)
-{
- return simcall_BODY_file_close(fd, host);
-}
-
void simcall_run_kernel(std::function<void()> const& code)
{
simcall_BODY_run_kernel(&code);
simgrid::simix::marshal<sg_size_t>(simcall->result, result);
}
-static inline const char* simcall_file_open__get__mount(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal<const char*>(simcall->args[0]);
-}
-static inline const char* simcall_file_open__getraw__mount(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal_raw<const char*>(simcall->args[0]);
-}
-static inline void simcall_file_open__set__mount(smx_simcall_t simcall, const char* arg)
-{
- simgrid::simix::marshal<const char*>(simcall->args[0], arg);
-}
-static inline const char* simcall_file_open__get__path(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal<const char*>(simcall->args[1]);
-}
-static inline const char* simcall_file_open__getraw__path(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal_raw<const char*>(simcall->args[1]);
-}
-static inline void simcall_file_open__set__path(smx_simcall_t simcall, const char* arg)
-{
- simgrid::simix::marshal<const char*>(simcall->args[1], arg);
-}
-static inline sg_storage_t simcall_file_open__get__st(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal<sg_storage_t>(simcall->args[2]);
-}
-static inline sg_storage_t simcall_file_open__getraw__st(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal_raw<sg_storage_t>(simcall->args[2]);
-}
-static inline void simcall_file_open__set__st(smx_simcall_t simcall, sg_storage_t arg)
-{
- simgrid::simix::marshal<sg_storage_t>(simcall->args[2], arg);
-}
-static inline surf_file_t simcall_file_open__get__result(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal<surf_file_t>(simcall->result);
-}
-static inline surf_file_t simcall_file_open__getraw__result(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal_raw<surf_file_t>(simcall->result);
-}
-static inline void simcall_file_open__set__result(smx_simcall_t simcall, surf_file_t result)
-{
- simgrid::simix::marshal<surf_file_t>(simcall->result, result);
-}
-
-static inline surf_file_t simcall_file_close__get__fd(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal<surf_file_t>(simcall->args[0]);
-}
-static inline surf_file_t simcall_file_close__getraw__fd(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal_raw<surf_file_t>(simcall->args[0]);
-}
-static inline void simcall_file_close__set__fd(smx_simcall_t simcall, surf_file_t arg)
-{
- simgrid::simix::marshal<surf_file_t>(simcall->args[0], arg);
-}
-static inline sg_host_t simcall_file_close__get__host(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal<sg_host_t>(simcall->args[1]);
-}
-static inline sg_host_t simcall_file_close__getraw__host(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal_raw<sg_host_t>(simcall->args[1]);
-}
-static inline void simcall_file_close__set__host(smx_simcall_t simcall, sg_host_t arg)
-{
- simgrid::simix::marshal<sg_host_t>(simcall->args[1], arg);
-}
-static inline int simcall_file_close__get__result(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal<int>(simcall->result);
-}
-static inline int simcall_file_close__getraw__result(smx_simcall_t simcall)
-{
- return simgrid::simix::unmarshal_raw<int>(simcall->result);
-}
-static inline void simcall_file_close__set__result(smx_simcall_t simcall, int result){
- simgrid::simix::marshal<int>(simcall->result, result);
-}
-
static inline int simcall_mc_random__get__min(smx_simcall_t simcall)
{
return simgrid::simix::unmarshal<int>(simcall->args[0]);
XBT_PRIVATE int simcall_HANDLER_sem_get_capacity(smx_simcall_t simcall, smx_sem_t sem);
XBT_PRIVATE void simcall_HANDLER_file_read(smx_simcall_t simcall, surf_file_t fd, sg_size_t size, sg_host_t host);
XBT_PRIVATE void simcall_HANDLER_file_write(smx_simcall_t simcall, surf_file_t fd, sg_size_t size, sg_host_t host);
-XBT_PRIVATE void simcall_HANDLER_file_open(smx_simcall_t simcall, const char* mount, const char* path, sg_storage_t st);
-XBT_PRIVATE void simcall_HANDLER_file_close(smx_simcall_t simcall, surf_file_t fd, sg_host_t host);
XBT_PRIVATE int simcall_HANDLER_mc_random(smx_simcall_t simcall, int min, int max);
\ No newline at end of file
return simcall<sg_size_t, surf_file_t, sg_size_t, sg_host_t>(SIMCALL_FILE_WRITE, fd, size, host);
}
- inline static surf_file_t simcall_BODY_file_open(const char* mount, const char* path, sg_storage_t st)
- {
- /* Go to that function to follow the code flow through the simcall barrier */
- if (0)
- simcall_HANDLER_file_open(&SIMIX_process_self()->simcall, mount, path, st);
- return simcall<surf_file_t, const char*, const char*, sg_storage_t>(SIMCALL_FILE_OPEN, mount, path, st);
- }
-
- inline static int simcall_BODY_file_close(surf_file_t fd, sg_host_t host)
- {
- /* Go to that function to follow the code flow through the simcall barrier */
- if (0) simcall_HANDLER_file_close(&SIMIX_process_self()->simcall, fd, host);
- return simcall<int, surf_file_t, sg_host_t>(SIMCALL_FILE_CLOSE, fd, host);
- }
-
inline static int simcall_BODY_mc_random(int min, int max) {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_mc_random(&SIMIX_process_self()->simcall, min, max);
SIMCALL_SEM_GET_CAPACITY,
SIMCALL_FILE_READ,
SIMCALL_FILE_WRITE,
- SIMCALL_FILE_OPEN,
- SIMCALL_FILE_CLOSE,
SIMCALL_MC_RANDOM,
SIMCALL_SET_CATEGORY,
SIMCALL_RUN_KERNEL,
"SIMCALL_SEM_GET_CAPACITY",
"SIMCALL_FILE_READ",
"SIMCALL_FILE_WRITE",
- "SIMCALL_FILE_OPEN",
- "SIMCALL_FILE_CLOSE",
"SIMCALL_MC_RANDOM",
"SIMCALL_SET_CATEGORY",
"SIMCALL_RUN_KERNEL",
simgrid::simix::unmarshal<sg_host_t>(simcall->args[2]));
break;
-case SIMCALL_FILE_OPEN:
- simcall_HANDLER_file_open(simcall, simgrid::simix::unmarshal<const char*>(simcall->args[0]),
- simgrid::simix::unmarshal<const char*>(simcall->args[1]),
- simgrid::simix::unmarshal<sg_storage_t>(simcall->args[2]));
- break;
-
-case SIMCALL_FILE_CLOSE:
- simcall_HANDLER_file_close(simcall, simgrid::simix::unmarshal<surf_file_t>(simcall->args[0]),
- simgrid::simix::unmarshal<sg_host_t>(simcall->args[1]));
- break;
-
case SIMCALL_MC_RANDOM:
simgrid::simix::marshal<int>(simcall->result, simcall_HANDLER_mc_random(simcall, simgrid::simix::unmarshal<int>(simcall->args[0]), simgrid::simix::unmarshal<int>(simcall->args[1])));
SIMIX_simcall_answer(simcall);
sg_size_t file_read(surf_file_t fd, sg_size_t size, sg_host_t host) [[block]];
sg_size_t file_write(surf_file_t fd, sg_size_t size, sg_host_t host) [[block]];
-surf_file_t file_open(const char* mount, const char* path, sg_storage_t st) [[block]];
-int file_close(surf_file_t fd, sg_host_t host) [[block]];
int mc_random(int min, int max);
void set_category(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> synchro, const char* category) [[nohandler]];
#include <xbt/ex.hpp>
#include <xbt/sysdep.h>
#include <xbt/log.h>
-#include <xbt/dict.h>
#include "simgrid/s4u/Host.hpp"
#include "simgrid/s4u/Storage.hpp"
#include "src/surf/FileImpl.hpp"
+#include "src/surf/HostImpl.hpp"
#include "src/surf/StorageImpl.hpp"
#include "surf/surf.h"
-#include <mc/mc.h>
-
#include "src/surf/surf_interface.hpp"
#include "smx_private.h"
THROWF(host_error, 0, "Host %s failed, you cannot call this function", host->getCname());
simgrid::kernel::activity::IoImpl* synchro = new simgrid::kernel::activity::IoImpl();
- synchro->host = host;
- synchro->surf_io = surf_host_read(host, file, size);
+ synchro->surf_io = file->read(size);
synchro->surf_io->setData(synchro);
XBT_DEBUG("Create io synchro %p", synchro);
THROWF(host_error, 0, "Host %s failed, you cannot call this function", host->getCname());
simgrid::kernel::activity::IoImpl* synchro = new simgrid::kernel::activity::IoImpl();
- synchro->host = host;
- synchro->surf_io = surf_host_write(host, file, size);
- synchro->surf_io->setData(synchro);
- XBT_DEBUG("Create io synchro %p", synchro);
-
- return synchro;
-}
-
-//SIMIX FILE OPEN
-void simcall_HANDLER_file_open(smx_simcall_t simcall, const char* mount, const char* path, sg_storage_t st)
-{
- smx_activity_t synchro = SIMIX_file_open(mount, path, st);
- synchro->simcalls.push_back(simcall);
- simcall->issuer->waiting_synchro = synchro;
-}
-
-smx_activity_t SIMIX_file_open(const char* mount, const char* path, sg_storage_t st)
-{
- if (st->getHost()->isOff())
- THROWF(host_error, 0, "Host %s failed, you cannot call this function", st->getHost()->getCname());
-
- simgrid::kernel::activity::IoImpl* synchro = new simgrid::kernel::activity::IoImpl();
- synchro->host = st->getHost();
- synchro->surf_io = st->pimpl_->open(mount, path);
- synchro->surf_io->setData(synchro);
- XBT_DEBUG("Create io synchro %p", synchro);
-
- return synchro;
-}
-
-//SIMIX FILE CLOSE
-void simcall_HANDLER_file_close(smx_simcall_t simcall, surf_file_t fd, sg_host_t host)
-{
- smx_activity_t synchro = SIMIX_file_close(fd, host);
- synchro->simcalls.push_back(simcall);
- simcall->issuer->waiting_synchro = synchro;
-}
-
-smx_activity_t SIMIX_file_close(surf_file_t file, sg_host_t host)
-{
- if (host->isOff())
- THROWF(host_error, 0, "Host %s failed, you cannot call this function", host->getCname());
-
- simgrid::kernel::activity::IoImpl* synchro = new simgrid::kernel::activity::IoImpl();
- synchro->host = host;
- synchro->surf_io = surf_host_close(host, file);
+ synchro->surf_io = file->write(size);
synchro->surf_io->setData(synchro);
XBT_DEBUG("Create io synchro %p", synchro);
XBT_PRIVATE smx_activity_t SIMIX_file_read(surf_file_t fd, sg_size_t size, sg_host_t host);
XBT_PRIVATE smx_activity_t SIMIX_file_write(surf_file_t fd, sg_size_t size, sg_host_t host);
-XBT_PRIVATE smx_activity_t SIMIX_file_open(const char* mount, const char* path, sg_storage_t st);
-XBT_PRIVATE smx_activity_t SIMIX_file_close(surf_file_t fd, sg_host_t host);
-XBT_PRIVATE int SIMIX_file_unlink(surf_file_t fd, sg_host_t host);
-XBT_PRIVATE int SIMIX_file_move(smx_actor_t process, surf_file_t fd, const char* fullpath);
XBT_PRIVATE void SIMIX_io_destroy(smx_activity_t synchro);
XBT_PRIVATE void SIMIX_io_finish(smx_activity_t synchro);
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/surf/FileImpl.hpp"
-#include "src/surf/HostImpl.hpp"
+#include "src/surf/StorageImpl.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_file, surf, "Logging specific to the SURF file module");
namespace simgrid {
namespace surf {
+FileImpl::FileImpl(sg_storage_t st, std::string path, std::string mount) : path_(path), mount_point_(mount)
+{
+ XBT_DEBUG("\tOpen file '%s'", path.c_str());
+ location_ = st->getImpl();
+ std::map<std::string, sg_size_t>* content = location_->getContent();
+ // if file does not exist create an empty file
+ if (content->find(path) != content->end())
+ size_ = content->at(path);
+ else {
+ size_ = 0;
+ content->insert({path, size_});
+ XBT_DEBUG("File '%s' was not found, file created.", path.c_str());
+ }
+}
+
+Action* FileImpl::read(sg_size_t size)
+{
+ XBT_DEBUG("READ %s on disk '%s'", cname(), location_->cname());
+ if (current_position_ + size > size_) {
+ if (current_position_ > size_) {
+ size = 0;
+ } else {
+ size = size_ - current_position_;
+ }
+ current_position_ = size_;
+ } else
+ current_position_ += size;
+
+ return location_->read(size);
+}
+
+Action* FileImpl::write(sg_size_t size)
+{
+ XBT_DEBUG("WRITE %s on disk '%s'. size '%llu/%llu'", cname(), location_->cname(), size, size_);
+
+ StorageAction* action = location_->write(size);
+ action->file_ = this;
+ /* Substract the part of the file that might disappear from the used sized on the storage element */
+ location_->usedSize_ -= (size_ - current_position_);
+ // If the storage is full before even starting to write
+ if (location_->usedSize_ >= location_->getSize()) {
+ action->setState(Action::State::failed);
+ }
+ return action;
+}
+
int FileImpl::seek(sg_offset_t offset, int origin)
{
switch (origin) {
}
}
-int FileImpl::unlink(sg_host_t host)
+int FileImpl::unlink()
{
- simgrid::surf::StorageImpl* st = host->pimpl_->findStorageOnMountList(mount_point_.c_str());
/* Check if the file is on this storage */
- if (st->content_->find(path_) == st->content_->end()) {
- XBT_WARN("File %s is not on disk %s. Impossible to unlink", cname(), st->cname());
+ if (location_->getContent()->find(path_) == location_->getContent()->end()) {
+ XBT_WARN("File %s is not on disk %s. Impossible to unlink", cname(), location_->cname());
return -1;
} else {
- XBT_DEBUG("UNLINK %s on disk '%s'", cname(), st->cname());
- st->usedSize_ -= size_;
+ XBT_DEBUG("UNLINK %s on disk '%s'", cname(), location_->cname());
+ location_->usedSize_ -= size_;
// Remove the file from storage
- st->content_->erase(path_);
+ location_->getContent()->erase(path_);
return 0;
}
}
-void FileImpl::move(sg_host_t host, const char* fullpath)
+void FileImpl::move(const char* fullpath)
{
/* Check if the new full path is on the same mount point */
if (not strncmp(mount_point_.c_str(), fullpath, mount_point_.size())) {
- std::map<std::string, sg_size_t>* content = host->pimpl_->findStorageOnMountList(mount_point_.c_str())->content_;
+ std::map<std::string, sg_size_t>* content = location_->getContent();
if (content->find(path_) != content->end()) { // src file exists
sg_size_t new_size = content->at(path_);
content->erase(path_);
class FileImpl {
public:
- FileImpl(const char* path, const char* mount, sg_size_t size) : path_(path), mount_point_(mount), size_(size) {}
+ FileImpl(sg_storage_t st, std::string path, std::string mount);
~FileImpl() = default;
std::string name() { return path_; }
void incrPosition(sg_size_t incr) { current_position_ += incr; }
sg_size_t tell() { return current_position_; }
int seek(sg_offset_t offset, int origin);
- int unlink(sg_host_t host);
- void move(sg_host_t host, const char* fullpath);
+ int unlink();
+ void move(const char* fullpath);
+ Action* read(sg_size_t size);
+ Action* write(sg_size_t size);
private:
+ StorageImpl* location_;
std::string path_;
std::string mount_point_;
sg_size_t size_;
piface_->pimpl_ = this;
}
-simgrid::surf::StorageImpl* HostImpl::findStorageOnMountList(const char* mount)
-{
- XBT_DEBUG("Search for storage name '%s' on '%s'", mount, piface_->getCname());
- if (storage_.find(mount) == storage_.end())
- xbt_die("Can't find mount '%s' for '%s'", mount, piface_->getCname());
-
- return storage_.at(mount);
-}
-
void HostImpl::getAttachedStorageList(std::vector<const char*>* storages)
{
for (auto s : storage_)
- if (s.second->attach_ == piface_->getCname())
+ if (s.second->getHost() == piface_->getCname())
storages->push_back(s.second->piface_.getName());
}
-Action* HostImpl::close(surf_file_t fd)
-{
- simgrid::surf::StorageImpl* st = findStorageOnMountList(fd->mount());
- XBT_DEBUG("CLOSE %s on disk '%s'", fd->cname(), st->cname());
- return st->close(fd);
-}
-
-Action* HostImpl::read(surf_file_t fd, sg_size_t size)
-{
- simgrid::surf::StorageImpl* st = findStorageOnMountList(fd->mount());
- XBT_DEBUG("READ %s on disk '%s'", fd->cname(), st->cname());
- return st->read(fd, size);
-}
-
-Action* HostImpl::write(surf_file_t fd, sg_size_t size)
-{
- simgrid::surf::StorageImpl* st = findStorageOnMountList(fd->mount());
- XBT_DEBUG("WRITE %s on disk '%s'", fd->cname(), st->cname());
- return st->write(fd, size);
-}
-
}
}
explicit HostImpl(s4u::Host* host);
virtual ~HostImpl() = default;
- /** @brief Return the storage of corresponding mount point */
- virtual simgrid::surf::StorageImpl* findStorageOnMountList(const char* storage);
-
- /** @brief Get the xbt_dynar_t of storages attached to the Host */
+ /** @brief Get the vector of storages (by names) attached to the Host */
virtual void getAttachedStorageList(std::vector<const char*>* storages);
- /**
- * @brief Close a file
- *
- * @param fd The file descriptor to close
- * @return The StorageAction corresponding to the closing
- */
- virtual Action* close(surf_file_t fd);
-
- /**
- * @brief Read a file
- *
- * @param fd The file descriptor to read
- * @param size The size in bytes to read
- * @return The StorageAction corresponding to the reading
- */
- virtual Action* read(surf_file_t fd, sg_size_t size);
-
- /**
- * @brief Write a file
- *
- * @param fd The file descriptor to write
- * @param size The size in bytes to write
- * @return The StorageAction corresponding to the writing
- */
- virtual Action* write(surf_file_t fd, sg_size_t size);
-
std::map<std::string, simgrid::surf::StorageImpl*> storage_;
simgrid::s4u::Host* piface_ = nullptr;
const char* type_id, const char* content_name, sg_size_t size, const char* attach)
: Resource(model, name, lmm_constraint_new(maxminSystem, this, MAX(bread, bwrite)))
, piface_(this)
- , size_(size)
- , usedSize_(0)
, typeId_(type_id)
+ , size_(size)
, attach_(attach)
- , writeActions_(std::vector<StorageAction*>())
{
content_ = parseContent(content_name);
turnOn();
void turnOn() override;
void turnOff() override;
- std::map<std::string, sg_size_t>* content_;
- sg_size_t size_;
- sg_size_t usedSize_;
- std::string typeId_;
- std::string attach_; // Name of the host to which this storage is attached.
- // Only used until the platform is fully parsed only.
- // Then the interface stores the Host directly.
- /**
- * @brief Open a file
- *
- * @param mount The mount point
- * @param path The path to the file
- *
- * @return The StorageAction corresponding to the opening
- */
- virtual StorageAction* open(const char* mount, const char* path) = 0;
-
- /**
- * @brief Close a file
- *
- * @param fd The file descriptor to close
- * @return The StorageAction corresponding to the closing
- */
- virtual StorageAction* close(surf_file_t fd) = 0;
-
/**
* @brief Read a file
*
- * @param fd The file descriptor to read
* @param size The size in bytes to read
* @return The StorageAction corresponding to the reading
*/
- virtual StorageAction* read(surf_file_t fd, sg_size_t size) = 0;
+ virtual StorageAction* read(sg_size_t size) = 0;
/**
* @brief Write a file
*
- * @param fd The file descriptor to write
* @param size The size in bytes to write
* @return The StorageAction corresponding to the writing
*/
- virtual StorageAction* write(surf_file_t fd, sg_size_t size) = 0;
+ virtual StorageAction* write(sg_size_t size) = 0;
/**
* @brief Get the content of the current Storage
* @return The used size in bytes of the current Storage
*/
virtual sg_size_t getUsedSize();
+ virtual sg_size_t getSize() { return size_; }
+ virtual std::string getHost() { return attach_; }
std::map<std::string, sg_size_t>* parseContent(const char* filename);
static std::unordered_map<std::string, StorageImpl*>* storages;
static std::unordered_map<std::string, StorageImpl*>* storagesMap() { return StorageImpl::storages; }
- std::vector<StorageAction*> writeActions_;
lmm_constraint_t constraintWrite_; /* Constraint for maximum write bandwidth*/
lmm_constraint_t constraintRead_; /* Constraint for maximum write bandwidth*/
+
+ std::string typeId_;
+ sg_size_t usedSize_ = 0;
+
+private:
+ sg_size_t size_;
+ std::map<std::string, sg_size_t>* content_;
+ // Name of the host to which this storage is attached. Only used at platform parsing time, then the interface stores
+ // the Host directly.
+ std::string attach_;
};
/**********
*/
typedef enum {
READ = 0, /**< Read a file */
- WRITE, /**< Write in a file */
- STAT, /**< Stat a file */
- OPEN, /**< Open a file */
- CLOSE /**< Close a file */
+ WRITE /**< Write in a file */
} e_surf_action_storage_type_t;
/** @ingroup SURF_storage_interface
static void check_disk_attachment()
{
for (auto s : *simgrid::surf::StorageImpl::storagesMap()) {
- simgrid::kernel::routing::NetPoint* host_elm = sg_netpoint_by_name_or_null(s.second->attach_.c_str());
+ simgrid::kernel::routing::NetPoint* host_elm = sg_netpoint_by_name_or_null(s.second->getHost().c_str());
if (not host_elm)
surf_parse_error("Unable to attach storage %s: host %s does not exist.", s.second->cname(),
- s.second->attach_.c_str());
+ s.second->getHost().c_str());
else
- s.second->piface_.attached_to_ = sg_host_by_name(s.second->attach_.c_str());
+ s.second->piface_.attached_to_ = sg_host_by_name(s.second->getHost().c_str());
}
}
double StorageN11Model::nextOccuringEvent(double now)
{
- double min_completion = StorageModel::nextOccuringEventFull(now);
-
- for(auto storage: p_storageList) {
- double rate = 0;
- // Foreach write action on that disk
- for (auto write_action: storage->writeActions_) {
- rate += lmm_variable_getvalue(write_action->getVariable());
- }
- if(rate > 0)
- min_completion = MIN(min_completion, (storage->size_-storage->usedSize_)/rate);
- }
-
- return min_completion;
+ return StorageModel::nextOccuringEventFull(now);
}
void StorageN11Model::updateActionsState(double /*now*/, double delta)
{
-
ActionList *actionSet = getRunningActionSet();
- for(ActionList::iterator it(actionSet->begin()), itNext=it, itend(actionSet->end())
- ; it != itend ; it=itNext) {
+ for (ActionList::iterator it(actionSet->begin()), itNext = it, itend(actionSet->end()); it != itend; it = itNext) {
++itNext;
StorageAction *action = static_cast<StorageAction*>(&*it);
// which becomes the new file size
action->file_->setSize(action->file_->tell());
- action->storage_->content_->erase(action->file_->cname());
- action->storage_->content_->insert({action->file_->cname(), action->file_->size()});
+ action->storage_->getContent()->erase(action->file_->cname());
+ action->storage_->getContent()->insert({action->file_->cname(), action->file_->size()});
}
action->updateRemains(lmm_variable_getvalue(action->getVariable()) * delta);
action->updateMaxDuration(delta);
if (action->getRemainsNoUpdate() > 0 && lmm_get_variable_weight(action->getVariable()) > 0 &&
- action->storage_->usedSize_ == action->storage_->size_) {
+ action->storage_->usedSize_ == action->storage_->getSize()) {
action->finish();
action->setState(Action::State::failed);
} else if (((action->getRemainsNoUpdate() <= 0) && (lmm_get_variable_weight(action->getVariable()) > 0)) ||
simgrid::s4u::Storage::onCreation(this->piface_);
}
-StorageAction *StorageN11::open(const char* mount, const char* path)
-{
- XBT_DEBUG("\tOpen file '%s'",path);
-
- sg_size_t size;
- // if file does not exist create an empty file
- if (content_->find(path) != content_->end())
- size = content_->at(path);
- else {
- size = 0;
- content_->insert({path, size});
- XBT_DEBUG("File '%s' was not found, file created.",path);
- }
- FileImpl* file = new FileImpl(path, mount, size);
-
- StorageAction* action = new StorageN11Action(model(), 0, isOff(), this, OPEN);
- action->file_ = file;
-
- return action;
-}
-
-StorageAction *StorageN11::close(surf_file_t fd)
+StorageAction* StorageN11::read(sg_size_t size)
{
- XBT_DEBUG("\tClose file '%s' size '%llu'", fd->cname(), fd->size());
- // unref write actions from storage
- for (std::vector<StorageAction*>::iterator it = writeActions_.begin(); it != writeActions_.end();) {
- StorageAction *write_action = *it;
- if ((write_action->file_) == fd) {
- write_action->unref();
- it = writeActions_.erase(it);
- } else {
- ++it;
- }
- }
- StorageAction* action = new StorageN11Action(model(), 0, isOff(), this, CLOSE);
- return action;
+ return new StorageN11Action(model(), size, isOff(), this, READ);
}
-StorageAction *StorageN11::read(surf_file_t fd, sg_size_t size)
+StorageAction* StorageN11::write(sg_size_t size)
{
- if (fd->tell() + size > fd->size()) {
- if (fd->tell() > fd->size()) {
- size = 0;
- } else {
- size = fd->size() - fd->tell();
- }
- fd->setPosition(fd->size());
- }
- else
- fd->incrPosition(size);
-
- StorageAction* action = new StorageN11Action(model(), size, isOff(), this, READ);
- return action;
-}
-
-StorageAction *StorageN11::write(surf_file_t fd, sg_size_t size)
-{
- XBT_DEBUG("\tWrite file '%s' size '%llu/%llu'", fd->cname(), size, fd->size());
-
- StorageAction* action = new StorageN11Action(model(), size, isOff(), this, WRITE);
- action->file_ = fd;
- /* Substract the part of the file that might disappear from the used sized on the storage element */
- usedSize_ -= (fd->size() - fd->tell());
- // If the storage is full before even starting to write
- if(usedSize_==size_) {
- action->setState(Action::State::failed);
- }
- return action;
+ return new StorageN11Action(model(), size, isOff(), this, WRITE);
}
/**********
// Must be less than the max bandwidth for all actions
lmm_expand(model->getMaxminSystem(), storage->constraint(), getVariable(), 1.0);
switch(type) {
- case OPEN:
- case CLOSE:
- case STAT:
- break;
case READ:
lmm_expand(model->getMaxminSystem(), storage->constraintRead_, getVariable(), 1.0);
break;
case WRITE:
lmm_expand(model->getMaxminSystem(), storage->constraintWrite_, getVariable(), 1.0);
-
- //TODO there is something annoying with what's below. Have to sort it out...
- // Action *action = this;
- // storage->p_writeActions->push_back(action);
- // ref();
break;
default:
THROW_UNIMPLEMENTED;
const char* type_id, char* content_name, sg_size_t size, char* attach);
virtual ~StorageN11() = default;
StorageAction *open(const char* mount, const char* path);
- StorageAction *close(surf_file_t fd);
StorageAction *ls(const char *path);
- StorageAction* read(surf_file_t fd, sg_size_t size);
- StorageAction* write(surf_file_t fd, sg_size_t size);
+ StorageAction* read(sg_size_t size);
+ StorageAction* write(sg_size_t size);
void rename(const char *src, const char *dest);
};
return model->getRunningActionSet()->size();
}
-surf_action_t surf_host_close(sg_host_t host, surf_file_t fd){
- return host->pimpl_->close(fd);
-}
-
-surf_action_t surf_host_read(sg_host_t host, surf_file_t fd, sg_size_t size){
- return host->pimpl_->read(fd, size);
-}
-
-surf_action_t surf_host_write(sg_host_t host, surf_file_t fd, sg_size_t size){
- return host->pimpl_->write(fd, size);
-}
-
void surf_cpu_action_set_bound(surf_action_t action, double bound) {
static_cast<simgrid::surf::CpuAction*>(action)->setBound(bound);
}
-
-surf_file_t surf_storage_action_get_file(surf_action_t action){
- return static_cast<simgrid::surf::StorageAction*>(action)->file_;
-}
msg_host_t pm2 = MSG_host_by_name("node-0.2cores.org"); // 2 cores
msg_host_t pm4 = MSG_host_by_name("node-0.4cores.org");
- msg_host_t vm0;
+ msg_vm_t vm0;
xbt_assert(pm0, "Host node-0.1core.org does not seem to exist");
xbt_assert(pm2, "Host node-0.2cores.org does not seem to exist");
xbt_assert(pm4, "Host node-0.4cores.org does not seem to exist");
XBT_INFO("### Test '%s'. A task in a VM on a PM.", chooser);
vm0 = MSG_vm_create_core(pm0, "VM0");
MSG_vm_start(vm0);
- run_test_process("( [X]1 )1", vm0, flop_amount);
+ run_test_process("( [X]1 )1", (msg_host_t)vm0, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Test '%s'. 2 tasks co-located in a VM on a PM.", chooser);
vm0 = MSG_vm_create_core(pm0, "VM0");
MSG_vm_start(vm0);
- run_test_process("( [Xo]1 )1", vm0, flop_amount / 2);
- run_test_process("( [oX]1 )1", vm0, flop_amount / 2);
+ run_test_process("( [Xo]1 )1", (msg_host_t)vm0, flop_amount / 2);
+ run_test_process("( [oX]1 )1", (msg_host_t)vm0, flop_amount / 2);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Test '%s'. A task in a VM, plus a task", chooser);
vm0 = MSG_vm_create_core(pm0, "VM0");
MSG_vm_start(vm0);
- run_test_process("( [X]1 o )1", vm0, flop_amount / 2);
+ run_test_process("( [X]1 o )1", (msg_host_t)vm0, flop_amount / 2);
run_test_process("( [o]1 X )1", pm0, flop_amount / 2);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Test '%s'. 2 tasks in a VM, plus a task", chooser);
vm0 = MSG_vm_create_core(pm0, "VM0");
MSG_vm_start(vm0);
- run_test_process("( [Xo]1 o )1", vm0, flop_amount / 4);
- run_test_process("( [oX]1 o )1", vm0, flop_amount / 4);
+ run_test_process("( [Xo]1 o )1", (msg_host_t)vm0, flop_amount / 4);
+ run_test_process("( [oX]1 o )1", (msg_host_t)vm0, flop_amount / 4);
run_test_process("( [oo]1 X )1", pm0, flop_amount / 2);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Test '%s'. A task in a VM on a bicore PM", chooser);
vm0 = MSG_vm_create_core(pm2, "VM0");
MSG_vm_start(vm0);
- run_test_process("( [X]1 )2", vm0, flop_amount);
+ run_test_process("( [X]1 )2", (msg_host_t)vm0, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Test '%s'. 2 tasks in a VM on a bicore PM", chooser);
vm0 = MSG_vm_create_core(pm2, "VM0");
MSG_vm_start(vm0);
- run_test_process("( [Xx]1 )2", vm0, flop_amount / 2);
- run_test_process("( [xX]1 )2", vm0, flop_amount / 2);
+ run_test_process("( [Xx]1 )2", (msg_host_t)vm0, flop_amount / 2);
+ run_test_process("( [xX]1 )2", (msg_host_t)vm0, flop_amount / 2);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Put a VM on a PM, put a task to the PM and a task to the VM");
vm0 = MSG_vm_create_core(pm2, "VM0");
MSG_vm_start(vm0);
- run_test_process("( [X]1 x )2", vm0, flop_amount);
+ run_test_process("( [X]1 x )2", (msg_host_t)vm0, flop_amount);
run_test_process("( [x]1 X )2", pm2, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
MSG_vm_start(vm0);
MSG_vm_start(vm1);
- run_test_process("( [X]1 [ ]1 )2", vm0, flop_amount);
+ run_test_process("( [X]1 [ ]1 )2", (msg_host_t)vm0, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_destroy(vm1);
msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
MSG_vm_start(vm0);
MSG_vm_start(vm1);
- run_test_process("( [X]1 [x]1 )2", vm0, flop_amount);
- run_test_process("( [x]1 [X]1 )2", vm1, flop_amount);
+ run_test_process("( [X]1 [x]1 )2", (msg_host_t)vm0, flop_amount);
+ run_test_process("( [x]1 [X]1 )2", (msg_host_t)vm1, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_destroy(vm1);
MSG_vm_start(vm0);
MSG_vm_start(vm1);
MSG_vm_start(vm2);
- run_test_process("( [X]1 [x]1 [ ]1 )2", vm0, flop_amount);
- run_test_process("( [x]1 [X]1 [ ]1 )2", vm1, flop_amount);
+ run_test_process("( [X]1 [x]1 [ ]1 )2", (msg_host_t)vm0, flop_amount);
+ run_test_process("( [x]1 [X]1 [ ]1 )2", (msg_host_t)vm1, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_destroy(vm1);
MSG_vm_start(vm0);
MSG_vm_start(vm1);
MSG_vm_start(vm2);
- run_test_process("( [X]1 [o]1 [o]1 )2", vm0, flop_amount * 2 / 3);
- run_test_process("( [o]1 [X]1 [o]1 )2", vm1, flop_amount * 2 / 3);
- run_test_process("( [o]1 [o]1 [X]1 )2", vm2, flop_amount * 2 / 3);
+ run_test_process("( [X]1 [o]1 [o]1 )2", (msg_host_t)vm0, flop_amount * 2 / 3);
+ run_test_process("( [o]1 [X]1 [o]1 )2", (msg_host_t)vm1, flop_amount * 2 / 3);
+ run_test_process("( [o]1 [o]1 [X]1 )2", (msg_host_t)vm2, flop_amount * 2 / 3);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_destroy(vm1);
XBT_INFO("### Put a VM on a PM, and put a task to the VM");
vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [X]2 )2", vm0, flop_amount);
+ run_test_process("( [X]2 )2", (msg_host_t)vm0, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Put a VM on a PM, and put two tasks to the VM");
vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [Xo]2 )2", vm0, flop_amount);
- run_test_process("( [oX]2 )2", vm0, flop_amount);
+ run_test_process("( [Xo]2 )2", (msg_host_t)vm0, flop_amount);
+ run_test_process("( [oX]2 )2", (msg_host_t)vm0, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Put a VM on a PM, and put three tasks to the VM");
vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [Xoo]2 )2", vm0, flop_amount * 2 / 3);
- run_test_process("( [oXo]2 )2", vm0, flop_amount * 2 / 3);
- run_test_process("( [ooX]2 )2", vm0, flop_amount * 2 / 3);
+ run_test_process("( [Xoo]2 )2", (msg_host_t)vm0, flop_amount * 2 / 3);
+ run_test_process("( [oXo]2 )2", (msg_host_t)vm0, flop_amount * 2 / 3);
+ run_test_process("( [ooX]2 )2", (msg_host_t)vm0, flop_amount * 2 / 3);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
MSG_vm_start(vm0);
run_test_process("( [o]2 X )2", pm2, flop_amount);
- run_test_process("( [X]2 o )2", vm0, flop_amount);
+ run_test_process("( [X]2 o )2", (msg_host_t)vm0, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
MSG_vm_start(vm0);
run_test_process("( [oo]2 X )2", pm2, flop_amount * 2 / 3);
- run_test_process("( [Xo]2 o )2", vm0, flop_amount * 2 / 3);
- run_test_process("( [oX]2 o )2", vm0, flop_amount * 2 / 3);
+ run_test_process("( [Xo]2 o )2", (msg_host_t)vm0, flop_amount * 2 / 3);
+ run_test_process("( [oX]2 o )2", (msg_host_t)vm0, flop_amount * 2 / 3);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
MSG_vm_start(vm0);
run_test_process("( [ooo]2 X )2", pm2, flop_amount * 2 / 3);
- run_test_process("( [Xoo]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
- run_test_process("( [oXo]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
- run_test_process("( [ooX]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
+ run_test_process("( [Xoo]2 o )2", (msg_host_t)vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
+ run_test_process("( [oXo]2 o )2", (msg_host_t)vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
+ run_test_process("( [ooX]2 o )2", (msg_host_t)vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_start(vm0);
run_test_process("( [o]2 Xo )2", pm2, flop_amount * 2 / 3);
run_test_process("( [o]2 oX )2", pm2, flop_amount * 2 / 3);
- run_test_process("( [X]2 oo )2", vm0, flop_amount * 2 / 3);
+ run_test_process("( [X]2 oo )2", (msg_host_t)vm0, flop_amount * 2 / 3);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_start(vm0);
run_test_process("( [oo]2 Xo )2", pm2, flop_amount / 2);
run_test_process("( [oo]2 oX )2", pm2, flop_amount / 2);
- run_test_process("( [Xo]2 oo )2", vm0, flop_amount / 2);
- run_test_process("( [oX]2 oo )2", vm0, flop_amount / 2);
+ run_test_process("( [Xo]2 oo )2", (msg_host_t)vm0, flop_amount / 2);
+ run_test_process("( [oX]2 oo )2", (msg_host_t)vm0, flop_amount / 2);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_start(vm0);
run_test_process("( [ooo]2 Xo )2", pm2, flop_amount * 2 / 4);
run_test_process("( [ooo]2 oX )2", pm2, flop_amount * 2 / 4);
- run_test_process("( [Xoo]2 oo )2", vm0, flop_amount / 3);
- run_test_process("( [oXo]2 oo )2", vm0, flop_amount / 3);
- run_test_process("( [ooX]2 oo )2", vm0, flop_amount / 3);
+ run_test_process("( [Xoo]2 oo )2", (msg_host_t)vm0, flop_amount / 3);
+ run_test_process("( [oXo]2 oo )2", (msg_host_t)vm0, flop_amount / 3);
+ run_test_process("( [ooX]2 oo )2", (msg_host_t)vm0, flop_amount / 3);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Put a VM on a PM, and put a task to the VM");
vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [X]2 )4", vm0, flop_amount);
+ run_test_process("( [X]2 )4", (msg_host_t)vm0, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Put a VM on a PM, and put two tasks to the VM");
vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [Xo]2 )4", vm0, flop_amount);
- run_test_process("( [oX]2 )4", vm0, flop_amount);
+ run_test_process("( [Xo]2 )4", (msg_host_t)vm0, flop_amount);
+ run_test_process("( [oX]2 )4", (msg_host_t)vm0, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### ( [ooo]2 )4: Put a VM on a PM, and put three tasks to the VM");
vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [Xoo]2 )4", vm0, flop_amount * 2 / 3);
- run_test_process("( [oXo]2 )4", vm0, flop_amount * 2 / 3);
- run_test_process("( [ooX]2 )4", vm0, flop_amount * 2 / 3);
+ run_test_process("( [Xoo]2 )4", (msg_host_t)vm0, flop_amount * 2 / 3);
+ run_test_process("( [oXo]2 )4", (msg_host_t)vm0, flop_amount * 2 / 3);
+ run_test_process("( [ooX]2 )4", (msg_host_t)vm0, flop_amount * 2 / 3);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Put a VM on a PM, and put one task to the PM and one task to the VM");
vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [X]2 o )4", vm0, flop_amount);
+ run_test_process("( [X]2 o )4", (msg_host_t)vm0, flop_amount);
run_test_process("( [o]2 X )4", pm4, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("### Put a VM on a PM, and put two tasks to the PM and one task to the VM");
vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [X]2 oo )4", vm0, flop_amount);
+ run_test_process("( [X]2 oo )4", (msg_host_t)vm0, flop_amount);
run_test_process("( [o]2 Xo )4", pm4, flop_amount);
run_test_process("( [o]2 oX )4", pm4, flop_amount);
MSG_process_sleep(2);
XBT_INFO("### Put a VM on a PM, and put two tasks to the PM and two tasks to the VM");
vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [Xo]2 oo )4", vm0, flop_amount);
- run_test_process("( [oX]2 oo )4", vm0, flop_amount);
+ run_test_process("( [Xo]2 oo )4", (msg_host_t)vm0, flop_amount);
+ run_test_process("( [oX]2 oo )4", (msg_host_t)vm0, flop_amount);
run_test_process("( [oo]2 Xo )4", pm4, flop_amount);
run_test_process("( [oo]2 oX )4", pm4, flop_amount);
MSG_process_sleep(2);
XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and one tasks to the VM");
vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [X]2 ooo )4", vm0, flop_amount);
+ run_test_process("( [X]2 ooo )4", (msg_host_t)vm0, flop_amount);
run_test_process("( [o]2 Xoo )4", pm4, flop_amount);
run_test_process("( [o]2 oXo )4", pm4, flop_amount);
run_test_process("( [o]2 ooX )4", pm4, flop_amount);
XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and two tasks to the VM");
vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [Xo]2 ooo )4", vm0, flop_amount * 4 / 5);
- run_test_process("( [oX]2 ooo )4", vm0, flop_amount * 4 / 5);
+ run_test_process("( [Xo]2 ooo )4", (msg_host_t)vm0, flop_amount * 4 / 5);
+ run_test_process("( [oX]2 ooo )4", (msg_host_t)vm0, flop_amount * 4 / 5);
run_test_process("( [oo]2 Xoo )4", pm4, flop_amount * 4 / 5);
run_test_process("( [oo]2 oXo )4", pm4, flop_amount * 4 / 5);
run_test_process("( [oo]2 ooX )4", pm4, flop_amount * 4 / 5);
XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and three tasks to the VM");
vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
MSG_vm_start(vm0);
- run_test_process("( [Xoo]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3); // The VM has 8/5 of the PM
- run_test_process("( [oXo]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3);
- run_test_process("( [ooX]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3);
+ run_test_process("( [Xoo]2 ooo )4", (msg_host_t)vm0, flop_amount * (8. / 5) * 1 / 3); // The VM has 8/5 of the PM
+ run_test_process("( [oXo]2 ooo )4", (msg_host_t)vm0, flop_amount * (8. / 5) * 1 / 3);
+ run_test_process("( [ooX]2 ooo )4", (msg_host_t)vm0, flop_amount * (8. / 5) * 1 / 3);
run_test_process("( [ooo]2 Xoo )4", pm4, flop_amount * 4 / 5);
run_test_process("( [ooo]2 oXo )4", pm4, flop_amount * 4 / 5);
argvF = xbt_new(char*, 2);
argvF[0] = xbt_strdup("process_daemon");
- daemon = MSG_process_create_with_arguments("process_daemon", process_daemon, NULL, vm0, 1, argvF);
+ daemon = MSG_process_create_with_arguments("process_daemon", process_daemon, NULL, (msg_host_t)vm0, 1, argvF);
argvF = xbt_new(char*, 2);
argvF[0] = xbt_strdup("process_daemonJUPI");