SimGrid (3.14) UNRELEASED; urgency=low
+ Documentation
+ * The whole documentation was reworked and reorganized. There is
+ still much room for improvement here, but we're on it.
+
SMPI
* Call-location tracing for SMPI.
You can add the exact location (filename / linenumber) of an MPI call to
your trace files and slow down or speed up the simulation between two
- consecutive calls by using an adjustment file. (See the documentation.)
+ consecutive calls by using an adjustment file (see the documentation).
- * Fixed computation of timings for MPI_Send, MPI_Recv but possibly also others
+ * Fixed computation of timings for MPI_Send, MPI_Recv & possibly also others
We've found a bug that prevented SMPI to account for MPI_Send, MPI_Recv
and others (in some cases) in a correct way. That is, the smpi/os, smpi/or
values were ignored in some cases. The timings of these functions can now
be significantly different.
- MSG
+ Dropped functions and features
* msg_mailbox_t and associated functions. Use s4u::Mailbox instead.
- MSG_mailbox_is_empty() -> Mailbox::empty()
- MSG_mailbox_front() -> Mailbox::front()
- MSG_mailbox_get_by_alias() -> simgrid::s4u::Mailbox::byName(name)
- MSG_mailbox_get_task_ext() -> MSG_task_receive_ext()
- MSG_mailbox_get_task_ext_bounded -> MSG_task_receive_ext_bounded
+ * Task affinity. Its intended behavior (which was very badly tested
+ and probably not really working) did not match what most users
+ would have expected from it.
XML:
* Add Exa- and Peta- units such as EiB, EB, Eib, Eb for size, and
-# disabled test: cloud-multicore. It's built (in the following list), but not tested (below)
-
foreach(x actions-comm actions-storage app-masterworker app-pingpong app-pmm app-token-ring async-wait async-waitall
- async-waitany cloud-capping cloud-masterworker cloud-migration cloud-multicore cloud-simple cloud-two-tasks
+ async-waitany cloud-capping cloud-masterworker cloud-migration cloud-simple cloud-two-tasks
dht-chord dht-pastry energy-consumption energy-onoff energy-pstate energy-ptask energy-vm platform-failures
io-file io-remote io-storage task-priority process-create process-kill process-migration process-suspend
platform-properties maestro-set process-startkilltime synchro-semaphore trace-categories
${CMAKE_CURRENT_SOURCE_DIR}/process-startkilltime/start_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/process-startkilltime/start_kill_d.xml PARENT_SCOPE)
-# Reenable the cloud-multicore test in the following list
-
foreach(x actions-comm actions-storage app-bittorrent app-chainsend app-masterworker app-pingpong app-token-ring
async-wait async-waitall async-waitany cloud-capping cloud-masterworker cloud-migration cloud-simple
cloud-two-tasks dht-chord dht-pastry dht-kademlia platform-failures io-file io-remote io-storage task-priority
+++ /dev/null
-/* Copyright (c) 2007-2015. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/msg.h"
-XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
-
-static int worker_main(int argc, char *argv[])
-{
- msg_task_t task = MSG_process_get_data(MSG_process_self());
- MSG_task_execute(task);
-
- XBT_INFO("task %p bye", task);
-
- return 0;
-}
-
-struct task_data {
- msg_task_t task;
- double prev_computation_amount;
- double prev_clock;
-};
-
-static void task_data_init_clock(struct task_data *t)
-{
- t->prev_computation_amount = MSG_task_get_flops_amount(t->task);
- t->prev_clock = MSG_get_clock();
-}
-
-static void task_data_get_clock(struct task_data *t)
-{
- double now_computation_amount = MSG_task_get_flops_amount(t->task);
- double now_clock = MSG_get_clock();
-
- double done = t->prev_computation_amount - now_computation_amount;
- double duration = now_clock - t->prev_clock;
-
- XBT_INFO("%s: %f fops/s", MSG_task_get_name(t->task), done / duration);
-
- t->prev_computation_amount = now_computation_amount;
- t->prev_clock = now_clock;
-}
-
-static void test_pm_pin(void)
-{
- xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
- msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
- msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
- msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
- xbt_dynar_free(&hosts_dynar);
-
- struct task_data t1;
- struct task_data t2;
- struct task_data t3;
- struct task_data t4;
-
- t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
- t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
- t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
- t4.task = MSG_task_create("Task4", 1e16, 0, NULL);
-
- MSG_process_create("worker1", worker_main, t1.task, pm2);
- MSG_process_create("worker2", worker_main, t2.task, pm2);
- MSG_process_create("worker3", worker_main, t3.task, pm2);
- MSG_process_create("worker4", worker_main, t4.task, pm2);
-
- XBT_INFO("## 1. start 4 tasks on PM2 (2 cores)");
- task_data_init_clock(&t1);
- task_data_init_clock(&t2);
- task_data_init_clock(&t3);
- task_data_init_clock(&t4);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
- task_data_get_clock(&t4);
-
- XBT_INFO("## 2. pin all tasks to CPU0");
- MSG_task_set_affinity(t1.task, pm2, 0x01);
- MSG_task_set_affinity(t2.task, pm2, 0x01);
- MSG_task_set_affinity(t3.task, pm2, 0x01);
- MSG_task_set_affinity(t4.task, pm2, 0x01);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
- task_data_get_clock(&t4);
-
- XBT_INFO("## 3. clear the affinity of task4");
- MSG_task_set_affinity(t4.task, pm2, 0);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
- task_data_get_clock(&t4);
-
- XBT_INFO("## 4. clear the affinity of task3");
- MSG_task_set_affinity(t3.task, pm2, 0);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
- task_data_get_clock(&t4);
-
- XBT_INFO("## 5. clear the affinity of task2");
- MSG_task_set_affinity(t2.task, pm2, 0);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
- task_data_get_clock(&t4);
-
- XBT_INFO("## 6. pin all tasks to CPU0 of another PM (no effect now)");
- MSG_task_set_affinity(t1.task, pm0, 0);
- MSG_task_set_affinity(t2.task, pm0, 0);
- MSG_task_set_affinity(t3.task, pm1, 0);
- MSG_task_set_affinity(t4.task, pm1, 0);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
- task_data_get_clock(&t4);
-
- MSG_task_cancel(t1.task);
- MSG_task_cancel(t2.task);
- MSG_task_cancel(t3.task);
- MSG_task_cancel(t4.task);
- MSG_process_sleep(10);
- MSG_task_destroy(t1.task);
- MSG_task_destroy(t2.task);
- MSG_task_destroy(t3.task);
- MSG_task_destroy(t4.task);
-}
-
-static void test_vm_pin(void)
-{
- xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
- msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t); // 1 cores
- msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t); // 2 cores
- msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); // 4 cores
- xbt_dynar_free(&hosts_dynar);
-
- /* set up VMs on PM1 (4 cores) */
- msg_vm_t vm0 = MSG_vm_create_core(pm1, "VM0");
- msg_vm_t vm1 = MSG_vm_create_core(pm1, "VM1");
- msg_vm_t vm2 = MSG_vm_create_core(pm1, "VM2");
- msg_vm_t vm3 = MSG_vm_create_core(pm1, "VM3");
-
- s_vm_params_t params;
- memset(¶ms, 0, sizeof(params));
- params.ramsize = 1L * 1024 * 1024;
- params.skip_stage1 = 1;
- params.skip_stage2 = 1;
- //params.mig_speed = 1L * 1024 * 1024;
- MSG_host_set_params(vm0, ¶ms);
- MSG_host_set_params(vm1, ¶ms);
- MSG_host_set_params(vm2, ¶ms);
- MSG_host_set_params(vm3, ¶ms);
-
- MSG_vm_start(vm0);
- MSG_vm_start(vm1);
- MSG_vm_start(vm2);
- MSG_vm_start(vm3);
-
- /* set up tasks and processes */
- struct task_data t0;
- struct task_data t1;
- struct task_data t2;
- struct task_data t3;
-
- t0.task = MSG_task_create("Task0", 1e16, 0, NULL);
- t1.task = MSG_task_create("Task1", 1e16, 0, NULL);
- t2.task = MSG_task_create("Task2", 1e16, 0, NULL);
- t3.task = MSG_task_create("Task3", 1e16, 0, NULL);
-
- MSG_process_create("worker0", worker_main, t0.task, vm0);
- MSG_process_create("worker1", worker_main, t1.task, vm1);
- MSG_process_create("worker2", worker_main, t2.task, vm2);
- MSG_process_create("worker3", worker_main, t3.task, vm3);
-
- /* start experiments */
- XBT_INFO("## 1. start 4 VMs on PM1 (4 cores)");
- task_data_init_clock(&t0);
- task_data_init_clock(&t1);
- task_data_init_clock(&t2);
- task_data_init_clock(&t3);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t0);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
-
- XBT_INFO("## 2. pin all VMs to CPU0 of PM1");
- MSG_vm_set_affinity(vm0, pm1, 0x01);
- MSG_vm_set_affinity(vm1, pm1, 0x01);
- MSG_vm_set_affinity(vm2, pm1, 0x01);
- MSG_vm_set_affinity(vm3, pm1, 0x01);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t0);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
-
- XBT_INFO("## 3. pin all VMs to CPU0 of PM2(no effect at now)");
- /* Because VMs are on PM2, the below operations do not effect computation now. */
- MSG_vm_set_affinity(vm0, pm2, 0x01);
- MSG_vm_set_affinity(vm1, pm2, 0x01);
- MSG_vm_set_affinity(vm2, pm2, 0x01);
- MSG_vm_set_affinity(vm3, pm2, 0x01);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t0);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
-
- XBT_INFO("## 4. unpin VM0, and pin VM2 and VM3 to CPU1 of PM1");
- MSG_vm_set_affinity(vm0, pm1, 0x00);
- MSG_vm_set_affinity(vm2, pm1, 0x02);
- MSG_vm_set_affinity(vm3, pm1, 0x02);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t0);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
-
- XBT_INFO("## 5. migrate all VMs to PM0 (only 1 CPU core)");
- MSG_vm_migrate(vm0, pm0);
- MSG_vm_migrate(vm1, pm0);
- MSG_vm_migrate(vm2, pm0);
- MSG_vm_migrate(vm3, pm0);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t0);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t0);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
-
- XBT_INFO("## 6. migrate all VMs to PM2 (2 CPU cores, with affinity settings)");
- MSG_vm_migrate(vm0, pm2);
- MSG_vm_migrate(vm1, pm2);
- MSG_vm_migrate(vm2, pm2);
- MSG_vm_migrate(vm3, pm2);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t0);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t0);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
-
-
- XBT_INFO("## 7. clear affinity settings on PM1");
- MSG_vm_set_affinity(vm0, pm2, 0);
- MSG_vm_set_affinity(vm1, pm2, 0);
- MSG_vm_set_affinity(vm2, pm2, 0);
- MSG_vm_set_affinity(vm3, pm2, 0);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t0);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
-
- MSG_process_sleep(10);
- task_data_get_clock(&t0);
- task_data_get_clock(&t1);
- task_data_get_clock(&t2);
- task_data_get_clock(&t3);
-
- /* clean up everything */
- MSG_task_cancel(t0.task);
- MSG_task_cancel(t1.task);
- MSG_task_cancel(t2.task);
- MSG_task_cancel(t3.task);
- MSG_process_sleep(10);
- MSG_task_destroy(t0.task);
- MSG_task_destroy(t1.task);
- MSG_task_destroy(t2.task);
- MSG_task_destroy(t3.task);
-
- MSG_vm_destroy(vm0);
- MSG_vm_destroy(vm1);
- MSG_vm_destroy(vm2);
- MSG_vm_destroy(vm3);
-}
-
-static int master_main(int argc, char *argv[])
-{
- XBT_INFO("=== Test PM (set affinity) ===");
- test_pm_pin();
-
- XBT_INFO("=== Test VM (set affinity) ===");
- test_vm_pin();
-
- return 0;
-}
-
-int main(int argc, char *argv[])
-{
- /* Get the arguments */
- MSG_init(&argc, argv);
-
- /* load the platform file */
- if (argc != 2) {
- printf("Usage: %s examples/msg/cloud/multicore_plat.xml\n", argv[0]);
- return 1;
- }
-
- MSG_create_environment(argv[1]);
-
- xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar();
- msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t);
- msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t);
- msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t);
- xbt_dynar_free(&hosts_dynar);
-
- XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm0), MSG_host_get_core_number(pm0),
- MSG_host_get_speed(pm0));
- XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm1), MSG_host_get_core_number(pm1),
- MSG_host_get_speed(pm1));
- XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm2), MSG_host_get_core_number(pm2),
- MSG_host_get_speed(pm2));
-
- MSG_process_create("master", master_main, NULL, pm0);
-
- int res = MSG_main();
- XBT_INFO("Bye (simulation time %g)", MSG_get_clock());
-
- return !(res == MSG_OK);
-}
+++ /dev/null
-#! ./tesh
-
-$ $SG_TEST_EXENV ${bindir:=.}/cloud-multicore$EXEEXT --log=no_loc ${srcdir:=.}/three_multicore_hosts.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (0:maestro@) PM0: 4 core(s), 8095000000.000001 flops/s per each
-> [ 0.000000] (0:maestro@) PM1: 4 core(s), 8095000000.000001 flops/s per each
-> [ 0.000000] (0:maestro@) PM2: 2 core(s), 8095000000.000001 flops/s per each
-> [ 0.000000] (1:master@PM0) === Test PM (set affinity) ===
-> [ 0.000000] (1:master@PM0) ## 1. start 4 tasks on PM2 (2 cores)
-> [ 10.000000] (1:master@PM0) Task1: 4047500000.000000 fops/s
-> [ 10.000000] (1:master@PM0) Task2: 4047500000.000000 fops/s
-> [ 10.000000] (1:master@PM0) Task3: 4047500000.000000 fops/s
-> [ 10.000000] (1:master@PM0) Task4: 4047500000.000000 fops/s
-> [ 10.000000] (1:master@PM0) ## 2. pin all tasks to CPU0
-> [ 10.000000] (1:master@PM0) set affinity(0x0001@PM2) for Task1
double dp_updated_by_deleted_tasks;
int is_migrating;
- xbt_dict_t affinity_mask_db;
xbt_dynar_t file_descriptor_table;
} s_msg_host_priv_t;
XBT_PUBLIC(msg_error_t) MSG_parallel_task_execute(msg_task_t task);
XBT_PUBLIC(void) MSG_task_set_priority(msg_task_t task, double priority);
XBT_PUBLIC(void) MSG_task_set_bound(msg_task_t task, double bound);
-XBT_PUBLIC(void) MSG_task_set_affinity(msg_task_t task, msg_host_t host, unsigned long mask);
XBT_PUBLIC(msg_error_t) MSG_process_join(msg_process_t process, double timeout);
XBT_PUBLIC(msg_error_t) MSG_process_sleep(double nb_sec);
XBT_PUBLIC(msg_host_t) MSG_vm_get_pm(msg_vm_t vm);
XBT_PUBLIC(void) MSG_vm_set_bound(msg_vm_t vm, double bound);
-XBT_PUBLIC(void) MSG_vm_set_affinity(msg_vm_t vm, msg_host_t pm, unsigned long mask);
/* TODO: do we need this? */
// XBT_PUBLIC(xbt_dynar_t) MSG_vms_as_dynar();
XBT_PUBLIC(smx_activity_t) simcall_execution_start(const char *name,
double flops_amount,
- double priority, double bound, unsigned long affinity_mask);
+ double priority, double bound);
XBT_PUBLIC(smx_activity_t) simcall_execution_parallel_start(const char *name,
int host_nb,
sg_host_t *host_list,
XBT_PUBLIC(void) simcall_execution_cancel(smx_activity_t execution);
XBT_PUBLIC(void) simcall_execution_set_priority(smx_activity_t execution, double priority);
XBT_PUBLIC(void) simcall_execution_set_bound(smx_activity_t execution, double bound);
-XBT_PUBLIC(void) simcall_execution_set_affinity(smx_activity_t execution, sg_host_t host, unsigned long mask);
XBT_PUBLIC(e_smx_state_t) simcall_execution_wait(smx_activity_t execution);
/******************************* VM simcalls ********************************/
XBT_PUBLIC(void) simcall_vm_migrate(sg_host_t vm, sg_host_t dst_pm);
XBT_PUBLIC(void *) simcall_vm_get_pm(sg_host_t vm);
XBT_PUBLIC(void) simcall_vm_set_bound(sg_host_t vm, double bound);
-XBT_PUBLIC(void) simcall_vm_set_affinity(sg_host_t vm, sg_host_t pm, unsigned long mask);
XBT_PUBLIC(void) simcall_vm_resume(sg_host_t vm);
XBT_PUBLIC(void) simcall_vm_migratefrom_resumeto(sg_host_t vm, sg_host_t src_pm, sg_host_t dst_pm);
XBT_PUBLIC(void) simcall_vm_save(sg_host_t vm);
*/
XBT_PUBLIC(void) surf_vm_set_bound(sg_host_t resource, double bound);
-/**
- * @brief [brief description]
- * @details [long description]
- *
- * @param resource [description]
- * @param cpu [description]
- * @param mask [description]
- */
-XBT_PUBLIC(void) surf_vm_set_affinity(sg_host_t resource, sg_host_t cpu, unsigned long mask);
-
/**
* @brief Unlink a file descriptor
*
1.0, -1.0));
XBT_DEBUG("Parallel execution action created: %p", simdata->compute);
} else {
- unsigned long affinity_mask =
- static_cast<unsigned long>((uintptr_t) xbt_dict_get_or_null_ext(simdata->affinity_mask_db, (char *) p_simdata->m_host,
- sizeof(msg_host_t)));
- XBT_DEBUG("execute %s@%s with affinity(0x%04lx)",
- MSG_task_get_name(task), MSG_host_get_name(p_simdata->m_host), affinity_mask);
-
- simdata->compute = static_cast<simgrid::kernel::activity::Exec*>(
- simcall_execution_start(task->name, simdata->flops_amount, simdata->priority,
- simdata->bound, affinity_mask));
+ simdata->compute = static_cast<simgrid::kernel::activity::Exec*>(
+ simcall_execution_start(task->name, simdata->flops_amount, simdata->priority, simdata->bound));
}
simcall_set_category(simdata->compute, task->category);
p_simdata->waiting_action = simdata->compute;
priv->dp_updated_by_deleted_tasks = 0;
priv->is_migrating = 0;
- priv->affinity_mask_db = xbt_dict_new_homogeneous(nullptr);
-
priv->file_descriptor_table = xbt_dynar_new(sizeof(int), nullptr);
for (int i=sg_storage_max_file_descriptors-1; i>=0;i--)
xbt_dynar_push_as(priv->file_descriptor_table, int, i);
if (size > 0)
XBT_WARN("dp_objs: %u pending task?", size);
xbt_dict_free(&priv->dp_objs);
- xbt_dict_free(&priv->affinity_mask_db);
xbt_dynar_free(&priv->file_descriptor_table);
free(priv);
/* parallel tasks only */
xbt_free(this->host_list);
-
- xbt_dict_free(&this->affinity_mask_db);
}
void setUsed();
void setNotUsed()
double bound = 0.0; /* Capping for CPU resource */
double rate = 0.0; /* Capping for network resource */
- /* CPU affinity database of this task */
- xbt_dict_t affinity_mask_db = nullptr; /* smx_host_t host => unsigned long mask */
-
bool isused = false; /* Indicates whether the task is used in SIMIX currently */
int host_nb = 0; /* ==0 if sequential task; parallel task if not */
/******* Parallel Tasks Only !!!! *******/
simdata->source = nullptr;
simdata->priority = 1.0;
simdata->bound = 0;
- simdata->affinity_mask_db = xbt_dict_new_homogeneous(nullptr);
simdata->rate = -1.0;
simdata->isused = 0;
if (task->simdata->compute)
simcall_execution_set_bound(task->simdata->compute, task->simdata->bound);
}
-
-/** \ingroup m_task_management
- * \brief Changes the CPU affinity of a computation task.
- *
- * When pinning the given task to the first CPU core of the given host, use 0x01 for the mask value. Each bit of the
- * mask value corresponds to each CPU core. See taskset(1) on Linux.
- *
- * \param task a target task
- * \param host the host having a multi-core CPU
- * \param mask the bit mask of a new CPU affinity setting for the task
- *
- * Usage:
- * 0. Define a host with multiple cores.
- * \<host id="PM0" power="1E8" core="2"/\>
- *
- * 1. Pin a given task to the first CPU core of a host.
- * MSG_task_set_affinity(task, pm0, 0x01);
- *
- * 2. Pin a given task to the third CPU core of a host. Turn on the third bit of the mask.
- * MSG_task_set_affinity(task, pm0, 0x04); // 0x04 == 100B
- *
- * 3. Pin a given VM to the first CPU core of a host.
- * MSG_vm_set_affinity(vm, pm0, 0x01);
- *
- * See examples/msg/cloud/multicore.c for more information.
- *
- * Note:
- * 1. The current code does not allow an affinity of a task to multiple cores.
- * The mask value 0x03 (i.e., a given task will be executed on the first core or the second core) is not allowed.
- * The mask value 0x01 or 0x02 works. See cpu_cas01.c for details.
- *
- * 2. It is recommended to first compare simulation results in both the Lazy and Full calculation modes
- * (using --cfg=cpu/optim:Full or not). Fix cpu_cas01.c if you find wrong results in the Lazy mode.
- */
-void MSG_task_set_affinity(msg_task_t task, msg_host_t host, unsigned long mask)
-{
- xbt_assert(task, "Invalid parameter");
- xbt_assert(task->simdata, "Invalid parameter");
-
- if (mask == 0) {
- /* 0 means clear */
- /* We need remove_ext() not throwing exception. */
- void *ret = xbt_dict_get_or_null_ext(task->simdata->affinity_mask_db, (char *) host, sizeof(msg_host_t));
- if (ret != nullptr)
- xbt_dict_remove_ext(task->simdata->affinity_mask_db, (char *) host, sizeof(host));
- } else
- xbt_dict_set_ext(task->simdata->affinity_mask_db, (char *) host, sizeof(host), (void *)(uintptr_t) mask, nullptr);
-
- /* We set affinity data of this task. If the task is being executed, we actually change the affinity setting of the
- * task. Otherwise, this change will be applied when the task is executed. */
- if (!task->simdata->compute) {
- /* task is not yet executed */
- XBT_INFO("set affinity(0x%04lx@%s) for %s (not active now)", mask, MSG_host_get_name(host),
- MSG_task_get_name(task));
- return;
- }
-
- simgrid::kernel::activity::Exec *compute = task->simdata->compute;
- msg_host_t host_now = compute->host; // simix_private.h is necessary
- if (host_now != host) {
- /* task is not yet executed on this host */
- XBT_INFO("set affinity(0x%04lx@%s) for %s (not active now)", mask, MSG_host_get_name(host),
- MSG_task_get_name(task));
- return;
- }
-
- /* task is being executed on this host. so change the affinity now */
- /* check it works. remove me if it works. */
- xbt_assert(static_cast<unsigned long>((uintptr_t) xbt_dict_get_or_null_ext(task->simdata->affinity_mask_db,
- (char*)(host), sizeof(msg_host_t))) == mask);
- XBT_INFO("set affinity(0x%04lx@%s) for %s", mask, MSG_host_get_name(host), MSG_task_get_name(task));
- simcall_execution_set_affinity(task->simdata->compute, host, mask);
-}
msg_vm_t vm = ms->vm;
msg_host_t src_pm = ms->src_pm;
msg_host_t dst_pm = ms-> dst_pm;
- msg_host_priv_t priv = sg_host_msg(vm);
// TODO: we have an issue, if the DST node is turning off during the three next calls, then the VM is in an inconsistent
// state. I should check with Takahiro in order to make this portion of code atomic
-// /* deinstall the current affinity setting for the CPU */
-// simcall_vm_set_affinity(vm, src_pm, 0);
//
// /* Update the vm location */
// simcall_vm_migrate(vm, dst_pm);
//
simcall_vm_migratefrom_resumeto(vm, src_pm, dst_pm);
- /* install the affinity setting of the VM on the destination pm */
- {
-
- unsigned long affinity_mask =
- (unsigned long)(uintptr_t) xbt_dict_get_or_null_ext(priv->affinity_mask_db, (char *)dst_pm, sizeof(msg_host_t));
- simcall_vm_set_affinity(vm, dst_pm, affinity_mask);
- XBT_DEBUG("set affinity(0x%04lx@%s) for %s", affinity_mask, MSG_host_get_name(dst_pm), MSG_host_get_name(vm));
- }
-
{
// Now the VM is running on the new host (the migration is completed) (even if the SRC crash)
msg_host_priv_t priv = sg_host_msg(vm);
{
simcall_vm_set_bound(vm, bound);
}
-
-/** @brief Set the CPU affinity of a given VM.
- * @ingroup msg_VMs
- *
- * This function changes the CPU affinity of a given VM. Usage is the same as
- * MSG_task_set_affinity(). See the MSG_task_set_affinity() for details.
- */
-void MSG_vm_set_affinity(msg_vm_t vm, msg_host_t pm, unsigned long mask)
-{
- msg_host_priv_t priv = sg_host_msg(vm);
-
- if (mask == 0)
- xbt_dict_remove_ext(priv->affinity_mask_db, (char *) pm, sizeof(pm));
- else
- xbt_dict_set_ext(priv->affinity_mask_db, (char *) pm, sizeof(pm), (void *)(uintptr_t) mask, nullptr);
-
- msg_host_t pm_now = MSG_vm_get_pm(vm);
- if (pm_now == pm) {
- XBT_DEBUG("set affinity(0x%04lx@%s) for %s", mask, MSG_host_get_name(pm), MSG_host_get_name(vm));
- simcall_vm_set_affinity(vm, pm, mask);
- } else
- XBT_DEBUG("set affinity(0x%04lx@%s) for %s (not active now)", mask, MSG_host_get_name(pm), MSG_host_get_name(vm));
-}
}
e_smx_state_t execute(double flops) {
- smx_activity_t s = simcall_execution_start(nullptr,flops,1.0/*priority*/,0./*bound*/, 0L/*affinity*/);
+ smx_activity_t s = simcall_execution_start(nullptr,flops,1.0/*priority*/,0./*bound*/);
return simcall_execution_wait(s);
}
return nullptr;
} else {
/* FIXME: computation size is zero. Is it okay that bound is zero ? */
- return SIMIX_execution_start(process, "suspend", 0.0, 1.0, 0.0, 0);
+ return SIMIX_execution_start(process, "suspend", 0.0, 1.0, 0.0);
}
}
* \param flops_amount amount Computation amount (in flops)
* \param priority computation priority
* \param bound
- * \param affinity_mask
* \return A new SIMIX execution synchronization
*/
smx_activity_t simcall_execution_start(const char *name,
double flops_amount,
- double priority, double bound, unsigned long affinity_mask)
+ double priority, double bound)
{
/* checking for infinite values */
xbt_assert(std::isfinite(flops_amount), "flops_amount is not finite!");
xbt_assert(std::isfinite(priority), "priority is not finite!");
- return simcall_BODY_execution_start(name, flops_amount, priority, bound, affinity_mask);
+ return simcall_BODY_execution_start(name, flops_amount, priority, bound);
}
/**
simcall_BODY_execution_set_bound(execution, bound);
}
-/**
- * \ingroup simix_process_management
- * \brief Changes the CPU affinity of an execution synchro.
- *
- * This functions changes the CPU affinity of an execution synchro. See taskset(1) on Linux.
- * \param execution The execution synchro
- * \param host Host
- * \param mask Affinity mask
- */
-void simcall_execution_set_affinity(smx_activity_t execution, sg_host_t host, unsigned long mask)
-{
- simcall_BODY_execution_set_affinity(execution, host, mask);
-}
-
/**
* \ingroup simix_host_management
* \brief Waits for the completion of an execution synchro and destroy it.
simgrid::simix::kernelImmediate(std::bind(SIMIX_vm_set_bound, vm, bound));
}
-void simcall_vm_set_affinity(sg_host_t vm, sg_host_t pm, unsigned long mask)
-{
- simgrid::simix::kernelImmediate(std::bind(SIMIX_vm_set_affinity, vm, pm, mask));
-}
-
/**
* \ingroup simix_vm_management
* \brief Migrate the given VM to the given physical host
* \ingroup simix_vm_management
* \brief Encompassing simcall to prevent the removal of the src or the dst node at the end of a VM migration
* The simcall actually invokes the following calls:
- * simcall_vm_set_affinity(vm, src_pm, 0);
* simcall_vm_migrate(vm, dst_pm);
* simcall_vm_resume(vm);
*
static inline void simcall_execution_start__set__bound(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[3], arg);
}
-static inline unsigned long simcall_execution_start__get__affinity_mask(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<unsigned long>(simcall->args[4]);
-}
-static inline void simcall_execution_start__set__affinity_mask(smx_simcall_t simcall, unsigned long arg) {
- simgrid::simix::marshal<unsigned long>(simcall->args[4], arg);
-}
static inline smx_activity_t simcall_execution_start__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<smx_activity_t>(simcall->result);
}
simgrid::simix::marshal<double>(simcall->args[1], arg);
}
-static inline smx_activity_t simcall_execution_set_affinity__get__execution(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]);
-}
-static inline void simcall_execution_set_affinity__set__execution(smx_simcall_t simcall, smx_activity_t arg) {
- simgrid::simix::marshal<smx_activity_t>(simcall->args[0], arg);
-}
-static inline sg_host_t simcall_execution_set_affinity__get__ws(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<sg_host_t>(simcall->args[1]);
-}
-static inline void simcall_execution_set_affinity__set__ws(smx_simcall_t simcall, sg_host_t arg) {
- simgrid::simix::marshal<sg_host_t>(simcall->args[1], arg);
-}
-static inline unsigned long simcall_execution_set_affinity__get__mask(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<unsigned long>(simcall->args[2]);
-}
-static inline void simcall_execution_set_affinity__set__mask(smx_simcall_t simcall, unsigned long arg) {
- simgrid::simix::marshal<unsigned long>(simcall->args[2], arg);
-}
-
static inline smx_activity_t simcall_execution_wait__get__execution(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]);
}
XBT_PRIVATE void simcall_HANDLER_process_set_host(smx_simcall_t simcall, smx_process_t process, sg_host_t dest);
XBT_PRIVATE void simcall_HANDLER_process_join(smx_simcall_t simcall, smx_process_t process, double timeout);
XBT_PRIVATE void simcall_HANDLER_process_sleep(smx_simcall_t simcall, double duration);
-XBT_PRIVATE smx_activity_t simcall_HANDLER_execution_start(smx_simcall_t simcall, const char* name, double flops_amount, double priority, double bound, unsigned long affinity_mask);
+XBT_PRIVATE smx_activity_t simcall_HANDLER_execution_start(smx_simcall_t simcall, const char* name, double flops_amount, double priority, double bound);
XBT_PRIVATE void simcall_HANDLER_execution_wait(smx_simcall_t simcall, smx_activity_t execution);
XBT_PRIVATE smx_process_t simcall_HANDLER_process_restart(smx_simcall_t simcall, smx_process_t process);
XBT_PRIVATE smx_activity_t simcall_HANDLER_comm_iprobe(smx_simcall_t simcall, smx_mailbox_t mbox, int type, int src, int tag, simix_match_func_t match_fun, void* data);
return simcall<int, double>(SIMCALL_PROCESS_SLEEP, duration);
}
-inline static smx_activity_t simcall_BODY_execution_start(const char* name, double flops_amount, double priority, double bound, unsigned long affinity_mask) {
+inline static smx_activity_t simcall_BODY_execution_start(const char* name, double flops_amount, double priority, double bound) {
/* Go to that function to follow the code flow through the simcall barrier */
- if (0) simcall_HANDLER_execution_start(&SIMIX_process_self()->simcall, name, flops_amount, priority, bound, affinity_mask);
- return simcall<smx_activity_t, const char*, double, double, double, unsigned long>(SIMCALL_EXECUTION_START, name, flops_amount, priority, bound, affinity_mask);
+ if (0) simcall_HANDLER_execution_start(&SIMIX_process_self()->simcall, name, flops_amount, priority, bound);
+ return simcall<smx_activity_t, const char*, double, double, double>(SIMCALL_EXECUTION_START, name, flops_amount, priority, bound);
}
inline static smx_activity_t simcall_BODY_execution_parallel_start(const char* name, int host_nb, sg_host_t* host_list, double* flops_amount, double* bytes_amount, double amount, double rate) {
return simcall<void, smx_activity_t, double>(SIMCALL_EXECUTION_SET_BOUND, execution, bound);
}
-inline static void simcall_BODY_execution_set_affinity(smx_activity_t execution, sg_host_t ws, unsigned long mask) {
- /* Go to that function to follow the code flow through the simcall barrier */
- if (0) SIMIX_execution_set_affinity(execution, ws, mask);
- return simcall<void, smx_activity_t, sg_host_t, unsigned long>(SIMCALL_EXECUTION_SET_AFFINITY, execution, ws, mask);
- }
-
inline static int simcall_BODY_execution_wait(smx_activity_t execution) {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_execution_wait(&SIMIX_process_self()->simcall, execution);
SIMCALL_EXECUTION_CANCEL,
SIMCALL_EXECUTION_SET_PRIORITY,
SIMCALL_EXECUTION_SET_BOUND,
- SIMCALL_EXECUTION_SET_AFFINITY,
SIMCALL_EXECUTION_WAIT,
SIMCALL_PROCESS_ON_EXIT,
SIMCALL_PROCESS_AUTO_RESTART_SET,
"SIMCALL_EXECUTION_CANCEL",
"SIMCALL_EXECUTION_SET_PRIORITY",
"SIMCALL_EXECUTION_SET_BOUND",
- "SIMCALL_EXECUTION_SET_AFFINITY",
"SIMCALL_EXECUTION_WAIT",
"SIMCALL_PROCESS_ON_EXIT",
"SIMCALL_PROCESS_AUTO_RESTART_SET",
break;
case SIMCALL_EXECUTION_START:
- simgrid::simix::marshal<smx_activity_t>(simcall->result, simcall_HANDLER_execution_start(simcall, simgrid::simix::unmarshal<const char*>(simcall->args[0]), simgrid::simix::unmarshal<double>(simcall->args[1]), simgrid::simix::unmarshal<double>(simcall->args[2]), simgrid::simix::unmarshal<double>(simcall->args[3]), simgrid::simix::unmarshal<unsigned long>(simcall->args[4])));
+ simgrid::simix::marshal<smx_activity_t>(simcall->result, simcall_HANDLER_execution_start(simcall, simgrid::simix::unmarshal<const char*>(simcall->args[0]), simgrid::simix::unmarshal<double>(simcall->args[1]), simgrid::simix::unmarshal<double>(simcall->args[2]), simgrid::simix::unmarshal<double>(simcall->args[3])));
SIMIX_simcall_answer(simcall);
break;
SIMIX_simcall_answer(simcall);
break;
-case SIMCALL_EXECUTION_SET_AFFINITY:
- SIMIX_execution_set_affinity(simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]), simgrid::simix::unmarshal<sg_host_t>(simcall->args[1]), simgrid::simix::unmarshal<unsigned long>(simcall->args[2]));
- SIMIX_simcall_answer(simcall);
- break;
-
case SIMCALL_EXECUTION_WAIT:
simcall_HANDLER_execution_wait(simcall, simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]));
break;
int process_join(smx_process_t process, double timeout) [[block]];
int process_sleep(double duration) [[block]];
-smx_activity_t execution_start(const char* name, double flops_amount, double priority, double bound, unsigned long affinity_mask);
+smx_activity_t execution_start(const char* name, double flops_amount, double priority, double bound);
smx_activity_t execution_parallel_start(const char* name, int host_nb, sg_host_t* host_list, double* flops_amount, double* bytes_amount, double amount, double rate) [[nohandler]];
void execution_cancel(smx_activity_t execution) [[nohandler]];
void execution_set_priority(smx_activity_t execution, double priority) [[nohandler]];
void execution_set_bound(smx_activity_t execution, double bound) [[nohandler]];
-void execution_set_affinity(smx_activity_t execution, sg_host_t ws, unsigned long mask) [[nohandler]];
int execution_wait(smx_activity_t execution) [[block]];
void process_on_exit(smx_process_t process, int_f_pvoid_pvoid_t fun, void* data) [[nohandler]];
}
smx_activity_t simcall_HANDLER_execution_start(smx_simcall_t simcall, const char* name, double flops_amount,
- double priority, double bound, unsigned long affinity_mask) {
- return SIMIX_execution_start(simcall->issuer, name,flops_amount,priority,bound,affinity_mask);
+ double priority, double bound) {
+ return SIMIX_execution_start(simcall->issuer, name,flops_amount,priority,bound);
}
smx_activity_t SIMIX_execution_start(smx_process_t issuer, const char *name, double flops_amount, double priority,
- double bound, unsigned long affinity_mask){
+ double bound){
/* alloc structures and initialize */
simgrid::kernel::activity::Exec *exec = new simgrid::kernel::activity::Exec(name, issuer->host);
if (bound > 0)
static_cast<simgrid::surf::CpuAction*>(exec->surf_exec)->setBound(bound);
-
- if (affinity_mask != 0) {
- /* just a double check to confirm that this host is the host where this task is running. */
- xbt_assert(exec->host == issuer->host);
- static_cast<simgrid::surf::CpuAction*>(exec->surf_exec)
- ->setAffinity(issuer->host->pimpl_cpu, affinity_mask);
- }
}
XBT_DEBUG("Create execute synchro %p: %s", exec, exec->name.c_str());
static_cast<simgrid::surf::CpuAction*>(exec->surf_exec)->setBound(bound);
}
-void SIMIX_execution_set_affinity(smx_activity_t synchro, sg_host_t host, unsigned long mask)
-{
- simgrid::kernel::activity::Exec *exec = static_cast<simgrid::kernel::activity::Exec *>(synchro);
- if(exec->surf_exec) {
- /* just a double check to confirm that this host is the host where this task is running. */
- xbt_assert(exec->host == host);
- static_cast<simgrid::surf::CpuAction*>(exec->surf_exec)->setAffinity(host->pimpl_cpu, mask);
- }
-}
-
void simcall_HANDLER_execution_wait(smx_simcall_t simcall, smx_activity_t synchro)
{
simgrid::kernel::activity::Exec *exec = static_cast<simgrid::kernel::activity::Exec *>(synchro);
XBT_PRIVATE void SIMIX_host_restart_processes(sg_host_t host);
XBT_PRIVATE void SIMIX_host_autorestart(sg_host_t host);
XBT_PRIVATE smx_activity_t SIMIX_execution_start(smx_process_t issuer, const char *name,
- double flops_amount, double priority, double bound, unsigned long affinity_mask);
+ double flops_amount, double priority, double bound);
XBT_PRIVATE smx_activity_t SIMIX_execution_parallel_start(const char *name,
int host_nb, sg_host_t *host_list,
double *flops_amount, double *bytes_amount,
XBT_PRIVATE void SIMIX_execution_cancel(smx_activity_t synchro);
XBT_PRIVATE void SIMIX_execution_set_priority(smx_activity_t synchro, double priority);
XBT_PRIVATE void SIMIX_execution_set_bound(smx_activity_t synchro, double bound);
-XBT_PRIVATE void SIMIX_execution_set_affinity(smx_activity_t synchro, sg_host_t host, unsigned long mask);
XBT_PRIVATE void SIMIX_execution_finish(simgrid::kernel::activity::Exec *exec);
XBT_PRIVATE void SIMIX_vm_set_bound(sg_host_t ind_vm, double bound);
-XBT_PRIVATE void SIMIX_vm_set_affinity(sg_host_t ind_vm, sg_host_t ind_pm, unsigned long mask);
-
XBT_PRIVATE void SIMIX_vm_migratefrom_resumeto(sg_host_t vm, sg_host_t src_pm, sg_host_t dst_pm);
SG_END_DECL()
/**
* @brief Encompassing simcall to prevent the removal of the src or the dst node at the end of a VM migration
* The simcall actually invokes the following calls:
- * simcall_vm_set_affinity(vm, src_pm, 0);
* simcall_vm_migrate(vm, dst_pm);
* simcall_vm_resume(vm);
*
*/
void SIMIX_vm_migratefrom_resumeto(sg_host_t vm, sg_host_t src_pm, sg_host_t dst_pm)
{
- /* deinstall the current affinity setting for the CPU */
- SIMIX_vm_set_affinity(vm, src_pm, 0);
-
/* Update the vm location */
SIMIX_vm_migrate(vm, dst_pm);
surf_vm_set_bound(host, bound);
}
-/**
- * @brief Function to set the CPU affinity of the given SIMIX VM host.
- *
- * @param host the vm host (a sg_host_t)
- * @param host the pm host (a sg_host_t)
- * @param mask affinity mask (a unsigned long)
- */
-void SIMIX_vm_set_affinity(sg_host_t ind_vm, sg_host_t ind_pm, unsigned long mask)
-{
- /* make sure this at the MSG layer. */
- xbt_assert(SIMIX_vm_get_pm(ind_vm) == ind_pm);
-
- surf_vm_set_affinity(ind_vm, ind_pm, mask);
-}
-
-
/**
* @brief Function to suspend a SIMIX VM host. This function stops the execution of the
* VM. All the processes on this VM will pause. The state of the VM is
void smpi_execute_flops(double flops) {
smx_activity_t action;
XBT_DEBUG("Handle real computation time: %f flops", flops);
- action = simcall_execution_start("computation", flops, 1, 0, 0);
+ action = simcall_execution_start("computation", flops, 1, 0);
simcall_set_category (action, TRACE_internal_smpi_get_category());
simcall_execution_wait(action);
smpi_switch_data_segment(smpi_process_index());
}
xbt_assert(model == surf_cpu_model_pm || core==1, "Currently, VM cannot be multicore");
-
- if (model->getUpdateMechanism() != UM_UNDEFINED) {
- p_constraintCore = xbt_new(lmm_constraint_t, core);
- p_constraintCoreId = xbt_new(void*, core);
-
- for (int i = 0; i < core; i++) {
- /* just for a unique id, never used as a string. */
- p_constraintCoreId[i] = bprintf("%s:%i", host->name().c_str(), i);
- p_constraintCore[i] = lmm_constraint_new(model->getMaxminSystem(), p_constraintCoreId[i], speed_.scale * speed_.peak);
- }
- }
}
Cpu::~Cpu()
{
- if (p_constraintCoreId){
- for (int i = 0; i < coresAmount_; i++)
- xbt_free(p_constraintCoreId[i]);
- xbt_free(p_constraintCore);
- }
- xbt_free(p_constraintCoreId);
xbt_dynar_free(&speedPerPstate_);
}
lastValue_ = lmm_variable_getvalue(getVariable());
}
-/*
- *
- * This function formulates a constraint problem that pins a given task to
- * particular cores. Currently, it is possible to pin a task to an exactly one
- * specific core. The system links the variable object of the task to the
- * per-core constraint object.
- *
- * But, the taskset command on Linux takes a mask value specifying a CPU
- * affinity setting of a given task. If the mask value is 0x03, the given task
- * will be executed on the first core (CPU0) or the second core (CPU1) on the
- * given PM. The schedular will determine appropriate placements of tasks,
- * considering given CPU affinities and task activities.
- *
- * How should the system formulate constraint problems for an affinity to
- * multiple cores?
- *
- * The cpu argument must be the host where the task is being executed. The
- * action object does not have the information about the location where the
- * action is being executed.
- */
-void CpuAction::setAffinity(Cpu *cpu, unsigned long mask)
-{
- lmm_variable_t var_obj = getVariable();
- XBT_IN("(%p,%lx)", this, mask);
-
- {
- unsigned long nbits = 0;
-
- /* FIXME: There is much faster algorithms doing this. */
- for (int i = 0; i < cpu->coresAmount_; i++) {
- unsigned long has_affinity = (1UL << i) & mask;
- if (has_affinity)
- nbits += 1;
- }
-
- xbt_assert(nbits <= 1, "Affinity mask cannot span over multiple cores.");
- }
-
- for (int i = 0; i < cpu->coresAmount_; i++) {
- XBT_DEBUG("clear affinity %p to cpu-%d@%s", this, i, cpu->getName());
- lmm_shrink(cpu->getModel()->getMaxminSystem(), cpu->p_constraintCore[i], var_obj);
-
- unsigned long has_affinity = (1UL << i) & mask;
- if (has_affinity) {
- /* This function only accepts an affinity setting on the host where the
- * task is now running. In future, a task might move to another host.
- * But, at this moment, this function cannot take an affinity setting on
- * that future host.
- *
- * It might be possible to extend the code to allow this function to
- * accept affinity settings on a future host. We might be able to assign
- * zero to elem->value to maintain such inactive affinity settings in the
- * system. But, this will make the system complex. */
- XBT_DEBUG("set affinity %p to cpu-%d@%s", this, i, cpu->getName());
- lmm_expand(cpu->getModel()->getMaxminSystem(), cpu->p_constraintCore[i], var_obj, 1.0);
- }
- }
-
- if (cpu->getModel()->getUpdateMechanism() == UM_LAZY) {
- /* FIXME (hypervisor): Do we need to do something for the LAZY mode? */
- }
- XBT_OUT();
-}
-
simgrid::xbt::signal<void(simgrid::surf::CpuAction*, Action::State)> CpuAction::onStateChange;
void CpuAction::setState(Action::State state){
xbt_dynar_t speedPerPstate_ = nullptr; /*< List of supported CPU capacities (pstate related) */
int pstate_ = 0; /*< Current pstate (index in the speedPeakList)*/
- /* Note (hypervisor): */
- lmm_constraint_t *p_constraintCore=nullptr;
- void **p_constraintCoreId=nullptr;
-
public:
virtual void setStateTrace(tmgr_trace_t trace); /*< setup the trace file with states events (ON or OFF). Trace must contain boolean values (0 or 1). */
virtual void setSpeedTrace(tmgr_trace_t trace); /*< setup the trace file with availability events (peak speed changes due to external load). Trace must contain relative values (ratio between 0 and 1) */
CpuAction(simgrid::surf::Model *model, double cost, bool failed, lmm_variable_t var)
: Action(model, cost, failed, var) {}
- /** @brief Set the affinity of the current CpuAction */
- virtual void setAffinity(Cpu *cpu, unsigned long mask);
-
void setState(simgrid::surf::Action::State state) override;
void updateRemainingLazy(double now) override;
void setMaxDuration(double duration) override;
void setPriority(double priority) override;
double getRemains() override;
- void setAffinity(Cpu * /*cpu*/, unsigned long /*mask*/) override {};
CpuTi *cpu_;
int indexHeap_;
get_casted_vm(vm)->setBound(bound);
}
-void surf_vm_set_affinity(sg_host_t vm, sg_host_t host, unsigned long mask){
- get_casted_vm(vm)->setAffinity(host->pimpl_cpu, mask);
-}
-
xbt_dict_t surf_storage_get_content(surf_resource_t resource){
return static_cast<simgrid::surf::Storage*>(surf_storage_resource_priv(resource))->getContent();
}
sg_host_t getPm();
virtual void setBound(double bound)=0;
- virtual void setAffinity(Cpu *cpu, unsigned long mask)=0;
/* The vm object of the lower layer */
CpuAction *action_;
action_->setBound(bound);
}
-void VMHL13::setAffinity(Cpu *cpu, unsigned long mask){
- action_->setAffinity(cpu, mask);
-}
-
}
}
void migrate(sg_host_t ind_dst_pm) override;
void setBound(double bound) override;
- void setAffinity(Cpu *cpu, unsigned long mask) override;
};
/**********