From: Martin Quinson
Date: Wed, 10 Aug 2016 23:41:59 +0000 (+0200)
Subject: kill the half-baked setAffinity feature
X-Git-Tag: v3_14~584
X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/e5a5b0f6ee35cb592f9345a9dafa0e29df8477ef

kill the half-baked setAffinity feature
---

diff --git a/ChangeLog b/ChangeLog
index 0736be8556..35b92f5530 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,24 +1,31 @@
 SimGrid (3.14) UNRELEASED; urgency=low
+ Documentation
+ * The whole documentation was reworked and reorganized. There is
+   still much room for improvement here, but we're on it.
+
 SMPI
 * Call-location tracing for SMPI. You can add the exact location
   (filename / linenumber) of an MPI call to your trace files and
   slow down or speed up the simulation between two
-  consecutive calls by using an adjustment file. (See the documentation.)
+  consecutive calls by using an adjustment file (see the documentation).
- * Fixed computation of timings for MPI_Send, MPI_Recv but possibly also others
+ * Fixed computation of timings for MPI_Send, MPI_Recv & possibly also others
   We've found a bug that prevented SMPI to account for MPI_Send,
   MPI_Recv and others (in some cases) in a correct way. That is, the
   smpi/os, smpi/or values were ignored in some cases. The timings of
   these functions can now be significantly different.
- MSG
+ Dropped functions and features
 * msg_mailbox_t and associated functions. Use s4u::Mailbox instead.
   - MSG_mailbox_is_empty() -> Mailbox::empty()
   - MSG_mailbox_front() -> Mailbox::front()
   - MSG_mailbox_get_by_alias() -> simgrid::s4u::Mailbox::byName(name)
   - MSG_mailbox_get_task_ext() -> MSG_task_receive_ext()
   - MSG_mailbox_get_task_ext_bounded -> MSG_task_receive_ext_bounded
+ * Task affinity. Its intended behavior (which was very badly tested
+   and probably not really working) did not match what most users
+   would have expected here.
 XML:
 * Add Exa- and Peta- units such as EiB, EB, Eib, Eb for size, and
diff --git a/examples/msg/CMakeLists.txt b/examples/msg/CMakeLists.txt
index 42892fdc21..ee73989b74 100644
--- a/examples/msg/CMakeLists.txt
+++ b/examples/msg/CMakeLists.txt
@@ -1,7 +1,5 @@
-# disabled test: cloud-multicore.
It's built (in the following list), but not tested (below) - foreach(x actions-comm actions-storage app-masterworker app-pingpong app-pmm app-token-ring async-wait async-waitall - async-waitany cloud-capping cloud-masterworker cloud-migration cloud-multicore cloud-simple cloud-two-tasks + async-waitany cloud-capping cloud-masterworker cloud-migration cloud-simple cloud-two-tasks dht-chord dht-pastry energy-consumption energy-onoff energy-pstate energy-ptask energy-vm platform-failures io-file io-remote io-storage task-priority process-create process-kill process-migration process-suspend platform-properties maestro-set process-startkilltime synchro-semaphore trace-categories @@ -85,8 +83,6 @@ set(xml_files ${xml_files} ${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/actio ${CMAKE_CURRENT_SOURCE_DIR}/process-startkilltime/start_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/process-startkilltime/start_kill_d.xml PARENT_SCOPE) -# Reenable the cloud-multicore test in the following list - foreach(x actions-comm actions-storage app-bittorrent app-chainsend app-masterworker app-pingpong app-token-ring async-wait async-waitall async-waitany cloud-capping cloud-masterworker cloud-migration cloud-simple cloud-two-tasks dht-chord dht-pastry dht-kademlia platform-failures io-file io-remote io-storage task-priority diff --git a/examples/msg/cloud-multicore/cloud-multicore.c b/examples/msg/cloud-multicore/cloud-multicore.c deleted file mode 100644 index ebb0a22209..0000000000 --- a/examples/msg/cloud-multicore/cloud-multicore.c +++ /dev/null @@ -1,353 +0,0 @@ -/* Copyright (c) 2007-2015. The SimGrid Team. - * All rights reserved. */ - -/* This program is free software; you can redistribute it and/or modify it - * under the terms of the license (GNU LGPL) which comes with this package. 
*/ - -#include "simgrid/msg.h" -XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example"); - -static int worker_main(int argc, char *argv[]) -{ - msg_task_t task = MSG_process_get_data(MSG_process_self()); - MSG_task_execute(task); - - XBT_INFO("task %p bye", task); - - return 0; -} - -struct task_data { - msg_task_t task; - double prev_computation_amount; - double prev_clock; -}; - -static void task_data_init_clock(struct task_data *t) -{ - t->prev_computation_amount = MSG_task_get_flops_amount(t->task); - t->prev_clock = MSG_get_clock(); -} - -static void task_data_get_clock(struct task_data *t) -{ - double now_computation_amount = MSG_task_get_flops_amount(t->task); - double now_clock = MSG_get_clock(); - - double done = t->prev_computation_amount - now_computation_amount; - double duration = now_clock - t->prev_clock; - - XBT_INFO("%s: %f fops/s", MSG_task_get_name(t->task), done / duration); - - t->prev_computation_amount = now_computation_amount; - t->prev_clock = now_clock; -} - -static void test_pm_pin(void) -{ - xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar(); - msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t); - msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t); - msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); - xbt_dynar_free(&hosts_dynar); - - struct task_data t1; - struct task_data t2; - struct task_data t3; - struct task_data t4; - - t1.task = MSG_task_create("Task1", 1e16, 0, NULL); - t2.task = MSG_task_create("Task2", 1e16, 0, NULL); - t3.task = MSG_task_create("Task3", 1e16, 0, NULL); - t4.task = MSG_task_create("Task4", 1e16, 0, NULL); - - MSG_process_create("worker1", worker_main, t1.task, pm2); - MSG_process_create("worker2", worker_main, t2.task, pm2); - MSG_process_create("worker3", worker_main, t3.task, pm2); - MSG_process_create("worker4", worker_main, t4.task, pm2); - - XBT_INFO("## 1. start 4 tasks on PM2 (2 cores)"); - task_data_init_clock(&t1); - task_data_init_clock(&t2); - task_data_init_clock(&t3); - task_data_init_clock(&t4); - - MSG_process_sleep(10); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - task_data_get_clock(&t4); - - XBT_INFO("## 2. pin all tasks to CPU0"); - MSG_task_set_affinity(t1.task, pm2, 0x01); - MSG_task_set_affinity(t2.task, pm2, 0x01); - MSG_task_set_affinity(t3.task, pm2, 0x01); - MSG_task_set_affinity(t4.task, pm2, 0x01); - - MSG_process_sleep(10); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - task_data_get_clock(&t4); - - XBT_INFO("## 3. clear the affinity of task4"); - MSG_task_set_affinity(t4.task, pm2, 0); - - MSG_process_sleep(10); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - task_data_get_clock(&t4); - - XBT_INFO("## 4. clear the affinity of task3"); - MSG_task_set_affinity(t3.task, pm2, 0); - - MSG_process_sleep(10); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - task_data_get_clock(&t4); - - XBT_INFO("## 5. clear the affinity of task2"); - MSG_task_set_affinity(t2.task, pm2, 0); - - MSG_process_sleep(10); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - task_data_get_clock(&t4); - - XBT_INFO("## 6. 
pin all tasks to CPU0 of another PM (no effect now)"); - MSG_task_set_affinity(t1.task, pm0, 0); - MSG_task_set_affinity(t2.task, pm0, 0); - MSG_task_set_affinity(t3.task, pm1, 0); - MSG_task_set_affinity(t4.task, pm1, 0); - - MSG_process_sleep(10); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - task_data_get_clock(&t4); - - MSG_task_cancel(t1.task); - MSG_task_cancel(t2.task); - MSG_task_cancel(t3.task); - MSG_task_cancel(t4.task); - MSG_process_sleep(10); - MSG_task_destroy(t1.task); - MSG_task_destroy(t2.task); - MSG_task_destroy(t3.task); - MSG_task_destroy(t4.task); -} - -static void test_vm_pin(void) -{ - xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar(); - msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t); // 1 cores - msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t); // 2 cores - msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); // 4 cores - xbt_dynar_free(&hosts_dynar); - - /* set up VMs on PM1 (4 cores) */ - msg_vm_t vm0 = MSG_vm_create_core(pm1, "VM0"); - msg_vm_t vm1 = MSG_vm_create_core(pm1, "VM1"); - msg_vm_t vm2 = MSG_vm_create_core(pm1, "VM2"); - msg_vm_t vm3 = MSG_vm_create_core(pm1, "VM3"); - - s_vm_params_t params; - memset(¶ms, 0, sizeof(params)); - params.ramsize = 1L * 1024 * 1024; - params.skip_stage1 = 1; - params.skip_stage2 = 1; - //params.mig_speed = 1L * 1024 * 1024; - MSG_host_set_params(vm0, ¶ms); - MSG_host_set_params(vm1, ¶ms); - MSG_host_set_params(vm2, ¶ms); - MSG_host_set_params(vm3, ¶ms); - - MSG_vm_start(vm0); - MSG_vm_start(vm1); - MSG_vm_start(vm2); - MSG_vm_start(vm3); - - /* set up tasks and processes */ - struct task_data t0; - struct task_data t1; - struct task_data t2; - struct task_data t3; - - t0.task = MSG_task_create("Task0", 1e16, 0, NULL); - t1.task = MSG_task_create("Task1", 1e16, 0, NULL); - t2.task = MSG_task_create("Task2", 1e16, 0, NULL); - t3.task = MSG_task_create("Task3", 1e16, 0, NULL); - - MSG_process_create("worker0", worker_main, t0.task, vm0); - MSG_process_create("worker1", worker_main, t1.task, vm1); - MSG_process_create("worker2", worker_main, t2.task, vm2); - MSG_process_create("worker3", worker_main, t3.task, vm3); - - /* start experiments */ - XBT_INFO("## 1. start 4 VMs on PM1 (4 cores)"); - task_data_init_clock(&t0); - task_data_init_clock(&t1); - task_data_init_clock(&t2); - task_data_init_clock(&t3); - - MSG_process_sleep(10); - task_data_get_clock(&t0); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - - XBT_INFO("## 2. pin all VMs to CPU0 of PM1"); - MSG_vm_set_affinity(vm0, pm1, 0x01); - MSG_vm_set_affinity(vm1, pm1, 0x01); - MSG_vm_set_affinity(vm2, pm1, 0x01); - MSG_vm_set_affinity(vm3, pm1, 0x01); - - MSG_process_sleep(10); - task_data_get_clock(&t0); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - - XBT_INFO("## 3. pin all VMs to CPU0 of PM2(no effect at now)"); - /* Because VMs are on PM2, the below operations do not effect computation now. */ - MSG_vm_set_affinity(vm0, pm2, 0x01); - MSG_vm_set_affinity(vm1, pm2, 0x01); - MSG_vm_set_affinity(vm2, pm2, 0x01); - MSG_vm_set_affinity(vm3, pm2, 0x01); - - MSG_process_sleep(10); - task_data_get_clock(&t0); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - - XBT_INFO("## 4. 
unpin VM0, and pin VM2 and VM3 to CPU1 of PM1"); - MSG_vm_set_affinity(vm0, pm1, 0x00); - MSG_vm_set_affinity(vm2, pm1, 0x02); - MSG_vm_set_affinity(vm3, pm1, 0x02); - - MSG_process_sleep(10); - task_data_get_clock(&t0); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - - XBT_INFO("## 5. migrate all VMs to PM0 (only 1 CPU core)"); - MSG_vm_migrate(vm0, pm0); - MSG_vm_migrate(vm1, pm0); - MSG_vm_migrate(vm2, pm0); - MSG_vm_migrate(vm3, pm0); - - MSG_process_sleep(10); - task_data_get_clock(&t0); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - - MSG_process_sleep(10); - task_data_get_clock(&t0); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - - XBT_INFO("## 6. migrate all VMs to PM2 (2 CPU cores, with affinity settings)"); - MSG_vm_migrate(vm0, pm2); - MSG_vm_migrate(vm1, pm2); - MSG_vm_migrate(vm2, pm2); - MSG_vm_migrate(vm3, pm2); - - MSG_process_sleep(10); - task_data_get_clock(&t0); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - - MSG_process_sleep(10); - task_data_get_clock(&t0); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - - - XBT_INFO("## 7. clear affinity settings on PM1"); - MSG_vm_set_affinity(vm0, pm2, 0); - MSG_vm_set_affinity(vm1, pm2, 0); - MSG_vm_set_affinity(vm2, pm2, 0); - MSG_vm_set_affinity(vm3, pm2, 0); - - MSG_process_sleep(10); - task_data_get_clock(&t0); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - - MSG_process_sleep(10); - task_data_get_clock(&t0); - task_data_get_clock(&t1); - task_data_get_clock(&t2); - task_data_get_clock(&t3); - - /* clean up everything */ - MSG_task_cancel(t0.task); - MSG_task_cancel(t1.task); - MSG_task_cancel(t2.task); - MSG_task_cancel(t3.task); - MSG_process_sleep(10); - MSG_task_destroy(t0.task); - MSG_task_destroy(t1.task); - MSG_task_destroy(t2.task); - MSG_task_destroy(t3.task); - - MSG_vm_destroy(vm0); - MSG_vm_destroy(vm1); - MSG_vm_destroy(vm2); - MSG_vm_destroy(vm3); -} - -static int master_main(int argc, char *argv[]) -{ - XBT_INFO("=== Test PM (set affinity) ==="); - test_pm_pin(); - - XBT_INFO("=== Test VM (set affinity) ==="); - test_vm_pin(); - - return 0; -} - -int main(int argc, char *argv[]) -{ - /* Get the arguments */ - MSG_init(&argc, argv); - - /* load the platform file */ - if (argc != 2) { - printf("Usage: %s examples/msg/cloud/multicore_plat.xml\n", argv[0]); - return 1; - } - - MSG_create_environment(argv[1]); - - xbt_dynar_t hosts_dynar = MSG_hosts_as_dynar(); - msg_host_t pm0 = xbt_dynar_get_as(hosts_dynar, 0, msg_host_t); - msg_host_t pm1 = xbt_dynar_get_as(hosts_dynar, 1, msg_host_t); - msg_host_t pm2 = xbt_dynar_get_as(hosts_dynar, 2, msg_host_t); - xbt_dynar_free(&hosts_dynar); - - XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm0), MSG_host_get_core_number(pm0), - MSG_host_get_speed(pm0)); - XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm1), MSG_host_get_core_number(pm1), - MSG_host_get_speed(pm1)); - XBT_INFO("%s: %d core(s), %f flops/s per each", MSG_host_get_name(pm2), MSG_host_get_core_number(pm2), - MSG_host_get_speed(pm2)); - - MSG_process_create("master", master_main, NULL, pm0); - - int res = MSG_main(); - XBT_INFO("Bye (simulation time %g)", MSG_get_clock()); - - return !(res == MSG_OK); -} diff --git a/examples/msg/cloud-multicore/cloud-multicore.tesh b/examples/msg/cloud-multicore/cloud-multicore.tesh deleted file mode 100644 index 
11c3790671..0000000000 --- a/examples/msg/cloud-multicore/cloud-multicore.tesh +++ /dev/null @@ -1,14 +0,0 @@ -#! ./tesh - -$ $SG_TEST_EXENV ${bindir:=.}/cloud-multicore$EXEEXT --log=no_loc ${srcdir:=.}/three_multicore_hosts.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" -> [ 0.000000] (0:maestro@) PM0: 4 core(s), 8095000000.000001 flops/s per each -> [ 0.000000] (0:maestro@) PM1: 4 core(s), 8095000000.000001 flops/s per each -> [ 0.000000] (0:maestro@) PM2: 2 core(s), 8095000000.000001 flops/s per each -> [ 0.000000] (1:master@PM0) === Test PM (set affinity) === -> [ 0.000000] (1:master@PM0) ## 1. start 4 tasks on PM2 (2 cores) -> [ 10.000000] (1:master@PM0) Task1: 4047500000.000000 fops/s -> [ 10.000000] (1:master@PM0) Task2: 4047500000.000000 fops/s -> [ 10.000000] (1:master@PM0) Task3: 4047500000.000000 fops/s -> [ 10.000000] (1:master@PM0) Task4: 4047500000.000000 fops/s -> [ 10.000000] (1:master@PM0) ## 2. pin all tasks to CPU0 -> [ 10.000000] (1:master@PM0) set affinity(0x0001@PM2) for Task1 diff --git a/include/simgrid/msg.h b/include/simgrid/msg.h index bddd04eeb1..5f2f05cff1 100644 --- a/include/simgrid/msg.h +++ b/include/simgrid/msg.h @@ -37,7 +37,6 @@ typedef struct s_msg_host_priv { double dp_updated_by_deleted_tasks; int is_migrating; - xbt_dict_t affinity_mask_db; xbt_dynar_t file_descriptor_table; } s_msg_host_priv_t; @@ -371,7 +370,6 @@ XBT_PUBLIC(msg_error_t) MSG_task_execute(msg_task_t task); XBT_PUBLIC(msg_error_t) MSG_parallel_task_execute(msg_task_t task); XBT_PUBLIC(void) MSG_task_set_priority(msg_task_t task, double priority); XBT_PUBLIC(void) MSG_task_set_bound(msg_task_t task, double bound); -XBT_PUBLIC(void) MSG_task_set_affinity(msg_task_t task, msg_host_t host, unsigned long mask); XBT_PUBLIC(msg_error_t) MSG_process_join(msg_process_t process, double timeout); XBT_PUBLIC(msg_error_t) MSG_process_sleep(double nb_sec); @@ -520,7 +518,6 @@ XBT_PUBLIC(void) MSG_vm_restore(msg_vm_t vm); XBT_PUBLIC(msg_host_t) MSG_vm_get_pm(msg_vm_t vm); XBT_PUBLIC(void) MSG_vm_set_bound(msg_vm_t vm, double bound); -XBT_PUBLIC(void) MSG_vm_set_affinity(msg_vm_t vm, msg_host_t pm, unsigned long mask); /* TODO: do we need this? 
*/ // XBT_PUBLIC(xbt_dynar_t) MSG_vms_as_dynar(); diff --git a/include/simgrid/simix.h b/include/simgrid/simix.h index 982bedf5e2..42a9c5808f 100644 --- a/include/simgrid/simix.h +++ b/include/simgrid/simix.h @@ -248,7 +248,7 @@ XBT_PUBLIC(void) simcall_host_set_data(sg_host_t host, void *data); XBT_PUBLIC(smx_activity_t) simcall_execution_start(const char *name, double flops_amount, - double priority, double bound, unsigned long affinity_mask); + double priority, double bound); XBT_PUBLIC(smx_activity_t) simcall_execution_parallel_start(const char *name, int host_nb, sg_host_t *host_list, @@ -259,7 +259,6 @@ XBT_PUBLIC(smx_activity_t) simcall_execution_parallel_start(const char *name, XBT_PUBLIC(void) simcall_execution_cancel(smx_activity_t execution); XBT_PUBLIC(void) simcall_execution_set_priority(smx_activity_t execution, double priority); XBT_PUBLIC(void) simcall_execution_set_bound(smx_activity_t execution, double bound); -XBT_PUBLIC(void) simcall_execution_set_affinity(smx_activity_t execution, sg_host_t host, unsigned long mask); XBT_PUBLIC(e_smx_state_t) simcall_execution_wait(smx_activity_t execution); /******************************* VM simcalls ********************************/ @@ -270,7 +269,6 @@ XBT_PUBLIC(void) simcall_vm_start(sg_host_t vm); XBT_PUBLIC(void) simcall_vm_migrate(sg_host_t vm, sg_host_t dst_pm); XBT_PUBLIC(void *) simcall_vm_get_pm(sg_host_t vm); XBT_PUBLIC(void) simcall_vm_set_bound(sg_host_t vm, double bound); -XBT_PUBLIC(void) simcall_vm_set_affinity(sg_host_t vm, sg_host_t pm, unsigned long mask); XBT_PUBLIC(void) simcall_vm_resume(sg_host_t vm); XBT_PUBLIC(void) simcall_vm_migratefrom_resumeto(sg_host_t vm, sg_host_t src_pm, sg_host_t dst_pm); XBT_PUBLIC(void) simcall_vm_save(sg_host_t vm); diff --git a/src/include/surf/surf.h b/src/include/surf/surf.h index bc29606acd..d63b9d302d 100644 --- a/src/include/surf/surf.h +++ b/src/include/surf/surf.h @@ -306,16 +306,6 @@ XBT_PUBLIC(sg_host_t) surf_vm_get_pm(sg_host_t resource); */ XBT_PUBLIC(void) surf_vm_set_bound(sg_host_t resource, double bound); -/** - * @brief [brief description] - * @details [long description] - * - * @param resource [description] - * @param cpu [description] - * @param mask [description] - */ -XBT_PUBLIC(void) surf_vm_set_affinity(sg_host_t resource, sg_host_t cpu, unsigned long mask); - /** * @brief Unlink a file descriptor * diff --git a/src/msg/msg_gos.cpp b/src/msg/msg_gos.cpp index 2ed14c864b..b43b0a094d 100644 --- a/src/msg/msg_gos.cpp +++ b/src/msg/msg_gos.cpp @@ -69,15 +69,8 @@ msg_error_t MSG_parallel_task_execute(msg_task_t task) 1.0, -1.0)); XBT_DEBUG("Parallel execution action created: %p", simdata->compute); } else { - unsigned long affinity_mask = - static_cast((uintptr_t) xbt_dict_get_or_null_ext(simdata->affinity_mask_db, (char *) p_simdata->m_host, - sizeof(msg_host_t))); - XBT_DEBUG("execute %s@%s with affinity(0x%04lx)", - MSG_task_get_name(task), MSG_host_get_name(p_simdata->m_host), affinity_mask); - - simdata->compute = static_cast( - simcall_execution_start(task->name, simdata->flops_amount, simdata->priority, - simdata->bound, affinity_mask)); + simdata->compute = static_cast( + simcall_execution_start(task->name, simdata->flops_amount, simdata->priority, simdata->bound)); } simcall_set_category(simdata->compute, task->category); p_simdata->waiting_action = simdata->compute; diff --git a/src/msg/msg_host.cpp b/src/msg/msg_host.cpp index 367b64927c..d9bf83de84 100644 --- a/src/msg/msg_host.cpp +++ b/src/msg/msg_host.cpp @@ -33,8 +33,6 @@ msg_host_t 
__MSG_host_create(sg_host_t host) // FIXME: don't return our paramete priv->dp_updated_by_deleted_tasks = 0; priv->is_migrating = 0; - priv->affinity_mask_db = xbt_dict_new_homogeneous(nullptr); - priv->file_descriptor_table = xbt_dynar_new(sizeof(int), nullptr); for (int i=sg_storage_max_file_descriptors-1; i>=0;i--) xbt_dynar_push_as(priv->file_descriptor_table, int, i); @@ -121,7 +119,6 @@ void __MSG_host_priv_free(msg_host_priv_t priv) if (size > 0) XBT_WARN("dp_objs: %u pending task?", size); xbt_dict_free(&priv->dp_objs); - xbt_dict_free(&priv->affinity_mask_db); xbt_dynar_free(&priv->file_descriptor_table); free(priv); diff --git a/src/msg/msg_private.h b/src/msg/msg_private.h index a98478735c..a275943730 100644 --- a/src/msg/msg_private.h +++ b/src/msg/msg_private.h @@ -38,8 +38,6 @@ typedef struct simdata_task { /* parallel tasks only */ xbt_free(this->host_list); - - xbt_dict_free(&this->affinity_mask_db); } void setUsed(); void setNotUsed() @@ -58,9 +56,6 @@ typedef struct simdata_task { double bound = 0.0; /* Capping for CPU resource */ double rate = 0.0; /* Capping for network resource */ - /* CPU affinity database of this task */ - xbt_dict_t affinity_mask_db = nullptr; /* smx_host_t host => unsigned long mask */ - bool isused = false; /* Indicates whether the task is used in SIMIX currently */ int host_nb = 0; /* ==0 if sequential task; parallel task if not */ /******* Parallel Tasks Only !!!! *******/ diff --git a/src/msg/msg_task.cpp b/src/msg/msg_task.cpp index e2adf779f1..130eab765b 100644 --- a/src/msg/msg_task.cpp +++ b/src/msg/msg_task.cpp @@ -68,7 +68,6 @@ msg_task_t MSG_task_create(const char *name, double flop_amount, double message_ simdata->source = nullptr; simdata->priority = 1.0; simdata->bound = 0; - simdata->affinity_mask_db = xbt_dict_new_homogeneous(nullptr); simdata->rate = -1.0; simdata->isused = 0; @@ -333,76 +332,3 @@ void MSG_task_set_bound(msg_task_t task, double bound) if (task->simdata->compute) simcall_execution_set_bound(task->simdata->compute, task->simdata->bound); } - -/** \ingroup m_task_management - * \brief Changes the CPU affinity of a computation task. - * - * When pinning the given task to the first CPU core of the given host, use 0x01 for the mask value. Each bit of the - * mask value corresponds to each CPU core. See taskset(1) on Linux. - * - * \param task a target task - * \param host the host having a multi-core CPU - * \param mask the bit mask of a new CPU affinity setting for the task - * - * Usage: - * 0. Define a host with multiple cores. - * \ - * - * 1. Pin a given task to the first CPU core of a host. - * MSG_task_set_affinity(task, pm0, 0x01); - * - * 2. Pin a given task to the third CPU core of a host. Turn on the third bit of the mask. - * MSG_task_set_affinity(task, pm0, 0x04); // 0x04 == 100B - * - * 3. Pin a given VM to the first CPU core of a host. - * MSG_vm_set_affinity(vm, pm0, 0x01); - * - * See examples/msg/cloud/multicore.c for more information. - * - * Note: - * 1. The current code does not allow an affinity of a task to multiple cores. - * The mask value 0x03 (i.e., a given task will be executed on the first core or the second core) is not allowed. - * The mask value 0x01 or 0x02 works. See cpu_cas01.c for details. - * - * 2. It is recommended to first compare simulation results in both the Lazy and Full calculation modes - * (using --cfg=cpu/optim:Full or not). Fix cpu_cas01.c if you find wrong results in the Lazy mode. 
- */ -void MSG_task_set_affinity(msg_task_t task, msg_host_t host, unsigned long mask) -{ - xbt_assert(task, "Invalid parameter"); - xbt_assert(task->simdata, "Invalid parameter"); - - if (mask == 0) { - /* 0 means clear */ - /* We need remove_ext() not throwing exception. */ - void *ret = xbt_dict_get_or_null_ext(task->simdata->affinity_mask_db, (char *) host, sizeof(msg_host_t)); - if (ret != nullptr) - xbt_dict_remove_ext(task->simdata->affinity_mask_db, (char *) host, sizeof(host)); - } else - xbt_dict_set_ext(task->simdata->affinity_mask_db, (char *) host, sizeof(host), (void *)(uintptr_t) mask, nullptr); - - /* We set affinity data of this task. If the task is being executed, we actually change the affinity setting of the - * task. Otherwise, this change will be applied when the task is executed. */ - if (!task->simdata->compute) { - /* task is not yet executed */ - XBT_INFO("set affinity(0x%04lx@%s) for %s (not active now)", mask, MSG_host_get_name(host), - MSG_task_get_name(task)); - return; - } - - simgrid::kernel::activity::Exec *compute = task->simdata->compute; - msg_host_t host_now = compute->host; // simix_private.h is necessary - if (host_now != host) { - /* task is not yet executed on this host */ - XBT_INFO("set affinity(0x%04lx@%s) for %s (not active now)", mask, MSG_host_get_name(host), - MSG_task_get_name(task)); - return; - } - - /* task is being executed on this host. so change the affinity now */ - /* check it works. remove me if it works. */ - xbt_assert(static_cast((uintptr_t) xbt_dict_get_or_null_ext(task->simdata->affinity_mask_db, - (char*)(host), sizeof(msg_host_t))) == mask); - XBT_INFO("set affinity(0x%04lx@%s) for %s", mask, MSG_host_get_name(host), MSG_task_get_name(task)); - simcall_execution_set_affinity(task->simdata->compute, host, mask); -} diff --git a/src/msg/msg_vm.cpp b/src/msg/msg_vm.cpp index 058904139c..4f93f968e7 100644 --- a/src/msg/msg_vm.cpp +++ b/src/msg/msg_vm.cpp @@ -369,12 +369,9 @@ static int migration_rx_fun(int argc, char *argv[]) msg_vm_t vm = ms->vm; msg_host_t src_pm = ms->src_pm; msg_host_t dst_pm = ms-> dst_pm; - msg_host_priv_t priv = sg_host_msg(vm); // TODO: we have an issue, if the DST node is turning off during the three next calls, then the VM is in an inconsistent // state. I should check with Takahiro in order to make this portion of code atomic -// /* deinstall the current affinity setting for the CPU */ -// simcall_vm_set_affinity(vm, src_pm, 0); // // /* Update the vm location */ // simcall_vm_migrate(vm, dst_pm); @@ -384,15 +381,6 @@ static int migration_rx_fun(int argc, char *argv[]) // simcall_vm_migratefrom_resumeto(vm, src_pm, dst_pm); - /* install the affinity setting of the VM on the destination pm */ - { - - unsigned long affinity_mask = - (unsigned long)(uintptr_t) xbt_dict_get_or_null_ext(priv->affinity_mask_db, (char *)dst_pm, sizeof(msg_host_t)); - simcall_vm_set_affinity(vm, dst_pm, affinity_mask); - XBT_DEBUG("set affinity(0x%04lx@%s) for %s", affinity_mask, MSG_host_get_name(dst_pm), MSG_host_get_name(vm)); - } - { // Now the VM is running on the new host (the migration is completed) (even if the SRC crash) msg_host_priv_t priv = sg_host_msg(vm); @@ -1034,26 +1022,3 @@ void MSG_vm_set_bound(msg_vm_t vm, double bound) { simcall_vm_set_bound(vm, bound); } - -/** @brief Set the CPU affinity of a given VM. - * @ingroup msg_VMs - * - * This function changes the CPU affinity of a given VM. Usage is the same as - * MSG_task_set_affinity(). See the MSG_task_set_affinity() for details. 
- */ -void MSG_vm_set_affinity(msg_vm_t vm, msg_host_t pm, unsigned long mask) -{ - msg_host_priv_t priv = sg_host_msg(vm); - - if (mask == 0) - xbt_dict_remove_ext(priv->affinity_mask_db, (char *) pm, sizeof(pm)); - else - xbt_dict_set_ext(priv->affinity_mask_db, (char *) pm, sizeof(pm), (void *)(uintptr_t) mask, nullptr); - - msg_host_t pm_now = MSG_vm_get_pm(vm); - if (pm_now == pm) { - XBT_DEBUG("set affinity(0x%04lx@%s) for %s", mask, MSG_host_get_name(pm), MSG_host_get_name(vm)); - simcall_vm_set_affinity(vm, pm, mask); - } else - XBT_DEBUG("set affinity(0x%04lx@%s) for %s (not active now)", mask, MSG_host_get_name(pm), MSG_host_get_name(vm)); -} diff --git a/src/s4u/s4u_actor.cpp b/src/s4u/s4u_actor.cpp index 6df64747f8..d14a9bfb0c 100644 --- a/src/s4u/s4u_actor.cpp +++ b/src/s4u/s4u_actor.cpp @@ -134,7 +134,7 @@ XBT_PUBLIC(void) sleep_until(double timeout) } e_smx_state_t execute(double flops) { - smx_activity_t s = simcall_execution_start(nullptr,flops,1.0/*priority*/,0./*bound*/, 0L/*affinity*/); + smx_activity_t s = simcall_execution_start(nullptr,flops,1.0/*priority*/,0./*bound*/); return simcall_execution_wait(s); } diff --git a/src/simix/ActorImpl.cpp b/src/simix/ActorImpl.cpp index 931c2b85b5..d071d520d9 100644 --- a/src/simix/ActorImpl.cpp +++ b/src/simix/ActorImpl.cpp @@ -631,7 +631,7 @@ smx_activity_t SIMIX_process_suspend(smx_process_t process, smx_process_t issuer return nullptr; } else { /* FIXME: computation size is zero. Is it okay that bound is zero ? */ - return SIMIX_execution_start(process, "suspend", 0.0, 1.0, 0.0, 0); + return SIMIX_execution_start(process, "suspend", 0.0, 1.0, 0.0); } } diff --git a/src/simix/libsmx.cpp b/src/simix/libsmx.cpp index f84cbd9b75..7ceacadc7c 100644 --- a/src/simix/libsmx.cpp +++ b/src/simix/libsmx.cpp @@ -69,18 +69,17 @@ xbt_dict_t simcall_asr_get_properties(const char *name) * \param flops_amount amount Computation amount (in flops) * \param priority computation priority * \param bound - * \param affinity_mask * \return A new SIMIX execution synchronization */ smx_activity_t simcall_execution_start(const char *name, double flops_amount, - double priority, double bound, unsigned long affinity_mask) + double priority, double bound) { /* checking for infinite values */ xbt_assert(std::isfinite(flops_amount), "flops_amount is not finite!"); xbt_assert(std::isfinite(priority), "priority is not finite!"); - return simcall_BODY_execution_start(name, flops_amount, priority, bound, affinity_mask); + return simcall_BODY_execution_start(name, flops_amount, priority, bound); } /** @@ -169,20 +168,6 @@ void simcall_execution_set_bound(smx_activity_t execution, double bound) simcall_BODY_execution_set_bound(execution, bound); } -/** - * \ingroup simix_process_management - * \brief Changes the CPU affinity of an execution synchro. - * - * This functions changes the CPU affinity of an execution synchro. See taskset(1) on Linux. - * \param execution The execution synchro - * \param host Host - * \param mask Affinity mask - */ -void simcall_execution_set_affinity(smx_activity_t execution, sg_host_t host, unsigned long mask) -{ - simcall_BODY_execution_set_affinity(execution, host, mask); -} - /** * \ingroup simix_host_management * \brief Waits for the completion of an execution synchro and destroy it. 
@@ -249,11 +234,6 @@ void simcall_vm_set_bound(sg_host_t vm, double bound) simgrid::simix::kernelImmediate(std::bind(SIMIX_vm_set_bound, vm, bound)); } -void simcall_vm_set_affinity(sg_host_t vm, sg_host_t pm, unsigned long mask) -{ - simgrid::simix::kernelImmediate(std::bind(SIMIX_vm_set_affinity, vm, pm, mask)); -} - /** * \ingroup simix_vm_management * \brief Migrate the given VM to the given physical host @@ -336,7 +316,6 @@ void simcall_vm_destroy(sg_host_t vm) * \ingroup simix_vm_management * \brief Encompassing simcall to prevent the removal of the src or the dst node at the end of a VM migration * The simcall actually invokes the following calls: - * simcall_vm_set_affinity(vm, src_pm, 0); * simcall_vm_migrate(vm, dst_pm); * simcall_vm_resume(vm); * diff --git a/src/simix/popping_accessors.h b/src/simix/popping_accessors.h index 9c9da14913..1acf3f05b9 100644 --- a/src/simix/popping_accessors.h +++ b/src/simix/popping_accessors.h @@ -166,12 +166,6 @@ static inline double simcall_execution_start__get__bound(smx_simcall_t simcall) static inline void simcall_execution_start__set__bound(smx_simcall_t simcall, double arg) { simgrid::simix::marshal(simcall->args[3], arg); } -static inline unsigned long simcall_execution_start__get__affinity_mask(smx_simcall_t simcall) { - return simgrid::simix::unmarshal(simcall->args[4]); -} -static inline void simcall_execution_start__set__affinity_mask(smx_simcall_t simcall, unsigned long arg) { - simgrid::simix::marshal(simcall->args[4], arg); -} static inline smx_activity_t simcall_execution_start__get__result(smx_simcall_t simcall){ return simgrid::simix::unmarshal(simcall->result); } @@ -261,25 +255,6 @@ static inline void simcall_execution_set_bound__set__bound(smx_simcall_t simcall simgrid::simix::marshal(simcall->args[1], arg); } -static inline smx_activity_t simcall_execution_set_affinity__get__execution(smx_simcall_t simcall) { - return simgrid::simix::unmarshal(simcall->args[0]); -} -static inline void simcall_execution_set_affinity__set__execution(smx_simcall_t simcall, smx_activity_t arg) { - simgrid::simix::marshal(simcall->args[0], arg); -} -static inline sg_host_t simcall_execution_set_affinity__get__ws(smx_simcall_t simcall) { - return simgrid::simix::unmarshal(simcall->args[1]); -} -static inline void simcall_execution_set_affinity__set__ws(smx_simcall_t simcall, sg_host_t arg) { - simgrid::simix::marshal(simcall->args[1], arg); -} -static inline unsigned long simcall_execution_set_affinity__get__mask(smx_simcall_t simcall) { - return simgrid::simix::unmarshal(simcall->args[2]); -} -static inline void simcall_execution_set_affinity__set__mask(smx_simcall_t simcall, unsigned long arg) { - simgrid::simix::marshal(simcall->args[2], arg); -} - static inline smx_activity_t simcall_execution_wait__get__execution(smx_simcall_t simcall) { return simgrid::simix::unmarshal(simcall->args[0]); } @@ -1183,7 +1158,7 @@ XBT_PRIVATE void simcall_HANDLER_process_resume(smx_simcall_t simcall, smx_proce XBT_PRIVATE void simcall_HANDLER_process_set_host(smx_simcall_t simcall, smx_process_t process, sg_host_t dest); XBT_PRIVATE void simcall_HANDLER_process_join(smx_simcall_t simcall, smx_process_t process, double timeout); XBT_PRIVATE void simcall_HANDLER_process_sleep(smx_simcall_t simcall, double duration); -XBT_PRIVATE smx_activity_t simcall_HANDLER_execution_start(smx_simcall_t simcall, const char* name, double flops_amount, double priority, double bound, unsigned long affinity_mask); +XBT_PRIVATE smx_activity_t 
simcall_HANDLER_execution_start(smx_simcall_t simcall, const char* name, double flops_amount, double priority, double bound); XBT_PRIVATE void simcall_HANDLER_execution_wait(smx_simcall_t simcall, smx_activity_t execution); XBT_PRIVATE smx_process_t simcall_HANDLER_process_restart(smx_simcall_t simcall, smx_process_t process); XBT_PRIVATE smx_activity_t simcall_HANDLER_comm_iprobe(smx_simcall_t simcall, smx_mailbox_t mbox, int type, int src, int tag, simix_match_func_t match_fun, void* data); diff --git a/src/simix/popping_bodies.cpp b/src/simix/popping_bodies.cpp index 3aab71bd26..9dee59693c 100644 --- a/src/simix/popping_bodies.cpp +++ b/src/simix/popping_bodies.cpp @@ -119,10 +119,10 @@ inline static int simcall_BODY_process_sleep(double duration) { return simcall(SIMCALL_PROCESS_SLEEP, duration); } -inline static smx_activity_t simcall_BODY_execution_start(const char* name, double flops_amount, double priority, double bound, unsigned long affinity_mask) { +inline static smx_activity_t simcall_BODY_execution_start(const char* name, double flops_amount, double priority, double bound) { /* Go to that function to follow the code flow through the simcall barrier */ - if (0) simcall_HANDLER_execution_start(&SIMIX_process_self()->simcall, name, flops_amount, priority, bound, affinity_mask); - return simcall(SIMCALL_EXECUTION_START, name, flops_amount, priority, bound, affinity_mask); + if (0) simcall_HANDLER_execution_start(&SIMIX_process_self()->simcall, name, flops_amount, priority, bound); + return simcall(SIMCALL_EXECUTION_START, name, flops_amount, priority, bound); } inline static smx_activity_t simcall_BODY_execution_parallel_start(const char* name, int host_nb, sg_host_t* host_list, double* flops_amount, double* bytes_amount, double amount, double rate) { @@ -149,12 +149,6 @@ inline static void simcall_BODY_execution_set_bound(smx_activity_t execution, do return simcall(SIMCALL_EXECUTION_SET_BOUND, execution, bound); } -inline static void simcall_BODY_execution_set_affinity(smx_activity_t execution, sg_host_t ws, unsigned long mask) { - /* Go to that function to follow the code flow through the simcall barrier */ - if (0) SIMIX_execution_set_affinity(execution, ws, mask); - return simcall(SIMCALL_EXECUTION_SET_AFFINITY, execution, ws, mask); - } - inline static int simcall_BODY_execution_wait(smx_activity_t execution) { /* Go to that function to follow the code flow through the simcall barrier */ if (0) simcall_HANDLER_execution_wait(&SIMIX_process_self()->simcall, execution); diff --git a/src/simix/popping_enum.h b/src/simix/popping_enum.h index 4f631eb282..ffd838e53b 100644 --- a/src/simix/popping_enum.h +++ b/src/simix/popping_enum.h @@ -37,7 +37,6 @@ typedef enum { SIMCALL_EXECUTION_CANCEL, SIMCALL_EXECUTION_SET_PRIORITY, SIMCALL_EXECUTION_SET_BOUND, - SIMCALL_EXECUTION_SET_AFFINITY, SIMCALL_EXECUTION_WAIT, SIMCALL_PROCESS_ON_EXIT, SIMCALL_PROCESS_AUTO_RESTART_SET, diff --git a/src/simix/popping_generated.cpp b/src/simix/popping_generated.cpp index 176daadef2..e98ae72455 100644 --- a/src/simix/popping_generated.cpp +++ b/src/simix/popping_generated.cpp @@ -42,7 +42,6 @@ const char* simcall_names[] = { "SIMCALL_EXECUTION_CANCEL", "SIMCALL_EXECUTION_SET_PRIORITY", "SIMCALL_EXECUTION_SET_BOUND", - "SIMCALL_EXECUTION_SET_AFFINITY", "SIMCALL_EXECUTION_WAIT", "SIMCALL_PROCESS_ON_EXIT", "SIMCALL_PROCESS_AUTO_RESTART_SET", @@ -172,7 +171,7 @@ case SIMCALL_PROCESS_SLEEP: break; case SIMCALL_EXECUTION_START: - simgrid::simix::marshal(simcall->result, simcall_HANDLER_execution_start(simcall, 
simgrid::simix::unmarshal(simcall->args[0]), simgrid::simix::unmarshal(simcall->args[1]), simgrid::simix::unmarshal(simcall->args[2]), simgrid::simix::unmarshal(simcall->args[3]), simgrid::simix::unmarshal(simcall->args[4]))); + simgrid::simix::marshal(simcall->result, simcall_HANDLER_execution_start(simcall, simgrid::simix::unmarshal(simcall->args[0]), simgrid::simix::unmarshal(simcall->args[1]), simgrid::simix::unmarshal(simcall->args[2]), simgrid::simix::unmarshal(simcall->args[3]))); SIMIX_simcall_answer(simcall); break; @@ -196,11 +195,6 @@ case SIMCALL_EXECUTION_SET_BOUND: SIMIX_simcall_answer(simcall); break; -case SIMCALL_EXECUTION_SET_AFFINITY: - SIMIX_execution_set_affinity(simgrid::simix::unmarshal(simcall->args[0]), simgrid::simix::unmarshal(simcall->args[1]), simgrid::simix::unmarshal(simcall->args[2])); - SIMIX_simcall_answer(simcall); - break; - case SIMCALL_EXECUTION_WAIT: simcall_HANDLER_execution_wait(simcall, simgrid::simix::unmarshal(simcall->args[0])); break; diff --git a/src/simix/simcalls.in b/src/simix/simcalls.in index b503f715c3..a54af013c9 100644 --- a/src/simix/simcalls.in +++ b/src/simix/simcalls.in @@ -52,12 +52,11 @@ int process_is_suspended(smx_process_t process) [[nohandler]]; int process_join(smx_process_t process, double timeout) [[block]]; int process_sleep(double duration) [[block]]; -smx_activity_t execution_start(const char* name, double flops_amount, double priority, double bound, unsigned long affinity_mask); +smx_activity_t execution_start(const char* name, double flops_amount, double priority, double bound); smx_activity_t execution_parallel_start(const char* name, int host_nb, sg_host_t* host_list, double* flops_amount, double* bytes_amount, double amount, double rate) [[nohandler]]; void execution_cancel(smx_activity_t execution) [[nohandler]]; void execution_set_priority(smx_activity_t execution, double priority) [[nohandler]]; void execution_set_bound(smx_activity_t execution, double bound) [[nohandler]]; -void execution_set_affinity(smx_activity_t execution, sg_host_t ws, unsigned long mask) [[nohandler]]; int execution_wait(smx_activity_t execution) [[block]]; void process_on_exit(smx_process_t process, int_f_pvoid_pvoid_t fun, void* data) [[nohandler]]; diff --git a/src/simix/smx_host.cpp b/src/simix/smx_host.cpp index 723423bf47..01d227f9c8 100644 --- a/src/simix/smx_host.cpp +++ b/src/simix/smx_host.cpp @@ -203,12 +203,12 @@ void SIMIX_host_autorestart(sg_host_t host) } smx_activity_t simcall_HANDLER_execution_start(smx_simcall_t simcall, const char* name, double flops_amount, - double priority, double bound, unsigned long affinity_mask) { - return SIMIX_execution_start(simcall->issuer, name,flops_amount,priority,bound,affinity_mask); + double priority, double bound) { + return SIMIX_execution_start(simcall->issuer, name,flops_amount,priority,bound); } smx_activity_t SIMIX_execution_start(smx_process_t issuer, const char *name, double flops_amount, double priority, - double bound, unsigned long affinity_mask){ + double bound){ /* alloc structures and initialize */ simgrid::kernel::activity::Exec *exec = new simgrid::kernel::activity::Exec(name, issuer->host); @@ -222,13 +222,6 @@ smx_activity_t SIMIX_execution_start(smx_process_t issuer, const char *name, dou if (bound > 0) static_cast(exec->surf_exec)->setBound(bound); - - if (affinity_mask != 0) { - /* just a double check to confirm that this host is the host where this task is running. 
*/ - xbt_assert(exec->host == issuer->host); - static_cast(exec->surf_exec) - ->setAffinity(issuer->host->pimpl_cpu, affinity_mask); - } } XBT_DEBUG("Create execute synchro %p: %s", exec, exec->name.c_str()); @@ -288,16 +281,6 @@ void SIMIX_execution_set_bound(smx_activity_t synchro, double bound) static_cast(exec->surf_exec)->setBound(bound); } -void SIMIX_execution_set_affinity(smx_activity_t synchro, sg_host_t host, unsigned long mask) -{ - simgrid::kernel::activity::Exec *exec = static_cast(synchro); - if(exec->surf_exec) { - /* just a double check to confirm that this host is the host where this task is running. */ - xbt_assert(exec->host == host); - static_cast(exec->surf_exec)->setAffinity(host->pimpl_cpu, mask); - } -} - void simcall_HANDLER_execution_wait(smx_simcall_t simcall, smx_activity_t synchro) { simgrid::kernel::activity::Exec *exec = static_cast(synchro); diff --git a/src/simix/smx_host_private.h b/src/simix/smx_host_private.h index ba1362f8f0..66d6d4d829 100644 --- a/src/simix/smx_host_private.h +++ b/src/simix/smx_host_private.h @@ -41,7 +41,7 @@ XBT_PRIVATE void SIMIX_host_add_auto_restart_process(sg_host_t host, XBT_PRIVATE void SIMIX_host_restart_processes(sg_host_t host); XBT_PRIVATE void SIMIX_host_autorestart(sg_host_t host); XBT_PRIVATE smx_activity_t SIMIX_execution_start(smx_process_t issuer, const char *name, - double flops_amount, double priority, double bound, unsigned long affinity_mask); + double flops_amount, double priority, double bound); XBT_PRIVATE smx_activity_t SIMIX_execution_parallel_start(const char *name, int host_nb, sg_host_t *host_list, double *flops_amount, double *bytes_amount, @@ -49,7 +49,6 @@ XBT_PRIVATE smx_activity_t SIMIX_execution_parallel_start(const char *name, XBT_PRIVATE void SIMIX_execution_cancel(smx_activity_t synchro); XBT_PRIVATE void SIMIX_execution_set_priority(smx_activity_t synchro, double priority); XBT_PRIVATE void SIMIX_execution_set_bound(smx_activity_t synchro, double bound); -XBT_PRIVATE void SIMIX_execution_set_affinity(smx_activity_t synchro, sg_host_t host, unsigned long mask); XBT_PRIVATE void SIMIX_execution_finish(simgrid::kernel::activity::Exec *exec); @@ -81,8 +80,6 @@ XBT_PRIVATE void *SIMIX_vm_get_pm(sg_host_t ind_vm); XBT_PRIVATE void SIMIX_vm_set_bound(sg_host_t ind_vm, double bound); -XBT_PRIVATE void SIMIX_vm_set_affinity(sg_host_t ind_vm, sg_host_t ind_pm, unsigned long mask); - XBT_PRIVATE void SIMIX_vm_migratefrom_resumeto(sg_host_t vm, sg_host_t src_pm, sg_host_t dst_pm); SG_END_DECL() diff --git a/src/simix/smx_vm.cpp b/src/simix/smx_vm.cpp index 073a9fb00c..25cf08746a 100644 --- a/src/simix/smx_vm.cpp +++ b/src/simix/smx_vm.cpp @@ -119,7 +119,6 @@ void SIMIX_vm_migrate(sg_host_t ind_vm, sg_host_t ind_dst_pm) /** * @brief Encompassing simcall to prevent the removal of the src or the dst node at the end of a VM migration * The simcall actually invokes the following calls: - * simcall_vm_set_affinity(vm, src_pm, 0); * simcall_vm_migrate(vm, dst_pm); * simcall_vm_resume(vm); * @@ -131,9 +130,6 @@ void SIMIX_vm_migrate(sg_host_t ind_vm, sg_host_t ind_dst_pm) */ void SIMIX_vm_migratefrom_resumeto(sg_host_t vm, sg_host_t src_pm, sg_host_t dst_pm) { - /* deinstall the current affinity setting for the CPU */ - SIMIX_vm_set_affinity(vm, src_pm, 0); - /* Update the vm location */ SIMIX_vm_migrate(vm, dst_pm); @@ -163,22 +159,6 @@ void SIMIX_vm_set_bound(sg_host_t host, double bound) surf_vm_set_bound(host, bound); } -/** - * @brief Function to set the CPU affinity of the given SIMIX VM host. 
- * - * @param host the vm host (a sg_host_t) - * @param host the pm host (a sg_host_t) - * @param mask affinity mask (a unsigned long) - */ -void SIMIX_vm_set_affinity(sg_host_t ind_vm, sg_host_t ind_pm, unsigned long mask) -{ - /* make sure this at the MSG layer. */ - xbt_assert(SIMIX_vm_get_pm(ind_vm) == ind_pm); - - surf_vm_set_affinity(ind_vm, ind_pm, mask); -} - - /** * @brief Function to suspend a SIMIX VM host. This function stops the execution of the * VM. All the processes on this VM will pause. The state of the VM is diff --git a/src/smpi/smpi_bench.cpp b/src/smpi/smpi_bench.cpp index 0dce63b7d2..a536790ea4 100644 --- a/src/smpi/smpi_bench.cpp +++ b/src/smpi/smpi_bench.cpp @@ -209,7 +209,7 @@ void smpi_execute_(double *duration) void smpi_execute_flops(double flops) { smx_activity_t action; XBT_DEBUG("Handle real computation time: %f flops", flops); - action = simcall_execution_start("computation", flops, 1, 0, 0); + action = simcall_execution_start("computation", flops, 1, 0); simcall_set_category (action, TRACE_internal_smpi_get_category()); simcall_execution_wait(action); smpi_switch_data_segment(smpi_process_index()); diff --git a/src/surf/cpu_interface.cpp b/src/surf/cpu_interface.cpp index c1ab0e1f6c..5c95964896 100644 --- a/src/surf/cpu_interface.cpp +++ b/src/surf/cpu_interface.cpp @@ -152,27 +152,10 @@ Cpu::Cpu(Model *model, simgrid::s4u::Host *host, lmm_constraint_t constraint, } xbt_assert(model == surf_cpu_model_pm || core==1, "Currently, VM cannot be multicore"); - - if (model->getUpdateMechanism() != UM_UNDEFINED) { - p_constraintCore = xbt_new(lmm_constraint_t, core); - p_constraintCoreId = xbt_new(void*, core); - - for (int i = 0; i < core; i++) { - /* just for a unique id, never used as a string. */ - p_constraintCoreId[i] = bprintf("%s:%i", host->name().c_str(), i); - p_constraintCore[i] = lmm_constraint_new(model->getMaxminSystem(), p_constraintCoreId[i], speed_.scale * speed_.peak); - } - } } Cpu::~Cpu() { - if (p_constraintCoreId){ - for (int i = 0; i < coresAmount_; i++) - xbt_free(p_constraintCoreId[i]); - xbt_free(p_constraintCore); - } - xbt_free(p_constraintCoreId); xbt_dynar_free(&speedPerPstate_); } @@ -273,70 +256,6 @@ void CpuAction::updateRemainingLazy(double now) lastValue_ = lmm_variable_getvalue(getVariable()); } -/* - * - * This function formulates a constraint problem that pins a given task to - * particular cores. Currently, it is possible to pin a task to an exactly one - * specific core. The system links the variable object of the task to the - * per-core constraint object. - * - * But, the taskset command on Linux takes a mask value specifying a CPU - * affinity setting of a given task. If the mask value is 0x03, the given task - * will be executed on the first core (CPU0) or the second core (CPU1) on the - * given PM. The schedular will determine appropriate placements of tasks, - * considering given CPU affinities and task activities. - * - * How should the system formulate constraint problems for an affinity to - * multiple cores? - * - * The cpu argument must be the host where the task is being executed. The - * action object does not have the information about the location where the - * action is being executed. - */ -void CpuAction::setAffinity(Cpu *cpu, unsigned long mask) -{ - lmm_variable_t var_obj = getVariable(); - XBT_IN("(%p,%lx)", this, mask); - - { - unsigned long nbits = 0; - - /* FIXME: There is much faster algorithms doing this. 
*/ - for (int i = 0; i < cpu->coresAmount_; i++) { - unsigned long has_affinity = (1UL << i) & mask; - if (has_affinity) - nbits += 1; - } - - xbt_assert(nbits <= 1, "Affinity mask cannot span over multiple cores."); - } - - for (int i = 0; i < cpu->coresAmount_; i++) { - XBT_DEBUG("clear affinity %p to cpu-%d@%s", this, i, cpu->getName()); - lmm_shrink(cpu->getModel()->getMaxminSystem(), cpu->p_constraintCore[i], var_obj); - - unsigned long has_affinity = (1UL << i) & mask; - if (has_affinity) { - /* This function only accepts an affinity setting on the host where the - * task is now running. In future, a task might move to another host. - * But, at this moment, this function cannot take an affinity setting on - * that future host. - * - * It might be possible to extend the code to allow this function to - * accept affinity settings on a future host. We might be able to assign - * zero to elem->value to maintain such inactive affinity settings in the - * system. But, this will make the system complex. */ - XBT_DEBUG("set affinity %p to cpu-%d@%s", this, i, cpu->getName()); - lmm_expand(cpu->getModel()->getMaxminSystem(), cpu->p_constraintCore[i], var_obj, 1.0); - } - } - - if (cpu->getModel()->getUpdateMechanism() == UM_LAZY) { - /* FIXME (hypervisor): Do we need to do something for the LAZY mode? */ - } - XBT_OUT(); -} - simgrid::xbt::signal CpuAction::onStateChange; void CpuAction::setState(Action::State state){ diff --git a/src/surf/cpu_interface.hpp b/src/surf/cpu_interface.hpp index 4a3f3b26da..2e72b9a94e 100644 --- a/src/surf/cpu_interface.hpp +++ b/src/surf/cpu_interface.hpp @@ -136,10 +136,6 @@ public: xbt_dynar_t speedPerPstate_ = nullptr; /*< List of supported CPU capacities (pstate related) */ int pstate_ = 0; /*< Current pstate (index in the speedPeakList)*/ - /* Note (hypervisor): */ - lmm_constraint_t *p_constraintCore=nullptr; - void **p_constraintCoreId=nullptr; - public: virtual void setStateTrace(tmgr_trace_t trace); /*< setup the trace file with states events (ON or OFF). Trace must contain boolean values (0 or 1). */ virtual void setSpeedTrace(tmgr_trace_t trace); /*< setup the trace file with availability events (peak speed changes due to external load). 
Trace must contain relative values (ratio between 0 and 1) */ @@ -168,9 +164,6 @@ public: CpuAction(simgrid::surf::Model *model, double cost, bool failed, lmm_variable_t var) : Action(model, cost, failed, var) {} - /** @brief Set the affinity of the current CpuAction */ - virtual void setAffinity(Cpu *cpu, unsigned long mask); - void setState(simgrid::surf::Action::State state) override; void updateRemainingLazy(double now) override; diff --git a/src/surf/cpu_ti.hpp b/src/surf/cpu_ti.hpp index c39ee4071c..93417f705f 100644 --- a/src/surf/cpu_ti.hpp +++ b/src/surf/cpu_ti.hpp @@ -95,7 +95,6 @@ public: void setMaxDuration(double duration) override; void setPriority(double priority) override; double getRemains() override; - void setAffinity(Cpu * /*cpu*/, unsigned long /*mask*/) override {}; CpuTi *cpu_; int indexHeap_; diff --git a/src/surf/surf_c_bindings.cpp b/src/surf/surf_c_bindings.cpp index 561b457718..561dc5f2ac 100644 --- a/src/surf/surf_c_bindings.cpp +++ b/src/surf/surf_c_bindings.cpp @@ -262,10 +262,6 @@ void surf_vm_set_bound(sg_host_t vm, double bound){ get_casted_vm(vm)->setBound(bound); } -void surf_vm_set_affinity(sg_host_t vm, sg_host_t host, unsigned long mask){ - get_casted_vm(vm)->setAffinity(host->pimpl_cpu, mask); -} - xbt_dict_t surf_storage_get_content(surf_resource_t resource){ return static_cast(surf_storage_resource_priv(resource))->getContent(); } diff --git a/src/surf/virtual_machine.hpp b/src/surf/virtual_machine.hpp index 5b1396d92e..c69a11ca16 100644 --- a/src/surf/virtual_machine.hpp +++ b/src/surf/virtual_machine.hpp @@ -77,7 +77,6 @@ public: sg_host_t getPm(); virtual void setBound(double bound)=0; - virtual void setAffinity(Cpu *cpu, unsigned long mask)=0; /* The vm object of the lower layer */ CpuAction *action_; diff --git a/src/surf/vm_hl13.cpp b/src/surf/vm_hl13.cpp index 4a4d61b4b4..c51615efaa 100644 --- a/src/surf/vm_hl13.cpp +++ b/src/surf/vm_hl13.cpp @@ -202,9 +202,5 @@ void VMHL13::setBound(double bound){ action_->setBound(bound); } -void VMHL13::setAffinity(Cpu *cpu, unsigned long mask){ - action_->setAffinity(cpu, mask); -} - } } diff --git a/src/surf/vm_hl13.hpp b/src/surf/vm_hl13.hpp index 3bcd9b85c3..478b5092a0 100644 --- a/src/surf/vm_hl13.hpp +++ b/src/surf/vm_hl13.hpp @@ -57,7 +57,6 @@ public: void migrate(sg_host_t ind_dst_pm) override; void setBound(double bound) override; - void setAffinity(Cpu *cpu, unsigned long mask) override; }; /**********