include examples/python/exec-async/exec-async.tesh
include examples/python/exec-basic/exec-basic.py
include examples/python/exec-basic/exec-basic.tesh
+include examples/python/exec-cpu-nonlinear/exec-cpu-nonlinear.py
+include examples/python/exec-cpu-nonlinear/exec-cpu-nonlinear.tesh
include examples/python/exec-dvfs/exec-dvfs.py
include examples/python/exec-dvfs/exec-dvfs.tesh
include examples/python/exec-remote/exec-remote.py
foreach(example actor-create actor-daemon actor-join actor-kill actor-migrate actor-suspend actor-yield actor-lifetime
comm-wait comm-waitall comm-waitany
exec-async exec-basic exec-dvfs exec-remote
- network-nonlinear clusters-multicpu io-degradation)
+ network-nonlinear clusters-multicpu io-degradation exec-cpu-nonlinear)
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${example}/${example}.tesh)
set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/${example}/${example}.py)
--- /dev/null
+# Copyright (c) 2006-2021. The SimGrid Team. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the license (GNU LGPL) which comes with this package.
+
+# This example shows how to simulate a non-linear resource sharing for
+# CPUs.
+
+
+from simgrid import Actor, Engine, NetZone, Host, this_actor
+import sys
+import functools
+
+
+def runner():
+    """Actor body: start n_task concurrent executions on the current host,
+    then wait for all of them to complete.
+
+    With the non-linear sharing policy installed on the host (see
+    cpu_nonlinear below), running more than one task halves the host
+    capacity, so the 10 tasks take 20s instead of 10s.
+    """
+    # Each task is sized to exactly one second of work on an unloaded host.
+    computation_amount = this_actor.get_host().speed
+    n_task = 10
+
+    this_actor.info("Execute %d tasks of %g flops, should take %d second in a CPU without degradation. It will take the double here." % (
+        n_task, computation_amount, n_task))
+    # Start all executions asynchronously so they run concurrently and
+    # trigger the degraded (halved) capacity.
+    tasks = [this_actor.exec_init(computation_amount).start()
+             for _ in range(n_task)]
+
+    this_actor.info("Waiting for all tasks to be done!")
+    for task in tasks:
+        task.wait()
+
+    this_actor.info("Finished executing. Goodbye now!")
+
+
+def cpu_nonlinear(host: Host, capacity: float, n: int) -> float:
+    """Non-linear resource-sharing callback for a CPU.
+
+    Called by the SimGrid kernel (maestro context) whenever the host's
+    sharing must be recomputed.
+
+    :param host: the host whose capacity is being computed (bound via
+        functools.partial in load_platform)
+    :param capacity: the capacity the linear sharing model would give
+    :param n: number of tasks currently sharing the host
+    :return: the adjusted capacity to use
+    """
+    # emulates a degradation in CPU according to the number of tasks
+    # totally unrealistic but for learning purposes
+    capacity = capacity / 2 if n > 1 else capacity
+    this_actor.info("Host %s, %d concurrent tasks, new capacity %f" %
+                    (host.name, n, capacity))
+    return capacity
+
+
+def load_platform():
+    """ Create a simple 1-host platform with a non-linear CPU sharing policy """
+    zone = NetZone.create_empty_zone("Zone1")
+    # Single host with a 1 Mf peak speed.
+    runner_host = zone.create_host("runner", 1e6)
+    # Install the NONLINEAR sharing policy; the callback only receives
+    # (capacity, n), so bind the host as first argument with
+    # functools.partial to let the callback log which host it serves.
+    runner_host.set_sharing_policy(
+        Host.SharingPolicy.NONLINEAR, functools.partial(cpu_nonlinear, runner_host))
+    # Resources must be sealed before the simulation starts.
+    runner_host.seal()
+    zone.seal()
+
+    # create actor runner
+    Actor.create("runner", runner_host, runner)
+
+
+if __name__ == '__main__':
+    # The Engine consumes its own command-line options (e.g. --log=...).
+    e = Engine(sys.argv)
+
+    # create platform
+    load_platform()
+
+    # runs the simulation
+    e.run()
+
+    # Explicitly delete the Engine object to avoid a segfault during the
+    # cleanup phase: destroying the Engine also destroys the std::function
+    # wrapping the non-linear callback. If that destruction is left to the
+    # interpreter's final garbage collection, it runs after the Python main
+    # program has already freed its variables, and crashes.
+    del(e)
--- /dev/null
+#!/usr/bin/env tesh
+
+$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${srcdir:=.}/exec-cpu-nonlinear.py "--log=root.fmt:[%10.6r]%e(%i:%a@%h)%e%m%n"
+> [ 0.000000] (1:runner@runner) Execute 10 tasks of 1e+06 flops, should take 10 second in a CPU without degradation. It will take the double here.
+> [ 0.000000] (1:runner@runner) Waiting for all tasks to be done!
+> [ 0.000000] (0:maestro@) Host runner, 10 concurrent tasks, new capacity 500000.000000
+> [ 20.000000] (1:runner@runner) Finished executing. Goodbye now!
const std::pair<unsigned int, unsigned int>&, unsigned int>());
/* Class Host */
- py::class_<simgrid::s4u::Host, std::unique_ptr<Host, py::nodelete>>(m, "Host", "Simulated host")
- .def("by_name", &Host::by_name, "Retrieves a host from its name, or die")
+ py::class_<simgrid::s4u::Host, std::unique_ptr<Host, py::nodelete>> host(m, "Host", "Simulated host");
+ host.def("by_name", &Host::by_name, "Retrieves a host from its name, or die")
.def("get_pstate_count", &Host::get_pstate_count, "Retrieve the count of defined pstate levels")
.def("get_pstate_speed", &Host::get_pstate_speed, "Retrieve the maximal speed at the given pstate")
.def("get_netpoint", &Host::get_netpoint, "Retrieve the netpoint associated to this host")
.def("get_disks", &Host::get_disks, "Retrieve the list of disks in this host")
.def("set_core_count", &Host::set_core_count, "Set the number of cores in the CPU")
.def("set_coordinates", &Host::set_coordinates, "Set the coordinates of this host")
+ .def("set_sharing_policy", &simgrid::s4u::Host::set_sharing_policy, "Describe how the CPU is shared",
+ py::arg("policy"), py::arg("cb") = simgrid::s4u::NonLinearResourceCb())
.def("create_disk", py::overload_cast<const std::string&, double, double>(&Host::create_disk), "Create a disk")
.def("create_disk",
py::overload_cast<const std::string&, const std::string&, const std::string&>(&Host::create_disk),
"speed", &Host::get_speed,
"The peak computing speed in flops/s at the current pstate, taking the external load into account. "
"This is the max potential speed.");
+ py::enum_<simgrid::s4u::Host::SharingPolicy>(host, "SharingPolicy")
+ .value("NONLINEAR", simgrid::s4u::Host::SharingPolicy::NONLINEAR)
+ .value("LINEAR", simgrid::s4u::Host::SharingPolicy::LINEAR)
+ .export_values();
/* Class Disk */
py::class_<simgrid::s4u::Disk, std::unique_ptr<simgrid::s4u::Disk, py::nodelete>> disk(m, "Disk", "Simulated disk");