+ py::class_<simgrid::s4u::Host, std::unique_ptr<Host, py::nodelete>> host(
+ m, "Host", "Simulated host. See the C++ documentation for details.");
+ host.def_static("by_name", &Host::by_name, py::arg("name"), "Retrieves a host from its name, or die")
+ .def(
+ "route_to",
+ [](const simgrid::s4u::Host* h, const simgrid::s4u::Host* to) {
+ // Build the link list by value: pybind11 converts the vector into a
+ // Python list by copy, so there is no raw 'new' whose ownership could
+ // be leaked or double-freed (the previous code heap-allocated the
+ // vector and relied on pybind11 taking ownership of the pointer).
+ std::vector<Link*> links;
+ double bw = 0;
+ h->route_to(to, links, &bw);
+ return py::make_tuple(links, bw);
+ },
+ "Retrieves the list of links and the bandwidth between two hosts.")
+ .def(
+ "set_speed_profile",
+ [](Host* h, const std::string& profile, double period) {
+ h->set_speed_profile(simgrid::kernel::profile::ProfileBuilder::from_string("", profile, period));
+ },
+ py::call_guard<py::gil_scoped_release>(),
+ "Specify a profile modeling the external load according to an exhaustive list. "
+ "Each line of the profile describes timed events as ``date ratio``. "
+ "For example, the following content describes an host which computational speed is initially 1 (i.e, 100%) "
+ "and then halved after 42 seconds:\n\n"
+ ".. code-block:: python\n\n"
+ " \"\"\"\n"
+ " 0 1.0\n"
+ " 42 0.5\n"
+ " \"\"\"\n\n"
+ ".. warning:: Don't get fooled: bandwidth and latency profiles of links contain absolute values,"
+ " while speed profiles of hosts contain ratios.\n\n"
+ "The second function parameter is the periodicity: the time to wait after the last event to start again over "
+ "the list. Set it to -1 to not loop over.")
+ .def(
+ "set_state_profile",
+ [](Host* h, const std::string& profile, double period) {
+ h->set_state_profile(simgrid::kernel::profile::ProfileBuilder::from_string("", profile, period));
+ },
+ py::call_guard<py::gil_scoped_release>(),
+ "Specify a profile modeling the churn. "
+ "Each line of the profile describes timed events as ``date boolean``, where the boolean (0 or 1) tells "
+ "whether the host is on. "
+ "For example, the following content describes a host which turns off at t=1 and back on at t=2:\n\n"
+ ".. code-block:: python\n\n"
+ " \"\"\"\n"
+ " 1.0 0\n"
+ " 2.0 1\n"
+ " \"\"\"\n\n"
+ "The second function parameter is the periodicity: the time to wait after the last event to start again over "
+ "the list. Set it to -1 to not loop over.")
+ .def_property_readonly("pstate_count", &Host::get_pstate_count, "Retrieve the count of defined pstate levels")
+ .def("pstate_speed", &Host::get_pstate_speed, "Retrieve the maximal speed at the given pstate")
+ .def_property_readonly("netpoint", &Host::get_netpoint, "Retrieve the netpoint associated to this host")
+ .def_property_readonly("disks", &Host::get_disks, "The list of disks on this host (read-only).")
+ // Method form kept for backward compatibility with the 'disks' property above.
+ .def("get_disks", &Host::get_disks, "Retrieve the list of disks in this host")
+ .def_property("core_count", &Host::get_core_count,
+ py::cpp_function(&Host::set_core_count, py::call_guard<py::gil_scoped_release>()),
+ "Manage the number of cores in the CPU")
+ .def("set_coordinates", &Host::set_coordinates, py::call_guard<py::gil_scoped_release>(),
+ "Set the coordinates of this host")
+ .def("set_sharing_policy", &simgrid::s4u::Host::set_sharing_policy, py::call_guard<py::gil_scoped_release>(),
+ "Describe how the CPU is shared", py::arg("policy"), py::arg("cb") = simgrid::s4u::NonLinearResourceCb())
+ .def("create_disk", py::overload_cast<const std::string&, double, double>(&Host::create_disk),
+ py::call_guard<py::gil_scoped_release>(), "Create a disk")
+ .def("create_disk",
+ py::overload_cast<const std::string&, const std::string&, const std::string&>(&Host::create_disk),
+ py::call_guard<py::gil_scoped_release>(), "Create a disk")
+ .def("seal", &Host::seal, py::call_guard<py::gil_scoped_release>(), "Seal this host")
+ .def("turn_off", &Host::turn_off, py::call_guard<py::gil_scoped_release>(), "Turn off this host")
+ .def("turn_on", &Host::turn_on, py::call_guard<py::gil_scoped_release>(), "Turn on this host")
+ .def_property("pstate", &Host::get_pstate,
+ py::cpp_function(&Host::set_pstate, py::call_guard<py::gil_scoped_release>()),
+ "The current pstate (read/write property).")
+ .def_static("current", &Host::current, py::call_guard<py::gil_scoped_release>(),
+ "Retrieves the host on which the running actor is located.")
+ .def_property_readonly("name", &Host::get_name, "The name of this host (read-only property).")
+ .def_property_readonly("load", &Host::get_load,
+ "Returns the current computation load (in flops per second), NOT taking the external load "
+ "into account. This is the currently achieved speed (read-only property).")
+ .def_property_readonly(
+ "speed", &Host::get_speed,
+ "The peak computing speed in flops/s at the current pstate, NOT taking the external load into account. "
+ "This is the max potential speed (read-only property).")
+ .def_property_readonly("available_speed", &Host::get_available_speed,
+ "Get the available speed ratio, between 0 and 1.\n"
+ "This accounts for external load (see :py:func:`set_speed_profile() "
+ "<simgrid.Host.set_speed_profile>`) (read-only property).")
+ .def_static(
+ "on_creation_cb",
+ [](py::object cb) {
+ // The callback outlives this call: take a strong reference before
+ // dropping the GIL, then re-acquire the GIL inside the C++ callback.
+ cb.inc_ref(); // keep alive after return
+ const py::gil_scoped_release gil_release;
+ Host::on_creation_cb([cb_p = cb.ptr()](Host& h) {
+ const py::gil_scoped_acquire py_context; // need a new context for callback
+ try {
+ const auto fun = py::reinterpret_borrow<py::function>(cb_p);
+ fun(&h);
+ } catch (const py::error_already_set& e) {
+ xbt_die("Error while executing the on_creation lambda : %s", e.what());
+ }
+ });
+ },
+ "")
+ .def(
+ "__repr__", [](const Host* h) { return "Host(" + h->get_name() + ")"; },
+ "Textual representation of the Host.");
+
+ // Bindings for the host-load plugin: they are only attached to the Host class
+ // when the plugin is explicitly initialized from Python via this function.
+ m.def("sg_host_load_plugin_init", [host]() {
+ sg_host_load_plugin_init();
+
+ // Re-open the previously registered Host class (captured by value above)
+ // to attach the plugin's extra members after plugin initialization.
+ static_cast<pybind11::class_<simgrid::s4u::Host, std::unique_ptr<simgrid::s4u::Host, pybind11::nodelete>>>(host)
+ .def(
+ "reset_load", [](const Host* h) { sg_host_load_reset(h); }, py::call_guard<py::gil_scoped_release>(),
+ "Reset counters of the host load plugin for this host.")
+ .def_property_readonly(
+ "current_load", [](const Host* h) { return sg_host_get_current_load(h); }, "Current load of the host.")
+ .def_property_readonly(
+ "avg_load", [](const Host* h) { return sg_host_get_avg_load(h); }, "Average load of the host.")
+ .def_property_readonly(
+ "idle_time", [](const Host* h) { return sg_host_get_idle_time(h); }, "Idle time of the host")
+ .def_property_readonly(
+ "total_idle_time", [](const Host* h) { return sg_host_get_total_idle_time(h); },
+ "Total idle time of the host.")
+ .def_property_readonly(
+ "computed_flops", [](const Host* h) { return sg_host_get_computed_flops(h); },
+ "Computed flops of the host.");
+ });
+
+ // Host.SharingPolicy: enum nested inside the Host class on the Python side.
+ py::enum_<simgrid::s4u::Host::SharingPolicy>(host, "SharingPolicy")
+ .value("NONLINEAR", simgrid::s4u::Host::SharingPolicy::NONLINEAR)
+ .value("LINEAR", simgrid::s4u::Host::SharingPolicy::LINEAR);
+
+ /* Class Disk */
+ py::class_<simgrid::s4u::Disk, std::unique_ptr<simgrid::s4u::Disk, py::nodelete>> disk(
+ m, "Disk", "Simulated disk. See the C++ documentation for details.");
+ // Synchronous read/write release the GIL while the simulated I/O blocks.
+ disk.def("read", py::overload_cast<sg_size_t, double>(&simgrid::s4u::Disk::read, py::const_),
+ py::call_guard<py::gil_scoped_release>(), "Read data from disk", py::arg("size"), py::arg("priority") = 1)
+ .def("write", py::overload_cast<sg_size_t, double>(&simgrid::s4u::Disk::write, py::const_),
+ py::call_guard<py::gil_scoped_release>(), "Write data in disk", py::arg("size"), py::arg("priority") = 1)
+ .def("read_async", &simgrid::s4u::Disk::read_async, py::call_guard<py::gil_scoped_release>(),
+ "Non-blocking read data from disk")
+ .def("write_async", &simgrid::s4u::Disk::write_async, py::call_guard<py::gil_scoped_release>(),
+ "Non-blocking write data in disk")
+ .def("set_sharing_policy", &simgrid::s4u::Disk::set_sharing_policy, py::call_guard<py::gil_scoped_release>(),
+ "Set sharing policy for this disk", py::arg("op"), py::arg("policy"),
+ py::arg("cb") = simgrid::s4u::NonLinearResourceCb())
+ .def("seal", &simgrid::s4u::Disk::seal, py::call_guard<py::gil_scoped_release>(), "Seal this disk")
+ .def_property_readonly("name", &simgrid::s4u::Disk::get_name, "The name of this disk (read-only property).")
+ .def(
+ "__repr__", [](const Disk* d) { return "Disk(" + d->get_name() + ")"; },
+ "Textual representation of the Disk");
+ // Nested enums: Disk.SharingPolicy and Disk.Operation.
+ py::enum_<simgrid::s4u::Disk::SharingPolicy>(disk, "SharingPolicy")
+ .value("NONLINEAR", simgrid::s4u::Disk::SharingPolicy::NONLINEAR)
+ .value("LINEAR", simgrid::s4u::Disk::SharingPolicy::LINEAR);
+ py::enum_<simgrid::s4u::Disk::Operation>(disk, "Operation")
+ .value("READ", simgrid::s4u::Disk::Operation::READ)
+ .value("WRITE", simgrid::s4u::Disk::Operation::WRITE)
+ .value("READWRITE", simgrid::s4u::Disk::Operation::READWRITE);
+
+ /* Class NetPoint */
+ // Opaque handle: no members are bound here; NetPoint is only used as an
+ // argument/return type by other bindings (e.g. Host.netpoint).
+ py::class_<simgrid::kernel::routing::NetPoint, std::unique_ptr<simgrid::kernel::routing::NetPoint, py::nodelete>>
+ netpoint(m, "NetPoint", "NetPoint object");
+
+ /* Class Link */
+ py::class_<Link, std::unique_ptr<Link, py::nodelete>> link(m, "Link",
+ "Network link. See the C++ documentation for details.")
+ link.def("set_latency", py::overload_cast<const std::string&>(&Link::set_latency),
+ py::call_guard<py::gil_scoped_release>(),
+ "Set the latency as a string. Accepts values with units, such as ‘1s’ or ‘7ms’.\nFull list of accepted "
+ "units: w (week), d (day), h, s, ms, us, ns, ps.")
+ .def("set_latency", py::overload_cast<double>(&Link::set_latency), py::call_guard<py::gil_scoped_release>(),
+ "Set the latency as a float (in seconds).")
+ .def("set_bandwidth", &Link::set_bandwidth, py::call_guard<py::gil_scoped_release>(),
+ "Set the bandwidth (in byte per second).")
+ .def(
+ "set_bandwidth_profile",
+ [](Link* l, const std::string& profile, double period) {
+ l->set_bandwidth_profile(simgrid::kernel::profile::ProfileBuilder::from_string("", profile, period));
+ },
+ py::call_guard<py::gil_scoped_release>(),
+ "Specify a profile modeling the external load according to an exhaustive list. "
+ "Each line of the profile describes timed events as ``date bandwidth`` (in bytes per second). "
+ "For example, the following content describes a link which bandwidth changes to 40 Mb/s at t=4, and to 6 "
+ "Mb/s at t=8:\n\n"
+ ".. code-block:: python\n\n"
+ " \"\"\"\n"
+ " 4.0 40000000\n"
+ " 8.0 60000000\n"
+ " \"\"\"\n\n"
+ ".. warning:: Don't get fooled: bandwidth and latency profiles of links contain absolute values,"
+ " while speed profiles of hosts contain ratios.\n\n"
+ "The second function parameter is the periodicity: the time to wait after the last event to start again over "
+ "the list. Set it to -1 to not loop over.")
+ .def(
+ "set_latency_profile",
+ [](Link* l, const std::string& profile, double period) {
+ l->set_latency_profile(simgrid::kernel::profile::ProfileBuilder::from_string("", profile, period));
+ },
+ py::call_guard<py::gil_scoped_release>(),
+ "Specify a profile modeling the external load according to an exhaustive list. "
+ "Each line of the profile describes timed events as ``date latency`` (in seconds). "
+ "For example, the following content describes a link which latency changes to 1ms (0.001s) at t=1, and to 2s "
+ "at t=2:\n\n"
+ ".. code-block:: python\n\n"
+ " \"\"\"\n"
+ " 1.0 0.001\n"
+ " 2.0 2\n"
+ " \"\"\"\n\n"
+ ".. warning:: Don't get fooled: bandwidth and latency profiles of links contain absolute values,"
+ " while speed profiles of hosts contain ratios.\n\n"
+ "The second function parameter is the periodicity: the time to wait after the last event to start again over "
+ "the list. Set it to -1 to not loop over.")
+ .def(
+ "set_state_profile",
+ [](Link* l, const std::string& profile, double period) {
+ l->set_state_profile(simgrid::kernel::profile::ProfileBuilder::from_string("", profile, period));
+ },
+ // Release the GIL during the simcall, consistently with the other
+ // profile setters above (this guard was missing here).
+ py::call_guard<py::gil_scoped_release>(),
+ "Specify a profile modeling the churn. "
+ "Each line of the profile describes timed events as ``date boolean``, where the boolean (0 or 1) tells "
+ "whether the link is on. "
+ "For example, the following content describes a link which turns off at t=1 and back on at t=2:\n\n"
+ ".. code-block:: python\n\n"
+ " \"\"\"\n"
+ " 1.0 0\n"
+ " 2.0 1\n"
+ " \"\"\"\n\n"
+ "The second function parameter is the periodicity: the time to wait after the last event to start again over "
+ "the list. Set it to -1 to not loop over.")
+
+ .def("turn_on", &Link::turn_on, py::call_guard<py::gil_scoped_release>(), "Turns the link on.")
+ .def("turn_off", &Link::turn_off, py::call_guard<py::gil_scoped_release>(), "Turns the link off.")
+ .def("is_on", &Link::is_on, "Check whether the link is on.")
+
+ .def("set_sharing_policy", &Link::set_sharing_policy, py::call_guard<py::gil_scoped_release>(),
+ "Set sharing policy for this link")
+ .def("set_concurrency_limit", &Link::set_concurrency_limit, py::call_guard<py::gil_scoped_release>(),
+ "Set concurrency limit for this link")
+ .def("set_host_wifi_rate", &Link::set_host_wifi_rate, py::call_guard<py::gil_scoped_release>(),
+ "Set level of communication speed of given host on this Wi-Fi link")
+ .def_static("by_name", &Link::by_name, "Retrieves a Link from its name, or dies")
+ .def("seal", &Link::seal, py::call_guard<py::gil_scoped_release>(), "Seal this link")
+ .def_property_readonly("name", &Link::get_name, "The name of this link")
+ .def_property_readonly("bandwidth", &Link::get_bandwidth,
+ "The bandwidth (in bytes per second) (read-only property).")
+ .def_property_readonly("latency", &Link::get_latency, "The latency (in seconds) (read-only property).")
+ .def(
+ "__repr__", [](const Link* l) { return "Link(" + l->get_name() + ")"; },
+ "Textual representation of the Link");
+ // Link.SharingPolicy: nested enum with documented values.
+ py::enum_<Link::SharingPolicy>(link, "SharingPolicy")
+ .value("NONLINEAR", Link::SharingPolicy::NONLINEAR,
+ "This policy takes a callback that specifies the maximal capacity as a function of the number of usage. "
+ "See the examples with 'degradation' in their name.")
+ .value("WIFI", Link::SharingPolicy::WIFI, "Pseudo-sharing policy requesting wifi-specific sharing.")
+ .value("SPLITDUPLEX", Link::SharingPolicy::SPLITDUPLEX,
+ "Each link is split in 2, UP and DOWN, one per direction. These links are SHARED.")
+ .value("SHARED", Link::SharingPolicy::SHARED,
+ "The bandwidth is shared between all comms using that link, regardless of their direction.")
+ .value("FATPIPE", Link::SharingPolicy::FATPIPE,
+ "Each comm can use the link fully, with no sharing (only a maximum). This is intended to represent the "
+ "backbone links that cannot be saturated by concurrent links, but have a maximal bandwidth.");
+
+ /* Class LinkInRoute */
+ // Small wrapper used when declaring routes: a link plus an optional direction.
+ py::class_<simgrid::s4u::LinkInRoute> linkinroute(m, "LinkInRoute", "Abstraction to add link in routes");
+ linkinroute.def(py::init<const Link*>());
+ linkinroute.def(py::init<const Link*, simgrid::s4u::LinkInRoute::Direction>());
+ py::enum_<simgrid::s4u::LinkInRoute::Direction>(linkinroute, "Direction")
+ .value("UP", simgrid::s4u::LinkInRoute::Direction::UP)
+ .value("DOWN", simgrid::s4u::LinkInRoute::Direction::DOWN)
+ .value("NONE", simgrid::s4u::LinkInRoute::Direction::NONE);
+
+ /* Class Split-Duplex Link */
+ // Derives from Link on the Python side too, so Link methods are inherited.
+ py::class_<simgrid::s4u::SplitDuplexLink, Link, std::unique_ptr<simgrid::s4u::SplitDuplexLink, py::nodelete>>(
+ m, "SplitDuplexLink", "Network split-duplex link")
+ .def_property_readonly("link_up", &simgrid::s4u::SplitDuplexLink::get_link_up, "Get link direction up")
+ .def_property_readonly("link_down", &simgrid::s4u::SplitDuplexLink::get_link_down, "Get link direction down");
+
+ /* Class Mailbox */
+ py::class_<simgrid::s4u::Mailbox, std::unique_ptr<Mailbox, py::nodelete>>(
+ m, "Mailbox", "Mailbox. See the C++ documentation for details.")
+ .def(
+ "__repr__", [](const Mailbox* self) { return "Mailbox(" + self->get_name() + ")"; },
+ "Textual representation of the Mailbox")
+ .def_static("by_name", &Mailbox::by_name, py::call_guard<py::gil_scoped_release>(), py::arg("name"),
+ "Retrieve a Mailbox from its name")
+ .def_property_readonly("name", &Mailbox::get_name, "The name of that mailbox (read-only property).")
+ .def_property_readonly("ready", &Mailbox::ready,
+ "Check if there is a communication ready to be consumed from a mailbox.")
+ // The put* bindings send the payload as a raw PyObject*: inc_ref() keeps it
+ // alive while in transit, and the receiving get() steals that reference
+ // back. The GIL is only released after the reference has been taken.
+ .def(
+ "put",
+ [](Mailbox* self, py::object data, uint64_t size, double timeout) {
+ auto* data_ptr = data.inc_ref().ptr();
+ const py::gil_scoped_release gil_release;
+ self->put(data_ptr, size, timeout);
+ },
+ "Blocking data transmission with a timeout")
+ .def(
+ "put",
+ [](Mailbox* self, py::object data, uint64_t size) {
+ auto* data_ptr = data.inc_ref().ptr();
+ const py::gil_scoped_release gil_release;
+ self->put(data_ptr, size);
+ },
+ "Blocking data transmission")
+ .def(
+ "put_async",
+ [](Mailbox* self, py::object data, uint64_t size) {
+ auto* data_ptr = data.inc_ref().ptr();
+ const py::gil_scoped_release gil_release;
+ return self->put_async(data_ptr, size);
+ },
+ "Non-blocking data transmission")
+ .def(
+ "put_init",
+ [](Mailbox* self, py::object data, uint64_t size) {
+ auto* data_ptr = data.inc_ref().ptr();
+ const py::gil_scoped_release gil_release;
+ return self->put_init(data_ptr, size);
+ },
+ "Creates (but don’t start) a data transmission to that mailbox.")
+ // reinterpret_steal re-takes ownership of the reference added by put().
+ .def(
+ "get", [](Mailbox* self) { return py::reinterpret_steal<py::object>(self->get<PyObject>()); },
+ py::call_guard<py::gil_scoped_release>(), "Blocking data reception")
+ .def(
+ "get_async", [](Mailbox* self) -> CommPtr { return self->get_async(); },
+ py::call_guard<py::gil_scoped_release>(),
+ "Non-blocking data reception. Use data.get() to get the python object after the communication has finished")
+ .def("set_receiver", &Mailbox::set_receiver, py::call_guard<py::gil_scoped_release>(),
+ "Sets the actor as permanent receiver");
+
+ /* class Activity */
+ // Abstract base for Comm/Io/Exec below; no members are bound on it directly.
+ py::class_<Activity, ActivityPtr>(m, "Activity", "Activity. See the C++ documentation for details.");
+
+ /* Class Comm */
+ py::class_<Comm, CommPtr, Activity>(m, "Comm", "Communication. See the C++ documentation for details.")
+ .def_property_readonly("dst_data_size", &Comm::get_dst_data_size, py::call_guard<py::gil_scoped_release>(),
+ "Retrieve the size of the received data.")
+ .def_property_readonly("mailbox", &Comm::get_mailbox, py::call_guard<py::gil_scoped_release>(),
+ "Retrieve the mailbox on which this comm acts.")
+ .def_property_readonly("sender", &Comm::get_sender, py::call_guard<py::gil_scoped_release>())
+ .def_property_readonly("state_str", &Comm::get_state_str, py::call_guard<py::gil_scoped_release>(),
+ "Retrieve the Comm state as string")
+ .def_property_readonly("remaining", &Comm::get_remaining, py::call_guard<py::gil_scoped_release>(),
+ "Remaining amount of work that this Comm entails")
+ .def_property_readonly("start_time", &Comm::get_start_time, py::call_guard<py::gil_scoped_release>(),
+ "Time at which this Comm started")
+ .def_property_readonly("finish_time", &Comm::get_finish_time, py::call_guard<py::gil_scoped_release>(),
+ "Time at which this Comm finished")
+ .def_property_readonly("is_suspended", &Comm::is_suspended, py::call_guard<py::gil_scoped_release>(),
+ "Whether this Comm is suspended")
+ .def("set_payload_size", &Comm::set_payload_size, py::call_guard<py::gil_scoped_release>(), py::arg("bytes"),
+ "Specify the amount of bytes which exchange should be simulated.")
+ .def("set_rate", &Comm::set_rate, py::call_guard<py::gil_scoped_release>(), py::arg("rate"),
+ "Sets the maximal communication rate (in byte/sec). Must be done before start")
+ .def("cancel", &Comm::cancel, py::call_guard<py::gil_scoped_release>(),
+ py::return_value_policy::reference_internal, "Cancel the activity.")
+ .def("start", &Comm::start, py::call_guard<py::gil_scoped_release>(), py::return_value_policy::reference_internal,
+ "Starts a previously created activity. This function is optional: you can call wait() even if you didn't "
+ "call start()")
+ .def("suspend", &Comm::suspend, py::call_guard<py::gil_scoped_release>(),
+ py::return_value_policy::reference_internal, "Suspend the activity.")
+ .def("resume", &Comm::resume, py::call_guard<py::gil_scoped_release>(),
+ py::return_value_policy::reference_internal, "Resume the activity.")
+ .def("test", &Comm::test, py::call_guard<py::gil_scoped_release>(),
+ "Test whether the communication is terminated.")
+ .def("wait", &Comm::wait, py::call_guard<py::gil_scoped_release>(),
+ "Block until the completion of that communication.")
+ .def("wait_for", &Comm::wait_for, py::call_guard<py::gil_scoped_release>(), py::arg("timeout"),
+ "Block until the completion of that communication, or raises TimeoutException after the specified timeout.")
+ .def("wait_until", &Comm::wait_until, py::call_guard<py::gil_scoped_release>(), py::arg("time_limit"),
+ "Block until the completion of that communication, or raises TimeoutException after the specified time.")
+ .def(
+ "get_payload",
+ [](const Comm* self) { return py::reinterpret_steal<py::object>((PyObject*)self->get_payload()); },
+ py::call_guard<py::gil_scoped_release>(),
+ "Retrieve the message's payload of a get_async. You cannot call this until after the comm termination.")
+ .def("detach", py::overload_cast<>(&Comm::detach), py::return_value_policy::reference_internal,
+ py::call_guard<py::gil_scoped_release>(),
+ "Start the comm, and ignore its result. It can be completely forgotten after that.")
+ .def_static("sendto", &Comm::sendto, py::call_guard<py::gil_scoped_release>(), py::arg("from"), py::arg("to"),
+ py::arg("simulated_size_in_bytes"), "Do a blocking communication between two arbitrary hosts.")
+ .def_static("sendto_init", py::overload_cast<Host*, Host*>(&Comm::sendto_init),
+ py::call_guard<py::gil_scoped_release>(), py::arg("from"), py::arg("to"),
+ "Creates a communication between the two given hosts, bypassing the mailbox mechanism.")
+ // Docstring fixed: sendto_async is the non-blocking variant of sendto.
+ .def_static("sendto_async", &Comm::sendto_async, py::call_guard<py::gil_scoped_release>(), py::arg("from"),
+ py::arg("to"), py::arg("simulated_size_in_bytes"),
+ "Do an asynchronous communication between two arbitrary hosts.\n\nThis initializes a communication that "
+ "completely bypass the mailbox and actors mechanism. There is really no limit on the hosts involved. "
+ "In particular, the actor does not have to be on one of the involved hosts.");
+
+ /* Class Io */
+ // Asynchronous I/O handle, as returned by Disk.read_async / Disk.write_async.
+ py::class_<simgrid::s4u::Io, simgrid::s4u::IoPtr, Activity>(m, "Io",
+ "I/O activities. See the C++ documentation for details.")
+ .def("test", &simgrid::s4u::Io::test, py::call_guard<py::gil_scoped_release>(),
+ "Test whether the I/O is terminated.")
+ .def("wait", &simgrid::s4u::Io::wait, py::call_guard<py::gil_scoped_release>(),
+ "Block until the completion of that I/O operation");
+
+ /* Class Exec */
+ py::class_<simgrid::s4u::Exec, simgrid::s4u::ExecPtr, Activity>(m, "Exec",
+ "Execution. See the C++ documentation for details.")
+ .def_property_readonly("remaining", &simgrid::s4u::Exec::get_remaining, py::call_guard<py::gil_scoped_release>(),
+ "Amount of flops that remain to be computed until completion (read-only property).")
+ .def_property_readonly("remaining_ratio", &simgrid::s4u::Exec::get_remaining_ratio,
+ py::call_guard<py::gil_scoped_release>(),
+ "Amount of work remaining until completion from 0 (completely done) to 1 (nothing done "
+ "yet) (read-only property).")
+ // NOTE(review): set_host is bound without releasing the GIL, unlike most
+ // setters in this file — confirm whether the guard was omitted on purpose.
+ .def_property("host", &simgrid::s4u::Exec::get_host, &simgrid::s4u::Exec::set_host,
+ "Host on which this execution runs. Only the first host is returned for parallel executions. "
+ "Changing this value migrates the execution.")
+ .def_property_readonly("is_suspended", &simgrid::s4u::Exec::is_suspended,
+ py::call_guard<py::gil_scoped_release>(), "Whether this Exec is suspended")
+ .def("test", &simgrid::s4u::Exec::test, py::call_guard<py::gil_scoped_release>(),
+ "Test whether the execution is terminated.")
+ .def("cancel", &simgrid::s4u::Exec::cancel, py::call_guard<py::gil_scoped_release>(), "Cancel that execution.")
+ .def("start", &simgrid::s4u::Exec::start, py::call_guard<py::gil_scoped_release>(), "Start that execution.")
+ .def("suspend", &simgrid::s4u::Exec::suspend, py::call_guard<py::gil_scoped_release>(), "Suspend that execution.")
+ .def("resume", &simgrid::s4u::Exec::resume, py::call_guard<py::gil_scoped_release>(), "Resume that execution.")
+ .def("wait", &simgrid::s4u::Exec::wait, py::call_guard<py::gil_scoped_release>(),
+ "Block until the completion of that execution.")
+ .def("wait_for", &simgrid::s4u::Exec::wait_for, py::call_guard<py::gil_scoped_release>(), py::arg("timeout"),
+ "Block until the completion of that activity, or raises TimeoutException after the specified timeout.");
+
+ /* Class Semaphore */
+ py::class_<Semaphore, SemaphorePtr>(m, "Semaphore",
+ "A classical semaphore, but blocking in the simulation world. See the C++ "
+ "documentation for details.")
+ .def(py::init<>(&Semaphore::create), py::call_guard<py::gil_scoped_release>(), py::arg("capacity"),
+ "Semaphore constructor.")
+ .def("acquire", &Semaphore::acquire, py::call_guard<py::gil_scoped_release>(),
+ "Acquire on the semaphore object with no timeout. Blocks until the semaphore is acquired.")
+ // Docstring fixed: this variant takes a timeout (copy-paste from acquire).
+ .def("acquire_timeout", &Semaphore::acquire_timeout, py::call_guard<py::gil_scoped_release>(), py::arg("timeout"),
+ "Acquire on the semaphore object with a timeout. Blocks until the semaphore is acquired or returns "
+ "true if it has not been acquired after the specified timeout.")
+ .def("release", &Semaphore::release, py::call_guard<py::gil_scoped_release>(), "Release the semaphore.")
+ .def_property_readonly("capacity", &Semaphore::get_capacity, py::call_guard<py::gil_scoped_release>(),
+ "Get the semaphore capacity.")
+ .def_property_readonly("would_block", &Semaphore::would_block, py::call_guard<py::gil_scoped_release>(),
+ "Check whether trying to acquire the semaphore would block (in other word, checks whether "
+ "this semaphore has capacity).")
+ // Allow semaphores to be automatically acquired/released with a context manager: `with semaphore: ...`
+ .def("__enter__", &Semaphore::acquire, py::call_guard<py::gil_scoped_release>())
+ .def("__exit__",
+ [](Semaphore* self, const py::object&, const py::object&, const py::object&) { self->release(); });
+
+ /* Class Mutex */
+ py::class_<Mutex, MutexPtr>(m, "Mutex",
+ "A classical mutex, but blocking in the simulation world."
+ "See the C++ documentation for details.")
+ .def(py::init<>(&Mutex::create), py::call_guard<py::gil_scoped_release>(),
+ "Mutex constructor (pass True as a parameter to get a recursive Mutex).", py::arg("recursive") = false)
+ .def("lock", &Mutex::lock, py::call_guard<py::gil_scoped_release>(), "Block until the mutex is acquired.")
+ .def("try_lock", &Mutex::try_lock, py::call_guard<py::gil_scoped_release>(),
+ "Try to acquire the mutex. Return true if the mutex was acquired, false otherwise.")
+ .def("unlock", &Mutex::unlock, py::call_guard<py::gil_scoped_release>(), "Release the mutex.")
+ // Allow mutexes to be automatically acquired/released with a context manager: `with mutex: ...`
+ // NOTE(review): __exit__ releases the GIL here while Semaphore.__exit__ does
+ // not — confirm whether that asymmetry is intended.
+ .def("__enter__", &Mutex::lock, py::call_guard<py::gil_scoped_release>())
+ .def(
+ "__exit__", [](Mutex* self, const py::object&, const py::object&, const py::object&) { self->unlock(); },
+ py::call_guard<py::gil_scoped_release>());
+
+ /* Class Barrier */
+ // Simulation-world barrier: wait() blocks until 'expected_actors' have arrived.
+ py::class_<Barrier, BarrierPtr>(m, "Barrier", "A classical barrier, but blocking in the simulation world.")
+ .def(py::init<>(&Barrier::create), py::call_guard<py::gil_scoped_release>(), py::arg("expected_actors"),
+ "Barrier constructor.")
+ .def("wait", &Barrier::wait, py::call_guard<py::gil_scoped_release>(),
+ "Blocks into the barrier. Every waiting actors will be unlocked once the expected amount of actors reaches "
+ "the barrier.");