* Since the priority is 2, it writes twice as fast as a regular one.
*
* So instead of a half/half sharing between the two, we get a 1/3 vs. 2/3 sharing. */
- disk_list.front()->io_init(4000000, simgrid::s4u::Io::OpType::WRITE)->set_priority(2)->wait();
+ disk_list.front()->write(4000000, 2);
XBT_INFO("First write done.");
/* Note that the timings printed when running this example are a bit misleading, because the uneven sharing only lasts
IoPtr read_async(sg_size_t size) const;
sg_size_t read(sg_size_t size) const;
+ /** Read @p size bytes, sharing the disk according to @p priority (e.g. priority 2 gets a 2/3 vs 1/3 share against a priority-1 I/O) */
+ sg_size_t read(sg_size_t size, double priority) const;
IoPtr write_async(sg_size_t size) const;
sg_size_t write(sg_size_t size) const;
+ /** Write @p size bytes, sharing the disk according to @p priority (e.g. priority 2 gets a 2/3 vs 1/3 share against a priority-1 I/O) */
+ sg_size_t write(sg_size_t size, double priority) const;
/** @brief Policy for sharing the disk among activities */
enum class SharingPolicy { NONLINEAR = 1, LINEAR = 0 };
/* Class Disk */
py::class_<simgrid::s4u::Disk, std::unique_ptr<simgrid::s4u::Disk, py::nodelete>> disk(m, "Disk", "Simulated disk");
- disk.def("read", &simgrid::s4u::Disk::read, py::call_guard<py::gil_scoped_release>(), "Read data from disk")
- .def("write", &simgrid::s4u::Disk::write, py::call_guard<py::gil_scoped_release>(), "Write data in disk")
+ disk.def("read", py::overload_cast<sg_size_t, double>(&simgrid::s4u::Disk::read, py::const_),
+ py::call_guard<py::gil_scoped_release>(), "Read data from disk", py::arg("size"), py::arg("priority") = 1)
+ .def("write", py::overload_cast<sg_size_t, double>(&simgrid::s4u::Disk::write, py::const_),
+ py::call_guard<py::gil_scoped_release>(), "Write data in disk", py::arg("size"), py::arg("priority") = 1)
.def("read_async", &simgrid::s4u::Disk::read_async, py::call_guard<py::gil_scoped_release>(),
"Non-blocking read data from disk")
.def("write_async", &simgrid::s4u::Disk::write_async, py::call_guard<py::gil_scoped_release>(),
return IoPtr(io_init(size, Io::OpType::READ))->vetoable_start()->wait()->get_performed_ioops();
}
+/** Synchronous prioritized read: start the I/O, block until it completes, and
+ * return the amount of performed I/O operations. The @p priority weights the
+ * disk sharing when concurrent I/Os compete (see the set_priority() call below). */
+sg_size_t Disk::read(sg_size_t size, double priority) const
+{
+ return IoPtr(io_init(size, Io::OpType::READ))
+ ->set_priority(priority)
+ ->vetoable_start()
+ ->wait()
+ ->get_performed_ioops();
+}
+
IoPtr Disk::write_async(sg_size_t size) const
{
return IoPtr(io_init(size, Io::OpType::WRITE)->vetoable_start());
return IoPtr(io_init(size, Io::OpType::WRITE))->vetoable_start()->wait()->get_performed_ioops();
}
+/** Synchronous prioritized write: start the I/O, block until it completes, and
+ * return the amount of performed I/O operations. The @p priority weights the
+ * disk sharing when concurrent I/Os compete (see the set_priority() call below). */
+sg_size_t Disk::write(sg_size_t size, double priority) const
+{
+ return IoPtr(io_init(size, Io::OpType::WRITE))
+ ->set_priority(priority)
+ ->vetoable_start()
+ ->wait()
+ ->get_performed_ioops();
+}
+
Disk* Disk::set_sharing_policy(Disk::Operation op, Disk::SharingPolicy policy, const NonLinearResourceCb& cb)
{
kernel::actor::simcall([this, op, policy, &cb] { pimpl_->set_sharing_policy(op, policy, cb); });