# Path to the pip requirements file
requirements_file: docs/requirements.txt
-
+
# Don't build any extra formats
formats: []
\ No newline at end of file
PYBIND11_MODULE(simgrid, m)
{
-
m.doc() = "SimGrid userspace API";
m.attr("simgrid_version") = simgrid_version;
py::class_<simgrid::s4u::Actor, ActorPtr>(m, "Actor",
"An actor is an independent stream of execution in your distributed "
"application, see :ref:`class s4u::Actor <API_s4u_Actor>`")
-
.def("create",
[](py::str name, py::object host, py::object fun, py::args args) {
-
return simgrid::s4u::Actor::create(name, host.cast<Host*>(), [fun, args]() {
-
try {
fun(*args);
} catch (py::error_already_set& ex) {
for (res = dwarf_child(die, &child); res == 0; res = dwarf_siblingof(&child, &child)) {
int tag = dwarf_tag(&child);
if (tag == DW_TAG_member || tag == DW_TAG_inheritance) {
-
// Skip declarations:
if (MC_dwarf_attr_flag(&child, DW_AT_declaration, false))
continue;
Dwarf_Addr high_pc;
switch (simgrid::dwarf::classify_form(dwarf_whatform(&attr))) {
-
- // DW_AT_high_pc if an offset from the low_pc:
+        // DW_AT_high_pc is an offset from the low_pc:
case simgrid::dwarf::FormClass::Constant:
xbt_assert(dwarf_formsdata(&attr, &offset) == 0, "Could not read constant");
int tag = dwarf_tag(die);
simgrid::dwarf::TagClass klass = simgrid::dwarf::classify_tag(tag);
switch (klass) {
-
- // Type:
+ // Type:
case simgrid::dwarf::TagClass::Type:
MC_dwarf_handle_type_die(info, die, unit, frame, ns);
break;
static void mc_post_process_scope(simgrid::mc::ObjectInformation* info, simgrid::mc::Frame* scope)
{
-
if (scope->tag == DW_TAG_inlined_subroutine) {
// Attach correct namespaced name in inlined subroutine:
auto i = info->subprograms.find(scope->abstract_origin_id);
void postProcessObjectInformation(simgrid::mc::RemoteClient* process, simgrid::mc::ObjectInformation* info)
{
for (auto& t : info->types) {
-
simgrid::mc::Type* type = &(t.second);
simgrid::mc::Type* subtype = type;
while (subtype->type == DW_TAG_typedef || subtype->type == DW_TAG_volatile_type ||
*
* It works for either parallel or sequential tasks.
*/
-double MSG_task_get_remaining_work_ratio(msg_task_t task) {
-
+double MSG_task_get_remaining_work_ratio(msg_task_t task)
+{
xbt_assert((task != nullptr), "Cannot get information from a nullptr task");
if (task->compute) {
// Task in progress
};
class HostEnergy {
+ simgrid::s4u::Host* host_ = nullptr;
+  /*< List of (idle_power, epsilon_power, max_power) tuples, one per CPU pstate */
+ std::vector<PowerRange> power_range_watts_list_;
+
+ /* We need to keep track of what pstate has been used, as we will sometimes be notified only *after* a pstate has been
+ * used (but we need to update the energy consumption with the old pstate!)
+ */
+ int pstate_ = 0;
+ const int pstate_off_ = -1;
+
+ /* Only used to split total energy into unused/used hosts.
+   * If you want to get this info for something else, use the host_load plugin instead
+ */
+ bool host_was_used_ = false;
+
+ void init_watts_range_list();
friend void ::on_simulation_end(); // For access to host_was_used_
+
public:
static simgrid::xbt::Extension<simgrid::s4u::Host, HostEnergy> EXTENSION_ID;
double get_power_range_slope_at(int pstate);
void update();
-private:
- void init_watts_range_list();
- simgrid::s4u::Host* host_ = nullptr;
- /*< List of (idle_power, epsilon_power, max_power) tuple corresponding to each cpu pstate */
- std::vector<PowerRange> power_range_watts_list_;
-
- /* We need to keep track of what pstate has been used, as we will sometimes be notified only *after* a pstate has been
- * used (but we need to update the energy consumption with the old pstate!)
- */
- int pstate_ = 0;
- const int pstate_off_ = -1;
-
- /* Only used to split total energy into unused/used hosts.
- * If you want to get this info for something else, rather use the host_load plugin
- */
- bool host_was_used_ = false;
-public:
double watts_off_ = 0.0; /*< Consumption when the machine is turned off (shutdown) */
double total_energy_ = 0.0; /*< Total energy consumed by the host */
double last_updated_; /*< Timestamp of the last energy update event*/
for (simgrid::kernel::resource::Cpu* const& cpu : action.cpus()) {
simgrid::s4u::Host* host = cpu->get_host();
if (host != nullptr) {
-
// If it's a VM, take the corresponding PM
simgrid::s4u::VirtualMachine* vm = dynamic_cast<simgrid::s4u::VirtualMachine*>(host);
if (vm) // If it's a VM, take the corresponding PM
double used_hosts_energy = 0.0; // Energy consumed by hosts that computed something
for (size_t i = 0; i < hosts.size(); i++) {
if (dynamic_cast<simgrid::s4u::VirtualMachine*>(hosts[i]) == nullptr) { // Ignore virtual machines
-
double energy = hosts[i]->extension<HostEnergy>()->get_consumed_energy();
total_energy += energy;
if (hosts[i]->extension<HostEnergy>()->host_was_used_)
} else if(tag<0 && tag != MPI_ANY_TAG){
retval = MPI_ERR_TAG;
} else {
-
int my_proc_id = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(my_proc_id, __func__,
int PMPI_Bsend_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request)
{
-
int retval = 0;
smpi_bench_end();
simgrid::smpi::Datatype::copy(recvbuf, count, datatype, buf, count, datatype);
}
xbt_free(recvbuf);
-
}
return retval;
}
}
}
-Datatype::~Datatype(){
+Datatype::~Datatype()
+{
xbt_assert(refcount_ >= 0);
if(flags_ & DT_FLAG_PREDEFINED)
xbt_free(name_);
}
-void Datatype::ref(){
-
+void Datatype::ref()
+{
refcount_++;
#if SIMGRID_HAVE_MC
return MPI_SUCCESS;
}
-int Datatype::copy(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
- void *recvbuf, int recvcount, MPI_Datatype recvtype){
-
-// FIXME Handle the case of a partial shared malloc.
+int Datatype::copy(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
+ MPI_Datatype recvtype)
+{
+ // FIXME Handle the case of a partial shared malloc.
if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
smpi_switch_data_segment(simgrid::s4u::Actor::self());
recvtype->unserialize(sendbuf, recvbuf, count / recvtype->size(), MPI_REPLACE);
} else if (not(recvtype->flags() & DT_FLAG_DERIVED)) {
sendtype->serialize(sendbuf, recvbuf, count / sendtype->size());
- }else{
-
+ } else {
void * buf_tmp = xbt_malloc(count);
sendtype->serialize( sendbuf, buf_tmp,count/sendtype->size());
return MPI_SUCCESS;
}
-Datatype* Datatype::f2c(int id){
+Datatype* Datatype::f2c(int id)
+{
return static_cast<Datatype*>(F2C::f2c(id));
}
-
-
-}
-}
-
+} // namespace smpi
+} // namespace simgrid
recv_win->requests_->push_back(rreq);
rreq->start();
recv_win->mut_->unlock();
-
- }else{
+ } else {
XBT_DEBUG("Entering MPI_Put from myself to myself, rank %d", target_rank);
Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
if(request!=nullptr)
requests_->push_back(rreq);
mut_->unlock();
}
-
- }else{
+ } else {
Datatype::copy(send_addr, target_count, target_datatype, origin_addr, origin_count, origin_datatype);
if(request!=nullptr)
*request=MPI_REQUEST_NULL;
}
-
return MPI_SUCCESS;
}
-
int Win::accumulate(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request)
{
Request::wait(&req, MPI_STATUS_IGNORE);
send_win->atomic_mut_->unlock();
return MPI_SUCCESS;
-
}
int Win::compare_and_swap(const void *origin_addr, void *compare_addr,
return MPI_SUCCESS;
}
-MPI_Errhandler Win::errhandler(){
+MPI_Errhandler Win::errhandler()
+{
return errhandler_;
}
-void Win::set_errhandler(MPI_Errhandler errhandler){
- errhandler_=errhandler;
- if(errhandler_!= MPI_ERRHANDLER_NULL)
+void Win::set_errhandler(MPI_Errhandler errhandler)
+{
+ errhandler_ = errhandler;
+ if (errhandler_ != MPI_ERRHANDLER_NULL)
errhandler->ref();
}
-
-}
-}
+} // namespace smpi
+} // namespace simgrid