SD_task_t last_scheduled_task;
};
-static double sg_host_get_available_at(sg_host_t host)
+static double sg_host_get_available_at(const_sg_host_t host)
{
const struct _HostAttribute* attr = (HostAttribute)sg_host_data(host);
return attr->available_at;
sg_host_data_set(host, attr);
}
-static SD_task_t sg_host_get_last_scheduled_task( sg_host_t host){
+static SD_task_t sg_host_get_last_scheduled_task(const_sg_host_t host)
+{
const struct _HostAttribute* attr = (HostAttribute)sg_host_data(host);
return attr->last_scheduled_task;
}
return ready_tasks;
}
-static double finish_on_at(SD_task_t task, sg_host_t host)
+static double finish_on_at(SD_task_t task, const_sg_host_t host)
{
double result;
std::vector<simgrid::s4u::Disk*> const& disk_list = simgrid::s4u::Host::current()->get_disks();
/* - For each disk mounted on host, display disk name and mount point */
- for (auto disk : disk_list)
+ for (auto const& disk : disk_list)
XBT_INFO("Disk name: %s (read: %.0f B/s -- write: %.0f B/s ", disk->get_cname(), disk->get_read_bandwidth(),
disk->get_write_bandwidth());
*
* Note that argv is copied over, so you should free your own copy once the actor is started. */
XBT_PUBLIC void sg_actor_start(sg_actor_t actor, xbt_main_func_t code, int argc, char** argv);
-XBT_PUBLIC aid_t sg_actor_get_PID(sg_actor_t actor);
-XBT_PUBLIC aid_t sg_actor_get_PPID(sg_actor_t actor);
+XBT_PUBLIC aid_t sg_actor_get_PID(const_sg_actor_t actor);
+XBT_PUBLIC aid_t sg_actor_get_PPID(const_sg_actor_t actor);
XBT_PUBLIC sg_actor_t sg_actor_by_PID(aid_t pid);
-XBT_PUBLIC const char* sg_actor_get_name(sg_actor_t actor);
-XBT_PUBLIC sg_host_t sg_actor_get_host(sg_actor_t actor);
-XBT_PUBLIC const char* sg_actor_get_property_value(sg_actor_t actor, const char* name);
-XBT_PUBLIC xbt_dict_t sg_actor_get_properties(sg_actor_t actor);
+XBT_PUBLIC const char* sg_actor_get_name(const_sg_actor_t actor);
+XBT_PUBLIC sg_host_t sg_actor_get_host(const_sg_actor_t actor);
+XBT_PUBLIC const char* sg_actor_get_property_value(const_sg_actor_t actor, const char* name);
+XBT_PUBLIC xbt_dict_t sg_actor_get_properties(const_sg_actor_t actor);
XBT_PUBLIC void sg_actor_suspend(sg_actor_t actor);
XBT_PUBLIC void sg_actor_resume(sg_actor_t actor);
XBT_PUBLIC int sg_actor_is_suspended(sg_actor_t actor);
SG_BEGIN_DECL
XBT_PUBLIC sg_bar_t sg_barrier_init(unsigned int count);
-XBT_PUBLIC void sg_barrier_destroy(sg_bar_t bar);
+XBT_PUBLIC void sg_barrier_destroy(const_sg_bar_t bar);
XBT_PUBLIC int sg_barrier_wait(sg_bar_t bar);
SG_END_DECL
/** @brief Broadcasts the given mutex variable */
XBT_PUBLIC void sg_cond_notify_all(sg_cond_t cond);
/** @brief Destroys the given mutex variable */
-XBT_PUBLIC void sg_cond_destroy(sg_cond_t cond);
+XBT_PUBLIC void sg_cond_destroy(const_sg_cond_t cond);
SG_END_DECL
class Actor;
/** Smart pointer to a simgrid::s4u::Actor */
typedef boost::intrusive_ptr<Actor> ActorPtr;
-XBT_PUBLIC void intrusive_ptr_release(Actor* actor);
-XBT_PUBLIC void intrusive_ptr_add_ref(Actor* actor);
+XBT_PUBLIC void intrusive_ptr_release(const Actor* actor);
+XBT_PUBLIC void intrusive_ptr_add_ref(const Actor* actor);
class Barrier;
/** Smart pointer to a simgrid::s4u::Barrier */
class ConditionVariable;
/** Smart pointer to a simgrid::s4u::ConditionVariable */
typedef boost::intrusive_ptr<ConditionVariable> ConditionVariablePtr;
-XBT_PUBLIC void intrusive_ptr_release(ConditionVariable* c);
-XBT_PUBLIC void intrusive_ptr_add_ref(ConditionVariable* c);
+XBT_PUBLIC void intrusive_ptr_release(const ConditionVariable* c);
+XBT_PUBLIC void intrusive_ptr_add_ref(const ConditionVariable* c);
class Engine;
class Mailbox;
class Mutex;
-XBT_PUBLIC void intrusive_ptr_release(Mutex* m);
-XBT_PUBLIC void intrusive_ptr_add_ref(Mutex* m);
+XBT_PUBLIC void intrusive_ptr_release(const Mutex* m);
+XBT_PUBLIC void intrusive_ptr_add_ref(const Mutex* m);
/** Smart pointer to a simgrid::s4u::Mutex */
typedef boost::intrusive_ptr<Mutex> MutexPtr;
#endif
typedef s4u_Barrier* sg_bar_t;
+typedef const s4u_Barrier* const_sg_bar_t;
typedef s4u_ConditionVariable* sg_cond_t;
+typedef const s4u_ConditionVariable* const_sg_cond_t;
typedef s4u_Mutex* sg_mutex_t;
+typedef const s4u_Mutex* const_sg_mutex_t;
typedef s4u_Semaphore* sg_sem_t;
+typedef const s4u_Semaphore* const_sg_sem_t;
typedef s4u_NetZone* sg_netzone_t;
+typedef const s4u_NetZone* const_sg_netzone_t;
typedef s4u_Host* sg_host_t;
typedef const s4u_Host* const_sg_host_t;
typedef s4u_Link* sg_link_t;
XBT_PUBLIC xbt_dynar_t sg_hosts_as_dynar();
XBT_PUBLIC size_t sg_host_extension_create(void (*deleter)(void*));
-XBT_PUBLIC void* sg_host_extension_get(sg_host_t host, size_t rank);
+XBT_PUBLIC void* sg_host_extension_get(const_sg_host_t host, size_t rank);
/** @brief Finds a sg_host_t using its name.
*
XBT_PUBLIC sg_host_t sg_host_by_name(const char* name);
/** @brief Return the name of the #sg_host_t. */
-XBT_PUBLIC const char* sg_host_get_name(sg_host_t host);
+XBT_PUBLIC const char* sg_host_get_name(const_sg_host_t host);
// ========== User Data ==============
/** @brief Return the user data of a #sg_host_t.
*
* This functions returns the user data associated to @a host if any.
*/
-XBT_PUBLIC void* sg_host_data(sg_host_t host);
+XBT_PUBLIC void* sg_host_data(const_sg_host_t host);
XBT_ATTRIB_DEPRECATED_v328("Please use sg_host_data()") XBT_PUBLIC void* sg_host_user(sg_host_t host);
/** @brief Set the user data of a #sg_host_t.
*
* @param host a host
* @return a dynar containing all storages (name) attached to the host
*/
-XBT_PUBLIC xbt_dynar_t sg_host_get_attached_storage_list(sg_host_t host);
+XBT_PUBLIC xbt_dynar_t sg_host_get_attached_storage_list(const_sg_host_t host);
// =========== user-level functions ===============
/** @brief Return the speed of the processor (in flop/s), regardless of the current load on the machine. */
-XBT_PUBLIC double sg_host_speed(sg_host_t host);
-XBT_PUBLIC double sg_host_get_pstate_speed(sg_host_t host, int pstate_index);
+XBT_PUBLIC double sg_host_speed(const_sg_host_t host);
+XBT_PUBLIC double sg_host_get_pstate_speed(const_sg_host_t host, int pstate_index);
-XBT_PUBLIC double sg_host_get_available_speed(sg_host_t host);
+XBT_PUBLIC double sg_host_get_available_speed(const_sg_host_t host);
-XBT_PUBLIC int sg_host_core_count(sg_host_t host);
+XBT_PUBLIC int sg_host_core_count(const_sg_host_t host);
/** @brief Returns the current computation load (in flops per second).
* @param host a host
*/
-XBT_PUBLIC double sg_host_load(sg_host_t host);
+XBT_PUBLIC double sg_host_load(const_sg_host_t host);
/** @brief Return the location on which the current process is running. */
XBT_PUBLIC sg_host_t sg_host_self();
*
* @param host host to test
*/
-XBT_PUBLIC int sg_host_get_nb_pstates(sg_host_t host);
+XBT_PUBLIC int sg_host_get_nb_pstates(const_sg_host_t host);
-XBT_PUBLIC int sg_host_get_pstate(sg_host_t host);
+XBT_PUBLIC int sg_host_get_pstate(const_sg_host_t host);
XBT_PUBLIC void sg_host_set_pstate(sg_host_t host, int pstate);
XBT_PUBLIC void sg_host_turn_on(sg_host_t host);
XBT_PUBLIC void sg_host_turn_off(sg_host_t host);
-XBT_PUBLIC int sg_host_is_on(sg_host_t host);
+XBT_PUBLIC int sg_host_is_on(const_sg_host_t host);
/** @ingroup m_host_management
* @brief Returns a xbt_dict_t consisting of the list of properties assigned to this host
* @param host a host
* @return a dict containing the properties
*/
-XBT_PUBLIC xbt_dict_t sg_host_get_properties(sg_host_t host);
+XBT_PUBLIC xbt_dict_t sg_host_get_properties(const_sg_host_t host);
/** @ingroup m_host_management
* @brief Returns the value of a given host property
* @param name a property name
* @return value of a property (or nullptr if property not set)
*/
-XBT_PUBLIC const char* sg_host_get_property_value(sg_host_t host, const char* name);
+XBT_PUBLIC const char* sg_host_get_property_value(const_sg_host_t host, const char* name);
/** @ingroup m_host_management
* @brief Change the value of a given host property
*/
XBT_PUBLIC void sg_host_set_property_value(sg_host_t host, const char* name, const char* value);
-XBT_PUBLIC void sg_host_route(sg_host_t from, sg_host_t to, xbt_dynar_t links);
-XBT_PUBLIC double sg_host_route_latency(sg_host_t from, sg_host_t to);
-XBT_PUBLIC double sg_host_route_bandwidth(sg_host_t from, sg_host_t to);
+XBT_PUBLIC void sg_host_route(const_sg_host_t from, const_sg_host_t to, xbt_dynar_t links);
+XBT_PUBLIC double sg_host_route_latency(const_sg_host_t from, const_sg_host_t to);
+XBT_PUBLIC double sg_host_route_bandwidth(const_sg_host_t from, const_sg_host_t to);
void sg_host_send_to(sg_host_t from, sg_host_t to, double byte_amount);
-XBT_PUBLIC void sg_host_dump(sg_host_t ws);
+XBT_PUBLIC void sg_host_dump(const_sg_host_t ws);
-XBT_PUBLIC void sg_host_get_actor_list(sg_host_t host, xbt_dynar_t whereto);
+XBT_PUBLIC void sg_host_get_actor_list(const_sg_host_t host, xbt_dynar_t whereto);
SG_END_DECL
#endif /* SIMGRID_HOST_H_ */
/* C interface */
SG_BEGIN_DECL
-XBT_PUBLIC const char* sg_link_name(sg_link_t link);
+XBT_PUBLIC const char* sg_link_name(const_sg_link_t link);
XBT_PUBLIC sg_link_t sg_link_by_name(const char* name);
-XBT_PUBLIC int sg_link_is_shared(sg_link_t link);
-XBT_PUBLIC double sg_link_bandwidth(sg_link_t link);
-XBT_PUBLIC double sg_link_latency(sg_link_t link);
-XBT_PUBLIC void* sg_link_data(sg_link_t link);
+XBT_PUBLIC int sg_link_is_shared(const_sg_link_t link);
+XBT_PUBLIC double sg_link_bandwidth(const_sg_link_t link);
+XBT_PUBLIC double sg_link_latency(const_sg_link_t link);
+XBT_PUBLIC void* sg_link_data(const_sg_link_t link);
XBT_PUBLIC void sg_link_data_set(sg_link_t link, void* data);
XBT_PUBLIC int sg_link_count();
XBT_PUBLIC sg_link_t* sg_link_list();
XBT_PUBLIC xbt_dynar_t MSG_hosts_as_dynar();
/** @brief Returns the name of this host */
-XBT_PUBLIC const char* MSG_host_get_name(sg_host_t host);
+XBT_PUBLIC const char* MSG_host_get_name(const_sg_host_t host);
/** @brief Returns the user data of this host */
-XBT_PUBLIC void* MSG_host_get_data(sg_host_t host);
+XBT_PUBLIC void* MSG_host_get_data(const_sg_host_t host);
/** @brief Sets the user data of this host */
XBT_PUBLIC void MSG_host_set_data(sg_host_t host, void* data);
XBT_PUBLIC xbt_dict_t MSG_host_get_mounted_storage_list(sg_host_t host);
-XBT_PUBLIC xbt_dynar_t MSG_host_get_attached_storage_lists(sg_host_t host);
-XBT_PUBLIC double MSG_host_get_speed(sg_host_t host);
-XBT_PUBLIC double MSG_host_get_power_peak_at(sg_host_t host, int pstate_index);
-XBT_PUBLIC int MSG_host_get_core_number(sg_host_t host);
-XBT_PUBLIC int MSG_host_get_nb_pstates(sg_host_t host);
-XBT_PUBLIC int MSG_host_get_pstate(sg_host_t host);
+XBT_PUBLIC xbt_dynar_t MSG_host_get_attached_storage_lists(const_sg_host_t host);
+XBT_PUBLIC double MSG_host_get_speed(const_sg_host_t host);
+XBT_PUBLIC double MSG_host_get_power_peak_at(const_sg_host_t host, int pstate_index);
+XBT_PUBLIC int MSG_host_get_core_number(const_sg_host_t host);
+XBT_PUBLIC int MSG_host_get_nb_pstates(const_sg_host_t host);
+XBT_PUBLIC int MSG_host_get_pstate(const_sg_host_t host);
XBT_PUBLIC void MSG_host_set_pstate(sg_host_t host, int pstate);
/** @brief Start the host if it is off
*
* for more info on DVFS.
*/
XBT_PUBLIC void MSG_host_off(sg_host_t h);
-XBT_PUBLIC int MSG_host_is_on(sg_host_t h);
-XBT_PUBLIC xbt_dict_t MSG_host_get_properties(sg_host_t host);
-XBT_PUBLIC const char* MSG_host_get_property_value(sg_host_t host, const char* name);
+XBT_PUBLIC int MSG_host_is_on(const_sg_host_t h);
+XBT_PUBLIC xbt_dict_t MSG_host_get_properties(const_sg_host_t host);
+XBT_PUBLIC const char* MSG_host_get_property_value(const_sg_host_t host, const char* name);
XBT_PUBLIC void MSG_host_set_property_value(sg_host_t host, const char* name, const char* value);
-XBT_PUBLIC void MSG_host_get_process_list(sg_host_t host, xbt_dynar_t whereto);
+XBT_PUBLIC void MSG_host_get_process_list(const_sg_host_t host, xbt_dynar_t whereto);
/** @brief Return the location on which the current process is executed */
XBT_PUBLIC sg_host_t MSG_host_self();
-XBT_PUBLIC double MSG_host_get_load(sg_host_t host);
+XBT_PUBLIC double MSG_host_get_load(const_sg_host_t host);
/* ******************************** VMs ************************************* */
typedef sg_vm_t msg_vm_t;
*/
typedef sg_actor_t msg_process_t;
-XBT_PUBLIC int MSG_process_get_PID(msg_process_t process);
-XBT_PUBLIC int MSG_process_get_PPID(msg_process_t process);
+XBT_PUBLIC int MSG_process_get_PID(const_sg_actor_t process);
+XBT_PUBLIC int MSG_process_get_PPID(const_sg_actor_t process);
/** @brief Return a process from its PID (or NULL if not found).
*
* Note that the PID are unique in the whole simulation, not only on a given host.
*/
XBT_PUBLIC sg_actor_t MSG_process_from_PID(int pid);
-XBT_PUBLIC const char* MSG_process_get_name(msg_process_t process);
-XBT_PUBLIC sg_host_t MSG_process_get_host(msg_process_t process);
+XBT_PUBLIC const char* MSG_process_get_name(const_sg_actor_t process);
+XBT_PUBLIC sg_host_t MSG_process_get_host(const_sg_actor_t process);
/*property handlers*/
-XBT_PUBLIC xbt_dict_t MSG_process_get_properties(msg_process_t process);
-XBT_PUBLIC const char* MSG_process_get_property_value(msg_process_t process, const char* name);
+XBT_PUBLIC xbt_dict_t MSG_process_get_properties(const_sg_actor_t process);
+XBT_PUBLIC const char* MSG_process_get_property_value(const_sg_actor_t process, const char* name);
XBT_PUBLIC void MSG_process_suspend(msg_process_t process);
XBT_PUBLIC void MSG_process_resume(msg_process_t process);
/** @brief Initializes a barrier, with count elements */
XBT_PUBLIC msg_bar_t MSG_barrier_init(unsigned int count);
/** @brief Destroys barrier */
-XBT_PUBLIC void MSG_barrier_destroy(msg_bar_t bar);
+XBT_PUBLIC void MSG_barrier_destroy(const_sg_bar_t bar);
/** @brief Performs a barrier already initialized */
XBT_PUBLIC int MSG_barrier_wait(msg_bar_t bar);
XBT_PUBLIC void sg_mutex_lock(sg_mutex_t mutex);
XBT_PUBLIC void sg_mutex_unlock(sg_mutex_t mutex);
XBT_PUBLIC int sg_mutex_try_lock(sg_mutex_t mutex);
-XBT_PUBLIC void sg_mutex_destroy(sg_mutex_t mutex);
+XBT_PUBLIC void sg_mutex_destroy(const_sg_mutex_t mutex);
SG_END_DECL
Actor& operator=(Actor const&) = delete;
// ***** Reference count *****
- friend XBT_PUBLIC void intrusive_ptr_add_ref(Actor * actor);
- friend XBT_PUBLIC void intrusive_ptr_release(Actor * actor);
+ friend XBT_PUBLIC void intrusive_ptr_add_ref(const Actor* actor);
+ friend XBT_PUBLIC void intrusive_ptr_release(const Actor* actor);
#endif
/** Retrieve the amount of references on that object. Useful to debug the automatic refcounting */
int get_refcount();
/*! take a vector s4u::CommPtr and return when one of them is finished.
* The return value is the rank of the first finished CommPtr. */
- static int wait_any(std::vector<CommPtr> * comms) { return wait_any_for(comms, -1); }
+ static int wait_any(const std::vector<CommPtr>* comms) { return wait_any_for(comms, -1); }
/*! Same as wait_any, but with a timeout. If the timeout occurs, parameter last is returned.*/
- static int wait_any_for(std::vector<CommPtr>* comms_in, double timeout);
+ static int wait_any_for(const std::vector<CommPtr>* comms_in, double timeout);
/*! take a vector s4u::CommPtr and return when all of them is finished. */
- static void wait_all(std::vector<CommPtr>* comms);
+ static void wait_all(const std::vector<CommPtr>* comms);
/*! take a vector s4u::CommPtr and return the rank of the first finished one (or -1 if none is done). */
- static int test_any(std::vector<CommPtr> * comms);
+ static int test_any(const std::vector<CommPtr>* comms);
Comm* start() override;
Comm* wait() override;
ConditionVariable(ConditionVariable const&) = delete;
ConditionVariable& operator=(ConditionVariable const&) = delete;
- friend XBT_PUBLIC void intrusive_ptr_add_ref(ConditionVariable * cond);
- friend XBT_PUBLIC void intrusive_ptr_release(ConditionVariable * cond);
+ friend XBT_PUBLIC void intrusive_ptr_add_ref(const ConditionVariable* cond);
+ friend XBT_PUBLIC void intrusive_ptr_release(const ConditionVariable* cond);
#endif
static ConditionVariablePtr create();
// Wait functions without time:
void wait(MutexPtr lock);
- void wait(std::unique_lock<Mutex> & lock);
- template <class P> void wait(std::unique_lock<Mutex> & lock, P pred)
+ void wait(const std::unique_lock<Mutex>& lock);
+ template <class P> void wait(const std::unique_lock<Mutex>& lock, P pred)
{
while (not pred())
wait(lock);
// Wait function taking a plain double as time:
- std::cv_status wait_until(std::unique_lock<Mutex> & lock, double timeout_time);
- std::cv_status wait_for(std::unique_lock<Mutex> & lock, double duration);
+ std::cv_status wait_until(const std::unique_lock<Mutex>& lock, double timeout_time);
+ std::cv_status wait_for(const std::unique_lock<Mutex>& lock, double duration);
template <class P> bool wait_until(std::unique_lock<Mutex> & lock, double timeout_time, P pred)
{
while (not pred())
return pred();
return true;
}
- template <class P> bool wait_for(std::unique_lock<Mutex> & lock, double duration, P pred)
+ template <class P> bool wait_for(const std::unique_lock<Mutex>& lock, double duration, P pred)
{
return this->wait_until(lock, SIMIX_get_clock() + duration, std::move(pred));
}
// Wait function taking a C++ style time:
template <class Rep, class Period, class P>
- bool wait_for(std::unique_lock<Mutex> & lock, std::chrono::duration<Rep, Period> duration, P pred)
+ bool wait_for(const std::unique_lock<Mutex>& lock, std::chrono::duration<Rep, Period> duration, P pred)
{
auto seconds = std::chrono::duration_cast<SimulationClockDuration>(duration);
return this->wait_for(lock, seconds.count(), pred);
}
template <class Rep, class Period>
- std::cv_status wait_for(std::unique_lock<Mutex> & lock, std::chrono::duration<Rep, Period> duration)
+ std::cv_status wait_for(const std::unique_lock<Mutex>& lock, std::chrono::duration<Rep, Period> duration)
{
auto seconds = std::chrono::duration_cast<SimulationClockDuration>(duration);
return this->wait_for(lock, seconds.count());
}
template <class Duration>
- std::cv_status wait_until(std::unique_lock<Mutex> & lock, const SimulationTimePoint<Duration>& timeout_time)
+ std::cv_status wait_until(const std::unique_lock<Mutex>& lock, const SimulationTimePoint<Duration>& timeout_time)
{
auto timeout_native = std::chrono::time_point_cast<SimulationClockDuration>(timeout_time);
return this->wait_until(lock, timeout_native.time_since_epoch().count());
}
template <class Duration, class P>
- bool wait_until(std::unique_lock<Mutex> & lock, const SimulationTimePoint<Duration>& timeout_time, P pred)
+ bool wait_until(const std::unique_lock<Mutex>& lock, const SimulationTimePoint<Duration>& timeout_time, P pred)
{
auto timeout_native = std::chrono::time_point_cast<SimulationClockDuration>(timeout_time);
return this->wait_until(lock, timeout_native.time_since_epoch().count(), std::move(pred));
/** @brief Retrieves the name of that disk as a C string */
const char* get_cname() const { return name_.c_str(); }
double get_read_bandwidth() const;
- double get_write_bandwidth();
+ double get_write_bandwidth() const;
const std::unordered_map<std::string, std::string>* get_properties() const;
const char* get_property(const std::string& key) const;
void set_property(const std::string&, const std::string& value);
- Host* get_host();
+ Host* get_host() const;
IoPtr io_init(sg_size_t size, s4u::Io::OpType type);
friend kernel::resource::LinkImpl;
void host_register(const std::string& name, Host* host);
void host_unregister(const std::string& name);
- void link_register(const std::string& name, Link* link);
+ void link_register(const std::string& name, const Link* link);
void link_unregister(const std::string& name);
- void storage_register(const std::string& name, Storage* storage);
+ void storage_register(const std::string& name, const Storage* storage);
void storage_unregister(const std::string& name);
void netpoint_register(simgrid::kernel::routing::NetPoint* card);
void netpoint_unregister(simgrid::kernel::routing::NetPoint* card);
kernel::routing::NetPoint* netpoint_by_name_or_null(const std::string& name);
NetZone* get_netzone_root();
- void set_netzone_root(NetZone* netzone);
+ void set_netzone_root(const NetZone* netzone);
NetZone* netzone_by_name_or_null(const std::string& name);
kernel::routing::NetPoint* get_netpoint() const { return pimpl_netpoint_; }
- int get_actor_count();
- std::vector<ActorPtr> get_all_actors();
+ int get_actor_count() const;
+ std::vector<ActorPtr> get_all_actors() const;
/** Turns that host on if it was previously off
*
*/
std::unordered_map<std::string, Storage*> const& get_mounted_storages();
- void route_to(Host* dest, std::vector<Link*>& links, double* latency);
- void route_to(Host* dest, std::vector<kernel::resource::LinkImpl*>& links, double* latency);
+ void route_to(const Host* dest, std::vector<Link*>& links, double* latency) const;
+ void route_to(const Host* dest, std::vector<kernel::resource::LinkImpl*>& links, double* latency) const;
void send_to(Host* dest, double byte_amount);
NetZone* get_englobing_zone();
double get_latency() const;
/** @brief Describes how the link is shared between flows */
- SharingPolicy get_sharing_policy();
+ SharingPolicy get_sharing_policy() const;
/** @brief Returns the current load (in flops per second) */
- double get_usage();
+ double get_usage() const;
/** @brief Check if the Link is used (at least one flow uses the link) */
- bool is_used();
+ bool is_used() const;
void turn_on();
bool is_on() const;
kernel::activity::MutexImpl* const pimpl_;
/* refcounting */
- friend XBT_PUBLIC void intrusive_ptr_add_ref(Mutex* mutex);
- friend XBT_PUBLIC void intrusive_ptr_release(Mutex* mutex);
+ friend XBT_PUBLIC void intrusive_ptr_add_ref(const Mutex* mutex);
+ friend XBT_PUBLIC void intrusive_ptr_release(const Mutex* mutex);
public:
explicit Mutex(kernel::activity::MutexImpl* mutex) : pimpl_(mutex) {}
NetZone* get_father();
- std::vector<Host*> get_all_hosts();
+ std::vector<Host*> get_all_hosts() const;
int get_host_count();
kernel::routing::NetZoneImpl* get_impl() const { return pimpl_; }
const char* get_property(const std::string& key) const;
void set_property(const std::string& key, const std::string& value);
- std::vector<NetZone*> get_children();
+ std::vector<NetZone*> get_children() const;
/* Add content to the netzone, at parsing time. It should be sealed afterward. */
int add_component(kernel::routing::NetPoint* elm); /* A host, a router or a netzone, whatever */
/** @brief Retrieves the name of that storage as a C string */
const char* get_cname() const { return name_.c_str(); }
- const char* get_type();
- Host* get_host() { return attached_to_; };
+ const char* get_type() const;
+ Host* get_host() const { return attached_to_; };
void set_host(Host* host) { attached_to_ = host; }
const std::unordered_map<std::string, std::string>* get_properties() const;
XBT_PUBLIC int sg_sem_acquire_timeout(sg_sem_t sem, double timeout);
XBT_PUBLIC void sg_sem_release(sg_sem_t sem);
XBT_PUBLIC int sg_sem_get_capacity(sg_sem_t sem);
-XBT_PUBLIC void sg_sem_destroy(sg_sem_t sem);
+XBT_PUBLIC void sg_sem_destroy(const_sg_sem_t sem);
XBT_PUBLIC int sg_sem_would_block(sg_sem_t sem);
SG_END_DECL
/* C interface */
SG_BEGIN_DECL
-XBT_PUBLIC const char* sg_storage_get_name(sg_storage_t storage);
+XBT_PUBLIC const char* sg_storage_get_name(const_sg_storage_t storage);
XBT_PUBLIC sg_storage_t sg_storage_get_by_name(const char* name);
-XBT_PUBLIC xbt_dict_t sg_storage_get_properties(sg_storage_t storage);
+XBT_PUBLIC xbt_dict_t sg_storage_get_properties(const_sg_storage_t storage);
XBT_PUBLIC void sg_storage_set_property_value(sg_storage_t storage, const char* name, const char* value);
-XBT_PUBLIC const char* sg_storage_get_property_value(sg_storage_t storage, const char* name);
+XBT_PUBLIC const char* sg_storage_get_property_value(const_sg_storage_t storage, const char* name);
XBT_PUBLIC xbt_dynar_t sg_storages_as_dynar();
XBT_PUBLIC void sg_storage_set_data(sg_storage_t host, void* data);
-XBT_PUBLIC void* sg_storage_get_data(sg_storage_t storage);
-XBT_PUBLIC const char* sg_storage_get_host(sg_storage_t storage);
+XBT_PUBLIC void* sg_storage_get_data(const_sg_storage_t storage);
+XBT_PUBLIC const char* sg_storage_get_host(const_sg_storage_t storage);
XBT_PUBLIC sg_size_t sg_storage_read(sg_storage_t storage, sg_size_t size);
XBT_PUBLIC sg_size_t sg_storage_write(sg_storage_t storage, sg_size_t size);
SG_BEGIN_DECL
XBT_PUBLIC sg_netzone_t sg_zone_get_root();
-XBT_PUBLIC const char* sg_zone_get_name(sg_netzone_t zone);
+XBT_PUBLIC const char* sg_zone_get_name(const_sg_netzone_t zone);
XBT_PUBLIC sg_netzone_t sg_zone_get_by_name(const char* name);
-XBT_PUBLIC void sg_zone_get_sons(sg_netzone_t zone, xbt_dict_t whereto);
-XBT_PUBLIC const char* sg_zone_get_property_value(sg_netzone_t as, const char* name);
-XBT_PUBLIC void sg_zone_set_property_value(sg_netzone_t netzone, const char* name, char* value);
-XBT_PUBLIC void sg_zone_get_hosts(sg_netzone_t zone, xbt_dynar_t whereto);
+XBT_PUBLIC void sg_zone_get_sons(const_sg_netzone_t zone, xbt_dict_t whereto);
+XBT_PUBLIC const char* sg_zone_get_property_value(const_sg_netzone_t as, const char* name);
+XBT_PUBLIC void sg_zone_set_property_value(sg_netzone_t netzone, const char* name, const char* value);
+XBT_PUBLIC void sg_zone_get_hosts(const_sg_netzone_t zone, xbt_dynar_t whereto);
SG_END_DECL
xbt_assert((jprocess != nullptr), "Process allocation failed.");
jprocess = env->NewGlobalRef(jprocess);
//bind the process to the context
- msg_process_t process = MSG_process_self();
+ const_sg_actor_t process = MSG_process_self();
context->jprocess_ = jprocess;
/* sets the PID and the PPID of the process */
}
JNIEXPORT jdouble JNICALL Java_org_simgrid_msg_Host_getSpeed(JNIEnv * env, jobject jhost) {
- msg_host_t host = jhost_get_native(env, jhost);
+ const_sg_host_t host = jhost_get_native(env, jhost);
if (not host) {
jxbt_throw_notbound(env, "host", jhost);
}
JNIEXPORT jdouble JNICALL Java_org_simgrid_msg_Host_getCoreNumber(JNIEnv * env, jobject jhost) {
- msg_host_t host = jhost_get_native(env, jhost);
+ const_sg_host_t host = jhost_get_native(env, jhost);
if (not host) {
jxbt_throw_notbound(env, "host", jhost);
}
JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Host_getProperty(JNIEnv *env, jobject jhost, jobject jname) {
- msg_host_t host = jhost_get_native(env, jhost);
+ const_sg_host_t host = jhost_get_native(env, jhost);
if (not host) {
jxbt_throw_notbound(env, "host", jhost);
JNIEXPORT jboolean JNICALL Java_org_simgrid_msg_Host_isOn(JNIEnv * env, jobject jhost)
{
- msg_host_t host = jhost_get_native(env, jhost);
+ const_sg_host_t host = jhost_get_native(env, jhost);
if (not host) {
jxbt_throw_notbound(env, "host", jhost);
}
/** JNI: returns the current pstate index of the native host bound to @p jhost.
 *  NOTE(review): unlike sibling bindings, no not-bound check is performed here — presumably the
 *  caller guarantees the binding; confirm before relying on it with an unbound Host object. */
JNIEXPORT jint JNICALL Java_org_simgrid_msg_Host_getPstate(JNIEnv* env, jobject jhost)
{
  const_sg_host_t host = jhost_get_native(env, jhost);
  return MSG_host_get_pstate(host);
}
/** JNI: returns how many pstates the native host bound to @p jhost provides. */
JNIEXPORT jint JNICALL Java_org_simgrid_msg_Host_getPstatesCount(JNIEnv* env, jobject jhost)
{
  const_sg_host_t host = jhost_get_native(env, jhost);
  return MSG_host_get_nb_pstates(host);
}
/** JNI: returns the nominal speed (flop/s) of the native host bound to @p jhost. */
JNIEXPORT jdouble JNICALL Java_org_simgrid_msg_Host_getCurrentPowerPeak(JNIEnv* env, jobject jhost)
{
  const_sg_host_t host = jhost_get_native(env, jhost);
  return MSG_host_get_speed(host);
}
/** JNI: returns the speed (flop/s) of the bound host at the given @p pstate index. */
JNIEXPORT jdouble JNICALL Java_org_simgrid_msg_Host_getPowerPeakAt(JNIEnv* env, jobject jhost, jint pstate)
{
  const_sg_host_t host = jhost_get_native(env, jhost);
  return MSG_host_get_power_peak_at(host, pstate);
}
/** JNI: returns the current computational load (flops/s) of the bound host. */
JNIEXPORT jdouble JNICALL Java_org_simgrid_msg_Host_getLoad(JNIEnv* env, jobject jhost)
{
  const_sg_host_t host = jhost_get_native(env, jhost);
  return MSG_host_get_load(host);
}
jfieldID jprocess_field_Process_pid;
jfieldID jprocess_field_Process_ppid;
-jobject jprocess_from_native(const simgrid::s4u::Actor* process)
+jobject jprocess_from_native(const_sg_actor_t process)
{
const simgrid::kernel::context::JavaContext* context =
static_cast<simgrid::kernel::context::JavaContext*>(process->get_impl()->context_.get());
return (msg_process_t)(intptr_t)env->GetLongField(jprocess, jprocess_field_Process_bind);
}
-void jprocess_bind(jobject jprocess, msg_process_t process, JNIEnv * env)
+void jprocess_bind(jobject jprocess, const_sg_actor_t process, JNIEnv* env)
{
env->SetLongField(jprocess, jprocess_field_Process_bind, (intptr_t)process);
}
/** JNI: returns the PID of the native actor bound to @p jprocess. */
JNIEXPORT jint JNICALL Java_org_simgrid_msg_Process_nativeGetPID(JNIEnv* env, jobject jprocess)
{
  const_sg_actor_t process = jprocess_to_native(jprocess, env);
  return MSG_process_get_PID(process);
}
JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Process_getProperty(JNIEnv *env, jobject jprocess, jobject jname) {
- msg_process_t process = jprocess_to_native(jprocess, env);
+ const_sg_actor_t process = jprocess_to_native(jprocess, env);
if (not process) {
jxbt_throw_notbound(env, "process", jprocess);
void jprocess_unref(jobject jprocess, JNIEnv* env);
/** Binds a native instance to a java instance. */
-void jprocess_bind(jobject jprocess, msg_process_t process, JNIEnv* env);
+void jprocess_bind(jobject jprocess, const_sg_actor_t process, JNIEnv* env);
/** Extract the java instance from the native one */
-jobject jprocess_from_native(const simgrid::s4u::Actor* process);
+jobject jprocess_from_native(const_sg_actor_t process);
/** Extract the native instance from the java one */
msg_process_t jprocess_to_native(jobject jprocess, JNIEnv* env);
"Test whether the communication is terminated, see :cpp:func:`simgrid::s4u::Comm::test()`")
.def("wait", [](simgrid::s4u::CommPtr self) { self->wait(); },
"Block until the completion of that communication, see :cpp:func:`simgrid::s4u::Comm::wait()`")
- .def("wait_all", [](std::vector<simgrid::s4u::CommPtr>* comms) { simgrid::s4u::Comm::wait_all(comms); },
+ .def("wait_all", [](const std::vector<simgrid::s4u::CommPtr>* comms) { simgrid::s4u::Comm::wait_all(comms); },
"Block until the completion of all communications in the list, see "
":cpp:func:`simgrid::s4u::Comm::wait_all()`")
.def(
- "wait_any", [](std::vector<simgrid::s4u::CommPtr>* comms) { return simgrid::s4u::Comm::wait_any(comms); },
+ "wait_any",
+ [](const std::vector<simgrid::s4u::CommPtr>* comms) { return simgrid::s4u::Comm::wait_any(comms); },
"Block until the completion of any communication in the list and return the index of the terminated one, see "
":cpp:func:`simgrid::s4u::Comm::wait_any()`");
py::class_<simgrid::s4u::Exec, simgrid::s4u::ExecPtr>(m, "Exec", "Execution, see :ref:`class s4u::Exec <API_s4u_Exec>`")
void turn_on() override;
void turn_off() override;
- s4u::Host* get_host() { return host_; }
+ s4u::Host* get_host() const { return host_; }
void set_host(s4u::Host* host) { host_ = host; }
void destroy(); // Must be called instead of the destructor
}
/* ************************** Actors *************************** */
/** Legacy MSG wrapper: returns the PID of @a actor (delegates to sg_actor_get_PID()). */
int MSG_process_get_PID(const_sg_actor_t actor)
{
  return sg_actor_get_PID(actor);
}
/** Legacy MSG wrapper: returns the PID of the parent of @a actor (delegates to sg_actor_get_PPID()). */
int MSG_process_get_PPID(const_sg_actor_t actor)
{
  return sg_actor_get_PPID(actor);
}
{
return sg_actor_by_PID(PID);
}
/** Legacy MSG wrapper: returns the name of @a actor (delegates to sg_actor_get_name()). */
const char* MSG_process_get_name(const_sg_actor_t actor)
{
  return sg_actor_get_name(actor);
}
/** Legacy MSG wrapper: returns the host on which @a actor runs (delegates to sg_actor_get_host()). */
sg_host_t MSG_process_get_host(const_sg_actor_t actor)
{
  return sg_actor_get_host(actor);
}
/** Legacy MSG wrapper: returns the properties of @a actor as a dict (delegates to sg_actor_get_properties()). */
xbt_dict_t MSG_process_get_properties(const_sg_actor_t actor)
{
  return sg_actor_get_properties(actor);
}
/** Legacy MSG wrapper: returns the value of property @a name on @a actor (delegates to sg_actor_get_property_value()). */
const char* MSG_process_get_property_value(const_sg_actor_t actor, const char* name)
{
  return sg_actor_get_property_value(actor, name);
}
{
return sg_host_by_name(name);
}
/** Legacy MSG wrapper: returns the name of @a host (delegates to sg_host_get_name()). */
const char* MSG_host_get_name(const_sg_host_t host)
{
  return sg_host_get_name(host);
}
/** Legacy MSG wrapper: returns the user data attached to @a host (delegates to sg_host_data()). */
void* MSG_host_get_data(const_sg_host_t host)
{
  return sg_host_data(host);
}
{
return sg_host_get_mounted_storage_list(host);
}
/** Legacy MSG wrapper: returns the storages attached to @a host (delegates to sg_host_get_attached_storage_list()). */
xbt_dynar_t MSG_host_get_attached_storage_lists(const_sg_host_t host)
{
  return sg_host_get_attached_storage_list(host);
}
/** Legacy MSG wrapper: returns the speed of @a host (delegates to sg_host_speed()). */
double MSG_host_get_speed(const_sg_host_t host)
{
  return sg_host_speed(host);
}
/** Legacy MSG wrapper: returns the speed of @a host at pstate @a pstate_index (delegates to sg_host_get_pstate_speed()). */
double MSG_host_get_power_peak_at(const_sg_host_t host, int pstate_index)
{
  return sg_host_get_pstate_speed(host, pstate_index);
}
/** Legacy MSG wrapper: returns the number of cores of @a host (delegates to sg_host_core_count()). */
int MSG_host_get_core_number(const_sg_host_t host)
{
  return sg_host_core_count(host);
}
/** Legacy MSG wrapper: returns the number of pstates of @a host (delegates to sg_host_get_nb_pstates()). */
int MSG_host_get_nb_pstates(const_sg_host_t host)
{
  return sg_host_get_nb_pstates(host);
}
/** Legacy MSG wrapper: returns the current pstate of @a host (delegates to sg_host_get_pstate()). */
int MSG_host_get_pstate(const_sg_host_t host)
{
  return sg_host_get_pstate(host);
}
{
sg_host_turn_off(h);
}
/** Legacy MSG wrapper: tells whether @a h is up (delegates to sg_host_is_on()). */
int MSG_host_is_on(const_sg_host_t h)
{
  return sg_host_is_on(h);
}
/** Legacy MSG wrapper: returns the properties of @a host as a dict (delegates to sg_host_get_properties()). */
xbt_dict_t MSG_host_get_properties(const_sg_host_t host)
{
  return sg_host_get_properties(host);
}
/** Legacy MSG wrapper: returns the value of property @a name on @a host (delegates to sg_host_get_property_value()). */
const char* MSG_host_get_property_value(const_sg_host_t host, const char* name)
{
  return sg_host_get_property_value(host, name);
}
{
sg_host_set_property_value(host, name, value);
}
/** Legacy MSG wrapper: pushes the actors of @a host into the @a whereto dynar (delegates to sg_host_get_actor_list()). */
void MSG_host_get_process_list(const_sg_host_t host, xbt_dynar_t whereto)
{
  sg_host_get_actor_list(host, whereto);
}
return sg_host_self();
}
/** Legacy MSG wrapper: returns the load of @a host (delegates to sg_host_load()). */
double MSG_host_get_load(const_sg_host_t host)
{
  return sg_host_load(host);
}
return sg_barrier_init(count);
}
/** Legacy MSG wrapper: destroys @a bar (delegates to sg_barrier_destroy()). */
void MSG_barrier_destroy(const_sg_bar_t bar)
{
  sg_barrier_destroy(bar);
}
return create(name, host, factory(std::move(args)));
}
/* Refcounting of Actor is delegated to its implementation object (pimpl_). */
void intrusive_ptr_add_ref(const Actor* actor)
{
  intrusive_ptr_add_ref(actor->pimpl_);
}
void intrusive_ptr_release(const Actor* actor)
{
  intrusive_ptr_release(actor->pimpl_);
}
*
* This function checks whether @a actor is a valid pointer and return its PID (or 0 in case of problem).
*/
-aid_t sg_actor_get_PID(sg_actor_t actor)
+aid_t sg_actor_get_PID(const_sg_actor_t actor)
{
/* Do not raise an exception here: this function is called by the logs
* and the exceptions, so it would be called back again and again */
* This function checks whether @a actor is a valid pointer and return its parent's PID.
* Returns -1 if the actor has not been created by any other actor.
*/
/** Returns the PID of the actor that created @a actor (-1 if it was not created by another actor). */
aid_t sg_actor_get_PPID(const_sg_actor_t actor)
{
  return actor->get_ppid();
}
/** @ingroup m_actor_management
* @brief Return the name of an actor.
*/
/** Returns the name of @a actor as a C string. */
const char* sg_actor_get_name(const_sg_actor_t actor)
{
  return actor->get_cname();
}
/** Returns the host on which @a actor is running. */
sg_host_t sg_actor_get_host(const_sg_actor_t actor)
{
  return actor->get_host();
}
* @param name a property name
* @return value of a property (or nullptr if the property is not set)
*/
/** Returns the value of property @a name on @a actor, or nullptr if the property is not set. */
const char* sg_actor_get_property_value(const_sg_actor_t actor, const char* name)
{
  return actor->get_property(name);
}
*
* This function returns all the parameters associated with an actor
*/
-xbt_dict_t sg_actor_get_properties(sg_actor_t actor)
+xbt_dict_t sg_actor_get_properties(const_sg_actor_t actor)
{
xbt_assert(actor != nullptr, "Invalid parameter: First argument must not be nullptr");
xbt_dict_t as_dict = xbt_dict_new_homogeneous(xbt_free_f);
}
/** @brief Initializes a barrier, with count elements */
/* Deletes the given barrier object. */
void sg_barrier_destroy(const_sg_bar_t bar)
{
  delete bar;
}
}
}
-int Comm::wait_any_for(std::vector<CommPtr>* comms, double timeout)
+int Comm::wait_any_for(const std::vector<CommPtr>* comms, double timeout)
{
std::unique_ptr<kernel::activity::CommImpl* []> rcomms(new kernel::activity::CommImpl*[comms->size()]);
std::transform(begin(*comms), end(*comms), rcomms.get(),
return simcall_comm_waitany(rcomms.get(), comms->size(), timeout);
}
-void Comm::wait_all(std::vector<CommPtr>* comms)
+void Comm::wait_all(const std::vector<CommPtr>* comms)
{
// TODO: this should be a simcall or something
// TODO: we are missing a version with timeout
}
return this;
}
-int Comm::test_any(std::vector<CommPtr>* comms)
+int Comm::test_any(const std::vector<CommPtr>* comms)
{
std::unique_ptr<kernel::activity::CommImpl* []> rcomms(new kernel::activity::CommImpl*[comms->size()]);
std::transform(begin(*comms), end(*comms), rcomms.get(),
simcall_cond_wait(cond_, lock->pimpl_);
}
/* Waits on this condition variable, forwarding the lock's underlying mutex implementation to the simcall. */
void ConditionVariable::wait(const std::unique_lock<Mutex>& lock)
{
  simcall_cond_wait(cond_, lock.mutex()->pimpl_);
}
-std::cv_status s4u::ConditionVariable::wait_for(std::unique_lock<Mutex>& lock, double timeout)
+std::cv_status s4u::ConditionVariable::wait_for(const std::unique_lock<Mutex>& lock, double timeout)
{
// The simcall uses -1 for "any timeout" but we don't want this:
if (timeout < 0)
}
}
-std::cv_status ConditionVariable::wait_until(std::unique_lock<Mutex>& lock, double timeout_time)
+std::cv_status ConditionVariable::wait_until(const std::unique_lock<Mutex>& lock, double timeout_time)
{
double now = SIMIX_get_clock();
double timeout;
simgrid::kernel::actor::simcall([this]() { cond_->broadcast(); });
}
/* Refcounting of ConditionVariable is delegated to its implementation object (cond_). */
void intrusive_ptr_add_ref(const ConditionVariable* cond)
{
  intrusive_ptr_add_ref(cond->cond_);
}
void intrusive_ptr_release(const ConditionVariable* cond)
{
  intrusive_ptr_release(cond->cond_);
}
cond->notify_all();
}
/* Deletes the given condition-variable object. */
void sg_cond_destroy(const_sg_cond_t cond)
{
  delete cond;
}
return this->pimpl_->get_read_bandwidth();
}
/** Returns the write bandwidth of this disk (delegates to the pimpl). */
double Disk::get_write_bandwidth() const
{
  return pimpl_->get_write_bandwidth();
}
/** Returns the host associated with this disk (delegates to the pimpl). */
Host* Disk::get_host() const
{
  return pimpl_->get_host();
}
return link == pimpl->links_.end() ? nullptr : link->second->get_iface();
}
/** Registers @a link under @a name in the engine's link table (stores its implementation object). */
void Engine::link_register(const std::string& name, const Link* link)
{
  pimpl->links_[name] = link->get_impl();
}
return storage == pimpl->storages_.end() ? nullptr : storage->second->get_iface();
}
/** Registers @a storage under @a name in the engine's storage table (stores its implementation object). */
void Engine::storage_register(const std::string& name, const Storage* storage)
{
  pimpl->storages_[name] = storage->get_impl();
}
return pimpl->netzone_root_->get_iface();
}
/** @brief Set the root netzone, containing all others. Once set, it cannot be changed. */
-void Engine::set_netzone_root(s4u::NetZone* netzone)
+void Engine::set_netzone_root(const s4u::NetZone* netzone)
{
xbt_assert(pimpl->netzone_root_ == nullptr, "The root NetZone cannot be changed once set");
pimpl->netzone_root_ = netzone->get_impl();
*
* Daemons and regular actors are all mixed in this list.
*/
/** Returns the actors currently on this host, daemonized or not (delegates to the pimpl). */
std::vector<ActorPtr> Host::get_all_actors() const
{
  return pimpl_->get_all_actors();
}
/** @brief Returns how many actors (daemonized or not) have been launched on this host */
int Host::get_actor_count() const
{
  return pimpl_->get_actor_count();
}
* walk through the routing components tree and find a route between hosts
* by calling each "get_route" function in each routing component.
*/
-void Host::route_to(Host* dest, std::vector<Link*>& links, double* latency)
+void Host::route_to(const Host* dest, std::vector<Link*>& links, double* latency) const
{
std::vector<kernel::resource::LinkImpl*> linkImpls;
this->route_to(dest, linkImpls, latency);
}
/** @brief Just like Host::routeTo, but filling an array of link implementations */
-void Host::route_to(Host* dest, std::vector<kernel::resource::LinkImpl*>& links, double* latency)
+void Host::route_to(const Host* dest, std::vector<kernel::resource::LinkImpl*>& links, double* latency) const
{
kernel::routing::NetZoneImpl::get_global_route(pimpl_netpoint_, dest->get_netpoint(), links, latency);
if (XBT_LOG_ISENABLED(surf_route, xbt_log_priority_debug)) {
return res;
}
/** Returns the name of @a host as a C string. */
const char* sg_host_get_name(const_sg_host_t host)
{
  return host->get_cname();
}
/** Retrieves the extension stored under index @a ext on @a host. */
void* sg_host_extension_get(const_sg_host_t host, size_t ext)
{
  return host->extension(ext);
}
// ========= Layering madness ==============*
// ========== User data Layer ==========
/** Returns the user data attached to @a host. */
void* sg_host_data(const_sg_host_t host)
{
  return host->get_data();
}
return res;
}
-xbt_dynar_t sg_host_get_attached_storage_list(sg_host_t host)
+xbt_dynar_t sg_host_get_attached_storage_list(const_sg_host_t host)
{
xbt_dynar_t storage_dynar = xbt_dynar_new(sizeof(const char*), nullptr);
std::vector<const char*> storage_vector = host->get_attached_storages();
// =========== user-level functions ===============
// ================================================
/** @brief Returns the total speed of a host */
/** @brief Returns the total speed of a host */
double sg_host_speed(const_sg_host_t host)
{
  return host->get_speed();
}
* @param pstate_index pstate to test
* @return Returns the processor speed associated with pstate_index
*/
/** Returns the processor speed of @a host at pstate @a pstate_index. */
double sg_host_get_pstate_speed(const_sg_host_t host, int pstate_index)
{
  return host->get_pstate_speed(pstate_index);
}
* @param host a host
* @return the number of cores
*/
/** Returns the number of cores of @a host. */
int sg_host_core_count(const_sg_host_t host)
{
  return host->get_core_count();
}
/** Returns the available speed of @a host. */
double sg_host_get_available_speed(const_sg_host_t host)
{
  return host->get_available_speed();
}
*
* See also @ref plugin_energy.
*/
/** Returns the number of power states (pstates) of @a host. */
int sg_host_get_nb_pstates(const_sg_host_t host)
{
  return host->get_pstate_count();
}
*
* See also @ref plugin_energy.
*/
/** Returns the current pstate of @a host. */
int sg_host_get_pstate(const_sg_host_t host)
{
  return host->get_pstate();
}
* @param host host to test
* @return Returns true if the host is up and running, and false if it's currently down
*/
/** Returns whether @a host is up and running (true) or currently down (false). */
int sg_host_is_on(const_sg_host_t host)
{
  return host->is_on();
}
/** @brief Get the properties of a host */
-xbt_dict_t sg_host_get_properties(sg_host_t host)
+xbt_dict_t sg_host_get_properties(const_sg_host_t host)
{
xbt_dict_t as_dict = xbt_dict_new_homogeneous(xbt_free_f);
const std::unordered_map<std::string, std::string>* props = host->get_properties();
* @param name a property name
* @return value of a property (or nullptr if property not set)
*/
/** Returns the value of property @a name on @a host, or nullptr if the property is not set. */
const char* sg_host_get_property_value(const_sg_host_t host, const char* name)
{
  return host->get_property(name);
}
* @param to where to
* @param links [OUT] where to store the list of links (must exist, cannot be nullptr).
*/
-void sg_host_route(sg_host_t from, sg_host_t to, xbt_dynar_t links)
+void sg_host_route(const_sg_host_t from, const_sg_host_t to, xbt_dynar_t links)
{
std::vector<simgrid::s4u::Link*> vlinks;
from->route_to(to, vlinks, nullptr);
* @param from where from
* @param to where to
*/
-double sg_host_route_latency(sg_host_t from, sg_host_t to)
+double sg_host_route_latency(const_sg_host_t from, const_sg_host_t to)
{
std::vector<simgrid::s4u::Link*> vlinks;
double res = 0;
* @param from where from
* @param to where to
*/
-double sg_host_route_bandwidth(sg_host_t from, sg_host_t to)
+double sg_host_route_bandwidth(const_sg_host_t from, const_sg_host_t to)
{
double min_bandwidth = -1.0;
}
/** @brief Displays debugging information about a host */
-void sg_host_dump(sg_host_t host)
+void sg_host_dump(const_sg_host_t host)
{
XBT_INFO("Displaying host %s", host->get_cname());
XBT_INFO(" - speed: %.0f", host->get_speed());
* @param host a host
* @param whereto a dynar in which we should push actors living on that host
*/
-void sg_host_get_actor_list(sg_host_t host, xbt_dynar_t whereto)
+void sg_host_get_actor_list(const_sg_host_t host, xbt_dynar_t whereto)
{
auto const actors = host->get_all_actors();
for (auto const& actor : actors)
return res;
}
/** Returns the load of @a host. */
double sg_host_load(const_sg_host_t host)
{
  return host->get_load();
}
{
return this->pimpl_->get_cname();
}
/** Tells whether this link is currently used (delegates to the pimpl). */
bool Link::is_used() const
{
  return this->pimpl_->is_used();
}
return this->pimpl_->get_bandwidth();
}
/** Returns the sharing policy of this link (delegates to the pimpl). */
Link::SharingPolicy Link::get_sharing_policy() const
{
  return this->pimpl_->get_sharing_policy();
}
/** Returns the current usage reported by the link's underlying constraint. */
double Link::get_usage() const
{
  return this->pimpl_->get_constraint()->get_usage();
}
/* **************************** Public C interface *************************** */
/* **************************** Public C interface *************************** */
/** Returns the name of @a link as a C string. */
const char* sg_link_name(const_sg_link_t link)
{
  return link->get_cname();
}
return simgrid::s4u::Link::by_name(name);
}
-int sg_link_is_shared(sg_link_t link)
+int sg_link_is_shared(const_sg_link_t link)
{
return (int)link->get_sharing_policy();
}
/** Returns the bandwidth of @a link. */
double sg_link_bandwidth(const_sg_link_t link)
{
  return link->get_bandwidth();
}
/** Returns the latency of @a link. */
double sg_link_latency(const_sg_link_t link)
{
  return link->get_latency();
}
/** Returns the user data attached to @a link. */
void* sg_link_data(const_sg_link_t link)
{
  return link->get_data();
}
}
/* refcounting of the intrusive_ptr is delegated to the implementation object */
/* Refcounting of Mutex is delegated to its implementation object (pimpl_), when present. */
void intrusive_ptr_add_ref(const Mutex* mutex)
{
  xbt_assert(mutex);
  if (mutex->pimpl_)
    mutex->pimpl_->ref();
}
-void intrusive_ptr_release(Mutex* mutex)
+void intrusive_ptr_release(const Mutex* mutex)
{
xbt_assert(mutex);
if (mutex->pimpl_)
return mutex->try_lock();
}
/* Deletes the given mutex object. */
void sg_mutex_destroy(const_sg_mutex_t mutex)
{
  delete mutex;
}
}
/** @brief Returns the list of direct children (no grand-children) */
-std::vector<NetZone*> NetZone::get_children()
+std::vector<NetZone*> NetZone::get_children() const
{
std::vector<NetZone*> res;
for (auto child : *(pimpl_->get_children()))
* Only the hosts that are directly contained in this NetZone are retrieved,
* not the ones contained in sub-netzones.
*/
/** Returns the hosts directly contained in this NetZone (not those of sub-netzones). */
std::vector<Host*> NetZone::get_all_hosts() const
{
  return pimpl_->get_all_hosts();
}
return simgrid::s4u::Engine::get_instance()->get_netzone_root();
}
/** Returns the name of @a netzone as a C string. */
const char* sg_zone_get_name(const_sg_netzone_t netzone)
{
  return netzone->get_cname();
}
return simgrid::s4u::Engine::get_instance()->netzone_by_name_or_null(name);
}
/** Fills @a whereto with the direct children of @a netzone, keyed by their names. */
void sg_zone_get_sons(const_sg_netzone_t netzone, xbt_dict_t whereto)
{
  for (auto const& elem : netzone->get_children()) {
    xbt_dict_set(whereto, elem->get_cname(), static_cast<void*>(elem));
  }
}
/** Returns the value of property @a name on @a netzone, or nullptr if the property is not set. */
const char* sg_zone_get_property_value(const_sg_netzone_t netzone, const char* name)
{
  return netzone->get_property(name);
}
/** Sets property @a name to @a value on @a netzone.
 *  (pre-constification signature took a plain `char* value`) */
void sg_zone_set_property_value(sg_netzone_t netzone, const char* name, const char* value)
{
  netzone->set_property(name, value);
}
-void sg_zone_get_hosts(sg_netzone_t netzone, xbt_dynar_t whereto)
+void sg_zone_get_hosts(const_sg_netzone_t netzone, xbt_dynar_t whereto)
{
/* converts vector to dynar */
std::vector<simgrid::s4u::Host*> hosts = netzone->get_all_hosts();
return sem->get_capacity();
}
/* Deletes the given semaphore object. */
void sg_sem_destroy(const_sg_sem_t sem)
{
  delete sem;
}
return Engine::get_instance()->storage_by_name_or_null(name);
}
/** Returns the type identifier of this storage, as stored in the pimpl's typeId_ string. */
const char* Storage::get_type() const
{
  return pimpl_->typeId_.c_str();
}
*
* This functions checks whether a storage is a valid pointer or not and return its name.
*/
/** Returns the name of @a storage; asserts that the pointer is non-null. */
const char* sg_storage_get_name(const_sg_storage_t storage)
{
  xbt_assert((storage != nullptr), "Invalid parameters");
  return storage->get_cname();
}
-const char* sg_storage_get_host(sg_storage_t storage)
+const char* sg_storage_get_host(const_sg_storage_t storage)
{
xbt_assert((storage != nullptr), "Invalid parameters");
return storage->get_host()->get_cname();
* @param storage a storage
* @return a dict containing the properties
*/
-xbt_dict_t sg_storage_get_properties(sg_storage_t storage)
+xbt_dict_t sg_storage_get_properties(const_sg_storage_t storage)
{
xbt_assert((storage != nullptr), "Invalid parameters (storage is nullptr)");
xbt_dict_t as_dict = xbt_dict_new_homogeneous(xbt_free_f);
* @param name a property name
* @return value of a property (or nullptr if property not set)
*/
/** Returns the value of property @a name on @a storage, or nullptr if the property is not set. */
const char* sg_storage_get_property_value(const_sg_storage_t storage, const char* name)
{
  return storage->get_property(name);
}
return res;
}
-void* sg_storage_get_data(sg_storage_t storage)
+void* sg_storage_get_data(const_sg_storage_t storage)
{
xbt_assert((storage != nullptr), "Invalid parameters");
return storage->get_data();
controlled_process = MSG_process_self(); /* - Get controlled at checkpoint */
MSG_barrier_wait(barrier);
MSG_process_suspend(MSG_process_self());
- msg_host_t h = MSG_process_get_host(MSG_process_self());
+ const_sg_host_t h = MSG_process_get_host(MSG_process_self());
XBT_INFO("I've been moved on this new host: %s", MSG_host_get_name(h));
XBT_INFO("Uh, nothing to do here. Stopping now");
return 0;
sg_host_t *hosts = sg_host_list();
if (argc >= 3) {
if (!strcmp(argv[2], "ONE_LINK")) {
- sg_host_t h1 = hosts[0];
- sg_host_t h2 = hosts[1];
+ const_sg_host_t h1 = hosts[0];
+ const_sg_host_t h2 = hosts[1];
const char *name1 = sg_host_get_name(h1);
const char *name2 = sg_host_get_name(h2);
if (!strcmp(argv[2], "FULL_LINK")) {
int list_size = sg_host_count();
for (int i = 0; i < list_size; i++) {
- sg_host_t h1 = hosts[i];
+ const_sg_host_t h1 = hosts[i];
const char *name1 = sg_host_get_name(h1);
for (int j = 0; j < list_size; j++) {
- sg_host_t h2 = hosts[j];
+ const_sg_host_t h2 = hosts[j];
const char *name2 = sg_host_get_name(h2);
fprintf(stderr, "Route between %s and %s\n", name1, name2);
xbt_dynar_t route = xbt_dynar_new(sizeof(SD_link_t), NULL);
j = rand()%host_count;
} while(i==j);
- sg_host_t h1 = hosts[i];
- sg_host_t h2 = hosts[j];
+ const_sg_host_t h1 = hosts[i];
+ const_sg_host_t h2 = hosts[j];
printf("%d\tand\t%d\t\t",i,j);
xbt_dynar_t route = xbt_dynar_new(sizeof(SD_link_t), NULL);
unsigned int totalHosts = sg_host_count();
sg_host_t* hosts = sg_host_list();
std::sort(hosts, hosts + totalHosts,
- [](sg_host_t a, sg_host_t b) { return strcmp(sg_host_get_name(a), sg_host_get_name(b)) < 0; });
+ [](const_sg_host_t a, const_sg_host_t b) { return strcmp(sg_host_get_name(a), sg_host_get_name(b)) < 0; });
for (unsigned int i = 0; i < totalHosts; i++) {
std::printf(" <host id=\"%s\" speed=\"%.0f\"", hosts[i]->get_cname(), sg_host_speed(hosts[i]));