1 #include "cpu_interface.hpp"
/* Logging setup: reuse the externally-defined "surf_kernel" category and
 * declare "surf_cpu" as this file's default subcategory under "surf". */
XBT_LOG_EXTERNAL_CATEGORY(surf_kernel);
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_cpu, surf,
                                "Logging specific to the SURF cpu module");

/* Global CPU model singletons: one for physical machines (PM), one for
 * virtual machines (VM). NOTE(review): initialized elsewhere — the setup
 * code is not visible in this excerpt. */
CpuModelPtr surf_cpu_model_pm;
CpuModelPtr surf_cpu_model_vm;
/**
 * @brief Lazy-mode state update: complete every CPU action whose heap key
 *        (scheduled completion time) equals the current simulated time.
 *
 * Pops matching actions from the model's action heap, marks them DONE,
 * updates their energy accounting, and — when tracing is enabled — records
 * host utilization and the last timestamp that is safe to dump to the
 * trace file.
 *
 * NOTE(review): this excerpt is elided — several lines (opening/closing
 * braces, the declarations of `_action` and `smaller`, parts of the
 * trailing tracing logic) are missing from the visible text.
 */
void CpuModel::updateActionsStateLazy(double now, double /*delta*/)
  CpuActionLmmPtr action;
  /* Drain every heap entry whose key equals `now` (within double_equals
   * tolerance): each such entry is an action completing right now. */
  while ((xbt_heap_size(p_actionHeap) > 0)
         && (double_equals(xbt_heap_maxkey(p_actionHeap), now))) {
    action = dynamic_cast<CpuActionLmmPtr>(static_cast<ActionLmmPtr>(xbt_heap_pop(p_actionHeap)));
    XBT_CDEBUG(surf_kernel, "Something happened to action %p", action);
    if (TRACE_is_enabled()) {
      /* Resolve the CPU that owns this action through its first LMM
       * constraint, to attribute the utilization to the right host. */
      CpuPtr cpu = (CpuPtr) lmm_constraint_id(lmm_get_cnst_from_var(p_maxminSystem, action->p_variable, 0));
      TRACE_surf_host_set_utilization(cpu->m_name, action->p_category,
                                      lmm_variable_getvalue(action->p_variable),
                                      now - action->m_lastUpdate);

    action->m_finish = surf_get_clock();
    XBT_CDEBUG(surf_kernel, "Action %p finished", action);

    action->updateEnergy();

    /* set the remains to 0 due to precision problems when updating the remaining amount */
    action->m_remains = 0;
    action->setState(SURF_ACTION_DONE);
    action->heapRemove(p_actionHeap); //FIXME: strange call since action was already popped

  if (TRACE_is_enabled()) {
    //defining the last timestamp that we can safely dump to trace file
    //without losing the event ascending order (considering all CPU's)
    /* Scan the running actions for the smallest last-update time.
     * NOTE(review): lines are elided here; the visible assign-then-compare
     * pair on `smaller` looks redundant only because of the elision. */
    xbt_swag_foreach(_action, p_runningActionSet) {
      action = dynamic_cast<CpuActionLmmPtr>(static_cast<ActionPtr>(_action));
      smaller = action->m_lastUpdate;
      if (action->m_lastUpdate < smaller) {
        smaller = action->m_lastUpdate;
    TRACE_last_timestamp_to_dump = smaller;
/**
 * @brief Full-mode state update: advance every running CPU action by the
 *        elapsed interval `delta` ending at time `now`.
 *
 * For each running action this decrements the remaining work by
 * (LMM share × delta), decrements any max-duration budget, and marks the
 * action DONE when either the work is finished (and the action is not
 * suspended, i.e. its LMM weight is > 0) or its max duration expired.
 *
 * NOTE(review): this excerpt is elided — braces and parts of the tracing
 * call arguments are missing from the visible text.
 */
void CpuModel::updateActionsStateFull(double now, double delta)
  void *_action, *_next_action;
  CpuActionLmmPtr action = NULL;
  xbt_swag_t running_actions = p_runningActionSet;

  /* _safe variant: actions may leave the swag while we iterate. */
  xbt_swag_foreach_safe(_action, _next_action, running_actions) {
    action = dynamic_cast<CpuActionLmmPtr>(static_cast<ActionPtr>(_action));
    if (TRACE_is_enabled()) {
      /* Find the host CPU via the action's first LMM constraint. */
      CpuPtr x = (CpuPtr) lmm_constraint_id(lmm_get_cnst_from_var
                          (p_maxminSystem, action->p_variable, 0));
      TRACE_surf_host_set_utilization(x->m_name,
                                      lmm_variable_getvalue(action->p_variable),
      TRACE_last_timestamp_to_dump = now - delta;

    /* Consume work: remaining -= (allocated share over this interval). */
    double_update(&(action->m_remains),
                  lmm_variable_getvalue(action->p_variable) * delta);

    if (action->m_maxDuration != NO_MAX_DURATION)
      double_update(&(action->m_maxDuration), delta);

    /* Finished: no work left and not suspended (weight > 0). */
    if ((action->m_remains <= 0) &&
        (lmm_get_variable_weight(action->p_variable) > 0)) {
      action->m_finish = surf_get_clock();
      action->setState(SURF_ACTION_DONE);
    } else if ((action->m_maxDuration != NO_MAX_DURATION) &&
               (action->m_maxDuration <= 0)) {
      /* Timed out: max duration exhausted before the work completed. */
      action->m_finish = surf_get_clock();
      action->setState(SURF_ACTION_DONE);
    action->updateEnergy();
115 double Cpu::getSpeed(double load)
117 return load * m_powerPeak;
/**
 * @brief Return the currently available speed fraction of this CPU.
 * NOTE(review): the body is elided from this excerpt; only the signature
 * and the comment on the returned range are visible.
 */
double Cpu::getAvailableSpeed()
  /* number between 0 and 1 */
/**
 * @brief Build an LMM-backed CPU with one LMM constraint per core.
 *
 * Each core gets its own constraint whose capacity is
 * m_powerScale * m_powerPeak, plus a unique id string (never read back as
 * a string — only used as an opaque identifier).
 *
 * NOTE(review): this excerpt is elided — the declaration of loop index `i`
 * and the closing braces are missing from the visible text.
 */
CpuLmm::CpuLmm(CpuModelPtr model, const char* name, xbt_dict_t properties, int core, double powerPeak, double powerScale)
 : ResourceLmm(), Cpu(model, name, properties, core, powerPeak, powerScale) {
  /* At now, we assume that a VM does not have a multicore CPU. */
  xbt_assert(model == surf_cpu_model_pm);

  /* Parallel arrays: one constraint and one id per core. */
  p_constraintCore = xbt_new(lmm_constraint_t, core);
  p_constraintCoreId = xbt_new(void*, core);

  for (i = 0; i < core; i++) {
    /* just for a unique id, never used as a string. */
    p_constraintCoreId[i] = bprintf("%s:%i", name, i);
    p_constraintCore[i] = lmm_constraint_new(p_model->p_maxminSystem, p_constraintCoreId[i], m_powerScale * m_powerPeak);
  /* NOTE(review): destructor body fragment — the enclosing signature
   * (presumably CpuLmm::~CpuLmm) is elided from this excerpt. Releases the
   * per-core id strings and both per-core arrays allocated in the ctor;
   * guarded so a CPU without per-core constraints is a no-op. */
  if (p_constraintCore){
    for (int i = 0; i < m_core; i++) {
      xbt_free(p_constraintCoreId[i]);
    xbt_free(p_constraintCore);
    xbt_free(p_constraintCoreId);
/**
 * @brief Lazily refresh this action's remaining work up to time `now`.
 *
 * Consumes (last observed LMM share × elapsed time) from m_remains, traces
 * the utilization over that interval when tracing is on, then re-samples
 * the current LMM share into m_lastValue for the next lazy update.
 *
 * Preconditions (asserted): the action is in the running set and has a
 * positive priority (i.e. it is not suspended).
 *
 * NOTE(review): this excerpt is elided — the declaration of `delta`,
 * braces, and possibly the m_lastUpdate refresh are not visible here.
 */
void CpuActionLmm::updateRemainingLazy(double now)

  xbt_assert(p_stateSet == p_model->p_runningActionSet,
             "You're updating an action that is not running.");

  /* bogus priority, skip it */
  xbt_assert(m_priority > 0,
             "You're updating an action that seems suspended.");

  delta = now - m_lastUpdate;

  XBT_CDEBUG(surf_kernel, "Updating action(%p): remains was %lf, last_update was: %lf", this, m_remains, m_lastUpdate);
  /* Work done since the last update = last sampled share × elapsed time. */
  double_update(&(m_remains), m_lastValue * delta);

  if (TRACE_is_enabled()) {
    /* Attribute the consumed share to the owning host for the trace. */
    CpuPtr cpu = (CpuPtr) lmm_constraint_id(lmm_get_cnst_from_var(p_model->p_maxminSystem, p_variable, 0));
    TRACE_surf_host_set_utilization(cpu->m_name, p_category, m_lastValue, m_lastUpdate, now - m_lastUpdate);

  XBT_CDEBUG(surf_kernel, "Updating action(%p): remains is now %lf", this, m_remains);

  /* Re-sample the current share for the next lazy update. */
  m_lastValue = lmm_variable_getvalue(p_variable);
/**
 * @brief Change this action's LMM bound (cap on its resource share).
 *
 * Updates the bound in the max-min system; in lazy mode the action is
 * removed from the heap since its completion estimate is now stale.
 *
 * NOTE(review): the tail of this function (likely XBT_OUT and the closing
 * brace) is elided from this excerpt.
 */
void CpuActionLmm::setBound(double bound)
  XBT_IN("(%p,%g)", this, bound);

  lmm_update_variable_bound(p_model->p_maxminSystem, p_variable, bound);

  if (p_model->p_updateMechanism == UM_LAZY)
    heapRemove(p_model->p_actionHeap);
205 * This function formulates a constraint problem that pins a given task to
particular cores. Currently, it is possible to pin a task to exactly one
207 * specific core. The system links the variable object of the task to the
208 * per-core constraint object.
210 * But, the taskset command on Linux takes a mask value specifying a CPU
211 * affinity setting of a given task. If the mask value is 0x03, the given task
212 * will be executed on the first core (CPU0) or the second core (CPU1) on the
given PM. The scheduler will determine appropriate placements of tasks,
214 * considering given CPU affinities and task activities.
216 * How should the system formulate constraint problems for an affinity to
219 * The cpu argument must be the host where the task is being executed. The
220 * action object does not have the information about the location where the
221 * action is being executed.
/**
 * @brief Apply a CPU-affinity mask to this action on the host `_cpu`.
 *
 * Clears any previous per-core binding by shrinking the action's LMM
 * variable out of every core constraint, then expands it back into the
 * core(s) selected by `mask`. The bit-count pass rejects masks naming
 * multiple cores (see the comment block above: only a single specific
 * core is supported at the moment).
 *
 * @param _cpu the host on which the action is currently executing
 *             (the action itself carries no location information)
 * @param mask taskset-style bitmask; bit i selects core i
 *
 * NOTE(review): this excerpt is elided — the bit-counting logic between
 * the two loops, several braces, and the tail of the function are missing
 * from the visible text (it ends mid-statement).
 */
void CpuActionLmm::setAffinity(CpuPtr _cpu, unsigned long mask)
  lmm_variable_t var_obj = p_variable;
  CpuLmmPtr cpu = reinterpret_cast<CpuLmmPtr>(_cpu);
  XBT_IN("(%p,%lx)", this, mask);

  unsigned long nbits = 0;

  /* FIXME: There is much faster algorithms doing this. */
  /* First pass: count the bits set in `mask` to validate the request. */
  for (int i = 0; i < cpu->m_core; i++) {
    unsigned long has_affinity = (1UL << i) & mask;

      XBT_CRITICAL("Do not specify multiple cores for an affinity mask.");
      XBT_CRITICAL("See the comment in cpu_action_set_affinity().");

  /* Second pass: detach the variable from every core constraint, then
   * re-attach it only to the core(s) selected by `mask`. */
  for (int i = 0; i < cpu->m_core; i++) {
    XBT_DEBUG("clear affinity %p to cpu-%d@%s", this, i, cpu->m_name);
    lmm_shrink(cpu->p_model->p_maxminSystem, cpu->p_constraintCore[i], var_obj);

    unsigned long has_affinity = (1UL << i) & mask;

      /* This function only accepts an affinity setting on the host where the
       * task is now running. In future, a task might move to another host.
       * But, at this moment, this function cannot take an affinity setting on

       * It might be possible to extend the code to allow this function to
       * accept affinity settings on a future host. We might be able to assign
       * zero to elem->value to maintain such inactive affinity settings in the
       * system. But, this will make the system complex. */
      XBT_DEBUG("set affinity %p to cpu-%d@%s", this, i, cpu->m_name);
      lmm_expand(cpu->p_model->p_maxminSystem, cpu->p_constraintCore[i], var_obj, 1.0);

  if (cpu->p_model->p_updateMechanism == UM_LAZY) {
    /* FIXME (hypervisor): Do we need to do something for the LAZY mode? */