// Log category shared with the rest of the SURF kernel (defined elsewhere).
4 XBT_LOG_EXTERNAL_CATEGORY(surf_kernel);
// Dedicated log sub-category for the CPU model.
5 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_cpu, surf,
6 "Logging specific to the SURF cpu module");
// Global CPU model singletons: one for physical machines (pm), one for
// virtual machines (vm). Presumably initialized during model setup
// elsewhere -- TODO confirm against the surf initialization code.
9 CpuModelPtr surf_cpu_model_pm;
10 CpuModelPtr surf_cpu_model_vm;
// Lazy-evaluation update: pop from the event heap every action whose
// scheduled date equals `now`, trace its last utilization slice, and mark
// it DONE. `delta` is unused in lazy mode (progress is event-driven).
16 void CpuModel::updateActionsStateLazy(double now, double /*delta*/)
19 CpuActionLmmPtr action;
// Drain all heap entries whose key (event date) is exactly `now`.
20 while ((xbt_heap_size(p_actionHeap) > 0)
21 && (double_equals(xbt_heap_maxkey(p_actionHeap), now))) {
22 action = dynamic_cast<CpuActionLmmPtr>(static_cast<ActionLmmPtr>(xbt_heap_pop(p_actionHeap)));
23 XBT_CDEBUG(surf_kernel, "Something happened to action %p", action);
// Tracing: record this action's CPU utilization since its last update.
// The owning Cpu is recovered through the LMM constraint attached to
// the action's variable.
25 if (TRACE_is_enabled()) {
26 CpuPtr cpu = (CpuPtr) lmm_constraint_id(lmm_get_cnst_from_var(p_maxminSystem, action->p_variable, 0));
27 TRACE_surf_host_set_utilization(cpu->m_name, action->p_category,
28 lmm_variable_getvalue(action->p_variable),
30 now - action->m_lastUpdate);
// The action reached its event date: stamp finish time, account energy,
// and transition it to DONE.
34 action->m_finish = surf_get_clock();
35 XBT_CDEBUG(surf_kernel, "Action %p finished", action);
37 action->updateEnergy();
39 /* set the remains to 0 due to precision problems when updating the remaining amount */
40 action->m_remains = 0;
41 action->setState(SURF_ACTION_DONE);
42 action->heapRemove(p_actionHeap); //FIXME: strange call since action was already popped
// With tracing enabled, find the minimum m_lastUpdate over all still
// running actions: events older than that date can be flushed to the
// trace file without breaking ascending timestamp order.
45 if (TRACE_is_enabled()) {
46 //defining the last timestamp that we can safely dump to trace file
47 //without losing the event ascending order (considering all CPU's)
49 xbt_swag_foreach(_action, p_runningActionSet) {
50 action = dynamic_cast<CpuActionLmmPtr>(static_cast<ActionPtr>(_action));
// First iteration seeds `smaller`; later ones keep the minimum.
52 smaller = action->m_lastUpdate;
55 if (action->m_lastUpdate < smaller) {
56 smaller = action->m_lastUpdate;
60 TRACE_last_timestamp_to_dump = smaller;
// Full-evaluation update: walk every running action, consume `delta`
// seconds of progress at the rate computed by the LMM solver, and finish
// actions that completed their work or exceeded their max duration.
67 void CpuModel::updateActionsStateFull(double now, double delta)
69 void *_action, *_next_action;
70 CpuActionLmmPtr action = NULL;
71 xbt_swag_t running_actions = p_runningActionSet;
// Safe iteration: setState() may move the action out of the running swag.
73 xbt_swag_foreach_safe(_action, _next_action, running_actions) {
74 action = dynamic_cast<CpuActionLmmPtr>(static_cast<ActionPtr>(_action));
// Tracing: utilization of the host CPU over the elapsed interval.
76 if (TRACE_is_enabled()) {
77 CpuPtr x = (CpuPtr) lmm_constraint_id(lmm_get_cnst_from_var
78 (p_maxminSystem, action->p_variable, 0));
80 TRACE_surf_host_set_utilization(x->m_name,
82 lmm_variable_getvalue(action->p_variable),
// In full mode everything up to the start of this interval is final.
85 TRACE_last_timestamp_to_dump = now - delta;
// Consume rate * delta from the remaining work...
89 double_update(&(action->m_remains),
90 lmm_variable_getvalue(action->p_variable) * delta);
// ...and count down the wall-clock deadline, when one is set.
93 if (action->m_maxDuration != NO_MAX_DURATION)
94 double_update(&(action->m_maxDuration), delta);
// Finished all its work. The weight check skips suspended actions
// (zero-weight variables make m_remains meaningless here).
97 if ((action->m_remains <= 0) &&
98 (lmm_get_variable_weight(action->p_variable) > 0)) {
99 action->m_finish = surf_get_clock();
100 action->setState(SURF_ACTION_DONE);
// Or hit its maximum duration.
102 } else if ((action->m_maxDuration != NO_MAX_DURATION) &&
103 (action->m_maxDuration <= 0)) {
104 action->m_finish = surf_get_clock();
105 action->setState(SURF_ACTION_DONE);
107 action->updateEnergy();
// Speed delivered for a given relative load, expressed as a fraction of
// the peak power (load is presumably in [0,1] -- TODO confirm callers).
117 double Cpu::getSpeed(double load)
119 return load * m_powerPeak;
// Currently available speed of this CPU, taking the availability trace
// into account (the scale factor is a number between 0 and 1).
122 double Cpu::getAvailableSpeed()
124 /* number between 0 and 1 */
// Builds an LMM-backed CPU with one LMM constraint per core, each with
// capacity powerScale * powerPeak.
133 CpuLmm::CpuLmm(CpuModelPtr model, const char* name, xbt_dict_t properties, int core, double powerPeak, double powerScale)
134 : ResourceLmm(), Cpu(model, name, properties, core, powerPeak, powerScale) {
135 /* At now, we assume that a VM does not have a multicore CPU. */
// Per-core constraints are only supported on the physical-machine model.
137 xbt_assert(model == surf_cpu_model_pm);
139 p_constraintCore = xbt_new(lmm_constraint_t, core);
142 for (i = 0; i < core; i++) {
143 /* just for a unique id, never used as a string. */
// The bprintf() allocation is owned by the constraint as its id; it is
// presumably reclaimed in the destructor's cleanup loop -- TODO confirm.
144 void *cnst_id = bprintf("%s:%i", name, i);
145 p_constraintCore[i] = lmm_constraint_new(p_model->p_maxminSystem, cnst_id, m_powerScale * m_powerPeak);
// Per-core constraint cleanup. NOTE(review): this fragment appears to
// belong to the CpuLmm destructor (the enclosing signature is elided in
// this view) -- confirm before relying on that.
150 if (p_constraintCore){
151 for (int i = 0; i < m_core; i++) {
// Recover the id string that the constructor allocated with bprintf()
// so it can be freed alongside the constraint.
152 void *cnst_id = p_constraintCore[i]->id;
153 //FIXME:lmm_constraint_free(p_model->p_maxminSystem, p_constraintCore[i]);
156 xbt_free(p_constraintCore);
// Lazy-mode catch-up: consume the work done since m_lastUpdate at the
// cached rate m_lastValue, then refresh the cached rate from the solver.
164 void CpuActionLmm::updateRemainingLazy(double now)
// Only running actions may be updated lazily.
168 xbt_assert(p_stateSet == p_model->p_runningActionSet,
169 "You're updating an action that is not running.");
171 /* bogus priority, skip it */
172 xbt_assert(m_priority > 0,
173 "You're updating an action that seems suspended.");
175 delta = now - m_lastUpdate;
178 XBT_CDEBUG(surf_kernel, "Updating action(%p): remains was %lf, last_update was: %lf", this, m_remains, m_lastUpdate);
// Work done over the interval = cached rate * elapsed time.
179 double_update(&(m_remains), m_lastValue * delta);
// Tracing: report utilization over [m_lastUpdate, now].
182 if (TRACE_is_enabled()) {
183 CpuPtr cpu = (CpuPtr) lmm_constraint_id(lmm_get_cnst_from_var(p_model->p_maxminSystem, p_variable, 0));
184 TRACE_surf_host_set_utilization(cpu->m_name, p_category, m_lastValue, m_lastUpdate, now - m_lastUpdate);
187 XBT_CDEBUG(surf_kernel, "Updating action(%p): remains is now %lf", this, m_remains);
// Cache the freshly-solved rate for the next lazy update.
191 m_lastValue = lmm_variable_getvalue(p_variable);
// Changes the rate bound of this action's LMM variable. In lazy mode the
// pending heap event is dropped, since the new bound invalidates the
// previously-computed event date.
194 void CpuActionLmm::setBound(double bound)
196 XBT_IN("(%p,%g)", this, bound);
198 lmm_update_variable_bound(p_model->p_maxminSystem, p_variable, bound);
200 if (p_model->p_updateMechanism == UM_LAZY)
201 heapRemove(p_model->p_actionHeap);
207 * This function formulates a constraint problem that pins a given task to
208 particular cores. Currently, it is possible to pin a task to exactly one
209 * specific core. The system links the variable object of the task to the
210 * per-core constraint object.
212 * But, the taskset command on Linux takes a mask value specifying a CPU
213 * affinity setting of a given task. If the mask value is 0x03, the given task
214 * will be executed on the first core (CPU0) or the second core (CPU1) on the
215 given PM. The scheduler will determine appropriate placements of tasks,
216 * considering given CPU affinities and task activities.
218 * How should the system formulate constraint problems for an affinity to
221 * The cpu argument must be the host where the task is being executed. The
222 * action object does not have the information about the location where the
223 * action is being executed.
// Applies a core-affinity mask to this action on host `_cpu`: the old
// per-core bindings are cleared via lmm_shrink(), then the variable is
// re-linked (lmm_expand) to the constraint of each core selected in
// `mask`. Only single-core masks are supported (see XBT_CRITICAL below).
225 void CpuActionLmm::setAffinity(CpuPtr _cpu, unsigned long mask)
227 lmm_variable_t var_obj = p_variable;
228 CpuLmmPtr cpu = reinterpret_cast<CpuLmmPtr>(_cpu);
229 XBT_IN("(%p,%lx)", this, mask);
// First pass: count how many cores the mask selects.
232 unsigned long nbits = 0;
234 /* FIXME: there are much faster algorithms for doing this (popcount). */
235 for (int i = 0; i < cpu->m_core; i++) {
236 unsigned long has_affinity = (1UL << i) & mask;
// More than one core selected: unsupported, bail out loudly.
242 XBT_CRITICAL("Do not specify multiple cores for an affinity mask.");
243 XBT_CRITICAL("See the comment in cpu_action_set_affinity().");
// Second pass: reset the binding of the variable for every core, then
// re-attach it to the cores present in the mask.
248 for (int i = 0; i < cpu->m_core; i++) {
249 XBT_DEBUG("clear affinity %p to cpu-%d@%s", this, i, cpu->m_name);
250 lmm_shrink(cpu->p_model->p_maxminSystem, cpu->p_constraintCore[i], var_obj);
252 unsigned long has_affinity = (1UL << i) & mask;
254 /* This function only accepts an affinity setting on the host where the
255 * task is now running. In future, a task might move to another host.
256 * But, at this moment, this function cannot take an affinity setting on
259 * It might be possible to extend the code to allow this function to
260 * accept affinity settings on a future host. We might be able to assign
261 * zero to elem->value to maintain such inactive affinity settings in the
262 * system. But, this will make the system complex. */
263 XBT_DEBUG("set affinity %p to cpu-%d@%s", this, i, cpu->m_name);
264 lmm_expand(cpu->p_model->p_maxminSystem, cpu->p_constraintCore[i], var_obj, 1.0);
// In lazy mode the cached event date may now be stale.
268 if (cpu->p_model->p_updateMechanism == UM_LAZY) {
269 /* FIXME (hypervisor): Do we need to do something for the LAZY mode? */