1 /* Copyright (c) 2009-2013. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
7 #include "surf_private.h"
8 #include "surf/surf_resource.h"
9 #include "maxmin_private.h"
10 #include "simgrid/sg_config.h"
11 #include "cpu_cas01_private.h"
16 /* the model objects for physical machines and virtual machines */
/* Both are created by surf_cpu_model_init_Cas01() below; PM and VM layers
 * each get their own model instance. */
17 surf_model_t surf_cpu_model_pm = NULL;
18 surf_model_t surf_cpu_model_vm = NULL;
20 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_cpu, surf,
21 "Logging specific to the SURF CPU IMPROVED module");
/* Swag of running actions that never need state checking (e.g. sleeps with
 * NO_MAX_DURATION); allocated in surf_cpu_model_init_cas01(), freed in
 * cpu_finalize(). NOTE(review): the type specifier of this declaration is
 * on a line not visible in this chunk. */
24 cpu_running_action_set_that_does_not_need_being_checked = NULL;
26 /* Additionnal callback function to cleanup some data, called from surf_resource_free */
/* Frees the per-CPU energy bookkeeping: every inner (min,max) watts tuple,
 * the list that held them, the pstate peak-power list, and the energy struct.
 * NOTE(review): the declaration of the 'iter' foreach cursor is on a line not
 * visible in this chunk. */
28 static void cpu_cas01_cleanup(void* r){
29 cpu_Cas01_t cpu = (cpu_Cas01_t)r;
31 xbt_dynar_t power_tuple = NULL;
/* free each inner tuple first, then the outer list itself */
32 xbt_dynar_foreach(cpu->energy->power_range_watts_list, iter, power_tuple)
33 xbt_dynar_free(&power_tuple);
34 xbt_dynar_free(&cpu->energy->power_range_watts_list);
35 xbt_dynar_free(&cpu->power_peak_list);
36 xbt_free(cpu->energy);
40 /* This function is registered as a callback to sg_platf_new_host() and never called directly */
/* Builds one CPU resource: checks the name is unique, allocates the surf
 * resource, initializes pstate/energy accounting, registers power and state
 * traces, and creates the LMM constraint(s). Returns the host_lib element for
 * 'name'. NOTE(review): several parameter lines (pstate, power_scale, core)
 * and some statements are not visible in this chunk. */
41 void *cpu_cas01_create_resource(const char *name, xbt_dynar_t power_peak,
44 tmgr_trace_t power_trace,
46 e_surf_resource_state_t state_initial,
47 tmgr_trace_t state_trace,
48 xbt_dict_t cpu_properties,
49 surf_model_t cpu_model)
51 cpu_Cas01_t cpu = NULL;
/* a host must appear only once in the platform file */
53 xbt_assert(!surf_cpu_resource_priv(surf_cpu_resource_by_name(name)),
54 "Host '%s' declared several times in the platform file",
56 cpu = (cpu_Cas01_t) surf_resource_new(sizeof(s_cpu_Cas01_t),
58 cpu_properties, &cpu_cas01_cleanup);
/* current peak speed is the entry of power_peak selected by the initial
 * pstate; the dynar itself is kept for later pstate switches */
59 cpu->power_peak = xbt_dynar_get_as(power_peak, pstate, double);
60 cpu->power_peak_list = power_peak;
/* energy accounting starts at zero, stamped with the current clock */
63 cpu->energy = xbt_new(s_energy_cpu_cas01_t, 1);
64 cpu->energy->total_energy = 0;
65 cpu->energy->power_range_watts_list = cpu_get_watts_range_list(cpu);
66 cpu->energy->last_updated = surf_get_clock();
68 XBT_DEBUG("CPU create: peak=%f, pstate=%d",cpu->power_peak, cpu->pstate);
70 xbt_assert(cpu->power_peak > 0, "Power has to be >0");
71 cpu->power_scale = power_scale;
73 xbt_assert(core > 0, "Invalid number of cores %d", core);
/* availability and on/off traces are hooked into the trace manager */
77 tmgr_history_add_trace(history, power_trace, 0.0, 0, cpu);
79 cpu->state_current = state_initial;
82 tmgr_history_add_trace(history, state_trace, 0.0, 0, cpu);
/* basic constraint: aggregate capacity of all cores */
85 lmm_constraint_new(cpu_model->model_private->maxmin_system, cpu,
86 cpu->core * cpu->power_scale * cpu->power_peak);
88 /* Note (hypervisor): we create a constraint object for each CPU core, which
89 * is used for making a contraint problem of CPU affinity. */
92 /* At now, we assume that a VM does not have a multicore CPU. */
94 xbt_assert(cpu_model == surf_cpu_model_pm);
96 cpu->constraint_core = xbt_new(lmm_constraint_t, core);
99 for (i = 0; i < core; i++) {
100 /* just for a unique id, never used as a string. */
101 void *cnst_id = bprintf("%s:%lu", name, i);
102 cpu->constraint_core[i] =
103 lmm_constraint_new(cpu_model->model_private->maxmin_system, cnst_id,
104 cpu->power_scale * cpu->power_peak);
108 xbt_lib_set(host_lib, name, SURF_CPU_LEVEL, cpu);
110 return xbt_lib_get_elm_or_null(host_lib, name);;
/* Platform-parser callback: creates the CPU of a physical machine. */
114 static void parse_cpu_init(sg_platf_host_cbarg_t host)
116 /* This function is called when a platform file is parsed. Physical machines
117 * are defined there. Thus, we use the cpu model object for the physical
 * machine layer (presumably surf_cpu_model_pm — TODO confirm, the trailing
 * argument lines are not visible in this chunk). */
119 cpu_cas01_create_resource(host->id,
/* ...power peak list, pstate, scale, traces, etc. (lines elided here)... */
126 host->state_trace, host->properties,
/* Connects availability and state traces declared in the deployment to the
 * already-created host resources. NOTE(review): the 'called' guard logic and
 * the host->*_event assignments are on lines not visible in this chunk. */
130 static void cpu_add_traces_cpu(void)
132 xbt_dict_cursor_t cursor = NULL;
133 char *trace_name, *elm;
/* run-once guard (set on a line elided from this chunk) */
134 static int called = 0;
139 /* connect all traces relative to hosts */
140 xbt_dict_foreach(trace_connect_list_host_avail, cursor, trace_name, elm) {
141 tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
142 cpu_Cas01_t host = surf_cpu_resource_by_name(elm);
144 xbt_assert(host, "Host %s undefined", elm);
145 xbt_assert(trace, "Trace %s undefined", trace_name);
148 tmgr_history_add_trace(history, trace, 0.0, 0, host);
/* same wiring for the power (peak speed) traces */
151 xbt_dict_foreach(trace_connect_list_power, cursor, trace_name, elm) {
152 tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
153 cpu_Cas01_t host = surf_cpu_resource_by_name(elm);
155 xbt_assert(host, "Host %s undefined", elm);
156 xbt_assert(trace, "Trace %s undefined", trace_name);
159 tmgr_history_add_trace(history, trace, 0.0, 0, host);
/* Registers the parser callbacks: one per <host> tag, one after parsing. */
163 static void cpu_define_callbacks_cas01()
165 sg_platf_host_add_cb(parse_cpu_init);
166 sg_platf_postparse_add_cb(cpu_add_traces_cpu);
/* Returns non-zero when some LMM variable is attached to this CPU's basic
 * constraint, i.e. the resource is currently in use. */
169 static int cpu_resource_used(void *resource)
171 surf_model_t cpu_model = ((surf_resource_t) resource)->model;
173 /* Note (hypervisor): we do not need to look up constraint_core[i] here. Even
174 * when a task is pinned or not, its variable object is always linked to the
175 * basic contraint object.
178 return lmm_constraint_used(cpu_model->model_private->maxmin_system,
179 ((cpu_Cas01_t) resource)->constraint);
/* Lazy-update sharing: delegates to the generic lazy solver. */
182 static double cpu_share_resources_lazy(surf_model_t cpu_model, double now)
184 return generic_share_resources_lazy(now, cpu_model);
/* Full-update sharing: solves the whole max-min system with lmm_solve.
 * NOTE(review): some argument lines of the call are not visible here. */
187 static double cpu_share_resources_full(surf_model_t cpu_model, double now)
/* dummy used only to compute the swag hookup offset below */
189 s_surf_action_cpu_Cas01_t action;
190 return generic_maxmin_share_resources(cpu_model->states.
192 xbt_swag_offset(action,
195 cpu_model->model_private->maxmin_system, lmm_solve);
/* Lazy-mode action-state update: delegates to the generic implementation. */
198 static void cpu_update_actions_state_lazy(surf_model_t cpu_model, double now, double delta)
200 generic_update_actions_state_lazy(now, delta, cpu_model);
/* Full-mode action-state update: delegates to the generic implementation. */
203 static void cpu_update_actions_state_full(surf_model_t cpu_model, double now, double delta)
205 generic_update_actions_state_full(now, delta, cpu_model);
208 xbt_dynar_t cpu_get_watts_range_list(cpu_Cas01_t cpu_model)
210 xbt_dynar_t power_range_list;
211 xbt_dynar_t power_tuple;
212 int i = 0, pstate_nb=0;
213 xbt_dynar_t current_power_values;
214 double min_power, max_power;
215 xbt_dict_t props = cpu_model->generic_resource.properties;
220 char* all_power_values_str = xbt_dict_get_or_null(props, "power_per_state");
222 if (all_power_values_str == NULL)
226 power_range_list = xbt_dynar_new(sizeof(xbt_dynar_t), NULL);
227 xbt_dynar_t all_power_values = xbt_str_split(all_power_values_str, ",");
229 pstate_nb = xbt_dynar_length(all_power_values);
230 for (i=0; i< pstate_nb; i++)
232 /* retrieve the power values associated with the current pstate */
233 current_power_values = xbt_str_split(xbt_dynar_get_as(all_power_values, i, char*), ":");
234 xbt_assert(xbt_dynar_length(current_power_values) > 1,
235 "Power properties incorrectly defined - could not retrieve min and max power values for host %s",
236 cpu_model->generic_resource.name);
238 /* min_power corresponds to the idle power (cpu load = 0) */
239 /* max_power is the power consumed at 100% cpu load */
240 min_power = atof(xbt_dynar_get_as(current_power_values, 0, char*));
241 max_power = atof(xbt_dynar_get_as(current_power_values, 1, char*));
243 power_tuple = xbt_dynar_new(sizeof(double), NULL);
244 xbt_dynar_push_as(power_tuple, double, min_power);
245 xbt_dynar_push_as(power_tuple, double, max_power);
247 xbt_dynar_push_as(power_range_list, xbt_dynar_t, power_tuple);
248 xbt_dynar_free(¤t_power_values);
250 xbt_dynar_free(&all_power_values);
251 return power_range_list;
256 * Computes the power consumed by the host according to the current pstate and processor load
 * Linear model: P(load) = min_power + load * (max_power - min_power).
 * NOTE(review): the early-return branch for a missing power range list is on
 * lines not visible in this chunk. */
259 static double cpu_get_current_watts_value(cpu_Cas01_t cpu_model, double cpu_load)
261 xbt_dynar_t power_range_list = cpu_model->energy->power_range_watts_list;
263 if (power_range_list == NULL)
265 XBT_DEBUG("No power range properties specified for host %s", cpu_model->generic_resource.name);
/* one (min,max) tuple is required per pstate */
268 xbt_assert(xbt_dynar_length(power_range_list) == xbt_dynar_length(cpu_model->power_peak_list),
269 "The number of power ranges in the properties does not match the number of pstates for host %s",
270 cpu_model->generic_resource.name);
272 /* retrieve the power values associated with the current pstate */
273 xbt_dynar_t current_power_values = xbt_dynar_get_as(power_range_list, cpu_model->pstate, xbt_dynar_t);
275 /* min_power corresponds to the idle power (cpu load = 0) */
276 /* max_power is the power consumed at 100% cpu load */
277 double min_power = xbt_dynar_get_as(current_power_values, 0, double);
278 double max_power = xbt_dynar_get_as(current_power_values, 1, double);
279 double power_slope = max_power - min_power;
281 double current_power = min_power + cpu_load * power_slope;
283 XBT_DEBUG("[get_current_watts] min_power=%f, max_power=%f, slope=%f", min_power, max_power, power_slope);
284 XBT_DEBUG("[get_current_watts] Current power (watts) = %f, load = %f", current_power, cpu_load);
286 return current_power;
291 * Updates the total energy consumed as the sum of the current energy and
292 * the energy consumed by the current action
 * Energy added = P(cpu_load) * (now - last_updated), in joules if power is
 * in watts and time in seconds. Also advances the last_updated stamp. */
294 void cpu_update_energy(cpu_Cas01_t cpu_model, double cpu_load)
297 double start_time = cpu_model->energy->last_updated;
298 double finish_time = surf_get_clock();
300 XBT_DEBUG("[cpu_update_energy] action time interval=(%f-%f), current power peak=%f, current pstate=%d",
301 start_time, finish_time, cpu_model->power_peak, cpu_model->pstate);
302 double current_energy = cpu_model->energy->total_energy;
303 double action_energy = cpu_get_current_watts_value(cpu_model, cpu_load)*(finish_time-start_time);
305 cpu_model->energy->total_energy = current_energy + action_energy;
306 cpu_model->energy->last_updated = finish_time;
308 XBT_DEBUG("[cpu_update_energy] old_energy_value=%f, action_energy_value=%f", current_energy, action_energy);
/* Trace-manager callback: applies a power-availability or on/off state event
 * to the CPU at the given date. NOTE(review): several lines (trailing call
 * arguments, some braces, the ON/OFF branch structure) are not visible in
 * this chunk. */
312 static void cpu_update_resource_state(void *id,
313 tmgr_trace_event_t event_type,
314 double value, double date)
316 cpu_Cas01_t cpu = id;
317 lmm_variable_t var = NULL;
318 lmm_element_t elem = NULL;
319 surf_model_t cpu_model = ((surf_resource_t) cpu)->model;
321 if (event_type == cpu->power_event) {
322 /* TODO (Hypervisor): do the same thing for constraint_core[i] */
323 xbt_assert(cpu->core == 1, "FIXME: add power scaling code also for constraint_core[i]");
/* new availability factor in [0,1]; rescale the basic constraint bound */
325 cpu->power_scale = value;
326 lmm_update_constraint_bound(cpu_model->model_private->maxmin_system, cpu->constraint,
327 cpu->core * cpu->power_scale *
330 TRACE_surf_host_set_power(date, cpu->generic_resource.name,
331 cpu->core * cpu->power_scale *
/* re-bound every variable attached to this constraint */
334 while ((var = lmm_get_var_from_cnst
335 (cpu_model->model_private->maxmin_system, cpu->constraint, &elem))) {
336 surf_action_cpu_Cas01_t action = lmm_variable_id(var);
337 lmm_update_variable_bound(cpu_model->model_private->maxmin_system,
338 GENERIC_LMM_ACTION(action).variable,
339 cpu->power_scale * cpu->power_peak);
341 if (tmgr_trace_event_free(event_type))
342 cpu->power_event = NULL;
343 } else if (event_type == cpu->state_event) {
344 /* TODO (Hypervisor): do the same thing for constraint_core[i] */
345 xbt_assert(cpu->core == 1, "FIXME: add state change code also for constraint_core[i]");
/* host coming back up: remember it so restart handling can run */
348 if(cpu->state_current == SURF_RESOURCE_OFF)
349 xbt_dynar_push_as(host_that_restart, char*, (cpu->generic_resource.name));
350 cpu->state_current = SURF_RESOURCE_ON;
352 lmm_constraint_t cnst = cpu->constraint;
354 cpu->state_current = SURF_RESOURCE_OFF;
/* host going down: fail every action still attached to the constraint */
356 while ((var = lmm_get_var_from_cnst(cpu_model->model_private->maxmin_system, cnst, &elem))) {
357 surf_action_t action = lmm_variable_id(var);
359 if (surf_action_state_get(action) == SURF_ACTION_RUNNING ||
360 surf_action_state_get(action) == SURF_ACTION_READY ||
361 surf_action_state_get(action) ==
362 SURF_ACTION_NOT_IN_THE_SYSTEM) {
363 action->finish = date;
364 surf_action_state_set(action, SURF_ACTION_FAILED);
368 if (tmgr_trace_event_free(event_type))
369 cpu->state_event = NULL;
371 XBT_CRITICAL("Unknown event ! \n");
381 * This function formulates a constraint problem that pins a given task to
382 * particular cores. Currently, it is possible to pin a task to an exactly one
383 * specific core. The system links the variable object of the task to the
384 * per-core constraint object.
386 * But, the taskset command on Linux takes a mask value specifying a CPU
387 * affinity setting of a given task. If the mask value is 0x03, the given task
388 * will be executed on the first core (CPU0) or the second core (CPU1) on the
389 * given PM. The schedular will determine appropriate placements of tasks,
390 * considering given CPU affinities and task activities.
392 * How should the system formulate constraint problems for an affinity to
395 * The cpu argument must be the host where the task is being executed. The
396 * action object does not have the information about the location where the
397 * action is being executed.
 */
/* NOTE(review): several lines of this function (the popcount of the mask,
 * loop braces, the has_affinity conditional) are not visible in this chunk. */
399 static void cpu_action_set_affinity(surf_action_t action, void *cpu, unsigned long mask)
401 lmm_variable_t var_obj = ((surf_action_lmm_t) action)->variable;
403 surf_model_t cpu_model = action->model_obj;
404 xbt_assert(cpu_model->type == SURF_MODEL_TYPE_CPU);
405 cpu_Cas01_t CPU = surf_cpu_resource_priv(cpu);
407 XBT_IN("(%p,%lx)", action, mask);
/* count how many cores the mask selects; only a single core is supported */
410 unsigned long nbits = 0;
412 /* FIXME: There is much faster algorithms doing this. */
414 for (i = 0; i < CPU->core; i++) {
415 unsigned long has_affinity = (1UL << i) & mask;
421 XBT_CRITICAL("Do not specify multiple cores for an affinity mask.");
422 XBT_CRITICAL("See the comment in cpu_action_set_affinity().");
/* clear any previous per-core binding, then install the requested one */
430 for (i = 0; i < CPU->core; i++) {
431 XBT_DEBUG("clear affinity %p to cpu-%lu@%s", action, i, CPU->generic_resource.name);
432 lmm_shrink(cpu_model->model_private->maxmin_system, CPU->constraint_core[i], var_obj);
434 unsigned long has_affinity = (1UL << i) & mask;
436 /* This function only accepts an affinity setting on the host where the
437 * task is now running. In future, a task might move to another host.
438 * But, at this moment, this function cannot take an affinity setting on
441 * It might be possible to extend the code to allow this function to
442 * accept affinity settings on a future host. We might be able to assign
443 * zero to elem->value to maintain such inactive affinity settings in the
444 * system. But, this will make the system complex. */
445 XBT_DEBUG("set affinity %p to cpu-%lu@%s", action, i, CPU->generic_resource.name);
446 lmm_expand(cpu_model->model_private->maxmin_system, CPU->constraint_core[i], var_obj, 1.0);
450 if (cpu_model->model_private->update_mechanism == UM_LAZY) {
451 /* FIXME (hypervisor): Do we need to do something for the LAZY mode? */
/* Creates a computation action of 'size' flops on the given CPU: allocates
 * the surf action, creates its LMM variable bounded by one core's capacity,
 * and attaches it to the CPU's basic constraint. NOTE(review): some lines
 * (the action assignment around surf_action_new, braces) are not visible in
 * this chunk. */
457 static surf_action_t cpu_execute(void *cpu, double size)
459 surf_action_cpu_Cas01_t action = NULL;
460 //xbt_dict_cursor_t cursor = NULL;
461 cpu_Cas01_t CPU = surf_cpu_resource_priv(cpu);
462 surf_model_t cpu_model = ((surf_resource_t) CPU)->model;
464 XBT_IN("(%s,%g)", surf_resource_name(CPU), size);
/* action starts FAILED if the host is currently off */
466 surf_action_new(sizeof(s_surf_action_cpu_Cas01_t), size,
468 CPU->state_current != SURF_RESOURCE_ON);
470 GENERIC_LMM_ACTION(action).suspended = 0; /* Should be useless because of the
471 calloc but it seems to help valgrind... */
473 /* Note (hypervisor): here, the bound value of the variable is set to the
474 * capacity of a CPU core. But, after MSG_{task/vm}_set_bound() were added to
475 * the hypervisor branch, this bound value is overwritten in
476 * SIMIX_host_execute().
477 * TODO: cleanup this.
479 GENERIC_LMM_ACTION(action).variable =
480 lmm_variable_new(cpu_model->model_private->maxmin_system, action,
481 GENERIC_ACTION(action).priority,
482 CPU->power_scale * CPU->power_peak, 1 + CPU->core); // the basic constraint plus core-specific constraints
483 if (cpu_model->model_private->update_mechanism == UM_LAZY) {
/* lazy mode bookkeeping: not in the heap yet, fresh timestamps */
484 GENERIC_LMM_ACTION(action).index_heap = -1;
485 GENERIC_LMM_ACTION(action).last_update = surf_get_clock();
486 GENERIC_LMM_ACTION(action).last_value = 0.0;
488 lmm_expand(cpu_model->model_private->maxmin_system, CPU->constraint,
489 GENERIC_LMM_ACTION(action).variable, 1.0);
491 return (surf_action_t) action;
/* Creates a sleep action: implemented as a zero-weight execute action whose
 * max_duration is the requested delay. NOTE(review): some braces and lines
 * are not visible in this chunk. */
494 static surf_action_t cpu_action_sleep(void *cpu, double duration)
496 surf_action_cpu_Cas01_t action = NULL;
497 cpu_Cas01_t CPU = surf_cpu_resource_priv(cpu);
498 surf_model_t cpu_model = ((surf_resource_t) CPU)->model;
/* clamp to the solver precision so the duration is representable */
501 duration = MAX(duration, MAXMIN_PRECISION);
503 XBT_IN("(%s,%g)", surf_resource_name(surf_cpu_resource_priv(cpu)), duration);
504 action = (surf_action_cpu_Cas01_t) cpu_execute(cpu, 1.0);
505 // FIXME: sleep variables should not consume 1.0 in lmm_expand
506 GENERIC_ACTION(action).max_duration = duration;
/* suspended == 2 marks a sleep (distinct from a user suspend == 1) */
507 GENERIC_LMM_ACTION(action).suspended = 2;
508 if (duration == NO_MAX_DURATION) {
509 /* Move to the *end* of the corresponding action set. This convention
510 is used to speed up update_resource_state */
511 xbt_swag_remove(action, ((surf_action_t) action)->state_set);
512 ((surf_action_t) action)->state_set =
513 cpu_running_action_set_that_does_not_need_being_checked;
514 xbt_swag_insert(action, ((surf_action_t) action)->state_set);
/* weight 0: a sleep consumes no CPU share in the max-min system */
517 lmm_update_variable_weight(cpu_model->model_private->maxmin_system,
518 GENERIC_LMM_ACTION(action).variable, 0.0);
519 if (cpu_model->model_private->update_mechanism == UM_LAZY) { // remove action from the heap
520 surf_action_lmm_heap_remove(cpu_model->model_private->action_heap,(surf_action_lmm_t)action);
521 // this is necessary for a variable with weight 0 since such
522 // variables are ignored in lmm and we need to set its max_duration
523 // correctly at the next call to share_resources
524 xbt_swag_insert_at_head(action, cpu_model->model_private->modified_set);
528 return (surf_action_t) action;
531 static e_surf_resource_state_t cpu_get_state(void *cpu)
533 return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->state_current;
536 static void cpu_set_state(void *cpu, e_surf_resource_state_t state)
538 ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->state_current = state;
541 static double cpu_get_speed(void *cpu, double load)
543 return load * ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_peak;
546 static int cpu_get_core(void *cpu)
548 return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->core;
550 static double cpu_get_available_speed(void *cpu)
552 /* number between 0 and 1 */
553 return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_scale;
556 static double cpu_get_current_power_peak(void *cpu)
558 return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_peak;
561 static double cpu_get_power_peak_at(void *cpu, int pstate_index)
563 xbt_dynar_t plist = ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_peak_list;
564 xbt_assert((pstate_index <= xbt_dynar_length(plist)), "Invalid parameters (pstate index out of bounds)");
566 return xbt_dynar_get_as(plist, pstate_index, double);
569 static int cpu_get_nb_pstates(void *cpu)
571 return xbt_dynar_length(((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_peak_list);
574 static void cpu_set_power_peak_at(void *cpu, int pstate_index)
576 cpu_Cas01_t cpu_implem = (cpu_Cas01_t)surf_cpu_resource_priv(cpu);
577 xbt_dynar_t plist = cpu_implem->power_peak_list;
578 xbt_assert((pstate_index <= xbt_dynar_length(plist)), "Invalid parameters (pstate index out of bounds)");
580 double new_power_peak = xbt_dynar_get_as(plist, pstate_index, double);
581 cpu_implem->pstate = pstate_index;
582 cpu_implem->power_peak = new_power_peak;
585 static double cpu_get_consumed_energy(void *cpu)
587 return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->energy->total_energy;
/* Tears down one CPU model instance: frees the max-min system, the lazy-mode
 * action heap and modified set, the model itself, and the shared "never needs
 * checking" swag. */
590 static void cpu_finalize(surf_model_t cpu_model)
592 lmm_system_free(cpu_model->model_private->maxmin_system);
593 cpu_model->model_private->maxmin_system = NULL;
595 if (cpu_model->model_private->action_heap)
596 xbt_heap_free(cpu_model->model_private->action_heap);
597 xbt_swag_free(cpu_model->model_private->modified_set);
599 surf_model_exit(cpu_model);
/* the shared swag is freed once and nulled so a second finalize is safe */
602 xbt_swag_free(cpu_running_action_set_that_does_not_need_being_checked);
603 cpu_running_action_set_that_does_not_need_being_checked = NULL;
/* Builds and wires one Cas01 CPU model instance: reads the cpu/optim config,
 * selects the Full or Lazy update mechanism, fills in the model callbacks and
 * the extension.cpu function table, and creates the max-min system (plus the
 * action heap and modified set in lazy mode). NOTE(review): some lines
 * (variable assignments for 'select', braces, the return statement) are not
 * visible in this chunk. */
606 static surf_model_t surf_cpu_model_init_cas01(void)
608 s_surf_action_t action;
609 s_surf_action_cpu_Cas01_t comp;
611 char *optim = xbt_cfg_get_string(_sg_cfg_set, "cpu/optim");
613 xbt_cfg_get_boolean(_sg_cfg_set, "cpu/maxmin_selective_update");
615 surf_model_t cpu_model = surf_model_init();
617 if (!strcmp(optim, "Full")) {
618 cpu_model->model_private->update_mechanism = UM_FULL;
619 cpu_model->model_private->selective_update = select;
620 } else if (!strcmp(optim, "Lazy")) {
/* lazy mode requires selective update to be enabled */
621 cpu_model->model_private->update_mechanism = UM_LAZY;
622 cpu_model->model_private->selective_update = 1;
623 xbt_assert((select == 1)
625 (xbt_cfg_is_default_value
626 (_sg_cfg_set, "cpu/maxmin_selective_update")),
627 "Disabling selective update while using the lazy update mechanism is dumb!");
629 xbt_die("Unsupported optimization (%s) for this model", optim);
632 cpu_running_action_set_that_does_not_need_being_checked =
633 xbt_swag_new(xbt_swag_offset(action, state_hookup));
635 cpu_model->name = "cpu";
636 cpu_model->type = SURF_MODEL_TYPE_CPU;
/* generic action lifecycle callbacks */
638 cpu_model->action_unref = surf_action_unref;
639 cpu_model->action_cancel = surf_action_cancel;
640 cpu_model->action_state_set = surf_action_state_set;
642 cpu_model->model_private->resource_used = cpu_resource_used;
/* pick the share/update pair matching the chosen mechanism */
644 if (cpu_model->model_private->update_mechanism == UM_LAZY) {
645 cpu_model->model_private->share_resources =
646 cpu_share_resources_lazy;
647 cpu_model->model_private->update_actions_state =
648 cpu_update_actions_state_lazy;
649 } else if (cpu_model->model_private->update_mechanism == UM_FULL) {
650 cpu_model->model_private->share_resources =
651 cpu_share_resources_full;
652 cpu_model->model_private->update_actions_state =
653 cpu_update_actions_state_full;
655 xbt_die("Invalid cpu update mechanism!");
657 cpu_model->model_private->update_resource_state =
658 cpu_update_resource_state;
659 cpu_model->model_private->finalize = cpu_finalize;
661 cpu_model->suspend = surf_action_suspend;
662 cpu_model->resume = surf_action_resume;
663 cpu_model->is_suspended = surf_action_is_suspended;
664 cpu_model->set_max_duration = surf_action_set_max_duration;
665 cpu_model->set_priority = surf_action_set_priority;
666 cpu_model->set_bound = surf_action_set_bound;
667 cpu_model->set_affinity = cpu_action_set_affinity;
669 cpu_model->set_category = surf_action_set_category;
671 cpu_model->get_remains = surf_action_get_remains;
/* CPU-specific extension table used by the workstation/hypervisor layers */
673 cpu_model->extension.cpu.execute = cpu_execute;
674 cpu_model->extension.cpu.sleep = cpu_action_sleep;
676 cpu_model->extension.cpu.get_state = cpu_get_state;
677 cpu_model->extension.cpu.set_state = cpu_set_state;
678 cpu_model->extension.cpu.get_core = cpu_get_core;
679 cpu_model->extension.cpu.get_speed = cpu_get_speed;
680 cpu_model->extension.cpu.get_available_speed =
681 cpu_get_available_speed;
682 cpu_model->extension.cpu.get_current_power_peak = cpu_get_current_power_peak;
683 cpu_model->extension.cpu.get_power_peak_at = cpu_get_power_peak_at;
684 cpu_model->extension.cpu.get_nb_pstates = cpu_get_nb_pstates;
685 cpu_model->extension.cpu.set_power_peak_at = cpu_set_power_peak_at;
686 cpu_model->extension.cpu.get_consumed_energy = cpu_get_consumed_energy;
688 cpu_model->extension.cpu.add_traces = cpu_add_traces_cpu;
690 if (!cpu_model->model_private->maxmin_system) {
691 cpu_model->model_private->maxmin_system = lmm_system_new(cpu_model->model_private->selective_update);
693 if (cpu_model->model_private->update_mechanism == UM_LAZY) {
/* lazy mode needs an event heap and a modified-set tracked by the solver */
694 cpu_model->model_private->action_heap = xbt_heap_new(8, NULL);
695 xbt_heap_set_update_callback(cpu_model->model_private->action_heap,
696 surf_action_lmm_update_index_heap);
697 cpu_model->model_private->modified_set =
698 xbt_swag_new(xbt_swag_offset(comp, generic_lmm_action.action_list_hookup));
699 cpu_model->model_private->maxmin_system->keep_track = cpu_model->model_private->modified_set;
705 /*********************************************************************/
706 /* Basic sharing model for CPU: that is where all this started... ;) */
707 /*********************************************************************/
708 /* @InProceedings{casanova01simgrid, */
709 /* author = "H. Casanova", */
710 /* booktitle = "Proceedings of the IEEE Symposium on Cluster Computing */
711 /* and the Grid (CCGrid'01)", */
712 /* publisher = "IEEE Computer Society", */
713 /* title = "Simgrid: {A} Toolkit for the Simulation of Application */
717 /* note = "Available at */
718 /* \url{http://grail.sdsc.edu/papers/simgrid_ccgrid01.ps.gz}." */
/* Public entry point: creates the PM and VM CPU models (or the trace-
 * integration model when cpu/optim=TI) and registers them plus the parser
 * callbacks. Must not be called twice. NOTE(review): some braces and an
 * early-return around the TI branch are not visible in this chunk. */
722 void surf_cpu_model_init_Cas01(void)
724 char *optim = xbt_cfg_get_string(_sg_cfg_set, "cpu/optim");
726 xbt_assert(!surf_cpu_model_pm);
727 xbt_assert(!surf_cpu_model_vm);
729 if (strcmp(optim, "TI") == 0) {
730 /* FIXME: do we have to support TI for the VM layer? */
731 surf_cpu_model_pm = surf_cpu_model_init_ti();
732 XBT_INFO("TI model is used (it will crashed since this is the hypervisor branch)");
734 surf_cpu_model_pm = surf_cpu_model_init_cas01();
735 surf_cpu_model_vm = surf_cpu_model_init_cas01();
737 /* cpu_model is registered only to model_list, and not to
738 * model_list_invoke. The shared_resource callback function will be called
739 * from that of the workstation model. */
740 xbt_dynar_push(model_list, &surf_cpu_model_pm);
741 xbt_dynar_push(model_list, &surf_cpu_model_vm);
743 cpu_define_callbacks_cas01();
747 /* TODO: do we address nested virtualization later? */
/* Returns the CPU model for the given virtualization level.
 * NOTE(review): the lazy-initialization branch body and the closing of this
 * function are not visible in this chunk. */
749 surf_model_t cpu_model_cas01(int level){
750 // TODO this table should be allocated
751 if(!surf_cpu_model[level])
753 return surf_cpu_model[level];