Merge tag 'v3_9_90' into hypervisor
[simgrid.git] / src / surf / cpu_cas01.c
/* Copyright (c) 2009-2013. The SimGrid Team.
 * All rights reserved.                                                     */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "surf_private.h"
#include "surf/surf_resource.h"
#include "maxmin_private.h"
#include "simgrid/sg_config.h"
#include "cpu_cas01_private.h"

#include <string.h>
#include <stdlib.h>

/* the model objects for physical machines and virtual machines */
surf_model_t surf_cpu_model_pm = NULL;
surf_model_t surf_cpu_model_vm = NULL;

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_cpu, surf,
                                "Logging specific to the SURF CPU IMPROVED module");

static xbt_swag_t
    cpu_running_action_set_that_does_not_need_being_checked = NULL;

/* Additional callback function to clean up some data, called from surf_resource_free */

static void cpu_cas01_cleanup(void* r){
  cpu_Cas01_t cpu = (cpu_Cas01_t)r;
  unsigned int iter;
  xbt_dynar_t power_tuple = NULL;
  xbt_dynar_foreach(cpu->energy->power_range_watts_list, iter, power_tuple)
    xbt_dynar_free(&power_tuple);
  xbt_dynar_free(&cpu->energy->power_range_watts_list);
  xbt_dynar_free(&cpu->power_peak_list);
  xbt_free(cpu->energy);
  return;
}

/* This function is registered as a callback to sg_platf_new_host() and never called directly */
void *cpu_cas01_create_resource(const char *name, xbt_dynar_t power_peak,
                                int pstate,
                                double power_scale,
                                tmgr_trace_t power_trace,
                                int core,
                                e_surf_resource_state_t state_initial,
                                tmgr_trace_t state_trace,
                                xbt_dict_t cpu_properties,
                                surf_model_t cpu_model)
{
  cpu_Cas01_t cpu = NULL;

  xbt_assert(!surf_cpu_resource_priv(surf_cpu_resource_by_name(name)),
             "Host '%s' declared several times in the platform file",
             name);
  cpu = (cpu_Cas01_t) surf_resource_new(sizeof(s_cpu_Cas01_t),
                                        cpu_model, name,
                                        cpu_properties, &cpu_cas01_cleanup);
  cpu->power_peak = xbt_dynar_get_as(power_peak, pstate, double);
  cpu->power_peak_list = power_peak;
  cpu->pstate = pstate;

  cpu->energy = xbt_new(s_energy_cpu_cas01_t, 1);
  cpu->energy->total_energy = 0;
  cpu->energy->power_range_watts_list = cpu_get_watts_range_list(cpu);
  cpu->energy->last_updated = surf_get_clock();

  XBT_DEBUG("CPU create: peak=%f, pstate=%d", cpu->power_peak, cpu->pstate);

  xbt_assert(cpu->power_peak > 0, "Power has to be >0");
  cpu->power_scale = power_scale;
  cpu->core = core;
  xbt_assert(core > 0, "Invalid number of cores %d", core);

  if (power_trace)
    cpu->power_event =
        tmgr_history_add_trace(history, power_trace, 0.0, 0, cpu);

  cpu->state_current = state_initial;
  if (state_trace)
    cpu->state_event =
        tmgr_history_add_trace(history, state_trace, 0.0, 0, cpu);

  cpu->constraint =
      lmm_constraint_new(cpu_model->model_private->maxmin_system, cpu,
                         cpu->core * cpu->power_scale * cpu->power_peak);

  /* Note (hypervisor): we create a constraint object for each CPU core, which
   * is used to formulate the constraint problem of CPU affinity.
   **/
  {
    /* For now, we assume that a VM does not have a multicore CPU. */
    if (core > 1)
      xbt_assert(cpu_model == surf_cpu_model_pm);

    cpu->constraint_core = xbt_new(lmm_constraint_t, core);

    unsigned long i;
    for (i = 0; i < core; i++) {
      /* just for a unique id, never used as a string. */
      void *cnst_id = bprintf("%s:%lu", name, i);
      cpu->constraint_core[i] =
          lmm_constraint_new(cpu_model->model_private->maxmin_system, cnst_id,
                             cpu->power_scale * cpu->power_peak);
    }
  }

  xbt_lib_set(host_lib, name, SURF_CPU_LEVEL, cpu);

  return xbt_lib_get_elm_or_null(host_lib, name);
}


static void parse_cpu_init(sg_platf_host_cbarg_t host)
{
  /* This function is called when a platform file is parsed. Physical machines
   * are defined there. Thus, we use the cpu model object for the physical
   * machine layer. */
  cpu_cas01_create_resource(host->id,
                            host->power_peak,
                            host->pstate,
                            host->power_scale,
                            host->power_trace,
                            host->core_amount,
                            host->initial_state,
                            host->state_trace, host->properties,
                            surf_cpu_model_pm);
}

static void cpu_add_traces_cpu(void)
{
  xbt_dict_cursor_t cursor = NULL;
  char *trace_name, *elm;
  static int called = 0;
  if (called)
    return;
  called = 1;

  /* connect all traces relative to hosts */
  xbt_dict_foreach(trace_connect_list_host_avail, cursor, trace_name, elm) {
    tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
    cpu_Cas01_t host = surf_cpu_resource_by_name(elm);

    xbt_assert(host, "Host %s undefined", elm);
    xbt_assert(trace, "Trace %s undefined", trace_name);

    host->state_event =
        tmgr_history_add_trace(history, trace, 0.0, 0, host);
  }

  xbt_dict_foreach(trace_connect_list_power, cursor, trace_name, elm) {
    tmgr_trace_t trace = xbt_dict_get_or_null(traces_set_list, trace_name);
    cpu_Cas01_t host = surf_cpu_resource_by_name(elm);

    xbt_assert(host, "Host %s undefined", elm);
    xbt_assert(trace, "Trace %s undefined", trace_name);

    host->power_event =
        tmgr_history_add_trace(history, trace, 0.0, 0, host);
  }
}

static void cpu_define_callbacks_cas01(void)
{
  sg_platf_host_add_cb(parse_cpu_init);
  sg_platf_postparse_add_cb(cpu_add_traces_cpu);
}

static int cpu_resource_used(void *resource)
{
  surf_model_t cpu_model = ((surf_resource_t) resource)->model;

  /* Note (hypervisor): we do not need to look up constraint_core[i] here.
   * Whether or not a task is pinned, its variable object is always linked to
   * the basic constraint object.
   **/

  return lmm_constraint_used(cpu_model->model_private->maxmin_system,
                             ((cpu_Cas01_t) resource)->constraint);
}

static double cpu_share_resources_lazy(surf_model_t cpu_model, double now)
{
  return generic_share_resources_lazy(now, cpu_model);
}

static double cpu_share_resources_full(surf_model_t cpu_model, double now)
{
  s_surf_action_cpu_Cas01_t action;
  return generic_maxmin_share_resources(cpu_model->states.running_action_set,
                                        xbt_swag_offset(action,
                                                        generic_lmm_action.variable),
                                        cpu_model->model_private->maxmin_system,
                                        lmm_solve);
}

static void cpu_update_actions_state_lazy(surf_model_t cpu_model, double now, double delta)
{
  generic_update_actions_state_lazy(now, delta, cpu_model);
}

static void cpu_update_actions_state_full(surf_model_t cpu_model, double now, double delta)
{
  generic_update_actions_state_full(now, delta, cpu_model);
}

xbt_dynar_t cpu_get_watts_range_list(cpu_Cas01_t cpu_model)
{
  xbt_dynar_t power_range_list;
  xbt_dynar_t power_tuple;
  int i = 0, pstate_nb = 0;
  xbt_dynar_t current_power_values;
  double min_power, max_power;
  xbt_dict_t props = cpu_model->generic_resource.properties;

  if (props == NULL)
    return NULL;

  char* all_power_values_str = xbt_dict_get_or_null(props, "power_per_state");

  if (all_power_values_str == NULL)
    return NULL;

  power_range_list = xbt_dynar_new(sizeof(xbt_dynar_t), NULL);
  xbt_dynar_t all_power_values = xbt_str_split(all_power_values_str, ",");

  pstate_nb = xbt_dynar_length(all_power_values);
  for (i = 0; i < pstate_nb; i++) {
    /* retrieve the power values associated with the current pstate */
    current_power_values = xbt_str_split(xbt_dynar_get_as(all_power_values, i, char*), ":");
    xbt_assert(xbt_dynar_length(current_power_values) > 1,
               "Power properties incorrectly defined - could not retrieve min and max power values for host %s",
               cpu_model->generic_resource.name);

    /* min_power corresponds to the idle power (cpu load = 0) */
    /* max_power is the power consumed at 100% cpu load       */
    min_power = atof(xbt_dynar_get_as(current_power_values, 0, char*));
    max_power = atof(xbt_dynar_get_as(current_power_values, 1, char*));

    power_tuple = xbt_dynar_new(sizeof(double), NULL);
    xbt_dynar_push_as(power_tuple, double, min_power);
    xbt_dynar_push_as(power_tuple, double, max_power);

    xbt_dynar_push_as(power_range_list, xbt_dynar_t, power_tuple);
    xbt_dynar_free(&current_power_values);
  }
  xbt_dynar_free(&all_power_values);
  return power_range_list;
}

/**
 * Computes the power consumed by the host according to the current pstate
 * and processor load.
 */
static double cpu_get_current_watts_value(cpu_Cas01_t cpu_model, double cpu_load)
{
  xbt_dynar_t power_range_list = cpu_model->energy->power_range_watts_list;

  if (power_range_list == NULL) {
    XBT_DEBUG("No power range properties specified for host %s", cpu_model->generic_resource.name);
    return 0;
  }
  xbt_assert(xbt_dynar_length(power_range_list) == xbt_dynar_length(cpu_model->power_peak_list),
             "The number of power ranges in the properties does not match the number of pstates for host %s",
             cpu_model->generic_resource.name);

  /* retrieve the power values associated with the current pstate */
  xbt_dynar_t current_power_values = xbt_dynar_get_as(power_range_list, cpu_model->pstate, xbt_dynar_t);

  /* min_power corresponds to the idle power (cpu load = 0) */
  /* max_power is the power consumed at 100% cpu load       */
  double min_power = xbt_dynar_get_as(current_power_values, 0, double);
  double max_power = xbt_dynar_get_as(current_power_values, 1, double);
  double power_slope = max_power - min_power;

  double current_power = min_power + cpu_load * power_slope;
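  /* Worked example with hypothetical values: if min_power = 100 W,
   * max_power = 200 W and cpu_load = 0.5, then power_slope = 100 and
   * current_power = 100 + 0.5 * 100 = 150 W. */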

  XBT_DEBUG("[get_current_watts] min_power=%f, max_power=%f, slope=%f", min_power, max_power, power_slope);
  XBT_DEBUG("[get_current_watts] Current power (watts) = %f, load = %f", current_power, cpu_load);

  return current_power;
}

/**
 * Updates the total energy consumed as the sum of the current energy and
 * the energy consumed by the current action
 */
void cpu_update_energy(cpu_Cas01_t cpu_model, double cpu_load)
{
  double start_time = cpu_model->energy->last_updated;
  double finish_time = surf_get_clock();

  XBT_DEBUG("[cpu_update_energy] action time interval=(%f-%f), current power peak=%f, current pstate=%d",
            start_time, finish_time, cpu_model->power_peak, cpu_model->pstate);
  double current_energy = cpu_model->energy->total_energy;
  double action_energy = cpu_get_current_watts_value(cpu_model, cpu_load) * (finish_time - start_time);
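  /* For instance, with hypothetical values: drawing 150 W over a 10 s
   * interval adds 1500 J to the total energy. */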

  cpu_model->energy->total_energy = current_energy + action_energy;
  cpu_model->energy->last_updated = finish_time;

  XBT_DEBUG("[cpu_update_energy] old_energy_value=%f, action_energy_value=%f", current_energy, action_energy);
}

static void cpu_update_resource_state(void *id,
                                      tmgr_trace_event_t event_type,
                                      double value, double date)
{
  cpu_Cas01_t cpu = id;
  lmm_variable_t var = NULL;
  lmm_element_t elem = NULL;
  surf_model_t cpu_model = ((surf_resource_t) cpu)->model;

  if (event_type == cpu->power_event) {
    /* TODO (Hypervisor): do the same thing for constraint_core[i] */
    xbt_assert(cpu->core == 1, "FIXME: add power scaling code also for constraint_core[i]");

    cpu->power_scale = value;
    lmm_update_constraint_bound(cpu_model->model_private->maxmin_system, cpu->constraint,
                                cpu->core * cpu->power_scale * cpu->power_peak);
#ifdef HAVE_TRACING
    TRACE_surf_host_set_power(date, cpu->generic_resource.name,
                              cpu->core * cpu->power_scale * cpu->power_peak);
#endif
    while ((var = lmm_get_var_from_cnst
            (cpu_model->model_private->maxmin_system, cpu->constraint, &elem))) {
      surf_action_cpu_Cas01_t action = lmm_variable_id(var);
      lmm_update_variable_bound(cpu_model->model_private->maxmin_system,
                                GENERIC_LMM_ACTION(action).variable,
                                cpu->power_scale * cpu->power_peak);
    }
    if (tmgr_trace_event_free(event_type))
      cpu->power_event = NULL;
  } else if (event_type == cpu->state_event) {
    /* TODO (Hypervisor): do the same thing for constraint_core[i] */
    xbt_assert(cpu->core == 1, "FIXME: add state change code also for constraint_core[i]");

    if (value > 0) {
      if (cpu->state_current == SURF_RESOURCE_OFF)
        xbt_dynar_push_as(host_that_restart, char*, (cpu->generic_resource.name));
      cpu->state_current = SURF_RESOURCE_ON;
    } else {
      lmm_constraint_t cnst = cpu->constraint;

      cpu->state_current = SURF_RESOURCE_OFF;

      while ((var = lmm_get_var_from_cnst(cpu_model->model_private->maxmin_system, cnst, &elem))) {
        surf_action_t action = lmm_variable_id(var);

        if (surf_action_state_get(action) == SURF_ACTION_RUNNING ||
            surf_action_state_get(action) == SURF_ACTION_READY ||
            surf_action_state_get(action) == SURF_ACTION_NOT_IN_THE_SYSTEM) {
          action->finish = date;
          surf_action_state_set(action, SURF_ACTION_FAILED);
        }
      }
    }
    if (tmgr_trace_event_free(event_type))
      cpu->state_event = NULL;
  } else {
    XBT_CRITICAL("Unknown event ! \n");
    xbt_abort();
  }

  return;
}


/*
 * This function formulates a constraint problem that pins a given task to
 * particular cores. Currently, it is only possible to pin a task to exactly
 * one specific core: the system links the variable object of the task to the
 * per-core constraint object.
 *
 * However, the taskset command on Linux takes a mask value specifying the CPU
 * affinity of a given task. If the mask value is 0x03, the task may be
 * executed on the first core (CPU0) or the second core (CPU1) of the given
 * PM. The scheduler then determines appropriate placements of tasks,
 * considering the given CPU affinities and task activities.
 *
 * How should the system formulate constraint problems for an affinity to
 * multiple cores?
 *
 * The cpu argument must be the host where the task is being executed. The
 * action object does not have the information about the location where the
 * action is being executed.
 */
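/*
 * Illustration of the mask encoding as used below (one bit per core, bit i
 * selects core i): mask 0x1 pins the task to core 0, mask 0x2 pins it to
 * core 1, and mask 0x3 would request cores 0 and 1, which this implementation
 * currently rejects (see the nbits check below).
 */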
static void cpu_action_set_affinity(surf_action_t action, void *cpu, unsigned long mask)
{
  lmm_variable_t var_obj = ((surf_action_lmm_t) action)->variable;

  surf_model_t cpu_model = action->model_obj;
  xbt_assert(cpu_model->type == SURF_MODEL_TYPE_CPU);
  cpu_Cas01_t CPU = surf_cpu_resource_priv(cpu);

  XBT_IN("(%p,%lx)", action, mask);

  {
    unsigned long nbits = 0;

    /* FIXME: There are much faster algorithms for doing this. */
    unsigned long i;
    for (i = 0; i < CPU->core; i++) {
      unsigned long has_affinity = (1UL << i) & mask;
      if (has_affinity)
        nbits += 1;
    }

    if (nbits > 1) {
      XBT_CRITICAL("Do not specify multiple cores for an affinity mask.");
      XBT_CRITICAL("See the comment in cpu_action_set_affinity().");
      DIE_IMPOSSIBLE;
    }
  }

  unsigned long i;
  for (i = 0; i < CPU->core; i++) {
    XBT_DEBUG("clear affinity %p to cpu-%lu@%s", action, i, CPU->generic_resource.name);
    lmm_shrink(cpu_model->model_private->maxmin_system, CPU->constraint_core[i], var_obj);

    unsigned long has_affinity = (1UL << i) & mask;
    if (has_affinity) {
      /* This function only accepts an affinity setting on the host where the
       * task is now running. In the future, a task might move to another
       * host, but at this moment this function cannot take an affinity
       * setting on that future host.
       *
       * It might be possible to extend the code to allow this function to
       * accept affinity settings on a future host. We might be able to assign
       * zero to elem->value to maintain such inactive affinity settings in the
       * system. But, this will make the system complex. */
      XBT_DEBUG("set affinity %p to cpu-%lu@%s", action, i, CPU->generic_resource.name);
      lmm_expand(cpu_model->model_private->maxmin_system, CPU->constraint_core[i], var_obj, 1.0);
    }
  }

  if (cpu_model->model_private->update_mechanism == UM_LAZY) {
    /* FIXME (hypervisor): Do we need to do something for the LAZY mode? */
  }

  XBT_OUT();
}

static surf_action_t cpu_execute(void *cpu, double size)
{
  surf_action_cpu_Cas01_t action = NULL;
  //xbt_dict_cursor_t cursor = NULL;
  cpu_Cas01_t CPU = surf_cpu_resource_priv(cpu);
  surf_model_t cpu_model = ((surf_resource_t) CPU)->model;

  XBT_IN("(%s,%g)", surf_resource_name(CPU), size);
  action =
      surf_action_new(sizeof(s_surf_action_cpu_Cas01_t), size,
                      cpu_model,
                      CPU->state_current != SURF_RESOURCE_ON);

  GENERIC_LMM_ACTION(action).suspended = 0;     /* Should be useless because of the
                                                   calloc but it seems to help valgrind... */

  /* Note (hypervisor): here, the bound value of the variable is set to the
   * capacity of a CPU core. But, after MSG_{task/vm}_set_bound() were added to
   * the hypervisor branch, this bound value is overwritten in
   * SIMIX_host_execute().
   * TODO: cleanup this.
   */
  GENERIC_LMM_ACTION(action).variable =
      lmm_variable_new(cpu_model->model_private->maxmin_system, action,
                       GENERIC_ACTION(action).priority,
                       CPU->power_scale * CPU->power_peak, 1 + CPU->core); // the basic constraint plus core-specific constraints
  if (cpu_model->model_private->update_mechanism == UM_LAZY) {
    GENERIC_LMM_ACTION(action).index_heap = -1;
    GENERIC_LMM_ACTION(action).last_update = surf_get_clock();
    GENERIC_LMM_ACTION(action).last_value = 0.0;
  }
  lmm_expand(cpu_model->model_private->maxmin_system, CPU->constraint,
             GENERIC_LMM_ACTION(action).variable, 1.0);
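  /* Note that the variable is expanded only on the host-wide constraint here;
   * cpu_action_set_affinity() above is what links it to (or unlinks it from)
   * the per-core constraint_core[i] objects when an affinity is requested. */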
  XBT_OUT();
  return (surf_action_t) action;
}

static surf_action_t cpu_action_sleep(void *cpu, double duration)
{
  surf_action_cpu_Cas01_t action = NULL;
  cpu_Cas01_t CPU = surf_cpu_resource_priv(cpu);
  surf_model_t cpu_model = ((surf_resource_t) CPU)->model;

  if (duration > 0)
    duration = MAX(duration, MAXMIN_PRECISION);

  XBT_IN("(%s,%g)", surf_resource_name(surf_cpu_resource_priv(cpu)), duration);
  action = (surf_action_cpu_Cas01_t) cpu_execute(cpu, 1.0);
  // FIXME: sleep variables should not consume 1.0 in lmm_expand
  GENERIC_ACTION(action).max_duration = duration;
  GENERIC_LMM_ACTION(action).suspended = 2;
  if (duration == NO_MAX_DURATION) {
    /* Move to the *end* of the corresponding action set. This convention
       is used to speed up update_resource_state  */
    xbt_swag_remove(action, ((surf_action_t) action)->state_set);
    ((surf_action_t) action)->state_set =
        cpu_running_action_set_that_does_not_need_being_checked;
    xbt_swag_insert(action, ((surf_action_t) action)->state_set);
  }

  lmm_update_variable_weight(cpu_model->model_private->maxmin_system,
                             GENERIC_LMM_ACTION(action).variable, 0.0);
  if (cpu_model->model_private->update_mechanism == UM_LAZY) {     // remove action from the heap
    surf_action_lmm_heap_remove(cpu_model->model_private->action_heap, (surf_action_lmm_t)action);
    // this is necessary for a variable with weight 0 since such
    // variables are ignored in lmm and we need to set its max_duration
    // correctly at the next call to share_resources
    xbt_swag_insert_at_head(action, cpu_model->model_private->modified_set);
  }

  XBT_OUT();
  return (surf_action_t) action;
}

static e_surf_resource_state_t cpu_get_state(void *cpu)
{
  return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->state_current;
}

static void cpu_set_state(void *cpu, e_surf_resource_state_t state)
{
  ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->state_current = state;
}

static double cpu_get_speed(void *cpu, double load)
{
  return load * ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_peak;
}

static int cpu_get_core(void *cpu)
{
  return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->core;
}

static double cpu_get_available_speed(void *cpu)
{
  /* number between 0 and 1 */
  return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_scale;
}

static double cpu_get_current_power_peak(void *cpu)
{
  return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_peak;
}

static double cpu_get_power_peak_at(void *cpu, int pstate_index)
{
  xbt_dynar_t plist = ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_peak_list;
  xbt_assert((pstate_index < xbt_dynar_length(plist)), "Invalid parameters (pstate index out of bounds)");

  return xbt_dynar_get_as(plist, pstate_index, double);
}

static int cpu_get_nb_pstates(void *cpu)
{
  return xbt_dynar_length(((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_peak_list);
}

static void cpu_set_power_peak_at(void *cpu, int pstate_index)
{
  cpu_Cas01_t cpu_implem = (cpu_Cas01_t)surf_cpu_resource_priv(cpu);
  xbt_dynar_t plist = cpu_implem->power_peak_list;
  xbt_assert((pstate_index < xbt_dynar_length(plist)), "Invalid parameters (pstate index out of bounds)");

  double new_power_peak = xbt_dynar_get_as(plist, pstate_index, double);
  cpu_implem->pstate = pstate_index;
  cpu_implem->power_peak = new_power_peak;
}

static double cpu_get_consumed_energy(void *cpu)
{
  return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->energy->total_energy;
}

static void cpu_finalize(surf_model_t cpu_model)
{
  lmm_system_free(cpu_model->model_private->maxmin_system);
  cpu_model->model_private->maxmin_system = NULL;

  if (cpu_model->model_private->action_heap)
    xbt_heap_free(cpu_model->model_private->action_heap);
  xbt_swag_free(cpu_model->model_private->modified_set);

  surf_model_exit(cpu_model);
  cpu_model = NULL;

  xbt_swag_free(cpu_running_action_set_that_does_not_need_being_checked);
  cpu_running_action_set_that_does_not_need_being_checked = NULL;
}

static surf_model_t surf_cpu_model_init_cas01(void)
{
  s_surf_action_t action;
  s_surf_action_cpu_Cas01_t comp;

  char *optim = xbt_cfg_get_string(_sg_cfg_set, "cpu/optim");
  int select =
      xbt_cfg_get_boolean(_sg_cfg_set, "cpu/maxmin_selective_update");
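  /* These settings normally come from the simulation command line, e.g.
   *   ./my_simulator platform.xml deploy.xml --cfg=cpu/optim:Lazy
   * (illustrative invocation; binary and file names are placeholders). */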

  surf_model_t cpu_model = surf_model_init();

  if (!strcmp(optim, "Full")) {
    cpu_model->model_private->update_mechanism = UM_FULL;
    cpu_model->model_private->selective_update = select;
  } else if (!strcmp(optim, "Lazy")) {
    cpu_model->model_private->update_mechanism = UM_LAZY;
    cpu_model->model_private->selective_update = 1;
    xbt_assert((select == 1) ||
               (xbt_cfg_is_default_value(_sg_cfg_set, "cpu/maxmin_selective_update")),
               "Disabling selective update while using the lazy update mechanism is dumb!");
  } else {
    xbt_die("Unsupported optimization (%s) for this model", optim);
  }

  cpu_running_action_set_that_does_not_need_being_checked =
      xbt_swag_new(xbt_swag_offset(action, state_hookup));

  cpu_model->name = "cpu";
  cpu_model->type = SURF_MODEL_TYPE_CPU;

  cpu_model->action_unref = surf_action_unref;
  cpu_model->action_cancel = surf_action_cancel;
  cpu_model->action_state_set = surf_action_state_set;

  cpu_model->model_private->resource_used = cpu_resource_used;

  if (cpu_model->model_private->update_mechanism == UM_LAZY) {
    cpu_model->model_private->share_resources = cpu_share_resources_lazy;
    cpu_model->model_private->update_actions_state = cpu_update_actions_state_lazy;
  } else if (cpu_model->model_private->update_mechanism == UM_FULL) {
    cpu_model->model_private->share_resources = cpu_share_resources_full;
    cpu_model->model_private->update_actions_state = cpu_update_actions_state_full;
  } else
    xbt_die("Invalid cpu update mechanism!");

  cpu_model->model_private->update_resource_state = cpu_update_resource_state;
  cpu_model->model_private->finalize = cpu_finalize;

  cpu_model->suspend = surf_action_suspend;
  cpu_model->resume = surf_action_resume;
  cpu_model->is_suspended = surf_action_is_suspended;
  cpu_model->set_max_duration = surf_action_set_max_duration;
  cpu_model->set_priority = surf_action_set_priority;
  cpu_model->set_bound = surf_action_set_bound;
  cpu_model->set_affinity = cpu_action_set_affinity;
#ifdef HAVE_TRACING
  cpu_model->set_category = surf_action_set_category;
#endif
  cpu_model->get_remains = surf_action_get_remains;

  cpu_model->extension.cpu.execute = cpu_execute;
  cpu_model->extension.cpu.sleep = cpu_action_sleep;

  cpu_model->extension.cpu.get_state = cpu_get_state;
  cpu_model->extension.cpu.set_state = cpu_set_state;
  cpu_model->extension.cpu.get_core = cpu_get_core;
  cpu_model->extension.cpu.get_speed = cpu_get_speed;
  cpu_model->extension.cpu.get_available_speed = cpu_get_available_speed;
  cpu_model->extension.cpu.get_current_power_peak = cpu_get_current_power_peak;
  cpu_model->extension.cpu.get_power_peak_at = cpu_get_power_peak_at;
  cpu_model->extension.cpu.get_nb_pstates = cpu_get_nb_pstates;
  cpu_model->extension.cpu.set_power_peak_at = cpu_set_power_peak_at;
  cpu_model->extension.cpu.get_consumed_energy = cpu_get_consumed_energy;

  cpu_model->extension.cpu.add_traces = cpu_add_traces_cpu;

  if (!cpu_model->model_private->maxmin_system) {
    cpu_model->model_private->maxmin_system = lmm_system_new(cpu_model->model_private->selective_update);
  }
  if (cpu_model->model_private->update_mechanism == UM_LAZY) {
    cpu_model->model_private->action_heap = xbt_heap_new(8, NULL);
    xbt_heap_set_update_callback(cpu_model->model_private->action_heap,
                                 surf_action_lmm_update_index_heap);
    cpu_model->model_private->modified_set =
        xbt_swag_new(xbt_swag_offset(comp, generic_lmm_action.action_list_hookup));
    cpu_model->model_private->maxmin_system->keep_track = cpu_model->model_private->modified_set;
  }

  return cpu_model;
}


/*********************************************************************/
/* Basic sharing model for CPU: that is where all this started... ;) */
/*********************************************************************/
/* @InProceedings{casanova01simgrid, */
/*   author =       "H. Casanova", */
/*   booktitle =    "Proceedings of the IEEE Symposium on Cluster Computing */
/*                  and the Grid (CCGrid'01)", */
/*   publisher =    "IEEE Computer Society", */
/*   title =        "Simgrid: {A} Toolkit for the Simulation of Application */
/*                  Scheduling", */
/*   year =         "2001", */
/*   month =        may, */
/*   note =         "Available at */
/*                  \url{http://grail.sdsc.edu/papers/simgrid_ccgrid01.ps.gz}." */
/* } */


void surf_cpu_model_init_Cas01(void)
{
  char *optim = xbt_cfg_get_string(_sg_cfg_set, "cpu/optim");

  xbt_assert(!surf_cpu_model_pm);
  xbt_assert(!surf_cpu_model_vm);

  if (strcmp(optim, "TI") == 0) {
    /* FIXME: do we have to support TI for VMs? */
    surf_cpu_model_pm = surf_cpu_model_init_ti();
    XBT_INFO("TI model is used (it will crash since this is the hypervisor branch)");
  } else {
    surf_cpu_model_pm  = surf_cpu_model_init_cas01();
    surf_cpu_model_vm  = surf_cpu_model_init_cas01();

    /* cpu_model is registered only to model_list, and not to
     * model_list_invoke. The share_resources callback function will be called
     * from that of the workstation model. */
    xbt_dynar_push(model_list, &surf_cpu_model_pm);
    xbt_dynar_push(model_list, &surf_cpu_model_vm);

    cpu_define_callbacks_cas01();
  }
}

/* TODO: do we address nested virtualization later? */
#if 0
surf_model_t cpu_model_cas01(int level){
  // TODO this table should be allocated
  if (!surf_cpu_model[level])
    // allocate it
  return surf_cpu_model[level];
}
#endif