}
}
-static void cpu_define_callbacks()
+static void cpu_define_callbacks_cas01()
{
sg_platf_host_add_cb(parse_cpu_init);
sg_platf_postparse_add_cb(cpu_add_traces_cpu);
return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->state_current;
}
+static void cpu_set_state(void *cpu, e_surf_resource_state_t state)
+{
+ ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->state_current = state;
+}
+
static double cpu_get_speed(void *cpu, double load)
{
return load * ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->power_peak;
}
+static int cpu_get_core(void *cpu)
+{
+ return ((cpu_Cas01_t)surf_cpu_resource_priv(cpu))->core;
+}
/* NOTE(review): this function body appears mis-pasted.  It is declared to
 * return a double but never returns one (undefined behavior if the caller
 * uses the result), and the statement below — clearing a global action set —
 * reads like it belongs to a model-finalize/cleanup routine.  Presumably the
 * real body returned the CPU's availability scale factor (a value in [0,1],
 * per the surviving comment) — TODO restore from upstream. */
static double cpu_get_available_speed(void *cpu)
{
  /* number between 0 and 1 */
  cpu_running_action_set_that_does_not_need_being_checked = NULL;
}
/*
 * Build and return a fully wired Cas01 CPU model.
 * NOTE(review): unified-diff markers ('-'/'+') survive in this span from a
 * mangled paste — read the '+' and unprefixed lines as the intended code.
 * Part of the body (the "Lazy" optim branch, the action setters' context,
 * and the maxmin-system creation) appears to have been elided; `action`
 * and `select` are presumably used in the missing portion — TODO confirm
 * against upstream before relying on this reconstruction.
 */
-static void surf_cpu_model_init_internal(surf_model_t cpu_model)
+static surf_model_t surf_cpu_model_init_cas01(void)
{
  s_surf_action_t action;
  s_surf_action_cpu_Cas01_t comp;
  char *optim = xbt_cfg_get_string(_sg_cfg_set, "cpu/optim");
  /* "cpu/maxmin_selective_update" is a boolean option, hence the switch
   * from xbt_cfg_get_int to xbt_cfg_get_boolean. */
  int select =
-      xbt_cfg_get_int(_sg_cfg_set, "cpu/maxmin_selective_update");
+      xbt_cfg_get_boolean(_sg_cfg_set, "cpu/maxmin_selective_update");
  /* The model object is now created locally and returned, instead of being
   * received (and ignored) through the parameter. */
-  cpu_model = surf_model_init();
+  surf_model_t cpu_model = surf_model_init();
  if (!strcmp(optim, "Full")) {
    cpu_model->model_private->update_mechanism = UM_FULL;
  /* Wire the generic surf action operations into this model. */
  cpu_model->is_suspended = surf_action_is_suspended;
  cpu_model->set_max_duration = surf_action_set_max_duration;
  cpu_model->set_priority = surf_action_set_priority;
+  cpu_model->set_bound = surf_action_set_bound;
#ifdef HAVE_TRACING
  cpu_model->set_category = surf_action_set_category;
#endif
  /* CPU-specific extension hooks (state/core accessors are new). */
  cpu_model->extension.cpu.sleep = cpu_action_sleep;
  cpu_model->extension.cpu.get_state = cpu_get_state;
+  cpu_model->extension.cpu.set_state = cpu_set_state;
+  cpu_model->extension.cpu.get_core = cpu_get_core;
  cpu_model->extension.cpu.get_speed = cpu_get_speed;
  cpu_model->extension.cpu.get_available_speed =
      cpu_get_available_speed;
  xbt_swag_new(xbt_swag_offset(comp, generic_lmm_action.action_list_hookup));
  /* Let the maxmin system track only the modified actions set. */
  cpu_model->model_private->maxmin_system->keep_track = cpu_model->model_private->modified_set;
  }
+
+  return cpu_model;
}
/*********************************************************************/
/* } */
/*
 * Public entry point: create and register the Cas01 CPU models — one for
 * physical machines (surf_cpu_model_pm) and one for VMs (surf_cpu_model_vm).
 * NOTE(review): unified-diff markers ('-'/'+') survive in this span from a
 * mangled paste; the '-' lines are the removed pre-refactoring code, and the
 * intended post-patch code is the '+' and unprefixed lines only.
 */
-static void create_cpu_model_object(surf_model_t cpu_model)
+void surf_cpu_model_init_Cas01(void)
{
  char *optim = xbt_cfg_get_string(_sg_cfg_set, "cpu/optim");
  /* Guard against double initialization of either model. */
-  xbt_assert(cpu_model == NULL, "wrong intialization");
+  xbt_assert(!surf_cpu_model_pm);
+  xbt_assert(!surf_cpu_model_vm);
-  if (!strcmp(optim, "TI")) {
-    surf_cpu_model_init_ti(cpu_model);
-    return;
-  }
-
-  surf_cpu_model_init_internal(cpu_model);
-  cpu_define_callbacks();
+  if (strcmp(optim, "TI") == 0) {
+    /* FIXME: do we have to support TI? for VM */
+    surf_cpu_model_pm = surf_cpu_model_init_ti();
+    XBT_INFO("TI model is used (it will crashed since this is the hypervisor branch)");
+  } else {
+    surf_cpu_model_pm = surf_cpu_model_init_cas01();
+    surf_cpu_model_vm = surf_cpu_model_init_cas01();
-  /* cpu_model is registered only to model_list, and not to
-   * model_list_invoke. The shared_resource callback function will be called
-   * from that of the workstation model. */
-  xbt_dynar_push(model_list, &cpu_model);
-}
+  /* cpu_model is registered only to model_list, and not to
+   * model_list_invoke. The shared_resource callback function will be called
+   * from that of the workstation model. */
+  xbt_dynar_push(model_list, &surf_cpu_model_pm);
+  xbt_dynar_push(model_list, &surf_cpu_model_vm);
-void surf_cpu_model_init_Cas01(void)
-{
-  create_cpu_model_object(surf_cpu_model_pm);
-  create_cpu_model_object(surf_cpu_model_vm);
+  cpu_define_callbacks_cas01();
+  }
}
/* TODO: do we address nested virtualization later? */