+ surf_action_workstation_KCCFLN05_t action = NULL;
+ int i, j, k;
+ int nb_link = 0;
+ int nb_host = 0;
+
+ if (parallel_task_link_set == NULL) {
+ parallel_task_link_set = xbt_dict_new();
+ }
+
+ /* Compute the number of affected resources... */
+ for (i = 0; i < workstation_nb; i++) {
+ for (j = 0; j < workstation_nb; j++) {
+ cpu_KCCFLN05_t card_src = workstation_list[i];
+ cpu_KCCFLN05_t card_dst = workstation_list[j];
+ int route_size = ROUTE(card_src->id, card_dst->id).size;
+ link_KCCFLN05_t *route =
+ ROUTE(card_src->id, card_dst->id).links;
+
+ if (communication_amount[i * workstation_nb + j] > 0)
+ for (k = 0; k < route_size; k++) {
+ xbt_dict_set(parallel_task_link_set, route[k]->name,
+ route[k], NULL);
+ }
+ }
+ }
+ nb_link = xbt_dict_length(parallel_task_link_set);
+ xbt_dict_reset(parallel_task_link_set);
+
+
+ for (i = 0; i < workstation_nb; i++)
+ if (computation_amount[i] > 0)
+ nb_host++;
+
+ action = xbt_new0(s_surf_action_workstation_KCCFLN05_t, 1);
+ DEBUG3("Creating a parallel task (%p) with %d cpus and %d links.",
+ action, nb_host, nb_link);
+ action->generic_action.using = 1;
+ action->generic_action.cost = amount;
+ action->generic_action.remains = amount;
+ action->generic_action.max_duration = NO_MAX_DURATION;
+ action->generic_action.start = surf_get_clock();
+ action->generic_action.finish = -1.0;
+ action->generic_action.model_type =
+ (surf_model_t) surf_workstation_model;
+ action->suspended = 0; /* Should be useless because of the
+ calloc but it seems to help valgrind... */
+ action->generic_action.state_set =
+ surf_workstation_model->common_public->states.running_action_set;
+
+ xbt_swag_insert(action, action->generic_action.state_set);
+ action->rate = rate;
+
+ if (action->rate > 0)
+ action->variable = lmm_variable_new(maxmin_system, action, 1.0, -1.0,
+ nb_host + nb_link);
+ else
+ action->variable =
+ lmm_variable_new(maxmin_system, action, 1.0, action->rate,
+ nb_host + nb_link);
+
+ for (i = 0; i < workstation_nb; i++)
+ if (computation_amount[i] > 0)
+ lmm_expand(maxmin_system,
+ ((cpu_KCCFLN05_t) workstation_list[i])->constraint,
+ action->variable, computation_amount[i]);
+
+ for (i = 0; i < workstation_nb; i++) {
+ for (j = 0; j < workstation_nb; j++) {
+ cpu_KCCFLN05_t card_src = workstation_list[i];
+ cpu_KCCFLN05_t card_dst = workstation_list[j];
+ int route_size = ROUTE(card_src->id, card_dst->id).size;
+ link_KCCFLN05_t *route =
+ ROUTE(card_src->id, card_dst->id).links;
+
+ for (k = 0; k < route_size; k++) {
+ if (communication_amount[i * workstation_nb + j] > 0) {
+ lmm_expand_add(maxmin_system, route[k]->constraint,
+ action->variable,
+ communication_amount[i * workstation_nb + j]);
+ }
+ }
+ }
+ }
+
+ if (nb_link + nb_host == 0) {
+ action->generic_action.cost = 1.0;
+ action->generic_action.remains = 0.0;
+ }
+
+ return (surf_action_t) action;
+}
+
+ /* Return the link array of the precomputed route from src to dst.
+  * Each element of the returned array is a link_KCCFLN05_t; the caller
+  * obtains its length via get_route_size(). The array is owned by the
+  * routing table and must not be freed by the caller. */
+ static const void **get_route(void *src, void *dst)
+ {
+   cpu_KCCFLN05_t from = src;
+   cpu_KCCFLN05_t to = dst;
+
+   /* ROUTE() indexes the global routing table by workstation ids. */
+   return (const void **) ROUTE(from->id, to->id).links;
+ }
+
+ /* Return the number of links on the precomputed route from src to dst
+  * (i.e. the length of the array handed out by get_route()). */
+ static int get_route_size(void *src, void *dst)
+ {
+   cpu_KCCFLN05_t from = src;
+   cpu_KCCFLN05_t to = dst;
+
+   return ROUTE(from->id, to->id).size;
+ }
+
+static const char *get_link_name(const void *link)
+{
+ return ((link_KCCFLN05_t) link)->name;