SD_task_destroy(t);
}
+/** @brief loads a DAX file describing a DAG
+ *
+ * See https://confluence.pegasus.isi.edu/display/pegasus/WorkflowGenerator
+ * for more details.
+ */
xbt_dynar_t SD_daxload(const char*filename) {
FILE* in_file = fopen(filename,"r");
xbt_assert1(in_file, "Unable to open \"%s\"\n", filename);
/* NOTE(review): 'result', 'files', 'root_task' and 'end_task' appear to be
   file-scope globals shared with the lexer callbacks (STag_dax__job etc.) —
   confirm against the full file. */
result = xbt_dynar_new(sizeof(SD_task_t),dax_task_free);
files=xbt_dict_new();
- root_task = SD_task_create("root",NULL,0);
+ root_task = SD_task_create_comp_seq("root",NULL,0);
xbt_dynar_push(result,&root_task);
- end_task = SD_task_create("end",NULL,0);
+ end_task = SD_task_create_comp_seq("end",NULL,0);
xbt_assert2(!dax_lex(),"Parse error in %s: %s",filename,dax__parse_err_msg());
dax__delete_buffer(input_buffer);
/* NOTE(review): neither fclose(in_file) nor 'return result;' is visible in
   this hunk — presumably elided diff context rather than a real leak /
   missing return; verify against the full file. */
}
void STag_dax__job(void) {
double runtime = dax_parse_double(A_dax__job_runtime);
/* Tasks are named "id@name", presumably so task names stay unique and
   readable; bprintf allocates, and SD_task_create_comp_seq is assumed to
   copy the string (hence the free() below) — confirm its semantics. */
+ char *name=bprintf("%s@%s",A_dax__job_id,A_dax__job_name);
runtime*=4200000000.; /* Assume that timings were done on a 4.2GFlops machine. I mean, why not? */
// INFO3("See <job id=%s runtime=%s %.0f>",A_dax__job_id,A_dax__job_runtime,runtime);
- current_job = SD_task_create_comp_seq(A_dax__job_id,NULL,runtime);
+ current_job = SD_task_create_comp_seq(name,NULL,runtime);
+ free(name);
/* Fixed mojibake: "¤t_job" was the HTML entity "&curren;" swallowing the
   start of "&current_job"; restored the address-of operator required by
   xbt_dynar_push. */
xbt_dynar_push(result,&current_job);
}