# The order differs when executed with gcc's thread sanitizer
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/dag-dotload/sd_dag-dotload --log=no_loc ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/dag-dotload/dag.dot
+$ $SG_TEST_EXENV ${bindir:=.}/dag-dotload/sd_dag-dotload --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/dag-dotload/dag.dot
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [test/INFO] ------------------- Display all tasks of the loaded DAG ---------------------------
> [0.000000] [sd_task/INFO] Displaying task root
> [0.000000] [sd_task/INFO] 7->end
> [0.000000] [test/INFO] ------------------- Schedule tasks ---------------------------
> [0.000000] [test/INFO] ------------------- Run the schedule ---------------------------
-> [110.005082] [test/INFO] ------------------- Produce the trace file---------------------------
-> [110.005082] [test/INFO] Producing the trace of the run into dag.trace
+> [110.004162] [test/INFO] ------------------- Produce the trace file---------------------------
+> [110.004162] [test/INFO] Producing the trace of the run into dag.trace
$ cat ${srcdir:=.}/dag-dotload/dag.trace
> [0.000000->0.000000] node-0.acme.org compute 0.000000 flops # root
> [0.000000->10.000000] node-1.acme.org compute 10000000129.452715 flops # 0
-> [10.001680->20.001680] node-10.acme.org compute 10000000131.133657 flops # 1
-> [20.002360->30.002360] node-11.acme.org compute 10000000121.124870 flops # 2
-> [30.003040->40.003041] node-12.acme.org compute 10000000230.608025 flops # 3
-> [40.003041->50.003041] node-13.acme.org compute 10000000004.994019 flops # 4
-> [50.003721->60.003721] node-14.acme.org compute 10000000046.016401 flops # 5
-> [60.003721->70.003721] node-15.acme.org compute 10000000091.598791 flops # 6
-> [70.004401->80.004401] node-16.acme.org compute 10000000040.679438 flops # 7
-> [80.005081->90.005081] node-17.acme.org compute 10000000250.490017 flops # 8
-> [90.005081->100.005081] node-18.acme.org compute 10000000079.267649 flops # 9
-> [10.000000 -> 10.001680] node-1.acme.org -> node-10.acme.org transfer of 10001 bytes # 0->1
-> [20.001680 -> 20.002360] node-10.acme.org -> node-11.acme.org transfer of 10004 bytes # 1->2
-> [30.002360 -> 30.003040] node-11.acme.org -> node-12.acme.org transfer of 10002 bytes # 2->3
-> [50.003041 -> 50.003721] node-13.acme.org -> node-14.acme.org transfer of 10029 bytes # 4->5
-> [70.003721 -> 70.004401] node-15.acme.org -> node-16.acme.org transfer of 10005 bytes # 6->7
-> [80.004401 -> 80.005081] node-16.acme.org -> node-17.acme.org transfer of 10000 bytes # 7->8
-> [80.004401 -> 80.086113] node-16.acme.org -> node-0.acme.org transfer of 10014000 bytes # 7->end
-> [0.000000 -> 0.081712] node-0.acme.org -> node-14.acme.org transfer of 10014000 bytes # root->5
-> [100.005081->110.005082] node-0.acme.org compute 10000000129.452715 flops # end
+> [10.000680->20.000680] node-10.acme.org compute 10000000131.133657 flops # 1
+> [20.001360->30.001360] node-11.acme.org compute 10000000121.124870 flops # 2
+> [30.002040->40.002041] node-12.acme.org compute 10000000230.608025 flops # 3
+> [40.002041->50.002041] node-13.acme.org compute 10000000004.994019 flops # 4
+> [50.002721->60.002721] node-14.acme.org compute 10000000046.016401 flops # 5
+> [60.002721->70.002721] node-15.acme.org compute 10000000091.598791 flops # 6
+> [70.003401->80.003401] node-16.acme.org compute 10000000040.679438 flops # 7
+> [80.004161->90.004161] node-17.acme.org compute 10000000250.490017 flops # 8
+> [90.004161->100.004161] node-18.acme.org compute 10000000079.267649 flops # 9
+> [10.000000 -> 10.000680] node-1.acme.org -> node-10.acme.org transfer of 10001 bytes # 0->1
+> [20.000680 -> 20.001360] node-10.acme.org -> node-11.acme.org transfer of 10004 bytes # 1->2
+> [30.001360 -> 30.002040] node-11.acme.org -> node-12.acme.org transfer of 10002 bytes # 2->3
+> [50.002041 -> 50.002721] node-13.acme.org -> node-14.acme.org transfer of 10029 bytes # 4->5
+> [70.002721 -> 70.003401] node-15.acme.org -> node-16.acme.org transfer of 10005 bytes # 6->7
+> [80.003401 -> 80.004161] node-16.acme.org -> node-17.acme.org transfer of 10000 bytes # 7->8
+> [80.003401 -> 80.084193] node-16.acme.org -> node-0.acme.org transfer of 10014000 bytes # 7->end
+> [0.000000 -> 0.080712] node-0.acme.org -> node-14.acme.org transfer of 10014000 bytes # root->5
+> [100.004161->110.004162] node-0.acme.org compute 10000000129.452715 flops # end
$ rm -f ${srcdir:=.}/dag-dotload/dag.trace ${srcdir:=.}/dot.dot
! expect return 2
-$ $SG_TEST_EXENV ${bindir:=.}/dag-dotload/sd_dag-dotload --log=no_loc ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/dag-dotload/dag_with_cycle.dot
+$ $SG_TEST_EXENV ${bindir:=.}/dag-dotload/sd_dag-dotload --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/dag-dotload/dag_with_cycle.dot
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [sd_daxparse/WARNING] the task root is not marked
> [0.000000] [sd_daxparse/WARNING] the task 1 is in a cycle
p Test the DAX loader on a small DAX instance
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/daxload/sd_daxload --log=no_loc ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/daxload/smalldax.xml
+$ $SG_TEST_EXENV ${bindir:=.}/daxload/sd_daxload --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/daxload/smalldax.xml
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [sd_daxparse/WARNING] Ignore file o1 size redefinition from 1000000 to 304
> [0.000000] [sd_daxparse/WARNING] Ignore file o2 size redefinition from 1000000 to 304
> [0.000000] [sd_task/INFO] 3@task1_o3_end
> [0.000000] [test/INFO] ------------------- Schedule tasks ---------------------------
> [0.000000] [test/INFO] ------------------- Run the schedule ---------------------------
-> [84.068138] [test/INFO] ------------------- Produce the trace file---------------------------
-> [84.068138] [test/INFO] Producing the trace of the run into smalldax.trace
+> [84.067138] [test/INFO] ------------------- Produce the trace file---------------------------
+> [84.067138] [test/INFO] Producing the trace of the run into smalldax.trace
$ cat ${srcdir:=.}/daxload/smalldax.trace
> [0.000000] node-0.acme.org compute 0.000000 # root
-> [0.015600] node-1.acme.org compute 42000000000.000000 # 1@task1
+> [0.016600] node-1.acme.org compute 42000000000.000000 # 1@task1
> [0.016600] node-10.acme.org compute 42000000000.000000 # 2@task2
> [42.033200] node-11.acme.org compute 42000000000.000000 # 3@task1
> [0.000000] node-0.acme.org send node-10.acme.org 1000000.000000 # root_i2_2@task2
> [0.016600] node-10.acme.org recv node-0.acme.org 1000000.000000 # root_i2_2@task2
-> [42.015600] node-1.acme.org send node-11.acme.org 1000000.000000 # 1@task1_o1_3@task1
+> [42.016600] node-1.acme.org send node-11.acme.org 1000000.000000 # 1@task1_o1_3@task1
> [42.033200] node-11.acme.org recv node-1.acme.org 1000000.000000 # 1@task1_o1_3@task1
> [42.016600] node-10.acme.org send node-11.acme.org 1000000.000000 # 2@task2_o2_3@task1
> [42.033200] node-11.acme.org recv node-10.acme.org 1000000.000000 # 2@task2_o2_3@task1
> [84.033200] node-11.acme.org send node-0.acme.org 4167312.000000 # 3@task1_o3_end
-> [84.068138] node-0.acme.org recv node-11.acme.org 4167312.000000 # 3@task1_o3_end
+> [84.067138] node-0.acme.org recv node-11.acme.org 4167312.000000 # 3@task1_o3_end
> [0.000000] node-0.acme.org send node-1.acme.org 1000000.000000 # root_i1_1@task1
-> [0.015600] node-1.acme.org recv node-0.acme.org 1000000.000000 # root_i1_1@task1
-> [84.068138] node-0.acme.org compute 0.000000 # end
+> [0.016600] node-1.acme.org recv node-0.acme.org 1000000.000000 # root_i1_1@task1
+> [84.067138] node-0.acme.org compute 0.000000 # end
$ cmake -E remove -f ${srcdir:=.}/dax.dot ${srcdir:=.}/daxload/smalldax.trace
p Test the DAX loader with a DAX comprising a cycle.
! expect return 255
-$ $SG_TEST_EXENV ${bindir:=.}/daxload/sd_daxload --log=no_loc ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/daxload/simple_dax_with_cycle.xml
+$ $SG_TEST_EXENV ${bindir:=.}/daxload/sd_daxload --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/daxload/simple_dax_with_cycle.xml
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [sd_daxparse/WARNING] the task root is not marked
> [0.000000] [sd_daxparse/WARNING] the task 1@task1 is in a cycle
# The order differs when executed with gcc's thread sanitizer
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/ptg-dotload/sd_ptg-dotload ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/ptg-dotload/ptg.dot
+$ $SG_TEST_EXENV ${bindir:=.}/ptg-dotload/sd_ptg-dotload ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/ptg-dotload/ptg.dot
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [test/INFO] ------------------- Display all tasks of the loaded DAG ---------------------------
> [0.000000] [sd_task/INFO] Displaying task root
> [0.000000] [sd_task/INFO] c3->end
> [0.000000] [test/INFO] ------------------- Schedule tasks ---------------------------
> [0.000000] [test/INFO] ------------------- Run the schedule ---------------------------
-> [3.126200] [test/INFO] Makespan: 3.126200
+> [2.931978] [test/INFO] Makespan: 2.931978
p Test the loader of DAGs written in the DOT format
! expect return 2
-$ $SG_TEST_EXENV ${bindir:=.}/schedule-dotload/sd_schedule-dotload --log=no_loc "--log=sd_dotparse.thres:verbose" ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/schedule-dotload/dag_with_bad_schedule.dot
+$ $SG_TEST_EXENV ${bindir:=.}/schedule-dotload/sd_schedule-dotload --log=no_loc "--log=sd_dotparse.thres:verbose" ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/schedule-dotload/dag_with_bad_schedule.dot
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [sd_dotparse/VERBOSE] The schedule is ignored, task 'end' can not be scheduled on -1 hosts
> [0.000000] [sd_dotparse/VERBOSE] The schedule is ignored, task '1' can not be scheduled on 0 hosts
# The order differs when executed with gcc's thread sanitizer
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/schedule-dotload/sd_schedule-dotload --log=no_loc ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/schedule-dotload/dag_with_good_schedule.dot
+$ $SG_TEST_EXENV ${bindir:=.}/schedule-dotload/sd_schedule-dotload --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/schedule-dotload/dag_with_good_schedule.dot
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [test/INFO] ------------------- Display all tasks of the loaded DAG ---------------------------
> [0.000000] [sd_task/INFO] Displaying task root
# We need to sort the output because the order changes with the sanitizers (at least)
! output sort
-$ $SG_TEST_EXENV ./throttling/sd_throttling ${srcdir:=.}/../platforms/two_clusters.xml
+$ $SG_TEST_EXENV ./throttling/sd_throttling ${srcdir:=.}/../platforms/cluster.xml
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [5.000000] [sd_comm_throttling/INFO] Simulation stopped after 5.0000 seconds
> [5.000000] [sd_comm_throttling/INFO] Task 'Task A' start time: 0.000000, finish time: 5.000000
p Usage test of simdag's typed tasks
! output sort
-$ $SG_TEST_EXENV ./typed_tasks/sd_typed_tasks ${srcdir:=.}/../platforms/two_clusters.xml
+$ $SG_TEST_EXENV ./typed_tasks/sd_typed_tasks ${srcdir:=.}/../platforms/cluster.xml
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [2.080600] [sd_typed_tasks_test/INFO] Task 'Par. Comp. 3' start time: 0.000000, finish time: 0.400000
> [2.080600] [sd_typed_tasks_test/INFO] Task 'Par. Comp. 1' start time: 0.000000, finish time: 0.400000
> [2.080600] [sd_typed_tasks_test/INFO] Task 'Seq. comp. 1' start time: 0.000000, finish time: 1.000000
-> [2.080600] [sd_typed_tasks_test/INFO] Task 'MxN redist' start time: 0.400000, finish time: 0.721600
-> [2.080600] [sd_typed_tasks_test/INFO] Task 'Par. Comp. 2' start time: 0.721600, finish time: 0.921600
+> [2.080600] [sd_typed_tasks_test/INFO] Task 'MxN redist' start time: 0.400000, finish time: 0.720600
+> [2.080600] [sd_typed_tasks_test/INFO] Task 'Par. Comp. 2' start time: 0.720600, finish time: 0.920600
> [2.080600] [sd_typed_tasks_test/INFO] Task 'E2E comm.' start time: 1.000000, finish time: 1.080600
> [2.080600] [sd_typed_tasks_test/INFO] Task 'Seq. comp 2.' start time: 1.080600, finish time: 2.080600
! output sort
p Test separate clusters
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/two_clusters.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
-> [rank 0] -> node-0.acme.org
-> [rank 1] -> node-1.acme.org
-> [rank 2] -> node-2.acme.org
-> [rank 3] -> node-3.acme.org
-> [rank 4] -> node-4.acme.org
-> [rank 5] -> node-5.acme.org
-> [rank 6] -> node-6.acme.org
-> [rank 7] -> node-7.acme.org
-> [rank 8] -> node-8.acme.org
-> [rank 9] -> node-9.acme.org
-> [rank 10] -> node-10.acme.org
-> [rank 11] -> node-11.acme.org
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -platform ../../../examples/platforms/cluster_backbone.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+> [rank 0] -> node-0.1core.org
+> [rank 1] -> node-1.1core.org
+> [rank 2] -> node-2.1core.org
+> [rank 3] -> node-3.1core.org
+> [rank 4] -> node-4.1core.org
+> [rank 5] -> node-5.1core.org
+> [rank 6] -> node-6.1core.org
+> [rank 7] -> node-7.1core.org
+> [rank 8] -> node-0.2cores.org
+> [rank 9] -> node-1.2cores.org
+> [rank 10] -> node-2.2cores.org
+> [rank 11] -> node-3.2cores.org
> [0] sndbuf=[0 1 2 3 4 5 6 7 8 9 10 11 ]
> [1] sndbuf=[12 13 14 15 16 17 18 19 20 21 22 23 ]
> [2] sndbuf=[24 25 26 27 28 29 30 31 32 33 34 35 ]