X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/a3848e5ea6b2305d6059c89705edaccbec356455..1663424e3fb120aa27df004680f4116b51e07157:/examples/msg/actions/actions.tesh

diff --git a/examples/msg/actions/actions.tesh b/examples/msg/actions/actions.tesh
index 250d7037d6..2a63d9a739 100644
--- a/examples/msg/actions/actions.tesh
+++ b/examples/msg/actions/actions.tesh
@@ -1,91 +1,91 @@
 # A little tesh file testing most MPI-related actions
 ! output sort
-$ ${bindir:=.}/actions --log=actions.thres=verbose homogeneous_3_hosts.xml deployment_split.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment_split.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
 > WARNING: THIS BINARY IS KINDA DEPRECATED
-> [ 10.831247] (1:p0@host0) p0 recv p1 10.831247
-> [ 10.831247] (2:p1@host1) p1 send p0 1e10 10.831247
-> [ 10.831248] (1:p0@host0) p0 compute 12 0.000001
-> [ 22.831247] (0:@) Simulation time 22.8312
-> [ 22.831247] (2:p1@host1) p1 sleep 12 12.000000
+> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+> [ 20.703314] (1:p0@Tremblay) p0 recv p1 20.703314
+> [ 20.703314] (2:p1@Ruby) p1 send p0 1e10 20.703314
+> [ 20.703314] (1:p0@Tremblay) p0 compute 12 0.000000
+> [ 32.703314] (2:p1@Ruby) p1 sleep 12 12.000000
+> [ 32.703314] (0:@) Simulation time 32.7033
 ! output sort
-$ ${bindir:=.}/actions --log=actions.thres=verbose homogeneous_3_hosts.xml deployment.xml actions_allReduce.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml actions_allReduce.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
 > WARNING: THIS BINARY IS KINDA DEPRECATED
-> [ 0.000000] (1:p0@host0) p0 comm_size 3 0.000000
-> [ 51.095484] (1:p0@host0) p0 allReduce 5e8 5e8 51.095484
-> [ 51.095484] (2:p1@host1) p1 allReduce 5e8 5e8 51.095484
-> [ 51.095484] (3:p2@host2) p2 allReduce 5e8 5e8 51.095484
-> [101.095484] (0:@) Simulation time 101.095
-> [101.095484] (1:p0@host0) p0 compute 5e8 50.000000
-> [101.095484] (2:p1@host1) p1 compute 5e8 50.000000
-> [101.095484] (3:p2@host2) p2 compute 5e8 50.000000
+> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+> [ 0.000000] (1:p0@Tremblay) p0 comm_size 3 0.000000
+> [ 7.171139] (2:p1@Ruby) p1 allReduce 5e8 5e8 7.171139
+> [ 7.171139] (3:p2@Perl) p2 allReduce 5e8 5e8 7.171139
+> [ 7.171139] (1:p0@Tremblay) p0 allReduce 5e8 5e8 7.171139
+> [ 12.268239] (2:p1@Ruby) p1 compute 5e8 5.097100
+> [ 12.268239] (1:p0@Tremblay) p0 compute 5e8 5.097100
+> [ 12.268239] (3:p2@Perl) p2 compute 5e8 5.097100
+> [ 12.268239] (0:@) Simulation time 12.2682
 ! output sort
-$ ${bindir:=.}/actions --log=actions.thres=verbose homogeneous_3_hosts.xml deployment.xml actions_barrier.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml actions_barrier.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
 > WARNING: THIS BINARY IS KINDA DEPRECATED
-> [ 0.000000] (1:p0@host0) p0 comm_size 3 0.000000
-> [ 0.000000] (2:p1@host1) p1 comm_size 3 0.000000
-> [ 0.000000] (3:p2@host2) p2 comm_size 3 0.000000
-> [ 0.017330] (1:p0@host0) p0 send p1 1E7 0.017330
-> [ 0.017330] (2:p1@host1) p1 recv p0 0.017330
-> [ 0.417330] (2:p1@host1) p1 compute 4E6 0.400000
-> [ 0.417330] (3:p2@host2) p2 compute 4E6 0.400000
-> [ 0.467330] (0:@) Simulation time 0.46733
-> [ 0.467330] (1:p0@host0) p0 compute 4.5E6 0.450000
+> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+> [ 0.000000] (1:p0@Tremblay) p0 comm_size 3 0.000000
+> [ 0.000000] (2:p1@Ruby) p1 comm_size 3 0.000000
+> [ 0.000000] (3:p2@Perl) p2 comm_size 3 0.000000
+> [ 0.022653] (2:p1@Ruby) p1 recv p0 0.022653
+> [ 0.022653] (1:p0@Tremblay) p0 send p1 1E7 0.022653
+> [ 0.063430] (3:p2@Perl) p2 compute 4E6 0.040777
+> [ 0.063430] (2:p1@Ruby) p1 compute 4E6 0.040777
+> [ 0.068527] (1:p0@Tremblay) p0 compute 4.5E6 0.045874
+> [ 0.068527] (0:@) Simulation time 0.0685268
 ! output sort
-$ ${bindir:=.}/actions --log=actions.thres=verbose homogeneous_3_hosts.xml deployment.xml actions_bcast.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml actions_bcast.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
 > WARNING: THIS BINARY IS KINDA DEPRECATED
-> [ 0.000000] (1:p0@host0) p0 comm_size 3 0.000000
-> [ 0.547742] (1:p0@host0) p0 bcast 5e8 0.547742
-> [ 0.547742] (2:p1@host1) p1 bcast 5e8 0.547742
-> [ 0.547742] (3:p2@host2) p2 bcast 5e8 0.547742
-> [ 20.547742] (2:p1@host1) p1 compute 2e8 20.000000
-> [ 50.547742] (1:p0@host0) p0 compute 5e8 50.000000
-> [ 50.547742] (3:p2@host2) p2 compute 5e8 50.000000
-> [ 51.095484] (1:p0@host0) p0 bcast 5e8 0.547742
-> [ 51.095484] (2:p1@host1) p1 bcast 5e8 30.547742
-> [ 51.095484] (3:p2@host2) p2 bcast 5e8 0.547742
-> [ 71.095484] (2:p1@host1) p1 compute 2e8 20.000000
-> [101.095484] (1:p0@host0) p0 compute 5e8 50.000000
-> [101.095484] (3:p2@host2) p2 compute 5e8 50.000000
-> [101.643226] (2:p1@host1) p1 reduce 5e8 5e8 30.547742
-> [101.643226] (3:p2@host2) p2 reduce 5e8 5e8 0.547742
-> [151.643226] (0:@) Simulation time 151.643
-> [151.643226] (1:p0@host0) p0 reduce 5e8 5e8 50.547742
+> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+> [ 0.000000] (1:p0@Tremblay) p0 comm_size 3 0.000000
+> [ 1.037020] (2:p1@Ruby) p1 bcast 5e8 1.037020
+> [ 1.037020] (3:p2@Perl) p2 bcast 5e8 1.037020
+> [ 1.037020] (1:p0@Tremblay) p0 bcast 5e8 1.037020
+> [ 3.075860] (2:p1@Ruby) p1 compute 2e8 2.038840
+> [ 6.134119] (1:p0@Tremblay) p0 compute 5e8 5.097100
+> [ 6.134119] (3:p2@Perl) p2 compute 5e8 5.097100
+> [ 7.171139] (2:p1@Ruby) p1 bcast 5e8 4.095279
+> [ 7.171139] (3:p2@Perl) p2 bcast 5e8 1.037020
+> [ 7.171139] (1:p0@Tremblay) p0 bcast 5e8 1.037020
+> [ 9.209979] (2:p1@Ruby) p1 compute 2e8 2.038840
+> [ 12.268239] (1:p0@Tremblay) p0 compute 5e8 5.097100
+> [ 12.268239] (3:p2@Perl) p2 compute 5e8 5.097100
+> [ 13.305258] (2:p1@Ruby) p1 reduce 5e8 5e8 4.095279
+> [ 13.305258] (3:p2@Perl) p2 reduce 5e8 5e8 1.037020
+> [ 18.402358] (1:p0@Tremblay) p0 reduce 5e8 5e8 6.134119
+> [ 18.402358] (0:@) Simulation time 18.4024
 ! output sort
-$ ${bindir:=.}/actions --log=actions.thres=verbose homogeneous_3_hosts.xml deployment.xml actions_reduce.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml actions_reduce.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
 > WARNING: THIS BINARY IS KINDA DEPRECATED
-> [ 0.000000] (1:p0@host0) p0 comm_size 3 0.000000
-> [ 0.547742] (2:p1@host1) p1 reduce 5e8 5e8 0.547742
-> [ 0.547742] (3:p2@host2) p2 reduce 5e8 5e8 0.547742
-> [ 50.547742] (1:p0@host0) p0 reduce 5e8 5e8 50.547742
-> [ 50.547742] (2:p1@host1) p1 compute 5e8 50.000000
-> [ 50.547742] (3:p2@host2) p2 compute 5e8 50.000000
-> [100.547742] (0:@) Simulation time 100.548
-> [100.547742] (1:p0@host0) p0 compute 5e8 50.000000
+> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+> [ 0.000000] (1:p0@Tremblay) p0 comm_size 3 0.000000
+> [ 1.037020] (2:p1@Ruby) p1 reduce 5e8 5e8 1.037020
+> [ 1.037020] (3:p2@Perl) p2 reduce 5e8 5e8 1.037020
+> [ 6.134119] (2:p1@Ruby) p1 compute 5e8 5.097100
+> [ 6.134119] (1:p0@Tremblay) p0 reduce 5e8 5e8 6.134119
+> [ 6.134119] (3:p2@Perl) p2 compute 5e8 5.097100
+> [ 11.231219] (1:p0@Tremblay) p0 compute 5e8 5.097100
+> [ 11.231219] (0:@) Simulation time 11.2312
 ! output sort
-$ ${bindir:=.}/actions --log=actions.thres=verbose homogeneous_3_hosts.xml deployment.xml actions_with_isend.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+$ ${bindir:=.}/actions --log=actions.thres=verbose ${srcdir:=.}/../../platforms/small_platform_fatpipe.xml deployment.xml actions_with_isend.txt "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
 > WARNING: THIS BINARY IS KINDA DEPRECATED
-> [ 0.000000] (2:p1@host1) p1 Irecv p0 0.000000
-> [ 0.000000] (3:p2@host2) p2 Irecv p1 0.000000
-> [ 1.088979] (1:p0@host0) p0 send p1 1e9 1.088979
-> [ 50.000000] (3:p2@host2) p2 compute 5e8 50.000000
-> [100.000000] (2:p1@host1) p1 compute 1e9 100.000000
-> [100.000000] (2:p1@host1) p1 wait 0.000000
-> [101.088979] (1:p0@host0) p0 compute 1e9 100.000000
-> [101.088979] (2:p1@host1) p1 send p2 1e9 1.088979
-> [101.088979] (3:p2@host2) p2 wait 51.088979
-> [101.088979] (3:p2@host2) p2 Isend p0 1e9 0.000000
-> [102.177958] (1:p0@host0) p0 recv p2 1.088979
-> [151.088979] (0:@) Simulation time 151.089
-> [151.088979] (3:p2@host2) p2 compute 5e8 50.000000
+> This example is still relevant if you want to learn about MSG-based trace replay, but if you want to simulate MPI-like traces, you should use the newer version that is in the examples/smpi/replay directory instead.
+> [ 0.000000] (2:p1@Ruby) p1 Irecv p0 0.000000
+> [ 0.000000] (3:p2@Perl) p2 Irecv p1 0.000000
+> [ 2.072088] (1:p0@Tremblay) p0 send p1 1e9 2.072088
+> [ 5.097100] (3:p2@Perl) p2 compute 5e8 5.097100
+> [ 10.194200] (2:p1@Ruby) p1 compute 1e9 10.194200
+> [ 10.194200] (2:p1@Ruby) p1 wait 0.000000
+> [ 12.266287] (1:p0@Tremblay) p0 compute 1e9 10.194200
+> [ 12.266287] (3:p2@Perl) p2 wait 7.169187
+> [ 12.266287] (2:p1@Ruby) p1 send p2 1e9 2.072088
+> [ 12.266287] (3:p2@Perl) p2 Isend p0 1e9 0.000000
+> [ 14.338375] (1:p0@Tremblay) p0 recv p2 2.072088
+> [ 17.363387] (3:p2@Perl) p2 compute 5e8 5.097100
+> [ 17.363387] (0:@) Simulation time 17.3634