### MC ###
IF(HAVE_MC)
- ADD_TESH(page_store --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/mc --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/mc page_store.tesh)
ADD_TESH(tesh-mc-dwarf --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/mc/dwarf --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/mc/dwarf dwarf.tesh)
ADD_TESH(tesh-mc-dwarf-expression --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/mc/dwarf_expression --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/mc/dwarf_expression dwarf_expression.tesh)
ADD_TESH_FACTORIES(mc-bugged2 "ucontext;raw" --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged2.tesh)
IF(CONTEXT_UCONTEXT AND PROCESSOR_x86_64) # liveness model-checking works only on 64bits (for now ...)
ADD_TESH(mc-bugged1-liveness-ucontext --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness.tesh)
+ ADD_TESH(mc-bugged1-liveness-ucontext-sparse --cfg contexts/factory:ucontext --cfg model-check/sparse-checkpoint:yes --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_sparse.tesh)
ADD_TESH(mc-bugged1-liveness-visited-ucontext --cfg contexts/factory:ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_visited.tesh)
+ ADD_TESH(mc-bugged1-liveness-visited-ucontext-sparse --cfg contexts/factory:ucontext --cfg model-check/sparse-checkpoint:yes --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_visited_sparse.tesh)
ENDIF()
ENDIF()
FOREACH (ALLTOALL_COLL 2dmesh 3dmesh pair pair_one_barrier pair_light_barrier
pair_mpi_barrier rdb ring ring_light_barrier
ring_mpi_barrier ring_one_barrier
- bruck basic_linear ompi mpich mvapich2)
+ bruck basic_linear ompi mpich mvapich2 mvapich2_scatter_dest)
ADD_TESH(tesh-smpi-alltoall-coll-${ALLTOALL_COLL} --cfg smpi/alltoall:${ALLTOALL_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/alltoall --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/alltoall alltoall_coll.tesh)
ENDFOREACH()
FOREACH (ALLTOALLV_COLL default pair pair_light_barrier pair_mpi_barrier
scatter_rdb_allgather SMP_binary SMP_binomial SMP_linear ompi mpich ompi_split_bintree ompi_pipeline mvapich2)
ADD_TESH(tesh-smpi-bcast-coll-${BCAST_COLL} --cfg smpi/bcast:${BCAST_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/bcast --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/bcast bcast_coll.tesh)
ENDFOREACH()
- FOREACH (REDUCE_COLL default arrival_pattern_aware binomial flat_tree NTSL scatter_gather ompi mpich ompi_chain ompi_binary ompi_basic_linear ompi_binomial ompi_in_order_binary mvapich2)
+ FOREACH (REDUCE_COLL default arrival_pattern_aware binomial flat_tree NTSL scatter_gather ompi mpich ompi_chain ompi_binary ompi_basic_linear ompi_binomial ompi_in_order_binary mvapich2 mvapich2_knomial)
ADD_TESH(tesh-smpi-reduce-coll-${REDUCE_COLL} --cfg smpi/reduce:${REDUCE_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/reduce --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/reduce reduce_coll.tesh)
ENDFOREACH()
FOREACH (REDUCE_SCATTER_COLL default ompi mpich ompi_basic_recursivehalving ompi_ring mpich_noncomm mpich_pair mvapich2 mpich_rdb)
src/smpi/colls/alltoall-ring-mpi-barrier.c
src/smpi/colls/alltoall-ring-one-barrier.c
src/smpi/colls/alltoall-ring.c
+ src/smpi/colls/alltoall-mvapich-scatter-dest.c
src/smpi/colls/alltoallv-bruck.c
src/smpi/colls/alltoallv-ompi-basic-linear.c
src/smpi/colls/alltoallv-pair-light-barrier.c
src/smpi/colls/reduce-scatter-gather.c
src/smpi/colls/reduce_scatter-mpich.c
src/smpi/colls/reduce_scatter-ompi.c
+ src/smpi/colls/reduce-mvapich-knomial.c
src/smpi/colls/scatter-ompi.c
src/smpi/colls/smpi_automatic_selector.c
src/smpi/colls/smpi_mpich_selector.c
set(optCFLAGS "-O0 ")
# But you can still optimize this:
foreach(s
- src/xbt/mmalloc/mm.c
- src/xbt/snprintf.c src/xbt/log.c
- # For some reason, this fails to work when optimizing dynar.c:
+ # src/xbt/mmalloc/mm.c
+ # src/xbt/snprintf.c src/xbt/log.c
# src/xbt/dynar.c
- src/xbt/set.c src/xbt/setset.c src/xbt/backtrace_linux.c
+ # src/xbt/set.c src/xbt/setset.c
+ # src/xbt/backtrace_linux.c
src/mc/mc_dwarf_expression.c src/mc/mc_dwarf.c src/mc/mc_member.c
src/mc/mc_snapshot.c src/mc/mc_page_store.cpp src/mc/mc_page_snapshot.cpp
src/mc/mc_compare.cpp src/mc/mc_diff.c
${CMAKE_CURRENT_BINARY_DIR}/src/simgrid_units_main.c
)
+if(HAVE_MC)
+ set(TEST_CFILES ${TEST_CFILES}
+ src/mc/mc_page_store.cpp
+ src/mc/mc_snapshot.c
+ )
+ set(TEST_UNITS ${TEST_UNITS}
+ ${CMAKE_CURRENT_BINARY_DIR}/src/mc_page_store_unit.cpp
+ ${CMAKE_CURRENT_BINARY_DIR}/src/mc_snapshot_unit.c
+ )
+endif()
+
ADD_CUSTOM_COMMAND(
OUTPUT ${TEST_UNITS}
--- /dev/null
+#! ./tesh
+
+! expect signal SIGABRT
+! timeout 20
+$ ${bindir:=.}/bugged1_liveness ${srcdir:=.}/../msg_platform.xml ${srcdir:=.}/deploy_bugged1_liveness.xml --cfg=model-check:1 "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" --cfg=contexts/factory:ucontext --cfg=contexts/stack_size:256
+> [ 0.000000] (0:@) Configuration change: Set 'model-check' to '1'
+> [ 0.000000] (0:@) Configuration change: Set 'model-check/sparse-checkpoint' to 'yes'
+> [ 0.000000] (0:@) Check the liveness property promela_bugged1_liveness
+> [ 0.000000] (0:@) Get debug information ...
+> [ 0.000000] (0:@) Get debug information done !
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (3:client@Fafard) Ask the request
+> [ 0.000000] (2:client@Boivin) Propositions changed : r=1, cs=0
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (3:client@Fafard) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (3:client@Fafard) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (0:@) Pair 21 already reached (equal to pair 9) !
+> [ 0.000000] (0:@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
+> [ 0.000000] (0:@) | ACCEPTANCE CYCLE |
+> [ 0.000000] (0:@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
+> [ 0.000000] (0:@) Counter-example that violates formula :
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iRecv(dst=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] iSend(src=(3)Fafard (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iSend(src=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(3)Fafard (client)] iRecv(dst=(3)Fafard (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (3)Fafard (client)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (3)Fafard (client)])
+> [ 0.000000] (0:@) [(3)Fafard (client)] iSend(src=(3)Fafard (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(3)Fafard (client)] iSend(src=(3)Fafard (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) Expanded pairs = 21
+> [ 0.000000] (0:@) Visited pairs = 21
+> [ 0.000000] (0:@) Executed transitions = 20
+> [ 0.000000] (0:@) Counter-example depth : 20
--- /dev/null
+#! ./tesh
+
+! expect signal SIGABRT
+! timeout 90
+$ ${bindir:=.}/bugged1_liveness ${srcdir:=.}/../msg_platform.xml ${srcdir:=.}/deploy_bugged1_liveness_visited.xml --cfg=model-check:1 "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" --cfg=contexts/factory:ucontext --cfg=model-check/visited:100 --cfg=contexts/stack_size:256
+> [ 0.000000] (0:@) Configuration change: Set 'model-check' to '1'
+> [ 0.000000] (0:@) Configuration change: Set 'model-check/visited' to '100'
+> [ 0.000000] (0:@) Configuration change: Set 'model-check/sparse-checkpoint' to 'yes'
+> [ 0.000000] (0:@) Check the liveness property promela_bugged1_liveness
+> [ 0.000000] (0:@) Get debug information ...
+> [ 0.000000] (0:@) Get debug information done !
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (3:client@Fafard) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (3:client@Fafard) Propositions changed : r=1, cs=0
+> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
+> [ 0.000000] (2:client@Boivin) Ask the request
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
+> [ 0.000000] (0:@) Pair 57 already reached (equal to pair 45) !
+> [ 0.000000] (0:@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
+> [ 0.000000] (0:@) | ACCEPTANCE CYCLE |
+> [ 0.000000] (0:@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
+> [ 0.000000] (0:@) Counter-example that violates formula :
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iSend(src=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iRecv(dst=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iRecv(dst=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] iSend(src=(3)Fafard (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iSend(src=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iSend(src=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iRecv(dst=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(3)Fafard (client)] Wait(comm=(verbose only) [(3)Fafard (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iSend(src=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(2)Boivin (client)-> (1)Tremblay (coordinator)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iRecv(dst=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(1)Tremblay (coordinator)] iRecv(dst=(1)Tremblay (coordinator), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) [(2)Boivin (client)] Wait(comm=(verbose only) [(1)Tremblay (coordinator)-> (2)Boivin (client)])
+> [ 0.000000] (0:@) [(2)Boivin (client)] iSend(src=(2)Boivin (client), buff=(verbose only), size=(verbose only))
+> [ 0.000000] (0:@) Expanded pairs = 57
+> [ 0.000000] (0:@) Visited pairs = 208
+> [ 0.000000] (0:@) Executed transitions = 207
+> [ 0.000000] (0:@) Counter-example depth : 50
int mmalloc_compare_heap(struct s_mc_snapshot* snapshot1, struct s_mc_snapshot* snapshot2);
int mmalloc_linear_compare_heap(xbt_mheap_t heap1, xbt_mheap_t heap2);
int init_heap_information(xbt_mheap_t heap1, xbt_mheap_t heap2, xbt_dynar_t to_ignore1, xbt_dynar_t to_ignore2);
-int compare_heap_area(void *area1, void* area2, struct s_mc_snapshot* snapshot1, struct s_mc_snapshot* snapshot2, xbt_dynar_t previous, struct s_dw_type *type, int pointer_level);
+int compare_heap_area(int process_index, void *area1, void* area2, struct s_mc_snapshot* snapshot1, struct s_mc_snapshot* snapshot2, xbt_dynar_t previous, struct s_dw_type *type, int pointer_level);
void reset_heap_information(void);
size_t mmalloc_get_bytes_used(xbt_mheap_t);
void *context;
size_t size;
int block;
+ int process_index;
}s_stack_region_t, *stack_region_t;
void heap_ignore_region_free(mc_heap_ignore_region_t r);
XBT_PUBLIC(void) MC_remove_ignore_heap(void *address, size_t size);
XBT_PUBLIC(void) MC_ignore_local_variable(const char *var_name, const char *frame);
XBT_PUBLIC(void) MC_ignore_global_variable(const char *var_name);
-void MC_new_stack_area(void *stack, char *name, void *context, size_t size);
+XBT_PUBLIC(void) MC_new_stack_area(void *stack, smx_process_t process, void *context, size_t size);
/********************************* Memory *************************************/
XBT_PUBLIC(void) MC_memory_init(void); /* Initialize the memory subsystem */
#define _GNU_SOURCE
#define UNW_LOCAL_ONLY
+
#include <string.h>
#include <link.h>
+
+#include "internal_config.h"
#include "mc_private.h"
#include "xbt/module.h"
#include <xbt/mmalloc.h>
local_variable_free((local_variable_t) * (void **) v);
}
-static void MC_region_destroy(mc_mem_region_t reg)
+void MC_region_destroy(mc_mem_region_t reg)
{
+ if (reg)
+ return;
+
//munmap(reg->data, reg->size);
xbt_free(reg->data);
if (reg->page_numbers) {
void MC_free_snapshot(mc_snapshot_t snapshot)
{
unsigned int i;
- for (i = 0; i < NB_REGIONS; i++)
+ for (i = 0; i < NB_REGIONS; i++) {
MC_region_destroy(snapshot->regions[i]);
+ }
xbt_free(snapshot->stack_sizes);
xbt_dynar_free(&(snapshot->stacks));
/******************************* Snapshot regions ********************************/
/*********************************************************************************/
-static mc_mem_region_t MC_region_new(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg)
+ static mc_mem_region_t mc_region_new_dense(int type, void *start_addr, void* permanent_addr, size_t size, mc_mem_region_t ref_reg)
{
- if (_sg_mc_sparse_checkpoint) {
- return mc_region_new_sparse(type, start_addr, size, ref_reg);
- }
-
mc_mem_region_t new_reg = xbt_new(s_mc_mem_region_t, 1);
new_reg->start_addr = start_addr;
+ new_reg->permanent_addr = permanent_addr;
new_reg->data = NULL;
new_reg->size = size;
new_reg->page_numbers = NULL;
new_reg->data = xbt_malloc(size);
- memcpy(new_reg->data, start_addr, size);
+ memcpy(new_reg->data, permanent_addr, size);
XBT_DEBUG("New region : type : %d, data : %p (real addr %p), size : %zu",
- type, new_reg->data, start_addr, size);
+ type, new_reg->data, permanent_addr, size);
return new_reg;
}
+/** @brief Take a snapshot of a given region
+ *
+ * @param type
+ * @param start_addr Address of the region in the simulated process
+ * @param permanent_addr Permanent address of this data (for privatized variables, this is the virtual address of the privatized mapping)
+ * @param size Size of the data*
+ * @param ref_reg Reference corresponding region
+ */
+static mc_mem_region_t MC_region_new(int type, void *start_addr, void* permanent_addr, size_t size, mc_mem_region_t ref_reg)
+{
+ if (_sg_mc_sparse_checkpoint) {
+ return mc_region_new_sparse(type, start_addr, permanent_addr, size, ref_reg);
+ } else {
+ return mc_region_new_dense(type, start_addr, permanent_addr, size, ref_reg);
+ }
+}
+
/** @brief Restore a region from a snapshot
*
* If we are using per page snapshots, it is possible to use the reference
/*FIXME: check if start_addr is still mapped, if it is not, then map it
before copying the data */
if (!reg->page_numbers) {
- memcpy(reg->start_addr, reg->data, reg->size);
+ memcpy(reg->permanent_addr, reg->data, reg->size);
} else {
mc_region_restore_sparse(reg, ref_reg);
}
}
static void MC_snapshot_add_region(mc_snapshot_t snapshot, int type,
- void *start_addr, size_t size)
+ void *start_addr, void* permanent_addr, size_t size)
{
mc_mem_region_t ref_reg =
mc_model_checker->parent_snapshot ? mc_model_checker->parent_snapshot->regions[type] : NULL;
- mc_mem_region_t new_reg = MC_region_new(type, start_addr, size, ref_reg);
+ mc_mem_region_t new_reg = MC_region_new(type, start_addr, start_addr, size, ref_reg);
snapshot->regions[type] = new_reg;
return;
}
static void MC_get_memory_regions(mc_snapshot_t snapshot)
{
- size_t i;
void *start_heap = ((xbt_mheap_t) std_heap)->base;
void *end_heap = ((xbt_mheap_t) std_heap)->breakval;
- MC_snapshot_add_region(snapshot, 0, start_heap,
+ MC_snapshot_add_region(snapshot, 0, start_heap, start_heap,
(char *) end_heap - (char *) start_heap);
snapshot->heap_bytes_used = mmalloc_get_bytes_used(std_heap);
+ snapshot->privatization_regions = NULL;
- MC_snapshot_add_region(snapshot, 1, mc_libsimgrid_info->start_rw,
- mc_libsimgrid_info->end_rw -
- mc_libsimgrid_info->start_rw);
- if (!smpi_privatize_global_variables) {
- MC_snapshot_add_region(snapshot, 2, mc_binary_info->start_rw,
- mc_binary_info->end_rw - mc_binary_info->start_rw);
- snapshot->privatization_regions = NULL;
- snapshot->privatization_index = -1;
- } else {
+ MC_snapshot_add_region(snapshot, 1,
+ mc_libsimgrid_info->start_rw, mc_libsimgrid_info->start_rw,
+ mc_libsimgrid_info->end_rw - mc_libsimgrid_info->start_rw);
+
+#ifdef HAVE_SMPI
+ size_t i;
+ if (smpi_privatize_global_variables && smpi_process_count()) {
+ // Snapshot the global variable of the application separately for each
+ // simulated process:
snapshot->privatization_regions =
- xbt_new(mc_mem_region_t, SIMIX_process_count());
- for (i = 0; i < SIMIX_process_count(); i++) {
+ xbt_new(mc_mem_region_t, smpi_process_count());
+ for (i = 0; i < smpi_process_count(); i++) {
mc_mem_region_t ref_reg =
mc_model_checker->parent_snapshot ? mc_model_checker->parent_snapshot->privatization_regions[i] : NULL;
snapshot->privatization_regions[i] =
- MC_region_new(-1, mappings[i], size_data_exe, ref_reg);
+ MC_region_new(-1, mc_binary_info->start_rw, mappings[i], size_data_exe, ref_reg);
}
snapshot->privatization_index = loaded_page;
+ snapshot->regions[2] = NULL;
+ } else
+#endif
+ {
+ MC_snapshot_add_region(snapshot, 2,
+ mc_binary_info->start_rw, mc_binary_info->start_rw,
+ mc_binary_info->end_rw - mc_binary_info->start_rw);
+ snapshot->privatization_regions = NULL;
+ snapshot->privatization_index = -1;
}
}
}
static void mc_fill_local_variables_values(mc_stack_frame_t stack_frame,
- dw_frame_t scope, xbt_dynar_t result)
+ dw_frame_t scope, int process_index, xbt_dynar_t result)
{
void *ip = (void *) stack_frame->ip;
if (ip < scope->low_pc || ip >= scope->high_pc)
current_variable->object_info,
&(stack_frame->unw_cursor),
(void *) stack_frame->frame_base,
- NULL);
+ NULL, process_index);
} else {
xbt_die("No address");
}
// Recursive processing of nested scopes:
dw_frame_t nested_scope = NULL;
xbt_dynar_foreach(scope->scopes, cursor, nested_scope) {
- mc_fill_local_variables_values(stack_frame, nested_scope, result);
+ mc_fill_local_variables_values(stack_frame, nested_scope, process_index, result);
}
}
-static xbt_dynar_t MC_get_local_variables_values(xbt_dynar_t stack_frames)
+static xbt_dynar_t MC_get_local_variables_values(xbt_dynar_t stack_frames, int process_index)
{
unsigned cursor1 = 0;
xbt_dynar_new(sizeof(local_variable_t), local_variable_free_voidp);
xbt_dynar_foreach(stack_frames, cursor1, stack_frame) {
- mc_fill_local_variables_values(stack_frame, stack_frame->frame, variables);
+ mc_fill_local_variables_values(stack_frame, stack_frame->frame, process_index, variables);
}
return variables;
xbt_dynar_foreach(stacks_areas, cursor, current_stack) {
mc_snapshot_stack_t st = xbt_new(s_mc_snapshot_stack_t, 1);
st->stack_frames = MC_unwind_stack_frames(current_stack->context);
- st->local_variables = MC_get_local_variables_values(st->stack_frames);
+ st->local_variables = MC_get_local_variables_values(st->stack_frames, current_stack->process_index);
+ st->process_index = current_stack->process_index;
unw_word_t sp = xbt_dynar_get_as(st->stack_frames, 0, mc_stack_frame_t)->sp;
{
// We need this snapshot in order to know which
// pages needs to be stored in the next snapshot:
- if (_sg_mc_sparse_checkpoint && snapshot == mc_model_checker->parent_snapshot)
+ if (snapshot == mc_model_checker->parent_snapshot)
return true;
return false;
}
MC_snapshot_ignore_restore(snapshot);
- mc_model_checker->parent_snapshot = snapshot;
+ if (_sg_mc_sparse_checkpoint && _sg_mc_soft_dirty) {
+ mc_model_checker->parent_snapshot = snapshot;
+ }
return snapshot;
}
parent_snapshot ? parent_snapshot->regions[i] : NULL);
}
+#ifdef HAVE_SMPI
if (snapshot->privatization_regions) {
- for (i = 0; i < SIMIX_process_count(); i++) {
+ // Restore the global variables of the application separately for each
+ // simulated process:
+ for (i = 0; i < smpi_process_count(); i++) {
if (snapshot->privatization_regions[i]) {
MC_region_restore(snapshot->privatization_regions[i],
parent_snapshot ? parent_snapshot->privatization_regions[i] : NULL);
}
switch_data_segment(snapshot->privatization_index);
}
+#endif
if (_sg_mc_sparse_checkpoint && _sg_mc_soft_dirty) {
mc_softdirty_reset();
}
MC_snapshot_ignore_restore(snapshot);
- mc_model_checker->parent_snapshot = snapshot;
+ if (_sg_mc_sparse_checkpoint && _sg_mc_soft_dirty) {
+ mc_model_checker->parent_snapshot = snapshot;
+ }
}
mc_snapshot_t SIMIX_pre_mc_snapshot(smx_simcall_t simcall)
#include <inttypes.h>
#include <boost/unordered_set.hpp>
+#include "internal_config.h"
#include "mc_private.h"
+#ifdef HAVE_SMPI
+#include "smpi/private.h"
+#endif
+
#include "xbt/mmalloc.h"
#include "xbt/mmalloc/mmprivate.h"
}
static int compare_areas_with_type(struct mc_compare_state& state,
+ int process_index,
void* real_area1, mc_snapshot_t snapshot1, mc_mem_region_t region1,
void* real_area2, mc_snapshot_t snapshot2, mc_mem_region_t region2,
dw_type_t type, int pointer_level)
case DW_TAG_enumeration_type:
case DW_TAG_union_type:
{
- void* data1 =
- mc_snapshot_read_region(real_area1, region1, alloca(type->byte_size), type->byte_size);
- void* data2 =
- mc_snapshot_read_region(real_area2, region2, alloca(type->byte_size), type->byte_size);
- return (memcmp(data1, data2, type->byte_size) != 0);
- break;
+ return mc_snapshot_region_memcmp(
+ real_area1, region1, real_area2, region2,
+ type->byte_size) != 0;
}
case DW_TAG_typedef:
case DW_TAG_volatile_type:
}
for (i = 0; i < type->element_count; i++) {
size_t off = i * elm_size;
- res = compare_areas_with_type(state,
+ res = compare_areas_with_type(state, process_index,
(char*) real_area1 + off, snapshot1, region1,
(char*) real_area2 + off, snapshot2, region2,
type->subtype, pointer_level);
case DW_TAG_reference_type:
case DW_TAG_rvalue_reference_type:
{
- void* temp;
- void* addr_pointed1 = *(void**) mc_snapshot_read_region(real_area1, region1, &temp, sizeof(void**));
- void* addr_pointed2 = *(void**) mc_snapshot_read_region(real_area2, region2, &temp, sizeof(void**));
+ void* addr_pointed1 = mc_snapshot_read_pointer_region(real_area1, region1);
+ void* addr_pointed2 = mc_snapshot_read_pointer_region(real_area2, region2);
if (type->subtype && type->subtype->type == DW_TAG_subroutine_type) {
return (addr_pointed1 != addr_pointed2);
&& addr_pointed2 < mc_snapshot_get_heap_end(snapshot2)))
return 1;
// The pointers are both in the heap:
- return compare_heap_area(addr_pointed1, addr_pointed2, snapshot1,
+ return compare_heap_area(process_index, addr_pointed1, addr_pointed2, snapshot1,
snapshot2, NULL, type->subtype, pointer_level);
}
+
// The pointers are both in the current object R/W segment:
- else if (addr_pointed1 > region1->start_addr
- && (char *) addr_pointed1 <= (char *) region1->start_addr + region1->size) {
- if (!
- (addr_pointed2 > region2->start_addr
- && (char *) addr_pointed2 <= (char *) region2->start_addr + region2->size))
+ else if (mc_region_contain(region1, addr_pointed1)) {
+ if (!mc_region_contain(region2, addr_pointed2))
return 1;
if (type->dw_type_id == NULL)
return (addr_pointed1 != addr_pointed2);
else {
- return compare_areas_with_type(state,
+ return compare_areas_with_type(state, process_index,
addr_pointed1, snapshot1, region1,
addr_pointed2, snapshot2, region2,
type->subtype, pointer_level);
}
}
+ // TODO, We do not handle very well the case where
+ // it belongs to a different (non-heap) region from the current one.
+
else {
return (addr_pointed1 != addr_pointed2);
}
case DW_TAG_class_type:
xbt_dynar_foreach(type->members, cursor, member) {
void *member1 =
- mc_member_resolve(real_area1, type, member, snapshot1);
+ mc_member_resolve(real_area1, type, member, snapshot1, process_index);
void *member2 =
- mc_member_resolve(real_area2, type, member, snapshot2);
- mc_mem_region_t subregion1 = mc_get_region_hinted(member1, snapshot1, region1);
- mc_mem_region_t subregion2 = mc_get_region_hinted(member2, snapshot2, region2);
+ mc_member_resolve(real_area2, type, member, snapshot2, process_index);
+ mc_mem_region_t subregion1 = mc_get_region_hinted(member1, snapshot1, process_index, region1);
+ mc_mem_region_t subregion2 = mc_get_region_hinted(member2, snapshot2, process_index, region2);
res =
- compare_areas_with_type(state,
+ compare_areas_with_type(state, process_index,
member1, snapshot1, subregion1,
member2, snapshot2, subregion2,
member->subtype, pointer_level);
return 0;
}
-static int compare_global_variables(int region_type, mc_mem_region_t r1,
+static int compare_global_variables(mc_object_info_t object_info,
+ int process_index,
+ mc_mem_region_t r1,
mc_mem_region_t r2, mc_snapshot_t snapshot1,
mc_snapshot_t snapshot2)
{
unsigned int cursor = 0;
dw_variable_t current_var;
- mc_object_info_t object_info = NULL;
- if (region_type == 2) {
- object_info = mc_binary_info;
- } else {
- object_info = mc_libsimgrid_info;
- }
variables = object_info->global_variables;
xbt_dynar_foreach(variables, cursor, current_var) {
dw_type_t bvariable_type = current_var->type;
res =
- compare_areas_with_type(state,
+ compare_areas_with_type(state, process_index,
(char *) current_var->address, snapshot1, r1,
(char *) current_var->address, snapshot2, r2,
bvariable_type, 0);
}
-static int compare_local_variables(mc_snapshot_t snapshot1,
+static int compare_local_variables(int process_index,
+ mc_snapshot_t snapshot1,
mc_snapshot_t snapshot2,
mc_snapshot_stack_t stack1,
mc_snapshot_stack_t stack2)
dw_type_t subtype = current_var1->type;
res =
- compare_areas_with_type(state,
- current_var1->address, snapshot1, mc_get_snapshot_region(current_var1->address, snapshot1),
- current_var2->address, snapshot2, mc_get_snapshot_region(current_var2->address, snapshot2),
+ compare_areas_with_type(state, process_index,
+ current_var1->address, snapshot1, mc_get_snapshot_region(current_var1->address, snapshot1, process_index),
+ current_var2->address, snapshot2, mc_get_snapshot_region(current_var2->address, snapshot2, process_index),
subtype, 0);
if (res == 1) {
#endif
/* Init heap information used in heap comparison algorithm */
- xbt_mheap_t heap1 = (xbt_mheap_t) mc_snapshot_read(std_heap, s1,
+ xbt_mheap_t heap1 = (xbt_mheap_t) mc_snapshot_read(std_heap, s1, MC_NO_PROCESS_INDEX,
alloca(sizeof(struct mdesc)), sizeof(struct mdesc));
- xbt_mheap_t heap2 = (xbt_mheap_t) mc_snapshot_read(std_heap, s2,
+ xbt_mheap_t heap2 = (xbt_mheap_t) mc_snapshot_read(std_heap, s2, MC_NO_PROCESS_INDEX,
alloca(sizeof(struct mdesc)), sizeof(struct mdesc));
res_init = init_heap_information(heap1, heap2, s1->to_ignore, s2->to_ignore);
if (res_init == -1) {
is_diff = 0;
#endif
mc_snapshot_stack_t stack1, stack2;
-
while (cursor < xbt_dynar_length(s1->stacks)) {
stack1 =
(mc_snapshot_stack_t) xbt_dynar_get_as(s1->stacks, cursor,
stack2 =
(mc_snapshot_stack_t) xbt_dynar_get_as(s2->stacks, cursor,
mc_snapshot_stack_t);
- diff_local =
- compare_local_variables(s1, s2, stack1, stack2);
+
+ if (stack1->process_index != stack2->process_index) {
+ diff_local = 1;
+ XBT_DEBUG("(%d - %d) Stacks with different process index (%i vs %i)", num1, num2,
+ stack1->process_index, stack2->process_index);
+ }
+ else diff_local =
+ compare_local_variables(stack1->process_index, s1, s2, stack1, stack2);
if (diff_local > 0) {
#ifdef MC_DEBUG
if (is_diff == 0) {
};
#endif
+ mc_object_info_t object_infos[] = { NULL, mc_libsimgrid_info, mc_binary_info };
+
int k = 0;
for (k = 2; k != 0; --k) {
#ifdef MC_DEBUG
#endif
/* Compare global variables */
- is_diff =
- compare_global_variables(k, s1->regions[k], s2->regions[k], s1, s2);
+#ifdef HAVE_SMPI
+ if (object_infos[k] == mc_binary_info && smpi_privatize_global_variables) {
+ // Compare the global variables separately for each simulates process:
+ for (int process_index = 0; process_index < smpi_process_count(); process_index++) {
+ is_diff =
+ compare_global_variables(object_infos[k], process_index,
+ s1->privatization_regions[process_index], s2->privatization_regions[process_index], s1, s2);
+ if (is_diff) break;
+ }
+ }
+ else
+#endif
+ is_diff =
+ compare_global_variables(object_infos[k], MC_NO_PROCESS_INDEX, s1->regions[k], s2->regions[k], s1, s2);
+
if (is_diff != 0) {
#ifdef MC_DEBUG
xbt_os_walltimer_stop(timer);
/* type = heap->heapinfo[block].type; */
/* switch(type){ */
- /* case -1 : /\* Free block *\/ */
+ /* case MMALLOC_TYPE_HEAPINFO : */
+ /* case MMALLOC_TYPE_FREE : /\* Free block *\/ */
/* fprintf(stderr, "Asked to display the backtrace of a block that is free. I'm puzzled\n"); */
/* xbt_abort(); */
/* break; */
void *addr_block1, *addr_block2, *addr_frag1, *addr_frag2;
int nb_diff1 = 0, nb_diff2 = 0;
- xbt_dynar_t previous =
- xbt_dynar_new(sizeof(heap_area_pair_t), heap_area_pair_free_voidp);
-
int equal, res_compare = 0;
/* Check busy blocks */
mc_mem_region_t heap_region2 = snapshot2->regions[0];
// This is in snapshot do not use them directly:
- malloc_info* heapinfos1 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot1);
- malloc_info* heapinfos2 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot2);
+ malloc_info* heapinfos1 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot1, MC_NO_PROCESS_INDEX);
+ malloc_info* heapinfos2 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot2, MC_NO_PROCESS_INDEX);
while (i1 <= state->heaplimit) {
- // TODO, lookup in the correct region in order to speed it up:
malloc_info* heapinfo1 = mc_snapshot_read_region(&heapinfos1[i1], heap_region1, &heapinfo_temp1, sizeof(malloc_info));
malloc_info* heapinfo2 = mc_snapshot_read_region(&heapinfos2[i1], heap_region2, &heapinfo_temp2, sizeof(malloc_info));
- if (heapinfo1->type == -1) { /* Free block */
- i1++;
+ if (heapinfo1->type == MMALLOC_TYPE_FREE || heapinfo1->type == MMALLOC_TYPE_HEAPINFO) { /* Free block */
+ i1 += heapinfo1->free_block.size;
continue;
}
+ if (heapinfo1->type < 0) {
+    fprintf(stderr, "Unknown mmalloc block type.\n");
+ abort();
+ }
+
addr_block1 =
((void *) (((ADDR2UINT(i1)) - 1) * BLOCKSIZE +
(char *) ((xbt_mheap_t) state->s_heap)->heapbase));
- if (heapinfo1->type == 0) { /* Large block */
+ if (heapinfo1->type == MMALLOC_TYPE_UNFRAGMENTED) { /* Large block */
if (is_stack(addr_block1)) {
for (k = 0; k < heapinfo1->busy_block.size; k++)
(char *) ((xbt_mheap_t) state->s_heap)->heapbase));
res_compare =
- compare_heap_area(addr_block1, addr_block2, snapshot1, snapshot2,
+ compare_heap_area(MC_NO_PROCESS_INDEX, addr_block1, addr_block2, snapshot1, snapshot2,
NULL, NULL, 0);
if (res_compare != 1) {
i1 += heapinfo1->busy_block.size;
}
- xbt_dynar_reset(previous);
-
}
}
malloc_info* heapinfo2b = mc_snapshot_read_region(&heapinfos2[i2], heap_region2, &heapinfo_temp2b, sizeof(malloc_info));
- if (heapinfo2b->type != 0) {
+ if (heapinfo2b->type != MMALLOC_TYPE_UNFRAGMENTED) {
i2++;
continue;
}
}
res_compare =
- compare_heap_area(addr_block1, addr_block2, snapshot1, snapshot2,
+ compare_heap_area(MC_NO_PROCESS_INDEX, addr_block1, addr_block2, snapshot1, snapshot2,
NULL, NULL, 0);
if (res_compare != 1) {
i1 += heapinfo1->busy_block.size;
}
- xbt_dynar_reset(previous);
-
i2++;
}
(char *) ((xbt_mheap_t) state->s_heap)->heapbase));
addr_frag2 =
(void *) ((char *) addr_block2 +
- (j1 << ((xbt_mheap_t) state->s_heap)->heapinfo[i1].
- type));
+ (j1 << heapinfo2->type));
res_compare =
- compare_heap_area(addr_frag1, addr_frag2, snapshot1, snapshot2,
+ compare_heap_area(MC_NO_PROCESS_INDEX, addr_frag1, addr_frag2, snapshot1, snapshot2,
NULL, NULL, 0);
if (res_compare != 1)
equal = 1;
- xbt_dynar_reset(previous);
-
}
}
while (i2 <= state->heaplimit && !equal) {
malloc_info* heapinfo2b = mc_snapshot_read_region(&heapinfos2[i2], heap_region2, &heapinfo_temp2b, sizeof(malloc_info));
- if (heapinfo2b->type <= 0) {
- i2++;
+
+ if (heapinfo2b->type == MMALLOC_TYPE_FREE || heapinfo2b->type == MMALLOC_TYPE_HEAPINFO) {
+ i2 += heapinfo2b->free_block.size;
continue;
}
+ if (heapinfo2b->type < 0) {
+    fprintf(stderr, "Unknown mmalloc block type.\n");
+ abort();
+ }
+
for (j2 = 0; j2 < (size_t) (BLOCKSIZE >> heapinfo2b->type);
j2++) {
(char *) ((xbt_mheap_t) state->s_heap)->heapbase));
addr_frag2 =
(void *) ((char *) addr_block2 +
- (j2 << ((xbt_mheap_t) state->s_heap)->heapinfo[i2].
- type));
+ (j2 << heapinfo2b->type));
res_compare =
- compare_heap_area(addr_frag1, addr_frag2, snapshot2, snapshot2,
+ compare_heap_area(MC_NO_PROCESS_INDEX, addr_frag1, addr_frag2, snapshot2, snapshot2,
NULL, NULL, 0);
if (res_compare != 1) {
equal = 1;
- xbt_dynar_reset(previous);
break;
}
- xbt_dynar_reset(previous);
-
}
i2++;
for(i = 1; i <= state->heaplimit; i++) {
malloc_info* heapinfo1 = mc_snapshot_read_region(&heapinfos1[i], heap_region1, &heapinfo_temp1, sizeof(malloc_info));
- if (heapinfo1->type == 0) {
+ if (heapinfo1->type == MMALLOC_TYPE_UNFRAGMENTED) {
if (i1 == state->heaplimit) {
if (heapinfo1->busy_block.busy_size > 0) {
if (state->equals_to1_(i, 0).valid == 0) {
for (i=1; i <= state->heaplimit; i++) {
malloc_info* heapinfo2 = mc_snapshot_read_region(&heapinfos2[i], heap_region2, &heapinfo_temp2, sizeof(malloc_info));
- if (heapinfo2->type == 0) {
+ if (heapinfo2->type == MMALLOC_TYPE_UNFRAGMENTED) {
if (i1 == state->heaplimit) {
if (heapinfo2->busy_block.busy_size > 0) {
if (state->equals_to2_(i, 0).valid == 0) {
if (i1 == state->heaplimit)
XBT_DEBUG("Number of blocks/fragments not found in heap2 : %d", nb_diff2);
- xbt_dynar_free(&previous);
return ((nb_diff1 > 0) || (nb_diff2 > 0));
}
* @param size
* @param check_ignore
*/
-static int compare_heap_area_without_type(struct s_mc_diff *state,
+static int compare_heap_area_without_type(struct s_mc_diff *state, int process_index,
void *real_area1, void *real_area2,
mc_snapshot_t snapshot1,
mc_snapshot_t snapshot2,
}
}
- if (mc_snapshot_region_memcp(((char *) real_area1) + i, heap_region1, ((char *) real_area2) + i, heap_region2, 1) != 0) {
+ if (mc_snapshot_region_memcmp(((char *) real_area1) + i, heap_region1, ((char *) real_area2) + i, heap_region2, 1) != 0) {
pointer_align = (i / sizeof(void *)) * sizeof(void *);
- addr_pointed1 = mc_snapshot_read_pointer((char *) real_area1 + pointer_align, snapshot1);
- addr_pointed2 = mc_snapshot_read_pointer((char *) real_area2 + pointer_align, snapshot2);
+ addr_pointed1 = mc_snapshot_read_pointer((char *) real_area1 + pointer_align, snapshot1, process_index);
+ addr_pointed2 = mc_snapshot_read_pointer((char *) real_area2 + pointer_align, snapshot2, process_index);
if (addr_pointed1 > maestro_stack_start
&& addr_pointed1 < maestro_stack_end
&& addr_pointed2 < mc_snapshot_get_heap_end(snapshot2)) {
// Both addreses are in the heap:
res_compare =
- compare_heap_area(addr_pointed1, addr_pointed2, snapshot1,
+ compare_heap_area(process_index, addr_pointed1, addr_pointed2, snapshot1,
snapshot2, previous, NULL, 0);
if (res_compare == 1) {
return res_compare;
* @param pointer_level
* @return 0 (same), 1 (different), -1 (unknown)
*/
-static int compare_heap_area_with_type(struct s_mc_diff *state,
+static int compare_heap_area_with_type(struct s_mc_diff *state, int process_index,
void *real_area1, void *real_area2,
mc_snapshot_t snapshot1,
mc_snapshot_t snapshot2,
if (real_area1 == real_area2)
return -1;
else
- return (mc_snapshot_region_memcp(real_area1, heap_region1, real_area2, heap_region2, area_size) != 0);
+ return (mc_snapshot_region_memcmp(real_area1, heap_region1, real_area2, heap_region2, area_size) != 0);
} else {
if (area_size != -1 && type->byte_size != area_size)
return -1;
else {
- return (mc_snapshot_region_memcp(real_area1, heap_region1, real_area2, heap_region2, type->byte_size) != 0);
+ return (mc_snapshot_region_memcmp(real_area1, heap_region1, real_area2, heap_region2, type->byte_size) != 0);
}
}
break;
if (area_size != -1 && type->byte_size != area_size)
return -1;
else
- return (mc_snapshot_region_memcp(real_area1, heap_region1, real_area2, heap_region2, type->byte_size) != 0);
+ return (mc_snapshot_region_memcmp(real_area1, heap_region1, real_area2, heap_region2, type->byte_size) != 0);
break;
case DW_TAG_typedef:
case DW_TAG_const_type:
for (i = 0; i < type->element_count; i++) {
// TODO, add support for variable stride (DW_AT_byte_stride)
res =
- compare_heap_area_with_type(state,
+ compare_heap_area_with_type(state, process_index,
(char *) real_area1 + (i * elm_size),
(char *) real_area2 + (i * elm_size),
snapshot1, snapshot2, previous,
case DW_TAG_rvalue_reference_type:
case DW_TAG_pointer_type:
if (type->subtype && type->subtype->type == DW_TAG_subroutine_type) {
- addr_pointed1 = mc_snapshot_read_pointer(real_area1, snapshot1);
- addr_pointed2 = mc_snapshot_read_pointer(real_area2, snapshot2);
+ addr_pointed1 = mc_snapshot_read_pointer(real_area1, snapshot1, process_index);
+ addr_pointed2 = mc_snapshot_read_pointer(real_area2, snapshot2, process_index);
return (addr_pointed1 != addr_pointed2);;
} else {
pointer_level++;
if (pointer_level > 1) { /* Array of pointers */
for (i = 0; i < (area_size / sizeof(void *)); i++) {
- addr_pointed1 = mc_snapshot_read_pointer((char*) real_area1 + i * sizeof(void *), snapshot1);
- addr_pointed2 = mc_snapshot_read_pointer((char*) real_area2 + i * sizeof(void *), snapshot2);
+ addr_pointed1 = mc_snapshot_read_pointer((char*) real_area1 + i * sizeof(void *), snapshot1, process_index);
+ addr_pointed2 = mc_snapshot_read_pointer((char*) real_area2 + i * sizeof(void *), snapshot2, process_index);
if (addr_pointed1 > state->s_heap
&& addr_pointed1 < mc_snapshot_get_heap_end(snapshot1)
&& addr_pointed2 > state->s_heap
&& addr_pointed2 < mc_snapshot_get_heap_end(snapshot2))
res =
- compare_heap_area(addr_pointed1, addr_pointed2, snapshot1,
+ compare_heap_area(process_index, addr_pointed1, addr_pointed2, snapshot1,
snapshot2, previous, type->subtype,
pointer_level);
else
return res;
}
} else {
- addr_pointed1 = mc_snapshot_read_pointer(real_area1, snapshot1);
- addr_pointed2 = mc_snapshot_read_pointer(real_area2, snapshot2);
+ addr_pointed1 = mc_snapshot_read_pointer(real_area1, snapshot1, process_index);
+ addr_pointed2 = mc_snapshot_read_pointer(real_area2, snapshot2, process_index);
if (addr_pointed1 > state->s_heap
&& addr_pointed1 < mc_snapshot_get_heap_end(snapshot1)
&& addr_pointed2 > state->s_heap
&& addr_pointed2 < mc_snapshot_get_heap_end(snapshot2))
- return compare_heap_area(addr_pointed1, addr_pointed2, snapshot1,
+ return compare_heap_area(process_index, addr_pointed1, addr_pointed2, snapshot1,
snapshot2, previous, type->subtype,
pointer_level);
else
if (area_size > type->byte_size && area_size % type->byte_size == 0) {
for (i = 0; i < (area_size / type->byte_size); i++) {
res =
- compare_heap_area_with_type(state,
+ compare_heap_area_with_type(state, process_index,
(char *) real_area1 + i * type->byte_size,
(char *) real_area2 + i * type->byte_size,
snapshot1, snapshot2, previous, type, -1,
xbt_dynar_foreach(type->members, cursor, member) {
// TODO, optimize this? (for the offset case)
char *real_member1 =
- mc_member_resolve(real_area1, type, member, snapshot1);
+ mc_member_resolve(real_area1, type, member, snapshot1, process_index);
char *real_member2 =
- mc_member_resolve(real_area2, type, member, snapshot2);
+ mc_member_resolve(real_area2, type, member, snapshot2, process_index);
res =
- compare_heap_area_with_type(state, real_member1, real_member2,
+ compare_heap_area_with_type(state, process_index, real_member1, real_member2,
snapshot1, snapshot2,
previous, member->subtype, -1,
check_ignore, 0);
}
break;
case DW_TAG_union_type:
- return compare_heap_area_without_type(state, real_area1, real_area2,
+ return compare_heap_area_without_type(state, process_index, real_area1, real_area2,
snapshot1, snapshot2, previous,
type->byte_size, check_ignore);
break;
*/
static dw_type_t get_offset_type(void *real_base_address, dw_type_t type,
int offset, int area_size,
- mc_snapshot_t snapshot)
+ mc_snapshot_t snapshot, int process_index)
{
// Beginning of the block, the infered variable type if the type of the block:
return member->subtype;
} else {
char *real_member =
- mc_member_resolve(real_base_address, type, member, snapshot);
+ mc_member_resolve(real_base_address, type, member, snapshot, process_index);
if (real_member - (char *) real_base_address == offset)
return member->subtype;
}
* @param pointer_level
* @return 0 (same), 1 (different), -1
*/
-int compare_heap_area(void *area1, void *area2, mc_snapshot_t snapshot1,
+int compare_heap_area(int process_index, void *area1, void *area2, mc_snapshot_t snapshot1,
mc_snapshot_t snapshot2, xbt_dynar_t previous,
dw_type_t type, int pointer_level)
{
int check_ignore = 0;
void *real_addr_block1, *real_addr_block2, *real_addr_frag1, *real_addr_frag2;
-
int type_size = -1;
int offset1 = 0, offset2 = 0;
int new_size1 = -1, new_size2 = -1;
int match_pairs = 0;
- malloc_info* heapinfos1 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot1);
- malloc_info* heapinfos2 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot2);
+ malloc_info* heapinfos1 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot1, process_index);
+ malloc_info* heapinfos2 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot2, process_index);
malloc_info heapinfo_temp1, heapinfo_temp2;
+ void* real_area1_to_compare = area1;
+ void* real_area2_to_compare = area2;
+
if (previous == NULL) {
previous =
xbt_dynar_new(sizeof(heap_area_pair_t), heap_area_pair_free_voidp);
malloc_info* heapinfo1 = mc_snapshot_read_region(&heapinfos1[block1], heap_region1, &heapinfo_temp1, sizeof(malloc_info));
malloc_info* heapinfo2 = mc_snapshot_read_region(&heapinfos2[block2], heap_region2, &heapinfo_temp2, sizeof(malloc_info));
- if ((heapinfo1->type == -1) && (heapinfo2->type == -1)) { /* Free block */
+ if ((heapinfo1->type == MMALLOC_TYPE_FREE || heapinfo1->type==MMALLOC_TYPE_HEAPINFO)
+ && (heapinfo2->type == MMALLOC_TYPE_FREE || heapinfo2->type ==MMALLOC_TYPE_HEAPINFO)) {
+ /* Free block */
if (match_pairs) {
match_equals(state, previous);
xbt_dynar_free(&previous);
}
return 0;
- } else if ((heapinfo1->type == 0) && (heapinfo2->type == 0)) { /* Complete block */
+ } else if (heapinfo1->type == MMALLOC_TYPE_UNFRAGMENTED
+ && heapinfo2->type == MMALLOC_TYPE_UNFRAGMENTED) {
+ /* Complete block */
// TODO, lookup variable type from block type as done for fragmented blocks
+ offset1 = (char *) area1 - (char *) real_addr_block1;
+ offset2 = (char *) area2 - (char *) real_addr_block2;
+
if (state->equals_to1_(block1, 0).valid
&& state->equals_to2_(block2, 0).valid) {
if (equal_blocks(state, block1, block2)) {
if (type_size != -1) {
if (type_size != heapinfo1->busy_block.busy_size
&& type_size != heapinfo2->busy_block.busy_size
- && type->name != NULL && !strcmp(type->name, "s_smx_context")) {
+ && (type->name == NULL || !strcmp(type->name, "struct s_smx_context"))) {
if (match_pairs) {
match_equals(state, previous);
xbt_dynar_free(&previous);
// Process address of the fragment:
real_addr_frag1 =
(void *) ((char *) real_addr_block1 +
- (frag1 << ((xbt_mheap_t) state->s_heap)->heapinfo[block1].
- type));
+ (frag1 << heapinfo1->type));
real_addr_frag2 =
(void *) ((char *) real_addr_block2 +
- (frag2 << ((xbt_mheap_t) state->s_heap)->heapinfo[block2].
- type));
+ (frag2 << heapinfo2->type));
// Check the size of the fragments against the size of the type:
if (type_size != -1) {
}
return -1;
}
+ // ?
if (type_size != heapinfo1->busy_frag.frag_size[frag1]
|| type_size != heapinfo2->busy_frag.frag_size[frag2]) {
if (match_pairs) {
return -1;
}
}
+
// Check if the blocks are already matched together:
if (state->equals_to1_(block1, frag1).valid
&& state->equals_to2_(block2, frag2).valid) {
- if (equal_fragments(state, block1, frag1, block2, frag2)) {
+ if (offset1==offset2 && equal_fragments(state, block1, frag1, block2, frag2)) {
if (match_pairs) {
match_equals(state, previous);
xbt_dynar_free(&previous);
return 1;
}
}
+
// Size of the fragment:
size = heapinfo1->busy_frag.frag_size[frag1];
// Remember (basic) type inference.
- // The current data structure only allows us to do this for the whole block.
+ // The current data structure only allows us to do this for the whole fragment.
if (type != NULL && area1 == real_addr_frag1) {
state->types1_(block1, frag1) = type;
}
&& state->types2_(block2, frag2) != NULL) {
new_type1 =
get_offset_type(real_addr_frag1, state->types1_(block1, frag1),
- offset1, size, snapshot1);
+ offset1, size, snapshot1, process_index);
new_type2 =
get_offset_type(real_addr_frag2, state->types2_(block2, frag2),
- offset1, size, snapshot2);
+ offset1, size, snapshot2, process_index);
} else if (state->types1_(block1, frag1) != NULL) {
new_type1 =
get_offset_type(real_addr_frag1, state->types1_(block1, frag1),
- offset1, size, snapshot1);
+ offset1, size, snapshot1, process_index);
new_type2 =
get_offset_type(real_addr_frag2, state->types1_(block1, frag1),
- offset2, size, snapshot2);
+ offset2, size, snapshot2, process_index);
} else if (state->types2_(block2, frag2) != NULL) {
new_type1 =
get_offset_type(real_addr_frag1, state->types2_(block2, frag2),
- offset1, size, snapshot1);
+ offset1, size, snapshot1, process_index);
new_type2 =
get_offset_type(real_addr_frag2, state->types2_(block2, frag2),
- offset2, size, snapshot2);
+ offset2, size, snapshot2, process_index);
} else {
if (match_pairs) {
match_equals(state, previous);
/* Start comparison */
if (type) {
res_compare =
- compare_heap_area_with_type(state, area1, area2, snapshot1, snapshot2,
+ compare_heap_area_with_type(state, process_index, area1, area2, snapshot1, snapshot2,
previous, type, size, check_ignore,
pointer_level);
} else {
res_compare =
- compare_heap_area_without_type(state, area1, area2, snapshot1, snapshot2,
+ compare_heap_area_without_type(state, process_index, area1, area2, snapshot1, snapshot2,
previous, size, check_ignore);
}
if (res_compare == 1) {
|| (block > state->heapsize1) || (block < 1))
return -1;
- if (heapinfo[block].type == -1) { /* Free block */
+ if (heapinfo[block].type == MMALLOC_TYPE_FREE || heapinfo[block].type == MMALLOC_TYPE_HEAPINFO) { /* Free block */
return -1;
- } else if (heapinfo[block].type == 0) { /* Complete block */
+ } else if (heapinfo[block].type == MMALLOC_TYPE_UNFRAGMENTED) { /* Complete block */
return (int) heapinfo[block].busy_block.busy_size;
} else {
frag =
} else {
- if (state->heapinfo1[i].type == -1) { /* Free block */
+ if (state->heapinfo1[i].type == MMALLOC_TYPE_FREE
+        || state->heapinfo1[i].type == MMALLOC_TYPE_HEAPINFO) { /* Free block */
i++;
continue;
}
- if (state->heapinfo1[i].type == 0) { /* Large block */
+ if (state->heapinfo1[i].type == MMALLOC_TYPE_UNFRAGMENTED) { /* Large block */
if (state->heapinfo1[i].busy_block.size !=
state->heapinfo2[i].busy_block.size) {
// Computed address:
uintptr_t address = (uintptr_t) state->stack[state->stack_size - 1];
uintptr_t temp;
- uintptr_t* res = (uintptr_t*) mc_snapshot_read((void*) address, state->snapshot, &temp, sizeof(uintptr_t));
+ uintptr_t* res = (uintptr_t*) mc_snapshot_read((void*) address, state->snapshot, state->process_index, &temp, sizeof(uintptr_t));
state->stack[state->stack_size - 1] = *res;
}
break;
mc_object_info_t object_info,
unw_cursor_t * c,
void *frame_pointer_address,
- mc_snapshot_t snapshot)
+ mc_snapshot_t snapshot, int process_index)
{
s_mc_expression_state_t state;
memset(&state, 0, sizeof(s_mc_expression_state_t));
state.cursor = c;
state.snapshot = snapshot;
state.object_info = object_info;
+ state.process_index = process_index;
if (mc_dwarf_execute_expression(expression->size, expression->ops, &state))
xbt_die("Error evaluating DWARF expression");
mc_object_info_t object_info,
unw_cursor_t * c,
void *frame_pointer_address,
- mc_snapshot_t snapshot)
+ mc_snapshot_t snapshot, int process_index)
{
unw_word_t ip;
|| (c && ip >= (unw_word_t) expression->lowpc
&& ip < (unw_word_t) expression->highpc)) {
return mc_dwarf_resolve_location(expression, object_info, c,
- frame_pointer_address, snapshot);
+ frame_pointer_address, snapshot, process_index);
}
}
xbt_die("Could not resolve location");
unw_cursor_t * unw_cursor)
{
return (void *) mc_dwarf_resolve_locations(&frame->frame_base, object_info,
- unw_cursor, NULL, NULL);
+ unw_cursor, NULL, NULL, -1);
}
void mc_dwarf_expression_clear(mc_expression_t expression)
int _sg_do_model_check = 0;
int _sg_mc_checkpoint = 0;
int _sg_mc_sparse_checkpoint = 0;
-int _sg_mc_soft_dirty = 1;
+int _sg_mc_soft_dirty = 0;
char *_sg_mc_property_file = NULL;
int _sg_mc_timeout = 0;
int _sg_mc_hash = 0;
}
}
+#if 0
/** \brief Compute a hash for a given value of a given type
*
* We try to be very conservative (do not hash too ambiguous things).
}
}
-
static void mc_hash_object_globals(mc_hash_t * hash, mc_hashing_state * state,
mc_object_info_t info)
{
++i;
}
}
+#endif
uint64_t mc_hash_processes_state(int num_state, xbt_dynar_t stacks)
{
mc_hash_t hash = MC_HASH_INIT;
MC_HASH(hash, xbt_swag_size(simix_global->process_list)); // process count
- mc_hash_object_globals(&hash, &state, mc_binary_info);
+ // mc_hash_object_globals(&hash, &state, mc_binary_info);
// mc_hash_object_globals(&hash, &state, mc_libsimgrid_info);
- mc_hash_stacks(&hash, &state, stacks);
+ // mc_hash_stacks(&hash, &state, stacks);
mc_hash_state_destroy(&state);
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+#include "internal_config.h"
#include "mc_private.h"
+#include "smpi/private.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(mc_ignore, mc,
"Logging specific to MC ignore mechanism");
}
-void MC_new_stack_area(void *stack, char *name, void *context, size_t size)
+/** @brief Register a stack in the model checker
+ *
+ *  The stacks are allocated in the heap. The MC handles them specially
+ * when we analyse/compare the content of the heap, so it must be told
+ * where they are with this function.
+ *
+ * @param stack   Address of the stack
+ * @param process Process owning the stack
+ * @param context Execution context associated with the stack (may be NULL)
+ * @param size    Size of the stack
+ */
+void MC_new_stack_area(void *stack, smx_process_t process, void *context, size_t size)
{
int raw_mem_set = (mmalloc_get_current_heap() == mc_heap);
stack_region_t region = NULL;
region = xbt_new0(s_stack_region_t, 1);
region->address = stack;
- region->process_name = strdup(name);
+ region->process_name = process && process->name ? strdup(process->name) : NULL;
region->context = context;
region->size = size;
region->block =
((char *) stack -
(char *) ((xbt_mheap_t) std_heap)->heapbase) / BLOCKSIZE + 1;
+#ifdef HAVE_SMPI
+ if (smpi_privatize_global_variables && process) {
+ region->process_index = smpi_process_index_of_smx_process(process);
+ } else
+#endif
+ region->process_index = -1;
+
xbt_dynar_push(stacks_areas, ®ion);
if (!raw_mem_set)
* @return Process address of the given member of the 'object' struct/class
*/
void *mc_member_resolve(const void *base, dw_type_t type, dw_type_t member,
- mc_snapshot_t snapshot)
+ mc_snapshot_t snapshot, int process_index)
{
if (!member->location.size) {
return ((char *) base) + member->offset;
state.snapshot = snapshot;
state.stack_size = 1;
state.stack[0] = (uintptr_t) base;
+ state.process_index = process_index;
if (mc_dwarf_execute_expression
(member->location.size, member->location.ops, &state))
pagenos[i] = reference_pages[i];
mc_model_checker->pages->ref_page(reference_pages[i]);
} else {
- // Otherwise, we need to store the page the hard hard
+ // Otherwise, we need to store the page the hard way
// (by reading its content):
void* page = (char*) data + (i << xbt_pagebits);
pagenos[i] = mc_model_checker->pages->store_page(page);
* If possible, the restoration will be incremental
* (the modified pages will not be touched).
*
- * @param data The start of the region (must be at the beginning of a page)
- * @param pag_count Number of pages of the region
+ * @param start_addr
+ * @param page_count Number of pages of the region
+ * @param pagenos
* @param pagemap Linux kernel pagemap values fot this region (or NULL)
* @param reference_pages Snapshot page numbers of the previous soft_dirty_reset (or NULL)
*/
-void mc_restore_page_snapshot_region(mc_mem_region_t region, size_t page_count, uint64_t* pagemap, mc_mem_region_t reference_region)
+void mc_restore_page_snapshot_region(void* start_addr, size_t page_count, size_t* pagenos, uint64_t* pagemap, size_t* reference_pagenos)
{
for (size_t i=0; i!=page_count; ++i) {
bool softclean = pagemap && !(pagemap[i] & SOFT_DIRTY);
- if (softclean && reference_region && reference_region->page_numbers[i] == region->page_numbers[i]) {
+ if (softclean && reference_pagenos && pagenos[i] == reference_pagenos[i]) {
// The page is softclean and is the same as the reference one:
// the page is already in the target state.
continue;
}
// Otherwise, copy the page:
- void* target_page = mc_page_from_number(region->start_addr, i);
- const void* source_page = mc_model_checker->pages->get_page(region->page_numbers[i]);
+ void* target_page = mc_page_from_number(start_addr, i);
+ const void* source_page = mc_model_checker->pages->get_page(pagenos[i]);
memcpy(target_page, source_page, xbt_pagesize);
}
}
// ***** High level API
-mc_mem_region_t mc_region_new_sparse(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg)
+mc_mem_region_t mc_region_new_sparse(int type, void *start_addr, void* permanent_addr, size_t size, mc_mem_region_t ref_reg)
{
mc_mem_region_t new_reg = xbt_new(s_mc_mem_region_t, 1);
new_reg->start_addr = start_addr;
+ new_reg->permanent_addr = permanent_addr;
new_reg->data = NULL;
new_reg->size = size;
new_reg->page_numbers = NULL;
xbt_assert((((uintptr_t)start_addr) & (xbt_pagesize-1)) == 0,
"Not at the beginning of a page");
+ xbt_assert((((uintptr_t)permanent_addr) & (xbt_pagesize-1)) == 0,
+ "Not at the beginning of a page");
size_t page_count = mc_page_count(size);
uint64_t* pagemap = NULL;
if (_sg_mc_soft_dirty && mc_model_checker->parent_snapshot) {
pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count);
- mc_read_pagemap(pagemap, mc_page_number(NULL, start_addr), page_count);
+ mc_read_pagemap(pagemap, mc_page_number(NULL, permanent_addr), page_count);
}
// Take incremental snapshot:
- new_reg->page_numbers = mc_take_page_snapshot_region(start_addr, page_count, pagemap,
+ new_reg->page_numbers = mc_take_page_snapshot_region(permanent_addr, page_count, pagemap,
ref_reg==NULL ? NULL : ref_reg->page_numbers);
if(pagemap) {
void mc_region_restore_sparse(mc_mem_region_t reg, mc_mem_region_t ref_reg)
{
- xbt_assert((((uintptr_t)reg->start_addr) & (xbt_pagesize-1)) == 0,
+ xbt_assert((((uintptr_t)reg->permanent_addr) & (xbt_pagesize-1)) == 0,
"Not at the beginning of a page");
size_t page_count = mc_page_count(reg->size);
// Read soft-dirty bits if necessary in order to know which pages have changed:
if (_sg_mc_soft_dirty && mc_model_checker->parent_snapshot) {
pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count);
- mc_read_pagemap(pagemap, mc_page_number(NULL, reg->start_addr), page_count);
+ mc_read_pagemap(pagemap, mc_page_number(NULL, reg->permanent_addr), page_count);
}
// Incremental per-page snapshot restoration:
- mc_restore_page_snapshot_region(reg, page_count, pagemap, ref_reg);
+ mc_restore_page_snapshot_region(reg->permanent_addr, page_count, reg->page_numbers,
+ pagemap, ref_reg ? ref_reg->page_numbers : NULL);
// This is funny, the restoration can restore the state of the current heap,
// if this happen free(pagemap) would free from the wrong heap:
return new s_mc_pages_store_t(500);
}
+void mc_pages_store_delete(mc_pages_store_t store)
+{
+ delete store;
+}
+
+}
+
+#ifdef SIMGRID_TEST
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include <memory>
+
+#include "mc/mc_page_store.h"
+
+static int value = 0;
+
+static void new_content(void* data, size_t size)
+{
+ memset(data, ++value, size);
}
+
+static void* getpage()
+{
+ return mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+}
+
+extern "C" {
+
+XBT_TEST_SUITE("mc_page_store", "Page store");
+
+XBT_TEST_UNIT("base", test_mc_page_store, "Test adding/removing pages in the store")
+{
+ xbt_test_add("Init");
+ size_t pagesize = (size_t) getpagesize();
+ std::auto_ptr<s_mc_pages_store_t> store = std::auto_ptr<s_mc_pages_store_t>(new s_mc_pages_store(500));
+ void* data = getpage();
+ xbt_test_assert(store->size()==0, "Bad size");
+
+ xbt_test_add("Store the page once");
+ new_content(data, pagesize);
+ size_t pageno1 = store->store_page(data);
+ xbt_test_assert(store->get_ref(pageno1)==1, "Bad refcount");
+ const void* copy = store->get_page(pageno1);
+ xbt_test_assert(memcmp(data, copy, pagesize)==0, "Page data should be the same");
+ xbt_test_assert(store->size()==1, "Bad size");
+
+ xbt_test_add("Store the same page again");
+ size_t pageno2 = store->store_page(data);
+ xbt_test_assert(pageno1==pageno2, "Page should be the same");
+ xbt_test_assert(store->get_ref(pageno1)==2, "Bad refcount");
+ xbt_test_assert(store->size()==1, "Bad size");
+
+ xbt_test_add("Store a new page");
+ new_content(data, pagesize);
+ size_t pageno3 = store->store_page(data);
+ xbt_test_assert(pageno1 != pageno3, "New page should be different");
+ xbt_test_assert(store->size()==2, "Bad size");
+
+ xbt_test_add("Unref pages");
+ store->unref_page(pageno1);
+ xbt_assert(store->get_ref(pageno1)==1, "Bad refcount");
+ xbt_assert(store->size()==2, "Bad size");
+ store->unref_page(pageno2);
+ xbt_test_assert(store->size()==1, "Bad size");
+
+ xbt_test_add("Reallocate page");
+ new_content(data, pagesize);
+ size_t pageno4 = store->store_page(data);
+ xbt_test_assert(pageno1 == pageno4, "Page was not reused");
+ xbt_test_assert(store->get_ref(pageno4)==1, "Bad refcount");
+ xbt_test_assert(store->size()==2, "Bad size");
+}
+
+}
+
+#endif /* SIMGRID_TEST */
typedef struct s_mc_pages_store s_mc_pages_store_t, * mc_pages_store_t;
mc_pages_store_t mc_pages_store_new();
+void mc_pages_store_delete(mc_pages_store_t store);
/**
*/
#define NB_REGIONS 3 /* binary data (data + BSS) (type = 2), libsimgrid data (data + BSS) (type = 1), std_heap (type = 0)*/
+/** @brief Copy/snapshot of a given memory region
+ *
+ * Two types of region snapshots exist:
+ * <ul>
+ * <li>flat/dense snapshots are a simple copy of the region;</li>
+ * <li>sparse/per-page snapshots are snapshots which share
+ * identical pages.</li>
+ * </ul>
+ */
typedef struct s_mc_mem_region{
- // Real address:
+ /** @brief Virtual address of the region in the simulated process */
void *start_addr;
- // Copy of the datra:
+
+ /** @brief Permanent virtual address of the region
+ *
+ * This is usually the same address as the simulated process address.
+ * However, when using SMPI privatization of global variables,
+ * each SMPI process has its own set of global variables stored
+ * at a different virtual address. The scheduler maps those regions
+ * onto the region of the global variables.
+ *
+ * */
+ void *permanent_addr;
+
+ /** @brief Copy of the snapshot for flat snapshots regions (NULL otherwise) */
void *data;
- // Size of the data region:
+
+ /** @brief Size of the data region in bytes */
size_t size;
- // For per-page snapshots, this is an array to the number of
+
+ /** @brief Pages indices in the page store for per-page snapshots (NULL otherwise) */
size_t* page_numbers;
+
} s_mc_mem_region_t, *mc_mem_region_t;
static inline __attribute__ ((always_inline))
xbt_dynar_t ignored_data;
} s_mc_snapshot_t, *mc_snapshot_t;
-mc_mem_region_t mc_get_snapshot_region(void* addr, mc_snapshot_t snapshot);
+/** @brief Process index used when no process is available
+ *
+ * The expected behaviour is that if a process index is needed it will fail.
+ * */
+#define MC_NO_PROCESS_INDEX -1
+
+/** @brief Process index when any process is suitable
+ *
+ * We could use a special negative value in the future.
+ */
+#define MC_ANY_PROCESS_INDEX 0
+
+mc_mem_region_t mc_get_snapshot_region(void* addr, mc_snapshot_t snapshot, int process_index);
static inline __attribute__ ((always_inline))
-mc_mem_region_t mc_get_region_hinted(void* addr, mc_snapshot_t snapshot, mc_mem_region_t region)
+mc_mem_region_t mc_get_region_hinted(void* addr, mc_snapshot_t snapshot, int process_index, mc_mem_region_t region)
{
if (mc_region_contain(region, addr))
return region;
else
- return mc_get_snapshot_region(addr, snapshot);
+ return mc_get_snapshot_region(addr, snapshot, process_index);
}
/** Information about a given stack frame
typedef struct s_mc_snapshot_stack{
xbt_dynar_t local_variables;
xbt_dynar_t stack_frames; // mc_stack_frame_t
+ int process_index;
}s_mc_snapshot_stack_t, *mc_snapshot_stack_t;
typedef struct s_mc_global_t{
size_t* mc_take_page_snapshot_region(void* data, size_t page_count, uint64_t* pagemap, size_t* reference_pages);
void mc_free_page_snapshot_region(size_t* pagenos, size_t page_count);
-void mc_restore_page_snapshot_region(mc_mem_region_t region, size_t page_count, uint64_t* pagemap, mc_mem_region_t reference_region);
+void mc_restore_page_snapshot_region(void* start_addr, size_t page_count, size_t* pagenos, uint64_t* pagemap, size_t* reference_pagenos);
-mc_mem_region_t mc_region_new_sparse(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg);
+mc_mem_region_t mc_region_new_sparse(int type, void *start_addr, void* data_addr, size_t size, mc_mem_region_t ref_reg);
+void MC_region_destroy(mc_mem_region_t reg);
void mc_region_restore_sparse(mc_mem_region_t reg, mc_mem_region_t ref_reg);
void mc_softdirty_reset();
void* mc_snapshot_read_fragmented(void* addr, mc_mem_region_t region, void* target, size_t size);
-void* mc_snapshot_read(void* addr, mc_snapshot_t snapshot, void* target, size_t size);
-int mc_snapshot_region_memcp(
+void* mc_snapshot_read(void* addr, mc_snapshot_t snapshot, int process_index, void* target, size_t size);
+int mc_snapshot_region_memcmp(
void* addr1, mc_mem_region_t region1,
void* addr2, mc_mem_region_t region2, size_t size);
-int mc_snapshot_memcp(
+int mc_snapshot_memcmp(
void* addr1, mc_snapshot_t snapshot1,
- void* addr2, mc_snapshot_t snapshot2, size_t size);
+ void* addr2, mc_snapshot_t snapshot2, int process_index, size_t size);
-static void* mc_snapshot_read_pointer(void* addr, mc_snapshot_t snapshot);
+static void* mc_snapshot_read_pointer(void* addr, mc_snapshot_t snapshot, int process_index);
/** @brief State of the model-checker (global variables for the model checker)
*
mc_expression_t locations;
} s_mc_location_list_t, *mc_location_list_t;
-uintptr_t mc_dwarf_resolve_location(mc_expression_t expression, mc_object_info_t object_info, unw_cursor_t* c, void* frame_pointer_address, mc_snapshot_t snapshot);
-uintptr_t mc_dwarf_resolve_locations(mc_location_list_t locations, mc_object_info_t object_info, unw_cursor_t* c, void* frame_pointer_address, mc_snapshot_t snapshot);
+uintptr_t mc_dwarf_resolve_location(mc_expression_t expression, mc_object_info_t object_info, unw_cursor_t* c, void* frame_pointer_address, mc_snapshot_t snapshot, int process_index);
+uintptr_t mc_dwarf_resolve_locations(mc_location_list_t locations, mc_object_info_t object_info, unw_cursor_t* c, void* frame_pointer_address, mc_snapshot_t snapshot, int process_index);
void mc_dwarf_expression_clear(mc_expression_t expression);
void mc_dwarf_expression_init(mc_expression_t expression, size_t len, Dwarf_Op* ops);
dw_type_t full_type; // The same (but more complete) type
};
-void* mc_member_resolve(const void* base, dw_type_t type, dw_type_t member, mc_snapshot_t snapshot);
+void* mc_member_resolve(const void* base, dw_type_t type, dw_type_t member, mc_snapshot_t snapshot, int process_index);
typedef struct s_dw_variable{
Dwarf_Off dwarf_offset; /* Global offset of the field. */
void* frame_base;
mc_snapshot_t snapshot;
mc_object_info_t object_info;
+ int process_index;
} s_mc_expression_state_t, *mc_expression_state_t;
int mc_dwarf_execute_expression(size_t n, const Dwarf_Op* ops, mc_expression_state_t state);
* \return Translated address in the snapshot address space
* */
static inline __attribute__((always_inline))
-void* mc_translate_address(uintptr_t addr, mc_snapshot_t snapshot)
+void* mc_translate_address(uintptr_t addr, mc_snapshot_t snapshot, int process_index)
{
// If not in a process state/clone:
return (uintptr_t *) addr;
}
- mc_mem_region_t region = mc_get_snapshot_region((void*) addr, snapshot);
+ mc_mem_region_t region = mc_get_snapshot_region((void*) addr, snapshot, process_index);
xbt_assert(mc_region_contain(region, (void*) addr), "Trying to read out of the region boundary.");
if(snapshot==NULL)
xbt_die("snapshot is NULL");
void** addr = &((xbt_mheap_t)std_heap)->breakval;
- return mc_snapshot_read_pointer(addr, snapshot);
+ return mc_snapshot_read_pointer(addr, snapshot, MC_ANY_PROCESS_INDEX);
}
static inline __attribute__ ((always_inline))
-void* mc_snapshot_read_pointer(void* addr, mc_snapshot_t snapshot)
+void* mc_snapshot_read_pointer(void* addr, mc_snapshot_t snapshot, int process_index)
{
void* res;
- return *(void**) mc_snapshot_read(addr, snapshot, &res, sizeof(void*));
+ return *(void**) mc_snapshot_read(addr, snapshot, process_index, &res, sizeof(void*));
}
/** @brief Read memory from a snapshot region
static inline __attribute__((always_inline))
void* mc_snapshot_read_region(void* addr, mc_mem_region_t region, void* target, size_t size)
{
- uintptr_t offset = (uintptr_t) addr - (uintptr_t) region->start_addr;
+ if (region==NULL)
+ return addr;
+
+ uintptr_t offset = (char*) addr - (char*) region->start_addr;
- xbt_assert(addr >= region->start_addr && (char*) addr+size < (char*)region->start_addr+region->size,
+ xbt_assert(mc_region_contain(region, addr),
"Trying to read out of the region boundary.");
// Linear memory region:
if (region->data) {
- return (void*) ((uintptr_t) region->data + offset);
+ return (char*) region->data + offset;
}
// Fragmented memory region:
else if (region->page_numbers) {
+ // Last byte of the region:
void* end = (char*) addr + size - 1;
if( mc_same_page(addr, end) ) {
// The memory is contained in a single page:
}
}
+static inline __attribute__ ((always_inline))
+void* mc_snapshot_read_pointer_region(void* addr, mc_mem_region_t region)
+{
+ void* res;
+ return *(void**) mc_snapshot_read_region(addr, region, &res, sizeof(void*));
+}
SG_END_DECL()
#include <stdbool.h>
+#include "internal_config.h"
+#include "smpi/private.h"
+
#include "mc_private.h"
#include "mc_mmu.h"
#include "mc_page_store.h"
-mc_mem_region_t mc_get_snapshot_region(void* addr, mc_snapshot_t snapshot)
+/** @brief Find the snapshotted region from a pointer
+ *
+ * @param addr Pointer
+ * @param snapshot Snapshot
+ * @param process_index Index of the process (used to select the matching SMPI privatized region, if any)
+ * @return Snapshot region in the snapshot this pointer belongs to
+ *         (or NULL if it does not belong to any snapshot region)
+ * */
+mc_mem_region_t mc_get_snapshot_region(void* addr, mc_snapshot_t snapshot, int process_index)
{
+#ifdef HAVE_SMPI
+ if (snapshot->privatization_regions) {
+
+ if (process_index < 0) {
+
+ mc_mem_region_t region = snapshot->privatization_regions[0];
+ if( mc_region_contain(region, addr) ) {
+ xbt_die("Missing process index");
+ }
+
+ } else {
+ if (process_index >= smpi_process_count()) {
+ xbt_die("Invalid process index");
+ }
+
+ mc_mem_region_t region = snapshot->privatization_regions[process_index];
+ if( mc_region_contain(region, addr) ) {
+ return region;
+ }
+
+ }
+ }
+#endif
+
for (size_t i = 0; i != NB_REGIONS; ++i) {
mc_mem_region_t region = snapshot->regions[i];
- void* start = region->start_addr;
- void* end = (char*) start + region->size;
-
- if (addr >= start && addr < end) {
+ if ( region && mc_region_contain(region, addr) ) {
return region;
}
}
*/
void* mc_snapshot_read_fragmented(void* addr, mc_mem_region_t region, void* target, size_t size)
{
+ // Last byte of the memory area:
void* end = (char*) addr + size - 1;
+
+ // Page of the last byte of the memory area:
size_t page_end = mc_page_number(NULL, end);
+
void* dest = target;
+ if (dest==NULL) {
+ xbt_die("Missing destination buffer for fragmented memory access");
+ }
+
// Read each page:
while (mc_page_number(NULL, addr) != page_end) {
void* snapshot_addr = mc_translate_address_region((uintptr_t) addr, region);
* @param snapshot Snapshot (or NULL is no snapshot)
* @param target Buffer to store the value
* @param size Size of the data to read in bytes
- * @return Pointer where the data is located (target buffer of original location)
+ * @return Pointer where the data is located (target buffer or original location)
*/
-void* mc_snapshot_read(void* addr, mc_snapshot_t snapshot, void* target, size_t size)
+void* mc_snapshot_read(void* addr, mc_snapshot_t snapshot, int process_index, void* target, size_t size)
{
if (snapshot) {
- mc_mem_region_t region = mc_get_snapshot_region(addr, snapshot);
+ mc_mem_region_t region = mc_get_snapshot_region(addr, snapshot, process_index);
return mc_snapshot_read_region(addr, region, target, size);
} else {
return addr;
* @param snapshot2 Region of the address in the second snapshot
* @return same as memcmp
* */
-int mc_snapshot_region_memcp(
+int mc_snapshot_region_memcmp(
void* addr1, mc_mem_region_t region1,
- void* addr2, mc_mem_region_t region2, size_t size)
+ void* addr2, mc_mem_region_t region2,
+ size_t size)
{
// Using alloca() for large allocations may trigger stack overflow:
// use malloc if the buffer is too big.
-
bool stack_alloc = size < 64;
- void* buffer = stack_alloc ? alloca(2*size) : malloc(2*size);
- void* buffer1 = mc_snapshot_read_region(addr1, region1, buffer, size);
- void* buffer2 = mc_snapshot_read_region(addr2, region2, (char*) buffer + size, size);
+ void* buffer1a = (region1==NULL || region1->data) ? NULL : stack_alloc ? alloca(size) : malloc(size);
+ void* buffer2a = (region2==NULL || region2->data) ? NULL : stack_alloc ? alloca(size) : malloc(size);
+ void* buffer1 = mc_snapshot_read_region(addr1, region1, buffer1a, size);
+ void* buffer2 = mc_snapshot_read_region(addr2, region2, buffer2a, size);
int res;
if (buffer1 == buffer2) {
- res = 0;
+ res = 0;
} else {
res = memcmp(buffer1, buffer2, size);
}
if (!stack_alloc) {
- free(buffer);
+ free(buffer1a);
+ free(buffer2a);
}
return res;
}
* @param snapshot2 Second snapshot
* @return same as memcmp
* */
-int mc_snapshot_memcp(
+int mc_snapshot_memcmp(
void* addr1, mc_snapshot_t snapshot1,
- void* addr2, mc_snapshot_t snapshot2, size_t size)
+ void* addr2, mc_snapshot_t snapshot2, int process_index, size_t size)
{
- mc_mem_region_t region1 = mc_get_snapshot_region(addr1, snapshot1);
- mc_mem_region_t region2 = mc_get_snapshot_region(addr2, snapshot2);
- return mc_snapshot_region_memcp(addr1, region1, addr2, region2, size);
+ mc_mem_region_t region1 = mc_get_snapshot_region(addr1, snapshot1, process_index);
+ mc_mem_region_t region2 = mc_get_snapshot_region(addr2, snapshot2, process_index);
+ return mc_snapshot_region_memcmp(addr1, region1, addr2, region2, size);
}
+
+#ifdef SIMGRID_TEST
+
+#include <string.h>
+#include <stdlib.h>
+
+#include <sys/mman.h>
+
+#include "mc/mc_private.h"
+
+XBT_TEST_SUITE("mc_snapshot", "Snapshots");
+
+static inline void init_memory(void* mem, size_t size)
+{
+ char* dest = (char*) mem;
+ for (int i=0; i!=size; ++i) {
+ dest[i] = rand() & 255;
+ }
+}
+
+static void test_snapshot(bool sparse_checkpoint);
+
+XBT_TEST_UNIT("page_snapshots", test_per_snpashots, "Test per-page snapshots")
+{
+ test_snapshot(1);
+}
+
+
+XBT_TEST_UNIT("flat_snapshot", test_flat_snapshots, "Test flat snapshots")
+{
+ test_snapshot(0);
+}
+
+
+static void test_snapshot(bool sparse_checkpoint) {
+
+ xbt_test_add("Initialisation");
+ _sg_mc_soft_dirty = 0;
+ _sg_mc_sparse_checkpoint = sparse_checkpoint;
+ xbt_assert(xbt_pagesize == getpagesize());
+ xbt_assert(1 << xbt_pagebits == xbt_pagesize);
+ mc_model_checker = xbt_new0(s_mc_model_checker_t, 1);
+ mc_model_checker->pages = mc_pages_store_new();
+
+ for(int n=1; n!=256; ++n) {
+
+ // Store region page(s):
+ size_t byte_size = n * xbt_pagesize;
+ void* source = mmap(NULL, byte_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ xbt_assert(source!=MAP_FAILED, "Could not allocate source memory");
+
+ // Init memory and take snapshots:
+ init_memory(source, byte_size);
+ mc_mem_region_t region0 = mc_region_new_sparse(0, source, source, byte_size, NULL);
+ for(int i=0; i<n; i+=2) {
+ init_memory((char*) source + i*xbt_pagesize, xbt_pagesize);
+ }
+ mc_mem_region_t region = mc_region_new_sparse(0, source, source, byte_size, NULL);
+
+ void* destination = mmap(NULL, byte_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ xbt_assert(source!=MAP_FAILED, "Could not allocate destination memory");
+
+ xbt_test_add("Reading whole region data for %i page(s)", n);
+ void* read = mc_snapshot_read_region(source, region, destination, byte_size);
+ xbt_test_assert(!memcmp(source, read, byte_size), "Mismatch in mc_snapshot_read_region()");
+
+ xbt_test_add("Reading parts of region data for %i page(s)", n);
+ for(int j=0; j!=100; ++j) {
+ size_t offset = rand() % byte_size;
+ size_t size = rand() % (byte_size - offset);
+ void* read = mc_snapshot_read_region((char*) source+offset, region, destination, size);
+ xbt_test_assert(!memcmp((char*) source+offset, read, size),
+ "Mismatch in mc_snapshot_read_region()");
+ }
+
+ xbt_test_add("Compare whole region data for %i page(s)", n);
+ xbt_test_assert(!mc_snapshot_region_memcmp(source, NULL, source, region, byte_size),
+ "Mismatch in mc_snapshot_region_memcmp() for the whole region");
+ xbt_test_assert(mc_snapshot_region_memcmp(source, region0, source, region, byte_size),
+ "Unexpected match in mc_snapshot_region_memcmp() with previous snapshot");
+
+ xbt_test_add("Compare parts of region data for %i page(s) with current value", n);
+ for(int j=0; j!=100; ++j) {
+ size_t offset = rand() % byte_size;
+ size_t size = rand() % (byte_size - offset);
+ xbt_test_assert(!mc_snapshot_region_memcmp((char*) source+offset, NULL, (char*) source+offset, region, size),
+ "Mismatch in mc_snapshot_region_memcmp()");
+ }
+
+ xbt_test_add("Compare parts of region data for %i page(s) with itself", n);
+ for(int j=0; j!=100; ++j) {
+ size_t offset = rand() % byte_size;
+ size_t size = rand() % (byte_size - offset);
+ xbt_test_assert(!mc_snapshot_region_memcmp((char*) source+offset, region, (char*) source+offset, region, size),
+ "Mismatch in mc_snapshot_region_memcmp()");
+ }
+
+ if (n==1) {
+ xbt_test_add("Read pointer for %i page(s)", n);
+ memcpy(source, &mc_model_checker, sizeof(void*));
+ mc_mem_region_t region2 = mc_region_new_sparse(0, source, source, byte_size, NULL);
+ xbt_test_assert(mc_snapshot_read_pointer_region(source, region2) == mc_model_checker,
+ "Mismtach in mc_snapshot_read_pointer_region()");
+ MC_region_destroy(region2);
+ }
+
+ MC_region_destroy(region);
+ MC_region_destroy(region0);
+ munmap(destination, byte_size);
+ munmap(source, byte_size);
+ }
+
+ mc_pages_store_delete(mc_model_checker->pages);
+ xbt_free(mc_model_checker);
+ mc_model_checker = NULL;
+}
+
+#endif /* SIMGRID_TEST */
+
xbt_cfg_register(&_sg_cfg_set, "model-check/soft-dirty",
"Use sparse per-page snapshots.",
xbt_cfgelm_boolean, 1, 1, _mc_cfg_cb_soft_dirty, NULL);
- xbt_cfg_setdefault_boolean(_sg_cfg_set, "model-check/soft-dirty", "yes");
+ xbt_cfg_setdefault_boolean(_sg_cfg_set, "model-check/soft-dirty", "no");
/* do liveness model-checking */
xbt_cfg_register(&_sg_cfg_set, "model-check/property",
sysv_maestro_context = context;
}
- if(MC_is_active() && code)
- MC_new_stack_area(context->stack, ((smx_context_t)context)->process->name,
+ if (MC_is_active() && code) {
+ MC_new_stack_area(context->stack, ((smx_context_t)context)->process,
&(context->uc), smx_context_usable_stack_size);
+ }
return (smx_context_t) context;
}
/* Update global variables */
xbt_lib_set(host_lib,name,SIMIX_HOST_LEVEL,smx_host);
-
+
return xbt_lib_get_elm_or_null(host_lib, name);
}
if (surf_resource_get_state(surf_workstation_resource_priv(h))==SURF_RESOURCE_OFF) {
surf_resource_set_state(surf_workstation_resource_priv(h), SURF_RESOURCE_ON);
+
unsigned int cpt;
smx_process_arg_t arg;
xbt_dynar_foreach(host->boot_processes,cpt,arg) {
smx_process_t process;
+ char** argv = xbt_new(char*, arg->argc);
+ for (int i=0; i<arg->argc; i++)
+ argv[i] = xbt_strdup(arg->argv[i]);
+
XBT_DEBUG("Booting Process %s(%s) right now", arg->argv[0], arg->hostname);
if (simix_global->create_process_function) {
simix_global->create_process_function(&process,
- arg->argv[0],
+ argv[0],
arg->code,
NULL,
arg->hostname,
arg->kill_time,
arg->argc,
- arg->argv,
+ argv,
arg->properties,
arg->auto_restart,
NULL);
arg->hostname,
arg->kill_time,
arg->argc,
- arg->argv,
+ argv,
arg->properties,
arg->auto_restart);
}
smx_host_priv_t host = SIMIX_host_priv(h);
xbt_assert((host != NULL), "Invalid parameters");
-
+
if (surf_resource_get_state(surf_workstation_resource_priv(h))==SURF_RESOURCE_ON) {
surf_resource_set_state(surf_workstation_resource_priv(h), SURF_RESOURCE_OFF);
xbt_swag_free(host->process_list);
/* Clean host structure */
- free(host);
+ free(host);
return;
}
--- /dev/null
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+/*
+ * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
+ * University Research and Technology
+ * Corporation. All rights reserved.
+ * Copyright (c) 2004-2012 The University of Tennessee and The University
+ * of Tennessee Research Foundation. All rights
+ * reserved.
+ * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
+ * University of Stuttgart. All rights reserved.
+ * Copyright (c) 2004-2005 The Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2009 University of Houston. All rights reserved.
+ *
+ * Additional copyrights may follow
+ */
+
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/* Copyright (c) 2001-2014, The Ohio State University. All rights
+ * reserved.
+ *
+ * This file is part of the MVAPICH2 software package developed by the
+ * team members of The Ohio State University's Network-Based Computing
+ * Laboratory (NBCL), headed by Professor Dhabaleswar K. (DK) Panda.
+ *
+ * For detailed copyright and licensing information, please refer to the
+ * copyright file COPYRIGHT in the top level MVAPICH2 directory.
+ *
+ */
+
+//correct on stampede
+#define MV2_ALLTOALL_THROTTLE_FACTOR 4
+
+#include "colls_private.h"
+
+int smpi_coll_tuned_alltoall_mvapich2_scatter_dest(
+ void *sendbuf,
+ int sendcount,
+ MPI_Datatype sendtype,
+ void *recvbuf,
+ int recvcount,
+ MPI_Datatype recvtype,
+ MPI_Comm comm)
+{
+ int comm_size, i, j;
+ MPI_Aint sendtype_extent = 0, recvtype_extent = 0;
+ int mpi_errno=MPI_SUCCESS;
+ int dst, rank;
+ MPI_Request *reqarray;
+ MPI_Status *starray;
+
+ if (recvcount == 0) return MPI_SUCCESS;
+
+ comm_size = smpi_comm_size(comm);
+ rank = smpi_comm_rank(comm);
+
+ /* Get extent of send and recv types */
+ recvtype_extent = smpi_datatype_get_extent(recvtype);
+ sendtype_extent = smpi_datatype_get_extent(sendtype);
+
+ /* Medium-size message. Use isend/irecv with scattered
+ destinations. Use Tony Ladd's modification to post only
+ a small number of isends/irecvs at a time. */
+ /* FIXME: This converts the Alltoall to a set of blocking phases.
+ Two alternatives should be considered:
+ 1) the choice of communication pattern could try to avoid
+ contending routes in each phase
+ 2) rather than wait for all communication to finish (waitall),
+ we could maintain constant queue size by using waitsome
+ and posting new isend/irecv as others complete. This avoids
+ synchronization delays at the end of each block (when
+ there are only a few isend/irecvs left)
+ */
+ int ii, ss, bblock;
+
+ //Stampede is configured with
+ bblock = MV2_ALLTOALL_THROTTLE_FACTOR;//mv2_coll_param.alltoall_throttle_factor;
+
+ if (bblock >= comm_size) bblock = comm_size;
+ /* If throttle_factor is n, each process posts n pairs of isend/irecv
+ in each iteration. */
+
+ /* FIXME: This should use the memory macros (there are storage
+ leaks here if there is an error, for example) */
+ reqarray= (MPI_Request*)xbt_malloc(2*bblock*sizeof(MPI_Request));
+
+ starray=(MPI_Status *)xbt_malloc(2*bblock*sizeof(MPI_Status));
+
+ for (ii=0; ii<comm_size; ii+=bblock) {
+ ss = comm_size-ii < bblock ? comm_size-ii : bblock;
+ /* do the communication -- post ss sends and receives: */
+ for ( i=0; i<ss; i++ ) {
+ dst = (rank+i+ii) % comm_size;
+ reqarray[i]=smpi_mpi_irecv((char *)recvbuf +
+ dst*recvcount*recvtype_extent,
+ recvcount, recvtype, dst,
+ COLL_TAG_ALLTOALL, comm);
+
+ }
+ for ( i=0; i<ss; i++ ) {
+ dst = (rank-i-ii+comm_size) % comm_size;
+ reqarray[i+ss]=smpi_mpi_isend((char *)sendbuf +
+ dst*sendcount*sendtype_extent,
+ sendcount, sendtype, dst,
+ COLL_TAG_ALLTOALL, comm);
+
+ }
+
+ /* ... then wait for them to finish: */
+ smpi_mpi_waitall(2*ss,reqarray,starray);
+
+
+ /* --BEGIN ERROR HANDLING-- */
+ if (mpi_errno == MPI_ERR_IN_STATUS) {
+ for (j=0; j<2*ss; j++) {
+ if (starray[j].MPI_ERROR != MPI_SUCCESS) {
+ mpi_errno = starray[j].MPI_ERROR;
+ }
+ }
+ }
+ }
+ /* --END ERROR HANDLING-- */
+
+ return (mpi_errno);
+
+}
COLL_APPLY(action, COLL_ALLTOALL_SIG, ring_mpi_barrier) COLL_sep \
COLL_APPLY(action, COLL_ALLTOALL_SIG, ring_one_barrier) COLL_sep \
COLL_APPLY(action, COLL_ALLTOALL_SIG, mvapich2) COLL_sep \
+COLL_APPLY(action, COLL_ALLTOALL_SIG, mvapich2_scatter_dest) COLL_sep \
COLL_APPLY(action, COLL_ALLTOALL_SIG, ompi) COLL_sep \
COLL_APPLY(action, COLL_ALLTOALL_SIG, mpich) COLL_sep \
COLL_APPLY(action, COLL_ALLTOALL_SIG, automatic)
COLL_APPLY(action, COLL_REDUCE_SIG, ompi_binomial) COLL_sep \
COLL_APPLY(action, COLL_REDUCE_SIG, mpich) COLL_sep \
COLL_APPLY(action, COLL_REDUCE_SIG, mvapich2) COLL_sep \
+COLL_APPLY(action, COLL_REDUCE_SIG, mvapich2_knomial) COLL_sep \
COLL_APPLY(action, COLL_REDUCE_SIG, automatic)
COLL_REDUCES(COLL_PROTO, COLL_NOsep)
--- /dev/null
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+/*
+ * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
+ * University Research and Technology
+ * Corporation. All rights reserved.
+ * Copyright (c) 2004-2012 The University of Tennessee and The University
+ * of Tennessee Research Foundation. All rights
+ * reserved.
+ * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
+ * University of Stuttgart. All rights reserved.
+ * Copyright (c) 2004-2005 The Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2009 University of Houston. All rights reserved.
+ *
+ * Additional copyrights may follow
+ */
+
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/* Copyright (c) 2001-2014, The Ohio State University. All rights
+ * reserved.
+ *
+ * This file is part of the MVAPICH2 software package developed by the
+ * team members of The Ohio State University's Network-Based Computing
+ * Laboratory (NBCL), headed by Professor Dhabaleswar K. (DK) Panda.
+ *
+ * For detailed copyright and licensing information, please refer to the
+ * copyright file COPYRIGHT in the top level MVAPICH2 directory.
+ *
+ */
+
+#include "colls_private.h"
+extern int mv2_reduce_intra_knomial_factor;
+// int mv2_reduce_knomial_factor = 2;
+
+
+
+static int MPIR_Reduce_knomial_trace(int root, int reduce_knomial_factor,
+ MPI_Comm comm, int *dst, int *expected_send_count,
+ int *expected_recv_count, int **src_array)
+{
+ int mask=0x1, k, comm_size, src, rank, relative_rank, lroot=0;
+ int orig_mask=0x1;
+ int recv_iter=0, send_iter=0;
+ int *knomial_reduce_src_array=NULL;
+ comm_size = smpi_comm_size(comm);
+ rank = smpi_comm_rank(comm);
+
+ lroot = root;
+ relative_rank = (rank - lroot + comm_size) % comm_size;
+
+ /* First compute to whom we need to send data */
+ while (mask < comm_size) {
+ if (relative_rank % (reduce_knomial_factor*mask)) {
+ *dst = relative_rank/(reduce_knomial_factor*mask)*
+ (reduce_knomial_factor*mask)+root;
+ if (*dst >= comm_size) {
+ *dst -= comm_size;
+ }
+ send_iter++;
+ break;
+ }
+ mask *= reduce_knomial_factor;
+ }
+ mask /= reduce_knomial_factor;
+
+ /* Now compute how many children we have in the knomial-tree */
+ orig_mask = mask;
+ while (mask > 0) {
+ for(k=1;k<reduce_knomial_factor;k++) {
+ if (relative_rank + mask*k < comm_size) {
+ recv_iter++;
+ }
+ }
+ mask /= reduce_knomial_factor;
+ }
+
+ /* Finally, fill up the src array */
+ if(recv_iter > 0) {
+ knomial_reduce_src_array = xbt_malloc(sizeof(int)*recv_iter);
+ }
+
+ mask = orig_mask;
+ recv_iter=0;
+ while (mask > 0) {
+ for(k=1;k<reduce_knomial_factor;k++) {
+ if (relative_rank + mask*k < comm_size) {
+ src = rank + mask*k;
+ if (src >= comm_size) {
+ src -= comm_size;
+ }
+ knomial_reduce_src_array[recv_iter++] = src;
+ }
+ }
+ mask /= reduce_knomial_factor;
+ }
+
+ *expected_recv_count = recv_iter;
+ *expected_send_count = send_iter;
+ *src_array = knomial_reduce_src_array;
+ return 0;
+}
+
+int smpi_coll_tuned_reduce_mvapich2_knomial (
+ void *sendbuf,
+ void *recvbuf,
+ int count,
+ MPI_Datatype datatype,
+ MPI_Op op,
+ int root,
+ MPI_Comm comm)
+{
+ int mpi_errno = MPI_SUCCESS;
+ int rank, is_commutative;
+ int src, k;
+ MPI_Request send_request;
+ int index=0;
+ MPI_Aint true_lb, true_extent, extent;
+ MPI_Status status;
+ int recv_iter=0, dst=-1, expected_send_count, expected_recv_count;
+ int *src_array=NULL;
+ void **tmp_buf=NULL;
+ MPI_Request *requests=NULL;
+
+
+ if (count == 0) return MPI_SUCCESS;
+
+ rank = smpi_comm_rank(comm);
+
+ /* Create a temporary buffer */
+
+ smpi_datatype_extent(datatype, &true_lb, &true_extent);
+ extent = smpi_datatype_get_extent(datatype);
+
+ is_commutative = smpi_op_is_commute(op);
+
+ if (rank != root) {
+ recvbuf=(void *)xbt_malloc(count*(MAX(extent,true_extent)));
+ recvbuf = (void *)((char*)recvbuf - true_lb);
+ }
+
+ if ((rank != root) || (sendbuf != MPI_IN_PLACE)) {
+ mpi_errno = smpi_datatype_copy(sendbuf, count, datatype, recvbuf,
+ count, datatype);
+ }
+
+
+
+ MPIR_Reduce_knomial_trace(root, mv2_reduce_intra_knomial_factor, comm,
+ &dst, &expected_send_count, &expected_recv_count, &src_array);
+
+ if(expected_recv_count > 0 ) {
+ tmp_buf = xbt_malloc(sizeof(void *)*expected_recv_count);
+ requests = xbt_malloc(sizeof(MPI_Request)*expected_recv_count);
+ for(k=0; k < expected_recv_count; k++ ) {
+ tmp_buf[k] = xbt_malloc(count*(MAX(extent,true_extent)));
+ tmp_buf[k] = (void *)((char*)tmp_buf[k] - true_lb);
+ }
+
+ while(recv_iter < expected_recv_count) {
+ src = src_array[expected_recv_count - (recv_iter+1)];
+
+ requests[recv_iter]=smpi_mpi_irecv (tmp_buf[recv_iter], count, datatype ,src,
+ COLL_TAG_REDUCE, comm);
+ recv_iter++;
+
+ }
+
+ recv_iter=0;
+ while(recv_iter < expected_recv_count) {
+ index=smpi_mpi_waitany(expected_recv_count, requests,
+ &status);
+ recv_iter++;
+
+ if (is_commutative) {
+ smpi_op_apply(op, tmp_buf[index], recvbuf, &count, &datatype);
+ }
+ }
+
+ for(k=0; k < expected_recv_count; k++ ) {
+ xbt_free(tmp_buf[k]);
+ }
+ xbt_free(tmp_buf);
+ xbt_free(requests);
+ }
+
+ if(src_array != NULL) {
+ xbt_free(src_array);
+ }
+
+ if(rank != root) {
+ send_request=smpi_mpi_isend(recvbuf,count, datatype, dst,
+ COLL_TAG_REDUCE,comm);
+
+ smpi_mpi_waitall(1, &send_request, &status);
+ }
+
+ /* --END ERROR HANDLING-- */
+
+ return mpi_errno;
+}
#define MPIR_Alltoall_bruck_MV2 smpi_coll_tuned_alltoall_bruck
#define MPIR_Alltoall_RD_MV2 smpi_coll_tuned_alltoall_rdb
-#define MPIR_Alltoall_Scatter_dest_MV2 smpi_coll_tuned_alltoall_ring
+#define MPIR_Alltoall_Scatter_dest_MV2 smpi_coll_tuned_alltoall_mvapich2_scatter_dest
#define MPIR_Alltoall_pairwise_MV2 smpi_coll_tuned_alltoall_pair
#define MPIR_Alltoall_inplace_MV2 smpi_coll_tuned_alltoall_ring
int *mv2_size_allgather_tuning_table = NULL;
mv2_allgather_tuning_table **mv2_allgather_thresholds_table = NULL;
+/* No-op placeholder for the MVAPICH2 "RD allgather comm" table entry: it
+ * performs no communication and always reports success (0).
+ * NOTE(review): presumably a stub until the real algorithm is ported from
+ * MVAPICH2 -- confirm it is never selected for actual data movement. */
+static int MPIR_Allgather_RD_Allgather_Comm_MV2( void *sendbuf,
+                                                 int sendcount,
+                                                 MPI_Datatype sendtype,
+                                                 void *recvbuf,
+                                                 int recvcount,
+                                                 MPI_Datatype recvtype, MPI_Comm comm_ptr)
+{
+    return 0;
+}
+
#define MPIR_Allgather_Bruck_MV2 smpi_coll_tuned_allgather_bruck
#define MPIR_Allgather_RD_MV2 smpi_coll_tuned_allgather_rdb
-#define MPIR_Allgather_RD_Allgather_Comm_MV2 smpi_coll_tuned_allgather_rdb
#define MPIR_Allgather_Ring_MV2 smpi_coll_tuned_allgather_ring
mv2_reduce_tuning_table *mv2_reduce_thresholds_table = NULL;
-int mv2_reduce_intra_knomial_factor = -1;
-int mv2_reduce_inter_knomial_factor = -1;
+int mv2_reduce_intra_knomial_factor = 2;
+int mv2_reduce_inter_knomial_factor = 2;
int (*MV2_Reduce_function)( void *sendbuf,
void *recvbuf,
MPI_Comm comm_ptr)=NULL;
-#define MPIR_Reduce_inter_knomial_wrapper_MV2 smpi_coll_tuned_reduce_binomial
-#define MPIR_Reduce_intra_knomial_wrapper_MV2 smpi_coll_tuned_reduce_binomial
+#define MPIR_Reduce_inter_knomial_wrapper_MV2 smpi_coll_tuned_reduce_mvapich2_knomial
+#define MPIR_Reduce_intra_knomial_wrapper_MV2 smpi_coll_tuned_reduce_mvapich2_knomial
#define MPIR_Reduce_binomial_MV2 smpi_coll_tuned_reduce_binomial
#define MPIR_Reduce_redscat_gather_MV2 smpi_coll_tuned_reduce_scatter_gather
#define MPIR_Reduce_shmem_MV2 smpi_coll_tuned_reduce_ompi_basic_linear
#include "smpi/smpi_cocci.h"
#include "instr/instr_private.h"
+SG_BEGIN_DECL()
+
struct s_smpi_process_data;
typedef struct s_smpi_process_data *smpi_process_data_t;
extern void** mappings;
extern int loaded_page;
+int smpi_process_index_of_smx_process(smx_process_t process);
+
+SG_END_DECL()
+
#endif
double smpi_running_power;
int* fds;
+size_t mappings_count = 0;
void** mappings;
int loaded_page = -1;
char* start_data_exe = NULL;
if (loaded_page==dest)//no need to switch either
return;
-
#ifdef HAVE_MMAP
int i;
if(loaded_page==-1){//initial switch, do the copy from the real page here
return data ? data->index : MPI_UNDEFINED;
}
+/** Return the SMPI rank/index attached to a SIMIX process.
+ *
+ *  Reads the SMPI process data stored on the given SMX process; returns
+ *  MPI_UNDEFINED when the process carries no SMPI data (non-SMPI actor). */
+int smpi_process_index_of_smx_process(smx_process_t process) {
+  smpi_process_data_t data = SIMIX_process_get_data(process);
+  return data ? data->index : MPI_UNDEFINED;
+}
+
MPI_Comm smpi_process_comm_world(void)
{
smpi_process_data_t data = smpi_process_data();
type = mdp->heapinfo[block].type;
switch (type) {
- case -1: /* Already free */
+ case MMALLOC_TYPE_HEAPINFO:
UNLOCK(mdp);
- THROWF(system_error, 0, "Asked to free a fragment in a block that is already free. I'm puzzled\n");
+ THROWF(system_error, 0, "Asked to free a fragment in a heapinfo block. I'm confused.\n");
+ break;
+
+ case MMALLOC_TYPE_FREE: /* Already free */
+ UNLOCK(mdp);
+ THROWF(system_error, 0, "Asked to free a fragment in a block that is already free. I'm puzzled.\n");
break;
- case 0:
+ case MMALLOC_TYPE_UNFRAGMENTED:
/* Get as many statistics as early as we can. */
mdp -> heapstats.chunks_used--;
mdp -> heapstats.bytes_used -=
mdp->heapinfo[i].free_block.size += mdp->heapinfo[block].busy_block.size;
/* Mark all my ex-blocks as free */
for (it=0; it<mdp->heapinfo[block].busy_block.size; it++) {
- if (mdp->heapinfo[block+it].type <0) {
+ if (mdp->heapinfo[block+it].type < 0) {
fprintf(stderr,"Internal Error: Asked to free a block already marked as free (block=%lu it=%d type=%lu). Please report this bug.\n",
(unsigned long)block,it,(unsigned long)mdp->heapinfo[block].type);
abort();
}
- mdp->heapinfo[block+it].type = -1;
+ mdp->heapinfo[block+it].type = MMALLOC_TYPE_FREE;
}
block = i;
(unsigned long)block,it,(unsigned long)mdp->heapinfo[block].free_block.size,(unsigned long)mdp->heapinfo[block].type);
abort();
}
- mdp->heapinfo[block+it].type = -1;
+ mdp->heapinfo[block+it].type = MMALLOC_TYPE_FREE;
}
}
break;
default:
+ if (type < 0) {
+      fprintf(stderr, "Unknown mmalloc block type.\n");
+ abort();
+ }
+
/* Do some of the statistics. */
mdp -> heapstats.chunks_used--;
mdp -> heapstats.bytes_used -= 1 << type;
xbt_swag_remove(&mdp->heapinfo[block],&mdp->fraghead[type]);
/* pretend that this block is used and free it so that it gets properly coalesced with adjacent free blocks */
- mdp->heapinfo[block].type = 0;
+ mdp->heapinfo[block].type = MMALLOC_TYPE_UNFRAGMENTED;
mdp->heapinfo[block].busy_block.size = 1;
mdp->heapinfo[block].busy_block.busy_size = 0;
-/* Initialization for access to a mmap'd malloc managed region. */
+/* Initialization for access to a mmap'd malloc managed region. */
/* Copyright (c) 2012-2014. The SimGrid Team.
* All rights reserved. */
int bytes = 0;
while(i<=((struct mdesc *)heap)->heaplimit){
- if(((struct mdesc *)heap)->heapinfo[i].type == 0){
+ if(((struct mdesc *)heap)->heapinfo[i].type == MMALLOC_TYPE_UNFRAGMENTED){
if(((struct mdesc *)heap)->heapinfo[i].busy_block.busy_size > 0)
bytes += ((struct mdesc *)heap)->heapinfo[i].busy_block.busy_size;
- }else if(((struct mdesc *)heap)->heapinfo[i].type > 0){
+ } else if(((struct mdesc *)heap)->heapinfo[i].type > 0){
for(j=0; j < (size_t) (BLOCKSIZE >> ((struct mdesc *)heap)->heapinfo[i].type); j++){
if(((struct mdesc *)heap)->heapinfo[i].busy_frag.frag_size[j] > 0)
bytes += ((struct mdesc *)heap)->heapinfo[i].busy_frag.frag_size[j];
ssize_t mmalloc_get_busy_size(xbt_mheap_t heap, void *ptr){
ssize_t block = ((char*)ptr - (char*)(heap->heapbase)) / BLOCKSIZE + 1;
- if(heap->heapinfo[block].type == -1)
+ if(heap->heapinfo[block].type < 0)
return -1;
- else if(heap->heapinfo[block].type == 0)
+ else if(heap->heapinfo[block].type == MMALLOC_TYPE_UNFRAGMENTED)
return heap->heapinfo[block].busy_block.busy_size;
else{
ssize_t frag = ((uintptr_t) (ADDR2UINT (ptr) % (BLOCKSIZE))) >> heap->heapinfo[block].type;
}
}
+
+/** @brief Consistency check of a mmalloc heapinfo table (currently disabled).
+ *
+ *  Walks the heapinfo entries run-by-run (using mmalloc_get_increment) and
+ *  dies on impossible states: zero-sized free/heapinfo blocks, zero-sized
+ *  busy blocks, or negative block types that are neither HEAPINFO nor FREE.
+ *
+ *  The early return below deliberately short-circuits the whole check so it
+ *  costs nothing in production; delete that line to (re)enable the checks. */
+void mmcheck(xbt_mheap_t heap) {
+  return; /* checks disabled on purpose -- remove this line to enable them */
+  if (!heap->heapinfo)
+    return;
+  malloc_info* heapinfo = NULL;
+  for (size_t i=1; i < heap->heaplimit; i += mmalloc_get_increment(heapinfo)) {
+    heapinfo = heap->heapinfo + i;
+    switch (heapinfo->type) {
+    case MMALLOC_TYPE_HEAPINFO:
+    case MMALLOC_TYPE_FREE:
+      if (heapinfo->free_block.size==0) {
+        xbt_die("Block size == 0");
+      }
+      break;
+    case MMALLOC_TYPE_UNFRAGMENTED:
+      if (heapinfo->busy_block.size==0) {
+        xbt_die("Block size == 0");
+      }
+      if (heapinfo->busy_block.busy_size==0 && heapinfo->busy_block.size!=0) {
+        xbt_die("Empty busy block");
+      }
+      break;
+    default:
+      if (heapinfo->type<0) {
+        xbt_die("Unknown mmalloc block type.");
+      }
+    }
+  }
+}
return (result);
}
+/** @brief Initialize the heapinfo entries describing the heapinfo pages themselves.
+ *
+ *  The heapinfo table lives inside the heap it describes, so the blocks it
+ *  occupies need entries too. They are recorded as a single run of FREE
+ *  entries whose first entry carries the run length.
+ */
+static void initialize_heapinfo_heapinfo(xbt_mheap_t mdp)
+{
+  // Update heapinfo about the heapinfo pages (!):
+  // The table is assumed to start on a block boundary.
+  xbt_assert((uintptr_t) mdp->heapinfo % BLOCKSIZE == 0);
+  int block = BLOCK(mdp->heapinfo);
+  size_t nblocks = mdp->heapsize * sizeof(malloc_info) / BLOCKSIZE;
+  // Mark them as free:
+  for (size_t j=0; j!=nblocks; ++j) {
+    mdp->heapinfo[block+j].type = MMALLOC_TYPE_FREE;
+    mdp->heapinfo[block+j].free_block.size = 0;
+    mdp->heapinfo[block+j].free_block.next = 0;
+    mdp->heapinfo[block+j].free_block.prev = 0;
+  }
+  // First entry of the run records how many blocks the table spans:
+  mdp->heapinfo[block].free_block.size = nblocks;
+}
+
/* Finish the initialization of the mheap. If we want to inline it
* properly, we need to make the align function publicly visible, too */
static void initialize(xbt_mheap_t mdp)
int i;
malloc_info mi; /* to compute the offset of the swag hook */
+ // Update mdp meta-data:
mdp->heapsize = HEAP / BLOCKSIZE;
mdp->heapinfo = (malloc_info *)
align(mdp, mdp->heapsize * sizeof(malloc_info));
+ mdp->heapbase = (void *) mdp->heapinfo;
+ mdp->flags |= MMALLOC_INITIALIZED;
+ // Update root heapinfo:
memset((void *) mdp->heapinfo, 0, mdp->heapsize * sizeof(malloc_info));
- mdp->heapinfo[0].type=-1;
+ mdp->heapinfo[0].type = MMALLOC_TYPE_FREE;
mdp->heapinfo[0].free_block.size = 0;
mdp->heapinfo[0].free_block.next = mdp->heapinfo[0].free_block.prev = 0;
mdp->heapindex = 0;
- mdp->heapbase = (void *) mdp->heapinfo;
- mdp->flags |= MMALLOC_INITIALIZED;
+
+ initialize_heapinfo_heapinfo(mdp);
for (i=0;i<BLOCKLOG;i++) {
xbt_swag_init(&(mdp->fraghead[i]),
/* Copy old info into new location */
oldinfo = mdp->heapinfo;
newinfo = (malloc_info *) align(mdp, newsize * sizeof(malloc_info));
- memset(newinfo, 0, newsize * sizeof(malloc_info));
memcpy(newinfo, oldinfo, mdp->heapsize * sizeof(malloc_info));
+ /* Initialise the new blockinfo : */
+ memset((char*) newinfo + mdp->heapsize * sizeof(malloc_info), 0,
+ (newsize - mdp->heapsize)* sizeof(malloc_info));
+
/* Update the swag of busy blocks containing free fragments by applying the offset to all swag_hooks. Yeah. My hand is right in the fan and I still type */
size_t offset=((char*)newinfo)-((char*)oldinfo);
/* mark the space previously occupied by the block info as free by first marking it
* as occupied in the regular way, and then freing it */
for (it=0; it<BLOCKIFY(mdp->heapsize * sizeof(malloc_info)); it++){
- newinfo[BLOCK(oldinfo)+it].type = 0;
+ newinfo[BLOCK(oldinfo)+it].type = MMALLOC_TYPE_UNFRAGMENTED;
newinfo[BLOCK(oldinfo)+it].busy_block.ignore = 0;
}
newinfo[BLOCK(oldinfo)].busy_block.busy_size = size;
mfree(mdp, (void *) oldinfo);
mdp->heapsize = newsize;
+
+ initialize_heapinfo_heapinfo(mdp);
}
mdp->heaplimit = BLOCK((char *) result + size);
block = BLOCK(result);
for (it=0;it<blocks;it++){
- mdp->heapinfo[block+it].type = 0;
+ mdp->heapinfo[block+it].type = MMALLOC_TYPE_UNFRAGMENTED;
mdp->heapinfo[block+it].busy_block.busy_size = 0;
mdp->heapinfo[block+it].busy_block.ignore = 0;
+ mdp->heapinfo[block+it].busy_block.size = 0;
}
mdp->heapinfo[block].busy_block.size = blocks;
mdp->heapinfo[block].busy_block.busy_size = requested_size;
}
for (it=0;it<blocks;it++){
- mdp->heapinfo[block+it].type = 0;
+ mdp->heapinfo[block+it].type = MMALLOC_TYPE_UNFRAGMENTED;
mdp->heapinfo[block+it].busy_block.busy_size = 0;
mdp->heapinfo[block+it].busy_block.ignore = 0;
+ mdp->heapinfo[block+it].busy_block.size = 0;
}
mdp->heapinfo[block].busy_block.size = blocks;
- mdp->heapinfo[block].busy_block.busy_size = requested_size;
+ mdp->heapinfo[block].busy_block.busy_size = requested_size;
//mdp->heapinfo[block].busy_block.bt_size = xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
//mdp->heapinfo[block].busy_block.bt_size = xbt_libunwind_backtrace(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
? -1 \
: (MDP) -> fd)
-/* Get core for the memory region specified by MDP, using SIZE as the
- amount to either add to or subtract from the existing region. Works
- like sbrk(), but using mmap().
-
- It never returns NULL. Instead, it dies verbosely on errors. */
-
+/** @brief Add memory to this heap
+ *
+ * Get core for the memory region specified by MDP, using SIZE as the
+ * amount to either add to or subtract from the existing region. Works
+ * like sbrk(), but using mmap().
+ *
+ * It never returns NULL. Instead, it dies verbosely on errors.
+ *
+ * @param mdp The heap
+ * @param size Bytes to allocate for this heap (or <0 to free memory from this heap)
+ */
void *mmorecore(struct mdesc *mdp, ssize_t size)
{
ssize_t test = 0;
void *mapto; /* Address we actually mapped to */
char buf = 0; /* Single byte to write to extend mapped file */
-// fprintf(stderr,"increase %p by %u\n",mdp,size);
-
if (size == 0) {
/* Just return the current "break" value. */
result = mdp->breakval;
}
/* Let's call mmap. Note that it is possible that mdp->top
- is 0. In this case mmap will choose the address for us */
+ is 0. In this case mmap will choose the address for us.
+ This call might very well overwrite an already existing memory mapping
+ (leading to weird bugs).
+ */
mapto = mmap(mdp->top, mapbytes, PROT_READ | PROT_WRITE,
MAP_PRIVATE_OR_SHARED(mdp) | MAP_IS_ANONYMOUS(mdp) |
MAP_FIXED, MAP_ANON_OR_FD(mdp), foffset);
- if (mapto == (void *) -1/* That's MAP_FAILED */) {
+ if (mapto == MAP_FAILED) {
char buff[1024];
fprintf(stderr,"Internal error: mmap returned MAP_FAILED! error: %s\n",strerror(errno));
sprintf(buff,"cat /proc/%d/maps",getpid());
result = (void *) mdp->breakval;
mdp->breakval = (char *) mdp->breakval + size;
} else {
+ /* Memory is already mapped, we only need to increase the breakval: */
result = (void *) mdp->breakval;
mdp->breakval = (char *) mdp->breakval + size;
}
int fragment2;
}s_heap_area_pair_t, *heap_area_pair_t;
+#define MMALLOC_TYPE_HEAPINFO (-2)
+#define MMALLOC_TYPE_FREE (-1)
+#define MMALLOC_TYPE_UNFRAGMENTED 0
+/* >0 values are fragmented blocks */
+
/* Data structure giving per-block information.
*
* There is one such structure in the mdp->heapinfo array per block used in that heap,
};
} malloc_info;
-/* Internal structure that defines the format of the malloc-descriptor.
- This gets written to the base address of the region that mmalloc is
- managing, and thus also becomes the file header for the mapped file,
- if such a file exists. */
-
+/** @brief Descriptor of a mmalloc area
+ *
+ * Internal structure that defines the format of the malloc-descriptor.
+ * This gets written to the base address of the region that mmalloc is
+ * managing, and thus also becomes the file header for the mapped file,
+ * if such a file exists.
+ * */
struct mdesc {
- /* Semaphore locking the access to the heap */
+ /** @brief Semaphore locking the access to the heap */
sem_t sem;
- /* Number of processes that attached the heap */
+ /** @brief Number of processes that attached the heap */
unsigned int refcount;
- /* Chained lists of mdescs */
+ /** @brief Chained lists of mdescs */
struct mdesc *next_mdesc;
- /* The "magic number" for an mmalloc file. */
+ /** @brief The "magic number" for an mmalloc file. */
char magic[MMALLOC_MAGIC_SIZE];
- /* The size in bytes of this structure, used as a sanity check when reusing
- a previously created mapped file. */
+ /** @brief The size in bytes of this structure
+ *
+ * Used as a sanity check when reusing a previously created mapped file.
+ * */
unsigned int headersize;
- /* The version number of the mmalloc package that created this file. */
+ /** @brief Version number of the mmalloc package that created this file. */
unsigned char version;
unsigned int options;
- /* Some flag bits to keep track of various internal things. */
+ /** @brief Some flag bits to keep track of various internal things. */
unsigned int flags;
- /* Number of info entries. */
+ /** @brief Number of info entries. */
size_t heapsize;
- /* Pointer to first block of the heap (base of the first block). */
+ /** @brief Pointer to first block of the heap (base of the first block). */
void *heapbase;
- /* Current search index for the heap table. */
- /* Search index in the info table. */
+ /** @brief Current search index for the heap table.
+ *
+ * Search index in the info table.
+ */
size_t heapindex;
- /* Limit of valid info table indices. */
+ /** @brief Limit of valid info table indices. */
size_t heaplimit;
- /* Block information table. */
- /* Table indexed by block number giving per-block information. */
+ /** @brief Block information table.
+ *
+ * Table indexed by block number giving per-block information.
+ */
malloc_info *heapinfo;
- /* List of all blocks containing free fragments of this size.
+  /** @brief List of all blocks containing free fragments of a given size.
+ *
* The array indice is the log2 of requested size.
* Actually only the sizes 8->11 seem to be used, but who cares? */
s_xbt_swag_t fraghead[BLOCKLOG];
- /* The base address of the memory region for this malloc heap. This
- is the location where the bookkeeping data for mmap and for malloc
- begins. */
-
+  /** @brief Base address of the memory region for this malloc heap
+ *
+ * This is the location where the bookkeeping data for mmap and
+ * for malloc begins.
+ */
void *base;
- /* The current location in the memory region for this malloc heap which
- represents the end of memory in use. */
-
+ /** @brief End of memory in use
+ *
+ * Some memory might be already mapped by the OS but not used
+ * by the heap.
+ * */
void *breakval;
- /* The end of the current memory region for this malloc heap. This is
- the first location past the end of mapped memory.
- Compared to breakval, this value is rounded to the next memory page.
- */
-
+ /** @brief End of the current memory region for this malloc heap.
+ *
+ * This is the first location past the end of mapped memory.
+ *
+ * Compared to breakval, this value is rounded to the next memory page.
+ */
void *top;
- /* Open file descriptor for the file to which this malloc heap is mapped.
- This will always be a valid file descriptor, since /dev/zero is used
- by default if no open file is supplied by the client. Also note that
- it may change each time the region is mapped and unmapped. */
-
+ /** @brief Open file descriptor for the file to which this malloc heap is mapped
+ *
+ * If this value is negative, MAP_ANONYMOUS memory is used.
+ *
+ * Also note that it may change each time the region is mapped and unmapped. */
int fd;
- /* Instrumentation. */
-
+  /** @brief Instrumentation */
struct mstats heapstats;
};
XBT_PUBLIC( void *)__mmalloc_remap_core(xbt_mheap_t mdp);
-/* Get core for the memory region specified by MDP, using SIZE as the
- amount to either add to or subtract from the existing region. Works
- like sbrk(), but using mmap(). */
XBT_PUBLIC( void *)mmorecore(struct mdesc *mdp, ssize_t size);
/* Thread-safety (if the sem is already created)
}
+/* Number of heapinfo entries spanned by the run starting at this entry:
+ * free runs (type < 0) span free_block.size entries, unfragmented busy
+ * blocks (type == 0) span busy_block.size entries, and fragmented blocks
+ * (type > 0) span exactly one. Used to iterate the heapinfo table. */
+static inline int mmalloc_get_increment(malloc_info* heapinfo) {
+  if (heapinfo->type < 0) {
+    return heapinfo->free_block.size;
+  } else if (heapinfo->type == 0) {
+    return heapinfo->busy_block.size;
+  } else {
+    return 1;
+  }
+}
+
+void mmcheck(xbt_mheap_t heap);
+
#endif /* __MMPRIVATE_H */
type = mdp->heapinfo[block].type;
switch (type) {
- case -1:
+ case MMALLOC_TYPE_HEAPINFO:
+ fprintf(stderr, "Asked realloc a fragment coming from a heapinfo block. I'm confused.\n");
+ abort();
+ break;
+
+ case MMALLOC_TYPE_FREE:
fprintf(stderr, "Asked realloc a fragment coming from a *free* block. I'm puzzled.\n");
abort();
break;
- case 0:
+ case MMALLOC_TYPE_UNFRAGMENTED:
/* Maybe reallocate a large block to a small fragment. */
if (size <= BLOCKSIZE / 2) { // Full block -> Fragment; no need to optimize for time
/* The new size is smaller; return excess memory to the free list. */
//printf("(%s) return excess memory...",xbt_thread_self_name());
for (it= block+blocks; it< mdp->heapinfo[block].busy_block.size ; it++){
- mdp->heapinfo[it].type = 0; // FIXME that should be useless, type should already be 0 here
+ mdp->heapinfo[it].type = MMALLOC_TYPE_UNFRAGMENTED; // FIXME that should be useless, type should already be 0 here
mdp->heapinfo[it].busy_block.ignore = 0;
+ mdp->heapinfo[it].busy_block.size = 0;
+ mdp->heapinfo[it].busy_block.busy_size = 0;
}
mdp->heapinfo[block + blocks].busy_block.size
default: /* Fragment -> ??; type=logarithm to base two of the fragment size. */
+ if (type < 0) {
+    fprintf(stderr, "Unknown mmalloc block type.\n");
+ abort();
+ }
+
if (size > (size_t) (1 << (type - 1)) && size <= (size_t) (1 << type)) {
/* The new size is the same kind of fragment. */
//printf("(%s) new size is same kind of fragment...",xbt_thread_self_name());
if(HAVE_MC)
set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
-
- add_executable(page_store page_store.cpp)
- target_link_libraries(page_store simgrid)
endif()
set(tesh_files
${tesh_files}
- ${CMAKE_CURRENT_SOURCE_DIR}/page_store.tesh
PARENT_SCOPE
)
set(testsuite_src
${testsuite_src}
- ${CMAKE_CURRENT_SOURCE_DIR}/page_store.cpp
PARENT_SCOPE
)
assert(var);
void* frame_base = mc_find_frame_base(subprogram, info, cursor);
- xbt_assert((void*)mc_dwarf_resolve_locations(&var->locations, info, cursor, frame_base, NULL) == address,
+ xbt_assert((void*)mc_dwarf_resolve_locations(&var->locations, info, cursor, frame_base, NULL, -1) == address,
"Bad resolution of local variable %s of %s", variable, function);
}
+++ /dev/null
-#include <string.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <sys/mman.h>
-
-#include "mc/mc_page_store.h"
-
-static int value = 0;
-
-static void new_content(void* data, size_t size)
-{
- memset(data, ++value, size);
-}
-
-static void* getpage()
-{
- return mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-}
-
-int main(int argc, char** argv)
-{
- // Init
- size_t pagesize = (size_t) getpagesize();
- mc_pages_store_t store = new s_mc_pages_store(500);
- void* data = getpage();
-
- // Init:
- xbt_assert(store->size()==0, "Bad size");
-
- // Store the page once:
- new_content(data, pagesize);
- size_t pageno1 = store->store_page(data);
- xbt_assert(store->get_ref(pageno1)==1, "Bad refcount");
- const void* copy = store->get_page(pageno1);
- xbt_assert(memcmp(data, copy, pagesize)==0, "Page data should be the same");
- xbt_assert(store->size()==1, "Bad size");
-
- // Store the same page again:
- size_t pageno2 = store->store_page(data);
- xbt_assert(pageno1==pageno2, "Page should be the same");
- xbt_assert(store->get_ref(pageno1)==2, "Bad refcount");
- xbt_assert(store->size()==1, "Bad size");
-
- // Store a new page:
- new_content(data, pagesize);
- size_t pageno3 = store->store_page(data);
- xbt_assert(pageno1 != pageno3, "New page should be different");
- xbt_assert(store->size()==2, "Bad size");
-
- // Unref pages:
- store->unref_page(pageno1);
- xbt_assert(store->get_ref(pageno1)==1, "Bad refcount");
- xbt_assert(store->size()==2, "Bad size");
- store->unref_page(pageno2);
- xbt_assert(store->size()==1, "Bad size");
-
- // Reallocate page:
- new_content(data, pagesize);
- size_t pageno4 = store->store_page(data);
- xbt_assert(pageno1 == pageno4, "Page was not reused");
- xbt_assert(store->get_ref(pageno4)==1, "Bad refcount");
- xbt_assert(store->size()==2, "Bad size");
-
- return 0;
-}
+++ /dev/null
-#! ./tesh
-
-$ $SG_TEST_EXENV ${bindir:=.}/page_store
char mailbox[256];
msg_task_t task = NULL;
-
+ msg_host_t jupiter = MSG_get_host_by_name("Jupiter");
sprintf(mailbox, "jupi");
task = MSG_task_create("task on", task_comp_size, task_comm_size, NULL);
MSG_task_destroy(task);
MSG_process_sleep(1);
- MSG_host_off(MSG_get_host_by_name("Jupiter"));
+ MSG_host_off(jupiter);
task = MSG_task_create("task off", task_comp_size, task_comm_size, NULL);
XBT_INFO("Sending \"%s\"", task->name);
if (MSG_task_send_with_timeout(task, mailbox, 1) != MSG_OK)
MSG_task_destroy(task);
- MSG_host_on(MSG_get_host_by_name("Jupiter"));
+ MSG_host_on(jupiter);
+ xbt_swag_t jupi_processes = MSG_host_get_process_list(jupiter);
+ void *process;
+ xbt_swag_foreach(process, jupi_processes) {
+ MSG_process_kill(process);
+ }
task = MSG_task_create("task on without proc", task_comp_size, task_comm_size, NULL);
XBT_INFO("Sending \"%s\"", task->name);
> [Tremblay:master:(1) 2.169155] [msg_test/INFO] Sending "task on without proc"
> [Tremblay:master:(1) 3.169155] [msg_test/INFO] Sending "task on with proc"
> [Tremblay:master:(1) 3.338309] [msg_test/INFO] Sending "finalize"
-> [Jupiter:slave:(3) 3.993652] [msg_test/INFO] Task "task on with proc" done
+> [Jupiter:slave:(4) 3.993652] [msg_test/INFO] Task "task on with proc" done
> [Tremblay:master:(1) 4.012666] [msg_test/INFO] Goodbye now!
-> [Jupiter:slave:(3) 4.012666] [msg_test/INFO] I'm done. See you!
+> [Jupiter:slave:(4) 4.012666] [msg_test/INFO] I'm done. See you!
> [4.012666] [msg_test/INFO] Simulation time 4.01267
<platform version="3">
<!-- The master process (with some arguments) -->
<process host="Tremblay" function="test">
- <argument value="4"/>
</process>
</platform>
$outfile = $infile;
$outfile =~ s/\.c$/_unit.c/;
+ $outfile =~ s/\.cpp$/_unit.cpp/;
$outfile =~ s|.*/([^/]*)$|$1| if $outfile =~ m|/|;
$outfile = "$outdir$outfile";