teshsuite/smpi/coll-allgather/coll-allgather
teshsuite/smpi/coll-allgatherv/coll-allgatherv
teshsuite/smpi/coll-allreduce/coll-allreduce
-teshsuite/smpi/alltoall2
-teshsuite/smpi/alltoall/alltoall2
-teshsuite/smpi/alltoall/alltoall_basic
-teshsuite/smpi/alltoall/alltoall_coll
-teshsuite/smpi/alltoall_basic
-teshsuite/smpi/alltoall_coll
+teshsuite/smpi/coll-alltoall/coll-alltoall
teshsuite/smpi/coll-alltoallv/coll-alltoallv
teshsuite/smpi/coll-bcast/coll-bcast
teshsuite/smpi/coll-barrier/coll-barrier
teshsuite/smpi/mpich-test/*/*.stdo
teshsuite/smpi/pt2pt-dsend/pt2pt-dsend
teshsuite/smpi/pt2pt-pingpong/pt2pt-pingpong
-teshsuite/smpi/reduce_coll
-teshsuite/smpi/reduce/reduce
-teshsuite/smpi/reduce/reduce_coll
-teshsuite/smpi/reduce/reduce_scatter_coll
-teshsuite/smpi/reduce_scatter_coll
+teshsuite/smpi/coll-reduce/coll-reduce
+teshsuite/smpi/coll-reduce-scatter/coll-reduce-scatter
teshsuite/smpi/coll-scatter/coll-scatter
teshsuite/smpi/shared/shared
teshsuite/smpi/smpi_sendrecv
target_link_libraries(smpi_replay simgrid)
if(HAVE_MC)
- foreach(x bugged1 bugged2 bugged1_liveness only_send_deterministic mutual_exclusion non_termination1 non_termination2 non_termination3 non_termination4)
+ foreach(x bugged1 bugged2 bugged1_liveness only_send_deterministic mutual_exclusion non_termination1
+ non_termination2 non_termination3 non_termination4)
add_executable (smpi_${x} mc/${x}.c)
target_link_libraries(smpi_${x} simgrid)
set_target_properties(smpi_${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY "./mc")
- endforeach()
+ set(examples_src ${examples_src} mc/${x}.c)
+ endforeach()
+ ADD_TESH(smpi-mc-only-send-determinism -setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/mc --cd ${CMAKE_BINARY_DIR}/examples/smpi/mc ${CMAKE_HOME_DIRECTORY}/examples/smpi/mc/only_send_deterministic.tesh)
endif()
+
+ADD_TESH(smpi-tracing-ptp --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi --cd ${CMAKE_BINARY_DIR}/examples/smpi ${CMAKE_HOME_DIRECTORY}/examples/smpi/tracing/smpi_traced.tesh)
+ADD_TESH(smpi-replay-simple --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi --cd ${CMAKE_BINARY_DIR}/examples/smpi ${CMAKE_HOME_DIRECTORY}/examples/smpi/replay/smpi_replay.tesh)
endif()
-set(tesh_files
- ${tesh_files}
- ${CMAKE_CURRENT_SOURCE_DIR}/tracing/smpi_traced.tesh
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/smpi_replay.tesh
- PARENT_SCOPE)
-set(examples_src
- ${examples_src}
- ${CMAKE_CURRENT_SOURCE_DIR}/mvmul.c
- ${CMAKE_CURRENT_SOURCE_DIR}/bcbench.c
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/replay.c
- ${CMAKE_CURRENT_SOURCE_DIR}/tracing/smpi_traced.c
- ${CMAKE_CURRENT_SOURCE_DIR}/tracing/smpi_traced_simple.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/bugged2.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/bugged1.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/bugged1_liveness.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/only_send_deterministic.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/mutual_exclusion.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/non_termination1.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/non_termination2.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/non_termination3.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/non_termination4.c
- PARENT_SCOPE)
-set(bin_files
- ${bin_files}
- ${CMAKE_CURRENT_SOURCE_DIR}/hostfile
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/promela_bugged1_liveness
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_bugged1_liveness
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_bugged1
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_bugged2
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_only_send_deterministic
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_mutual_exclusion
- ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_non_termination
- PARENT_SCOPE)
-set(txt_files
- ${txt_files}
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions0.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions1.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_allReduce.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_allgatherv.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_alltoall.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_alltoallv.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_barrier.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_bcast.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_bcast_reduce_datatypes.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_gather.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_reducescatter.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_waitall.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_with_isend.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/replay/split_traces
- PARENT_SCOPE)
+set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/tracing/smpi_traced.tesh
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/smpi_replay.tesh PARENT_SCOPE)
+set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/mvmul.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcbench.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/replay.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tracing/smpi_traced.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tracing/smpi_traced_simple.c PARENT_SCOPE)
+set(bin_files ${bin_files} ${CMAKE_CURRENT_SOURCE_DIR}/hostfile
+ ${CMAKE_CURRENT_SOURCE_DIR}/mc/promela_bugged1_liveness
+ ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_bugged1_liveness
+ ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_bugged1
+ ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_bugged2
+ ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_only_send_deterministic
+ ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_mutual_exclusion
+ ${CMAKE_CURRENT_SOURCE_DIR}/mc/hostfile_non_termination PARENT_SCOPE)
+set(txt_files ${txt_files} ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions0.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions1.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_allReduce.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_allgatherv.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_alltoall.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_alltoallv.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_barrier.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_bcast.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_bcast_reduce_datatypes.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_gather.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_reducescatter.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_waitall.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/actions_with_isend.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/replay/split_traces PARENT_SCOPE)
add_executable (se se.c)
target_link_libraries(se simgrid)
+ ADD_TESH_FACTORIES(smpi-energy "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/energy.tesh)
endif()
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/energy.tesh PARENT_SCOPE)
if(enable_smpi AND SMPI_FORTRAN)
set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
-
add_executable (sef sef.f)
target_link_libraries(sef simgrid)
+ ADD_TESH_FACTORIES(smpi-energy-f77 "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f77/energy.tesh)
endif()
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/energy.tesh PARENT_SCOPE)
if(enable_smpi AND SMPI_FORTRAN)
set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpif90")
-
add_executable (sef90 sef90.f90)
target_link_libraries(sef90 simgrid)
+ ADD_TESH_FACTORIES(smpi-energy-f90 "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f90/energy.tesh)
endif()
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/energy.tesh PARENT_SCOPE)
#include <mpi.h>
#include <simgrid/modelchecker.h>
-
int main(int argc, char **argv)
{
int recv_buff, err, size, rank, i;
//#ifdef HAVE_MC
//MC_assert(recv_buff == size - 1);
//#endif
-
}else{
MPI_Send(&rank, 1, MPI_INT, 0, 42, MPI_COMM_WORLD);
printf("Sent %d to rank 0\n", rank);
int r, cs;
int main(int argc, char **argv){
-
int err, size, rank;
int recv_buff;
MPI_Status status;
int CS_used = 0;
xbt_dynar_t requests = xbt_dynar_new(sizeof(int), NULL);
-
+
/* Initialize MPI */
err = MPI_Init(&argc, &argv);
if(err != MPI_SUCCESS){
#include <mpi.h>
#include <simgrid/modelchecker.h>
-
int main(int argc, char **argv)
{
int x,y, err, size, rank;
#define RELEASE_TAG 2
int main(int argc, char **argv){
-
int err, size, rank;
int recv_buff;
MPI_Status status;
int CS_used = 0;
xbt_dynar_t requests = xbt_dynar_new(sizeof(int), NULL);
-
+
/* Initialize MPI */
err = MPI_Init(&argc, &argv);
if(err != MPI_SUCCESS){
}
}else{
if(!xbt_dynar_is_empty(requests)){
- printf("CS release. Grant to queued requests (queue size: %lu)",
- xbt_dynar_length(requests));
+ printf("CS release. Grant to queued requests (queue size: %lu)", xbt_dynar_length(requests));
xbt_dynar_shift(requests, &recv_buff);
MPI_Send(&rank, 1, MPI_INT, recv_buff, GRANT_TAG, MPI_COMM_WORLD);
CS_used = 1;
while(1){
printf("%d asks the request.\n", rank);
MPI_Send(&rank, 1, MPI_INT, 0, REQUEST_TAG, MPI_COMM_WORLD);
-
+
MPI_Recv(&recv_buff, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
-
+
printf("%d got the answer. Release it.\n", rank);
MPI_Send(&rank, 1, MPI_INT, 0, RELEASE_TAG, MPI_COMM_WORLD);
-
}
}
int y = 8;
int main(int argc, char **argv) {
-
int recv_buff, size, rank;
MPI_Status status;
-
+
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size); /* Get nr of tasks */
int x;
int main(int argc, char **argv) {
-
int recv_buff, size, rank;
MPI_Status status;
-
+
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size); /* Get nr of tasks */
int y = 0;
int main(int argc, char **argv) {
-
int recv_x, recv_y, size, rank;
MPI_Status status;
-
+
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size); /* Get nr of tasks */
int x = 20;
int main(int argc, char **argv) {
-
int recv_x = 1, size, rank;
MPI_Status status;
MPI_Init(&argc, &argv);
-
+
MPI_Comm_size(MPI_COMM_WORLD, &size); /* Get nr of tasks */
MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* Get id of this process */
-
+
MC_ignore(&(status.count), sizeof(status.count));
if(rank==0){
MPI_Recv(&recv_x, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
}
}else{
-
while (x >= 0) {
if (MC_random(0,1) == 0) {
x -= 1;
}
MPI_Finalize();
-
+
return 0;
}
#include <mpi.h>
#include <simgrid/modelchecker.h>
-
int main(int argc, char **argv)
{
int recv_buff, err, size, rank, i;
MPI_Recv(&recv_buff, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
//printf("Message received from %d\n", recv_buff);
}
-
}else{
MPI_Send(&rank, 1, MPI_INT, 0, 42, MPI_COMM_WORLD);
//printf("Sent %d to rank 0\n", rank);
add_executable (replay_multiple replay_multiple.c)
target_link_libraries(replay_multiple simgrid)
+ IF(NOT HAVE_MC)
+ ADD_TESH(smpi-replay-multiple --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/replay_multiple --cd ${CMAKE_BINARY_DIR}/examples/smpi/replay_multiple ${CMAKE_HOME_DIRECTORY}/examples/smpi/replay_multiple/replay_multiple.tesh)
+ ENDIF()
endif()
-set(tesh_files
- ${tesh_files}
- ${CMAKE_CURRENT_SOURCE_DIR}/replay_multiple.tesh
- PARENT_SCOPE)
-set(examples_src
- ${examples_src}
- ${CMAKE_CURRENT_SOURCE_DIR}/replay_multiple.c
- PARENT_SCOPE)
-set(txt_files
- ${txt_files}
- ${CMAKE_CURRENT_SOURCE_DIR}/generate_multiple_deployment.sh
- ${CMAKE_CURRENT_SOURCE_DIR}/description_file
- ${CMAKE_CURRENT_SOURCE_DIR}/README
- ${CMAKE_CURRENT_SOURCE_DIR}/smpi_replay.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace0.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace1.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace2.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace3.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace4.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace5.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace6.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace7.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace8.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace9.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace10.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace11.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace12.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace13.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace14.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace15.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace16.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace17.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace18.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace19.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace20.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace21.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace22.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace23.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace24.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace25.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace26.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace27.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace28.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace29.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace30.txt
- ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace31.txt
- PARENT_SCOPE)
+set(txt_files ${txt_files} ${CMAKE_CURRENT_SOURCE_DIR}/generate_multiple_deployment.sh
+ ${CMAKE_CURRENT_SOURCE_DIR}/description_file
+ ${CMAKE_CURRENT_SOURCE_DIR}/README
+ ${CMAKE_CURRENT_SOURCE_DIR}/smpi_replay.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace0.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace1.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace2.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace3.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace4.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace5.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace6.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace7.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace8.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace9.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace10.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace11.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace12.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace13.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace14.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace15.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace16.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace17.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace18.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace19.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace20.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace21.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace22.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace23.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace24.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace25.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace26.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace27.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace28.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace29.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace30.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/ti_traces_32_1/ti_trace31.txt PARENT_SCOPE)
+set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/replay_multiple.tesh PARENT_SCOPE)
+set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/replay_multiple.c PARENT_SCOPE)
-This is an example of the replay of several traces in the same simulation,
- in order to watch how they interact on a platform.
-
+This is an example of the replay of several traces in the same simulation, to watch how they interact on a platform.
The main item of this example is the description file
-The description_file file is an example of the launching of 2 replay traces
-(both of the same application/size).
+The description_file is an example of launching 2 replay traces (both of the same application/size).
Each line has the form :
2 smpi_replay.txt 32 125000000000
-This launchs an instance "2" that will replay file smpi_replay.txt with 32
-processes, and each one of these processes will sleep for 125000000000 flops
-before the run.
+This launches an instance "2" that will replay file smpi_replay.txt with 32 processes, and each one of these processes
+will sleep for 125000000000 flops before the run.
+In order to be replayed, a deployment file must be generated from this description file, and from the intended
+platform file and hostfiles. The script generate_multiple_deployment.sh can be used as follows:
-In order to be replayed, a deployment file must be generated
-from this description file, and from the intended platform file and hostfiles.
-The script generate_multiple_deployment.sh can be used as such :
+./generate_multiple_deployment.sh -platform platform_file.xml -hostfile hostfile description_file deployment.xml
-./generate_multiple_deployment.sh -platform platform_file.xml
- -hostfile hostfile description_file deployment.xml
-
The output deployment file will be written in deployment.xml
-Please not that the deployment will use the hostfile in order, placing processes
-of the first instance using the first node, and the ones of the following
-instances on the following nodes.
+Please note that the deployment will use the hostfile in order, placing processes of the first instance on the first
+node, and the ones of the following instances on the following nodes.
The actual replay can now be launched, using classic options for SMPI
./replay_multiple description_file platform_file.xml deployment.xml --cfg=... --log=...
-
-
add_executable (masterslave_mailbox_smpi masterslave_mailbox_smpi.c)
target_link_libraries(masterslave_mailbox_smpi simgrid)
+ ADD_TESH_FACTORIES(smpi-msg-masterslave "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_msg_masterslave --cd ${CMAKE_BINARY_DIR}/examples/smpi/smpi_msg_masterslave ${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_msg_masterslave/msg_smpi.tesh)
endif()
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/msg_smpi.tesh PARENT_SCOPE)
endif()
include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi")
- foreach(x coll-allgather coll-allgatherv coll-allreduce coll-alltoallv coll-barrier coll-bcast coll-gather coll-scatter
- pt2pt-dsend pt2pt-pingpong type-hvector type-indexed type-struct type-vector)
+ foreach(x coll-allgather coll-allgatherv coll-allreduce coll-alltoall coll-alltoallv coll-barrier coll-bcast
+ coll-gather coll-reduce coll-reduce-scatter coll-scatter pt2pt-dsend pt2pt-pingpong
+ type-hvector type-indexed type-struct type-vector)
add_executable (${x} ${x}/${x}.c)
target_link_libraries(${x} simgrid)
set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
ADD_TESH_FACTORIES(tesh-smpi-${x} "thread;ucontext;raw;boost" --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/${x} --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/${x} ${x}.tesh)
endforeach()
- foreach (ALLGATHER_COLL 2dmesh 3dmesh bruck GB loosely_lr NTSLR_NB pair rdb rhv ring SMP_NTS smp_simple
- spreading_simple ompi mpich ompi_neighborexchange mvapich2 mvapich2_smp impi)
- ADD_TESH(tesh-smpi-coll-allgather-${ALLGATHER_COLL} --cfg smpi/allgather:${ALLGATHER_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-allgather --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-allgather coll-allgather.tesh)
+ foreach (ALLGATHER 2dmesh 3dmesh bruck GB loosely_lr NTSLR_NB pair rdb rhv ring SMP_NTS smp_simple spreading_simple
+ ompi mpich ompi_neighborexchange mvapich2 mvapich2_smp impi)
+ ADD_TESH(tesh-smpi-coll-allgather-${ALLGATHER} --cfg smpi/allgather:${ALLGATHER} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-allgather --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-allgather coll-allgather.tesh)
endforeach()
- foreach (ALLGATHERV_COLL GB pair ring ompi mpich ompi_neighborexchange ompi_bruck mpich_rdb mpich_ring mvapich2 impi)
- ADD_TESH(tesh-smpi-coll-allgatherv-${ALLGATHERV_COLL} --cfg smpi/allgatherv:${ALLGATHERV_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-allgatherv --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-allgatherv coll-allgatherv.tesh)
+ foreach (ALLGATHERV GB pair ring ompi mpich ompi_neighborexchange ompi_bruck mpich_rdb mpich_ring mvapich2 impi)
+ ADD_TESH(tesh-smpi-coll-allgatherv-${ALLGATHERV} --cfg smpi/allgatherv:${ALLGATHERV} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-allgatherv --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-allgatherv coll-allgatherv.tesh)
endforeach()
- foreach (ALLREDUCE_COLL lr rab1 rab2 rab_rdb rdb smp_binomial smp_binomial_pipeline smp_rdb smp_rsag smp_rsag_lr impi
- smp_rsag_rab redbcast ompi mpich ompi_ring_segmented mvapich2 mvapich2_rs mvapich2_two_level)
- ADD_TESH(tesh-smpi-coll-allreduce-${ALLREDUCE_COLL} --cfg smpi/allreduce:${ALLREDUCE_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-allreduce --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-allreduce coll-allreduce.tesh)
+ foreach (ALLREDUCE lr rab1 rab2 rab_rdb rdb smp_binomial smp_binomial_pipeline smp_rdb smp_rsag smp_rsag_lr impi
+ smp_rsag_rab redbcast ompi mpich ompi_ring_segmented mvapich2 mvapich2_rs mvapich2_two_level)
+ ADD_TESH(tesh-smpi-coll-allreduce-${ALLREDUCE} --cfg smpi/allreduce:${ALLREDUCE} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-allreduce --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-allreduce coll-allreduce.tesh)
endforeach()
- foreach (ALLTOALLV_COLL pair pair_light_barrier pair_mpi_barrier pair_one_barrier ring ring_light_barrier
- ring_mpi_barrier ring_one_barrier bruck ompi mpich mvapich2 ompi_basic_linear impi)
- ADD_TESH(tesh-smpi-coll-alltoallv-${ALLTOALLV_COLL} --cfg smpi/alltoallv:${ALLTOALLV_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-alltoallv --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-alltoallv coll-alltoallv.tesh)
+ foreach (ALLTOALL 2dmesh 3dmesh pair pair_rma pair_one_barrier pair_light_barrier pair_mpi_barrier rdb ring
+ ring_light_barrier ring_mpi_barrier ring_one_barrier bruck basic_linear ompi mpich mvapich2
+ mvapich2_scatter_dest impi)
+ ADD_TESH(tesh-smpi-coll-alltoall-${ALLTOALL} --cfg smpi/alltoall:${ALLTOALL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-alltoall --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-alltoall coll-alltoall.tesh)
endforeach()
- foreach (BARRIER_COLL ompi mpich ompi_basic_linear ompi_tree ompi_bruck ompi_recursivedoubling ompi_doublering mvapich2_pair mvapich2 impi)
- ADD_TESH(tesh-smpi-coll-barrier-${BARRIER_COLL} --cfg smpi/barrier:${BARRIER_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-barrier --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-barrier coll-barrier.tesh)
+ foreach (ALLTOALLV pair pair_light_barrier pair_mpi_barrier pair_one_barrier ring ring_light_barrier ring_mpi_barrier
+ ring_one_barrier bruck ompi mpich mvapich2 ompi_basic_linear impi)
+ ADD_TESH(tesh-smpi-coll-alltoallv-${ALLTOALLV} --cfg smpi/alltoallv:${ALLTOALLV} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-alltoallv --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-alltoallv coll-alltoallv.tesh)
endforeach()
- foreach (BCAST_COLL arrival_pattern_aware arrival_pattern_aware_wait arrival_scatter binomial_tree flattree
- flattree_pipeline NTSB NTSL NTSL_Isend scatter_LR_allgather scatter_rdb_allgather SMP_binary
- SMP_binomial SMP_linear ompi mpich ompi_split_bintree ompi_pipeline mvapich2 mvapich2_intra_node
- mvapich2_knomial_intra_node impi)
- ADD_TESH(tesh-smpi-coll-bcast-${BCAST_COLL} --cfg smpi/bcast:${BCAST_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-bcast --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-bcast coll-bcast.tesh)
+ foreach (BARRIER ompi mpich ompi_basic_linear ompi_tree ompi_bruck ompi_recursivedoubling ompi_doublering mvapich2_pair mvapich2 impi)
+ ADD_TESH(tesh-smpi-coll-barrier-${BARRIER} --cfg smpi/barrier:${BARRIER} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-barrier --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-barrier coll-barrier.tesh)
endforeach()
- foreach (GATHER_COLL ompi mpich ompi_basic_linear ompi_linear_sync ompi_binomial mvapich2 mvapich2_two_level impi)
- ADD_TESH(tesh-smpi-coll-gather-${GATHER_COLL} --cfg smpi/gather:${GATHER_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-gather --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-gather coll-gather.tesh)
+ foreach (BCAST arrival_pattern_aware arrival_pattern_aware_wait arrival_scatter binomial_tree flattree
+ flattree_pipeline NTSB NTSL NTSL_Isend scatter_LR_allgather scatter_rdb_allgather SMP_binary
+ SMP_binomial SMP_linear ompi mpich ompi_split_bintree ompi_pipeline mvapich2 mvapich2_intra_node
+ mvapich2_knomial_intra_node impi)
+ ADD_TESH(tesh-smpi-coll-bcast-${BCAST} --cfg smpi/bcast:${BCAST} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-bcast --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-bcast coll-bcast.tesh)
endforeach()
- foreach (SCATTER_COLL ompi mpich ompi_basic_linear ompi_binomial mvapich2 mvapich2_two_level_binomial mvapich2_two_level_direct impi)
- ADD_TESH(tesh-smpi-coll-scatter-${SCATTER_COLL} --cfg smpi/scatter:${SCATTER_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-scatter --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-scatter coll-scatter.tesh)
+ foreach (GATHER ompi mpich ompi_basic_linear ompi_linear_sync ompi_binomial mvapich2 mvapich2_two_level impi)
+ ADD_TESH(tesh-smpi-coll-gather-${GATHER} --cfg smpi/gather:${GATHER} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-gather --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-gather coll-gather.tesh)
+ endforeach()
+
+ foreach (REDUCE arrival_pattern_aware binomial flat_tree NTSL scatter_gather ompi mpich ompi_chain ompi_binary impi
+ ompi_basic_linear ompi_binomial ompi_in_order_binary mvapich2 mvapich2_knomial mvapich2_two_level rab)
+ ADD_TESH(tesh-smpi-coll-reduce-${REDUCE} --cfg smpi/reduce:${REDUCE} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-reduce --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-reduce coll-reduce.tesh)
+ endforeach()
+
+ foreach (REDUCE_SCATTER ompi mpich ompi_basic_recursivehalving ompi_ring mpich_noncomm mpich_pair mvapich2 mpich_rdb impi)
+ ADD_TESH(tesh-smpi-coll-reduce-scatter-${REDUCE_SCATTER} --cfg smpi/reduce_scatter:${REDUCE_SCATTER} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-reduce-scatter --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-reduce-scatter coll-reduce-scatter.tesh)
+ endforeach()
+
+ foreach (SCATTER ompi mpich ompi_basic_linear ompi_binomial mvapich2 mvapich2_two_level_binomial mvapich2_two_level_direct impi)
+ ADD_TESH(tesh-smpi-coll-scatter-${SCATTER} --cfg smpi/scatter:${SCATTER} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-scatter --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-scatter coll-scatter.tesh)
endforeach()
endif()
ADD_TESH(tesh-smpi-coll-allreduce-large --cfg smpi/allreduce:ompi_ring_segmented --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-allreduce --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-allreduce coll-allreduce-large.tesh)
ADD_TESH(tesh-smpi-coll-allreduce-automatic --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-allreduce --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-allreduce coll-allreduce-automatic.tesh)
+# Extra alltoall test: cluster-types
+ADD_TESH(tesh-smpi-cluster-types --cfg smpi/alltoall:mvapich2 --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/coll-alltoall --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/coll-alltoall clusters.tesh)
+
# Extra pt2pt pingpong test: broken usage ti-tracing
ADD_TESH_FACTORIES(tesh-smpi-broken "thread" --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/pt2pt-pingpong --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/pt2pt-pingpong broken_hostfiles.tesh)
ADD_TESH(tesh-smpi-replay-ti-tracing --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/pt2pt-pingpong --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/pt2pt-pingpong TI_output.tesh)
set(teshsuite_src ${teshsuite_src} PARENT_SCOPE)
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/coll-allreduce/coll-allreduce-large.tesh
${CMAKE_CURRENT_SOURCE_DIR}/coll-allreduce/coll-allreduce-automatic.tesh
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll-alltoall/clusters.tesh
${CMAKE_CURRENT_SOURCE_DIR}/pt2pt-pingpong/broken_hostfiles.tesh
${CMAKE_CURRENT_SOURCE_DIR}/pt2pt-pingpong/TI_output.tesh PARENT_SCOPE)
set(bin_files ${bin_files} ${CMAKE_CURRENT_SOURCE_DIR}/hostfile
+ ${CMAKE_CURRENT_SOURCE_DIR}/hostfile_cluster
${CMAKE_CURRENT_SOURCE_DIR}/hostfile_coll
${CMAKE_CURRENT_SOURCE_DIR}/hostfile_empty PARENT_SCOPE)
+++ /dev/null
-if(enable_smpi)
- if(WIN32)
- set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
- else()
- set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
- endif()
- include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi")
-
- add_executable (alltoall2 alltoall2.c)
- target_link_libraries(alltoall2 simgrid)
- add_executable (alltoall_basic alltoall_basic.c)
- target_link_libraries(alltoall_basic simgrid)
- add_executable (alltoall_coll alltoall_coll.c)
- target_link_libraries(alltoall_coll simgrid)
-endif()
-
-
-set(tesh_files
- ${tesh_files}
- ${CMAKE_CURRENT_SOURCE_DIR}/alltoall_coll.tesh
- ${CMAKE_CURRENT_SOURCE_DIR}/clusters.tesh
- PARENT_SCOPE)
-set(examples_src
- ${examples_src}
- ${CMAKE_CURRENT_SOURCE_DIR}/alltoall_coll.c
- ${CMAKE_CURRENT_SOURCE_DIR}/alltoall_basic.c
- ${CMAKE_CURRENT_SOURCE_DIR}/alltoall2.c
- PARENT_SCOPE)
-set(bin_files
- ${bin_files}
- ${CMAKE_CURRENT_SOURCE_DIR}/hostfile_cluster
- PARENT_SCOPE)
+++ /dev/null
-/* Copyright (c) 2012, 2014. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-/****************************************************************************
-
- MESSAGE PASSING INTERFACE TEST CASE SUITE
-
- Copyright IBM Corp. 1995
-
- IBM Corp. hereby grants a non-exclusive license to use, copy, modify, and
- distribute this software for any purpose and without fee provided that the
- above copyright notice and the following paragraphs appear in all copies.
-
- IBM Corp. makes no representation that the test cases comprising this
- suite are correct or are an accurate representation of any standard.
-
- In no event shall IBM be liable to any party for direct, indirect, special
- incidental, or consequential damage arising out of the use of this software
- even if IBM Corp. has been advised of the possibility of such damage.
-
- IBM CORP. SPECIFICALLY DISCLAIMS ANY WARRANTIES INCLUDING, BUT NOT LIMITED
- TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS AND IBM
- CORP. HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
- ENHANCEMENTS, OR MODIFICATIONS.
-
-****************************************************************************
-
- These test cases reflect an interpretation of the MPI Standard. They are
- are, in most cases, unit tests of specific MPI behaviors. If a user of any
- test case from this set believes that the MPI Standard requires behavior
- different than that implied by the test case we would appreciate feedback.
-
- Comments may be sent to:
- Richard Treumann
- treumann@kgn.ibm.com
-
-****************************************************************************
-*/
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-#include "mpi.h"
-
-#define MAXLEN 10000
-
-int main(int argc, char *argv[])
-{
-#define N 1000000
- int *out, *in, i, j, k;
- int myself, tasks;
-
- out = malloc(N * sizeof(int));
- in = malloc(N * sizeof(int));
- if ((out == NULL) || (in == NULL)) {
- printf("Error: cannot allocate N bytes for in or out arrays\n");
- exit(1);
- }
- MPI_Init(&argc, &argv);
- MPI_Comm_rank(MPI_COMM_WORLD, &myself);
- MPI_Comm_size(MPI_COMM_WORLD, &tasks);
- for (j = 1; j <= MAXLEN; j *= 10) {
- if (0 == myself) {
- printf("* calling MPI_Alltoall with buffers of %d ints\n", j);
- }
- for (i = 0; i < j * tasks; i++)
- out[i] = myself;
-
- MPI_Alltoall(out, j, MPI_INT, in, j, MPI_INT, MPI_COMM_WORLD);
-
- for (i = 0; i < tasks; i++) {
- for (k = 0; k < j; k++) {
- if (in[k + i * j] != i) {
- printf("<%d> bad answer (%d) at index %d of %d (should be %d)\n",
- myself, in[k + i * j], k + i * j, j * tasks, i);
- break;
- }
- }
- }
- }
- MPI_Barrier(MPI_COMM_WORLD);
- if (myself == 0)
- printf("TEST COMPLETE\n");
- MPI_Finalize();
- return EXIT_SUCCESS;
-}
+++ /dev/null
-/* Copyright (c) 2009-2010, 2012-2014. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-#include "mpi.h"
-
-#ifndef EXIT_SUCCESS
-#define EXIT_SUCCESS 0
-#define EXIT_FAILURE 1
-#endif
-
-int main(int argc, char *argv[])
-{
- int rank, size;
- int i;
- int *sb;
- int *rb;
- int status;
-
- MPI_Init(&argc, &argv);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- MPI_Comm_size(MPI_COMM_WORLD, &size);
-
- sb = (int *) malloc(size * sizeof(int));
- if (!sb) {
- perror("can't allocate send buffer");
- fflush(stderr);
- MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
- exit(EXIT_FAILURE);
- }
- rb = (int *) malloc(size * sizeof(int));
- if (!rb) {
- perror("can't allocate recv buffer");
- fflush(stderr);
- free(sb);
- MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
- exit(EXIT_FAILURE);
- }
- for (i = 0; i < size; ++i) {
- sb[i] = rank + 1;
- rb[i] = 0;
- }
-
- status = MPI_Alltoall(sb, 1, MPI_INT, rb, 1, MPI_INT, MPI_COMM_WORLD);
-
- printf("[%d] rcvbuf=[", rank);
- for (i = 0; i < size; i++)
- printf("%d ", rb[i]);
- printf("]\n");
-
-
- if (rank == 0) {
- if (status != MPI_SUCCESS) {
- printf("all_to_all returned %d\n", status);
- fflush(stdout);
- }
- }
- free(sb);
- free(rb);
- MPI_Finalize();
- return (EXIT_SUCCESS);
-}
#define EXIT_FAILURE 1
#endif
-//define MAXLEN 300000
-
int main(int argc, char *argv[])
{
int rank, size;
printf("%d ", rb[i]);
printf("]\n");
-
if (rank == 0) {
if (status != MPI_SUCCESS) {
printf("all_to_all returned %d\n", status);
! output sort
p Test classic - backbone
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ./hostfile_cluster -platform ../../../examples/platforms/cluster.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/../alltoall/alltoall_coll -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test separate clusters
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ./hostfile_cluster -platform ../../../examples/platforms/clusters_routing_full.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/../alltoall/alltoall_coll -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/clusters_routing_full.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test torus
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ./hostfile_cluster -platform ../../../examples/platforms/torus_cluster.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/../alltoall/alltoall_coll -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/torus_cluster.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test fat tree
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ./hostfile_cluster -platform ../../../examples/platforms/fat_tree_cluster.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/../alltoall/alltoall_coll -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/fat_tree_cluster.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test fat tree IB
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ./hostfile_cluster -platform ../../../examples/platforms/fat_tree_cluster.xml -np 12 --cfg=network/model:IB --log=xbt_cfg.thres:critical ${bindir:=.}/../alltoall/alltoall_coll -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/fat_tree_cluster.xml -np 12 --cfg=network/model:IB --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
printf("%d ", rb[i]);
printf("]\n");
-
if (rank == 0) {
if (status != MPI_SUCCESS) {
printf("all_to_all returned %d\n", status);
! output sort
p Test all to all
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/alltoall_coll -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
free(msg);
}
-
int main(int argc, char **argv)
{
MPI_Comm comm;
/*
* Test of reduce scatter.
- *
- * Each processor contributes its rank + the index to the reduction,
- * then receives the ith sum
- *
+ * Each processor contributes its rank + the index to the reduction, then receives the ith sum
* Can be called with any number of processors.
*/
int size, rank, i, sumval;
MPI_Comm comm;
-
MPI_Init( &argc, &argv );
comm = MPI_COMM_WORLD;
MPI_Comm_rank( comm, &rank );
sendbuf = (int *) malloc( size * sizeof(int) );
for (i=0; i<size; i++)
- sendbuf[i] = rank + i;
+ sendbuf[i] = rank + i;
recvcounts = (int *)malloc( size * sizeof(int) );
recvbuf = (int *)malloc( size * sizeof(int) );
for (i=0; i<size; i++)
- recvcounts[i] = 1;
+ recvcounts[i] = 1;
MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm );
sumval = size * rank + ((size - 1) * size)/2;
-/* recvbuf should be size * (rank + i) */
+ /* recvbuf should be size * (rank + i) */
if (recvbuf[0] != sumval) {
- err++;
- fprintf( stdout, "Did not get expected value for reduce scatter\n" );
- fprintf( stdout, "[%d] Got %d expected %d\n", rank, recvbuf[0], sumval );
+ err++;
+ fprintf( stdout, "Did not get expected value for reduce scatter\n" );
+ fprintf( stdout, "[%d] Got %d expected %d\n", rank, recvbuf[0], sumval );
}
MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
if (rank == 0 && toterr == 0) {
- printf( " No Errors\n" );
+ printf( " No Errors\n" );
}
free(sendbuf);
free(recvcounts);
free(recvbuf);
-
- MPI_Finalize( );
+
+ MPI_Finalize();
return toterr;
}
! output sort
p Test reduce_scatter
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/reduce_scatter_coll --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> No Errors
> [rank 0] -> Tremblay
> [rank 10] -> Fafard
sb = (int *) xbt_malloc(size * sizeof(int));
rb = (int *) xbt_malloc(size * sizeof(int));
-
+
for (i = 0; i < size; ++i) {
sb[i] = rank*size + i;
rb[i] = 0;
for (i = 0; i < size; i++)
printf("%d ", sb[i]);
printf("]\n");
-
+
int root=0;
status = MPI_Reduce(sb, rb, size, MPI_INT, MPI_SUM, root, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
fflush(stdout);
}
}
-
-
+
printf("[%d] second sndbuf=[", rank);
for (i = 0; i < 1; i++)
printf("%d ", sb[i]);
printf("]\n");
-
+
root=size-1;
status = MPI_Reduce(sb, rb, 1, MPI_INT, MPI_PROD, root, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
! output sort
p Test allreduce
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/reduce_coll --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [0] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
> [0] second sndbuf=[0 ]
> [0] sndbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 ]
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-/**
- * MESSAGE PASSING INTERFACE TEST CASE SUITE
- *
- * Copyright IBM Corp. 1995
- *
- * IBM Corp. hereby grants a non-exclusive license to use, copy, modify, and
- *distribute this software for any purpose and without fee provided that the
- *above copyright notice and the following paragraphs appear in all copies.
-
- * IBM Corp. makes no representation that the test cases comprising this
- * suite are correct or are an accurate representation of any standard.
-
- * In no event shall IBM be liable to any party for direct, indirect, special
- * incidental, or consequential damage arising out of the use of this software
- * even if IBM Corp. has been advised of the possibility of such damage.
-
- * IBM CORP. SPECIFICALLY DISCLAIMS ANY WARRANTIES INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS AND IBM
- * CORP. HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
- * ENHANCEMENTS, OR MODIFICATIONS.
- **/
#include <stdio.h>
#include <mpi.h>
-static int ibm_test(int rank, int size)
-{
-
-#define MAXLEN 10000
-
- int success = 1;
- int root = 0;
- int i, j, k;
- int *out;
- int *in;
-
- out = malloc(MAXLEN * 64 * sizeof(int));
- in = malloc(MAXLEN * sizeof(int));
-
- for (j = 1; j <= MAXLEN; j *= 10) {
- root = (root + 1) % size;
- if (rank == root)
- for (i = 0; i < j * size; i++)
- out[i] = i;
-
- MPI_Scatter(out, j, MPI_INT, in, j, MPI_INT, root, MPI_COMM_WORLD);
-
- for (k = 0; k < j; k++) {
- if (in[k] != k + rank * j) {
- fprintf(stderr, "task %d bad answer (%d) at index %d k of %d (should be %d)",rank, in[k], k, j, (k + rank * j));
- return (0);
- }
- }
- }
- free(out);
- free(in);
- MPI_Barrier(MPI_COMM_WORLD);
- return (success);
-}
-
-/** small test: the root sends a single distinct double to other processes */
-static int small_test(int rank, int size)
-{
- int success = 1;
- int retval;
- int sendcount = 1; // one double to each process
- int recvcount = 1;
- int i;
- double *sndbuf = NULL;
- double rcvd;
- int root = 0; // arbitrary choice
-
- // on root, initialize sendbuf
- if (root == rank) {
- sndbuf = malloc(size * sizeof(double));
- for (i = 0; i < size; i++) {
- sndbuf[i] = (double) i;
- }
- }
-
- retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, root, MPI_COMM_WORLD);
- if (root == rank) {
- free(sndbuf);
- }
- if (retval != MPI_SUCCESS) {
- fprintf(stderr, "(%s:%d) MPI_Scatter() returned retval=%d\n", __FILE__, __LINE__, retval);
- return 0;
- }
- // verification
- if ((double) rank != rcvd) {
- fprintf(stderr, "[%d] has %f instead of %d\n", rank, rcvd, rank);
- success = 0;
- }
- return (success);
-}
-
int main(int argc, char **argv)
{
- int size, rank;
-
- MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &size);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ int size, rank;
+ int success = 1;
+ int retval;
+ int sendcount = 1; // one double to each process
+ int recvcount = 1;
+ int i;
+ double *sndbuf = NULL;
+ double rcvd;
+ int root = 0; // arbitrary choice
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ // on root, initialize sendbuf
+ if (root == rank) {
+ sndbuf = malloc(size * sizeof(double));
+ for (i = 0; i < size; i++) {
+ sndbuf[i] = (double) i;
+ }
+ }
+
+ retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, root, MPI_COMM_WORLD);
+ if (root == rank) {
+ free(sndbuf);
+ }
+  if (retval != MPI_SUCCESS) {
+    fprintf(stderr, "(%s:%d) MPI_Scatter() returned retval=%d\n", __FILE__, __LINE__, retval);
+    /* When this check lived in small_test(), "return 0" meant "test failed".
+     * Hoisted into main() it would exit with SUCCESS status and skip
+     * MPI_Finalize(); finalize properly and report failure instead. */
+    MPI_Finalize();
+    return 1;
+  }
+ // verification
+ if ((double) rank != rcvd) {
+ fprintf(stderr, "[%d] has %f instead of %d\n", rank, rcvd, rank);
+ success = 0;
+ }
/* test 1 */
if (0 == rank)
printf("** Small Test Result: ...\n");
- if (!small_test(rank, size))
- printf("\t[%d] failed.\n", rank);
- else
- printf("\t[%d] ok.\n", rank);
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- /* test 2 */
- if (0 == rank)
- printf("** IBM Test Result: ...\n");
- if (!ibm_test(rank, size))
+ if (!success)
printf("\t[%d] failed.\n", rank);
else
printf("\t[%d] ok.\n", rank);
p Test scatter
$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [0] ok.
-> [0] ok.
-> [10] ok.
> [10] ok.
> [11] ok.
-> [11] ok.
-> [12] ok.
> [12] ok.
> [13] ok.
-> [13] ok.
-> [14] ok.
> [14] ok.
> [15] ok.
-> [15] ok.
-> [1] ok.
> [1] ok.
> [2] ok.
-> [2] ok.
-> [3] ok.
> [3] ok.
> [4] ok.
-> [4] ok.
-> [5] ok.
> [5] ok.
> [6] ok.
-> [6] ok.
-> [7] ok.
> [7] ok.
> [8] ok.
-> [8] ok.
-> [9] ok.
> [9] ok.
-> ** IBM Test Result: ...
> ** Small Test Result: ...
> [rank 0] -> Tremblay
> [rank 10] -> Fafard
target_link_libraries(compute2 simgrid)
add_executable (compute3 compute3.c)
target_link_libraries(compute3 simgrid)
+ ADD_TESH_FACTORIES(tesh-smpi-compute "thread;ucontext;raw;boost" --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/compute --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/compute compute.tesh)
endif()
set(tesh_files
+++ /dev/null
-if(enable_smpi)
- if(WIN32)
- set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
- else()
- set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
- endif()
- include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi")
-
- add_executable (reduce reduce.c)
- target_link_libraries(reduce simgrid)
- add_executable (reduce_coll reduce_coll.c)
- target_link_libraries(reduce_coll simgrid)
- add_executable (reduce_scatter_coll reduce_scatter_coll.c)
- target_link_libraries(reduce_scatter_coll simgrid)
-endif()
-
-set(tesh_files
- ${tesh_files}
- ${CMAKE_CURRENT_SOURCE_DIR}/reduce.tesh
- ${CMAKE_CURRENT_SOURCE_DIR}/reduce_coll.tesh
- ${CMAKE_CURRENT_SOURCE_DIR}/reduce_scatter_coll.tesh
- PARENT_SCOPE)
-set(examples_src
- ${examples_src}
- ${CMAKE_CURRENT_SOURCE_DIR}/reduce.c
- ${CMAKE_CURRENT_SOURCE_DIR}/reduce_coll.c
- ${CMAKE_CURRENT_SOURCE_DIR}/reduce_scatter_coll.c
- PARENT_SCOPE)
+++ /dev/null
-/* Copyright (c) 2012-2014. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include <stdio.h>
-#include <mpi.h>
-
-/**
- * MESSAGE PASSING INTERFACE TEST CASE SUITE
- *
- * Copyright IBM Corp. 1995
- *
- * IBM Corp. hereby grants a non-exclusive license to use, copy, modify, and
- *distribute this software for any purpose and without fee provided that the
- *above copyright notice and the following paragraphs appear in all copies.
-
- * IBM Corp. makes no representation that the test cases comprising this
- * suite are correct or are an accurate representation of any standard.
-
- * In no event shall IBM be liable to any party for direct, indirect, special
- * incidental, or consequential damage arising out of the use of this software
- * even if IBM Corp. has been advised of the possibility of such damage.
-
- * IBM CORP. SPECIFICALLY DISCLAIMS ANY WARRANTIES INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS AND IBM
- * CORP. HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
- * ENHANCEMENTS, OR MODIFICATIONS.
- * ***************************************************************************
- **/
-static int ibm_test(int rank, int size)
-{
- int success = 1;
-#define MAXLEN 10000
-
- int root, i, j, k;
- int out[MAXLEN];
- int in[MAXLEN];
- root = size / 2;
-
- for (j = 1; j <= MAXLEN; j *= 10) {
- for (i = 0; i < j; i++)
- out[i] = i;
-
- MPI_Reduce(out, in, j, MPI_INT, MPI_SUM, root, MPI_COMM_WORLD);
-
- if (rank == root) {
- for (k = 0; k < j; k++) {
- if (in[k] != k * size) {
- printf("bad answer (%d) at index %d of %d (should be %d)", in[k],
- k, j, k * size);
- success = 0;
- break;
- }
- }
- }
- }
- return (success);
-}
-
-
-
-
-int main(int argc, char **argv)
-{
- int size, rank;
- int root = 0;
- int value;
- int sum = -99, sum_mirror = -99, min = 999, max = -999;
-
- double start_timer;
- int quiet = 0;
-
-
- MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &size);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-
- if (argc > 1 && !strcmp(argv[1], "-q"))
- quiet = 1;
-
- start_timer = MPI_Wtime();
-
- value = rank + 1; /* easy to verify that sum= (size*(size+1))/2; */
-
- //printf("[%d] has value %d\n", rank, value);
- MPI_Reduce(&value, &sum, 1, MPI_INT, MPI_SUM, root, MPI_COMM_WORLD);
- MPI_Reduce(&value, &sum_mirror, 1, MPI_INT, MPI_SUM, root,
- MPI_COMM_WORLD);
-
- MPI_Reduce(&value, &min, 1, MPI_INT, MPI_MIN, root, MPI_COMM_WORLD);
- MPI_Reduce(&value, &max, 1, MPI_INT, MPI_MAX, root, MPI_COMM_WORLD);
- if (rank == root) {
- printf("** Scalar Int Test Result:\n");
- printf("\t[%d] sum=%d ... validation ", rank, sum);
- if (((size * (size + 1)) / 2 == sum) && (sum_mirror == sum))
- printf("ok.\n");
- else
- printf("failed (sum=%d,sum_mirror=%d while both sould be %d.\n",
- sum, sum_mirror, (size * (size + 1)) / 2);
- printf("\t[%d] min=%d ... validation ", rank, min);
- if (1 == min)
- printf("ok.\n");
- else
- printf("failed.\n");
- printf("\t[%d] max=%d ... validation ", rank, max);
- if (size == max)
- printf("ok.\n");
- else
- printf("failed.\n");
- if (!quiet)
- printf("Elapsed time=%f s\n", MPI_Wtime() - start_timer);
- }
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- if (0 == rank)
- printf("** IBM Test Result: ...\n");
- if (!ibm_test(rank, size))
- printf("\t[%d] failed.\n", rank);
- else if (!quiet)
- printf("\t[%d] ok.\n", rank);
- else
- printf("\tok.\n");
-
- MPI_Finalize();
- return 0;
-}
+++ /dev/null
-# use the tested library, not the installed one
-# (since we want to pass it to the child, it has to be redefined before each command)
-# Go for the first test
-p Test Reduce with 3 processes
-! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 3 ${bindir:=.}/reduce -q --log=smpi_kernel.thres:warning
-> [rank 0] -> Tremblay
-> [rank 1] -> Tremblay
-> [rank 2] -> Tremblay
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
-> ** Scalar Int Test Result:
-> [0] sum=6 ... validation ok.
-> [0] min=1 ... validation ok.
-> [0] max=3 ... validation ok.
-> ** IBM Test Result: ...
-> ok.
-> ok.
-> ok.
-
-# second test
-! setenv LD_LIBRARY_PATH=../../lib
-! output sort
-p Test Reduce with 5 processes
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 5 ${bindir:=.}/reduce -q --log=smpi_kernel.thres:warning
-> [rank 0] -> Tremblay
-> [rank 1] -> Tremblay
-> [rank 2] -> Tremblay
-> [rank 3] -> Tremblay
-> [rank 4] -> Jupiter
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
-> ** Scalar Int Test Result:
-> [0] sum=15 ... validation ok.
-> [0] min=1 ... validation ok.
-> [0] max=5 ... validation ok.
-> ** IBM Test Result: ...
-> ok.
-> ok.
-> ok.
-> ok.
-> ok.
-
-# third test
-! output sort
-! setenv LD_LIBRARY_PATH=../../lib
-p Test Reduce with 12 processes
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 12 ${bindir:=.}/reduce -q --log=smpi_kernel.thres:warning
-> [rank 0] -> Tremblay
-> [rank 1] -> Tremblay
-> [rank 2] -> Tremblay
-> [rank 3] -> Tremblay
-> [rank 4] -> Jupiter
-> [rank 5] -> Jupiter
-> [rank 6] -> Jupiter
-> [rank 7] -> Jupiter
-> [rank 8] -> Fafard
-> [rank 9] -> Fafard
-> [rank 10] -> Fafard
-> [rank 11] -> Fafard
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
-> ** Scalar Int Test Result:
-> [0] sum=78 ... validation ok.
-> [0] min=1 ... validation ok.
-> [0] max=12 ... validation ok.
-> ** IBM Test Result: ...
-> ok.
-> ok.
-> ok.
-> ok.
-> ok.
-> ok.
-> ok.
-> ok.
-> ok.
-> ok.
-> ok.
-> ok.
MPI_Type_hvector(SIZE, 1, SIZE*sizeof(double), MPI_DOUBLE, &columntype);
MPI_Type_commit(&columntype);
-
if (rank == 0) {
for(i=0; i <SIZE;i++)
for(j=0; j <SIZE;j++)
teshsuite/simix/stack_overflow/CMakeLists.txt
teshsuite/smpi/CMakeLists.txt
- teshsuite/smpi/alltoall/CMakeLists.txt
teshsuite/smpi/compute/CMakeLists.txt
- teshsuite/smpi/reduce/CMakeLists.txt
teshsuite/smpi/shared/CMakeLists.txt
teshsuite/smpi/isp/umpire/CMakeLists.txt
teshsuite/smpi/mpich3-test/CMakeLists.txt
ADD_TESH(mc-bugged1-liveness-visited-ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_visited.tesh)
ADD_TESH(mc-bugged1-liveness-visited-ucontext-sparse --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_visited_sparse.tesh)
if(HAVE_C_STACK_CLEANER)
- # This test checks if the stack cleaner is makign a difference:
+ # This test checks if the stack cleaner is making a difference:
add_test(mc-bugged1-liveness-stack-cleaner
${CMAKE_HOME_DIRECTORY}/examples/msg/mc/bugged1_liveness_stack_cleaner
${CMAKE_HOME_DIRECTORY}/examples/msg/mc/
# END TESH TESTS
### SMPI ###
- IF(enable_smpi)
- # BEGIN TESH TESTS
- # smpi examples
- ADD_TESH_FACTORIES(tesh-smpi-reduce "thread;ucontext;raw;boost" --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/reduce --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/reduce reduce.tesh)
- ADD_TESH_FACTORIES(tesh-smpi-compute "thread;ucontext;raw;boost" --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/compute --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/compute compute.tesh)
- FOREACH (ALLTOALL_COLL 2dmesh 3dmesh pair pair_rma pair_one_barrier pair_light_barrier
- pair_mpi_barrier rdb ring ring_light_barrier
- ring_mpi_barrier ring_one_barrier
- bruck basic_linear ompi mpich mvapich2 mvapich2_scatter_dest, impi)
- ADD_TESH(tesh-smpi-alltoall-coll-${ALLTOALL_COLL} --cfg smpi/alltoall:${ALLTOALL_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/alltoall --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/alltoall alltoall_coll.tesh)
- ENDFOREACH()
- FOREACH (REDUCE_COLL default arrival_pattern_aware binomial flat_tree NTSL scatter_gather ompi mpich ompi_chain ompi_binary ompi_basic_linear ompi_binomial ompi_in_order_binary mvapich2 mvapich2_knomial mvapich2_two_level impi rab)
- ADD_TESH(tesh-smpi-reduce-coll-${REDUCE_COLL} --cfg smpi/reduce:${REDUCE_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/reduce --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/reduce reduce_coll.tesh)
- ENDFOREACH()
- FOREACH (REDUCE_SCATTER_COLL default ompi mpich ompi_basic_recursivehalving ompi_ring mpich_noncomm mpich_pair mvapich2 mpich_rdb impi)
- ADD_TESH(tesh-smpi-reduce-scatter-coll-${REDUCE_SCATTER_COLL} --cfg smpi/reduce_scatter:${REDUCE_SCATTER_COLL} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/reduce --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/reduce reduce_scatter_coll.tesh)
- ENDFOREACH()
- ADD_TESH(tesh-smpi-clusters-types --cfg smpi/alltoall:mvapich2 --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/alltoall --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/alltoall clusters.tesh)
- # END TESH TESTS
IF(enable_smpi_MPICH3_testsuite)
IF(HAVE_THREAD_CONTEXTS)
ADD_TEST(test-smpi-mpich3-coll-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/privatize_global_variables:${HAVE_PRIVATIZATION})
ENDIF()
ENDIF()
- # BEGIN TESH TESTS
- ADD_TESH_FACTORIES(smpi-energy "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/energy.tesh)
- IF(SMPI_FORTRAN)
- ADD_TESH_FACTORIES(smpi-energy-f77 "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f77/energy.tesh)
- ADD_TESH_FACTORIES(smpi-energy-f90 "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f90/energy.tesh)
- ENDIF()
- ADD_TESH_FACTORIES(smpi-msg-masterslave "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_msg_masterslave --cd ${CMAKE_BINARY_DIR}/examples/smpi/smpi_msg_masterslave ${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_msg_masterslave/msg_smpi.tesh)
- IF(NOT HAVE_MC)
- ADD_TESH(smpi-replay-multiple --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/replay_multiple --cd ${CMAKE_BINARY_DIR}/examples/smpi/replay_multiple ${CMAKE_HOME_DIRECTORY}/examples/smpi/replay_multiple/replay_multiple.tesh)
- ENDIF()
- ADD_TESH(smpi-tracing-ptp --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi --cd ${CMAKE_BINARY_DIR}/examples/smpi ${CMAKE_HOME_DIRECTORY}/examples/smpi/tracing/smpi_traced.tesh)
- ADD_TESH(smpi-replay-simple --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi --cd ${CMAKE_BINARY_DIR}/examples/smpi ${CMAKE_HOME_DIRECTORY}/examples/smpi/replay/smpi_replay.tesh)
- IF(HAVE_MC)
- ADD_TESH(smpi-mc-only-send-determinism --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/mc --cd ${CMAKE_BINARY_DIR}/examples/smpi/mc ${CMAKE_HOME_DIRECTORY}/examples/smpi/mc/only_send_deterministic.tesh)
- ENDIF()
- # END TESH TESTS
- ENDIF()
-
## BINDINGS ##
### LUA ###
IF(HAVE_LUA)