foreach(x coll-allgather coll-allgatherv coll-allreduce coll-allreduce-with-leaks coll-alltoall coll-alltoallv coll-barrier coll-bcast
coll-gather coll-reduce coll-reduce-scatter coll-scatter macro-sample pt2pt-dsend pt2pt-pingpong
type-hvector type-indexed type-struct type-vector bug-17132 gh-139 timers privatization
- io-simple io-simple-at io-all io-all-at io-shared io-ordered topo-cart-sub)
+ io-simple io-simple-at io-all io-all-at io-shared io-ordered topo-cart-sub replay)
add_executable (${x} EXCLUDE_FROM_ALL ${x}/${x}.c)
target_link_libraries(${x} simgrid)
set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
coll-gather coll-reduce coll-reduce-scatter coll-scatter macro-sample pt2pt-dsend pt2pt-pingpong
type-hvector type-indexed type-struct type-vector bug-17132 gh-139 timers privatization
macro-shared auto-shared macro-partial-shared macro-partial-shared-communication
- io-simple io-simple-at io-all io-all-at io-shared io-ordered topo-cart-sub)
+ io-simple io-simple-at io-all io-all-at io-shared io-ordered topo-cart-sub replay)
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.c)
endforeach()
# Extra pt2pt pingpong test: broken usage ti-tracing
ADD_TESH_FACTORIES(tesh-smpi-broken "thread" --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/pt2pt-pingpong --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi/pt2pt-pingpong ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/pt2pt-pingpong/broken_hostfiles.tesh)
ADD_TESH(tesh-smpi-replay-ti-tracing --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/pt2pt-pingpong --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi/pt2pt-pingpong ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/pt2pt-pingpong/TI_output.tesh)
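+ # Replay test: TI tracing and replay covering the collective operations (see teshsuite/smpi/replay)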
+ ADD_TESH(tesh-smpi-replay-ti-tracing-coll --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/replay --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi/replay ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/replay/replay.tesh)
+
ADD_TESH_FACTORIES(tesh-smpi-gh-139 "thread" --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/gh-139 --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi/gh-139 ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/gh-139/gh-139.tesh)
# Simple privatization tests
--- /dev/null
+++ b/teshsuite/smpi/replay/replay.c
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpi.h"
+
+#define BUFSIZE (1024 * 1024)
+
+int main(int argc, char **argv)
+{
+ int i, nprocs = -1;
+ int rank = -1;
+ int *sendbuf, *recvbuf = NULL, *displs = NULL, *counts = NULL, *rcounts = NULL, *alltoallvcounts;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+
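+ /* Every rank sends its own rank id, so the received data identifies the sender */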
+ sendbuf = (int *) malloc (BUFSIZE * nprocs * sizeof(int));
+ for (i = 0; i < BUFSIZE * nprocs; i++)
+ sendbuf[i] = rank;
+
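+ /* Per-peer counts for the MPI_Alltoallv call below, capped at BUFSIZE */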
+ alltoallvcounts = (int *) malloc (nprocs * sizeof(int));
+ for (i = 0; i < nprocs; i++)
+ if ((i + rank) < BUFSIZE)
+ alltoallvcounts[i] = i + rank;
+ else
+ alltoallvcounts[i] = BUFSIZE;
+
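+ /* In the first round only the root allocates the receive buffer and the count/displacement arrays */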
+ if (rank == 0) {
+ recvbuf = (int *) malloc (BUFSIZE * nprocs * sizeof(int));
+ for (i = 0; i < BUFSIZE * nprocs; i++)
+ recvbuf[i] = i;
+
+ displs = (int *) malloc (nprocs * sizeof(int));
+ counts = (int *) malloc (nprocs * sizeof(int));
+ rcounts = (int *) malloc (nprocs * sizeof(int));
+ for (i = 0; i < nprocs; i++) {
+ displs[i] = i * BUFSIZE;
+ if (i < BUFSIZE)
+ rcounts[i] = counts[i] = i;
+ else
+ rcounts[i] = counts[i] = BUFSIZE;
+ }
+ }
+
+ /* First round: the receive-side buffers are still unallocated on non-root ranks, where they are not significant */
+ MPI_Barrier (MPI_COMM_WORLD);
+ MPI_Bcast (sendbuf, BUFSIZE, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Gather (&sendbuf[rank*BUFSIZE], BUFSIZE, MPI_INT, recvbuf, BUFSIZE, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Scatter (recvbuf, BUFSIZE, MPI_INT, sendbuf, BUFSIZE, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Gatherv (&sendbuf[rank*BUFSIZE], (rank < BUFSIZE) ? rank : BUFSIZE, MPI_INT, recvbuf, rcounts, displs, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Scatterv (recvbuf, counts, displs, MPI_INT, sendbuf, (rank < BUFSIZE) ? rank : BUFSIZE, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Reduce (sendbuf, recvbuf, BUFSIZE, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
+
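+ /* Allocate the same buffers on the remaining ranks: the upcoming all-to-all collectives need them everywhere */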
+ if (rank != 0) {
+ recvbuf = (int *) malloc (BUFSIZE * nprocs * sizeof(int));
+ for (i = 0; i < BUFSIZE * nprocs; i++)
+ recvbuf[i] = i;
+
+ displs = (int *) malloc (nprocs * sizeof(int));
+ counts = (int *) malloc (nprocs * sizeof(int));
+ rcounts = (int *) malloc (nprocs * sizeof(int));
+ for (i = 0; i < nprocs; i++) {
+ displs[i] = i * BUFSIZE;
+ if (i < BUFSIZE)
+ rcounts[i] = counts[i] = i;
+ else
+ rcounts[i] = counts[i] = BUFSIZE;
+ }
+ }
+
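+ /* Second round: every rank now has valid buffers, so run the rooted collectives again plus the all-to-all ones */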
+ MPI_Barrier (MPI_COMM_WORLD);
+ MPI_Bcast (sendbuf, BUFSIZE, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Gather (&sendbuf[rank*BUFSIZE], BUFSIZE, MPI_INT, recvbuf, BUFSIZE, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Scatter (recvbuf, BUFSIZE, MPI_INT, sendbuf, BUFSIZE, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Gatherv (&sendbuf[rank*BUFSIZE], (rank < BUFSIZE) ? rank : BUFSIZE, MPI_INT, recvbuf, rcounts, displs, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Scatterv (recvbuf, counts, displs, MPI_INT, sendbuf, (rank < BUFSIZE) ? rank : BUFSIZE, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Reduce (sendbuf, recvbuf, BUFSIZE, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
+ MPI_Allgather (sendbuf, BUFSIZE, MPI_INT, recvbuf, BUFSIZE, MPI_INT, MPI_COMM_WORLD);
+ MPI_Alltoall (recvbuf, BUFSIZE, MPI_INT, sendbuf, BUFSIZE, MPI_INT, MPI_COMM_WORLD);
+ MPI_Allgatherv (sendbuf, (rank < BUFSIZE) ? rank : BUFSIZE, MPI_INT, recvbuf, rcounts, displs, MPI_INT, MPI_COMM_WORLD);
+ MPI_Alltoallv (recvbuf, alltoallvcounts, displs, MPI_INT, sendbuf, alltoallvcounts, displs, MPI_INT, MPI_COMM_WORLD);
+ MPI_Allreduce (sendbuf, recvbuf, BUFSIZE, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
+ MPI_Reduce_scatter (sendbuf, recvbuf, rcounts, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
+ MPI_Scan (sendbuf, recvbuf, BUFSIZE, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
+ MPI_Exscan (sendbuf, recvbuf, BUFSIZE, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
+ MPI_Barrier (MPI_COMM_WORLD);
+
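+ /* Each buffer was allocated exactly once per rank (root in the first branch, the others in the second) */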
+ free (alltoallvcounts);
+ free (sendbuf);
+ free (recvbuf);
+ free (displs);
+ free (counts);
+ free (rcounts);
+
+ MPI_Finalize ();
+ return 0;
+}