src/replay/replay
src/testall
teshsuite/bug-17132/bug-17132
+teshsuite/smpi/isp/umpire/abort
+teshsuite/smpi/isp/umpire/abort1
+teshsuite/smpi/isp/umpire/abort2
+teshsuite/smpi/isp/umpire/abort3
+teshsuite/smpi/isp/umpire/any_src-can-deadlock
+teshsuite/smpi/isp/umpire/any_src-can-deadlock10
+teshsuite/smpi/isp/umpire/any_src-can-deadlock10_mod
+teshsuite/smpi/isp/umpire/any_src-can-deadlock11
+teshsuite/smpi/isp/umpire/any_src-can-deadlock2
+teshsuite/smpi/isp/umpire/any_src-can-deadlock3
+teshsuite/smpi/isp/umpire/any_src-can-deadlock4
+teshsuite/smpi/isp/umpire/any_src-can-deadlock4_mod
+teshsuite/smpi/isp/umpire/any_src-can-deadlock5
+teshsuite/smpi/isp/umpire/any_src-can-deadlock5_mod
+teshsuite/smpi/isp/umpire/any_src-can-deadlock6
+teshsuite/smpi/isp/umpire/any_src-can-deadlock6_mod
+teshsuite/smpi/isp/umpire/any_src-can-deadlock7
+teshsuite/smpi/isp/umpire/any_src-can-deadlock8
+teshsuite/smpi/isp/umpire/any_src-can-deadlock9
+teshsuite/smpi/isp/umpire/any_src-deadlock
+teshsuite/smpi/isp/umpire/any_src-wait-deadlock
+teshsuite/smpi/isp/umpire/any_src-wait-deadlock2
+teshsuite/smpi/isp/umpire/any_src-waitall-deadlock
+teshsuite/smpi/isp/umpire/any_src-waitall-deadlock2
+teshsuite/smpi/isp/umpire/any_src-waitall-deadlock3
+teshsuite/smpi/isp/umpire/any_src-waitany-deadlock
+teshsuite/smpi/isp/umpire/any_src-waitany-deadlock2
+teshsuite/smpi/isp/umpire/basic-deadlock
+teshsuite/smpi/isp/umpire/basic-deadlock-cart_create
+teshsuite/smpi/isp/umpire/basic-deadlock-cart_sub
+teshsuite/smpi/isp/umpire/basic-deadlock-comm_create
+teshsuite/smpi/isp/umpire/basic-deadlock-comm_dup
+teshsuite/smpi/isp/umpire/basic-deadlock-comm_split
+teshsuite/smpi/isp/umpire/basic-deadlock-graph_create
+teshsuite/smpi/isp/umpire/basic-deadlock-intercomm_create
+teshsuite/smpi/isp/umpire/basic-deadlock-intercomm_merge
+teshsuite/smpi/isp/umpire/bcast-deadlock
+teshsuite/smpi/isp/umpire/change-send-buffer
+teshsuite/smpi/isp/umpire/change-send-buffer-exhaustive
+teshsuite/smpi/isp/umpire/change-send-buffer-type-exhaustive
+teshsuite/smpi/isp/umpire/collective-exhaustive-byte-int-mismatch
+teshsuite/smpi/isp/umpire/collective-exhaustive-no-error
+teshsuite/smpi/isp/umpire/collective-misorder
+teshsuite/smpi/isp/umpire/collective-misorder-allreduce
+teshsuite/smpi/isp/umpire/collective-misorder2
+teshsuite/smpi/isp/umpire/comm-bcast-deadlock
+teshsuite/smpi/isp/umpire/comm-deadlock
+teshsuite/smpi/isp/umpire/comm-dup-no-error
+teshsuite/smpi/isp/umpire/comm-dup-no-free
+teshsuite/smpi/isp/umpire/comm-dup-no-free2
+teshsuite/smpi/isp/umpire/comm-simple
+teshsuite/smpi/isp/umpire/comm-split-no-free
+teshsuite/smpi/isp/umpire/comm-translate-ranks
+teshsuite/smpi/isp/umpire/complex-deadlock
+teshsuite/smpi/isp/umpire/deadlock-config
+teshsuite/smpi/isp/umpire/deadlock-config_blocking
+teshsuite/smpi/isp/umpire/dropped-req
+teshsuite/smpi/isp/umpire/errhandler-no-error
+teshsuite/smpi/isp/umpire/errhandler-no-free
+teshsuite/smpi/isp/umpire/finalize-deadlock
+teshsuite/smpi/isp/umpire/group-no-error
+teshsuite/smpi/isp/umpire/group-no-error-exhaustive
+teshsuite/smpi/isp/umpire/group-no-error2
+teshsuite/smpi/isp/umpire/group-no-error3
+teshsuite/smpi/isp/umpire/group-no-free
+teshsuite/smpi/isp/umpire/group-no-free-exhaustive
+teshsuite/smpi/isp/umpire/group-no-free2
+teshsuite/smpi/isp/umpire/group-no-free3
+teshsuite/smpi/isp/umpire/hello
+teshsuite/smpi/isp/umpire/intercomm_create-deadlock
+teshsuite/smpi/isp/umpire/intercomm_create-deadlock2
+teshsuite/smpi/isp/umpire/intercomm_create-deadlock3
+teshsuite/smpi/isp/umpire/intercomm_create-deadlock4
+teshsuite/smpi/isp/umpire/intercomm_create-no-error
+teshsuite/smpi/isp/umpire/intercomm_merge-deadlock
+teshsuite/smpi/isp/umpire/irecv-deadlock
+teshsuite/smpi/isp/umpire/irecv-isend-ok
+teshsuite/smpi/isp/umpire/irecv-isend-ok2
+teshsuite/smpi/isp/umpire/lost-request
+teshsuite/smpi/isp/umpire/lost-request-waitall
+teshsuite/smpi/isp/umpire/lost-request2
+teshsuite/smpi/isp/umpire/lost-request3
+teshsuite/smpi/isp/umpire/no-error
+teshsuite/smpi/isp/umpire/no-error-any_src
+teshsuite/smpi/isp/umpire/no-error-derived-comms
+teshsuite/smpi/isp/umpire/no-error-interleaved-isend
+teshsuite/smpi/isp/umpire/no-error-persistent
+teshsuite/smpi/isp/umpire/no-error-persistent-all-completions
+teshsuite/smpi/isp/umpire/no-error-persistent-test
+teshsuite/smpi/isp/umpire/no-error-persistent-testpartial
+teshsuite/smpi/isp/umpire/no-error-persistent-waitpartial
+teshsuite/smpi/isp/umpire/no-error-probe
+teshsuite/smpi/isp/umpire/no-error-probe-any_src
+teshsuite/smpi/isp/umpire/no-error-probe-any_tag
+teshsuite/smpi/isp/umpire/no-error-test
+teshsuite/smpi/isp/umpire/no-error-testall
+teshsuite/smpi/isp/umpire/no-error-testany
+teshsuite/smpi/isp/umpire/no-error-vector-isend
+teshsuite/smpi/isp/umpire/no-error-wait
+teshsuite/smpi/isp/umpire/no-error-wait-any_src
+teshsuite/smpi/isp/umpire/no-error-wait-any_src2
+teshsuite/smpi/isp/umpire/no-error-wait-any_src3
+teshsuite/smpi/isp/umpire/no-error-wait-any_src4
+teshsuite/smpi/isp/umpire/no-error-waitall
+teshsuite/smpi/isp/umpire/no-error-waitall-any_src
+teshsuite/smpi/isp/umpire/no-error-waitall-any_src2
+teshsuite/smpi/isp/umpire/no-error-waitall-any_src3
+teshsuite/smpi/isp/umpire/no-error-waitany
+teshsuite/smpi/isp/umpire/no-error-waitany-any_src
+teshsuite/smpi/isp/umpire/no-error-waitany-any_src2
+teshsuite/smpi/isp/umpire/no-error-waitany-any_src3
+teshsuite/smpi/isp/umpire/no-error-waitany2
+teshsuite/smpi/isp/umpire/no-error2
+teshsuite/smpi/isp/umpire/no-error3
+teshsuite/smpi/isp/umpire/no-error3-any_src
+teshsuite/smpi/isp/umpire/no-error4
+teshsuite/smpi/isp/umpire/no-error4-any_src
+teshsuite/smpi/isp/umpire/op-no-error
+teshsuite/smpi/isp/umpire/op-no-free
+teshsuite/smpi/isp/umpire/partial-recv
+teshsuite/smpi/isp/umpire/partial-recv-exhaustive
+teshsuite/smpi/isp/umpire/partial-recv-persistent
+teshsuite/smpi/isp/umpire/partial-recv-persistent2
+teshsuite/smpi/isp/umpire/partial-recv-persistent3
+teshsuite/smpi/isp/umpire/partial-recv-persistent4
+teshsuite/smpi/isp/umpire/persistent
+teshsuite/smpi/isp/umpire/persistent2
+teshsuite/smpi/isp/umpire/persistent3
+teshsuite/smpi/isp/umpire/persistent4
+teshsuite/smpi/isp/umpire/probe-any_src-types-can-mismatch
+teshsuite/smpi/isp/umpire/probe-any_tag-types-can-mismatch
+teshsuite/smpi/isp/umpire/probe-deadlock
+teshsuite/smpi/isp/umpire/pt2pt-byte-int-mismatch
+teshsuite/smpi/isp/umpire/remote_group-no-error
+teshsuite/smpi/isp/umpire/send-recv-ok
+teshsuite/smpi/isp/umpire/sendrecv-deadlock
+teshsuite/smpi/isp/umpire/temp.txt
+teshsuite/smpi/isp/umpire/tmpzLRJML
+teshsuite/smpi/isp/umpire/type-commit-twice
+teshsuite/smpi/isp/umpire/type-no-error
+teshsuite/smpi/isp/umpire/type-no-error-exhaustive
+teshsuite/smpi/isp/umpire/type-no-error-exhaustive-with-isends
+teshsuite/smpi/isp/umpire/type-no-free
+teshsuite/smpi/isp/umpire/type-no-free-exhaustive
+teshsuite/smpi/isp/umpire/type-no-free2
+teshsuite/smpi/isp/umpire/type-no-free3
+teshsuite/smpi/isp/umpire/wait-deadlock
+teshsuite/smpi/isp/umpire/waitall-deadlock
+teshsuite/smpi/isp/umpire/waitany-deadlock
teshsuite/smpi/bcast/bcast_coll
teshsuite/smpi/compute/compute2
teshsuite/smpi/compute/compute3
ENDIF()
+# Register one model-checker tesh test per selected umpire example.
+# Only the subset with a .tesh oracle (deadlock / no-error cases) is listed;
+# the list must stay in sync with the foreach() in teshsuite/smpi/isp/umpire/CMakeLists.txt.
+if(enable_smpi AND enable_smpi_ISP_testsuite)
+FOREACH (tesh
+  any_src-can-deadlock10
+  any_src-can-deadlock4
+  any_src-can-deadlock5
+  any_src-can-deadlock6
+  any_src-waitall-deadlock2
+  any_src-waitall-deadlock3
+  any_src-waitany-deadlock2
+  any_src-waitany-deadlock
+  any_src-wait-deadlock
+  basic-deadlock-comm_create
+  basic-deadlock-comm_dup
+  basic-deadlock-comm_split
+  basic-deadlock
+  bcast-deadlock
+  collective-misorder-allreduce
+  collective-misorder
+  complex-deadlock
+  deadlock-config
+  finalize-deadlock
+  irecv-deadlock
+  no-error2
+  no-error3-any_src
+  no-error3
+  no-error
+  )
+  # --cd runs the test from the umpire source dir so the relative paths
+  # inside each .tesh file (hostfile, platform) resolve correctly.
+  ADD_TESH(umpire_${tesh} --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/isp/umpire ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/isp/umpire/${tesh}.tesh)
+ENDFOREACH()
+endif()
+
+
## OTHER ##
ADD_TEST(testall ${CMAKE_BINARY_DIR}/src/testall)
teshsuite/smpi/shared/CMakeLists.txt
teshsuite/smpi/struct/CMakeLists.txt
teshsuite/smpi/vector/CMakeLists.txt
+ teshsuite/smpi/isp/umpire/CMakeLists.txt
teshsuite/smpi/mpich3-test/CMakeLists.txt
teshsuite/smpi/mpich3-test/attr/CMakeLists.txt
teshsuite/smpi/mpich3-test/coll/CMakeLists.txt
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/shared)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/struct)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/vector)
+
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/isp/umpire)
+
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/attr)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll)
option(enable_smpi "Whether SMPI in included in library." on)
option(enable_smpi_MPICH3_testsuite "Whether the test suite form MPICH 3 should be built" off)
endif()
+option(enable_smpi_ISP_testsuite "Whether the test suite from ISP should be built" off)
if(enable_scala AND NOT enable_java)
message(WARNING "For using scala you must turn java on with command:\ncmake -Denable_java=on .")
--- /dev/null
+cmake_minimum_required(VERSION 2.6)
+
+# All compilable C sources of the ISP umpire test suite
+# (taken from <http://www.cs.utah.edu/formal_verification/ISP_Tests/>).
+# One executable is built per entry below.
+set(umpire_src
+  abort1.c
+  abort2.c
+  abort3.c
+  abort.c
+  any_src-can-deadlock10.c
+  any_src-can-deadlock10_mod.c
+  any_src-can-deadlock11.c
+  any_src-can-deadlock2.c
+  any_src-can-deadlock3.c
+  any_src-can-deadlock4.c
+  any_src-can-deadlock4_mod.c
+  any_src-can-deadlock5.c
+  any_src-can-deadlock5_mod.c
+  any_src-can-deadlock6.c
+  any_src-can-deadlock6_mod.c
+  any_src-can-deadlock7.c
+  any_src-can-deadlock8.c
+  any_src-can-deadlock9.c
+  any_src-can-deadlock.c
+  any_src-deadlock.c
+  any_src-waitall-deadlock2.c
+  any_src-waitall-deadlock3.c
+  any_src-waitall-deadlock.c
+  any_src-waitany-deadlock2.c
+  any_src-waitany-deadlock.c
+  any_src-wait-deadlock2.c
+  any_src-wait-deadlock.c
+  basic-deadlock.c
+  basic-deadlock-cart_create.c
+  basic-deadlock-cart_sub.c
+  basic-deadlock-comm_create.c
+  basic-deadlock-comm_dup.c
+  basic-deadlock-comm_split.c
+  basic-deadlock-graph_create.c
+  basic-deadlock-intercomm_create.c
+  basic-deadlock-intercomm_merge.c
+  bcast-deadlock.c
+  change-send-buffer.c
+  change-send-buffer-exhaustive.c
+  change-send-buffer-type-exhaustive.c
+  collective-exhaustive-byte-int-mismatch.c
+  collective-exhaustive-no-error.c
+  collective-misorder2.c
+  collective-misorder-allreduce.c
+  collective-misorder.c
+  comm-bcast-deadlock.c
+  comm-deadlock.c
+  comm-dup-no-error.c
+  comm-dup-no-free2.c
+  comm-dup-no-free.c
+  comm-simple.c
+  comm-split-no-free.c
+  comm-translate-ranks.c
+  complex-deadlock.c
+  deadlock-config_blocking.c
+  deadlock-config.c
+  dropped-req.c
+  errhandler-no-error.c
+  errhandler-no-free.c
+  finalize-deadlock.c
+  group-no-error2.c
+  group-no-error3.c
+  group-no-error.c
+  group-no-error-exhaustive.c
+  group-no-free2.c
+  group-no-free3.c
+  group-no-free.c
+  group-no-free-exhaustive.c
+  hello.c
+  intercomm_create-deadlock2.c
+  intercomm_create-deadlock3.c
+  intercomm_create-deadlock4.c
+  intercomm_create-deadlock.c
+  intercomm_create-no-error.c
+  intercomm_merge-deadlock.c
+  irecv-deadlock.c
+  irecv-isend-ok2.c
+  irecv-isend-ok.c
+  lost-request2.c
+  lost-request3.c
+  lost-request.c
+  lost-request-waitall.c
+  no-error2.c
+  no-error3-any_src.c
+  no-error3.c
+  no-error4-any_src.c
+  no-error4.c
+  no-error-any_src.c
+  no-error.c
+  no-error-derived-comms.c
+  no-error-interleaved-isend.c
+  no-error-persistent-all-completions.c
+  no-error-persistent.c
+  no-error-persistent-test.c
+  no-error-persistent-testpartial.c
+  no-error-persistent-waitpartial.c
+  no-error-probe-any_src.c
+  no-error-probe-any_tag.c
+  no-error-probe.c
+  no-error-testall.c
+  no-error-testany.c
+  no-error-test.c
+  no-error-vector-isend.c
+  # no-error-wait2.c # Does not compile.
+  no-error-waitall-any_src2.c
+  no-error-waitall-any_src3.c
+  no-error-waitall-any_src.c
+  no-error-waitall.c
+  no-error-waitany2.c
+  no-error-waitany-any_src2.c
+  no-error-waitany-any_src3.c
+  no-error-waitany-any_src.c
+  no-error-waitany.c
+  no-error-wait-any_src2.c
+  no-error-wait-any_src3.c
+  no-error-wait-any_src4.c
+  no-error-wait-any_src.c
+  no-error-wait.c
+  op-no-error.c
+  op-no-free.c
+  partial-recv.c
+  partial-recv-exhaustive.c
+  partial-recv-persistent2.c
+  partial-recv-persistent3.c
+  partial-recv-persistent4.c
+  partial-recv-persistent.c
+  persistent2.c
+  persistent3.c
+  persistent4.c
+  persistent.c
+  probe-any_src-types-can-mismatch.c
+  probe-any_tag-types-can-mismatch.c
+  probe-deadlock.c
+  pt2pt-byte-int-mismatch.c
+  remote_group-no-error.c
+  sendrecv-deadlock.c
+  send-recv-ok.c
+  type-commit-twice.c
+  type-no-error.c
+  type-no-error-exhaustive.c
+  type-no-error-exhaustive-with-isends.c
+  type-no-free2.c
+  type-no-free3.c
+  type-no-free.c
+  type-no-free-exhaustive.c
+  waitall-deadlock.c
+  waitany-deadlock.c
+  wait-deadlock.c
+  )
+
+# Build the absolute-path list of sources; exported to the parent scope
+# below so the release tarball (dist) picks the files up.
+set(sources "")
+FOREACH(s ${umpire_src})
+  set(sources ${sources} ${CMAKE_CURRENT_SOURCE_DIR}/${s})
+ENDFOREACH()
+
+# Relative paths of the .tesh oracles shipped with the suite; this list must
+# stay in sync with the ADD_TESH loop in the top-level test registration.
+set(umpire_tesh "")
+foreach(tesh
+  any_src-can-deadlock10
+  any_src-can-deadlock4
+  any_src-can-deadlock5
+  any_src-can-deadlock6
+  any_src-waitall-deadlock2
+  any_src-waitall-deadlock3
+  any_src-waitany-deadlock2
+  any_src-waitany-deadlock
+  any_src-wait-deadlock
+  basic-deadlock-comm_create
+  basic-deadlock-comm_dup
+  basic-deadlock-comm_split
+  basic-deadlock
+  bcast-deadlock
+  collective-misorder-allreduce
+  collective-misorder
+  complex-deadlock
+  deadlock-config
+  finalize-deadlock
+  irecv-deadlock
+  no-error2
+  no-error3-any_src
+  no-error3
+  no-error
+  )
+  set(umpire_tesh ${umpire_tesh} teshsuite/smpi/isp/umpire/${tesh}.tesh)
+ENDFOREACH()
+
+# Export to the parent scope so the dist machinery sees these files.
+# no-error-wait2.c is distributed but not built (it does not compile).
+set(examples_src ${examples_src} ${sources} PARENT_SCOPE)
+set(txt_files ${txt_files} ${CMAKE_CURRENT_SOURCE_DIR}/README PARENT_SCOPE)
+set(EXTRA_DIST ${EXTRA_DIST} ${CMAKE_CURRENT_SOURCE_DIR}/no-error-wait2.c PARENT_SCOPE)
+set(tesh_files ${tesh_files} ${umpire_tesh} PARENT_SCOPE)
+
+if(enable_smpi AND enable_smpi_ISP_testsuite)
+  # Compile through the smpicc wrapper (except on Windows, where the SMPI
+  # entry point is force-included instead) so MPI calls resolve to SimGrid.
+  if(WIN32)
+    set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
+  else()
+    set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+    set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
+  endif()
+
+  set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+
+  include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
+
+  # One executable per source file, named after the file without its .c suffix.
+  FOREACH (s ${umpire_src})
+    STRING(REGEX REPLACE "\\.c$" "" exe ${s})
+    add_executable(${exe} ${s})
+    target_link_libraries(${exe} simgrid)
+    # Upstream ISP sources trigger warnings; do not let -Werror break the build.
+    set_source_files_properties(${s} PROPERTIES COMPILE_FLAGS "-Wno-error")
+  ENDFOREACH(s)
+endif()
--- /dev/null
+ISP tests taken from <http://www.cs.utah.edu/formal_verification/ISP_Tests/>.
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Oct 29 2002 */
+/* abort.c -- call MPI abort in all tasks... */
+
+
+#include <stdio.h>
+#include "mpi.h"
+
+
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  char processor_name[128];
+  int namelen = 128;
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  if (nprocs < 2) {
+    printf ("not enough tasks\n");
+  }
+  else {
+    printf ("(%d) Aborting\n", rank);
+    MPI_Abort (MPI_COMM_WORLD, -1);
+  }
+
+  /* With >= 2 tasks every rank calls MPI_Abort above, so the code below is
+     only reached in the under-provisioned case. */
+  MPI_Barrier (MPI_COMM_WORLD);
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Oct 29 2002 */
+/* abort1.c -- call MPI abort in all tasks other than zero... */
+
+
+#include <stdio.h>
+#include "mpi.h"
+
+
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  char processor_name[128];
+  int namelen = 128;
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  if (nprocs < 2) {
+    printf ("not enough tasks\n");
+  }
+  else {
+    /* every non-zero rank aborts; rank 0 falls through to the barrier */
+    if (rank) {
+      printf ("(%d) Aborting\n", rank);
+      MPI_Abort (MPI_COMM_WORLD, -1);
+    }
+  }
+
+  /* NOTE(review): with >= 2 tasks rank 0 enters this barrier while its peers
+     have aborted -- presumably the runtime tears the job down; that is the
+     scenario this test exercises. */
+  MPI_Barrier (MPI_COMM_WORLD);
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Oct 29 2002 */
+/* abort2.c -- call MPI abort in task zero... */
+
+
+#include <stdio.h>
+#include "mpi.h"
+
+
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  char processor_name[128];
+  int namelen = 128;
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  if (nprocs < 2) {
+    printf ("not enough tasks\n");
+  }
+  else {
+    /* only the manager task aborts; all other ranks proceed to the barrier */
+    if (rank == 0) {
+      printf ("(%d) Aborting\n", rank);
+      MPI_Abort (MPI_COMM_WORLD, -1);
+    }
+  }
+
+  /* NOTE(review): non-zero ranks block here after rank 0 aborted -- this is
+     the abort-in-manager scenario the test is designed to exercise. */
+  MPI_Barrier (MPI_COMM_WORLD);
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Oct 29 2002 */
+/* abort3.c -- call MPI abort in task one... */
+
+
+#include <stdio.h>
+#include "mpi.h"
+
+
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  char processor_name[128];
+  int namelen = 128;
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  if (nprocs < 3) {
+    /* ensure that we have a non-manager that doesn't abort... */
+    printf ("not enough tasks\n");
+  }
+  else {
+    /* only task 1 aborts: rank 0 (manager) and rank 2+ keep running */
+    if (rank == 1) {
+      printf ("(%d) Aborting\n", rank);
+      MPI_Abort (MPI_COMM_WORLD, -1);
+    }
+  }
+
+  /* NOTE(review): with >= 3 tasks the surviving ranks block here after task 1
+     aborted -- the abort-in-a-worker scenario this test exercises. */
+  MPI_Barrier (MPI_COMM_WORLD);
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Thu Jan 3 2002 */
+/* any_src-can-deadlock.c -- deadlock occurs if task 0 receives */
+/* from task 2 first; pretty random */
+/* as to which is received first... */
+
+#include <stdio.h>
+#include <string.h>  /* for memset; was missing, leaving memset implicitly declared */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  char processor_name[128];
+  int namelen = 128;
+  int buf0[buf_size];
+  int buf1[buf_size];
+  MPI_Status status;
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  if (nprocs < 3)
+    {
+      printf ("not enough tasks\n");
+    }
+  else if (rank == 0)
+    {
+      /* wildcard receive: may match the send from rank 1 or rank 2 */
+      MPI_Recv (buf1, buf_size, MPI_INT,
+                MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf0, buf_size, MPI_INT,
+                MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+    }
+  else if (rank == 1)
+    {
+      /* NOTE(review): length is buf_size BYTES, not sizeof buf0, so only part
+         of the int array is initialized -- kept as in the upstream ISP test. */
+      memset (buf0, 0, buf_size);
+
+      MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+    }
+  else if (rank == 2)
+    {
+      memset (buf1, 1, buf_size);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+    }
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Sep 30 2003 */
+/* any_src-can-deadlock10.c -- deadlock occurs if task 0 receives */
+/* from task 1 first; sleeps generally */
+/* make order 1 before 2 with all task */
+/* 0 ops being posted before both 1 and 2 */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>  /* for memset; was missing, leaving memset implicitly declared */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  char processor_name[128];
+  int namelen = 128;
+  int buf0[buf_size];
+  int buf1[buf_size];
+  MPI_Status status;
+  MPI_Request req;
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  if (nprocs < 3)
+    {
+      printf ("not enough tasks\n");
+    }
+  else if (rank == 0)
+    {
+      /* wildcard irecv posted first: whichever of rank 1/2 it matches
+         determines whether the later blocking recvs can complete */
+      MPI_Irecv (buf0, buf_size, MPI_INT,
+                 MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+      MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf1, buf_size, MPI_INT,
+                MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Wait (&req, &status);
+    }
+  else if (rank == 1)
+    {
+      /* NOTE(review): length is buf_size BYTES, not sizeof buf0 -- kept as
+         in the upstream ISP test. */
+      memset (buf0, 0, buf_size);
+
+      // sleep (30);
+
+      MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+    }
+  else if (rank == 2)
+    {
+      memset (buf1, 1, buf_size);
+
+      // sleep (60);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+    }
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/any_src-can-deadlock10
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 26
+> [0.000000] [mc_global/INFO] Visited states = 26
+> [0.000000] [mc_global/INFO] Executed transitions = 25
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Sep 30 2003 */
+/* any_src-can-deadlock10_mod.c -- deadlock occurs if task 0 receives */
+/* from the wrong task first; sleeps generally force the order. */
+/* NOTE(review): same as any_src-can-deadlock10.c with the roles of */
+/* tasks 1 and 2 interchanged; original header named the wrong file. */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>  /* for memset; was missing, leaving memset implicitly declared */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  char processor_name[128];
+  int namelen = 128;
+  int buf0[buf_size];
+  int buf1[buf_size];
+  MPI_Status status;
+  MPI_Request req;
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  if (nprocs < 3)
+    {
+      printf ("not enough tasks\n");
+    }
+  else if (rank == 0)
+    {
+      /* wildcard irecv posted first; blocking traffic below is with rank 2 */
+      MPI_Irecv (buf0, buf_size, MPI_INT,
+                 MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+      MPI_Recv (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf1, buf_size, MPI_INT,
+                MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Wait (&req, &status);
+    }
+  else if (rank == 2)
+    {
+      /* NOTE(review): length is buf_size BYTES, not sizeof buf0 -- kept as
+         in the upstream ISP test. */
+      memset (buf0, 0, buf_size);
+
+      // sleep (30);
+
+      MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+    }
+  else if (rank == 1)
+    {
+      memset (buf1, 1, buf_size);
+
+      // sleep (60);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+    }
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 26 2003 */
+/* any_src-can-deadlock11.c -- deadlock occurs if task 0 receives */
+/* from task 2 first; sleeps generally */
+/* make order 2 before 1 with all task */
+/* 0 ops being posted before both 1 and 2 */
+/* same as any_src-can-deadlock10.c */
+/* except tasks 1 and 2 are interchanged */
+
+#include <stdio.h>
+#include <string.h>  /* for memset; was missing, leaving memset implicitly declared */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  char processor_name[128];
+  int namelen = 128;
+  int buf0[buf_size];
+  int buf1[buf_size];
+  MPI_Status status;
+  MPI_Request req;
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  if (nprocs < 3)
+    {
+      printf ("not enough tasks\n");
+    }
+  else if (rank == 0)
+    {
+      /* wildcard irecv posted first; blocking traffic below is with rank 2 */
+      MPI_Irecv (buf0, buf_size, MPI_INT,
+                 MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+      MPI_Recv (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf1, buf_size, MPI_INT,
+                MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Wait (&req, &status);
+    }
+  else if (rank == 2)
+    {
+      /* NOTE(review): length is buf_size BYTES, not sizeof buf0 -- kept as
+         in the upstream ISP test. */
+      memset (buf0, 0, buf_size);
+
+      //sleep (30);
+
+      MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+    }
+  else if (rank == 1)
+    {
+      memset (buf1, 1, buf_size);
+
+      //sleep (60);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+    }
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Thu Jan 3 2002 */
+/* any_src-can-deadlock2.c -- deadlock occurs if task 0 receives */
+/* from task 2 first; the likely outcome */
+/* because we sleep task 1 */
+/* (original header carried the wrong file name) */
+
+#include <stdio.h>
+#include <string.h>  /* for memset; was missing, leaving memset implicitly declared */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  char processor_name[128];
+  int namelen = 128;
+  int buf0[buf_size];
+  int buf1[buf_size];
+  MPI_Status status;
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  if (nprocs < 3)
+    {
+      printf ("not enough tasks\n");
+    }
+  else if (rank == 0)
+    {
+      /* wildcard receive: may match the send from rank 1 or rank 2 */
+      MPI_Recv (buf1, buf_size, MPI_INT,
+                MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf0, buf_size, MPI_INT,
+                MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+    }
+  else if (rank == 1)
+    {
+      /* NOTE(review): length is buf_size BYTES, not sizeof buf0 -- kept as
+         in the upstream ISP test. */
+      memset (buf0, 0, buf_size);
+
+      // sleep (60);
+
+      MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+    }
+  else if (rank == 2)
+    {
+      memset (buf1, 1, buf_size);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+    }
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Thu Jan 3 2002 */
+/* any_src-can-deadlock3.c -- deadlock occurs if task 0 receives */
+/* from task 2 first; the unlikely outcome */
+/* because we sleep task 2 */
+/* (original header carried the wrong file name) */
+
+#include <stdio.h>
+#include <string.h>  /* for memset; was missing, leaving memset implicitly declared */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  char processor_name[128];
+  int namelen = 128;
+  int buf0[buf_size];
+  int buf1[buf_size];
+  MPI_Status status;
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  if (nprocs < 3)
+    {
+      printf ("not enough tasks\n");
+    }
+  else if (rank == 0)
+    {
+      /* wildcard receive: may match the send from rank 1 or rank 2 */
+      MPI_Recv (buf1, buf_size, MPI_INT,
+                MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf0, buf_size, MPI_INT,
+                MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+    }
+  else if (rank == 1)
+    {
+      /* NOTE(review): length is buf_size BYTES, not sizeof buf0 -- kept as
+         in the upstream ISP test. */
+      memset (buf0, 0, buf_size);
+
+      MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+      MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+    }
+  else if (rank == 2)
+    {
+      memset (buf1, 1, buf_size);
+
+      //sleep (60);
+
+      MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+    }
+
+  MPI_Barrier (MPI_COMM_WORLD);
+
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 26 2003 */
+/* any_src-can-deadlock4.c -- deadlock occurs if task 0 receives */
+/* from task 1 first; pretty random */
+/* as to which is received first... */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ /* Rank 0 posts a wildcard Irecv, then a named Recv from rank 1, a Send to
+    rank 1, a wildcard Recv, and finally Wait. If the wildcard Irecv matches
+    rank 1's FIRST send, the named Recv from 1 starves and the run deadlocks.
+    Intentional UMPIRE test -- the tesh golden trace depends on this exact
+    sequence of operations; do not alter behavior. */
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Wait (&req, &status);
+ }
+ else if (rank == 1)
+ {
+ /* NOTE(review): memset length is buf_size BYTES, not buf_size ints, and
+    <string.h> is not included; preserved from upstream -- payload unused. */
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 2)
+ {
+ /* same byte-count caveat as the memset above */
+ memset (buf1, 1, buf_size);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): falls off the end of main; C99 implies return 0. */
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/any_src-can-deadlock4
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 26
+> [0.000000] [mc_global/INFO] Visited states = 26
+> [0.000000] [mc_global/INFO] Executed transitions = 25
+> Aborted
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 26 2003 */
+/* any_src-can-deadlock4.c -- deadlock occurs if task 0 receives */
+/* from task 1 first; pretty random */
+/* as to which is received first... */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ /* Modified deadlock4: identical pattern but the roles of ranks 1 and 2 are
+    interchanged (rank 0's named Recv/Send target rank 2). Deadlocks when the
+    wildcard Irecv steals rank 2's first send. Intentional UMPIRE test; keep
+    behavior byte-for-byte. */
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Wait (&req, &status);
+ }
+ else if (rank == 2)
+ {
+ /* NOTE(review): memset length is buf_size BYTES, not buf_size ints, and
+    <string.h> is not included; preserved from upstream -- payload unused. */
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ /* same byte-count caveat as the memset above */
+ memset (buf1, 1, buf_size);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): falls off the end of main; C99 implies return 0. */
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 26 2003 */
+/* any_src-can-deadlock5.c -- deadlock occurs if task 0 receives */
+/* from task 1 first; sleeps generally */
+/* make order 1 before 2 with all task */
+/* 0 ops being posted after both 1 and 2 */
+
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ /* deadlock5 variant of the Irecv-wildcard pattern: same operations as
+    deadlock4, with (commented-out) sleeps that in real runs would order
+    rank 1 before rank 2 and post all rank-0 ops after both senders.
+    Deadlocks when the wildcard Irecv matches rank 1's first send.
+    Intentional UMPIRE test; do not alter behavior. */
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ //sleep (60);
+
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Wait (&req, &status);
+ }
+ else if (rank == 1)
+ {
+ /* NOTE(review): memset length is buf_size BYTES, not buf_size ints, and
+    <string.h> is not included; preserved from upstream -- payload unused. */
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 2)
+ {
+ /* same byte-count caveat as the memset above */
+ memset (buf1, 1, buf_size);
+
+ // sleep (30);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): falls off the end of main; C99 implies return 0. */
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/any_src-can-deadlock5
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 26
+> [0.000000] [mc_global/INFO] Visited states = 26
+> [0.000000] [mc_global/INFO] Executed transitions = 25
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 26 2003 */
+/* any_src-can-deadlock5.c -- deadlock occurs if task 0 receives */
+/* from task 1 first; sleeps generally */
+/* make order 1 before 2 with all task */
+/* 0 ops being posted after both 1 and 2 */
+
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ /* Modified deadlock5: same pattern with ranks 1 and 2 interchanged
+    (rank 0's named Recv/Send target rank 2; the commented sleep moves to
+    rank 1). Deadlocks when the wildcard Irecv steals rank 2's first send.
+    Intentional UMPIRE test; do not alter behavior. */
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ //sleep (60);
+
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Wait (&req, &status);
+ }
+ else if (rank == 2)
+ {
+ /* NOTE(review): memset length is buf_size BYTES, not buf_size ints, and
+    <string.h> is not included; preserved from upstream -- payload unused. */
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ /* same byte-count caveat as the memset above */
+ memset (buf1, 1, buf_size);
+
+ // sleep (30);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): falls off the end of main; C99 implies return 0. */
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 26 2003 */
+/* any_src-can-deadlock5.c -- deadlock occurs if task 0 receives */
+/* from task 1 first; sleeps generally */
+/* make order 2 before 1 with all task */
+/* 0 ops being posted after both 1 and 2 */
+
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ /* deadlock6 variant: same Irecv-wildcard pattern, with (commented-out)
+    sleeps that in real runs would order rank 2 before rank 1 and post all
+    rank-0 ops after both senders. Deadlocks when the wildcard Irecv matches
+    rank 1's first send. Intentional UMPIRE test; do not alter behavior. */
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+// sleep (60);
+
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Wait (&req, &status);
+ }
+ else if (rank == 1)
+ {
+ /* NOTE(review): memset length is buf_size BYTES, not buf_size ints, and
+    <string.h> is not included; preserved from upstream -- payload unused. */
+ memset (buf0, 0, buf_size);
+
+ // sleep (30);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 2)
+ {
+ /* same byte-count caveat as the memset above */
+ memset (buf1, 1, buf_size);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): falls off the end of main; C99 implies return 0. */
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/any_src-can-deadlock6
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 26
+> [0.000000] [mc_global/INFO] Visited states = 26
+> [0.000000] [mc_global/INFO] Executed transitions = 25
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 26 2003 */
+/* any_src-can-deadlock5.c -- deadlock occurs if task 0 receives */
+/* from task 1 first; sleeps generally */
+/* make order 2 before 1 with all task */
+/* 0 ops being posted after both 1 and 2 */
+
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ /* Modified deadlock6: same pattern with ranks 1 and 2 interchanged
+    (rank 0's named Recv/Send target rank 2; the commented sleep moves to
+    rank 2). Deadlocks when the wildcard Irecv steals rank 2's first send.
+    Intentional UMPIRE test; do not alter behavior. */
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+// sleep (60);
+
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Wait (&req, &status);
+ }
+ else if (rank == 2)
+ {
+ /* NOTE(review): memset length is buf_size BYTES, not buf_size ints, and
+    <string.h> is not included; preserved from upstream -- payload unused. */
+ memset (buf0, 0, buf_size);
+
+ // sleep (30);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ /* same byte-count caveat as the memset above */
+ memset (buf1, 1, buf_size);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): falls off the end of main; C99 implies return 0. */
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 26 2003 */
+/* any_src-can-deadlock7.c -- deadlock occurs if task 0 receives */
+/* from task 2 first; pretty random */
+/* as to which is received first... */
+/* same as any_src-can-deadlock4.c */
+/* except tasks 1 and 2 are interchanged */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ /* deadlock7: same as deadlock4 except tasks 1 and 2 are interchanged
+    (rank 0's named Recv/Send target rank 2). Deadlocks when the wildcard
+    Irecv steals rank 2's first send. Intentional UMPIRE test; do not alter
+    behavior. */
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Wait (&req, &status);
+ }
+ else if (rank == 2)
+ {
+ /* NOTE(review): memset length is buf_size BYTES, not buf_size ints, and
+    <string.h> is not included; preserved from upstream -- payload unused. */
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ /* same byte-count caveat as the memset above */
+ memset (buf1, 1, buf_size);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): falls off the end of main; C99 implies return 0. */
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 26 2003 */
+/* any_src-can-deadlock8.c -- deadlock occurs if task 0 receives */
+/* from task 2 first; sleeps generally */
+/* make order 1 before 2 with all task */
+/* 0 ops being posted after both 1 and 2 */
+/* same as any_src-can-deadlock6.c */
+/* except tasks 1 and 2 are interchanged */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ /* deadlock8: same as deadlock6 except tasks 1 and 2 are interchanged;
+    commented-out sleeps would order rank 1 before rank 2 with all rank-0
+    ops posted after both senders. Deadlocks when the wildcard Irecv steals
+    rank 2's first send. Intentional UMPIRE test; do not alter behavior. */
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+// sleep (60);
+
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Wait (&req, &status);
+ }
+ else if (rank == 2)
+ {
+ /* NOTE(review): memset length is buf_size BYTES, not buf_size ints, and
+    <string.h> is not included; preserved from upstream -- payload unused. */
+ memset (buf0, 0, buf_size);
+
+ // sleep (30);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ /* same byte-count caveat as the memset above */
+ memset (buf1, 1, buf_size);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): falls off the end of main; C99 implies return 0. */
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 26 2003 */
+/* any_src-can-deadlock9.c -- deadlock occurs if task 0 receives */
+/* from task 2 first; sleeps generally */
+/* make order 2 before 1 with all task */
+/* 0 ops being posted after both 1 and 2 */
+/* same as any_src-can-deadlock5.c */
+/* except tasks 1 and 2 are interchanged */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ /* deadlock9: same as deadlock5 except tasks 1 and 2 are interchanged;
+    commented-out sleeps would order rank 2 before rank 1 with all rank-0
+    ops posted after both senders. Deadlocks when the wildcard Irecv steals
+    rank 2's first send. Intentional UMPIRE test; do not alter behavior. */
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ //sleep (60);
+
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Wait (&req, &status);
+ }
+ else if (rank == 2)
+ {
+ /* NOTE(review): memset length is buf_size BYTES, not buf_size ints, and
+    <string.h> is not included; preserved from upstream -- payload unused. */
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ /* same byte-count caveat as the memset above */
+ memset (buf1, 1, buf_size);
+
+ // sleep (30);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): falls off the end of main; C99 implies return 0. */
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ /* Two-task wildcard test: rank 0 receives once via MPI_ANY_SOURCE and once
+    from rank 1; rank 1 sends twice. Exercises wildcard matching order under
+    the model checker. (The stale "no-error.c" header comment above is
+    inherited from upstream UMPIRE.) Do not alter behavior. */
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 2)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ MPI_Recv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &status);
+ }
+ else if (rank == 1)
+ {
+ /* NOTE(review): memset length is buf_size BYTES, not buf_size ints, and
+    <string.h> is not included; preserved from upstream -- payload unused. */
+ memset (buf0, 0, buf_size);
+ memset (buf1, 1, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): falls off the end of main; C99 implies return 0. */
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+/* Umpire/ISP model-checking test case.  Both rank 0 and rank 1 post a
+   wildcard MPI_Irecv and then block in MPI_Wait BEFORE either posts its
+   send, so neither receive can ever be satisfied: a guaranteed deadlock
+   that the model checker is expected to detect.  The inverted
+   wait-before-send order is intentional -- do not "fix" it.  */
+
+#include <stdio.h>
+/* NOTE(review): memset() is used without #include <string.h> (implicit
+   declaration).  */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 2)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ memset (buf0, 0, buf_size);
+
+ /* Wildcard receive, then wait for it BEFORE sending: rank 1 does the
+    mirror image, so both ranks wait on messages that are never sent.  */
+ MPI_Irecv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Wait (&req, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ memset (buf1, 1, buf_size);
+
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+
+ MPI_Wait (&req, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/any_src-wait-deadlock
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 23
+> [0.000000] [mc_global/INFO] Visited states = 23
+> [0.000000] [mc_global/INFO] Executed transitions = 22
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+/* Umpire/ISP model-checking test case.  Each of ranks 0 and 1 posts a
+   wildcard MPI_Irecv, then exchanges Send/Recv/Send with the other rank,
+   and finally waits on the outstanding request.  */
+
+#include <stdio.h>
+/* NOTE(review): memset() is used without #include <string.h>.  */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ int i;
+ MPI_Status status;
+ MPI_Request req;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 2)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ memset (buf0, 0, buf_size);
+
+ MPI_Irecv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+ /* NOTE(review): MPI_Request is an opaque handle; printing it with %d
+    is only meaningful where MPI_Request happens to be an int (e.g.
+    MPICH).  Under implementations using pointer handles this is
+    undefined behavior.  Kept as-is: observing the handle value before
+    and after MPI_Wait appears to be the point of this test.  */
+ printf("Proc 0: Request number - %d\n",req);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+
+ MPI_Wait (&req, &status);
+ printf("Proc 0: Request number after wait test- %d\n",req);
+ }
+ else if (rank == 1)
+ {
+ memset (buf1, 1, buf_size);
+
+ /* NOTE(review): buf1 has a pending wildcard receive posted on it and
+    is then passed to MPI_Send below -- using a buffer of an active
+    receive is erroneous per the MPI standard.  Presumably deliberate
+    for this error-detection test; confirm against the suite intent.  */
+ MPI_Irecv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &req);
+ printf("Proc 1: Request number - %d\n",req);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Wait (&req, &status);
+ printf("Proc 1: Request number after wait test- %d\n",req);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+/* Umpire/ISP model-checking test case: any_src waitall deadlock.
+   Rank 0 waits (MPI_Waitall) for TWO wildcard messages before sending
+   anything, but rank 2 receives from rank 0 before sending -- so only
+   rank 1's message arrives and every rank blocks.  The cycle is the
+   intended error; do not reorder the MPI calls.  */
+
+#include <stdio.h>
+/* NOTE(review): memset() is used without #include <string.h>.  */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status statuses[2];
+ MPI_Request reqs[2];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ /* this code is very similar to no-error-waitall-any_src.c */
+ /* but deadlocks since task 2's send and recv are inverted... */
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[0]);
+
+ MPI_Irecv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[1]);
+
+ /* Blocks forever: needs two tag-0 messages, rank 2's is never sent. */
+ MPI_Waitall (2, reqs, statuses);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 1, 1, MPI_COMM_WORLD);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 2, 2, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 1, MPI_COMM_WORLD, statuses);
+ }
+ else if (rank == 2)
+ {
+ /* Receiving before sending closes the dependency cycle with rank 0. */
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 2, MPI_COMM_WORLD, statuses);
+
+ memset (buf1, 1, buf_size);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+/* Umpire/ISP model-checking test case: two-rank waitall deadlock.
+   Rank 0's MPI_Waitall needs two tag-0 messages, but rank 1 sends its
+   second one only after receiving from rank 0 -- which rank 0 sends only
+   after the Waitall completes.  The circular wait is the intended error;
+   do not reorder the MPI calls.  */
+
+#include <stdio.h>
+/* NOTE(review): memset() is used without #include <string.h>.  */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status statuses[2];
+ MPI_Request reqs[2];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ /* this code is very similar to no-error-waitall-any_src.c */
+ /* but deadlocks since task 2's send and recv are inverted... */
+ if (nprocs < 2)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[0]);
+
+ MPI_Irecv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[1]);
+
+ /* Blocks: the second message it waits for is gated on the send below. */
+ MPI_Waitall (2, reqs, statuses);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 1, 1, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 1, MPI_COMM_WORLD, statuses);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/any_src-waitall-deadlock2
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 26
+> [0.000000] [mc_global/INFO] Visited states = 26
+> [0.000000] [mc_global/INFO] Executed transitions = 25
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+/* Umpire/ISP model-checking test case: waitall deadlock, 3-rank variant.
+   Rank 0 waits for two wildcard tag-0 messages, but rank 1's second tag-0
+   send to rank 0 comes only after it has received from rank 0, which
+   happens only after rank 0's Waitall completes -- a circular wait.  The
+   in-file comment marks the inversion; do not reorder the MPI calls.  */
+
+#include <stdio.h>
+/* NOTE(review): memset() is used without #include <string.h>.  */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status statuses[2];
+ MPI_Request reqs[2];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ /* this code is very similar to no-error-waitall-any_src.c */
+ /* but deadlocks since task 1's send and recv are inverted... */
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[0]);
+
+ MPI_Irecv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[1]);
+
+ /* Blocks: only one tag-0 message (rank 1's first Isend) can arrive. */
+ MPI_Waitall (2, reqs, statuses);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 1, 1, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ memset (buf0, 0, buf_size);
+
+ MPI_Isend (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &reqs[0]);
+
+ MPI_Isend (buf0, buf_size, MPI_INT, 2, 1, MPI_COMM_WORLD, &reqs[1]);
+
+ MPI_Waitall (2, reqs, statuses);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 1, MPI_COMM_WORLD, statuses);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 2)
+ {
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 1, MPI_COMM_WORLD, statuses);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/any_src-waitall-deadlock3
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 29
+> [0.000000] [mc_global/INFO] Visited states = 29
+> [0.000000] [mc_global/INFO] Executed transitions = 28
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+/* Umpire/ISP model-checking test case: any_src waitany deadlock.
+   Rank 0's MPI_Waitany blocks until at least one wildcard receive
+   completes, but rank 1 sends only AFTER receiving from rank 0 -- which
+   rank 0 sends only after the Waitany returns.  Circular wait by design;
+   do not reorder the MPI calls.  */
+
+#include <stdio.h>
+/* NOTE(review): memset() is used without #include <string.h>.  */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ int done;
+ MPI_Status status;
+ MPI_Request reqs[2];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ /* this code is very similar to no-error-waitall-any_src.c */
+ /* but deadlocks since task 2's send and recv are inverted... */
+ if (nprocs < 2)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[0]);
+
+ MPI_Irecv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[1]);
+
+ /* Blocks: no tag-0 message can arrive until the send below happens. */
+ MPI_Waitany (2, reqs, &done, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 1, 1, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ memset (buf0, 0, buf_size);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/any_src-waitany-deadlock
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 24
+> [0.000000] [mc_global/INFO] Visited states = 24
+> [0.000000] [mc_global/INFO] Executed transitions = 23
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+/* Umpire/ISP model-checking test case: waitany deadlock, 3-rank variant.
+   Rank 0 needs TWO wildcard tag-0 messages (MPI_Waitall), but rank 1's
+   second tag-0 send to rank 0 is gated on receiving from rank 0, which is
+   gated on rank 0's Waitall -- circular wait by design.  Do not reorder
+   the MPI calls.  */
+
+#include <stdio.h>
+/* NOTE(review): memset() is used without #include <string.h>.  */
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ int done;
+ MPI_Status statuses[2];
+ MPI_Request reqs[2];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ /* this code is very similar to no-error-waitall-any_src.c */
+ /* but deadlocks since task 2's send and recv are inverted... */
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ MPI_Irecv (buf0, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[0]);
+
+ MPI_Irecv (buf1, buf_size, MPI_INT,
+ MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[1]);
+
+ MPI_Waitall (2, reqs, statuses);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 1, 1, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ memset (buf0, 0, buf_size);
+
+ MPI_Isend (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &reqs[0]);
+
+ MPI_Isend (buf0, buf_size, MPI_INT, 2, 1, MPI_COMM_WORLD, &reqs[1]);
+
+ /* Waitany completes one Isend; the other is waited on at the end. */
+ MPI_Waitany (2, reqs, &done, statuses);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 0, 1, MPI_COMM_WORLD, statuses);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+
+ /* Wait on whichever request Waitany did NOT complete. */
+ MPI_Wait (&reqs[(done + 1) % 2], statuses);
+ }
+ else if (rank == 2)
+ {
+ // sleep (60);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 1, MPI_COMM_WORLD, statuses);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/any_src-waitany-deadlock2
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 29
+> [0.000000] [mc_global/INFO] Visited states = 29
+> [0.000000] [mc_global/INFO] Executed transitions = 28
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+/* NOTE(review): header kept from the umpire template; this variant is a
+   deliberate deadlock test: ranks 0 and 1 of the cartesian communicator
+   both post a blocking MPI_Recv before the matching MPI_Send, so the
+   model checker must report a deadlock.  Do not "fix" the ordering. */
+
+#include <stdio.h>
+#include "mpi.h"
+/* NOTE(review): memset() is used below without #include <string.h>; the
+   build relies on an implicit declaration -- confirm before changing. */
+
+#define buf_size 128
+
+#define TWOD 2
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm comm;
+ /* NOTE(review): world_group, dgroup and granks are unused in this
+    variant (leftover from the comm_create template). */
+ MPI_Group world_group, dgroup;
+ int *granks;
+ int dims[TWOD], periods[TWOD];
+ int drank, dnprocs;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 4) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* create a 2 X nprocs/2 torus topology, allow reordering */
+ dims[0] = 2;
+ dims[1] = nprocs/2;
+ periods[0] = periods[1] = 1;
+ MPI_Cart_create (MPI_COMM_WORLD, TWOD, dims, periods, 1, &comm);
+
+ if (comm != MPI_COMM_NULL) {
+ MPI_Comm_size (comm, &dnprocs);
+ MPI_Comm_rank (comm, &drank);
+
+ if (dnprocs > 1) {
+ if (drank == 0) {
+ /* NOTE(review): length is buf_size BYTES (128), not
+    buf_size*sizeof(int), so only part of buf0 is zeroed --
+    upstream umpire quirk, TODO confirm intentional. */
+ memset (buf0, 0, buf_size);
+
+ /* head-to-head: Recv posted before the matching Send on both peers */
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, comm, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, comm);
+ }
+ else if (drank == 1) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, comm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, comm);
+ }
+ }
+ else {
+ printf ("(%d) Derived communicator too small (size = %d)\n",
+ rank, dnprocs);
+ }
+
+ MPI_Comm_free (&comm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+/* NOTE(review): header kept from the umpire template; this variant builds
+   a 1 X nprocs/2 sub-topology via MPI_Cart_sub, then ranks 0 and 1 of the
+   derived communicator each post a blocking MPI_Recv before the matching
+   MPI_Send -- an intentional deadlock for the model checker. */
+
+#include <stdio.h>
+#include "mpi.h"
+/* NOTE(review): memset() is used below without #include <string.h>; the
+   build relies on an implicit declaration -- confirm before changing. */
+
+#define buf_size 128
+
+#define TWOD 2
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm temp, comm;
+ /* NOTE(review): world_group, dgroup and granks are unused in this
+    variant (leftover from the comm_create template). */
+ MPI_Group world_group, dgroup;
+ int *granks;
+ int dims[TWOD], periods[TWOD], remain_dims[TWOD];
+ int drank, dnprocs;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 4) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* need to make cartesian communicator temporarily... */
+ /* create a 2 X nprocs/2 torus topology, allow reordering */
+ dims[0] = 2;
+ dims[1] = nprocs/2;
+ periods[0] = periods[1] = 1;
+ MPI_Cart_create (MPI_COMM_WORLD, TWOD, dims, periods, 1, &temp);
+
+ if (temp != MPI_COMM_NULL) {
+ /* create 2 1 X nprocs/2 topologies... */
+ /* remain_dims keeps only the second dimension of the torus */
+ remain_dims[0] = 0;
+ remain_dims[1] = 1;
+ MPI_Cart_sub (temp, remain_dims, &comm);
+ /* free up temporarily created cartesian communicator... */
+ MPI_Comm_free (&temp);
+ }
+ else {
+ comm = MPI_COMM_NULL;
+ }
+
+ if (comm != MPI_COMM_NULL) {
+ MPI_Comm_size (comm, &dnprocs);
+ MPI_Comm_rank (comm, &drank);
+
+ if (dnprocs > 1) {
+ if (drank == 0) {
+ /* NOTE(review): length is buf_size BYTES, not buf_size ints;
+    only part of buf0 is zeroed -- upstream umpire quirk. */
+ memset (buf0, 0, buf_size);
+
+ /* head-to-head: Recv posted before the matching Send on both peers */
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, comm, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, comm);
+ }
+ else if (drank == 1) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, comm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, comm);
+ }
+ }
+ else {
+ printf ("(%d) Derived communicator too small (size = %d)\n",
+ rank, dnprocs);
+ }
+
+ MPI_Comm_free (&comm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+/* NOTE(review): header kept from the umpire template; this variant creates
+   a communicator over the even world ranks via MPI_Comm_create, then ranks
+   0 and 1 of it each post a blocking MPI_Recv before the matching MPI_Send
+   -- an intentional deadlock for the model checker. */
+
+#include <stdio.h>
+#include "mpi.h"
+/* NOTE(review): memset(), malloc() and free() are used below without
+   #include <string.h> / <stdlib.h>; the build relies on implicit
+   declarations -- confirm before changing. */
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm comm;
+ MPI_Group world_group, dgroup;
+ int *granks;
+ int i, drank, dnprocs, newsize;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* group of the even world ranks: 0, 2, 4, ... */
+ MPI_Comm_group (MPI_COMM_WORLD, &world_group);
+ if (nprocs % 2)
+ newsize = (nprocs/2) + 1;
+ else
+ newsize = nprocs/2;
+ granks = (int *) malloc (sizeof(int) * (newsize));
+ for (i = 0; i < newsize; i++)
+ granks [i] = 2 * i;
+ MPI_Group_incl (world_group, newsize, granks, &dgroup);
+ MPI_Comm_create (MPI_COMM_WORLD, dgroup, &comm);
+ MPI_Group_free (&world_group);
+ MPI_Group_free (&dgroup);
+ free (granks);
+
+ /* odd world ranks get MPI_COMM_NULL and skip the exchange */
+ if (comm != MPI_COMM_NULL) {
+ MPI_Comm_size (comm, &dnprocs);
+ MPI_Comm_rank (comm, &drank);
+
+ if (dnprocs > 1) {
+ if (drank == 0) {
+ /* NOTE(review): length is buf_size BYTES, not buf_size ints;
+    only part of buf0 is zeroed -- upstream umpire quirk. */
+ memset (buf0, 0, buf_size);
+
+ /* head-to-head: Recv posted before the matching Send on both peers */
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, comm, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, comm);
+ }
+ else if (drank == 1) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, comm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, comm);
+ }
+ }
+ else {
+ printf ("(%d) Derived communicator too small (size = %d)\n",
+ rank, dnprocs);
+ }
+
+ MPI_Comm_free (&comm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/basic-deadlock-comm_create
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 23
+> [0.000000] [mc_global/INFO] Visited states = 23
+> [0.000000] [mc_global/INFO] Executed transitions = 22
+> (1) Got MPI_COMM_NULL
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+/* NOTE(review): header kept from the umpire template; this variant dups
+   MPI_COMM_WORLD and then world ranks 0 and 1 each post a blocking
+   MPI_Recv before the matching MPI_Send over the dup -- an intentional
+   deadlock for the model checker.  Do not "fix" the ordering. */
+
+#include <stdio.h>
+#include "mpi.h"
+/* NOTE(review): memset() is used below without #include <string.h>; the
+   build relies on an implicit declaration -- confirm before changing. */
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm comm;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 2) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ MPI_Comm_dup (MPI_COMM_WORLD, &comm);
+
+ if (rank == 0) {
+ /* NOTE(review): length is buf_size BYTES, not buf_size ints;
+    only part of buf0 is zeroed -- upstream umpire quirk. */
+ memset (buf0, 0, buf_size);
+
+ /* head-to-head: Recv posted before the matching Send on both peers */
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, comm, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, comm);
+ }
+ else if (rank == 1) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, comm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, comm);
+ }
+
+ MPI_Comm_free (&comm);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/basic-deadlock-comm_dup
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 23
+> [0.000000] [mc_global/INFO] Visited states = 23
+> [0.000000] [mc_global/INFO] Executed transitions = 22
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+/* NOTE(review): header kept from the umpire template; this variant splits
+   MPI_COMM_WORLD by rank parity (key nprocs - rank reverses rank order),
+   then ranks 0 and 1 of each half post a blocking MPI_Recv before the
+   matching MPI_Send -- an intentional deadlock for the model checker. */
+
+#include <stdio.h>
+#include "mpi.h"
+/* NOTE(review): memset() is used below without #include <string.h>; the
+   build relies on an implicit declaration -- confirm before changing. */
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm comm;
+ int drank, dnprocs;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &comm);
+
+ if (comm != MPI_COMM_NULL) {
+ MPI_Comm_size (comm, &dnprocs);
+ MPI_Comm_rank (comm, &drank);
+
+ if (dnprocs > 1) {
+ if (drank == 0) {
+ /* NOTE(review): length is buf_size BYTES, not buf_size ints;
+    only part of buf0 is zeroed -- upstream umpire quirk. */
+ memset (buf0, 0, buf_size);
+
+ /* head-to-head: Recv posted before the matching Send on both peers */
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, comm, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, comm);
+ }
+ else if (drank == 1) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, comm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, comm);
+ }
+ }
+ else {
+ printf ("(%d) Derived communicator too small (size = %d)\n",
+ rank, dnprocs);
+ }
+
+ MPI_Comm_free (&comm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/basic-deadlock-comm_split
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 35
+> [0.000000] [mc_global/INFO] Visited states = 35
+> [0.000000] [mc_global/INFO] Executed transitions = 34
+> (1) Derived communicator too small (size = 1)
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+/* NOTE(review): header kept from the umpire template; this variant builds
+   a 4-node graph communicator, then ranks 0 and 1 of it each post a
+   blocking MPI_Recv before the matching MPI_Send -- an intentional
+   deadlock for the model checker.  Do not "fix" the ordering. */
+
+#include <stdio.h>
+#include "mpi.h"
+/* NOTE(review): memset() is used below without #include <string.h>; the
+   build relies on an implicit declaration -- confirm before changing. */
+
+#define buf_size 128
+
+#define GRAPH_SZ 4
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm comm;
+ int drank, dnprocs;
+ /* graph adjacency in MPI's index/edges form (see MPI_Graph_create) */
+ int graph_index[] = { 2, 3, 4, 6 };
+ int graph_edges[] = { 1, 3, 0, 3, 0, 2 };
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 4) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* create the graph on p.268 MPI: The Complete Reference... */
+ MPI_Graph_create (MPI_COMM_WORLD, GRAPH_SZ,
+ graph_index, graph_edges, 1, &comm);
+
+ if (comm != MPI_COMM_NULL) {
+ MPI_Comm_size (comm, &dnprocs);
+ MPI_Comm_rank (comm, &drank);
+
+ if (dnprocs > 1) {
+ if (drank == 0) {
+ /* NOTE(review): length is buf_size BYTES, not buf_size ints;
+    only part of buf0 is zeroed -- upstream umpire quirk. */
+ memset (buf0, 0, buf_size);
+
+ /* head-to-head: Recv posted before the matching Send on both peers */
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, comm, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, comm);
+ }
+ else if (drank == 1) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, comm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, comm);
+ }
+ }
+ else {
+ printf ("(%d) Derived communicator too small (size = %d)\n",
+ rank, dnprocs);
+ }
+
+ MPI_Comm_free (&comm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+/* NOTE(review): header kept from the umpire template; this variant builds
+   an intercommunicator between the odd and even halves of MPI_COMM_WORLD,
+   then paired local/remote ranks each post a blocking MPI_Recv before the
+   matching MPI_Send -- an intentional deadlock for the model checker. */
+
+#include <stdio.h>
+#include "mpi.h"
+/* NOTE(review): memset() is used below without #include <string.h>; the
+   build relies on an implicit declaration -- confirm before changing. */
+
+#define buf_size 128
+
+#define INTERCOMM_CREATE_TAG 666
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm temp, intercomm;
+ int drank, dnprocs, rleader, rnprocs;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* need to make split communicator temporarily... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &temp);
+
+ if (temp != MPI_COMM_NULL) {
+ /* create an intercommunicator temporarily so can merge it... */
+ /* NOTE(review): rleader is the world rank of the OTHER half's leader
+    (local rank 0 under the nprocs - rank key) -- verify the parity
+    arithmetic against the split above before touching it. */
+ rleader = ((rank + nprocs) % 2) ? nprocs - 2 : nprocs - 1;
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD, rleader,
+ INTERCOMM_CREATE_TAG, &intercomm);
+
+ MPI_Comm_free (&temp);
+
+ if (intercomm != MPI_COMM_NULL) {
+ MPI_Comm_size (intercomm, &dnprocs);
+ MPI_Comm_rank (intercomm, &drank);
+ MPI_Comm_remote_size (intercomm, &rnprocs);
+
+ if (rnprocs > drank) {
+ if (rank % 2) {
+ /* NOTE(review): length is buf_size BYTES, not buf_size ints;
+    only part of the buffer is initialized -- upstream quirk. */
+ memset (buf1, 1, buf_size);
+
+ /* head-to-head across the intercomm: Recv before matching Send */
+ MPI_Recv (buf0, buf_size, MPI_INT, drank, 0, intercomm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, drank, 0, intercomm);
+ }
+ else {
+ memset (buf0, 0, buf_size);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, drank, 0, intercomm, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, drank, 0, intercomm);
+ }
+ }
+ else {
+ printf ("(%d) Intercomm too small (lrank = %d; remote size = %d)\n",
+ rank, drank, rnprocs);
+ }
+
+ MPI_Comm_free (&intercomm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+ else {
+ printf ("(%d) MPI_Comm_split got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+/* NOTE(review): header kept from the umpire template; this variant merges
+   an intercommunicator back into an intracommunicator, then ranks 0 and 1
+   of the merged communicator each post a blocking MPI_Recv before the
+   matching MPI_Send -- an intentional deadlock for the model checker. */
+
+#include <stdio.h>
+#include "mpi.h"
+/* NOTE(review): memset() and exit() are used below without
+   #include <string.h> / <stdlib.h>; the build relies on implicit
+   declarations -- confirm before changing. */
+
+#define buf_size 128
+
+#define INTERCOMM_CREATE_TAG 666
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm comm, temp, intercomm;
+ int drank, dnprocs, rleader;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* need to make split communicator temporarily... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &temp);
+
+ if (temp != MPI_COMM_NULL) {
+ /* create an intercommunicator temporarily so can merge it... */
+ /* NOTE(review): rleader is the world rank of the OTHER half's leader
+    (local rank 0 under the nprocs - rank key) -- verify the parity
+    arithmetic against the split above before touching it. */
+ rleader = ((rank + nprocs) % 2) ? nprocs - 2 : nprocs - 1;
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD, rleader,
+ INTERCOMM_CREATE_TAG, &intercomm);
+
+ MPI_Comm_free (&temp);
+
+ if (intercomm == MPI_COMM_NULL) {
+ printf ("(%d) MPI_Intercomm_Create returned MPI_COMM_NULL\n", rank);
+ printf ("(%d) Aborting...\n", rank);
+ MPI_Abort (MPI_COMM_WORLD, 666);
+ exit (666);
+ }
+
+ /* merge; rank % 2 orders the two groups in the result */
+ MPI_Intercomm_merge (intercomm, rank % 2, &comm);
+
+ MPI_Comm_free (&intercomm);
+
+ if (comm != MPI_COMM_NULL) {
+ MPI_Comm_size (comm, &dnprocs);
+ MPI_Comm_rank (comm, &drank);
+
+ if (dnprocs > 1) {
+ if (drank == 0) {
+ /* NOTE(review): length is buf_size BYTES, not buf_size ints;
+    only part of buf0 is zeroed -- upstream umpire quirk. */
+ memset (buf0, 0, buf_size);
+
+ /* head-to-head: Recv posted before the matching Send on both peers */
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, comm, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, comm);
+ }
+ else if (drank == 1) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, comm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, comm);
+ }
+ }
+ else {
+ printf ("(%d) Derived communicator too small (size = %d)\n",
+ rank, dnprocs);
+ }
+
+ MPI_Comm_free (&comm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+ else {
+ printf ("(%d) MPI_Comm_split got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* basic-deadlock.c -- deliberately deadlock: both ranks post a blocking
+ MPI_Recv before their MPI_Send (header previously mislabeled "no-error") */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+/* Purpose: minimal two-rank deadlock.  Ranks 0 and 1 each post a
+ blocking MPI_Recv for the other's message before sending their own,
+ so neither Send is ever reached.  The tesh script that runs this
+ binary expects the model checker to report DEAD-LOCK DETECTED. */
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 2)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ /* NOTE(review): memset is declared in <string.h>, which this file
+ does not include (only stdio.h and mpi.h) -- implicit declaration.
+ Also clears buf_size BYTES, not buf_size ints; harmless here. */
+ memset (buf0, 0, buf_size);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ memset (buf1, 1, buf_size);
+
+ /* Recv posted before the matching Send above -- intended deadlock. */
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): main falls off the end without a return statement. */
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+# Run the basic-deadlock umpire test (3 ranks) under the SimGrid model
+# checker and expect it to abort (return 134 = SIGABRT) after printing
+# the DEAD-LOCK DETECTED counter-example trace below.
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/basic-deadlock
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 23
+> [0.000000] [mc_global/INFO] Visited states = 23
+> [0.000000] [mc_global/INFO] Executed transitions = 22
+> Aborted
+
--- /dev/null
+
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+/* Purpose: collective-mismatch deadlock.  Rank 0 calls MPI_Bcast with
+ root 1 then root 0, while every other rank calls with root 0 then
+ root 1 -- the mismatched root order means the collectives never
+ match, the intended deadlock for the model checker. */
+int
+main (int argc, char **argv)
+{
+ int rank;
+ /* NOTE(review): MPI_Comm stored in an int -- only compiles where
+ MPI_Comm is an integer handle (e.g. MPICH/SMPI); not portable. */
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ if (rank == 0) {
+ /* NOTE(review): clears buf_size BYTES, not buf_size ints. */
+ memset (buf0, 0, buf_size);
+ /* root order 1 then 0 -- opposite of the other ranks below. */
+ MPI_Bcast (buf0, buf_size, MPI_INT, 1, MPI_COMM_WORLD);
+ MPI_Bcast (buf0, buf_size, MPI_INT, 0, MPI_COMM_WORLD);
+ }
+ else {
+ if (rank == 1)
+ memset (buf1, 1, buf_size);
+
+ /* root order 0 then 1 -- mismatches rank 0's order: deadlock. */
+ MPI_Bcast (buf0, buf_size, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Bcast (buf0, buf_size, MPI_INT, 1, MPI_COMM_WORLD);
+ }
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): main falls off the end without a return statement. */
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+# Run the bcast-deadlock umpire test (mismatched MPI_Bcast roots, 3 ranks)
+# under the SimGrid model checker; expect abort (return 134 = SIGABRT)
+# with the DEAD-LOCK DETECTED trace below.
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/bcast-deadlock
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 4
+> [0.000000] [mc_global/INFO] Visited states = 4
+> [0.000000] [mc_global/INFO] Executed transitions = 3
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+/* change-send-buffer-exhaustive.c -- do lots pt-2-pt ops with */
+/* send buffer overwrite errors*/
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/change-send-buffer-exhaustive.c,v 1.5 2002/10/24 17:04:54 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include "mpi.h"
+
+#define BUF_SIZE 128
+#define NUM_SEND_TYPES 8
+#define NUM_PERSISTENT_SEND_TYPES 4
+#define NUM_BSEND_TYPES 2
+#define NUM_COMPLETION_MECHANISMS 8
+
+/* Purpose: exhaustive change-send-buffer error test.  Rank 0 posts all
+ 8 send flavors (Isend/Ibsend/Irsend/Issend + persistent
+ Send/Bsend/Rsend/Ssend inits) twice each, then modifies the send
+ buffers BEFORE completing the requests -- 6 genuine
+ change-send-buffer errors per iteration (the 2 Bsend variants are
+ legal to modify).  The outer loop covers all 8 completion mechanisms
+ (Wait/Waitall/Waitany/Waitsome/Test/Testall/Testany/Testsome), each
+ with Startall vs. Start for the persistent requests.  Rank 1 receives
+ everything; other ranks only hit the rsend barrier. */
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ int bbuf[(BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES];
+ int buf[BUF_SIZE * 2 * NUM_SEND_TYPES];
+ int i, j, k, at_size, send_t_number, index, outcount, total, flag;
+ /* NOTE(review): num_errors and error_count are declared but never
+ used in this function. */
+ int num_errors, error_count, indices[2 * NUM_SEND_TYPES];
+ MPI_Request aReq[2 * NUM_SEND_TYPES];
+ MPI_Status aStatus[2 * NUM_SEND_TYPES];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ /* attach the buffered-send buffer; sized for both Bsend variants. */
+ MPI_Buffer_attach (bbuf, sizeof(int) *
+ (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES);
+
+ if (rank == 0) {
+ /* set up persistent sends... */
+ /* persistent requests occupy the upper half of aReq[], indices
+ [2*(NUM_SEND_TYPES-NUM_PERSISTENT_SEND_TYPES), 2*NUM_SEND_TYPES). */
+ send_t_number = NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES;
+
+ MPI_Send_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
+ 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
+ MPI_Send_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
+ BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
+ comm, &aReq[send_t_number * 2 + 1]);
+
+ send_t_number++;
+
+ MPI_Bsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
+ 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
+ MPI_Bsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
+ BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
+ comm, &aReq[send_t_number * 2 + 1]);
+
+
+ send_t_number++;
+
+ MPI_Rsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
+ 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
+ MPI_Rsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
+ BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
+ comm, &aReq[send_t_number * 2 + 1]);
+
+ send_t_number++;
+
+ MPI_Ssend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
+ 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
+ MPI_Ssend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
+ BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
+ comm, &aReq[send_t_number * 2 + 1]);
+ }
+
+ /* one iteration per (completion mechanism, Start vs. Startall) pair. */
+ for (k = 0; k < (NUM_COMPLETION_MECHANISMS * 2); k++) {
+ if (rank == 0) {
+ /* initialize all of the send buffers */
+ for (j = 0; j < NUM_SEND_TYPES; j++) {
+ for (i = 0; i < BUF_SIZE; i++) {
+ buf[2 * j * BUF_SIZE + i] = i;
+ buf[((2 * j + 1) * BUF_SIZE) + i] = BUF_SIZE - 1 - i;
+ }
+ }
+ }
+ else if (rank == 1) {
+ /* zero out all of the receive buffers */
+ /* NOTE(review): bzero is the legacy POSIX call from <strings.h>;
+ this file includes <string.h> only -- confirm it still resolves
+ on the target toolchain (memset would be portable). */
+ bzero (buf, sizeof(int) * BUF_SIZE * 2 * NUM_SEND_TYPES);
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (rank == 0) {
+ /* set up transient sends... */
+ send_t_number = 0;
+
+ MPI_Isend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
+ 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
+ MPI_Isend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
+ BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
+ comm, &aReq[send_t_number * 2 + 1]);
+
+ send_t_number++;
+
+ MPI_Ibsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
+ 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
+ MPI_Ibsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
+ BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
+ comm, &aReq[send_t_number * 2 + 1]);
+
+ send_t_number++;
+
+ /* Barrier to ensure receives are posted for rsends... */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ MPI_Irsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
+ 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
+ MPI_Irsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
+ BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
+ comm, &aReq[send_t_number * 2 + 1]);
+
+ send_t_number++;
+
+ MPI_Issend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
+ 1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
+ MPI_Issend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
+ BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
+ comm, &aReq[send_t_number * 2 + 1]);
+
+ /* just to be paranoid */
+ send_t_number++;
+ assert (send_t_number == NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES);
+
+ /* start the persistent sends... */
+ if (k % 2) {
+ MPI_Startall (NUM_PERSISTENT_SEND_TYPES * 2, &aReq[2 * send_t_number]);
+ }
+ else {
+ for (j = 0; j < NUM_PERSISTENT_SEND_TYPES * 2; j++) {
+ MPI_Start (&aReq[2 * send_t_number + j]);
+ }
+ }
+
+ /* NOTE: Changing the send buffer of a Bsend is NOT an error... */
+ /* This write-while-in-flight is the error under test: it touches the
+ first buffer of each send type before completion. */
+ for (j = 0; j < NUM_SEND_TYPES; j++) {
+ /* muck the buffers */
+ buf[j * 2 * BUF_SIZE + (BUF_SIZE >> 1)] = BUF_SIZE;
+ }
+
+ printf ("USER MSG: 6 change send buffer errors in iteration #%d:\n", k);
+
+ /* complete the sends */
+ /* k/2 selects one of the 8 completion mechanisms. */
+ switch (k/2) {
+ case 0:
+ /* use MPI_Wait */
+ for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
+ MPI_Wait (&aReq[j], &aStatus[j]);
+ }
+ break;
+
+ case 1:
+ /* use MPI_Waitall */
+ MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus);
+ break;
+
+ case 2:
+ /* use MPI_Waitany */
+ for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
+ MPI_Waitany (NUM_SEND_TYPES * 2, aReq, &index, aStatus);
+ }
+
+ break;
+
+ case 3:
+ /* use MPI_Waitsome */
+ total = 0;
+ while (total < NUM_SEND_TYPES * 2) {
+ MPI_Waitsome (NUM_SEND_TYPES * 2, aReq, &outcount, indices, aStatus);
+
+ total += outcount;
+ }
+
+ break;
+
+ case 4:
+ /* use MPI_Test */
+ for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
+ flag = 0;
+
+ while (!flag) {
+ MPI_Test (&aReq[j], &flag, &aStatus[j]);
+ }
+ }
+
+ break;
+
+ case 5:
+ /* use MPI_Testall */
+ flag = 0;
+ while (!flag) {
+ MPI_Testall (NUM_SEND_TYPES * 2, aReq, &flag, aStatus);
+ }
+
+ break;
+
+ case 6:
+ /* use MPI_Testany */
+ for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
+ flag = 0;
+ while (!flag) {
+ MPI_Testany (NUM_SEND_TYPES * 2, aReq, &index, &flag, aStatus);
+ }
+ }
+
+ break;
+
+ case 7:
+ /* use MPI_Testsome */
+ total = 0;
+ while (total < NUM_SEND_TYPES * 2) {
+ outcount = 0;
+
+ while (!outcount) {
+ MPI_Testsome (NUM_SEND_TYPES * 2, aReq,
+ &outcount, indices, aStatus);
+ }
+
+ total += outcount;
+ }
+
+ break;
+
+ default:
+ assert (0);
+ break;
+ }
+ }
+ else if (rank == 1) {
+ /* set up receives for all of the sends */
+ for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
+ MPI_Irecv (&buf[j * BUF_SIZE], BUF_SIZE,
+ MPI_INT, 0, j, comm, &aReq[j]);
+ }
+
+ /* Barrier to ensure receives are posted for rsends... */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* complete all of the receives... */
+ MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus);
+ }
+ else {
+ /* Barrier to ensure receives are posted for rsends... */
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (rank == 0) {
+ /* free the persistent requests */
+ for (i = 2* (NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES);
+ i < 2 * NUM_SEND_TYPES; i++) {
+ MPI_Request_free (&aReq[i]);
+ }
+ }
+
+ /* NOTE(review): MPI_Buffer_detach's first argument receives the
+ buffer ADDRESS (a pointer-to-pointer is expected); passing the
+ array itself writes that address into bbuf's first bytes -- works
+ by accident, confirm against the MPI standard. */
+ MPI_Buffer_detach (bbuf, &at_size);
+
+ assert (at_size ==
+ sizeof(int) * (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+ /* NOTE(review): main falls off the end without a return statement. */
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* type-no-error-exhaustive-with-isends.c -- send with weird types */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/change-send-buffer-type-exhaustive.c,v 1.1 2002/06/08 09:11:34 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include "mpi.h"
+
+
+/* Simple double+char pair used as the base element for the derived
+ datatype constructors in this test (padding after the_char creates a
+ hole in the MPI type map). */
+typedef struct _test_basic_struct_t
+{
+ double the_double;
+ char the_char;
+}
+test_basic_struct_t;
+
+
+/* Variant with explicit don't-send padding on both sides; used with an
+ MPI_LB/MPI_UB-bracketed struct type so only the_double_to_send and
+ the first char are transferred. */
+typedef struct _test_lb_ub_struct_t
+{
+ double dontsend_double1;
+ double the_double_to_send;
+ char the_chars[8]; /* only send the first one... */
+ double dontsend_double2;
+}
+test_lb_ub_struct_t;
+
+
+#define TYPE_CONSTRUCTOR_COUNT 7
+#define MSG_COUNT 3
+
+/*
+*/
+#define RUN_TYPE_STRUCT
+#define RUN_TYPE_VECTOR
+#define RUN_TYPE_HVECTOR
+#define RUN_TYPE_INDEXED
+#define RUN_TYPE_HINDEXED
+#define RUN_TYPE_CONTIGUOUS
+#define RUN_TYPE_STRUCT_LB_UB
+/*
+*/
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ int i, j, k, basic_extent;
+ int blocklens[4], displs[4];
+ MPI_Datatype structtypes[4];
+ MPI_Datatype newtype[TYPE_CONSTRUCTOR_COUNT];
+ MPI_Request aReq[TYPE_CONSTRUCTOR_COUNT];
+ MPI_Status aStatus[TYPE_CONSTRUCTOR_COUNT];
+#ifdef RUN_TYPE_STRUCT
+ test_basic_struct_t struct_buf[MSG_COUNT];
+#endif
+#ifdef RUN_TYPE_VECTOR
+ test_basic_struct_t vector_buf[7*MSG_COUNT];
+#endif
+#ifdef RUN_TYPE_HVECTOR
+ test_basic_struct_t hvector_buf[44*MSG_COUNT];
+#endif
+#ifdef RUN_TYPE_INDEXED
+ test_basic_struct_t indexed_buf[132*MSG_COUNT];
+#endif
+#ifdef RUN_TYPE_HINDEXED
+ test_basic_struct_t hindexed_buf[272*MSG_COUNT];
+#endif
+#ifdef RUN_TYPE_CONTIGUOUS
+ test_basic_struct_t contig_buf[2720*MSG_COUNT];
+#endif
+#ifdef RUN_TYPE_STRUCT
+ test_lb_ub_struct_t struct_lb_ub_send_buf[MSG_COUNT];
+ test_basic_struct_t struct_lb_ub_recv_buf[MSG_COUNT];
+#endif
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ structtypes[0] = MPI_DOUBLE;
+ structtypes[1] = MPI_CHAR;
+ blocklens[0] = blocklens[1] = 1;
+ displs[0] = 0;
+ displs[1] = sizeof(double);
+
+ MPI_Barrier (comm);
+
+ /* create the types */
+ MPI_Type_struct (2, blocklens, displs, structtypes, &newtype[0]);
+
+ MPI_Type_extent (newtype[0], &basic_extent);
+ if (basic_extent != sizeof (test_basic_struct_t)) {
+ fprintf (stderr, "(%d): Unexpect extent for struct\n");
+ MPI_Abort (MPI_COMM_WORLD, 666);
+ }
+
+ MPI_Type_vector (2, 3, 4, newtype[0], &newtype[1]);
+ MPI_Type_hvector (3, 2, 15 * sizeof (test_basic_struct_t),
+ newtype[1], &newtype[2]);
+ displs[1] = 2;
+ MPI_Type_indexed (2, blocklens, displs, newtype[2], &newtype[3]);
+ displs[1] = 140 * sizeof (test_basic_struct_t);
+ MPI_Type_hindexed (2, blocklens, displs, newtype[3], &newtype[4]);
+ MPI_Type_contiguous (10, newtype[4], &newtype[5]);
+
+ structtypes[0] = MPI_LB;
+ structtypes[1] = MPI_DOUBLE;
+ structtypes[2] = MPI_CHAR;
+ structtypes[3] = MPI_UB;
+ blocklens[0] = blocklens[1] = blocklens[2] = blocklens[3] = 1;
+ displs[0] = -sizeof(double);
+ displs[1] = 0;
+ displs[2] = sizeof(double);
+ displs[3] = 2*sizeof(double)+8*sizeof(char);
+
+ MPI_Type_struct (4, blocklens, displs, structtypes, &newtype[6]);
+
+#ifdef RUN_TYPE_STRUCT
+ MPI_Type_commit (&newtype[0]);
+#endif
+
+#ifdef RUN_TYPE_VECTOR
+ MPI_Type_commit (&newtype[1]);
+#endif
+
+#ifdef RUN_TYPE_HVECTOR
+ MPI_Type_commit (&newtype[2]);
+#endif
+
+#ifdef RUN_TYPE_INDEXED
+ MPI_Type_commit (&newtype[3]);
+#endif
+
+#ifdef RUN_TYPE_HINDEXED
+ MPI_Type_commit (&newtype[4]);
+#endif
+
+#ifdef RUN_TYPE_CONTIGUOUS
+ MPI_Type_commit (&newtype[5]);
+#endif
+
+#ifdef RUN_TYPE_STRUCT_LB_UB
+ MPI_Type_commit (&newtype[6]);
+#endif
+
+ if (rank == 0) {
+ /* initialize buffers */
+ for (i = 0; i < MSG_COUNT; i++) {
+#ifdef RUN_TYPE_STRUCT
+ struct_buf[i].the_double = 1.0;
+ struct_buf[i].the_char = 'a';
+#endif
+
+#ifdef RUN_TYPE_VECTOR
+ for (j = 0; j < 7; j++) {
+ vector_buf[i*7 + j].the_double = 1.0;
+ vector_buf[i*7 + j].the_char = 'a';
+ }
+#endif
+
+#ifdef RUN_TYPE_HVECTOR
+ for (j = 0; j < 44; j++) {
+ hvector_buf[i*44 + j].the_double = 1.0;
+ hvector_buf[i*44 + j].the_char = 'a';
+ }
+#endif
+
+#ifdef RUN_TYPE_INDEXED
+ for (j = 0; j < 132; j++) {
+ indexed_buf[i*132 + j].the_double = 1.0;
+ indexed_buf[i*132 + j].the_char = 'a';
+ }
+#endif
+
+#ifdef RUN_TYPE_HINDEXED
+ for (j = 0; j < 272; j++) {
+ hindexed_buf[i*272 + j].the_double = 1.0;
+ hindexed_buf[i*272 + j].the_char = 'a';
+ }
+#endif
+
+#ifdef RUN_TYPE_CONTIGUOUS
+ for (j = 0; j < 2720; j++) {
+ contig_buf[i*2720 + j].the_double = 1.0;
+ contig_buf[i*2720 + j].the_char = 'a';
+ }
+#endif
+
+#ifdef RUN_TYPE_STRUCT_LB_UB
+ struct_lb_ub_send_buf[i].dontsend_double1 = 1.0;
+ struct_lb_ub_send_buf[i].the_double_to_send = 1.0;
+ for (j = 0; j < 8; j++)
+ struct_lb_ub_send_buf[i].the_chars[j] = 'a';
+ struct_lb_ub_send_buf[i].dontsend_double2 = 1.0;
+#endif
+ }
+
+ /* set up the sends */
+#ifdef RUN_TYPE_STRUCT
+ MPI_Isend (struct_buf, MSG_COUNT, newtype[0], 1, 0, comm, &aReq[0]);
+#else
+ aReq[0] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_VECTOR
+ MPI_Isend (vector_buf, MSG_COUNT, newtype[1], 1, 1, comm, &aReq[1]);
+#else
+ aReq[1] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_HVECTOR
+ MPI_Isend (hvector_buf, MSG_COUNT, newtype[2], 1, 2, comm, &aReq[2]);
+#else
+ aReq[2] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_INDEXED
+ MPI_Isend (indexed_buf, MSG_COUNT, newtype[3], 1, 3, comm, &aReq[3]);
+#else
+ aReq[3] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_HINDEXED
+ MPI_Isend (hindexed_buf, MSG_COUNT, newtype[4], 1, 4, comm, &aReq[4]);
+#else
+ aReq[4] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_CONTIGUOUS
+ MPI_Isend (contig_buf, MSG_COUNT, newtype[5], 1, 5, comm, &aReq[5]);
+#else
+ aReq[5] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_STRUCT
+ MPI_Isend (&(struct_lb_ub_send_buf[0].the_double_to_send),
+ MSG_COUNT, newtype[6], 1, 6, comm, &aReq[6]);
+#else
+ aReq[6] = MPI_REQUEST_NULL;
+#endif
+ }
+ else if (rank == 1) {
+ /* initialize buffers */
+ for (i = 0; i < MSG_COUNT; i++) {
+#ifdef RUN_TYPE_STRUCT
+ struct_buf[i].the_double = 2.0;
+ struct_buf[i].the_char = 'b';
+#endif
+
+#ifdef RUN_TYPE_VECTOR
+ for (j = 0; j < 7; j++) {
+ vector_buf[i*7 + j].the_double = 2.0;
+ vector_buf[i*7 + j].the_char = 'b';
+ }
+#endif
+
+#ifdef RUN_TYPE_HVECTOR
+ for (j = 0; j < 44; j++) {
+ hvector_buf[i*44 + j].the_double = 2.0;
+ hvector_buf[i*44 + j].the_char = 'b';
+ }
+#endif
+
+#ifdef RUN_TYPE_INDEXED
+ for (j = 0; j < 132; j++) {
+ indexed_buf[i*132 + j].the_double = 2.0;
+ indexed_buf[i*132 + j].the_char = 'b';
+ }
+#endif
+
+#ifdef RUN_TYPE_HINDEXED
+ for (j = 0; j < 272; j++) {
+ hindexed_buf[i*272 + j].the_double = 2.0;
+ hindexed_buf[i*272 + j].the_char = 'b';
+ }
+#endif
+
+#ifdef RUN_TYPE_CONTIGUOUS
+ for (j = 0; j < 2720; j++) {
+ contig_buf[i*2720 + j].the_double = 2.0;
+ contig_buf[i*2720 + j].the_char = 'b';
+ }
+#endif
+
+#ifdef RUN_TYPE_STRUCT_LB_UB
+ struct_lb_ub_recv_buf[i].the_double = 2.0;
+ struct_lb_ub_recv_buf[i].the_char = 'b';
+#endif
+ }
+
+ /* set up the receives... */
+#ifdef RUN_TYPE_STRUCT
+ MPI_Irecv (struct_buf, MSG_COUNT, newtype[0], 0, 0, comm, &aReq[0]);
+#else
+ aReq[0] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_VECTOR
+ MPI_Irecv (vector_buf, MSG_COUNT, newtype[1], 0, 1, comm, &aReq[1]);
+#else
+ aReq[1] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_HVECTOR
+ MPI_Irecv (hvector_buf, MSG_COUNT, newtype[2], 0, 2, comm, &aReq[2]);
+#else
+ aReq[2] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_INDEXED
+ MPI_Irecv (indexed_buf, MSG_COUNT, newtype[3], 0, 3, comm, &aReq[3]);
+#else
+ aReq[3] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_HINDEXED
+ MPI_Irecv (hindexed_buf, MSG_COUNT, newtype[4], 0, 4, comm, &aReq[4]);
+#else
+ aReq[4] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_CONTIGUOUS
+ MPI_Irecv (contig_buf, MSG_COUNT, newtype[5], 0, 5, comm, &aReq[5]);
+#else
+ aReq[5] = MPI_REQUEST_NULL;
+#endif
+
+#ifdef RUN_TYPE_STRUCT_LB_UB
+ MPI_Irecv (struct_lb_ub_recv_buf,
+ MSG_COUNT, newtype[0], 0, 6, comm, &aReq[6]);
+#else
+ aReq[6] = MPI_REQUEST_NULL;
+#endif
+ }
+
+ if (rank == 0) {
+ /* muck something in each that is not in the holes... */
+ /* see below to see where the holes are... */
+ /* don't use the first one to test handling of count arg */
+ i = (MSG_COUNT > 1) ? 1 : 0;
+
+#ifdef RUN_TYPE_STRUCT
+ /* muck the char member... */
+ struct_buf[i].the_char = 'c';
+#endif
+
+#ifdef RUN_TYPE_VECTOR
+ /* muck the element after the hole due to stride in vector... */
+ vector_buf[i*7 + 4].the_double = 3.0;
+ vector_buf[i*7 + 4].the_char = 'c';
+#endif
+
+#ifdef RUN_TYPE_HVECTOR
+ /* muck the element after the hole due to stride in hvector... */
+ hvector_buf[i*44 + 15].the_double = 3.0;
+ hvector_buf[i*44 + 15].the_char = 'c';
+#endif
+
+#ifdef RUN_TYPE_INDEXED
+ /* muck the element after the hole due to indexed displacements... */
+ indexed_buf[i*132 + 44 + 44].the_double = 3.0;
+ indexed_buf[i*132 + 44 + 44].the_char = 'c';
+#endif
+
+#ifdef RUN_TYPE_HINDEXED
+ /* muck the element after the hole due to hindexed displacements... */
+ hindexed_buf[i*272 + 132 + 8].the_double = 3.0;
+ hindexed_buf[i*272 + 132 + 8].the_char = 'c';
+#endif
+
+#ifdef RUN_TYPE_CONTIGUOUS
+ /* muck element after the hole due to hindex displacements, hindex 2... */
+ contig_buf[i*2720 + 272 + 140].the_double = 3.0;
+ contig_buf[i*2720 + 272 + 140].the_char = 'c';
+#endif
+
+#ifdef RUN_TYPE_STRUCT_LB_UB
+ /* muck the double member and char member being sent... */
+ struct_lb_ub_send_buf[i].the_double_to_send = 3.0;
+ struct_lb_ub_send_buf[i].the_chars[0] = 'c';
+#endif
+ }
+
+ if ((rank == 0) || (rank == 1))
+ /* wait on everything... */
+ MPI_Waitall (TYPE_CONSTRUCTOR_COUNT, aReq, aStatus);
+
+ if (rank == 1) {
+ /* check the holes... */
+ for (i = 0; i < MSG_COUNT; i++) {
+#ifdef RUN_TYPE_STRUCT
+ /* no holes in struct_buf... */
+#endif
+
+#ifdef RUN_TYPE_VECTOR
+ /* one hole in vector_buf... */
+ assert ((vector_buf[i*7 + 3].the_double == 2.0) &&
+ (vector_buf[i*7 + 3].the_char == 'b'));
+#endif
+
+#ifdef RUN_TYPE_HVECTOR
+ /* eight holes in hvector_buf... */
+ /* hole in first vector, first block... */
+ assert ((hvector_buf[i*44 + 3].the_double == 2.0) &&
+ (hvector_buf[i*44 + 3].the_char == 'b'));
+ /* hole in second vector, first block... */
+ assert ((hvector_buf[i*44 + 10].the_double == 2.0) &&
+ (hvector_buf[i*44 + 10].the_char == 'b'));
+ /* hole in between first and second vector blocks... */
+ assert ((hvector_buf[i*44 + 14].the_double == 2.0) &&
+ (hvector_buf[i*44 + 14].the_char == 'b'));
+ /* hole in first vector, second block... */
+ assert ((hvector_buf[i*44 + 18].the_double == 2.0) &&
+ (hvector_buf[i*44 + 18].the_char == 'b'));
+ /* hole in second vector, second block... */
+ assert ((hvector_buf[i*44 + 25].the_double == 2.0) &&
+ (hvector_buf[i*44 + 25].the_char == 'b'));
+ /* hole in between second and third vector blocks... */
+ assert ((hvector_buf[i*44 + 29].the_double == 2.0) &&
+ (hvector_buf[i*44 + 29].the_char == 'b'));
+ /* hole in first vector, third block... */
+ assert ((hvector_buf[i*44 + 33].the_double == 2.0) &&
+ (hvector_buf[i*44 + 33].the_char == 'b'));
+ /* hole in second vector, third block... */
+ assert ((hvector_buf[i*44 + 40].the_double == 2.0) &&
+ (hvector_buf[i*44 + 40].the_char == 'b'));
+#endif
+
+#ifdef RUN_TYPE_INDEXED
+ /* sixty holes in indexed_buf... */
+ /* hole in first vector, first block, first hvector... */
+ assert ((indexed_buf[i*132 + 3].the_double == 2.0) &&
+ (indexed_buf[i*132 + 3].the_char == 'b'));
+ /* hole in second vector, first block, first hvector... */
+ assert ((indexed_buf[i*132 + 10].the_double == 2.0) &&
+ (indexed_buf[i*132 + 10].the_char == 'b'));
+ /* hole in between first and second vector blocks, first hvector... */
+ assert ((indexed_buf[i*132 + 14].the_double == 2.0) &&
+ (indexed_buf[i*132 + 14].the_char == 'b'));
+ /* hole in first vector, second block, first hvector... */
+ assert ((indexed_buf[i*132 + 18].the_double == 2.0) &&
+ (indexed_buf[i*132 + 18].the_char == 'b'));
+ /* hole in second vector, second block, first hvector... */
+ assert ((indexed_buf[i*132 + 25].the_double == 2.0) &&
+ (indexed_buf[i*132 + 25].the_char == 'b'));
+ /* hole in between second and third vector blocks, first hvector... */
+ assert ((indexed_buf[i*132 + 29].the_double == 2.0) &&
+ (indexed_buf[i*132 + 29].the_char == 'b'));
+ /* hole in first vector, third block, first hvector... */
+ assert ((indexed_buf[i*132 + 33].the_double == 2.0) &&
+ (indexed_buf[i*132 + 33].the_char == 'b'));
+ /* hole in second vector, third block, first hvector... */
+ assert ((indexed_buf[i*132 + 40].the_double == 2.0) &&
+ (indexed_buf[i*132 + 40].the_char == 'b'));
+ /* hole in between hvectors... */
+ for (j = 0; j < 44; j++) {
+ assert ((indexed_buf[i*132 + 44 + j].the_double == 2.0) &&
+ (indexed_buf[i*132 + 44 + j].the_char == 'b'));
+ }
+ /* hole in first vector, first block, second hvector... */
+ assert ((indexed_buf[i*132 + 3 + 88].the_double == 2.0) &&
+ (indexed_buf[i*132 + 3 + 88].the_char == 'b'));
+ /* hole in second vector, first block, second hvector... */
+ assert ((indexed_buf[i*132 + 10 + 88].the_double == 2.0) &&
+ (indexed_buf[i*132 + 10 + 88].the_char == 'b'));
+ /* hole in between first and second vector blocks, second hvector... */
+ assert ((indexed_buf[i*132 + 14 + 88].the_double == 2.0) &&
+ (indexed_buf[i*132 + 14 + 88].the_char == 'b'));
+ /* hole in first vector, second block, second hvector... */
+ assert ((indexed_buf[i*132 + 18 + 88].the_double == 2.0) &&
+ (indexed_buf[i*132 + 18 + 88].the_char == 'b'));
+ /* hole in second vector, second block, second hvector... */
+ assert ((indexed_buf[i*132 + 25 + 88].the_double == 2.0) &&
+ (indexed_buf[i*132 + 25 + 88].the_char == 'b'));
+ /* hole in between second and third vector blocks, second hvector... */
+ assert ((indexed_buf[i*132 + 29 + 88].the_double == 2.0) &&
+ (indexed_buf[i*132 + 29 + 88].the_char == 'b'));
+ /* hole in first vector, third block, second hvector... */
+ assert ((indexed_buf[i*132 + 33 + 88].the_double == 2.0) &&
+ (indexed_buf[i*132 + 33 + 88].the_char == 'b'));
+ /* hole in second vector, third block, second hvector... */
+ assert ((indexed_buf[i*132 + 40 + 88].the_double == 2.0) &&
+ (indexed_buf[i*132 + 40 + 88].the_char == 'b'));
+#endif
+
+#ifdef RUN_TYPE_HINDEXED
+ /* one hundred twenty eight holes in hindexed_buf... */
+ /* hole in first vector, first block, first hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 3].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 3].the_char == 'b'));
+ /* hole in second vector, first block, first hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 10].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 10].the_char == 'b'));
+ /* hole between first & second vector blocks, hvector 1, index 1... */
+ assert ((hindexed_buf[i*272 + 14].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 14].the_char == 'b'));
+ /* hole in first vector, second block, first hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 18].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 18].the_char == 'b'));
+ /* hole in second vector, second block, first hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 25].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 25].the_char == 'b'));
+ /* hole between second & third vector blocks, hvector 1, index 1... */
+ assert ((hindexed_buf[i*272 + 29].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 29].the_char == 'b'));
+ /* hole in first vector, third block, first hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 33].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 33].the_char == 'b'));
+ /* hole in second vector, third block, first hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 40].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 40].the_char == 'b'));
+ /* hole in between hvectors, index 1... */
+ for (j = 0; j < 44; j++) {
+ assert ((hindexed_buf[i*272 + 44 + j].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 44 + j].the_char == 'b'));
+ }
+ /* hole in first vector, first block, second hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 3 + 88].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 3 + 88].the_char == 'b'));
+ /* hole in second vector, first block, second hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 10 + 88].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 10 + 88].the_char == 'b'));
+ /* hole between first & second vector blocks, hvector 2, index 1... */
+ assert ((hindexed_buf[i*272 + 14 + 88].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 14 + 88].the_char == 'b'));
+ /* hole in first vector, second block, second hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 18 + 88].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 18 + 88].the_char == 'b'));
+ /* hole in second vector, second block, second hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 25 + 88].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 25 + 88].the_char == 'b'));
+ /* hole between second & third vector blocks, hvector 2, index 1... */
+ assert ((hindexed_buf[i*272 + 29 + 88].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 29 + 88].the_char == 'b'));
+ /* hole in first vector, third block, second hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 33 + 88].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 33 + 88].the_char == 'b'));
+ /* hole in second vector, third block, second hvector, index 1... */
+ assert ((hindexed_buf[i*272 + 40 + 88].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 40 + 88].the_char == 'b'));
+ /* indexed hole... */
+ for (j = 0; j < 8; j++) {
+ assert ((hindexed_buf[i*272 + 132 + j].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 132 + j].the_char == 'b'));
+ }
+ /* hole in first vector, first block, first hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 3 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 3 + 140].the_char == 'b'));
+ /* hole in second vector, first block, first hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 10 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 10 + 140].the_char == 'b'));
+ /* hole between first & second vector blocks, hvector 1, index 2... */
+ assert ((hindexed_buf[i*272 + 14 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 14 + 140].the_char == 'b'));
+ /* hole in first vector, second block, first hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 18 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 18 + 140].the_char == 'b'));
+ /* hole in second vector, second block, first hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 25 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 25 + 140].the_char == 'b'));
+ /* hole between second & third vector blocks, hvector 1, index 2... */
+ assert ((hindexed_buf[i*272 + 29 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 29 + 140].the_char == 'b'));
+ /* hole in first vector, third block, first hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 33 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 33 + 140].the_char == 'b'));
+ /* hole in second vector, third block, first hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 40 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 40 + 140].the_char == 'b'));
+ /* hole in between hvectors, index 2... */
+ for (j = 0; j < 44; j++) {
+ assert ((hindexed_buf[i*272 + 44 + j + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 44 + j + 140].the_char == 'b'));
+ }
+ /* hole in first vector, first block, second hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 3 + 88 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 3 + 88 + 140].the_char == 'b'));
+ /* hole in second vector, first block, second hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 10 + 88 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 10 + 88 + 140].the_char == 'b'));
+ /* hole between first & second vector blocks, hvector 2, index 2... */
+ assert ((hindexed_buf[i*272 + 14 + 88 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 14 + 88 + 140].the_char == 'b'));
+ /* hole in first vector, second block, second hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 18 + 88 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 18 + 88 + 140].the_char == 'b'));
+ /* hole in second vector, second block, second hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 25 + 88 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 25 + 88 + 140].the_char == 'b'));
+ /* hole between second & third vector blocks, hvector 2, index 2... */
+ assert ((hindexed_buf[i*272 + 29 + 88 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 29 + 88 + 140].the_char == 'b'));
+ /* hole in first vector, third block, second hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 33 + 88 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 33 + 88 + 140].the_char == 'b'));
+ /* hole in second vector, third block, second hvector, index 2... */
+ assert ((hindexed_buf[i*272 + 40 + 88 + 140].the_double == 2.0) &&
+ (hindexed_buf[i*272 + 40 + 88 + 140].the_char == 'b'));
+#endif
+
+#ifdef RUN_TYPE_CONTIGUOUS
+ for (j = 0; j < 10; j++) {
+ /* hole in first vector, first block, first hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 3].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 3].the_char == 'b'));
+ /* hole in second vector, first block, first hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 10].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 10].the_char == 'b'));
+ /* hole between first & second vector blocks, hvector 1, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 14].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 14].the_char == 'b'));
+ /* hole in first vector, second block, first hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 18].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 18].the_char == 'b'));
+ /* hole in second vector, second block, first hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 25].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 25].the_char == 'b'));
+ /* hole between second & third vector blocks, hvector 1, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 29].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 29].the_char == 'b'));
+ /* hole in first vector, third block, first hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 33].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 33].the_char == 'b'));
+ /* hole in second vector, third block, first hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 40].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 40].the_char == 'b'));
+ /* hole in between hvectors, index 1... */
+ for (k = 0; k < 44; k++) {
+ assert ((contig_buf[i*2720 + j*272 + 44 + k].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 44 + k].the_char == 'b'));
+ }
+ /* hole in first vector, first block, second hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 3 + 88].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 3 + 88].the_char == 'b'));
+ /* hole in second vector, first block, second hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 10 + 88].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 10 + 88].the_char == 'b'));
+ /* hole between first & second vector blocks, hvector 2, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 14 + 88].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 14 + 88].the_char == 'b'));
+ /* hole in first vector, second block, second hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 18 + 88].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 18 + 88].the_char == 'b'));
+ /* hole in second vector, second block, second hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 25 + 88].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 25 + 88].the_char == 'b'));
+ /* hole between second & third vector blocks, hvector 2, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 29 + 88].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 29 + 88].the_char == 'b'));
+ /* hole in first vector, third block, second hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 33 + 88].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 33 + 88].the_char == 'b'));
+ /* hole in second vector, third block, second hvector, index 1... */
+ assert ((contig_buf[i*2720 + j*272 + 40 + 88].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 40 + 88].the_char == 'b'));
+ /* indexed hole... */
+ for (k = 0; k < 8; k++) {
+ assert ((contig_buf[i*2720 + j*272 + 132 + k].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 132 + k].the_char == 'b'));
+ }
+ /* hole in first vector, first block, first hvector, index 2... */
+ assert ((contig_buf[i*2720 + j*272 + 3 + 140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 3 + 140].the_char == 'b'));
+ /* hole in second vector, first block, first hvector, index 2... */
+ assert ((contig_buf[i*2720 + j*272 + 10 + 140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 10 + 140].the_char == 'b'));
+ /* hole between first & second vector blocks, hvector 1, index 2... */
+ assert ((contig_buf[i*2720 + j*272 + 14 + 140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 14 + 140].the_char == 'b'));
+ /* hole in first vector, second block, first hvector, index 2... */
+ assert ((contig_buf[i*2720 + j*272 + 18 + 140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 18 + 140].the_char == 'b'));
+ /* hole in second vector, second block, first hvector, index 2... */
+ assert ((contig_buf[i*2720 + j*272 + 25 + 140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 25 + 140].the_char == 'b'));
+ /* hole between second & third vector blocks, hvector 1, index 2... */
+ assert ((contig_buf[i*2720 + j*272 + 29 + 140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 29 + 140].the_char == 'b'));
+ /* hole in first vector, third block, first hvector, index 2... */
+ assert ((contig_buf[i*2720 + j*272 + 33 + 140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 33 + 140].the_char == 'b'));
+ /* hole in second vector, third block, first hvector, index 2... */
+ assert ((contig_buf[i*2720 + j*272 + 40 + 140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 40 + 140].the_char == 'b'));
+ /* hole in between hvectors, index 2... */
+ for (k = 0; k < 44; k++) {
+ assert ((contig_buf[i*2720+j*272+44+k+140].the_double == 2.0) &&
+ (contig_buf[i*2720 +j*272+44+k+140].the_char == 'b'));
+ }
+ /* hole in first vector, first block, second hvector, index 2... */
+ assert ((contig_buf[i*2720+j*272+3+88+140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 3 + 88 + 140].the_char == 'b'));
+ /* hole in second vector, first block, second hvector, index 2... */
+ assert ((contig_buf[i*2720+j*272+10+88+140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 10 + 88 + 140].the_char == 'b'));
+ /* hole between first & second vector blocks, hvector 2, index 2... */
+ assert ((contig_buf[i*2720+j*272+14+88+140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 14 + 88 + 140].the_char == 'b'));
+ /* hole in first vector, second block, second hvector, index 2... */
+ assert ((contig_buf[i*2720+j*272+18+88+140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 18 + 88 + 140].the_char == 'b'));
+ /* hole in second vector, second block, second hvector, index 2... */
+ assert ((contig_buf[i*2720+j*272+25+88+140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 25 + 88 + 140].the_char == 'b'));
+ /* hole between second & third vector blocks, hvector 2, index 2... */
+ assert ((contig_buf[i*2720+j*272+29+88+140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 29 + 88 + 140].the_char == 'b'));
+ /* hole in first vector, third block, second hvector, index 2... */
+ assert ((contig_buf[i*2720+j*272+33+88+140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 33 + 88 + 140].the_char == 'b'));
+ /* hole in second vector, third block, second hvector, index 2... */
+ assert ((contig_buf[i*2720+j*272+40+88+140].the_double == 2.0) &&
+ (contig_buf[i*2720 + j*272 + 40 + 88 + 140].the_char == 'b'));
+ }
+#endif
+
+#ifdef RUN_TYPE_STRUCT_LB_UB
+ /* no holes in struct_lb_ub_recv_buf... */
+#endif
+ }
+ }
+
+ for (i = 0; i < TYPE_CONSTRUCTOR_COUNT; i++)
+ MPI_Type_free (&newtype[i]);
+
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Jeffrey Vetter (j-vetter@llnl.gov) Mon Nov 1 1999 */
+/* change-send-buffer.c -- modify a send buffer while its non-blocking sends are still pending */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/change-send-buffer.c,v 1.3 2002/07/30 21:34:42 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+/*
+ * Umpire test: deliberately writes into a send buffer while non-blocking
+ * sends on that buffer are still in flight.  A message-passing correctness
+ * tool is expected to flag the write; the program itself runs to completion.
+ *
+ * Rank 0 posts two MPI_Isend's to rank 1, then (erroneously, on purpose)
+ * overwrites buf0[64] before completing them.  Round 1 completes with two
+ * MPI_Wait calls, round 2 repeats the pattern with a single MPI_Waitall.
+ * Ranks other than 0 and 1 only participate in the barriers.
+ *
+ * Fixes relative to the original: `comm` is declared MPI_Comm instead of
+ * int (int handles are an MPICH-ism; Open MPI handles are pointers), and an
+ * unused mid-function `int j, k;` declaration was removed.
+ */
+int
+main (int argc, char **argv)
+{
+  int nprocs = -1;
+  int rank = -1;
+  int tag1 = 0;			/* both tags equal: messages match in posting order */
+  int tag2 = 0;
+  MPI_Comm comm = MPI_COMM_WORLD;
+  char processor_name[128];
+  int namelen = 128;
+  int buf0[128];
+  int buf1[128];
+  int i;
+  MPI_Request aReq[2];
+  MPI_Status aStatus[2];
+
+  /* init */
+  MPI_Init (&argc, &argv);
+  MPI_Comm_size (comm, &nprocs);
+  MPI_Comm_rank (comm, &rank);
+  MPI_Get_processor_name (processor_name, &namelen);
+  printf ("(%d) is alive on %s\n", rank, processor_name);
+  fflush (stdout);
+
+  for (i = 0; i < 128; i++)
+    {
+      buf0[i] = i;
+      buf1[i] = 127 - i;
+    }
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  /* Round 1: complete the requests with two separate MPI_Wait calls. */
+  switch (rank)
+    {
+    case 0:
+      MPI_Isend (buf0, 128, MPI_INT, 1, tag1, comm, &aReq[0]);
+      MPI_Isend (buf1, 128, MPI_INT, 1, tag2, comm, &aReq[1]);
+      /* do some work here */
+
+      /* INTENTIONAL ERROR: the Isend on buf0 has not completed, so the
+         application may not touch the buffer yet (MPI-3.1 sec. 3.7.2). */
+      buf0[64] = 1000000;
+
+      MPI_Wait (&aReq[0], &aStatus[0]);
+      MPI_Wait (&aReq[1], &aStatus[1]);
+
+      break;
+
+    case 1:
+      MPI_Irecv (buf0, 128, MPI_INT, 0, tag1, comm, &aReq[0]);
+      MPI_Irecv (buf1, 128, MPI_INT, 0, tag2, comm, &aReq[1]);
+      /* do some work here ... */
+      MPI_Wait (&aReq[0], &aStatus[0]);
+      MPI_Wait (&aReq[1], &aStatus[1]);
+      break;
+
+    default:
+      /* do nothing */
+      break;
+    }
+
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  /* Reset the buffers, then repeat the same erroneous pattern. */
+  for (i = 0; i < 128; i++)
+    {
+      buf0[i] = i;
+      buf1[i] = 127 - i;
+    }
+
+  /* Round 2: complete the requests with one MPI_Waitall. */
+  switch (rank)
+    {
+    case 0:
+      MPI_Isend (buf0, 128, MPI_INT, 1, tag1, comm, &aReq[0]);
+      MPI_Isend (buf1, 128, MPI_INT, 1, tag2, comm, &aReq[1]);
+      /* do some work here */
+
+      /* INTENTIONAL ERROR: same premature write as in round 1. */
+      buf0[64] = 1000000;
+
+      MPI_Waitall (2, aReq, aStatus);
+
+      break;
+
+    case 1:
+      MPI_Irecv (buf0, 128, MPI_INT, 0, tag1, comm, &aReq[0]);
+      MPI_Irecv (buf1, 128, MPI_INT, 0, tag2, comm, &aReq[1]);
+      /* do some work here ... */
+      MPI_Waitall (2, aReq, aStatus);
+      break;
+
+    default:
+      /* do nothing */
+      break;
+    }
+
+  MPI_Finalize ();
+  printf ("(%d) Finished normally\n", rank);
+
+  return 0;
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Thu Oct 17 2002 */
+/* collective-exhaustive-byte-int-mismatch.c -- do many collective */
+/* operations with simple type mismatches whenever possible (i.e., */
+/* MPI_INT does not match MPI_BYTE, despite many thinking it does) */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/collective-exhaustive-byte-int-mismatch.c,v 1.1 2002/10/24 17:04:54 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include "mpi.h"
+
+
+#define RUN_BARRIER
+#define RUN_BCAST
+#define RUN_GATHER
+#define RUN_GATHERV
+#define RUN_SCATTER
+#define RUN_SCATTERV
+#define RUN_ALLGATHER
+#define RUN_ALLGATHERV
+#define RUN_ALLTOALL
+#define RUN_ALLTOALLV
+#define RUN_REDUCE
+#define RUN_ALLREDUCE
+#define RUN_REDUCE_SCATTER
+#define RUN_SCAN
+
+
+#define RUN_MAX
+#define RUN_MIN
+#define RUN_SUM
+#define RUN_PROD
+#define RUN_LAND
+#define RUN_BAND
+#define RUN_LOR
+#define RUN_BOR
+#define RUN_LXOR
+#define RUN_BXOR
+#define RUN_USEROP
+
+
+#define buf_size 128
+#define OP_COUNT 1
+
+
+#ifdef RUN_USEROP
+typedef struct {
+ double real, imag;
+} complex_t;
+
+/*
+ * User-defined MPI reduction operator: element-wise complex multiplication,
+ * accumulating in place into inoutp (inout[i] *= in[i]).
+ *
+ * When this test invokes the reduction with MPI_BYTE, *len arrives as a
+ * byte count, so it is converted back to a complex_t element count; for any
+ * other datatype *len is already the element count.
+ */
+void
+complex_prod (void *inp, void *inoutp, int *len, MPI_Datatype *dptr)
+{
+  complex_t *src = (complex_t *) inp;
+  complex_t *dst = (complex_t *) inoutp;
+  int count = (*dptr == MPI_BYTE) ? (int) ((*len) / (2 * sizeof (double))) : *len;
+  int idx;
+
+  for (idx = 0; idx < count; idx++) {
+    double re = dst[idx].real * src[idx].real - dst[idx].imag * src[idx].imag;
+    double im = dst[idx].real * src[idx].imag + dst[idx].imag * src[idx].real;
+    dst[idx].real = re;
+    dst[idx].imag = im;
+  }
+}
+#endif
+
+
+int
+main (int argc, char **argv)
+{
+ int i, nprocs = -1;
+ int rank = -1;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ int *buf0, *buf1, *displs, *counts, *rcounts, *alltoallvcounts;
+#ifdef RUN_USEROP
+ MPI_Op user_op;
+ complex_t *a, *answer;
+ MPI_Datatype ctype;
+#endif
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ buf0 = (int *) malloc (buf_size * nprocs * sizeof(int));
+ assert (buf0);
+ for (i = 0; i < buf_size * nprocs; i++)
+ buf0[i] = rank;
+
+#ifdef RUN_ALLTOALLV
+ alltoallvcounts = (int *) malloc (nprocs * sizeof(int));
+ assert (alltoallvcounts);
+#endif
+
+#ifdef RUN_USEROP
+ a = (complex_t *) malloc (buf_size * nprocs * sizeof(complex_t));
+ for (i = 0; i < buf_size * nprocs; i++) {
+ a[i].real = ((double) rank)/((double) nprocs);
+ a[i].imag = ((double) (-rank))/((double) nprocs);
+ }
+
+ MPI_Type_contiguous (2, MPI_DOUBLE, &ctype);
+ MPI_Type_commit (&ctype);
+
+ MPI_Op_create (complex_prod, 1 /* TRUE */, &user_op);
+#endif
+
+ if (rank == 0) {
+ buf1 = (int *) malloc (buf_size * nprocs * sizeof(int));
+ assert (buf1);
+ for (i = 0; i < buf_size * nprocs; i++)
+ buf1[i] = i;
+
+ displs = (int *) malloc (nprocs * sizeof(int));
+ counts = (int *) malloc (nprocs * sizeof(int));
+ rcounts = (int *) malloc (nprocs * sizeof(int));
+ assert (displs && counts && rcounts);
+ for (i = 0; i < nprocs; i++) {
+ displs[i] = i * buf_size;
+ if (i < buf_size)
+ rcounts[i] = counts[i] = i;
+ else
+ rcounts[i] = counts[i] = buf_size;
+ if ((i + rank) < buf_size)
+ alltoallvcounts[i] = i + rank;
+ else
+ alltoallvcounts[i] = buf_size;
+ }
+
+#ifdef RUN_USEROP
+ answer = (complex_t *) malloc (buf_size * nprocs * sizeof(complex_t));
+#endif
+
+#ifdef RUN_BARRIER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Barrier (comm);
+#endif
+
+#ifdef RUN_BCAST
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Bcast (buf0, buf_size, MPI_INT, 0, comm);
+#endif
+
+#ifdef RUN_GATHER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Gather (&buf0[rank*buf_size], buf_size,
+ MPI_INT, buf1, buf_size, MPI_INT, 0, comm);
+#endif
+
+#ifdef RUN_SCATTER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scatter (buf1, buf_size, MPI_INT, buf0, buf_size, MPI_INT, 0, comm);
+#endif
+
+#ifdef RUN_GATHERV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Gatherv (&buf0[rank*buf_size],
+ (rank < buf_size) ? rank : buf_size,
+ MPI_INT, buf1, rcounts, displs, MPI_INT, 0, comm);
+#endif
+
+#ifdef RUN_SCATTERV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scatterv (buf1, counts, displs, MPI_INT, buf0,
+ (rank < buf_size) ? rank : buf_size, MPI_INT, 0, comm);
+#endif
+
+#ifdef RUN_REDUCE
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_MAX, 0, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_MIN, 0, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_SUM, 0, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_PROD, 0, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_LAND, 0, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_BAND, 0, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_LOR, 0, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_BOR, 0, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_LXOR, 0, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_BXOR, 0, comm);
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (a, answer, buf_size, ctype, user_op, 0, comm);
+#endif
+#endif
+
+#ifdef RUN_ALLGATHER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allgather (buf0, buf_size, MPI_INT, buf1, buf_size, MPI_INT, comm);
+#endif
+
+#ifdef RUN_ALLTOALL
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Alltoall (buf1, buf_size, MPI_INT, buf0, buf_size, MPI_INT, comm);
+#endif
+
+#ifdef RUN_ALLGATHERV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allgatherv (buf0,
+ (rank < buf_size) ? rank : buf_size,
+ MPI_INT, buf1, rcounts, displs, MPI_INT, comm);
+#endif
+
+#ifdef RUN_ALLTOALLV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Alltoallv (buf1, alltoallvcounts, displs, MPI_INT,
+ buf0, alltoallvcounts, displs, MPI_INT, comm);
+#endif
+
+#ifdef RUN_ALLREDUCE
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_MAX, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_MIN, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_SUM, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_PROD, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_LAND, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_BAND, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_LOR, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_BOR, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_LXOR, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_BXOR, comm);
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (a, answer, buf_size, ctype, user_op, comm);
+#endif
+#endif
+
+#ifdef RUN_REDUCE_SCATTER
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_MAX, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_MIN, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_SUM, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_PROD, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_LAND, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_BAND, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_LOR, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_BOR, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_LXOR, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_BXOR, comm);
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (a, answer, rcounts, ctype, user_op, comm);
+#endif
+#endif
+
+#ifdef RUN_SCAN
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_MAX, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_MIN, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_SUM, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_PROD, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_LAND, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_BAND, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_LOR, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_BOR, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_LXOR, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_BXOR, comm);
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (a, answer, buf_size, ctype, user_op, comm);
+#endif
+#endif
+ }
+ else {
+ int *ricounts, *rdcounts;
+
+#ifdef RUN_BARRIER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Barrier (comm);
+#endif
+
+#ifdef RUN_BCAST
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Bcast (buf0, buf_size * sizeof(int), MPI_BYTE, 0, comm);
+#endif
+
+#ifdef RUN_GATHER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Gather (&buf0[rank*buf_size], buf_size * sizeof(int),
+ MPI_BYTE, buf1, buf_size * sizeof(int), MPI_BYTE, 0, comm);
+#endif
+
+#ifdef RUN_SCATTER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scatter (buf1, buf_size * sizeof(int), MPI_BYTE,
+ buf0, buf_size * sizeof(int), MPI_BYTE, 0, comm);
+#endif
+
+#ifdef RUN_GATHERV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Gatherv (&buf0[rank*buf_size],
+ ((rank < buf_size) ? rank : buf_size) * sizeof(int),
+ MPI_BYTE, buf1, rcounts, displs, MPI_BYTE, 0, comm);
+#endif
+
+#ifdef RUN_SCATTERV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scatterv (buf1, counts, displs, MPI_BYTE, buf0,
+ ((rank < buf_size) ? rank : buf_size) * sizeof(int),
+ MPI_BYTE, 0, comm);
+#endif
+
+#ifdef RUN_REDUCE
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_MAX, 0, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_MIN, 0, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_SUM, 0, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_PROD, 0, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_LAND, 0, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1,
+ buf_size * sizeof(int), MPI_BYTE, MPI_BAND, 0, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_LOR, 0, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1,
+ buf_size * sizeof(int), MPI_BYTE, MPI_BOR, 0, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_LXOR, 0, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1,
+ buf_size * sizeof(int), MPI_BYTE, MPI_BXOR, 0, comm);
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (a, answer,
+ buf_size * 2 * sizeof(double), MPI_BYTE, user_op, 0, comm);
+#endif
+#endif
+
+ buf1 = (int *) malloc (buf_size * nprocs * sizeof(int));
+ assert (buf1);
+ for (i = 0; i < buf_size * nprocs; i++)
+ buf1[i] = i;
+
+ displs = (int *) malloc (nprocs * sizeof(int));
+ counts = (int *) malloc (nprocs * sizeof(int));
+ rcounts = (int *) malloc (nprocs * sizeof(int));
+ ricounts = (int *) malloc (nprocs * sizeof(int));
+ rdcounts = (int *) malloc (nprocs * sizeof(int));
+ assert (displs && counts && rcounts);
+ for (i = 0; i < nprocs; i++) {
+ displs[i] = i * buf_size * sizeof(int);
+ if (i < buf_size) {
+ rcounts[i] = counts[i] = i * sizeof(int);
+ ricounts[i] = i;
+ rdcounts[i] = i * 2 * sizeof(double);
+ }
+ else {
+ rcounts[i] = counts[i] = buf_size * sizeof(int);
+ ricounts[i] = buf_size;
+ rdcounts[i] = buf_size * 2 * sizeof(double);
+ }
+ if ((i + rank) < buf_size)
+ alltoallvcounts[i] = i + rank * sizeof(int);
+ else
+ alltoallvcounts[i] = buf_size * sizeof(int);
+ }
+
+#ifdef RUN_USEROP
+ answer = (complex_t *) malloc (buf_size * nprocs * sizeof(complex_t));
+#endif
+
+#ifdef RUN_ALLGATHER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allgather (buf0, buf_size * sizeof(int), MPI_BYTE,
+ buf1, buf_size * sizeof(int), MPI_BYTE, comm);
+#endif
+
+#ifdef RUN_ALLTOALL
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Alltoall (buf1, buf_size * sizeof(int), MPI_BYTE,
+ buf0, buf_size * sizeof(int), MPI_BYTE, comm);
+#endif
+
+#ifdef RUN_ALLGATHERV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allgatherv (buf0,
+ ((rank < buf_size) ? rank : buf_size) * sizeof(int),
+ MPI_BYTE, buf1, rcounts, displs, MPI_BYTE, comm);
+#endif
+
+#ifdef RUN_ALLTOALLV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Alltoallv (buf1, alltoallvcounts, displs, MPI_BYTE,
+ buf0, alltoallvcounts, displs, MPI_BYTE, comm);
+#endif
+
+#ifdef RUN_ALLREDUCE
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_MAX, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_MIN, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_SUM, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_PROD, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_LAND, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1,
+ buf_size * sizeof (int), MPI_BYTE, MPI_BAND, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_LOR, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1,
+ buf_size * sizeof (int), MPI_BYTE, MPI_BOR, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_LXOR, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1,
+ buf_size * sizeof (int), MPI_BYTE, MPI_BXOR, comm);
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (a, answer,
+ buf_size * 2 * sizeof (double), MPI_BYTE, user_op, comm);
+#endif
+#endif
+
+#ifdef RUN_REDUCE_SCATTER
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, ricounts, MPI_UNSIGNED, MPI_MAX, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, ricounts, MPI_UNSIGNED, MPI_MIN, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, ricounts, MPI_UNSIGNED, MPI_SUM, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, ricounts, MPI_UNSIGNED, MPI_PROD, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, ricounts, MPI_UNSIGNED, MPI_LAND, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_BYTE, MPI_BAND, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, ricounts, MPI_UNSIGNED, MPI_LOR, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_BYTE, MPI_BOR, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, ricounts, MPI_UNSIGNED, MPI_LXOR, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_BYTE, MPI_BXOR, comm);
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (a, answer, rdcounts, MPI_BYTE, user_op, comm);
+#endif
+#endif
+
+#ifdef RUN_SCAN
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_MAX, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_MIN, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_SUM, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_PROD, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_LAND, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size * sizeof(int), MPI_BYTE, MPI_BAND, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_LOR, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size * sizeof(int), MPI_BYTE, MPI_BOR, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_UNSIGNED, MPI_LXOR, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size * sizeof(int), MPI_BYTE, MPI_BXOR, comm);
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (a, answer,
+ buf_size * 2 * sizeof(double), MPI_BYTE, user_op, comm);
+#endif
+#endif
+ }
+
+#ifdef RUN_BARRIER
+ MPI_Barrier (comm);
+#endif
+
+#ifdef RUN_USEROP
+ free (a);
+ free (answer);
+ MPI_Op_free (&user_op);
+ MPI_Type_free (&ctype);
+#endif
+
+#ifdef RUN_ALLTOALLV
+ free (alltoallvcounts);
+#endif
+
+ free (buf0);
+ free (buf1);
+ free (displs);
+ free (counts);
+ free (rcounts);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+
+ return 0;
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Thu Oct 17 2002 */
+/* collective-exhaustive-no-error.c -- do many collective operations */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/collective-exhaustive-no-error.c,v 1.1 2002/10/24 17:04:54 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include "mpi.h"
+
+
+#define RUN_BARRIER
+#define RUN_BCAST
+#define RUN_GATHER
+#define RUN_GATHERV
+#define RUN_SCATTER
+#define RUN_SCATTERV
+#define RUN_ALLGATHER
+#define RUN_ALLGATHERV
+#define RUN_ALLTOALL
+#define RUN_ALLTOALLV
+#define RUN_REDUCE
+#define RUN_ALLREDUCE
+#define RUN_REDUCE_SCATTER
+#define RUN_SCAN
+
+
+#define RUN_MAX
+#define RUN_MIN
+#define RUN_SUM
+#define RUN_PROD
+#define RUN_LAND
+#define RUN_BAND
+#define RUN_LOR
+#define RUN_BOR
+#define RUN_LXOR
+#define RUN_BXOR
+#define RUN_MAXLOC
+#define RUN_MINLOC
+#define RUN_USEROP
+
+
+#define buf_size 128
+#define OP_COUNT 10
+
+
+#ifdef RUN_USEROP
+typedef struct {
+ double real, imag;
+} complex_t;
+
+void
+complex_prod (void *inp, void *inoutp, int *len, MPI_Datatype *dptr)
+{
+ int i;
+ complex_t c;
+ complex_t *in = (complex_t *) inp;
+ complex_t *inout = (complex_t *) inoutp;
+
+ for (i = 0; i < *len; i++) {
+ c.real = inout->real * in->real - inout->imag * in->imag;
+ c.imag = inout->real * in->imag + inout->imag * in->real;
+ *inout = c;
+ in++;
+ inout++;
+ }
+
+ return;
+}
+#endif
+
+
+int
+main (int argc, char **argv)
+{
+ int i, nprocs = -1;
+ int rank = -1;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ int *buf0, *buf1, *displs, *counts, *rcounts, *alltoallvcounts;
+#ifdef RUN_USEROP
+ MPI_Op user_op;
+ complex_t *a, *answer;
+ MPI_Datatype ctype;
+#endif
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ buf0 = (int *) malloc (buf_size * nprocs * sizeof(int));
+ assert (buf0);
+ for (i = 0; i < buf_size * nprocs; i++)
+ buf0[i] = rank;
+
+#ifdef RUN_ALLTOALLV
+ alltoallvcounts = (int *) malloc (nprocs * sizeof(int));
+ assert (alltoallvcounts);
+ for (i = 0; i < nprocs; i++)
+ if ((i + rank) < buf_size)
+ alltoallvcounts[i] = i + rank;
+ else
+ alltoallvcounts[i] = buf_size;
+#endif
+
+#ifdef RUN_USEROP
+ a = (complex_t *) malloc (buf_size * nprocs * sizeof(complex_t));
+ for (i = 0; i < buf_size * nprocs; i++) {
+ a[i].real = ((double) rank)/((double) nprocs);
+ a[i].imag = ((double) (-rank))/((double) nprocs);
+ }
+
+ MPI_Type_contiguous (2, MPI_DOUBLE, &ctype);
+ MPI_Type_commit (&ctype);
+
+ MPI_Op_create (complex_prod, 1 /* TRUE */, &user_op);
+#endif
+
+ if (rank == 0) {
+ buf1 = (int *) malloc (buf_size * nprocs * sizeof(int));
+ assert (buf1);
+ for (i = 0; i < buf_size * nprocs; i++)
+ buf1[i] = i;
+
+ displs = (int *) malloc (nprocs * sizeof(int));
+ counts = (int *) malloc (nprocs * sizeof(int));
+ rcounts = (int *) malloc (nprocs * sizeof(int));
+ assert (displs && counts && rcounts);
+ for (i = 0; i < nprocs; i++) {
+ displs[i] = i * buf_size;
+ if (i < buf_size)
+ rcounts[i] = counts[i] = i;
+ else
+ rcounts[i] = counts[i] = buf_size;
+ }
+
+#ifdef RUN_USEROP
+ answer = (complex_t *) malloc (buf_size * nprocs * sizeof(complex_t));
+#endif
+ }
+
+#ifdef RUN_BARRIER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Barrier (comm);
+#endif
+
+#ifdef RUN_BCAST
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Bcast (buf0, buf_size, MPI_INT, 0, comm);
+#endif
+
+#ifdef RUN_GATHER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Gather (&buf0[rank*buf_size], buf_size,
+ MPI_INT, buf1, buf_size, MPI_INT, 0, comm);
+#endif
+
+#ifdef RUN_SCATTER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scatter (buf1, buf_size, MPI_INT, buf0, buf_size, MPI_INT, 0, comm);
+#endif
+
+#ifdef RUN_GATHERV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Gatherv (&buf0[rank*buf_size],
+ (rank < buf_size) ? rank : buf_size,
+ MPI_INT, buf1, rcounts, displs, MPI_INT, 0, comm);
+#endif
+
+#ifdef RUN_SCATTERV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scatterv (buf1, counts, displs, MPI_INT, buf0,
+ (rank < buf_size) ? rank : buf_size, MPI_INT, 0, comm);
+#endif
+
+#ifdef RUN_REDUCE
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_MAX, 0, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_MIN, 0, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_SUM, 0, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_PROD, 0, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_LAND, 0, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_BAND, 0, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_LOR, 0, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_BOR, 0, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_LXOR, 0, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_INT, MPI_BXOR, 0, comm);
+#endif
+
+#ifdef RUN_MAXLOC
+ if (nprocs > 1)
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_2INT, MPI_MAXLOC, 0, comm);
+ else
+ fprintf (stderr, "Not enough tasks for MAXLOC test\n");
+#endif
+
+#ifdef RUN_MINLOC
+ if (nprocs > 1)
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (buf0, buf1, buf_size, MPI_2INT, MPI_MINLOC, 0, comm);
+ else
+ fprintf (stderr, "Not enough tasks for MINLOC test\n");
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce (a, answer, buf_size, ctype, user_op, 0, comm);
+#endif
+#endif
+
+ if (rank != 0) {
+ buf1 = (int *) malloc (buf_size * nprocs * sizeof(int));
+ assert (buf1);
+ for (i = 0; i < buf_size * nprocs; i++)
+ buf1[i] = i;
+
+ displs = (int *) malloc (nprocs * sizeof(int));
+ counts = (int *) malloc (nprocs * sizeof(int));
+ rcounts = (int *) malloc (nprocs * sizeof(int));
+ assert (displs && counts && rcounts);
+ for (i = 0; i < nprocs; i++) {
+ displs[i] = i * buf_size;
+ if (i < buf_size)
+ rcounts[i] = counts[i] = i;
+ else
+ rcounts[i] = counts[i] = buf_size;
+ }
+
+#ifdef RUN_USEROP
+ answer = (complex_t *) malloc (buf_size * nprocs * sizeof(complex_t));
+#endif
+ }
+
+#ifdef RUN_ALLGATHER
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allgather (buf0, buf_size, MPI_INT, buf1, buf_size, MPI_INT, comm);
+#endif
+
+#ifdef RUN_ALLTOALL
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Alltoall (buf1, buf_size, MPI_INT, buf0, buf_size, MPI_INT, comm);
+#endif
+
+#ifdef RUN_ALLGATHERV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allgatherv (buf0,
+ (rank < buf_size) ? rank : buf_size,
+ MPI_INT, buf1, rcounts, displs, MPI_INT, comm);
+#endif
+
+#ifdef RUN_ALLTOALLV
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Alltoallv (buf1, alltoallvcounts, displs, MPI_INT,
+ buf0, alltoallvcounts, displs, MPI_INT, comm);
+#endif
+
+#ifdef RUN_ALLREDUCE
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_MAX, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_MIN, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_SUM, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_PROD, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_LAND, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_BAND, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_LOR, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_BOR, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_LXOR, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_INT, MPI_BXOR, comm);
+#endif
+
+#ifdef RUN_MAXLOC
+ if (nprocs > 1)
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_2INT, MPI_MAXLOC, comm);
+ else
+ fprintf (stderr, "Not enough tasks for MAXLOC test\n");
+#endif
+
+#ifdef RUN_MINLOC
+ if (nprocs > 1)
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (buf0, buf1, buf_size, MPI_2INT, MPI_MINLOC, comm);
+ else
+ fprintf (stderr, "Not enough tasks for MINLOC test\n");
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Allreduce (a, answer, buf_size, ctype, user_op, comm);
+#endif
+#endif
+
+#ifdef RUN_REDUCE_SCATTER
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_MAX, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_MIN, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_SUM, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_PROD, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_LAND, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_BAND, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_LOR, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_BOR, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_LXOR, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_INT, MPI_BXOR, comm);
+#endif
+
+#ifdef RUN_MAXLOC
+ if (nprocs > 1)
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_2INT, MPI_MAXLOC, comm);
+ else
+ fprintf (stderr, "Not enough tasks for MAXLOC test\n");
+#endif
+
+#ifdef RUN_MINLOC
+ if (nprocs > 1)
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (buf0, buf1, rcounts, MPI_2INT, MPI_MINLOC, comm);
+ else
+ fprintf (stderr, "Not enough tasks for MINLOC test\n");
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Reduce_scatter (a, answer, rcounts, ctype, user_op, comm);
+#endif
+#endif
+
+#ifdef RUN_SCAN
+#ifdef RUN_MAX
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_MAX, comm);
+#endif
+
+#ifdef RUN_MIN
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_MIN, comm);
+#endif
+
+#ifdef RUN_SUM
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_SUM, comm);
+#endif
+
+#ifdef RUN_PROD
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_PROD, comm);
+#endif
+
+#ifdef RUN_LAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_LAND, comm);
+#endif
+
+#ifdef RUN_BAND
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_BAND, comm);
+#endif
+
+#ifdef RUN_LOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_LOR, comm);
+#endif
+
+#ifdef RUN_BOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_BOR, comm);
+#endif
+
+#ifdef RUN_LXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_LXOR, comm);
+#endif
+
+#ifdef RUN_BXOR
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_INT, MPI_BXOR, comm);
+#endif
+
+#ifdef RUN_MAXLOC
+ if (nprocs > 1)
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_2INT, MPI_MAXLOC, comm);
+ else
+ fprintf (stderr, "Not enough tasks for MAXLOC test\n");
+#endif
+
+#ifdef RUN_MINLOC
+ if (nprocs > 1)
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (buf0, buf1, buf_size, MPI_2INT, MPI_MINLOC, comm);
+ else
+ fprintf (stderr, "Not enough tasks for MINLOC test\n");
+#endif
+
+#ifdef RUN_USEROP
+ for (i = 0; i < OP_COUNT; i++)
+ MPI_Scan (a, answer, buf_size, ctype, user_op, comm);
+#endif
+#endif
+
+#ifdef RUN_BARRIER
+ MPI_Barrier (comm);
+#endif
+
+#ifdef RUN_USEROP
+ free (a);
+ free (answer);
+ MPI_Op_free (&user_op);
+ MPI_Type_free (&ctype);
+#endif
+
+#ifdef RUN_ALLTOALLV
+ free (alltoallvcounts);
+#endif
+
+ free (buf0);
+ free (buf1);
+ free (displs);
+ free (counts);
+ free (rcounts);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+
+ return 0;
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Jeffrey Vetter (j-vetter@llnl.gov) Mon Nov 1 1999 */
+/* collective-misorder-allreduce.c -- do some collective operations (w/ one of them out of order) */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/collective-misorder-allreduce.c,v 1.2 2000/12/04 19:09:45 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ int sbuf[buf_size];
+ int rbuf[buf_size];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ memset (sbuf, 0, buf_size);
+ memset (rbuf, 1, buf_size);
+
+ MPI_Barrier (comm);
+
+ switch (rank)
+ {
+ case 0:
+ MPI_Reduce(sbuf,rbuf,1,MPI_INT,MPI_MAX,0,comm);
+ break;
+
+ default:
+ MPI_Allreduce(sbuf,rbuf,1,MPI_INT, MPI_MAX, comm);
+ break;
+ }
+
+ MPI_Barrier(comm);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/collective-misorder-allreduce
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 23
+> [0.000000] [mc_global/INFO] Visited states = 23
+> [0.000000] [mc_global/INFO] Executed transitions = 22
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Jeffrey Vetter (j-vetter@llnl.gov) Mon Nov 1 1999 */
+/* collective-misorder.c -- do some collective operations (w/ one of them out of order) */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/collective-misorder.c,v 1.2 2000/12/04 19:09:45 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ memset (buf0, 0, buf_size);
+ memset (buf1, 1, buf_size);
+
+ MPI_Barrier (comm);
+ MPI_Barrier (comm);
+
+ switch (rank)
+ {
+ case 0:
+ MPI_Bcast (buf0, buf_size, MPI_INT, 1, comm); /* note that I didn't use root == 0 */
+ MPI_Barrier (comm);
+ break;
+
+ default:
+ MPI_Barrier (comm);
+ MPI_Bcast (buf0, buf_size, MPI_INT, 1, comm);
+ break;
+ }
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/collective-misorder
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 45
+> [0.000000] [mc_global/INFO] Visited states = 45
+> [0.000000] [mc_global/INFO] Executed transitions = 44
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Jeffrey Vetter (j-vetter@llnl.gov) Mon Nov 1 1999 */
+/* collective-misorder2.c -- do some collective operations (w/ one of them out of order) */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/collective-misorder2.c,v 1.1 2002/01/05 00:19:39 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ memset (buf0, 0, buf_size);
+ memset (buf1, 1, buf_size);
+
+ MPI_Barrier (comm);
+ MPI_Barrier (comm);
+
+ switch (rank)
+ {
+ case 0:
+ MPI_Bcast (buf0, buf_size, MPI_INT, 0, comm);
+ MPI_Barrier (comm);
+ break;
+
+ default:
+ MPI_Barrier (comm);
+ MPI_Bcast (buf0, buf_size, MPI_INT, 0, comm);
+ break;
+ }
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Tue Aug 12 2003 */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/comm-bcast-deadlock.c,v 1.1 2003/09/02 15:57:49 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Comm inverted_comm;
+ int bcast_rank;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs != 3)
+ {
+ printf ("Incorrect number of tasks; exactly 3 required\n");
+ }
+ else {
+ /* create inverted communicator... */
+ MPI_Comm_split (comm, 0, nprocs - rank, &inverted_comm);
+
+ if (rank == 1) {
+ MPI_Bcast (&rank, 1, MPI_INT, 1, inverted_comm);
+ MPI_Bcast (&bcast_rank, 1, MPI_INT, 2, comm);
+ }
+ else if (rank == 2) {
+ MPI_Bcast (&rank, 1, MPI_INT, 2, comm);
+ MPI_Bcast (&bcast_rank, 1, MPI_INT, 1, inverted_comm);
+ }
+ else {
+ MPI_Bcast (&bcast_rank, 1, MPI_INT, 2, comm);
+ MPI_Bcast (&bcast_rank, 1, MPI_INT, 1, inverted_comm);
+ }
+ }
+
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Jeffrey Vetter (vetter3@llnl.gov) Thu Feb 24 2000 */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/comm-deadlock.c,v 1.2 2000/12/04 19:09:45 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Comm nc1;
+ int dat = 1234;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (comm);
+
+ if (rank == 0)
+ {
+ printf ("Creating first new comm\n");
+ }
+ {
+ int color = rank % 2;
+ int key = 1;
+ int nrank;
+ int nsize;
+ MPI_Comm_split (comm, color, key, &nc1);
+ MPI_Comm_size (nc1, &nsize);
+ MPI_Comm_rank (nc1, &nrank);
+ printf ("world task %d/%d/%d maps to new comm task %d/%d/%d\n",
+ comm, nprocs, rank, nc1, nsize, nrank);
+ }
+
+ MPI_Barrier (comm);
+
+ printf ("Entering deadlock state.....\n");
+
+ if (rank == 1)
+ {
+ MPI_Bcast (&dat, 1, MPI_INT, 0, nc1);
+ }
+ else
+ {
+ MPI_Bcast (&dat, 1, MPI_INT, 0, comm);
+ }
+
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Dec 20 2002 */
+
+/* comm-dup-no-error.c - "correctly" use many communicators... */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/comm-dup-no-error.c,v 1.1 2003/01/13 18:31:47 bronis Exp $";
+#endif
+
+/* NOTE: Some value of ITERATIONS will imply resource exhaustion */
+/* either in Umpire or MPI no matter how things are implemented */
+/* the best we can hope for is to fail gracefully... */
+/* Approximately 4100 gets "ERROR: 0032-160 Too many communicators" */
+/* with IBM's MPI (AIX 5.1.0, PSSP 3.4) as of 1/13/03... */
+/* Umpire failure is graceful - comm creates are identified... */
+/* UNKNOWN N breaks umpire due to running out of memory as of 1/13/03... */
+/* UMPIRE FAILURE IS NOT GRACEFUL AS OF THIS TIME IN THIS CASE... */
+#define ITERATIONS 10
+
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int i;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Comm newcomm[ITERATIONS];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ MPI_Comm_dup (MPI_COMM_WORLD, &newcomm[i]);
+ }
+
+ for (i = 0; i < ITERATIONS; i++) {
+ MPI_Barrier (newcomm[i]);
+ }
+
+
+ for (i = 0; i < ITERATIONS; i++) {
+ MPI_Comm_free (&newcomm[i]);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Jeffrey Vetter (vetter3@llnl.gov) Thu Feb 24 2000 */
+
+/* comm-dup-no-free.c -- duplicate a communicator without ever freeing it */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/comm-dup-no-free.c,v 1.1.1.1 2000/08/23 17:28:26 vetter Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Comm newcomm;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (comm);
+ MPI_Comm_dup (comm, &newcomm); /* not freed */
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Dec 20 2002 */
+
+/* comm-dup-no-free2.c - leak many communicators created with comm dup */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/comm-dup-no-free2.c,v 1.1 2003/01/13 18:31:47 bronis Exp $";
+#endif
+
+/* NOTE: Some value of ITERATIONS will imply resource exhaustion */
+/* either in Umpire or MPI no matter how things are implemented */
+/* the best we can hope for is to fail gracefully... */
+/* Approximately 4100 gets "ERROR: 0032-160 Too many communicators" */
+/* with IBM's MPI (AIX 5.1.0, PSSP 3.4) as of 1/13/03... */
+/* Umpire failure is graceful - comm creates are identified... */
+/* UNKNOWN N breaks umpire due to running out of memory as of 1/13/03... */
+/* UMPIRE FAILURE IS NOT GRACEFUL AS OF THIS TIME IN THIS CASE... */
+#define ITERATIONS 10
+#define COMMS_PER_ITERATION 3
+#define COMMS_LOST_PER_ITERATION 1
+
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int i, j;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Comm newcomm[COMMS_PER_ITERATION];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < COMMS_PER_ITERATION; j++) {
+ MPI_Comm_dup (MPI_COMM_WORLD, &newcomm[j]);
+
+ MPI_Barrier (newcomm[j]);
+
+ if (j < COMMS_PER_ITERATION - COMMS_LOST_PER_ITERATION) {
+ MPI_Comm_free (&newcomm[j]);
+ }
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Jeffrey Vetter (vetter3@llnl.gov) Thu Feb 24 2000 */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/comm-simple.c,v 1.2 2000/12/04 19:09:45 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Comm newcomm;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ printf ("WARNING: doesn't really deadlock yet! work-in-progress.\n");
+
+ MPI_Barrier (comm);
+
+ {
+ int color = rank % 2;
+ int key = 1;
+ int nrank;
+ int nsize;
+ int dat = 0;
+
+ MPI_Comm_split (comm, color, key, &newcomm);
+
+ MPI_Comm_size (newcomm, &nsize);
+ MPI_Comm_rank (newcomm, &nrank);
+ printf ("world task %d/%d/%d maps to new comm task %d/%d/%d\n",
+ comm, nprocs, rank, newcomm, nsize, nrank);
+
+ if (nrank == 0)
+ {
+ dat = 1000 + color;
+ }
+
+ MPI_Bcast (&dat, 1, MPI_INT, 0, newcomm);
+
+ printf ("world task %d/%d/%d maps to new comm task %d/%d/%d --> %d\n",
+ comm, nprocs, rank, newcomm, nsize, nrank, dat);
+ }
+
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Jeffrey Vetter (vetter3@llnl.gov) Thu Feb 24 2000 */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/comm-split-no-free.c,v 1.1.1.1 2000/08/23 17:28:26 vetter Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Comm newcomm;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (comm);
+ {
+ int color = rank % 2;
+ int key = 1;
+ MPI_Comm_split (comm, color, key, &newcomm); /* not freed */
+ }
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Jeffrey Vetter (vetter3@llnl.gov) Thu Feb 24 2000 */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/comm-translate-ranks.c,v 1.1.1.1 2000/08/23 17:28:26 vetter Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Comm newcomm;
+ int key = -1;
+ int nrank;
+ int nsize;
+ int dat = 0;
+ int color = -1;
+
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (comm);
+
+ {
+ color = rank % 2;
+ key = 1;
+ MPI_Comm_split (comm, color, key, &newcomm);
+
+ MPI_Comm_size (newcomm, &nsize);
+ MPI_Comm_rank (newcomm, &nrank);
+ printf ("world task %d/%d/%d maps to new comm task %d/%d/%d\n",
+ comm, nprocs, rank, newcomm, nsize, nrank);
+ }
+
+ MPI_Barrier (comm);
+
+ /* after every comm constructor, fetch the rank translation from the
+ 0 rank of that comm (assume that there is a 0). */
+
+ if (nrank == 0)
+ {
+ int i;
+ MPI_Group wcGroup;
+ int wcRanks[128];
+ MPI_Group ncGroup;
+ int ncRanks[128];
+
+ MPI_Comm_group (comm, &wcGroup);
+ MPI_Comm_group (newcomm, &ncGroup);
+
+ for (i = 0; i < nprocs; i++)
+ {
+ wcRanks[i] = i;
+ }
+
+ MPI_Group_translate_ranks (wcGroup, nprocs, wcRanks, ncGroup, ncRanks);
+
+ for (i = 0; i < nprocs; i++)
+ {
+ if (ncRanks[i] == MPI_UNDEFINED)
+ {
+ printf ("World rank %d ->\tUNDEFINED\n", wcRanks[i]);
+ }
+ else
+ {
+ printf ("World rank %d ->\t%d\n", wcRanks[i], ncRanks[i]);
+ }
+ }
+ }
+
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* complex-deadlock.c -- deadlock: task 0 waits on a recv from task 1, which first waits on a recv from task 0 */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ int zero_to_two;
+ MPI_Status status;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ memset (buf0, 0, buf_size);
+
+ zero_to_two = 1;
+
+ MPI_Send (&zero_to_two, 1, MPI_INT, 2, 0, MPI_COMM_WORLD);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 1)
+ {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 2)
+ {
+
+ MPI_Recv (&zero_to_two, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/complex-deadlock
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 26
+> [0.000000] [mc_global/INFO] Visited states = 26
+> [0.000000] [mc_global/INFO] Executed transitions = 25
+> Aborted
+
--- /dev/null
+/* $Header: /usr/gapps/asde/cvs-vault/umpire/tests/deadlock-config.c,v 1.2 2001/09/20 22:27:28 bronis Exp $ */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 32000
+
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ float data[buf_size];
+ int tag = 30;
+ char processor_name[128];
+ int namelen = buf_size;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ if (rank == 0)
+ {
+ printf ("WARNING: This test depends on the MPI's eager limit. "
+ "Set it appropriately.\n");
+ }
+ printf ("Initializing (%d of %d)\n", rank, nprocs);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+ {
+ int dest = (rank == nprocs - 1) ? (0) : (rank + 1);
+ data[0] = rank;
+ MPI_Send (data, buf_size, MPI_FLOAT, dest, tag, MPI_COMM_WORLD);
+ printf ("(%d) sent data %f\n", rank, data[0]);
+ fflush (stdout);
+ }
+ {
+ int src = (rank == 0) ? (nprocs - 1) : (rank - 1);
+ MPI_Status status;
+ MPI_Recv (data, buf_size, MPI_FLOAT, src, tag, MPI_COMM_WORLD, &status);
+ printf ("(%d) got data %f\n", rank, data[0]);
+ fflush (stdout);
+ }
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+ /* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/deadlock-config
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> WARNING: This test depends on the MPI's eager limit. Set it appropriately.
+> Initializing (0 of 3)
+> (0) is alive on Tremblay
+> Initializing (1 of 3)
+> (1) is alive on Jupiter
+> Initializing (2 of 3)
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] Expanded states = 4
+> [0.000000] [mc_global/INFO] Visited states = 4
+> [0.000000] [mc_global/INFO] Executed transitions = 3
+> Aborted
+
--- /dev/null
+/* $Header: /usr/gapps/asde/cvs-vault/umpire/tests/deadlock-config.c,v 1.2 2001/09/20 22:27:28 bronis Exp $ */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 32000
+
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ float data[buf_size];
+ int tag = 30;
+ char processor_name[128];
+ int namelen = buf_size;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ if (rank == 0)
+ {
+ printf ("WARNING: This test depends on the MPI's eager limit. "
+ "Set it appropriately.\n");
+ }
+ printf ("Initializing (%d of %d)\n", rank, nprocs);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+ {
+ int dest = (rank == nprocs - 1) ? (0) : (rank + 1);
+ data[0] = rank;
+ MPI_Send (data, buf_size, MPI_FLOAT, dest, tag, MPI_COMM_WORLD);
+ printf ("(%d) sent data %f\n", rank, data[0]);
+ fflush (stdout);
+ }
+ {
+ int src = (rank == 0) ? (nprocs - 1) : (rank - 1);
+ MPI_Status status;
+ MPI_Recv (data, buf_size, MPI_FLOAT, src, tag, MPI_COMM_WORLD, &status);
+ printf ("(%d) got data %f\n", rank, data[0]);
+ fflush (stdout);
+ }
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+ /* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Wed Nov 29 2000 */
+/* dropped-req.c -- create a request that's never matched */
+/* NOTE: THIS TEST ASSUMES THAT MPI LIBRARY USES EAGER SENDS IF */
+/* BUFFER IS ZERO BYTES; WILL DEADLOCK IN WHILE LOOP IF FALSE */
+
+
+/* NOTE: Some value of ITERATIONS will imply resource exhaustion */
+/* either in Umpire or MPI no matter how things are implemented */
+/* the best we can hope for is to fail gracefully... */
+/* 10000 breaks umpire due to running out of memory as of 12/20/02... */
+/* FAILURE IS NOT GRACEFUL AS OF THIS TIME... */
+#define ITERATIONS 10
+
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include "mpi.h"
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int tag = 31;
+ int i;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Request req;
+ MPI_Status status;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier(comm);
+
+ /* 0 sends task nprocs-1 a message that is never received */
+ if (rank == 0) {
+ for (i = 0; i < ITERATIONS; i++) {
+ int flag = 0;
+ MPI_Isend (&tag, 0, MPI_BYTE, nprocs - 1, tag, comm, &req);
+
+ while (!flag)
+ MPI_Test (&req, &flag, &status);
+ }
+ }
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* errhandler-no-error.c -- construct some MPI_Errhandlers and free them */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/errhandler-no-error.c,v 1.1 2002/05/29 16:09:47 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+/* multiple instances of same errhandler to exercise more Umpire code... */
+#define ERRHANDLER_COUNT 5
+
+
+
+void
+myErrhandler (MPI_Comm *comm, int *errorcode, ...)
+{
+ char buf[MPI_MAX_ERROR_STRING];
+ int error_strlen;
+
+ /* print alert */
+ fprintf (stderr, "Caught an MPI Error! Time to abort!\n");
+
+ /* get and print MPI error message... */
+ MPI_Error_string (*(errorcode), buf, &error_strlen);
+ fprintf (stderr, "%s\n", buf);
+
+ MPI_Abort (*comm, *errorcode);
+
+ return;
+}
+
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ int i;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Errhandler newerrhandler[ERRHANDLER_COUNT];
+ MPI_Errhandler newerrhandler2[ERRHANDLER_COUNT];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (comm);
+
+ for (i = 0; i < ERRHANDLER_COUNT; i++)
+ MPI_Errhandler_create (myErrhandler, &newerrhandler[i]);
+
+ for (i = 0; i < ERRHANDLER_COUNT; i++)
+ MPI_Errhandler_free (&newerrhandler[i]);
+
+ MPI_Barrier (comm);
+
+ /* now with an alias... */
+
+ for (i = 0; i < ERRHANDLER_COUNT; i++)
+ MPI_Errhandler_create (myErrhandler, &newerrhandler[i]);
+
+ for (i = 0; i < ERRHANDLER_COUNT; i++) {
+ newerrhandler2[i] = newerrhandler[i];
+ MPI_Errhandler_free (&newerrhandler2[i]);
+ }
+
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* errhandler-no-free.c -- construct some MPI_Errhandlers without freeing them */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/errhandler-no-free.c,v 1.1 2002/05/29 16:09:48 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+/* multiple instances of same errhandler to exercise more Umpire code... */
+#define ERRHANDLER_COUNT 5
+
+
+
+void
+myErrhandler (MPI_Comm *comm, int *errorcode, ...)
+{
+ char buf[MPI_MAX_ERROR_STRING];
+ int error_strlen;
+
+ /* print alert */
+ fprintf (stderr, "Caught an MPI Error! Time to abort!\n");
+
+ /* get and print MPI error message... */
+ MPI_Error_string (*(errorcode), buf, &error_strlen);
+ fprintf (stderr, "%s\n", buf);
+
+ MPI_Abort (*comm, *errorcode);
+
+ return;
+}
+
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ int i;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Errhandler newerrhandler[ERRHANDLER_COUNT];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (comm);
+
+ for (i = 0; i < ERRHANDLER_COUNT; i++)
+ MPI_Errhandler_create (myErrhandler, &newerrhandler[i]);
+
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* finalize-deadlock.c -- deadlock caused by calling MPI_Finalize */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int its_raining = 0;
+ MPI_Status status;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 2)
+ {
+ printf ("not enough tasks\n");
+ }
+ else if (rank == 0)
+ {
+ MPI_Recv (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &status);
+ }
+ else if ((rank == 1) && (its_raining))
+ {
+ /* This code is never executed... */
+ memset (buf0, 1, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+#! ./tesh
+
+! expect return 134
+$ ${bindir:=.}/../../../../bin/smpirun -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml -np 3 --cfg=model-check:1 --cfg=smpi/running_power:1e9 --cfg=smpi/coll_selector:mpich ${bindir:=.}/finalize-deadlock
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP_gamma' to '4194304'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'model-check' to '1'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/running_power' to '1e9'
+> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/coll_selector' to 'mpich'
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0.000000] [mc_global/INFO] Check a safety property
+> [0.000000] [mc_global/INFO] Get debug information ...
+> [0.000000] [mc_global/INFO] Get debug information done !
+> (0) is alive on Tremblay
+> (1) is alive on Jupiter
+> (2) is alive on Fafard
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] *** DEAD-LOCK DETECTED ***
+> [0.000000] [mc_global/INFO] **************************
+> [0.000000] [mc_global/INFO] Locked request:
+> [0.000000] [mc_global/INFO] Counter-example execution trace:
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iSend(src=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] iRecv(dst=(2)Jupiter (1), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(3)Fafard (2)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iSend(src=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (1)Tremblay (0)])
+> [0.000000] [mc_global/INFO] [(1)Tremblay (0)] iRecv(dst=(1)Tremblay (0), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(2)Jupiter (1)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iSend(src=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(2)Jupiter (1)] Wait(comm=(verbose only) [(3)Fafard (2)-> (2)Jupiter (1)])
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] iRecv(dst=(3)Fafard (2), buff=(verbose only), size=(verbose only))
+> [0.000000] [mc_global/INFO] [(3)Fafard (2)] Wait(comm=(verbose only) [(1)Tremblay (0)-> (3)Fafard (2)])
+> [0.000000] [mc_global/INFO] Expanded states = 20
+> [0.000000] [mc_global/INFO] Visited states = 20
+> [0.000000] [mc_global/INFO] Executed transitions = 19
+> (1) Finished normally
+> (2) Finished normally
+> Aborted
+
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* group-no-error-exhaustive.c -- use all group constructors correctly */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/group-no-error-exhaustive.c,v 1.2 2002/07/30 21:34:42 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+
+#define GROUP_CONSTRUCTOR_COUNT 8
+#define INTERCOMM_CREATE_TAG 666
+
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ int i;
+ int ranks[2], ranges[1][3];
+ MPI_Group newgroup[GROUP_CONSTRUCTOR_COUNT];
+ MPI_Group newgroup2[GROUP_CONSTRUCTOR_COUNT];
+ MPI_Comm temp;
+ MPI_Comm intercomm = MPI_COMM_NULL;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ ranks[0] = 0;
+ ranks[1] = 1;
+
+ ranges[0][0] = 0;
+ ranges[0][1] = 2;
+ ranges[0][2] = 2;
+
+ MPI_Barrier (comm);
+
+ if (nprocs < 3) {
+ printf ("requires at least 3 tasks\n");
+ }
+ else {
+ /* create the groups */
+ if (GROUP_CONSTRUCTOR_COUNT > 0)
+ MPI_Comm_group (MPI_COMM_WORLD, &newgroup[0]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 1)
+ MPI_Group_incl (newgroup[0], 2, ranks, &newgroup[1]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 2)
+ MPI_Group_excl (newgroup[0], 2, ranks, &newgroup[2]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 3)
+ MPI_Group_range_incl (newgroup[0], 1, ranges, &newgroup[3]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 4)
+ MPI_Group_range_excl (newgroup[0], 1, ranges, &newgroup[4]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 5)
+ MPI_Group_union (newgroup[1], newgroup[3], &newgroup[5]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 6)
+ MPI_Group_intersection (newgroup[5], newgroup[2], &newgroup[6]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 7)
+ MPI_Group_difference (newgroup[5], newgroup[2], &newgroup[7]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 8) {
+ /* need lots of stuff for this constructor... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &temp);
+
+ if (rank % 3) {
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD,
+ (((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
+ nprocs - 1 : nprocs - (rank % 3) - (nprocs % 3),
+ INTERCOMM_CREATE_TAG, &intercomm);
+
+ MPI_Comm_remote_group (intercomm, &newgroup[8]);
+
+ MPI_Comm_free (&intercomm);
+ }
+ else {
+ MPI_Comm_group (temp, &newgroup[8]);
+ }
+
+ MPI_Comm_free (&temp);
+ }
+
+ for (i = 0; i < GROUP_CONSTRUCTOR_COUNT; i++)
+ MPI_Group_free (&newgroup[i]);
+
+ MPI_Barrier (comm);
+
+ /* create the groups again and free with an alias... */
+ if (GROUP_CONSTRUCTOR_COUNT > 0)
+ MPI_Comm_group (MPI_COMM_WORLD, &newgroup[0]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 1)
+ MPI_Group_incl (newgroup[0], 2, ranks, &newgroup[1]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 2)
+ MPI_Group_excl (newgroup[0], 2, ranks, &newgroup[2]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 3)
+ MPI_Group_range_incl (newgroup[0], 1, ranges, &newgroup[3]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 4)
+ MPI_Group_range_excl (newgroup[0], 1, ranges, &newgroup[4]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 5)
+ MPI_Group_union (newgroup[1], newgroup[3], &newgroup[5]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 6)
+ MPI_Group_intersection (newgroup[5], newgroup[2], &newgroup[6]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 7)
+ MPI_Group_difference (newgroup[5], newgroup[2], &newgroup[7]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 8) {
+ /* need lots of stuff for this constructor... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &temp);
+
+ if (rank % 3) {
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD,
+ (((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
+ nprocs - 1 : nprocs - (rank % 3) - (nprocs % 3),
+ INTERCOMM_CREATE_TAG, &intercomm);
+
+ MPI_Comm_remote_group (intercomm, &newgroup[8]);
+
+ MPI_Comm_free (&intercomm);
+ }
+ else {
+ MPI_Comm_group (temp, &newgroup[8]);
+ }
+
+ MPI_Comm_free (&temp);
+ }
+
+ for (i = 0; i < GROUP_CONSTRUCTOR_COUNT; i++) {
+ newgroup2[i] = newgroup[i];
+ MPI_Group_free (&newgroup2[i]);
+ }
+ }
+
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* group-no-error.c -- construct a group and free it */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/group-no-error.c,v 1.2 2003/01/13 18:31:48 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Group newgroup, newgroup2;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+ MPI_Comm_group (MPI_COMM_WORLD, &newgroup);
+ MPI_Group_free (&newgroup);
+ MPI_Barrier (MPI_COMM_WORLD);
+ /* now with an alias... */
+ MPI_Comm_group (MPI_COMM_WORLD, &newgroup);
+ newgroup2 = newgroup;
+ MPI_Group_free (&newgroup2);
+ MPI_Barrier (MPI_COMM_WORLD);
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* group-no-error2.c -- "correctly" construct many groups and free them */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/group-no-error2.c,v 1.1 2003/01/13 18:31:48 bronis Exp $";
+#endif
+
+
+/* NOTE: Some value of ITERATIONS will imply resource exhaustion */
+/* either in Umpire or MPI no matter how things are implemented */
+/* the best we can hope for is to fail gracefully... */
+/* UNKNOWN N breaks umpire due to running out of memory as of 12/20/02... */
+/* FAILURE IS NOT GRACEFUL AS OF THIS TIME... */
+#define ITERATIONS 100
+
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int i;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Group newgroup[ITERATIONS];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ MPI_Comm_group (MPI_COMM_WORLD, &newgroup[i]);
+ }
+
+ for (i = 0; i < ITERATIONS; i++) {
+ MPI_Group_free (&newgroup[i]);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* group-no-error3.c -- "correctly" construct many groups and free them */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/group-no-error3.c,v 1.1 2003/01/13 18:31:48 bronis Exp $";
+#endif
+
+
+/* NOTE: Some value of ITERATIONS will imply resource exhaustion */
+/* either in Umpire or MPI no matter how things are implemented */
+/* the best we can hope for is to fail gracefully... */
+/* UNKNOWN N breaks umpire due to running out of memory as of 12/20/02... */
+/* FAILURE IS NOT GRACEFUL AS OF THIS TIME... */
+#define ITERATIONS 100
+
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int i;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Group worldgroup, newgroup[ITERATIONS];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Comm_group (MPI_COMM_WORLD, &worldgroup);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ /* create groups that don't include the local rank... */
+ MPI_Group_excl (worldgroup, 1, &rank, &newgroup[i]);
+ }
+
+ for (i = 0; i < ITERATIONS; i++) {
+ MPI_Group_free (&newgroup[i]);
+ }
+
+ MPI_Group_free (&worldgroup);
+ MPI_Barrier (MPI_COMM_WORLD);
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* group-no-free-exhaustive.c -- use all group constructors without freeing */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/group-no-free-exhaustive.c,v 1.1 2002/05/29 16:09:49 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+
+#define GROUP_CONSTRUCTOR_COUNT 9
+#define INTERCOMM_CREATE_TAG 666
+
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int comm = MPI_COMM_WORLD;
+ char processor_name[128];
+ int namelen = 128;
+ int i;
+ int ranks[2], ranges[1][3];
+ MPI_Group newgroup[GROUP_CONSTRUCTOR_COUNT];
+ MPI_Group newgroup2[GROUP_CONSTRUCTOR_COUNT];
+ MPI_Comm temp;
+ MPI_Comm intercomm = MPI_COMM_NULL;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (comm, &nprocs);
+ MPI_Comm_rank (comm, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ ranks[0] = 0;
+ ranks[1] = 1;
+
+ ranges[0][0] = 0;
+ ranges[0][1] = 2;
+ ranges[0][2] = 2;
+
+ MPI_Barrier (comm);
+
+ if (nprocs < 3) {
+ printf ("requires at least 3 tasks\n");
+ }
+ else {
+ /* create the groups */
+ if (GROUP_CONSTRUCTOR_COUNT > 0)
+ MPI_Comm_group (MPI_COMM_WORLD, &newgroup[0]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 1)
+ MPI_Group_incl (newgroup[0], 2, ranks, &newgroup[1]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 2)
+ MPI_Group_excl (newgroup[0], 2, ranks, &newgroup[2]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 3)
+ MPI_Group_range_incl (newgroup[0], 1, ranges, &newgroup[3]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 4)
+ MPI_Group_range_excl (newgroup[0], 1, ranges, &newgroup[4]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 5)
+ MPI_Group_union (newgroup[1], newgroup[3], &newgroup[5]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 6)
+ MPI_Group_intersection (newgroup[5], newgroup[2], &newgroup[6]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 7)
+ MPI_Group_difference (newgroup[5], newgroup[2], &newgroup[7]);
+
+ if (GROUP_CONSTRUCTOR_COUNT > 8) {
+ /* need lots of stuff for this constructor... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 3, nprocs - rank, &temp);
+
+ if (rank % 3) {
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD,
+ (((nprocs % 3) == 2) && ((rank % 3) == 2)) ?
+ nprocs - 1 : nprocs - (rank % 3) - (nprocs % 3),
+ INTERCOMM_CREATE_TAG, &intercomm);
+
+ MPI_Comm_remote_group (intercomm, &newgroup[8]);
+
+ MPI_Comm_free (&intercomm);
+ }
+ else {
+ MPI_Comm_group (temp, &newgroup[8]);
+ }
+
+ MPI_Comm_free (&temp);
+ }
+ }
+
+ MPI_Barrier (comm);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* group-no-free.c -- construct a group without freeing */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/group-no-free.c,v 1.2 2003/01/13 18:31:48 bronis Exp $";
+#endif
+
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Group newgroup;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+ MPI_Comm_group (MPI_COMM_WORLD, &newgroup);
+ MPI_Barrier (MPI_COMM_WORLD);
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* group-no-free2.c -- construct many groups without freeing some */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/group-no-free2.c,v 1.1 2003/01/13 18:31:48 bronis Exp $";
+#endif
+
+
+/* NOTE: Some value of ITERATIONS will imply resource exhaustion */
+/* either in Umpire or MPI no matter how things are implemented */
+/* the best we can hope for is to fail gracefully... */
+/* UNKNOWN N breaks umpire due to running out of memory as of 12/20/02... */
+/* FAILURE IS NOT GRACEFUL AS OF THIS TIME... */
+#define ITERATIONS 100
+#define GROUPS_PER_ITERATION 3
+#define GROUPS_LOST_PER_ITERATION 1
+
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int i, j;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Group newgroup[GROUPS_PER_ITERATION];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < GROUPS_PER_ITERATION; j++) {
+ MPI_Comm_group (MPI_COMM_WORLD, &newgroup[j]);
+
+ if (j < GROUPS_PER_ITERATION - GROUPS_LOST_PER_ITERATION) {
+ MPI_Group_free (&newgroup[j]);
+ }
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* group-no-free3.c -- construct many groups without freeing some */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/group-no-free3.c,v 1.1 2003/01/13 18:31:48 bronis Exp $";
+#endif
+
+
+/* NOTE: Some value of ITERATIONS will imply resource exhaustion */
+/* either in Umpire or MPI no matter how things are implemented */
+/* the best we can hope for is to fail gracefully... */
+/* UNKNOWN N breaks umpire due to running out of memory as of 12/20/02... */
+/* FAILURE IS NOT GRACEFUL AS OF THIS TIME... */
+#define ITERATIONS 10
+#define GROUPS_PER_ITERATION 3
+#define GROUPS_LOST_PER_ITERATION 1
+
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int i, j;
+ char processor_name[128];
+ int namelen = 128;
+ MPI_Group worldgroup, newgroup[GROUPS_PER_ITERATION];
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Comm_group (MPI_COMM_WORLD, &worldgroup);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < GROUPS_PER_ITERATION; j++) {
+ /* create groups that don't include the local rank... */
+ MPI_Group_excl (worldgroup, 1, &rank, &newgroup[j]);
+
+ if (j < GROUPS_PER_ITERATION - GROUPS_LOST_PER_ITERATION) {
+ MPI_Group_free (&newgroup[j]);
+ }
+ }
+ }
+
+ MPI_Group_free (&worldgroup);
+ MPI_Barrier (MPI_COMM_WORLD);
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- C -*-
+
+ @PROLOGUE@
+
+ -----
+
+ Jeffrey Vetter vetter@llnl.gov
+ Center for Applied Scientific Computing, LLNL
+ 31 Oct 2000
+
+ hello.c -- simple hello world app
+
+ */
+
+#ifndef lint
+static char *rcsid = "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/hello.c,v 1.2 2000/12/04 19:09:46 bronis Exp $";
+#endif
+
+#include "mpi.h"
+
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ int recvbuf = 0;
+
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ printf ("MPI comm size is %d with rank %d executing\n", nprocs, rank);
+ MPI_Barrier (MPI_COMM_WORLD);
+ MPI_Reduce (&rank, &recvbuf, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
+ if (rank == 0)
+ {
+ printf ("Reduce max is %d\n", recvbuf);
+ }
+ MPI_Barrier (MPI_COMM_WORLD);
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+
+/* eof */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+#define INTERCOMM_CREATE_TAG 666
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm temp, intercomm;
+ int trank, tnprocs;
+ int drank, dnprocs, rleader, rnprocs;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* need to make split communicator temporarily... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &temp);
+
+ if (temp != MPI_COMM_NULL) {
+ MPI_Comm_size (temp, &tnprocs);
+ MPI_Comm_rank (temp, &trank);
+
+ /* create an intercommunicator temporarily so can merge it... */
+ rleader = ((rank + nprocs) % 2) ? nprocs - 2 : nprocs - 1;
+
+ if (trank == 1) {
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, temp, &status);
+ }
+
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD, rleader,
+ INTERCOMM_CREATE_TAG, &intercomm);
+
+ if (tnprocs > 1) {
+ if (trank == 0) {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, temp);
+ }
+ }
+ else {
+ printf ("(%d) Split communicator too small\n", rank);
+ }
+
+ MPI_Comm_free (&temp);
+
+ if (intercomm != MPI_COMM_NULL) {
+ MPI_Comm_size (intercomm, &dnprocs);
+ MPI_Comm_rank (intercomm, &drank);
+ MPI_Comm_remote_size (intercomm, &rnprocs);
+
+ if (rnprocs > drank) {
+ if (rank % 2) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, drank, 0, intercomm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, drank, 0, intercomm);
+ }
+ else {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, drank, 0, intercomm);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, drank, 0, intercomm, &status);
+ }
+ }
+ else {
+ printf ("(%d) Intercomm too small (lrank = %d; remote size = %d)\n",
+ rank, drank, rnprocs);
+ }
+
+ MPI_Comm_free (&intercomm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+ else {
+ printf ("(%d) MPI_Comm_split got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+#define INTERCOMM_CREATE_TAG 666
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm temp, intercomm;
+ int trank, tnprocs;
+ int drank, dnprocs, rleader, rnprocs;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 3) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* need to make split communicator temporarily... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &temp);
+
+ if (temp != MPI_COMM_NULL) {
+ MPI_Comm_size (temp, &tnprocs);
+ MPI_Comm_rank (temp, &trank);
+
+ /* create an intercommunicator temporarily so can merge it... */
+ rleader = ((rank + nprocs) % 2) ? nprocs - 2 : nprocs - 1;
+
+ if ((trank == 0) && (rank % 2)) {
+ MPI_Recv (buf0, buf_size, MPI_INT,
+ rleader, 0, MPI_COMM_WORLD, &status);
+ }
+
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD, rleader,
+ INTERCOMM_CREATE_TAG, &intercomm);
+
+ if ((trank == 0) && !(rank % 2)) {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, temp);
+ }
+ else {
+ printf ("(%d) Split communicator too small\n", rank);
+ }
+
+ MPI_Comm_free (&temp);
+
+ if (intercomm != MPI_COMM_NULL) {
+ MPI_Comm_size (intercomm, &dnprocs);
+ MPI_Comm_rank (intercomm, &drank);
+ MPI_Comm_remote_size (intercomm, &rnprocs);
+
+ if (rnprocs > drank) {
+ if (rank % 2) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, drank, 0, intercomm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, drank, 0, intercomm);
+ }
+ else {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, drank, 0, intercomm);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, drank, 0, intercomm, &status);
+ }
+ }
+ else {
+ printf ("(%d) Intercomm too small (lrank = %d; remote size = %d)\n",
+ rank, drank, rnprocs);
+ }
+
+ MPI_Comm_free (&intercomm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+ else {
+ printf ("(%d) MPI_Comm_split got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+#define INTERCOMM_CREATE_TAG 666
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm temp, intercomm;
+ int trank, tnprocs;
+ int drank, dnprocs, rleader, rnprocs;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 4) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* need to make split communicator temporarily... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &temp);
+
+ if (temp != MPI_COMM_NULL) {
+ MPI_Comm_size (temp, &tnprocs);
+ MPI_Comm_rank (temp, &trank);
+
+ /* create an intercommunicator temporarily so can merge it... */
+ rleader = ((rank + nprocs) % 2) ? nprocs - 2 : nprocs - 1;
+
+ if (rank == 1) {
+ MPI_Recv (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
+ }
+
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD, rleader,
+ INTERCOMM_CREATE_TAG, &intercomm);
+
+ if (rank == 0) {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Comm_free (&temp);
+
+ if (intercomm != MPI_COMM_NULL) {
+ MPI_Comm_size (intercomm, &dnprocs);
+ MPI_Comm_rank (intercomm, &drank);
+ MPI_Comm_remote_size (intercomm, &rnprocs);
+
+ if (rnprocs > drank) {
+ if (rank % 2) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, drank, 0, intercomm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, drank, 0, intercomm);
+ }
+ else {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, drank, 0, intercomm);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, drank, 0, intercomm, &status);
+ }
+ }
+ else {
+ printf ("(%d) Intercomm too small (lrank = %d; remote size = %d)\n",
+ rank, drank, rnprocs);
+ }
+
+ MPI_Comm_free (&intercomm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+ else {
+ printf ("(%d) MPI_Comm_split got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+#define INTERCOMM_CREATE_TAG 666
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm temp, intercomm;
+ int trank, tnprocs;
+ int drank, dnprocs, rleader, rnprocs;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 5) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* need to make split communicator temporarily... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &temp);
+
+ if (temp != MPI_COMM_NULL) {
+ MPI_Comm_size (temp, &tnprocs);
+ MPI_Comm_rank (temp, &trank);
+
+ /* create an intercommunicator temporarily so can merge it... */
+ rleader = ((rank + nprocs) % 2) ? nprocs - 2 : nprocs - 1;
+
+ if ((rank % 2) && (trank == 0)) {
+ MPI_Recv (buf0, buf_size, MPI_INT, 2, 0, MPI_COMM_WORLD, &status);
+ }
+ else if (rank == 0) {
+ MPI_Recv (buf0, buf_size, MPI_INT, 1, 0, MPI_COMM_WORLD, &status);
+ }
+
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD, rleader,
+ INTERCOMM_CREATE_TAG, &intercomm);
+
+ if (rank == 2) {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, rleader, 0, MPI_COMM_WORLD);
+ }
+ else if (rank == 1) {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ MPI_Comm_free (&temp);
+
+ if (intercomm != MPI_COMM_NULL) {
+ MPI_Comm_size (intercomm, &dnprocs);
+ MPI_Comm_rank (intercomm, &drank);
+ MPI_Comm_remote_size (intercomm, &rnprocs);
+
+ if (rnprocs > drank) {
+ if (rank % 2) {
+ memset (buf1, 1, buf_size);
+
+ MPI_Recv (buf0, buf_size, MPI_INT, drank, 0, intercomm, &status);
+
+ MPI_Send (buf1, buf_size, MPI_INT, drank, 0, intercomm);
+ }
+ else {
+ memset (buf0, 0, buf_size);
+
+ MPI_Send (buf0, buf_size, MPI_INT, drank, 0, intercomm);
+
+ MPI_Recv (buf1, buf_size, MPI_INT, drank, 0, intercomm, &status);
+ }
+ }
+ else {
+ printf ("(%d) Intercomm too small (lrank = %d; remote size = %d)\n",
+ rank, drank, rnprocs);
+ }
+
+ MPI_Comm_free (&intercomm);
+ }
+ else {
+ printf ("(%d) Got MPI_COMM_NULL\n", rank);
+ }
+ }
+ else {
+ printf ("(%d) MPI_Comm_split got MPI_COMM_NULL\n", rank);
+ }
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ MPI_Finalize ();
+ printf ("(%d) Finished normally\n", rank);
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) */
+
+/* intercomm_create-no-error.c -- create an intercommunicator correctly */
+
+#ifndef lint
+static char *rcsid =
+ "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/intercomm_create-no-error.c,v 1.1 2002/07/30 21:34:43 bronis Exp $";
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+
+#define INTERCOMM_CREATE_TAG 666
+
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int i;
+ MPI_Comm temp, intercomm = MPI_COMM_NULL;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 2) {
+ printf ("requires at least 2 tasks\n");
+ }
+ else {
+ /* need lots of stuff for this constructor... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &temp);
+
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD,
+ nprocs -
+ ((rank % 2) ? 2 - (nprocs % 2) : 1 + (nprocs % 2)),
+ INTERCOMM_CREATE_TAG, &intercomm);
+
+ MPI_Comm_free (&intercomm);
+
+ MPI_Comm_free (&temp);
+ }
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ printf ("(%d) Finished normally\n", rank);
+ MPI_Finalize ();
+}
+
+/* EOF */
--- /dev/null
+/* -*- Mode: C; -*- */
+/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Fri Mar 17 2000 */
+/* no-error.c -- do some MPI calls without any errors */
+
+#include <stdio.h>
+#include "mpi.h"
+
+#define buf_size 128
+
+#define INTERCOMM_CREATE_TAG 666
+
+int
+main (int argc, char **argv)
+{
+ int nprocs = -1;
+ int rank = -1;
+ char processor_name[128];
+ int namelen = 128;
+ int buf0[buf_size];
+ int buf1[buf_size];
+ MPI_Status status;
+ MPI_Comm temp, intercomm, intercomm2, comm, comm2;
+ int trank, tnprocs;
+ int drank, dnprocs, rleader;
+
+ /* init */
+ MPI_Init (&argc, &argv);
+ MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+ MPI_Get_processor_name (processor_name, &namelen);
+ printf ("(%d) is alive on %s\n", rank, processor_name);
+ fflush (stdout);
+
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ if (nprocs < 4) {
+ printf ("not enough tasks\n");
+ }
+ else {
+ /* need to make split communicator temporarily... */
+ MPI_Comm_split (MPI_COMM_WORLD, rank % 2, nprocs - rank, &temp);
+
+ if (temp != MPI_COMM_NULL) {
+ /* create an intercommunicator temporarily so can merge it... */
+ rleader = ((rank + nprocs) % 2) ? nprocs - 2 : nprocs - 1;
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD, rleader,
+ INTERCOMM_CREATE_TAG, &intercomm);
+ MPI_Comm_free (&temp);
+
+ if (intercomm != MPI_COMM_NULL) {
+ /* need to make a different split communicator temporarily... */
+ MPI_Comm_split (MPI_COMM_WORLD,
+ rank < nprocs/2, nprocs - rank, &temp);
+
+ if (temp != MPI_COMM_NULL) {
+ /* create another intercommunicator temporarily to merge... */
+ rleader = (rank < nprocs/2) ? nprocs - 1 : nprocs/2 - 1;
+ MPI_Intercomm_create (temp, 0, MPI_COMM_WORLD, rleader,
+ INTERCOMM_CREATE_TAG, &intercomm2);
+ MPI_Comm_free (&temp);
+
+ if (intercomm2 != MPI_COMM_NULL) {
+ if (rank < nprocs/2) {
+ MPI_Intercomm_merge (intercomm2, rank < nprocs/2, &comm2);
+ MPI_Comm_free (&intercomm2);
+ MPI_Intercomm_merge (intercomm, rank % 2, &comm);
+ MPI_Comm_free (&intercomm);
+ }
+ else {
+ MPI_Intercomm_merge (intercomm, rank % 2, &comm);
+ MPI_Comm_free (&intercomm);
+ MPI_Intercomm_merge (intercomm2, rank < nprocs/2, &comm2);
+ MPI_Comm_free (&intercomm2);
+ }
+
+ if ((comm != MPI_COMM_NULL) && (comm2 != MPI_COMM_NULL)) {<